From 24fdbe7d7e6fa74684ea929e5ec4585091e7ba9d Mon Sep 17 00:00:00 2001 From: Benoit Sigoure Date: Wed, 5 May 2010 15:27:53 -0700 Subject: [PATCH] HBASE-2518 Kill all trailing whitespaces in core/src. --- .../main/java/org/apache/hadoop/hbase/Chore.java | 10 +- .../org/apache/hadoop/hbase/ClusterStatus.java | 2 +- .../apache/hadoop/hbase/HBaseConfiguration.java | 24 +- .../org/apache/hadoop/hbase/HColumnDescriptor.java | 38 +- .../java/org/apache/hadoop/hbase/HConstants.java | 100 +- .../main/java/org/apache/hadoop/hbase/HMerge.java | 52 +- .../main/java/org/apache/hadoop/hbase/HMsg.java | 36 +- .../java/org/apache/hadoop/hbase/HRegionInfo.java | 44 +- .../org/apache/hadoop/hbase/HRegionLocation.java | 6 +- .../org/apache/hadoop/hbase/HServerAddress.java | 24 +- .../java/org/apache/hadoop/hbase/HServerInfo.java | 28 +- .../java/org/apache/hadoop/hbase/HServerLoad.java | 14 +- .../org/apache/hadoop/hbase/HTableDescriptor.java | 28 +- .../java/org/apache/hadoop/hbase/KeyValue.java | 178 ++-- .../org/apache/hadoop/hbase/LeaseListener.java | 4 +- .../main/java/org/apache/hadoop/hbase/Leases.java | 34 +- .../org/apache/hadoop/hbase/LocalHBaseCluster.java | 8 +- .../hadoop/hbase/NotServingRegionException.java | 2 +- .../hadoop/hbase/RemoteExceptionHandler.java | 18 +- .../apache/hadoop/hbase/TableExistsException.java | 2 +- .../hadoop/hbase/ValueOverMaxLengthException.java | 2 +- .../org/apache/hadoop/hbase/VersionAnnotation.java | 10 +- .../org/apache/hadoop/hbase/client/Delete.java | 46 +- .../java/org/apache/hadoop/hbase/client/Get.java | 16 +- .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 98 +- .../apache/hadoop/hbase/client/HConnection.java | 52 +- .../hadoop/hbase/client/HConnectionManager.java | 124 ++-- .../org/apache/hadoop/hbase/client/HTable.java | 10 +- .../apache/hadoop/hbase/client/HTableFactory.java | 10 +- .../hadoop/hbase/client/HTableInterface.java | 2 +- .../hbase/client/HTableInterfaceFactory.java | 4 +- .../org/apache/hadoop/hbase/client/HTablePool.java | 14 +- .../apache/hadoop/hbase/client/MetaScanner.java | 16 +- .../org/apache/hadoop/hbase/client/MultiPut.java | 2 +- .../hadoop/hbase/client/MultiPutResponse.java | 2 +- .../java/org/apache/hadoop/hbase/client/Put.java | 126 ++-- .../org/apache/hadoop/hbase/client/Result.java | 78 +- .../apache/hadoop/hbase/client/ResultScanner.java | 8 +- .../hbase/client/RetriesExhaustedException.java | 10 +- .../org/apache/hadoop/hbase/client/RowLock.java | 2 +- .../java/org/apache/hadoop/hbase/client/Scan.java | 60 +- .../hadoop/hbase/client/ScannerCallable.java | 10 +- .../hbase/client/ScannerTimeoutException.java | 2 +- .../apache/hadoop/hbase/client/ServerCallable.java | 8 +- .../hadoop/hbase/client/ServerConnection.java | 4 +- .../hbase/client/UnmodifyableHRegionInfo.java | 4 +- .../hbase/client/UnmodifyableHTableDescriptor.java | 8 +- .../hadoop/hbase/filter/BinaryComparator.java | 2 +- .../hbase/filter/BinaryPrefixComparator.java | 4 +- .../hbase/filter/ColumnPaginationFilter.java | 6 +- .../apache/hadoop/hbase/filter/CompareFilter.java | 14 +- .../org/apache/hadoop/hbase/filter/Filter.java | 2 +- .../org/apache/hadoop/hbase/filter/FilterList.java | 16 +- .../hadoop/hbase/filter/InclusiveStopFilter.java | 2 +- .../org/apache/hadoop/hbase/filter/PageFilter.java | 2 +- .../hadoop/hbase/filter/QualifierFilter.java | 6 +- .../org/apache/hadoop/hbase/filter/RowFilter.java | 6 +- .../filter/SingleColumnValueExcludeFilter.java | 4 +- .../hbase/filter/SingleColumnValueFilter.java | 26 +- 
.../org/apache/hadoop/hbase/filter/SkipFilter.java | 4 +- .../hadoop/hbase/filter/SubstringComparator.java | 2 +- .../apache/hadoop/hbase/filter/ValueFilter.java | 6 +- .../hadoop/hbase/filter/WhileMatchFilter.java | 2 +- .../apache/hadoop/hbase/filter/package-info.java | 2 +- .../apache/hadoop/hbase/io/CodeToClassAndBack.java | 6 +- .../apache/hadoop/hbase/io/HalfHFileReader.java | 8 +- .../apache/hadoop/hbase/io/HbaseMapWritable.java | 14 +- .../hadoop/hbase/io/HbaseObjectWritable.java | 48 +- .../java/org/apache/hadoop/hbase/io/HeapSize.java | 4 +- .../hadoop/hbase/io/ImmutableBytesWritable.java | 36 +- .../java/org/apache/hadoop/hbase/io/Reference.java | 6 +- .../java/org/apache/hadoop/hbase/io/TimeRange.java | 30 +- .../apache/hadoop/hbase/io/hfile/BlockCache.java | 4 +- .../apache/hadoop/hbase/io/hfile/CachedBlock.java | 30 +- .../hadoop/hbase/io/hfile/CachedBlockQueue.java | 18 +- .../apache/hadoop/hbase/io/hfile/Compression.java | 4 +- .../org/apache/hadoop/hbase/io/hfile/HFile.java | 134 ++-- .../apache/hadoop/hbase/io/hfile/HFileScanner.java | 8 +- .../hadoop/hbase/io/hfile/LruBlockCache.java | 200 ++-- .../hadoop/hbase/io/hfile/SimpleBlockCache.java | 8 +- .../org/apache/hadoop/hbase/ipc/HBaseClient.java | 128 ++-- .../java/org/apache/hadoop/hbase/ipc/HBaseRPC.java | 56 +- .../hadoop/hbase/ipc/HBaseRPCProtocolVersion.java | 2 +- .../hadoop/hbase/ipc/HBaseRPCStatistics.java | 2 +- .../apache/hadoop/hbase/ipc/HBaseRpcMetrics.java | 12 +- .../org/apache/hadoop/hbase/ipc/HBaseServer.java | 222 +++--- .../apache/hadoop/hbase/ipc/HMasterInterface.java | 20 +- .../hadoop/hbase/ipc/HMasterRegionInterface.java | 12 +- .../apache/hadoop/hbase/ipc/HRegionInterface.java | 56 +- .../hadoop/hbase/mapred/TableRecordReader.java | 4 +- .../hadoop/hbase/mapred/TableRecordReaderImpl.java | 2 +- .../org/apache/hadoop/hbase/mapreduce/Driver.java | 2 +- .../org/apache/hadoop/hbase/mapreduce/Export.java | 6 +- .../hbase/mapreduce/GroupingTableMapper.java | 38 +- .../hadoop/hbase/mapreduce/HFileOutputFormat.java | 4 +- .../hadoop/hbase/mapreduce/HRegionPartitioner.java | 22 +- .../hbase/mapreduce/IdentityTableMapper.java | 14 +- .../hbase/mapreduce/IdentityTableReducer.java | 38 +- .../org/apache/hadoop/hbase/mapreduce/Import.java | 10 +- .../hbase/mapreduce/MultiTableOutputFormat.java | 4 +- .../apache/hadoop/hbase/mapreduce/RowCounter.java | 18 +- .../mapreduce/SimpleTotalOrderPartitioner.java | 2 +- .../hadoop/hbase/mapreduce/TableInputFormat.java | 30 +- .../hbase/mapreduce/TableInputFormatBase.java | 34 +- .../hadoop/hbase/mapreduce/TableMapReduceUtil.java | 56 +- .../apache/hadoop/hbase/mapreduce/TableMapper.java | 4 +- .../hadoop/hbase/mapreduce/TableOutputFormat.java | 40 +- .../hadoop/hbase/mapreduce/TableRecordReader.java | 26 +- .../hbase/mapreduce/TableRecordReaderImpl.java | 24 +- .../hadoop/hbase/mapreduce/TableReducer.java | 12 +- .../apache/hadoop/hbase/mapreduce/TableSplit.java | 38 +- .../hadoop/hbase/mapreduce/package-info.java | 6 +- .../org/apache/hadoop/hbase/master/AddColumn.java | 4 +- .../apache/hadoop/hbase/master/BaseScanner.java | 58 +- .../hadoop/hbase/master/ChangeTableState.java | 4 +- .../hadoop/hbase/master/ColumnOperation.java | 4 +- .../apache/hadoop/hbase/master/DeleteColumn.java | 4 +- .../org/apache/hadoop/hbase/master/HMaster.java | 54 +- .../org/apache/hadoop/hbase/master/MetaRegion.java | 6 +- .../apache/hadoop/hbase/master/MetaScanner.java | 20 +- .../apache/hadoop/hbase/master/ModifyColumn.java | 8 +- .../hadoop/hbase/master/ModifyTableMeta.java | 4 +- 
.../hadoop/hbase/master/ProcessRegionClose.java | 8 +- .../hadoop/hbase/master/ProcessRegionOpen.java | 4 +- .../hbase/master/ProcessRegionStatusChange.java | 4 +- .../hadoop/hbase/master/ProcessServerShutdown.java | 20 +- .../apache/hadoop/hbase/master/RegionManager.java | 254 +++--- .../hadoop/hbase/master/RegionServerOperation.java | 14 +- .../hbase/master/RegionServerOperationQueue.java | 4 +- .../hbase/master/RetryableMetaOperation.java | 10 +- .../apache/hadoop/hbase/master/RootScanner.java | 2 +- .../apache/hadoop/hbase/master/ServerManager.java | 56 +- .../apache/hadoop/hbase/master/TableDelete.java | 6 +- .../hbase/master/ZKMasterAddressWatcher.java | 2 +- .../hadoop/hbase/master/metrics/MasterMetrics.java | 14 +- .../hbase/master/metrics/MasterStatistics.java | 2 +- .../hadoop/hbase/metrics/MetricsMBeanBase.java | 40 +- .../apache/hadoop/hbase/metrics/MetricsRate.java | 20 +- .../metrics/file/TimeStampingFileContext.java | 2 +- .../hadoop/hbase/regionserver/ColumnCount.java | 20 +- .../hadoop/hbase/regionserver/ColumnTracker.java | 12 +- .../hbase/regionserver/CompactSplitThread.java | 34 +- .../hadoop/hbase/regionserver/DeleteCompare.java | 10 +- .../hadoop/hbase/regionserver/DeleteTracker.java | 18 +- .../hbase/regionserver/ExplicitColumnTracker.java | 14 +- .../hadoop/hbase/regionserver/FlushRequester.java | 2 +- .../regionserver/GetClosestRowBeforeTracker.java | 4 +- .../hbase/regionserver/GetDeleteTracker.java | 14 +- .../apache/hadoop/hbase/regionserver/HRegion.java | 328 ++++---- .../hadoop/hbase/regionserver/HRegionServer.java | 194 ++-- .../hadoop/hbase/regionserver/InternalScanner.java | 8 +- .../hadoop/hbase/regionserver/KeyValueHeap.java | 16 +- .../hadoop/hbase/regionserver/KeyValueScanner.java | 8 +- .../hbase/regionserver/KeyValueSkipListSet.java | 2 +- .../hadoop/hbase/regionserver/LogRoller.java | 4 +- .../hadoop/hbase/regionserver/LruHashMap.java | 140 ++-- .../apache/hadoop/hbase/regionserver/MemStore.java | 42 +- .../hadoop/hbase/regionserver/MemStoreFlusher.java | 36 +- .../hadoop/hbase/regionserver/QueryMatcher.java | 74 +- .../regionserver/RegionServerRunningException.java | 2 +- .../hbase/regionserver/ScanDeleteTracker.java | 10 +- .../hbase/regionserver/ScanQueryMatcher.java | 20 +- .../regionserver/ScanWildcardColumnTracker.java | 8 +- .../apache/hadoop/hbase/regionserver/Store.java | 112 ++-- .../hadoop/hbase/regionserver/StoreFile.java | 24 +- .../hbase/regionserver/StoreFileGetScan.java | 12 +- .../hbase/regionserver/StoreFileScanner.java | 16 +- .../hadoop/hbase/regionserver/StoreScanner.java | 8 +- .../hbase/regionserver/WildcardColumnTracker.java | 42 +- .../regionserver/metrics/RegionServerMetrics.java | 22 +- .../metrics/RegionServerStatistics.java | 4 +- .../regionserver/wal/FailedLogCloseException.java | 2 +- .../apache/hadoop/hbase/regionserver/wal/HLog.java | 56 +- .../hadoop/hbase/regionserver/wal/HLogKey.java | 20 +- .../regionserver/wal/SequenceFileLogReader.java | 10 +- .../regionserver/wal/SequenceFileLogWriter.java | 4 +- .../hadoop/hbase/regionserver/wal/WALEdit.java | 30 +- .../apache/hadoop/hbase/thrift/ThriftServer.java | 112 ++-- .../hadoop/hbase/thrift/ThriftUtilities.java | 18 +- .../hbase/thrift/generated/AlreadyExists.java | 6 +- .../hbase/thrift/generated/BatchMutation.java | 12 +- .../hbase/thrift/generated/ColumnDescriptor.java | 38 +- .../hadoop/hbase/thrift/generated/Hbase.java | 978 ++++++++++---------- .../hadoop/hbase/thrift/generated/IOError.java | 6 +- .../hbase/thrift/generated/IllegalArgument.java | 6 +- 
.../hadoop/hbase/thrift/generated/Mutation.java | 14 +- .../hadoop/hbase/thrift/generated/TCell.java | 10 +- .../hadoop/hbase/thrift/generated/TRegionInfo.java | 22 +- .../hadoop/hbase/thrift/generated/TRowResult.java | 14 +- .../java/org/apache/hadoop/hbase/util/Base64.java | 162 ++-- .../org/apache/hadoop/hbase/util/ClassSize.java | 112 ++-- .../java/org/apache/hadoop/hbase/util/FSUtils.java | 62 +- .../java/org/apache/hadoop/hbase/util/Hash.java | 14 +- .../org/apache/hadoop/hbase/util/InfoServer.java | 10 +- .../apache/hadoop/hbase/util/JVMClusterUtil.java | 8 +- .../org/apache/hadoop/hbase/util/JenkinsHash.java | 50 +- .../java/org/apache/hadoop/hbase/util/Keying.java | 14 +- .../java/org/apache/hadoop/hbase/util/Merge.java | 50 +- .../org/apache/hadoop/hbase/util/MetaUtils.java | 60 +- .../org/apache/hadoop/hbase/util/MurmurHash.java | 6 +- .../java/org/apache/hadoop/hbase/util/Pair.java | 4 +- .../java/org/apache/hadoop/hbase/util/Sleeper.java | 8 +- .../org/apache/hadoop/hbase/util/SoftValueMap.java | 16 +- .../hadoop/hbase/util/SoftValueSortedMap.java | 24 +- .../java/org/apache/hadoop/hbase/util/Strings.java | 2 +- .../java/org/apache/hadoop/hbase/util/Threads.java | 4 +- .../org/apache/hadoop/hbase/util/VersionInfo.java | 14 +- .../org/apache/hadoop/hbase/util/Writables.java | 4 +- .../hadoop/hbase/zookeeper/ZooKeeperWrapper.java | 34 +- core/src/main/resources/hbase-default.xml | 20 +- .../apache/hadoop/hbase/AbstractMergeTestBase.java | 28 +- .../apache/hadoop/hbase/HBaseClusterTestCase.java | 22 +- .../org/apache/hadoop/hbase/HBaseTestCase.java | 122 ++-- .../apache/hadoop/hbase/HBaseTestingUtility.java | 68 +- .../hadoop/hbase/HFilePerformanceEvaluation.java | 86 +- .../hadoop/hbase/MapFilePerformanceEvaluation.java | 84 +- .../org/apache/hadoop/hbase/MiniHBaseCluster.java | 6 +- .../org/apache/hadoop/hbase/MultiRegionTable.java | 12 +- .../apache/hadoop/hbase/PerformanceEvaluation.java | 196 ++-- .../hadoop/hbase/PerformanceEvaluationCommons.java | 6 +- .../java/org/apache/hadoop/hbase/TestCompare.java | 2 +- .../java/org/apache/hadoop/hbase/TestHMsg.java | 2 +- .../org/apache/hadoop/hbase/TestInfoServers.java | 6 +- .../java/org/apache/hadoop/hbase/TestKeyValue.java | 26 +- .../org/apache/hadoop/hbase/TestMergeMeta.java | 4 +- .../apache/hadoop/hbase/TestRegionRebalancing.java | 64 +- .../hadoop/hbase/TestScanMultipleVersions.java | 24 +- .../org/apache/hadoop/hbase/TestSerialization.java | 164 ++-- .../org/apache/hadoop/hbase/TimestampTestBase.java | 38 +- .../org/apache/hadoop/hbase/client/TestAdmin.java | 4 +- .../hadoop/hbase/client/TestFromClientSide.java | 1026 ++++++++++---------- .../hadoop/hbase/client/TestGetRowVersions.java | 4 +- .../apache/hadoop/hbase/client/TestHTablePool.java | 28 +- .../apache/hadoop/hbase/client/TestTimestamp.java | 8 +- .../hbase/filter/TestColumnPaginationFilter.java | 24 +- .../org/apache/hadoop/hbase/filter/TestFilter.java | 232 +++--- .../apache/hadoop/hbase/filter/TestFilterList.java | 12 +- .../apache/hadoop/hbase/filter/TestPageFilter.java | 14 +- .../filter/TestSingleColumnValueExcludeFilter.java | 12 +- .../hbase/filter/TestSingleColumnValueFilter.java | 22 +- .../hadoop/hbase/io/TestHbaseObjectWritable.java | 4 +- .../org/apache/hadoop/hbase/io/TestHeapSize.java | 54 +- .../apache/hadoop/hbase/io/hfile/KVGenerator.java | 12 +- .../apache/hadoop/hbase/io/hfile/KeySampler.java | 6 +- .../apache/hadoop/hbase/io/hfile/NanoTimer.java | 30 +- .../hadoop/hbase/io/hfile/RandomDistribution.java | 28 +- 
.../apache/hadoop/hbase/io/hfile/RandomSeek.java | 16 +- .../hbase/io/hfile/TestCachedBlockQueue.java | 36 +- .../apache/hadoop/hbase/io/hfile/TestHFile.java | 18 +- .../hbase/io/hfile/TestHFilePerformance.java | 2 +- .../hadoop/hbase/io/hfile/TestHFileSeek.java | 26 +- .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 246 +++--- .../apache/hadoop/hbase/io/hfile/TestSeekTo.java | 24 +- .../hbase/mapreduce/TestHFileOutputFormat.java | 8 +- .../hbase/mapreduce/TestTableInputFormatScan.java | 168 ++-- .../hadoop/hbase/mapreduce/TestTableMapReduce.java | 44 +- .../hbase/mapreduce/TestTimeRangeMapRed.java | 42 +- .../apache/hadoop/hbase/master/OOMEHMaster.java | 6 +- .../hbase/master/TestMasterTransistions.java | 16 +- .../hbase/master/TestMinimumServerCount.java | 2 +- .../hadoop/hbase/master/TestRegionManager.java | 10 +- .../master/TestRegionServerOperationQueue.java | 2 +- .../hadoop/hbase/metrics/TestMetricsMBeanBase.java | 28 +- .../regionserver/DisabledTestRegionServerExit.java | 18 +- .../hbase/regionserver/OOMERegionServer.java | 4 +- .../hadoop/hbase/regionserver/TestCompaction.java | 16 +- .../hbase/regionserver/TestDeleteCompare.java | 34 +- .../regionserver/TestExplicitColumnTracker.java | 34 +- .../regionserver/TestGetClosestAtOrBefore.java | 44 +- .../hbase/regionserver/TestGetDeleteTracker.java | 130 ++-- .../hadoop/hbase/regionserver/TestHRegion.java | 348 ++++---- .../hbase/regionserver/TestKeyValueHeap.java | 38 +- .../hadoop/hbase/regionserver/TestMemStore.java | 94 +- .../hbase/regionserver/TestQueryMatcher.java | 58 +- .../hbase/regionserver/TestScanDeleteTracker.java | 42 +- .../TestScanWildcardColumnTracker.java | 36 +- .../hadoop/hbase/regionserver/TestScanner.java | 98 +- .../hadoop/hbase/regionserver/TestStore.java | 10 +- .../hadoop/hbase/regionserver/TestStoreFile.java | 12 +- .../hbase/regionserver/TestStoreScanner.java | 14 +- .../hadoop/hbase/regionserver/TestWideScanner.java | 4 +- .../regionserver/TestWildcardColumnTracker.java | 108 +- .../hadoop/hbase/regionserver/wal/TestHLog.java | 6 +- .../hbase/regionserver/wal/TestLogRolling.java | 20 +- .../hadoop/hbase/thrift/TestThriftServer.java | 68 +- .../hadoop/hbase/util/SoftValueSortedMapTest.java | 4 +- .../org/apache/hadoop/hbase/util/TestBase64.java | 2 +- .../org/apache/hadoop/hbase/util/TestBytes.java | 8 +- .../org/apache/hadoop/hbase/util/TestKeying.java | 2 +- .../apache/hadoop/hbase/util/TestMergeTool.java | 46 +- .../org/apache/hadoop/hbase/util/TestRootPath.java | 2 +- .../hadoop/hbase/zookeeper/TestHQuorumPeer.java | 2 +- core/src/test/resources/log4j.properties | 2 +- 293 files changed, 5695 insertions(+), 5695 deletions(-) diff --git a/core/src/main/java/org/apache/hadoop/hbase/Chore.java b/core/src/main/java/org/apache/hadoop/hbase/Chore.java index 48fde89..5b425ff 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/Chore.java +++ b/core/src/main/java/org/apache/hadoop/hbase/Chore.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.util.Sleeper; * If an unhandled exception, the threads exit is logged. * Implementers just need to add checking if there is work to be done and if * so, do it. Its the base of most of the chore threads in hbase. - * + * * Don't subclass Chore if the task relies on being woken up for something to * do, such as an entry being added to a queue, etc. 
*/ @@ -39,7 +39,7 @@ public abstract class Chore extends Thread { private final Log LOG = LogFactory.getLog(this.getClass()); private final Sleeper sleeper; protected volatile AtomicBoolean stop; - + /** * @param p Period at which we should run. Will be adjusted appropriately * should we find work and it takes time to complete. @@ -82,7 +82,7 @@ public abstract class Chore extends Thread { LOG.info(getName() + " exiting"); } } - + /** * If the thread is currently sleeping, trigger the core to happen immediately. * If it's in the middle of its operation, will begin another operation @@ -91,7 +91,7 @@ public abstract class Chore extends Thread { public void triggerNow() { this.sleeper.skipSleepCycle(); } - + /** * Override to run a task before we start looping. * @return true if initial chore was successful @@ -100,7 +100,7 @@ public abstract class Chore extends Thread { // Default does nothing. return true; } - + /** * Look for chores. If any found, do them else just return. */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/core/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index bced386..555771d 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/core/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -176,7 +176,7 @@ public class ClusterStatus extends VersionedWritable { /** * Returns detailed region server information: A list of * {@link HServerInfo}, containing server load and resource usage - * statistics as {@link HServerLoad}, containing per-region + * statistics as {@link HServerLoad}, containing per-region * statistics as {@link HServerLoad.RegionLoad}. * @return region server information */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/core/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java index 48cd7c5..80ce666 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java +++ b/core/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java @@ -30,11 +30,11 @@ import org.apache.hadoop.conf.Configuration; * Adds HBase configuration files to a Configuration */ public class HBaseConfiguration extends Configuration { - + private static final Log LOG = LogFactory.getLog(HBaseConfiguration.class); - + /** - * Instantinating HBaseConfiguration() is deprecated. Please use + * Instantinating HBaseConfiguration() is deprecated. Please use * HBaseConfiguration#create() to construct a plain Configuration */ @Deprecated @@ -45,9 +45,9 @@ public class HBaseConfiguration extends Configuration { LOG.warn("instantinating HBaseConfiguration() is deprecated. Please use" + " HBaseConfiguration#create() to construct a plain Configuration"); } - + /** - * Instantiating HBaseConfiguration() is deprecated. Please use + * Instantiating HBaseConfiguration() is deprecated. Please use * HBaseConfiguration#create(conf) to construct a plain Configuration */ @Deprecated @@ -58,13 +58,13 @@ public class HBaseConfiguration extends Configuration { set(e.getKey(), e.getValue()); } } - + public static Configuration addHbaseResources(Configuration conf) { conf.addResource("hbase-default.xml"); conf.addResource("hbase-site.xml"); return conf; } - + /** * Creates a Configuration with HBase resources * @return a Configuration with HBase resources @@ -73,11 +73,11 @@ public class HBaseConfiguration extends Configuration { Configuration conf = new Configuration(); return addHbaseResources(conf); } - + /** * Creates a clone of passed configuration. 
* @param that Configuration to clone. - * @return a Configuration created with the hbase-*.xml files plus + * @return a Configuration created with the hbase-*.xml files plus * the given configuration. */ public static Configuration create(final Configuration that) { @@ -87,11 +87,11 @@ public class HBaseConfiguration extends Configuration { } return conf; } - + /** * Returns the hash code value for this HBaseConfiguration. The hash code of a * HBaseConfiguration is defined by the xor of the hash codes of its entries. - * + * * @see Configuration#iterator() How the entries are obtained. */ @Override @@ -103,7 +103,7 @@ public class HBaseConfiguration extends Configuration { /** * Returns the hash code value for this HBaseConfiguration. The hash code of a * Configuration is defined by the xor of the hash codes of its entries. - * + * * @see Configuration#iterator() How the entries are obtained. */ public static int hashCode(Configuration conf) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/core/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index 420fa07..2a0be34 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/core/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -36,7 +36,7 @@ import org.apache.hadoop.io.WritableComparable; /** * An HColumnDescriptor contains information about a column family such as the * number of versions, compression settings, etc. - * + * * It is used as input when creating a table or adding a column. Once set, the * parameters that specify a column cannot be changed without deleting the * column and recreating it. If there is data stored in the column, it will be @@ -52,7 +52,7 @@ public class HColumnDescriptor implements WritableComparable // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217) private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)7; - /** + /** * The type of compression. * @see org.apache.hadoop.io.SequenceFile.Writer * @deprecated Compression now means which compression library @@ -61,7 +61,7 @@ public class HColumnDescriptor implements WritableComparable @Deprecated public static enum CompressionType { /** Do not compress records. */ - NONE, + NONE, /** Compress values only, each separately. */ RECORD, /** Compress sequences of records together in blocks. */ @@ -116,7 +116,7 @@ public class HColumnDescriptor implements WritableComparable * Default setting for whether or not to use bloomfilters. */ public static final boolean DEFAULT_BLOOMFILTER = false; - + /** * Default time to live of cell contents. */ @@ -147,20 +147,20 @@ public class HColumnDescriptor implements WritableComparable } /** - * Construct a column descriptor specifying only the family name + * Construct a column descriptor specifying only the family name * The other attributes are defaulted. - * + * * @param familyName Column family name. Must be 'printable' -- digit or * letter -- and may not contain a : */ public HColumnDescriptor(final String familyName) { this(Bytes.toBytes(familyName)); } - + /** - * Construct a column descriptor specifying only the family name + * Construct a column descriptor specifying only the family name * The other attributes are defaulted. - * + * * @param familyName Column family name. Must be 'printable' -- digit or * letter -- and may not contain a : */ @@ -173,7 +173,7 @@ public class HColumnDescriptor implements WritableComparable /** * Constructor. - * Makes a deep copy of the supplied descriptor. 
+ * Makes a deep copy of the supplied descriptor. * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor. * @param desc The descriptor. */ @@ -198,8 +198,8 @@ public class HColumnDescriptor implements WritableComparable * @param timeToLive Time-to-live of cell contents, in seconds * (use HConstants.FOREVER for unlimited TTL) * @param bloomFilter Enable the specified bloom filter for this column - * - * @throws IllegalArgumentException if passed a family name that is made of + * + * @throws IllegalArgumentException if passed a family name that is made of * other than 'word' characters: i.e. [a-zA-Z_0-9] or contains * a : * @throws IllegalArgumentException if the number of versions is <= 0 @@ -211,7 +211,7 @@ public class HColumnDescriptor implements WritableComparable this(familyName, maxVersions, compression, inMemory, blockCacheEnabled, DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE); } - + /** * Constructor * @param familyName Column family name. Must be 'printable' -- digit or @@ -226,8 +226,8 @@ public class HColumnDescriptor implements WritableComparable * (use HConstants.FOREVER for unlimited TTL) * @param bloomFilter Enable the specified bloom filter for this column * @param scope The scope tag for this column - * - * @throws IllegalArgumentException if passed a family name that is made of + * + * @throws IllegalArgumentException if passed a family name that is made of * other than 'word' characters: i.e. [a-zA-Z_0-9] or contains * a : * @throws IllegalArgumentException if the number of versions is <= 0 @@ -287,7 +287,7 @@ public class HColumnDescriptor implements WritableComparable public byte [] getName() { return name; } - + /** * @return Name of this column family */ @@ -353,7 +353,7 @@ public class HColumnDescriptor implements WritableComparable String n = getValue(COMPRESSION); return Compression.Algorithm.valueOf(n.toUpperCase()); } - + /** @return maximum number of versions */ public synchronized int getMaxVersions() { if (this.cachedMaxVersions == -1) { @@ -424,7 +424,7 @@ public class HColumnDescriptor implements WritableComparable return Boolean.valueOf(value).booleanValue(); return DEFAULT_IN_MEMORY; } - + /** * @param inMemory True if we are to keep all values in the HRegionServer * cache @@ -564,7 +564,7 @@ public class HColumnDescriptor implements WritableComparable result ^= values.hashCode(); return result; } - + // Writable public void readFields(DataInput in) throws IOException { diff --git a/core/src/main/java/org/apache/hadoop/hbase/HConstants.java b/core/src/main/java/org/apache/hadoop/hbase/HConstants.java index 40bb421..7924b94 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/core/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -36,12 +36,12 @@ public interface HConstants { //TODO: ZEROS is only used in HConnectionManager and MetaScanner. Move to // client package and change visibility to default static final String ZEROES = "00000000000000"; - + // For migration /** name of version file */ static final String VERSION_FILE_NAME = "hbase.version"; - + /** * Current version of file system. * Version 4 supports only one kind of bloom filter. @@ -51,17 +51,17 @@ public interface HConstants { */ // public static final String FILE_SYSTEM_VERSION = "6"; public static final String FILE_SYSTEM_VERSION = "7"; - + // Configuration parameters - + //TODO: Is having HBase homed on port 60k OK? 
- + /** Cluster is in distributed mode or not */ static final String CLUSTER_DISTRIBUTED = "hbase.cluster.distributed"; - + /** Cluster is standalone or pseudo-distributed */ static final String CLUSTER_IS_LOCAL = "false"; - + /** Cluster is fully-distributed */ static final String CLUSTER_IS_DISTRIBUTED = "true"; @@ -112,23 +112,23 @@ public interface HConstants { /** Parameter name for what region server interface to use. */ static final String REGION_SERVER_CLASS = "hbase.regionserver.class"; - + /** Parameter name for what region server implementation to use. */ static final String REGION_SERVER_IMPL= "hbase.regionserver.impl"; - + /** Default region server interface class name. */ static final String DEFAULT_REGION_SERVER_CLASS = HRegionInterface.class.getName(); /** Parameter name for how often threads should wake up */ static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency"; - + /** Parameter name for how often a region should should perform a major compaction */ static final String MAJOR_COMPACTION_PERIOD = "hbase.hregion.majorcompaction"; /** Parameter name for HBase instance root directory */ static final String HBASE_DIR = "hbase.rootdir"; - - /** Used to construct the name of the log directory for a region server + + /** Used to construct the name of the log directory for a region server * Use '.' as a special character to seperate the log files from table data */ static final String HREGION_LOGDIR_NAME = ".logs"; @@ -137,22 +137,22 @@ public interface HConstants { /** Name of old log file for reconstruction */ static final String HREGION_OLDLOGFILE_NAME = "oldlogfile.log"; - + /** Used to construct the name of the compaction directory during compaction */ static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir"; - + /** Default maximum file size */ static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024; - + /** Default size of a reservation block */ static final int DEFAULT_SIZE_RESERVATION_BLOCK = 1024 * 1024 * 5; /** Maximum value length, enforced on KeyValue construction */ static final int MAXIMUM_VALUE_LENGTH = Integer.MAX_VALUE; - + // Always store the location of the root table's HRegion. // This HRegion is never split. - + // region name = table + startkey + regionid. This is the row key. // each row in the root and meta tables describes exactly 1 region // Do we ever need to know all the information that we are storing? @@ -163,7 +163,7 @@ public interface HConstants { // "." (and since no other table name can start with either of these // characters, the root region will always be the first entry in such a Map, // followed by all the meta regions (which will be ordered by their starting - // row key as well), followed by all user tables. So when the Master is + // row key as well), followed by all user tables. So when the Master is // choosing regions to assign, it will always choose the root region first, // followed by the meta regions, followed by user regions. Since the root // and meta regions always need to be on-line, this ensures that they will @@ -174,68 +174,68 @@ public interface HConstants { // // New stuff. Making a slow transition. // - + /** The root table's name.*/ static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-"); /** The META table's name. 
*/ - static final byte [] META_TABLE_NAME = Bytes.toBytes(".META."); + static final byte [] META_TABLE_NAME = Bytes.toBytes(".META."); /** delimiter used between portions of a region name */ public static final int META_ROW_DELIMITER = ','; /** The catalog family as a string*/ static final String CATALOG_FAMILY_STR = "info"; - + /** The catalog family */ static final byte [] CATALOG_FAMILY = Bytes.toBytes(CATALOG_FAMILY_STR); - + /** The catalog historian family */ static final byte [] CATALOG_HISTORIAN_FAMILY = Bytes.toBytes("historian"); - + /** The regioninfo column qualifier */ static final byte [] REGIONINFO_QUALIFIER = Bytes.toBytes("regioninfo"); - + /** The server column qualifier */ static final byte [] SERVER_QUALIFIER = Bytes.toBytes("server"); - + /** The startcode column qualifier */ static final byte [] STARTCODE_QUALIFIER = Bytes.toBytes("serverstartcode"); - + /** The lower-half split region column qualifier */ static final byte [] SPLITA_QUALIFIER = Bytes.toBytes("splitA"); - + /** The upper-half split region column qualifier */ static final byte [] SPLITB_QUALIFIER = Bytes.toBytes("splitB"); - + // Other constants /** * An empty instance. */ static final byte [] EMPTY_BYTE_ARRAY = new byte [0]; - + /** * Used by scanners, etc when they want to start at the beginning of a region */ static final byte [] EMPTY_START_ROW = EMPTY_BYTE_ARRAY; - + /** * Last row in a table. */ static final byte [] EMPTY_END_ROW = EMPTY_START_ROW; - /** - * Used by scanners and others when they're trying to detect the end of a - * table + /** + * Used by scanners and others when they're trying to detect the end of a + * table */ static final byte [] LAST_ROW = EMPTY_BYTE_ARRAY; - + /** * Max length a row can have because of the limitation in TFile. */ static final int MAX_ROW_LENGTH = Short.MAX_VALUE; - + /** When we encode strings, we always specify UTF8 encoding */ static final String UTF8_ENCODING = "UTF-8"; @@ -250,18 +250,18 @@ public interface HConstants { * LATEST_TIMESTAMP in bytes form */ static final byte [] LATEST_TIMESTAMP_BYTES = Bytes.toBytes(LATEST_TIMESTAMP); - + /** * Define for 'return-all-versions'. */ static final int ALL_VERSIONS = Integer.MAX_VALUE; - + /** * Unlimited time-to-live. */ // static final int FOREVER = -1; static final int FOREVER = Integer.MAX_VALUE; - + /** * Seconds in a week */ @@ -279,20 +279,20 @@ public interface HConstants { static final String NAME = "NAME"; static final String VERSIONS = "VERSIONS"; static final String IN_MEMORY = "IN_MEMORY"; - + /** * This is a retry backoff multiplier table similar to the BSD TCP syn * backoff table, a bit more aggressive than simple exponential backoff. - */ + */ public static int RETRY_BACKOFF[] = { 1, 1, 1, 2, 2, 4, 4, 8, 16, 32 }; /** modifyTable op for replacing the table descriptor */ public static enum Modify { CLOSE_REGION, - TABLE_COMPACT, + TABLE_COMPACT, TABLE_FLUSH, TABLE_MAJOR_COMPACT, - TABLE_SET_HTD, + TABLE_SET_HTD, TABLE_SPLIT } @@ -313,32 +313,32 @@ public interface HConstants { * this value means it wasn't meant for replication. */ public static final byte DEFAULT_CLUSTER_ID = 0; - + /** * Parameter name for maximum number of bytes returned when calling a * scanner's next method. */ public static String HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY = "hbase.client.scanner.max.result.size"; - + /** * Maximum number of bytes returned when calling a scanner's next method. * Note that when a single row is larger than this limit the row is still * returned completely. 
- * + * * The default value is unlimited. */ public static long DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE = Long.MAX_VALUE; - - + + /** * HRegion server lease period in milliseconds. Clients must report in within this period * else they are considered dead. Unit measured in ms (milliseconds). */ public static String HBASE_REGIONSERVER_LEASE_PERIOD_KEY = "hbase.regionserver.lease.period"; - - + + /** - * Default value of {@link #HBASE_REGIONSERVER_LEASE_PERIOD_KEY}. + * Default value of {@link #HBASE_REGIONSERVER_LEASE_PERIOD_KEY}. */ public static long DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD = 60000; diff --git a/core/src/main/java/org/apache/hadoop/hbase/HMerge.java b/core/src/main/java/org/apache/hadoop/hbase/HMerge.java index 8dfbe07..80c3c64 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/HMerge.java +++ b/core/src/main/java/org/apache/hadoop/hbase/HMerge.java @@ -44,29 +44,29 @@ import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Writables; -/** +/** * A non-instantiable class that has a static method capable of compacting * a table by merging adjacent regions. */ class HMerge implements HConstants { static final Log LOG = LogFactory.getLog(HMerge.class); static final Random rand = new Random(); - + /* * Not instantiable */ private HMerge() { super(); } - + /** * Scans the table and merges two adjacent regions if they are small. This * only happens when a lot of rows are deleted. - * + * * When merging the META region, the HBase instance must be offline. * When merging a normal table, the HBase instance must be online, but the - * table must be disabled. - * + * table must be disabled. + * * @param conf - configuration object for HBase * @param fs - FileSystem where regions reside * @param tableName - Table to be compacted @@ -100,7 +100,7 @@ class HMerge implements HConstants { protected final HLog hlog; private final long maxFilesize; - + protected Merger(Configuration conf, FileSystem fs, final byte [] tableName) throws IOException { @@ -119,7 +119,7 @@ class HMerge implements HConstants { this.hlog = new HLog(fs, logdir, oldLogDir, conf, null); } - + void process() throws IOException { try { for(HRegionInfo[] regionsToMerge = next(); @@ -132,19 +132,19 @@ class HMerge implements HConstants { } finally { try { hlog.closeAndDelete(); - + } catch(IOException e) { LOG.error(e); } } } - + protected boolean merge(final HRegionInfo[] info) throws IOException { if(info.length < 2) { LOG.info("only one region - nothing to merge"); return false; } - + HRegion currentRegion = null; long currentSize = 0; HRegion nextRegion = null; @@ -183,13 +183,13 @@ class HMerge implements HConstants { } return true; } - + protected abstract HRegionInfo[] next() throws IOException; - + protected abstract void updateMeta(final byte [] oldRegion1, final byte [] oldRegion2, HRegion newRegion) throws IOException; - + } /** Instantiated to compact a normal user table */ @@ -198,7 +198,7 @@ class HMerge implements HConstants { private final HTable table; private final ResultScanner metaScanner; private HRegionInfo latestRegion; - + OnlineMerger(Configuration conf, FileSystem fs, final byte [] tableName) throws IOException { @@ -208,7 +208,7 @@ class HMerge implements HConstants { this.metaScanner = table.getScanner(CATALOG_FAMILY, REGIONINFO_QUALIFIER); this.latestRegion = null; } - + private HRegionInfo nextRegion() throws IOException { try { Result results = getMetaRow(); @@ -234,7 +234,7 @@ class HMerge implements 
HConstants { throw e; } } - + protected void checkOfflined(final HRegionInfo hri) throws TableNotDisabledException { if (!hri.isOffline()) { @@ -242,7 +242,7 @@ class HMerge implements HConstants { hri.getRegionNameAsString() + " is not disabled"); } } - + /* * Check current row has a HRegionInfo. Skip to next row if HRI is empty. * @return A Map of the row content else null if we are off the end. @@ -282,7 +282,7 @@ class HMerge implements HConstants { @Override protected void updateMeta(final byte [] oldRegion1, - final byte [] oldRegion2, + final byte [] oldRegion2, HRegion newRegion) throws IOException { byte[][] regionsToDelete = {oldRegion1, oldRegion2}; @@ -314,10 +314,10 @@ class HMerge implements HConstants { private static class OfflineMerger extends Merger { private final List metaRegions = new ArrayList(); private final HRegion root; - + OfflineMerger(Configuration conf, FileSystem fs) throws IOException { - + super(conf, fs, META_TABLE_NAME); Path rootTableDir = HTableDescriptor.getTableDir( @@ -325,16 +325,16 @@ class HMerge implements HConstants { ROOT_TABLE_NAME); // Scan root region to find all the meta regions - + root = new HRegion(rootTableDir, hlog, fs, conf, HRegionInfo.ROOT_REGIONINFO, null); root.initialize(null, null); Scan scan = new Scan(); scan.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER); - InternalScanner rootScanner = + InternalScanner rootScanner = root.getScanner(scan); - + try { List results = new ArrayList(); while(rootScanner.next(results)) { @@ -349,7 +349,7 @@ class HMerge implements HConstants { rootScanner.close(); try { root.close(); - + } catch(IOException e) { LOG.error(e); } @@ -384,7 +384,7 @@ class HMerge implements HConstants { delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER); root.delete(delete, null, true); - + if(LOG.isDebugEnabled()) { LOG.debug("updated columns in row: " + Bytes.toString(regionsToDelete[r])); } diff --git a/core/src/main/java/org/apache/hadoop/hbase/HMsg.java b/core/src/main/java/org/apache/hadoop/hbase/HMsg.java index 7905b65..0df8472 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/HMsg.java +++ b/core/src/main/java/org/apache/hadoop/hbase/HMsg.java @@ -27,9 +27,9 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; /** - * HMsg is for communicating instructions between the HMaster and the + * HMsg is for communicating instructions between the HMaster and the * HRegionServers. - * + * * Most of the time the messages are simple but some messages are accompanied * by the region affected. HMsg may also carry optional message. */ @@ -48,11 +48,11 @@ public class HMsg implements Writable { public static enum Type { /** null message */ MSG_NONE, - + // Message types sent from master to region server /** Start serving the specified region */ MSG_REGION_OPEN, - + /** Stop serving the specified region */ MSG_REGION_CLOSE, @@ -64,22 +64,22 @@ public class HMsg implements Writable { /** Region server is unknown to master. 
Restart */ MSG_CALL_SERVER_STARTUP, - + /** Master tells region server to stop */ MSG_REGIONSERVER_STOP, - + /** Stop serving the specified region and don't report back that it's * closed */ MSG_REGION_CLOSE_WITHOUT_REPORT, - + /** Stop serving user regions */ MSG_REGIONSERVER_QUIESCE, // Message types sent from the region server to the master /** region server is now serving the specified region */ MSG_REPORT_OPEN, - + /** region server is no longer serving the specified region */ MSG_REPORT_CLOSE, @@ -88,7 +88,7 @@ public class HMsg implements Writable { /** * Region server split the region associated with this message. - * + * * Note that this message is immediately followed by two MSG_REPORT_OPEN * messages, one for each of the new regions resulting from the split * @deprecated See MSG_REPORT_SPLIT_INCLUDES_DAUGHTERS @@ -97,7 +97,7 @@ public class HMsg implements Writable { /** * Region server is shutting down - * + * * Note that this message is followed by MSG_REPORT_CLOSE messages for each * region the region server was serving, unless it was told to quiesce. */ @@ -107,12 +107,12 @@ public class HMsg implements Writable { * regions */ MSG_REPORT_QUIESCED, - + /** * Flush */ MSG_REGION_FLUSH, - + /** * Run Major Compaction */ @@ -120,7 +120,7 @@ public class HMsg implements Writable { /** * Region server split the region associated with this message. - * + * * Its like MSG_REPORT_SPLIT only it carries the daughters in the message * rather than send them individually in MSG_REPORT_OPEN messages. */ @@ -152,7 +152,7 @@ public class HMsg implements Writable { public HMsg(final HMsg.Type type) { this(type, new HRegionInfo(), null); } - + /** * Construct a message with the specified message and HRegionInfo * @param type Message type @@ -164,7 +164,7 @@ public class HMsg implements Writable { /** * Construct a message with the specified message and HRegionInfo - * + * * @param type Message type * @param hri Region to which message type applies. Cannot be * null. If no info associated, used other Constructor. @@ -176,7 +176,7 @@ public class HMsg implements Writable { /** * Construct a message with the specified message and HRegionInfo - * + * * @param type Message type * @param hri Region to which message type applies. Cannot be * null. If no info associated, used other Constructor. 
@@ -210,7 +210,7 @@ public class HMsg implements Writable { public Type getType() { return this.type; } - + /** * @param other Message type to compare to * @return True if we are of same message type as other @@ -289,7 +289,7 @@ public class HMsg implements Writable { } return result; } - + // //////////////////////////////////////////////////////////////////////////// // Writable ////////////////////////////////////////////////////////////////////////////// diff --git a/core/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/core/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index 0aa0580..29b0cd6 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/core/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -79,7 +79,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable result ^= this.tableDesc.hashCode(); this.hashCode = result; } - + /** * Private constructor used constructing HRegionInfo for the catalog root and * first meta regions @@ -98,10 +98,10 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable super(); this.tableDesc = new HTableDescriptor(); } - + /** * Construct HRegionInfo with explicit parameters - * + * * @param tableDesc the table descriptor * @param startKey first key in region * @param endKey end of key range @@ -115,7 +115,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable /** * Construct HRegionInfo with explicit parameters - * + * * @param tableDesc the table descriptor * @param startKey first key in region * @param endKey end of key range @@ -131,7 +131,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable /** * Construct HRegionInfo with explicit parameters - * + * * @param tableDesc the table descriptor * @param startKey first key in region * @param endKey end of key range @@ -158,10 +158,10 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable this.tableDesc = tableDesc; setHashCode(); } - + /** * Costruct a copy of another HRegionInfo - * + * * @param other */ public HRegionInfo(HRegionInfo other) { @@ -177,7 +177,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable this.hashCode = other.hashCode(); this.encodedName = other.getEncodedName(); } - + private static byte [] createRegionName(final byte [] tableName, final byte [] startKey, final long regionid) { return createRegionName(tableName, startKey, Long.toString(regionid)); @@ -216,7 +216,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable System.arraycopy(id, 0, b, offset, id.length); return b; } - + /** * Separate elements of a regionName. 
* @param regionName @@ -246,11 +246,11 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; if(offset != tableName.length + 1) { startKey = new byte[offset - tableName.length - 1]; - System.arraycopy(regionName, tableName.length + 1, startKey, 0, + System.arraycopy(regionName, tableName.length + 1, startKey, 0, offset - tableName.length - 1); } byte [] id = new byte[regionName.length - offset - 1]; - System.arraycopy(regionName, offset + 1, id, 0, + System.arraycopy(regionName, offset + 1, id, 0, regionName.length - offset - 1); byte [][] elements = new byte[3][]; elements[0] = tableName; @@ -258,7 +258,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable elements[2] = id; return elements; } - + /** @return the endKey */ public byte [] getEndKey(){ return endKey; @@ -283,7 +283,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable public String getRegionNameAsString() { return this.regionNameStr; } - + /** @return the encoded region name */ public synchronized int getEncodedName() { if (this.encodedName == NO_HASH) { @@ -313,7 +313,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable public boolean isRootRegion() { return this.tableDesc.isRootRegion(); } - + /** @return true if this is the meta table */ public boolean isMetaTable() { return this.tableDesc.isMetaTable(); @@ -323,14 +323,14 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable public boolean isMetaRegion() { return this.tableDesc.isMetaRegion(); } - + /** * @return True if has been split and has daughters. */ public boolean isSplit() { return this.split; } - + /** * @param split set split status */ @@ -363,7 +363,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + Bytes.toStringBinary(this.endKey) + "', ENCODED => " + getEncodedName() + "," + - (isOffline()? " OFFLINE => true,": "") + + (isOffline()? " OFFLINE => true,": "") + (isSplit()? " SPLIT => true,": "") + " TABLE => {" + this.tableDesc.toString() + "}"; } @@ -415,7 +415,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable tableDesc.write(out); out.writeInt(hashCode); } - + @Override public void readFields(DataInput in) throws IOException { super.readFields(in); @@ -429,16 +429,16 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable this.tableDesc.readFields(in); this.hashCode = in.readInt(); } - + // // Comparable // - + public int compareTo(HRegionInfo o) { if (o == null) { return 1; } - + // Are regions of same table? int result = this.tableDesc.compareTo(o.tableDesc); if (result != 0) { @@ -450,7 +450,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable if (result != 0) { return result; } - + // Compare end keys. 
return Bytes.compareTo(this.endKey, o.endKey); } diff --git a/core/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/core/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java index 6be0cff..722991b 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java +++ b/core/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java @@ -29,7 +29,7 @@ public class HRegionLocation implements Comparable { /** * Constructor - * + * * @param regionInfo the HRegionInfo for the region * @param serverAddress the HServerAddress for the region server */ @@ -73,7 +73,7 @@ public class HRegionLocation implements Comparable { result ^= this.serverAddress.hashCode(); return result; } - + /** @return HRegionInfo */ public HRegionInfo getRegionInfo(){ return regionInfo; @@ -87,7 +87,7 @@ public class HRegionLocation implements Comparable { // // Comparable // - + public int compareTo(HRegionLocation o) { int result = this.regionInfo.compareTo(o.regionInfo); if(result == 0) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/HServerAddress.java b/core/src/main/java/org/apache/hadoop/hbase/HServerAddress.java index 90e627b..1edc944 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/HServerAddress.java +++ b/core/src/main/java/org/apache/hadoop/hbase/HServerAddress.java @@ -49,10 +49,10 @@ public class HServerAddress implements WritableComparable { this.stringValue = address.getAddress().getHostAddress() + ":" + address.getPort(); } - + /** * Construct a HServerAddress from a string of the form hostname:port - * + * * @param hostAndPort format 'hostname:port' */ public HServerAddress(String hostAndPort) { @@ -66,7 +66,7 @@ public class HServerAddress implements WritableComparable { this.address = new InetSocketAddress(host, port); this.stringValue = hostAndPort; } - + /** * Construct a HServerAddress from hostname, port number * @param bindAddress host name @@ -76,10 +76,10 @@ public class HServerAddress implements WritableComparable { this.address = new InetSocketAddress(bindAddress, port); this.stringValue = bindAddress + ":" + port; } - + /** * Construct a HServerAddress from another HServerAddress - * + * * @param other the HServerAddress to copy from */ public HServerAddress(HServerAddress other) { @@ -98,7 +98,7 @@ public class HServerAddress implements WritableComparable { public int getPort() { return address.getPort(); } - + /** @return host name */ public String getHostname() { return address.getHostName(); @@ -143,7 +143,7 @@ public class HServerAddress implements WritableComparable { result ^= this.stringValue.hashCode(); return result; } - + // // Writable // @@ -151,11 +151,11 @@ public class HServerAddress implements WritableComparable { public void readFields(DataInput in) throws IOException { String bindAddress = in.readUTF(); int port = in.readInt(); - + if(bindAddress == null || bindAddress.length() == 0) { address = null; stringValue = null; - + } else { address = new InetSocketAddress(bindAddress, port); stringValue = bindAddress + ":" + port; @@ -166,17 +166,17 @@ public class HServerAddress implements WritableComparable { if (address == null) { out.writeUTF(""); out.writeInt(0); - + } else { out.writeUTF(address.getAddress().getHostAddress()); out.writeInt(address.getPort()); } } - + // // Comparable // - + public int compareTo(HServerAddress o) { // Addresses as Strings may not compare though address is for the one // server with only difference being that one address has hostname diff --git 
a/core/src/main/java/org/apache/hadoop/hbase/HServerInfo.java b/core/src/main/java/org/apache/hadoop/hbase/HServerInfo.java index 2f772bd..bc536f1 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/HServerInfo.java +++ b/core/src/main/java/org/apache/hadoop/hbase/HServerInfo.java @@ -31,7 +31,7 @@ import org.apache.hadoop.io.WritableComparable; /** * HServerInfo contains metainfo about an HRegionServer, Currently it only * contains the server start code. - * + * * In the future it will contain information about the source machine and * load statistics. */ @@ -46,10 +46,10 @@ public class HServerInfo implements WritableComparable { /** default constructor - used by Writable */ public HServerInfo() { - this(new HServerAddress(), 0, + this(new HServerAddress(), 0, HConstants.DEFAULT_REGIONSERVER_INFOPORT, "default name"); } - + /** * Constructor * @param serverAddress @@ -64,7 +64,7 @@ public class HServerInfo implements WritableComparable { this.infoPort = infoPort; this.name = name; } - + /** * Construct a new object using another as input (like a copy constructor) * @param other @@ -95,7 +95,7 @@ public class HServerInfo implements WritableComparable { public synchronized HServerAddress getServerAddress() { return new HServerAddress(serverAddress); } - + /** * Change the server address. * @param serverAddress New server address @@ -104,26 +104,26 @@ public class HServerInfo implements WritableComparable { this.serverAddress = serverAddress; this.serverName = null; } - + /** @return the server start code */ public synchronized long getStartCode() { return startCode; } - + /** * @return Port the info server is listening on. */ public int getInfoPort() { return this.infoPort; } - + /** * @param infoPort - new port of info server */ public void setInfoPort(int infoPort) { this.infoPort = infoPort; } - + /** * @param startCode the startCode to set */ @@ -131,7 +131,7 @@ public class HServerInfo implements WritableComparable { this.startCode = startCode; this.serverName = null; } - + /** * @return the server name in the form hostname_startcode_port */ @@ -148,7 +148,7 @@ public class HServerInfo implements WritableComparable { } return this.serverName; } - + /** * Get the hostname of the server * @return hostname @@ -156,7 +156,7 @@ public class HServerInfo implements WritableComparable { public String getName() { return name; } - + /** * Set the hostname of the server * @param name hostname @@ -201,7 +201,7 @@ public class HServerInfo implements WritableComparable { // Writable - + public void readFields(DataInput in) throws IOException { this.serverAddress.readFields(in); this.startCode = in.readLong(); @@ -229,7 +229,7 @@ public class HServerInfo implements WritableComparable { private static String getServerName(HServerInfo info) { return getServerName(info.getServerAddress(), info.getStartCode()); } - + /** * @param serverAddress in the form hostname:port * @param startCode diff --git a/core/src/main/java/org/apache/hadoop/hbase/HServerLoad.java b/core/src/main/java/org/apache/hadoop/hbase/HServerLoad.java index 853e356..efa7e0e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/HServerLoad.java +++ b/core/src/main/java/org/apache/hadoop/hbase/HServerLoad.java @@ -49,7 +49,7 @@ public class HServerLoad implements WritableComparable { /** per-region load metrics */ private ArrayList regionLoad = new ArrayList(); - /** + /** * Encapsulates per-region loading metrics. 
*/ public static class RegionLoad implements Writable { @@ -82,7 +82,7 @@ public class HServerLoad implements WritableComparable { * @param storefileIndexSizeMB */ public RegionLoad(final byte[] name, final int stores, - final int storefiles, final int storefileSizeMB, + final int storefiles, final int storefileSizeMB, final int memstoreSizeMB, final int storefileIndexSizeMB) { this.name = name; this.stores = stores; @@ -239,7 +239,7 @@ public class HServerLoad implements WritableComparable { public HServerLoad() { super(); } - + /** * Constructor * @param numberOfRequests @@ -265,7 +265,7 @@ public class HServerLoad implements WritableComparable { /** * Originally, this method factored in the effect of requests going to the * server as well. However, this does not interact very well with the current - * region rebalancing code, which only factors number of regions. For the + * region rebalancing code, which only factors number of regions. For the * interim, until we can figure out how to make rebalancing use all the info * available, we're just going to make load purely the number of regions. * @@ -285,7 +285,7 @@ public class HServerLoad implements WritableComparable { public String toString() { return toString(1); } - + /** * Returns toString() with the number of requests divided by the message * interval in seconds @@ -330,9 +330,9 @@ public class HServerLoad implements WritableComparable { result ^= Integer.valueOf(numberOfRegions).hashCode(); return result; } - + // Getters - + /** * @return the numberOfRegions */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/core/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 29b51c8..d0c220e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/core/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -91,11 +91,11 @@ public class HTableDescriptor implements WritableComparable { public static final boolean DEFAULT_READONLY = false; public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*64L; - + public static final long DEFAULT_MAX_FILESIZE = 1024*1024*256L; public static final boolean DEFAULT_DEFERRED_LOG_FLUSH = true; - + private volatile Boolean meta = null; private volatile Boolean root = null; private Boolean isDeferredLog = null; @@ -103,9 +103,9 @@ public class HTableDescriptor implements WritableComparable { // Key is hash of the family name. public final Map families = new TreeMap(Bytes.BYTES_RAWCOMPARATOR); - + /** - * Private constructor used internally creating table descriptors for + * Private constructor used internally creating table descriptors for * catalog tables: e.g. .META. and -ROOT-. */ protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families) { @@ -118,7 +118,7 @@ public class HTableDescriptor implements WritableComparable { } /** - * Private constructor used internally creating table descriptors for + * Private constructor used internally creating table descriptors for * catalog tables: e.g. .META. and -ROOT-. */ protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families, @@ -134,8 +134,8 @@ public class HTableDescriptor implements WritableComparable { this.values.put(entry.getKey(), entry.getValue()); } } - - + + /** * Constructs an empty object. * For deserializing an HTableDescriptor instance only. @@ -175,7 +175,7 @@ public class HTableDescriptor implements WritableComparable { /** * Constructor. *

- * Makes a deep copy of the supplied descriptor. + * Makes a deep copy of the supplied descriptor. * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor. * @param desc The descriptor. */ @@ -290,7 +290,7 @@ public class HTableDescriptor implements WritableComparable { public byte[] getValue(byte[] key) { return getValue(new ImmutableBytesWritable(key)); } - + private byte[] getValue(final ImmutableBytesWritable key) { ImmutableBytesWritable ibw = values.get(key); if (ibw == null) @@ -323,7 +323,7 @@ public class HTableDescriptor implements WritableComparable { public void setValue(byte[] key, byte[] value) { setValue(new ImmutableBytesWritable(key), value); } - + /* * @param key The key. * @param value The value. @@ -431,7 +431,7 @@ public class HTableDescriptor implements WritableComparable { return Long.valueOf(Bytes.toString(value)).longValue(); return DEFAULT_MEMSTORE_FLUSH_SIZE; } - + /** * @param memstoreFlushSize memory cache flush size for each hregion */ @@ -622,14 +622,14 @@ public class HTableDescriptor implements WritableComparable { public Collection getFamilies() { return Collections.unmodifiableCollection(this.families.values()); } - + /** * @return Immutable sorted set of the keys of the families. */ public Set getFamiliesKeys() { return Collections.unmodifiableSet(this.families.keySet()); } - + public HColumnDescriptor[] getColumnFamilies() { return getFamilies().toArray(new HColumnDescriptor[0]); } @@ -668,7 +668,7 @@ public class HTableDescriptor implements WritableComparable { 10, // Ten is arbitrary number. Keep versions to help debuggging. Compression.Algorithm.NONE.getName(), true, true, 8 * 1024, HConstants.FOREVER, false, HConstants.REPLICATION_SCOPE_LOCAL) }); - + /** Table descriptor for .META. catalog table */ public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor( HConstants.META_TABLE_NAME, new HColumnDescriptor[] { diff --git a/core/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/core/src/main/java/org/apache/hadoop/hbase/KeyValue.java index be9d11d..7729900 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/core/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -36,19 +36,19 @@ import org.apache.hadoop.io.Writable; /** * An HBase Key/Value. - * + * *

If being used client-side, the primary methods to access individual fields - * are {@link #getRow()}, {@link #getFamily()}, {@link #getQualifier()}, + * are {@link #getRow()}, {@link #getFamily()}, {@link #getQualifier()}, * {@link #getTimestamp()}, and {@link #getValue()}. These methods allocate new * byte arrays and return copies so they should be avoided server-side. - * + * *
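As a quick illustration of the guidance above, given some existing KeyValue kv, the client-side copies versus the server-side buffer access look roughly like this (only accessors declared in this class are used):

    // Client-side: each call allocates and returns a copy.
    byte [] row = kv.getRow();
    byte [] value = kv.getValue();

    // Server-side: skip the copies and read from the backing array directly.
    byte [] buf = kv.getBuffer();
    String rowAsString = Bytes.toString(buf, kv.getRowOffset(), kv.getRowLength());
    int valueLength = kv.getValueLength();   // value bytes start at kv.getValueOffset()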

Instances of this class are immutable. They are not * comparable but Comparators are provided. Comparators change with context, * whether user table or a catalog table comparison context. Its * important that you use the appropriate comparator comparing rows in * particular. There are Comparators for KeyValue instances and then for * just the Key portion of a KeyValue used mostly in {@link HFile}. - * + * *

KeyValue wraps a byte array and has offset and length for passed array * at where to start interpreting the content as a KeyValue blob. The KeyValue * blob format inside the byte array is: @@ -58,7 +58,7 @@ import org.apache.hadoop.io.Writable; * Rowlength maximum is Short.MAX_SIZE, column family length maximum is * Byte.MAX_SIZE, and column qualifier + key length must be < Integer.MAX_SIZE. * The column does not contain the family/qualifier delimiter. - * + * *
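To make that layout concrete, a small construction sketch (the row, family, qualifier and value are arbitrary; the constructor and length accessors are the ones declared in this class):

    KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("info"),
        Bytes.toBytes("name"), System.currentTimeMillis(), KeyValue.Type.Put,
        Bytes.toBytes("value"));

    // Backing array: 4-byte key length, 4-byte value length, then the key
    // (2-byte row length, row, 1-byte family length, family, qualifier,
    //  8-byte timestamp, 1-byte type code), followed by the value bytes.
    int keyLength   = kv.getKeyLength();    // the key block described above
    int valueLength = kv.getValueLength();  // the trailing value bytes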

TODO: Group Key-only comparators and operations into a Key class, just * for neatness sake, if can figure what to call it. */ @@ -72,7 +72,7 @@ public class KeyValue implements Writable, HeapSize { public static final byte[] COLUMN_FAMILY_DELIM_ARRAY = new byte[]{COLUMN_FAMILY_DELIMITER}; - + /** * Comparator for plain key/values; i.e. non-catalog table key/values. */ @@ -110,10 +110,10 @@ public class KeyValue implements Writable, HeapSize { /** * Get the appropriate row comparator for the specified table. - * + * * Hopefully we can get rid of this, I added this here because it's replacing * something in HSK. We should move completely off of that. - * + * * @param tableName The table name. * @return The comparator. */ @@ -162,13 +162,13 @@ public class KeyValue implements Writable, HeapSize { // Maximum is used when searching; you look from maximum on down. Maximum((byte)255); - + private final byte code; - + Type(final byte c) { this.code = c; } - + public byte getCode() { return this.code; } @@ -194,9 +194,9 @@ public class KeyValue implements Writable, HeapSize { * Makes a Key with highest possible Timestamp, empty row and column. No * key can be equal or lower than this one in memstore or in store file. */ - public static final KeyValue LOWESTKEY = + public static final KeyValue LOWESTKEY = new KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP); - + private byte [] bytes = null; private int offset = 0; private int length = 0; @@ -238,7 +238,7 @@ public class KeyValue implements Writable, HeapSize { } /** Constructors that build a new backing byte array from fields */ - + /** * Constructs KeyValue structure filled with null value. * Sets type to {@link KeyValue.Type#Maximum} @@ -265,7 +265,7 @@ public class KeyValue implements Writable, HeapSize { * @param family family name * @param qualifier column qualifier */ - public KeyValue(final byte [] row, final byte [] family, + public KeyValue(final byte [] row, final byte [] family, final byte [] qualifier) { this(row, family, qualifier, HConstants.LATEST_TIMESTAMP, Type.Maximum); } @@ -276,7 +276,7 @@ public class KeyValue implements Writable, HeapSize { * @param family family name * @param qualifier column qualifier */ - public KeyValue(final byte [] row, final byte [] family, + public KeyValue(final byte [] row, final byte [] family, final byte [] qualifier, final byte [] value) { this(row, family, qualifier, HConstants.LATEST_TIMESTAMP, Type.Put, value); } @@ -294,7 +294,7 @@ public class KeyValue implements Writable, HeapSize { final byte[] qualifier, final long timestamp, Type type) { this(row, family, qualifier, timestamp, type, null); } - + /** * Constructs KeyValue structure filled with specified values. * @param row row key @@ -308,7 +308,7 @@ public class KeyValue implements Writable, HeapSize { final byte[] qualifier, final long timestamp, final byte[] value) { this(row, family, qualifier, timestamp, Type.Put, value); } - + /** * Constructs KeyValue structure filled with specified values. * @param row row key @@ -322,9 +322,9 @@ public class KeyValue implements Writable, HeapSize { public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier, final long timestamp, Type type, final byte[] value) { - this(row, family, qualifier, 0, qualifier==null ? 0 : qualifier.length, + this(row, family, qualifier, 0, qualifier==null ? 0 : qualifier.length, timestamp, type, value, 0, value==null ? 0 : value.length); - } + } /** * Constructs KeyValue structure filled with specified values. 
@@ -340,12 +340,12 @@ public class KeyValue implements Writable, HeapSize { * @param vlength value length * @throws IllegalArgumentException */ - public KeyValue(byte [] row, byte [] family, - byte [] qualifier, int qoffset, int qlength, long timestamp, Type type, + public KeyValue(byte [] row, byte [] family, + byte [] qualifier, int qoffset, int qlength, long timestamp, Type type, byte [] value, int voffset, int vlength) { - this(row, 0, row==null ? 0 : row.length, + this(row, 0, row==null ? 0 : row.length, family, 0, family==null ? 0 : family.length, - qualifier, qoffset, qlength, timestamp, type, + qualifier, qoffset, qlength, timestamp, type, value, voffset, vlength); } @@ -374,7 +374,7 @@ public class KeyValue implements Writable, HeapSize { final byte [] qualifier, final int qoffset, final int qlength, final long timestamp, final Type type, final byte [] value, final int voffset, final int vlength) { - this.bytes = createByteArray(row, roffset, rlength, + this.bytes = createByteArray(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, qlength, timestamp, type, value, voffset, vlength); this.length = bytes.length; @@ -383,7 +383,7 @@ public class KeyValue implements Writable, HeapSize { /** * Write KeyValue format into a byte array. - * + * * @param row row key * @param roffset row offset * @param rlength row length @@ -398,7 +398,7 @@ public class KeyValue implements Writable, HeapSize { * @param value column value * @param voffset value offset * @param vlength value length - * @return The newly created byte array. + * @return The newly created byte array. */ static byte [] createByteArray(final byte [] row, final int roffset, final int rlength, final byte [] family, final int foffset, int flength, @@ -431,10 +431,10 @@ public class KeyValue implements Writable, HeapSize { // Value length vlength = value == null? 0 : vlength; if (vlength > HConstants.MAXIMUM_VALUE_LENGTH) { // FindBugs INT_VACUOUS_COMPARISON - throw new IllegalArgumentException("Valuer > " + + throw new IllegalArgumentException("Valuer > " + HConstants.MAXIMUM_VALUE_LENGTH); } - + // Allocate right-sized byte array. byte [] bytes = new byte[KEYVALUE_INFRASTRUCTURE_SIZE + keylength + vlength]; // Write key, value and key row length. @@ -457,7 +457,7 @@ public class KeyValue implements Writable, HeapSize { } return bytes; } - + /** * Write KeyValue format into a byte array. *

@@ -473,7 +473,7 @@ public class KeyValue implements Writable, HeapSize { * @param value * @param voffset * @param vlength - * @return The newly created byte array. + * @return The newly created byte array. */ static byte [] createByteArray(final byte [] row, final int roffset, final int rlength, @@ -528,7 +528,7 @@ public class KeyValue implements Writable, HeapSize { // KeyValue cloning // //--------------------------------------------------------------------------- - + /** * Clones a KeyValue. This creates a copy, re-allocating the buffer. * @return Fully copied clone of this KeyValue @@ -544,7 +544,7 @@ public class KeyValue implements Writable, HeapSize { // String representation // //--------------------------------------------------------------------------- - + public String toString() { if (this.bytes == null || this.bytes.length == 0) { return "empty"; @@ -595,7 +595,7 @@ public class KeyValue implements Writable, HeapSize { // Public Member Accessors // //--------------------------------------------------------------------------- - + /** * @return The byte array backing this KeyValue. */ @@ -622,7 +622,7 @@ public class KeyValue implements Writable, HeapSize { // Length and Offset Calculators // //--------------------------------------------------------------------------- - + /** * Determines the total length of the KeyValue stored in the specified * byte array and offset. Includes all headers. @@ -631,7 +631,7 @@ public class KeyValue implements Writable, HeapSize { * @return length of entire KeyValue, in bytes */ private static int getLength(byte [] bytes, int offset) { - return (2 * Bytes.SIZEOF_INT) + + return (2 * Bytes.SIZEOF_INT) + Bytes.toInt(bytes, offset) + Bytes.toInt(bytes, offset + Bytes.SIZEOF_INT); } @@ -660,7 +660,7 @@ public class KeyValue implements Writable, HeapSize { public int getValueOffset() { return getKeyOffset() + getKeyLength(); } - + /** * @return Value length */ @@ -674,7 +674,7 @@ public class KeyValue implements Writable, HeapSize { public int getRowOffset() { return getKeyOffset() + Bytes.SIZEOF_SHORT; } - + /** * @return Row length */ @@ -688,21 +688,21 @@ public class KeyValue implements Writable, HeapSize { public int getFamilyOffset() { return getFamilyOffset(getRowLength()); } - + /** * @return Family offset */ public int getFamilyOffset(int rlength) { return this.offset + ROW_OFFSET + Bytes.SIZEOF_SHORT + rlength + Bytes.SIZEOF_BYTE; } - + /** * @return Family length */ public byte getFamilyLength() { return getFamilyLength(getFamilyOffset()); } - + /** * @return Family length */ @@ -716,29 +716,29 @@ public class KeyValue implements Writable, HeapSize { public int getQualifierOffset() { return getQualifierOffset(getFamilyOffset()); } - + /** * @return Qualifier offset */ public int getQualifierOffset(int foffset) { return foffset + getFamilyLength(foffset); } - + /** * @return Qualifier length */ public int getQualifierLength() { return getQualifierLength(getRowLength(),getFamilyLength()); } - + /** * @return Qualifier length */ public int getQualifierLength(int rlength, int flength) { - return getKeyLength() - + return getKeyLength() - (KEY_INFRASTRUCTURE_SIZE + rlength + flength); } - + /** * @return Column (family + qualifier) length */ @@ -747,7 +747,7 @@ public class KeyValue implements Writable, HeapSize { int foffset = getFamilyOffset(rlength); return getTotalColumnLength(rlength,foffset); } - + /** * @return Column (family + qualifier) length */ @@ -756,14 +756,14 @@ public class KeyValue implements Writable, HeapSize { int qlength 
= getQualifierLength(rlength,flength); return flength + qlength; } - + /** * @return Timestamp offset */ public int getTimestampOffset() { return getTimestampOffset(getKeyLength()); } - + /** * @param keylength Pass if you have it to save on a int creation. * @return Timestamp offset @@ -776,7 +776,7 @@ public class KeyValue implements Writable, HeapSize { * @return True if this KeyValue has a LATEST_TIMESTAMP timestamp. */ public boolean isLatestTimestamp() { - return Bytes.compareTo(getBuffer(), getTimestampOffset(), Bytes.SIZEOF_LONG, + return Bytes.compareTo(getBuffer(), getTimestampOffset(), Bytes.SIZEOF_LONG, HConstants.LATEST_TIMESTAMP_BYTES, 0, Bytes.SIZEOF_LONG) == 0; } @@ -788,17 +788,17 @@ public class KeyValue implements Writable, HeapSize { } return false; } - + //--------------------------------------------------------------------------- // // Methods that return copies of fields // //--------------------------------------------------------------------------- - + /** * Do not use unless you have to. Used internally for compacting and testing. - * - * Use {@link #getRow()}, {@link #getFamily()}, {@link #getQualifier()}, and + * + * Use {@link #getRow()}, {@link #getFamily()}, {@link #getQualifier()}, and * {@link #getValue()} if accessing a KeyValue client-side. * @return Copy of the key portion only. */ @@ -808,7 +808,7 @@ public class KeyValue implements Writable, HeapSize { System.arraycopy(getBuffer(), getKeyOffset(), key, 0, keylength); return key; } - + /** * Returns value in a new byte array. * Primarily for use client-side. If server-side, use @@ -823,12 +823,12 @@ public class KeyValue implements Writable, HeapSize { System.arraycopy(getBuffer(), o, result, 0, l); return result; } - + /** * Primarily for use client-side. Returns the row of this KeyValue in a new * byte array.

- * - * If server-side, use {@link #getBuffer()} with appropriate offsets and + * + * If server-side, use {@link #getBuffer()} with appropriate offsets and * lengths instead. * @return Row in a new byte array. */ @@ -841,7 +841,7 @@ public class KeyValue implements Writable, HeapSize { } /** - * + * * @return Timestamp */ public long getTimestamp() { @@ -897,10 +897,10 @@ public class KeyValue implements Writable, HeapSize { } /** - * Primarily for use client-side. Returns the family of this KeyValue in a + * Primarily for use client-side. Returns the family of this KeyValue in a * new byte array.

- * - * If server-side, use {@link #getBuffer()} with appropriate offsets and + * + * If server-side, use {@link #getBuffer()} with appropriate offsets and * lengths instead. * @return Returns family. Makes a copy. */ @@ -913,10 +913,10 @@ public class KeyValue implements Writable, HeapSize { } /** - * Primarily for use client-side. Returns the column qualifier of this + * Primarily for use client-side. Returns the column qualifier of this * KeyValue in a new byte array.

- * - * If server-side, use {@link #getBuffer()} with appropriate offsets and + * + * If server-side, use {@link #getBuffer()} with appropriate offsets and * lengths instead. * Use {@link #getBuffer()} with appropriate offsets and lengths instead. * @return Returns qualifier. Makes a copy. @@ -934,7 +934,7 @@ public class KeyValue implements Writable, HeapSize { // KeyValue splitter // //--------------------------------------------------------------------------- - + /** * Utility class that splits a KeyValue buffer into separate byte arrays. *

@@ -958,7 +958,7 @@ public class KeyValue implements Writable, HeapSize { public byte [] getType() { return this.split[4]; } public byte [] getValue() { return this.split[5]; } } - + public SplitKeyValue split() { SplitKeyValue split = new SplitKeyValue(); int splitOffset = this.offset; @@ -998,13 +998,13 @@ public class KeyValue implements Writable, HeapSize { split.setValue(value); return split; } - + //--------------------------------------------------------------------------- // - // Compare specified fields against those contained in this KeyValue + // Compare specified fields against those contained in this KeyValue // //--------------------------------------------------------------------------- - + /** * @param family * @return True if matching families. @@ -1025,7 +1025,7 @@ public class KeyValue implements Writable, HeapSize { public boolean matchingQualifier(final byte [] qualifier) { int o = getQualifierOffset(); int l = getQualifierLength(); - return Bytes.compareTo(qualifier, 0, qualifier.length, + return Bytes.compareTo(qualifier, 0, qualifier.length, this.bytes, o, l) == 0; } @@ -1135,7 +1135,7 @@ public class KeyValue implements Writable, HeapSize { len); return result; } - + /** * Makes a column in family:qualifier form from separate byte arrays. *
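A one-line illustration of the helper described above (declared just below):

    // "info" + ':' + "name"  ->  a single byte [] column key, "info:name"
    byte [] column = KeyValue.makeColumn(Bytes.toBytes("info"), Bytes.toBytes("name"));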

@@ -1147,7 +1147,7 @@ public class KeyValue implements Writable, HeapSize { public static byte [] makeColumn(byte [] family, byte [] qualifier) { return Bytes.add(family, COLUMN_FAMILY_DELIM_ARRAY, qualifier); } - + /** * @param b * @return Index of the family-qualifier colon delimiter character in passed @@ -1226,7 +1226,7 @@ public class KeyValue implements Writable, HeapSize { */ public static class RootComparator extends MetaComparator { private final KeyComparator rawcomparator = new RootKeyComparator(); - + public KeyComparator getRawComparator() { return this.rawcomparator; } @@ -1297,7 +1297,7 @@ public class KeyValue implements Writable, HeapSize { * @return Result comparing rows. */ public int compareRows(final KeyValue left, final KeyValue right) { - return compareRows(left, left.getRowLength(), right, + return compareRows(left, left.getRowLength(), right, right.getRowLength()); } @@ -1330,7 +1330,7 @@ public class KeyValue implements Writable, HeapSize { return getRawComparator().compareRows(left, loffset, llength, right, roffset, rlength); } - + public int compareColumns(final KeyValue left, final byte [] right, final int roffset, final int rlength, final int rfamilyoffset) { int offset = left.getFamilyOffset(); @@ -1408,7 +1408,7 @@ public class KeyValue implements Writable, HeapSize { public boolean matchingRows(final byte [] left, final int loffset, final int llength, final byte [] right, final int roffset, final int rlength) { - int compare = compareRows(left, loffset, llength, + int compare = compareRows(left, loffset, llength, right, roffset, rlength); if (compare != 0) { return false; @@ -1437,7 +1437,7 @@ public class KeyValue implements Writable, HeapSize { protected Object clone() throws CloneNotSupportedException { return new KVComparator(); } - + /** * @return Comparator that ignores timestamps; useful counting versions. */ @@ -1518,7 +1518,7 @@ public class KeyValue implements Writable, HeapSize { final byte [] q, final long ts) { return new KeyValue(row, f, q, ts, Type.Maximum); } - + /** * @param b * @return A KeyValue made of a byte array that holds the key-only part. @@ -1527,7 +1527,7 @@ public class KeyValue implements Writable, HeapSize { public static KeyValue createKeyValueFromKey(final byte [] b) { return createKeyValueFromKey(b, 0, b.length); } - + /** * @param bb * @return A KeyValue made of a byte buffer that holds the key-only part. @@ -1536,7 +1536,7 @@ public class KeyValue implements Writable, HeapSize { public static KeyValue createKeyValueFromKey(final ByteBuffer bb) { return createKeyValueFromKey(bb.array(), bb.arrayOffset(), bb.limit()); } - + /** * @param b * @param o @@ -1565,7 +1565,7 @@ public class KeyValue implements Writable, HeapSize { // "---" + Bytes.toString(right, roffset, rlength)); final int metalength = 7; // '.META.' length int lmetaOffsetPlusDelimiter = loffset + metalength; - int leftFarDelimiter = getDelimiterInReverse(left, + int leftFarDelimiter = getDelimiterInReverse(left, lmetaOffsetPlusDelimiter, llength - metalength, HRegionInfo.DELIMITER); int rmetaOffsetPlusDelimiter = roffset + metalength; @@ -1716,7 +1716,7 @@ public class KeyValue implements Writable, HeapSize { if (compare != 0) { return compare; } - + if (!this.ignoreTimestamp) { // Get timestamps. 
long ltimestamp = Bytes.toLong(left, @@ -1769,14 +1769,14 @@ public class KeyValue implements Writable, HeapSize { return 0; } } - + // HeapSize public long heapSize() { - return ClassSize.align(ClassSize.OBJECT + ClassSize.REFERENCE + - ClassSize.align(ClassSize.ARRAY + length) + + return ClassSize.align(ClassSize.OBJECT + ClassSize.REFERENCE + + ClassSize.align(ClassSize.ARRAY + length) + (2 * Bytes.SIZEOF_INT)); } - + // this overload assumes that the length bytes have already been read, // and it expects the length of the KeyValue to be explicitly passed // to it. @@ -1786,13 +1786,13 @@ public class KeyValue implements Writable, HeapSize { this.bytes = new byte[this.length]; in.readFully(this.bytes, 0, this.length); } - + // Writable public void readFields(final DataInput in) throws IOException { int length = in.readInt(); readFields(length, in); } - + public void write(final DataOutput out) throws IOException { out.writeInt(this.length); out.write(this.bytes, this.offset, this.length); diff --git a/core/src/main/java/org/apache/hadoop/hbase/LeaseListener.java b/core/src/main/java/org/apache/hadoop/hbase/LeaseListener.java index 90a32ef..54b3452 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/LeaseListener.java +++ b/core/src/main/java/org/apache/hadoop/hbase/LeaseListener.java @@ -21,11 +21,11 @@ package org.apache.hadoop.hbase; /** - * LeaseListener is an interface meant to be implemented by users of the Leases + * LeaseListener is an interface meant to be implemented by users of the Leases * class. * * It receives events from the Leases class about the status of its accompanying - * lease. Users of the Leases class can use a LeaseListener subclass to, for + * lease. Users of the Leases class can use a LeaseListener subclass to, for * example, clean up resources after a lease has expired. */ public interface LeaseListener { diff --git a/core/src/main/java/org/apache/hadoop/hbase/Leases.java b/core/src/main/java/org/apache/hadoop/hbase/Leases.java index e22bee9..e502cc4 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/Leases.java +++ b/core/src/main/java/org/apache/hadoop/hbase/Leases.java @@ -36,15 +36,15 @@ import java.io.IOException; * * There are several server classes in HBase that need to track external * clients that occasionally send heartbeats. - * + * *

These external clients hold resources in the server class. * Those resources need to be released if the external client fails to send a * heartbeat after some interval of time passes. * *

The Leases class is a general reusable class for this kind of pattern. - * An instance of the Leases class will create a thread to do its dirty work. + * An instance of the Leases class will create a thread to do its dirty work. * You should close() the instance if you want to clean up the thread properly. - * + * *
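A minimal sketch of that pattern using the methods declared below; the leaseExpired() callback name on LeaseListener is an assumption here, since the interface body is not part of this hunk:

    Leases leases = new Leases(60 * 1000, 15 * 1000);  // lease period, check frequency (ms)
    leases.start();                                    // Leases extends Thread

    leases.createLease("scanner-42", new LeaseListener() {
      public void leaseExpired() {                     // assumed callback name
        // release whatever resource the absent client was holding
      }
    });

    leases.renewLease("scanner-42");    // called on each client heartbeat
    leases.cancelLease("scanner-42");   // client finished cleanly
    leases.close();                     // destroy remaining leases, stop the thread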

* NOTE: This class extends Thread rather than Chore because the sleep time * can be interrupted when there is something to do, rather than the Chore @@ -60,7 +60,7 @@ public class Leases extends Thread { /** * Creates a lease monitor - * + * * @param leasePeriod - length of time (milliseconds) that the lease is valid * @param leaseCheckFrequency - how often the lease should be checked * (milliseconds) @@ -114,9 +114,9 @@ public class Leases extends Thread { public void closeAfterLeasesExpire() { this.stopRequested = true; } - + /** - * Shut down this Leases instance. All pending leases will be destroyed, + * Shut down this Leases instance. All pending leases will be destroyed, * without any cancellation calls. */ public void close() { @@ -132,10 +132,10 @@ public class Leases extends Thread { /** * Obtain a lease - * + * * @param leaseName name of the lease * @param listener listener that will process lease expirations - * @throws LeaseStillHeldException + * @throws LeaseStillHeldException */ public void createLease(String leaseName, final LeaseListener listener) throws LeaseStillHeldException { @@ -160,25 +160,25 @@ public class Leases extends Thread { @SuppressWarnings("serial") public static class LeaseStillHeldException extends IOException { private final String leaseName; - + /** * @param name */ public LeaseStillHeldException(final String name) { this.leaseName = name; } - + /** @return name of lease */ public String getName() { return this.leaseName; } } - + /** * Renew a lease - * + * * @param leaseName name of lease - * @throws LeaseException + * @throws LeaseException */ public void renewLease(final String leaseName) throws LeaseException { synchronized (leaseQueue) { @@ -197,9 +197,9 @@ public class Leases extends Thread { /** * Client explicitly cancels a lease. - * + * * @param leaseName name of lease - * @throws LeaseException + * @throws LeaseException */ public void cancelLease(final String leaseName) throws LeaseException { synchronized (leaseQueue) { @@ -227,7 +227,7 @@ public class Leases extends Thread { public String getLeaseName() { return leaseName; } - + /** @return listener */ public LeaseListener getListener() { return this.listener; @@ -246,7 +246,7 @@ public class Leases extends Thread { } return this.hashCode() == ((Lease) obj).hashCode(); } - + @Override public int hashCode() { return this.leaseName.hashCode(); diff --git a/core/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/core/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index 767bc99..25c26ee 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ b/core/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -36,18 +36,18 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil; /** * This class creates a single process HBase cluster. One thread is created for * a master and one per region server. - * + * * Call {@link #startup()} to start the cluster running and {@link #shutdown()} * to close it all down. {@link #join} the cluster is you want to wait on * shutdown completion. - * + * *

Runs master on port 60000 by default. Because we can't just kill the * process -- not till HADOOP-1700 gets fixed and even then.... -- we need to * be able to find the master with a remote client to run shutdown. To use a * port other than 60000, set the hbase.master to a value of 'local:PORT': * that is 'local', not 'localhost', and the port number the master should use * instead of 60000. - * + * *
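A lifecycle sketch, assuming the single-argument Configuration constructor (some versions also accept a region server count):

    LocalHBaseCluster cluster = new LocalHBaseCluster(conf);  // 'conf' supplied by the caller
    cluster.startup();     // one master thread plus one region server thread
    // ... run against the local cluster ...
    cluster.shutdown();    // ask the master and region server to stop
    cluster.join();        // wait for shutdown to complete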

To make 'local' mode more responsive, make values such as * hbase.regionserver.msginterval, * hbase.master.meta.thread.rescanfrequency, and @@ -203,7 +203,7 @@ public class LocalHBaseCluster implements HConstants { } } } - + /** * Start the cluster. */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java b/core/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java index 5c93ebe..32da8cb 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java +++ b/core/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java @@ -42,7 +42,7 @@ public class NotServingRegionException extends IOException { public NotServingRegionException(String s) { super(s); } - + /** * Constructor * @param s message diff --git a/core/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java b/core/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java index 6fc8e57..c73ff53 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java +++ b/core/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java @@ -25,14 +25,14 @@ import java.lang.reflect.InvocationTargetException; import org.apache.hadoop.ipc.RemoteException; -/** +/** * An immutable class which contains a static method for handling * org.apache.hadoop.ipc.RemoteException exceptions. */ public class RemoteExceptionHandler { /* Not instantiable */ private RemoteExceptionHandler() {super();} - + /** * Examine passed Throwable. See if its carrying a RemoteException. If so, * run {@link #decodeRemoteException(RemoteException)} on it. Otherwise, @@ -53,7 +53,7 @@ public class RemoteExceptionHandler { } return result; } - + /** * Examine passed IOException. See if its carrying a RemoteException. If so, * run {@link #decodeRemoteException(RemoteException)} on it. Otherwise, @@ -66,17 +66,17 @@ public class RemoteExceptionHandler { Throwable t = checkThrowable(e); return t instanceof IOException? (IOException)t: new IOException(t); } - + /** * Converts org.apache.hadoop.ipc.RemoteException into original exception, * if possible. If the original exception is an Error or a RuntimeException, * throws the original exception. - * + * * @param re original exception * @return decoded RemoteException if it is an instance of or a subclass of * IOException, or the original RemoteException if it cannot be decoded. - * - * @throws IOException indicating a server error ocurred if the decoded + * + * @throws IOException indicating a server error ocurred if the decoded * exception is not an IOException. The decoded exception is set as * the cause. 
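In client code the static helpers above are typically applied like this; the remote call itself is hypothetical:

    try {
      server.someRemoteCall();   // hypothetical RPC that may surface a wrapped RemoteException
    } catch (IOException e) {
      // Unwrap back to the original exception type where possible, as described
      // for checkIOException/decodeRemoteException above.
      throw RemoteExceptionHandler.checkIOException(e);
    }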
*/ @@ -89,10 +89,10 @@ public class RemoteExceptionHandler { Class[] parameterTypes = { String.class }; Constructor ctor = c.getConstructor(parameterTypes); - + Object[] arguments = { re.getMessage() }; Throwable t = (Throwable) ctor.newInstance(arguments); - + if (t instanceof IOException) { i = (IOException) t; diff --git a/core/src/main/java/org/apache/hadoop/hbase/TableExistsException.java b/core/src/main/java/org/apache/hadoop/hbase/TableExistsException.java index bbcc295..5fde219 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/TableExistsException.java +++ b/core/src/main/java/org/apache/hadoop/hbase/TableExistsException.java @@ -29,7 +29,7 @@ public class TableExistsException extends IOException { /** * Constructor - * + * * @param s message */ public TableExistsException(String s) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/ValueOverMaxLengthException.java b/core/src/main/java/org/apache/hadoop/hbase/ValueOverMaxLengthException.java index 2bc136d..383c9db 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/ValueOverMaxLengthException.java +++ b/core/src/main/java/org/apache/hadoop/hbase/ValueOverMaxLengthException.java @@ -23,7 +23,7 @@ package org.apache.hadoop.hbase; * Thrown when a value is longer than the specified LENGTH */ public class ValueOverMaxLengthException extends DoNotRetryIOException { - + private static final long serialVersionUID = -5525656352372008316L; /** diff --git a/core/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java b/core/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java index bf29adf..ecea580 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java +++ b/core/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java @@ -26,29 +26,29 @@ import java.lang.annotation.*; @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.PACKAGE) public @interface VersionAnnotation { - + /** * Get the Hadoop version * @return the version string "0.6.3-dev" */ String version(); - + /** * Get the username that compiled Hadoop. */ String user(); - + /** * Get the date when Hadoop was compiled. * @return the date in unix 'date' format */ String date(); - + /** * Get the url for the subversion repository. */ String url(); - + /** * Get the subversion revision. * @return the revision number as a string (eg. "451451") diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/core/src/main/java/org/apache/hadoop/hbase/client/Delete.java index 2b8ed30..1f60485 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/Delete.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/Delete.java @@ -36,7 +36,7 @@ import java.util.TreeMap; /** * Used to perform Delete operations on a single row. *

- * To delete an entire row, instantiate a Delete object with the row + * To delete an entire row, instantiate a Delete object with the row * to delete. To further define the scope of what to delete, perform * additional methods as outlined below. *

@@ -45,7 +45,7 @@ import java.util.TreeMap; *

* To delete multiple versions of specific columns, execute * {@link #deleteColumns(byte[], byte[]) deleteColumns} - * for each column to delete. + * for each column to delete. *

* To delete specific versions of specific columns, execute * {@link #deleteColumn(byte[], byte[], long) deleteColumn} @@ -69,10 +69,10 @@ public class Delete implements Writable, Row, Comparable { private static final byte DELETE_VERSION = (byte)1; private byte [] row = null; - // This ts is only used when doing a deleteRow. Anything less, + // This ts is only used when doing a deleteRow. Anything less, private long ts; private long lockId = -1L; - private final Map> familyMap = + private final Map> familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); /** Constructor for Writable. DO NOT USE */ @@ -95,12 +95,12 @@ public class Delete implements Writable, Row, Comparable { /** * Create a Delete operation for the specified row and timestamp, using * an optional row lock.

- * + * * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the + * families of the specified row with a timestamp less than or equal to the * specified timestamp.
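Putting the description together as a client-side sketch; HTable.delete is assumed from the rest of the client API and is not part of this hunk:

    Delete d = new Delete(Bytes.toBytes("row1"));  // with no further calls, this drops the whole row
    d.deleteColumns(Bytes.toBytes("info"), Bytes.toBytes("old"));        // every version of info:old
    d.deleteColumn(Bytes.toBytes("info"), Bytes.toBytes("tmp"), 1234L);  // only the ts=1234 version of info:tmp
    table.delete(d);                               // 'table' is an existing HTable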

- * - * This timestamp is ONLY used for a delete row operation. If specifying + * + * This timestamp is ONLY used for a delete row operation. If specifying * families or columns, you must specify each timestamp individually. * @param row row key * @param timestamp maximum version timestamp (only for delete row) @@ -170,7 +170,7 @@ public class Delete implements Writable, Row, Comparable { familyMap.put(family, list); return this; } - + /** * Delete all versions of the specified column. * @param family family name @@ -181,7 +181,7 @@ public class Delete implements Writable, Row, Comparable { this.deleteColumns(family, qualifier, HConstants.LATEST_TIMESTAMP); return this; } - + /** * Delete all versions of the specified column with a timestamp less than * or equal to the specified timestamp. @@ -200,7 +200,7 @@ public class Delete implements Writable, Row, Comparable { familyMap.put(family, list); return this; } - + /** * Delete the latest version of the specified column. * This is an expensive call in that on the server-side, it first does a @@ -214,7 +214,7 @@ public class Delete implements Writable, Row, Comparable { this.deleteColumn(family, qualifier, HConstants.LATEST_TIMESTAMP); return this; } - + /** * Delete the specified version of the specified column. * @param family family name @@ -232,15 +232,15 @@ public class Delete implements Writable, Row, Comparable { familyMap.put(family, list); return this; } - + /** - * Method for retrieving the delete's familyMap + * Method for retrieving the delete's familyMap * @return familyMap */ public Map> getFamilyMap() { return this.familyMap; } - + /** * Method for retrieving the delete's row * @return row @@ -248,7 +248,7 @@ public class Delete implements Writable, Row, Comparable { public byte [] getRow() { return this.row; } - + /** * Method for retrieving the delete's RowLock * @return RowLock @@ -256,16 +256,16 @@ public class Delete implements Writable, Row, Comparable { public RowLock getRowLock() { return new RowLock(this.row, this.lockId); } - + /** * Method for retrieving the delete's lock ID. - * + * * @return The lock ID. */ public long getLockId() { return this.lockId; } - + /** * Method for retrieving the delete's timestamp * @return timestamp @@ -273,7 +273,7 @@ public class Delete implements Writable, Row, Comparable { public long getTimeStamp() { return this.ts; } - + /** * @return string */ @@ -309,7 +309,7 @@ public class Delete implements Writable, Row, Comparable { sb.append("}"); return sb.toString(); } - + //Writable public void readFields(final DataInput in) throws IOException { int version = in.readByte(); @@ -332,8 +332,8 @@ public class Delete implements Writable, Row, Comparable { } this.familyMap.put(family, list); } - } - + } + public void write(final DataOutput out) throws IOException { out.writeByte(DELETE_VERSION); Bytes.writeByteArray(out, this.row); diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/Get.java b/core/src/main/java/org/apache/hadoop/hbase/client/Get.java index effb7a3..2246632 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -40,7 +40,7 @@ import java.util.TreeSet; * Used to perform Get operations on a single row. *

* To get everything for a row, instantiate a Get object with the row to get. - * To further define the scope of what to get, perform additional methods as + * To further define the scope of what to get, perform additional methods as * outlined below. *
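A matching client-side sketch; HTable.get and Result.getValue are assumed from the rest of the client API:

    Get g = new Get(Bytes.toBytes("row1"));                     // everything for row1 by default
    g.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"));  // narrow to info:name
    g.setMaxVersions(3);                                        // return up to three versions
    Result r = table.get(g);                                    // 'table' is an existing HTable
    byte [] latest = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("name"));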

* To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} @@ -68,7 +68,7 @@ public class Get implements Writable { private int maxVersions = 1; private Filter filter = null; private TimeRange tr = new TimeRange(); - private Map> familyMap = + private Map> familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); /** Constructor for Writable. DO NOT USE */ @@ -205,7 +205,7 @@ public class Get implements Writable { /** * Method for retrieving the get's row - * @return row + * @return row */ public byte [] getRow() { return this.row; @@ -233,7 +233,7 @@ public class Get implements Writable { */ public int getMaxVersions() { return this.maxVersions; - } + } /** * Method for retrieving the get's TimeRange @@ -294,7 +294,7 @@ public class Get implements Writable { return sb.toString(); } boolean moreThanOne = false; - for(Map.Entry> entry : + for(Map.Entry> entry : this.familyMap.entrySet()) { if(moreThanOne) { sb.append("), "); @@ -343,7 +343,7 @@ public class Get implements Writable { this.tr = new TimeRange(); tr.readFields(in); int numFamilies = in.readInt(); - this.familyMap = + this.familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); for(int i=0; i> entry : + for(Map.Entry> entry : familyMap.entrySet()) { Bytes.writeByteArray(out, entry.getKey()); NavigableSet columnSet = entry.getValue(); @@ -400,7 +400,7 @@ public class Get implements Writable { return WritableFactories.newInstance(clazz, new Configuration()); } catch (ClassNotFoundException e) { throw new RuntimeException("Can't find class " + className); - } + } } /** diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/core/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 971cff9..9a045b7 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -61,7 +61,7 @@ public class HBaseAdmin { /** * Constructor - * + * * @param conf Configuration object * @throws MasterNotRunningException if the master is not running */ @@ -85,7 +85,7 @@ public class HBaseAdmin { public HMasterInterface getMaster() throws MasterNotRunningException{ return this.connection.getMaster(); } - + /** @return - true if the master server is running */ public boolean isMasterRunning() { return this.connection.isMasterRunning(); @@ -121,14 +121,14 @@ public class HBaseAdmin { * catalog table that just contains table names and their descriptors. * Right now, it only exists as part of the META table's region info. * - * @return - returns an array of HTableDescriptors + * @return - returns an array of HTableDescriptors * @throws IOException if a remote or network exception occurs */ public HTableDescriptor[] listTables() throws IOException { return this.connection.listTables(); } - + /** * Method for getting the tableDescriptor * @param tableName as a byte [] @@ -139,7 +139,7 @@ public class HBaseAdmin { throws IOException { return this.connection.getHTableDescriptor(tableName); } - + private long getPauseTime(int tries) { int triesCount = tries; if (triesCount >= HConstants.RETRY_BACKOFF.length) @@ -150,9 +150,9 @@ public class HBaseAdmin { /** * Creates a new table. * Synchronous operation. 
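A sketch of the synchronous create path; the descriptor-building calls (the HTableDescriptor(String) and HColumnDescriptor(String) constructors and addFamily) are the usual client API and are assumptions here, not part of this hunk:

    HBaseAdmin admin = new HBaseAdmin(conf);        // 'conf' supplied by the caller
    HTableDescriptor desc = new HTableDescriptor("mytable");
    desc.addFamily(new HColumnDescriptor("info"));
    admin.createTable(desc);                        // blocks until the table's first region is on-line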
- * + * * @param desc table descriptor for table - * + * * @throws IllegalArgumentException if the table name is reserved * @throws MasterNotRunningException if master is not running * @throws TableExistsException if table already exists (If concurrent @@ -169,7 +169,7 @@ public class HBaseAdmin { // Wait for new table to come on-line connection.locateRegion(desc.getName(), HConstants.EMPTY_START_ROW); break; - + } catch (RegionException e) { if (tries == numRetries - 1) { // Ran out of tries @@ -183,13 +183,13 @@ public class HBaseAdmin { } } } - + /** * Creates a new table but does not block and wait for it to come online. * Asynchronous operation. - * + * * @param desc table descriptor for table - * + * * @throws IllegalArgumentException Bad table name. * @throws MasterNotRunningException if master is not running * @throws TableExistsException if table already exists (If concurrent @@ -213,7 +213,7 @@ public class HBaseAdmin { /** * Deletes a table. * Synchronous operation. - * + * * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs */ @@ -224,7 +224,7 @@ public class HBaseAdmin { /** * Deletes a table. * Synchronous operation. - * + * * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs */ @@ -302,12 +302,12 @@ public class HBaseAdmin { LOG.info("Deleted " + Bytes.toString(tableName)); } - + /** * Brings a table on-line (enables it). * Synchronous operation. - * + * * @param tableName name of the table * @throws IOException if a remote or network exception occurs */ @@ -318,7 +318,7 @@ public class HBaseAdmin { /** * Brings a table on-line (enables it). * Synchronous operation. - * + * * @param tableName name of the table * @throws IOException if a remote or network exception occurs */ @@ -363,7 +363,7 @@ public class HBaseAdmin { * Disables a table (takes it off-line) If it is being served, the master * will tell the servers to stop serving it. * Synchronous operation. - * + * * @param tableName name of table * @throws IOException if a remote or network exception occurs */ @@ -375,7 +375,7 @@ public class HBaseAdmin { * Disables a table (takes it off-line) If it is being served, the master * will tell the servers to stop serving it. * Synchronous operation. - * + * * @param tableName name of table * @throws IOException if a remote or network exception occurs */ @@ -414,7 +414,7 @@ public class HBaseAdmin { } LOG.info("Disabled " + Bytes.toString(tableName)); } - + /** * @param tableName name of table to check * @return true if table is on-line @@ -431,7 +431,7 @@ public class HBaseAdmin { public boolean isTableEnabled(byte[] tableName) throws IOException { return connection.isTableEnabled(tableName); } - + /** * @param tableName name of table to check * @return true if table is off-line @@ -462,7 +462,7 @@ public class HBaseAdmin { /** * Add a column to an existing table. * Asynchronous operation. - * + * * @param tableName name of the table to add column to * @param column column descriptor of column to be added * @throws IOException if a remote or network exception occurs @@ -475,7 +475,7 @@ public class HBaseAdmin { /** * Add a column to an existing table. * Asynchronous operation. - * + * * @param tableName name of the table to add column to * @param column column descriptor of column to be added * @throws IOException if a remote or network exception occurs @@ -496,7 +496,7 @@ public class HBaseAdmin { /** * Delete a column from a table. * Asynchronous operation. 
- * + * * @param tableName name of table * @param columnName name of column to be deleted * @throws IOException if a remote or network exception occurs @@ -509,7 +509,7 @@ public class HBaseAdmin { /** * Delete a column from a table. * Asynchronous operation. - * + * * @param tableName name of table * @param columnName name of column to be deleted * @throws IOException if a remote or network exception occurs @@ -530,13 +530,13 @@ public class HBaseAdmin { /** * Modify an existing column family on a table. * Asynchronous operation. - * + * * @param tableName name of table * @param columnName name of column to be modified * @param descriptor new column descriptor to use * @throws IOException if a remote or network exception occurs */ - public void modifyColumn(final String tableName, final String columnName, + public void modifyColumn(final String tableName, final String columnName, HColumnDescriptor descriptor) throws IOException { modifyColumn(Bytes.toBytes(tableName), Bytes.toBytes(columnName), @@ -546,13 +546,13 @@ public class HBaseAdmin { /** * Modify an existing column family on a table. * Asynchronous operation. - * + * * @param tableName name of table * @param columnName name of column to be modified * @param descriptor new column descriptor to use * @throws IOException if a remote or network exception occurs */ - public void modifyColumn(final byte [] tableName, final byte [] columnName, + public void modifyColumn(final byte [] tableName, final byte [] columnName, HColumnDescriptor descriptor) throws IOException { if (this.master == null) { @@ -569,7 +569,7 @@ public class HBaseAdmin { /** * Close a region. For expert-admins. * Asynchronous operation. - * + * * @param regionname region name to close * @param args Optional server name. Otherwise, we'll send close to the * server registered in .META. @@ -583,7 +583,7 @@ public class HBaseAdmin { /** * Close a region. For expert-admins. * Asynchronous operation. - * + * * @param regionname region name to close * @param args Optional server name. Otherwise, we'll send close to the * server registered in .META. @@ -602,11 +602,11 @@ public class HBaseAdmin { modifyTable(HConstants.META_TABLE_NAME, HConstants.Modify.CLOSE_REGION, newargs); } - + /** * Flush a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName table or region to flush * @throws IOException if a remote or network exception occurs */ @@ -617,7 +617,7 @@ public class HBaseAdmin { /** * Flush a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName table or region to flush * @throws IOException if a remote or network exception occurs */ @@ -628,7 +628,7 @@ public class HBaseAdmin { /** * Compact a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName table or region to compact * @throws IOException if a remote or network exception occurs */ @@ -639,18 +639,18 @@ public class HBaseAdmin { /** * Compact a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName table or region to compact * @throws IOException if a remote or network exception occurs */ public void compact(final byte [] tableNameOrRegionName) throws IOException { modifyTable(tableNameOrRegionName, HConstants.Modify.TABLE_COMPACT); } - + /** * Major compact a table or an individual region. * Asynchronous operation. 
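Continuing with the admin instance from the sketch above, the maintenance calls read as follows; note which ones block, per their javadoc:

    admin.flush("mytable");          // asynchronous: just queues the flush
    admin.majorCompact("mytable");   // asynchronous as well
    admin.disableTable("mytable");   // synchronous: waits for regions to go off-line
    admin.modifyColumn("mytable", "info", new HColumnDescriptor("info"));  // HColumnDescriptor(String) assumed
    admin.enableTable("mytable");    // synchronous: waits for regions to come back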
- * + * * @param tableNameOrRegionName table or region to major compact * @throws IOException if a remote or network exception occurs */ @@ -662,7 +662,7 @@ public class HBaseAdmin { /** * Major compact a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName table or region to major compact * @throws IOException if a remote or network exception occurs */ @@ -674,7 +674,7 @@ public class HBaseAdmin { /** * Split a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName table or region to split * @throws IOException if a remote or network exception occurs */ @@ -685,7 +685,7 @@ public class HBaseAdmin { /** * Split a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName table to region to split * @throws IOException if a remote or network exception occurs */ @@ -700,7 +700,7 @@ public class HBaseAdmin { * @param op * @throws IOException */ - private void modifyTable(final byte [] tableNameOrRegionName, + private void modifyTable(final byte [] tableNameOrRegionName, final HConstants.Modify op) throws IOException { if (tableNameOrRegionName == null) { @@ -712,16 +712,16 @@ public class HBaseAdmin { Object [] args = regionName == null? null: new byte [][] {regionName}; modifyTable(tableName == null? null: tableName, op, args); } - + /** * Modify an existing table, more IRB friendly version. * Asynchronous operation. - * + * * @param tableName name of table. * @param htd modified description of the table * @throws IOException if a remote or network exception occurs */ - public void modifyTable(final byte [] tableName, HTableDescriptor htd) + public void modifyTable(final byte [] tableName, HTableDescriptor htd) throws IOException { modifyTable(tableName, HConstants.Modify.TABLE_SET_HTD, htd); } @@ -729,14 +729,14 @@ public class HBaseAdmin { /** * Modify an existing table. * Asynchronous operation. - * + * * @param tableName name of table. May be null if we are operating on a * region. * @param op table modification operation * @param args operation specific arguments * @throws IOException if a remote or network exception occurs */ - public void modifyTable(final byte [] tableName, HConstants.Modify op, + public void modifyTable(final byte [] tableName, HConstants.Modify op, Object... args) throws IOException { if (this.master == null) { @@ -751,7 +751,7 @@ public class HBaseAdmin { try { switch (op) { case TABLE_SET_HTD: - if (args == null || args.length < 1 || + if (args == null || args.length < 1 || !(args[0] instanceof HTableDescriptor)) { throw new IllegalArgumentException("SET_HTD requires a HTableDescriptor"); } @@ -810,8 +810,8 @@ public class HBaseAdmin { } } - /** - * Shuts down the HBase instance + /** + * Shuts down the HBase instance * @throws IOException if a remote or network exception occurs */ public synchronized void shutdown() throws IOException { diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/core/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index d37f4ae..29b8324 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -52,7 +52,7 @@ public interface HConnection { /** @return - true if the master server is running */ public boolean isMasterRunning(); - + /** * Checks if tableName exists. * @param tableName Table to check. 
@@ -71,7 +71,7 @@ public interface HConnection { * @throws IOException if a remote or network exception occurs */ public boolean isTableEnabled(byte[] tableName) throws IOException; - + /** * @param tableName table name * @return true if the table is disabled, false otherwise @@ -93,51 +93,51 @@ public interface HConnection { * catalog table that just contains table names and their descriptors. * Right now, it only exists as part of the META table's region info. * - * @return - returns an array of HTableDescriptors + * @return - returns an array of HTableDescriptors * @throws IOException if a remote or network exception occurs */ public HTableDescriptor[] listTables() throws IOException; - + /** * @param tableName table name - * @return table metadata + * @return table metadata * @throws IOException if a remote or network exception occurs */ public HTableDescriptor getHTableDescriptor(byte[] tableName) throws IOException; - + /** * Find the location of the region of tableName that row * lives in. * @param tableName name of the table row is in * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the reigon in + * @return HRegionLocation that describes where to find the reigon in * question * @throws IOException if a remote or network exception occurs */ public HRegionLocation locateRegion(final byte [] tableName, final byte [] row) throws IOException; - + /** * Allows flushing the region cache. */ - public void clearRegionCache(); - + public void clearRegionCache(); + /** * Find the location of the region of tableName that row * lives in, ignoring any value that might be in the cache. * @param tableName name of the table row is in * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the reigon in + * @return HRegionLocation that describes where to find the reigon in * question * @throws IOException if a remote or network exception occurs */ public HRegionLocation relocateRegion(final byte [] tableName, final byte [] row) - throws IOException; - - /** + throws IOException; + + /** * Establishes a connection to the region server at the specified address. * @param regionServer - the server to connect to * @return proxy for HRegionServer @@ -145,8 +145,8 @@ public interface HConnection { */ public HRegionInterface getHRegionConnection(HServerAddress regionServer) throws IOException; - - /** + + /** * Establishes a connection to the region server at the specified address. * @param regionServer - the server to connect to * @param getMaster - do we check if master is alive @@ -156,7 +156,7 @@ public interface HConnection { public HRegionInterface getHRegionConnection( HServerAddress regionServer, boolean getMaster) throws IOException; - + /** * Find region location hosting passed row * @param tableName table name @@ -170,8 +170,8 @@ public interface HConnection { throws IOException; /** - * Pass in a ServerCallable with your particular bit of logic defined and - * this method will manage the process of doing retries with timed waits + * Pass in a ServerCallable with your particular bit of logic defined and + * this method will manage the process of doing retries with timed waits * and refinds of missing regions. 
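A sketch of how callers use this retry helper. The ServerCallable constructor arguments, its protected server and location fields, and HRegionInterface.get(byte[], Get) are assumptions drawn from the surrounding client code rather than from this hunk:

    Result r = connection.getRegionServerWithRetries(
        new ServerCallable<Result>(connection, tableName, row) {  // assumed constructor
          public Result call() throws IOException {
            // 'server' and 'location' are assumed to be set up before each attempt;
            // 'get' is a previously built Get for 'row'
            return server.get(location.getRegionInfo().getRegionName(), get);
          }
        });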
* * @param the type of the return value @@ -180,9 +180,9 @@ public interface HConnection { * @throws IOException if a remote or network exception occurs * @throws RuntimeException other unspecified error */ - public T getRegionServerWithRetries(ServerCallable callable) + public T getRegionServerWithRetries(ServerCallable callable) throws IOException, RuntimeException; - + /** * Pass in a ServerCallable with your particular bit of logic defined and * this method will pass it to the defined region server. @@ -192,10 +192,10 @@ public interface HConnection { * @throws IOException if a remote or network exception occurs * @throws RuntimeException other unspecified error */ - public T getRegionServerForWithoutRetries(ServerCallable callable) + public T getRegionServerForWithoutRetries(ServerCallable callable) throws IOException, RuntimeException; - - + + /** * Process a batch of Puts. Does the retries. * @param list A batch of Puts to process. @@ -215,9 +215,9 @@ public interface HConnection { */ public int processBatchOfDeletes(List list, byte[] tableName) throws IOException; - + public void processBatchOfPuts(List list, final byte[] tableName, ExecutorService pool) throws IOException; - + } diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/core/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 23e713b..0205076 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -67,7 +67,7 @@ import java.util.concurrent.atomic.AtomicBoolean; /** * A non-instantiable class that manages connections to multiple tables in * multiple HBase instances. - * + * * Used by {@link HTable} and {@link HBaseAdmin} */ public class HConnectionManager implements HConstants { @@ -90,13 +90,13 @@ public class HConnectionManager implements HConstants { protected HConnectionManager() { super(); } - + private static final int MAX_CACHED_HBASE_INSTANCES=31; - // A LRU Map of master HBaseConfiguration -> connection information for that + // A LRU Map of master HBaseConfiguration -> connection information for that // instance. The objects it contains are mutable and hence require // synchronized access to them. We set instances to 31. The zk default max // connections is 30 so should run into zk issues before hit this value of 31. - private static + private static final Map HBASE_INSTANCES = new LinkedHashMap ((int) (MAX_CACHED_HBASE_INSTANCES/0.75F)+1, 0.75F, true) { @@ -105,10 +105,10 @@ public class HConnectionManager implements HConstants { return size() > MAX_CACHED_HBASE_INSTANCES; } }; - - private static final Map ZK_WRAPPERS = + + private static final Map ZK_WRAPPERS = new HashMap(); - + /** * Get the connection object for the instance specified by the configuration * If no current connection exists, create a new connection for that instance @@ -127,7 +127,7 @@ public class HConnectionManager implements HConstants { } return connection; } - + /** * Delete connection information for the instance specified by configuration * @param conf configuration @@ -180,7 +180,7 @@ public class HConnectionManager implements HConstants { } return ZK_WRAPPERS.get(ZooKeeperWrapper.getZookeeperClusterKey(conf)); } - + /** * This class is responsible to handle connection and reconnection * to a zookeeper quorum. 
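Typical client usage of the cache described above; getConnection taking the job Configuration is assumed from the javadoc, and connections are shared per configuration up to the 31-entry LRU bound noted in the code:

    HConnection connection = HConnectionManager.getConnection(conf);  // cached per 'conf'
    if (connection.isMasterRunning()
        && connection.tableExists(Bytes.toBytes("mytable"))) {
      HRegionLocation location =
          connection.locateRegion(Bytes.toBytes("mytable"), Bytes.toBytes("row1"));  // host/region for row1
    }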
@@ -220,7 +220,7 @@ public class HConnectionManager implements HConstants { resetZooKeeper(); } } - + /** * Get this watcher's ZKW, instanciate it if necessary. * @return ZKW @@ -229,10 +229,10 @@ public class HConnectionManager implements HConstants { public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException { if(zooKeeperWrapper == null) { zooKeeperWrapper = new ZooKeeperWrapper(conf, this); - } + } return zooKeeperWrapper; } - + /** * Clear this connection to zookeeper. */ @@ -257,25 +257,25 @@ public class HConnectionManager implements HConstants { private volatile boolean closed; private volatile HMasterInterface master; private volatile boolean masterChecked; - + private final Object rootRegionLock = new Object(); private final Object metaRegionLock = new Object(); private final Object userRegionLock = new Object(); - + private volatile Configuration conf; - // Known region HServerAddress.toString() -> HRegionInterface + // Known region HServerAddress.toString() -> HRegionInterface private final Map servers = new ConcurrentHashMap(); // Used by master and region servers during safe mode only - private volatile HRegionLocation rootRegionLocation; - - private final Map> + private volatile HRegionLocation rootRegionLocation; + + private final Map> cachedRegionLocations = new HashMap>(); - /** + /** * constructor * @param conf Configuration object */ @@ -287,11 +287,11 @@ public class HConnectionManager implements HConstants { conf.get(REGION_SERVER_CLASS, DEFAULT_REGION_SERVER_CLASS); this.closed = false; - + try { this.serverInterfaceClass = (Class) Class.forName(serverClassName); - + } catch (ClassNotFoundException e) { throw new UnsupportedOperationException( "Unable to find region server interface " + serverClassName, e); @@ -301,7 +301,7 @@ public class HConnectionManager implements HConstants { this.numRetries = conf.getInt("hbase.client.retries.number", 10); this.maxRPCAttempts = conf.getInt("hbase.client.rpc.maxattempts", 1); this.rpcTimeout = conf.getLong(HBASE_REGIONSERVER_LEASE_PERIOD_KEY, DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD); - + this.master = null; this.masterChecked = false; } @@ -317,7 +317,7 @@ public class HConnectionManager implements HConstants { public void unsetRootRegionLocation() { this.rootRegionLocation = null; } - + // Used by master and region servers during safe mode only public void setRootRegionLocation(HRegionLocation rootRegion) { if (rootRegion == null) { @@ -326,7 +326,7 @@ public class HConnectionManager implements HConstants { } this.rootRegionLocation = rootRegion; } - + public HMasterInterface getMaster() throws MasterNotRunningException { ZooKeeperWrapper zk; try { @@ -347,15 +347,15 @@ public class HConnectionManager implements HConstants { masterLocation = zk.readMasterAddressOrThrow(); HMasterInterface tryMaster = (HMasterInterface)HBaseRPC.getProxy( - HMasterInterface.class, HBaseRPCProtocolVersion.versionID, + HMasterInterface.class, HBaseRPCProtocolVersion.versionID, masterLocation.getInetSocketAddress(), this.conf); - + if (tryMaster.isMasterRunning()) { this.master = tryMaster; this.masterLock.notifyAll(); break; } - + } catch (IOException e) { if (tries == numRetries - 1) { // This was our last chance - don't bother sleeping @@ -390,7 +390,7 @@ public class HConnectionManager implements HConstants { if (this.master == null) { try { getMaster(); - + } catch (MasterNotRunningException e) { return false; } @@ -420,7 +420,7 @@ public class HConnectionManager implements HConstants { } return exists; } - + /* * @param n * 
@return Truen if passed tablename n is equal to the name @@ -463,11 +463,11 @@ public class HConnectionManager implements HConstants { return uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]); } - + public boolean isTableEnabled(byte[] tableName) throws IOException { return testTableOnlineState(tableName, true); } - + public boolean isTableDisabled(byte[] tableName) throws IOException { return testTableOnlineState(tableName, false); } @@ -489,7 +489,7 @@ public class HConnectionManager implements HConstants { } } return true; - } + } }; MetaScanner.metaScan(conf, visitor); return available.get(); @@ -522,7 +522,7 @@ public class HConnectionManager implements HConstants { scan.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER); int rows = this.conf.getInt("hbase.meta.scanner.caching", 100); scan.setCaching(rows); - ScannerCallable s = new ScannerCallable(this, + ScannerCallable s = new ScannerCallable(this, (Bytes.equals(tableName, HConstants.META_TABLE_NAME) ? HConstants.ROOT_TABLE_NAME : HConstants.META_TABLE_NAME), scan); try { @@ -560,7 +560,7 @@ public class HConnectionManager implements HConstants { return rowsScanned > 0 && onOffLine; } - private static class HTableDescriptorFinder + private static class HTableDescriptorFinder implements MetaScanner.MetaScannerVisitor { byte[] tableName; HTableDescriptor result; @@ -618,18 +618,18 @@ public class HConnectionManager implements HConstants { throw new IllegalArgumentException( "table name cannot be null or zero length"); } - + if (Bytes.equals(tableName, ROOT_TABLE_NAME)) { synchronized (rootRegionLock) { // This block guards against two threads trying to find the root - // region at the same time. One will go do the find while the + // region at the same time. One will go do the find while the // second waits. The second thread will not do find. - + if (!useCache || rootRegionLocation == null) { this.rootRegionLocation = locateRootRegion(); } return this.rootRegionLocation; - } + } } else if (Bytes.equals(tableName, META_TABLE_NAME)) { return locateRegionInMeta(ROOT_TABLE_NAME, tableName, row, useCache, metaRegionLock); @@ -658,7 +658,7 @@ public class HConnectionManager implements HConstants { return location; } } - + // build the key of the meta region we should be looking for. // the extra 9's on the end are necessary to allow "exact" matches // without knowing the precise region names. @@ -666,7 +666,7 @@ public class HConnectionManager implements HConstants { HConstants.NINES); for (int tries = 0; true; tries++) { if (tries >= numRetries) { - throw new NoServerForRegionException("Unable to find region for " + throw new NoServerForRegionException("Unable to find region for " + Bytes.toStringBinary(row) + " after " + numRetries + " tries."); } @@ -702,10 +702,10 @@ public class HConnectionManager implements HConstants { if (regionInfoRow == null) { throw new TableNotFoundException(Bytes.toString(tableName)); } - byte [] value = regionInfoRow.getValue(CATALOG_FAMILY, + byte [] value = regionInfoRow.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER); if (value == null || value.length == 0) { - throw new IOException("HRegionInfo was null or empty in " + + throw new IOException("HRegionInfo was null or empty in " + Bytes.toString(parentTable) + ", row=" + regionInfoRow); } // convert the row result into the HRegionLocation we need! 
@@ -717,21 +717,21 @@ public class HConnectionManager implements HConstants { "Table '" + Bytes.toString(tableName) + "' was not found."); } if (regionInfo.isOffline()) { - throw new RegionOfflineException("region offline: " + + throw new RegionOfflineException("region offline: " + regionInfo.getRegionNameAsString()); } - + value = regionInfoRow.getValue(CATALOG_FAMILY, SERVER_QUALIFIER); String serverAddress = ""; if(value != null) { serverAddress = Bytes.toString(value); } - if (serverAddress.equals("")) { + if (serverAddress.equals("")) { throw new NoServerForRegionException("No server address listed " + "in " + Bytes.toString(parentTable) + " for region " + regionInfo.getRegionNameAsString()); } - + // instantiate the location location = new HRegionLocation(regionInfo, new HServerAddress(serverAddress)); @@ -773,10 +773,10 @@ public class HConnectionManager implements HConstants { /* * Search the cache for a location that fits our table and row key. * Return null if no suitable region is located. TODO: synchronization note - * + * *

TODO: This method during writing consumes 15% of CPU doing lookup * into the Soft Reference SortedMap. Improve. - * + * * @param tableName * @param row * @return Null or region location found in cache. @@ -883,7 +883,7 @@ public class HConnectionManager implements HConstants { } } } - + /* * @param tableName * @return Map of cached locations for passed tableName @@ -909,9 +909,9 @@ public class HConnectionManager implements HConstants { * Allows flushing the region cache. */ public void clearRegionCache() { - cachedRegionLocations.clear(); + cachedRegionLocations.clear(); } - + /* * Put a newly discovered HRegionLocation into the cache. */ @@ -928,7 +928,7 @@ public class HConnectionManager implements HConstants { } public HRegionInterface getHRegionConnection( - HServerAddress regionServer, boolean getMaster) + HServerAddress regionServer, boolean getMaster) throws IOException { if (getMaster) { getMaster(); @@ -941,7 +941,7 @@ public class HConnectionManager implements HConstants { try { server = (HRegionInterface)HBaseRPC.waitForProxy( serverInterfaceClass, HBaseRPCProtocolVersion.versionID, - regionServer.getInetSocketAddress(), this.conf, + regionServer.getInetSocketAddress(), this.conf, this.maxRPCAttempts, this.rpcTimeout); } catch (RemoteException e) { throw RemoteExceptionHandler.decodeRemoteException(e); @@ -951,9 +951,9 @@ public class HConnectionManager implements HConstants { } return server; } - + public HRegionInterface getHRegionConnection( - HServerAddress regionServer) + HServerAddress regionServer) throws IOException { return getHRegionConnection(regionServer, false); } @@ -969,7 +969,7 @@ public class HConnectionManager implements HConstants { * @return HRegionLocation for root region if found * @throws NoServerForRegionException - if the root region can not be * located after retrying - * @throws IOException + * @throws IOException */ private HRegionLocation locateRootRegion() throws IOException { @@ -1022,7 +1022,7 @@ public class HConnectionManager implements HConstants { throw new NoServerForRegionException("Timed out trying to locate "+ "root region because: " + t.getMessage()); } - + // Sleep and retry finding root region. try { if (LOG.isDebugEnabled()) { @@ -1036,17 +1036,17 @@ public class HConnectionManager implements HConstants { // continue } } - + rootRegionAddress = null; } - + // if the address is null by this point, then the retries have failed, // and we're sort of sunk if (rootRegionAddress == null) { throw new NoServerForRegionException( "unable to locate root region server"); } - + // return the region location return new HRegionLocation( HRegionInfo.ROOT_REGIONINFO, rootRegionAddress); @@ -1074,9 +1074,9 @@ public class HConnectionManager implements HConstants { // continue } } - return null; + return null; } - + public T getRegionServerForWithoutRetries(ServerCallable callable) throws IOException, RuntimeException { try { diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/core/src/main/java/org/apache/hadoop/hbase/client/HTable.java index 074f80e..5775a44 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -74,7 +74,7 @@ public class HTable implements HTableInterface { private int maxKeyValueSize; private long maxScannerResultSize; - + /** * Creates an object to access a HBase table. 
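[Editor's note] HTable sits on top of the connection and region-location machinery above. A small sketch of opening and flushing one follows, assuming the (Configuration, String) constructor and a hypothetical table name.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.HTable;

    public class TableSketch {
      static HTable openTable(Configuration conf) throws IOException {
        HTable t = new HTable(conf, "t1");   // "t1" is a made-up table name
        t.setAutoFlush(false);               // buffer puts client-side instead of one RPC per put
        return t;
      }

      static void finish(HTable t) throws IOException {
        t.flushCommits();                    // push any buffered edits
        t.close();
      }
    }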
* @@ -134,9 +134,9 @@ public class HTable implements HTableInterface { this.autoFlush = true; this.currentWriteBufferSize = 0; this.scannerCaching = conf.getInt("hbase.client.scanner.caching", 1); - + this.maxScannerResultSize = conf.getLong( - HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, + HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); this.maxKeyValueSize = conf.getInt("hbase.client.keyvalue.maxsize", -1); @@ -454,7 +454,7 @@ public class HTable implements HTableInterface { flushCommits(); } } - + public long incrementColumnValue(final byte [] row, final byte [] family, final byte [] qualifier, final long amount) throws IOException { @@ -653,7 +653,7 @@ public class HTable implements HTableInterface { public ArrayList getWriteBuffer() { return writeBuffer; } - + /** * Implements the scanner interface for the HBase client. * If there are multiple regions in a table, this scanner will iterate diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java b/core/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java index 6dfdff6..8987016 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java @@ -42,14 +42,14 @@ public class HTableFactory implements HTableInterfaceFactory { @Override public void releaseHTableInterface(HTableInterface table) { - try { + try { table.close(); } catch (IOException ioe) { throw new RuntimeException(ioe); } - + } - - - + + + } diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java b/core/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java index 4a87005..3c2a518 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java @@ -230,7 +230,7 @@ public interface HTableInterface { * @throws IOException if a remote or network exception occurs. */ long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, - long amount, boolean writeToWAL) throws IOException; + long amount, boolean writeToWAL) throws IOException; /** * Tells whether or not 'auto-flush' is turned on. diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java b/core/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java index a80d94e..5d68291 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java @@ -37,8 +37,8 @@ public interface HTableInterfaceFactory { * @return HTableInterface instance. */ HTableInterface createHTableInterface(Configuration config, byte[] tableName); - - + + /** * Release the HTable resource represented by the table. * @param table diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java b/core/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java index c7f61fa..0a7bc4f 100755 --- a/core/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java @@ -125,15 +125,15 @@ public class HTablePool { protected HTableInterface createHTable(String tableName) { return this.tableFactory.createHTableInterface(config, Bytes.toBytes(tableName)); } - + /** * Closes all the HTable instances , belonging to the given table, in the table pool. *

- * Note: this is a 'shutdown' of the given table pool and different from - * {@link #putTable(HTableInterface)}, that is used to return the table + * Note: this is a 'shutdown' of the given table pool and different from + * {@link #putTable(HTableInterface)}, that is used to return the table * instance to the pool for future re-use. - * - * @param tableName + * + * @param tableName */ public void closeTablePool(final String tableName) { Queue queue = tables.get(tableName); @@ -149,7 +149,7 @@ public class HTablePool { /** * See {@link #closeTablePool(String)}. - * + * * @param tableName */ public void closeTablePool(final byte[] tableName) { @@ -161,5 +161,5 @@ public class HTablePool { synchronized(queue) { return queue.size(); } - } + } } diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/core/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java index b6bfd13..3de661e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.util.Bytes; import java.io.IOException; /** - * Scanner class that contains the .META. table scanning logic + * Scanner class that contains the .META. table scanning logic * and uses a Retryable scanner. Provided visitors will be called * for each row. */ @@ -37,7 +37,7 @@ class MetaScanner implements HConstants { /** * Scans the meta table and calls a visitor on each RowResult and uses a empty * start row value as table name. - * + * * @param configuration conf * @param visitor A custom visitor * @throws IOException e @@ -51,7 +51,7 @@ class MetaScanner implements HConstants { /** * Scans the meta table and calls a visitor on each RowResult. Uses a table * name to locate meta regions. - * + * * @param configuration config * @param visitor visitor object * @param tableName table name @@ -62,12 +62,12 @@ class MetaScanner implements HConstants { throws IOException { HConnection connection = HConnectionManager.getConnection(configuration); byte [] startRow = tableName == null || tableName.length == 0 ? - HConstants.EMPTY_START_ROW : + HConstants.EMPTY_START_ROW : HRegionInfo.createRegionName(tableName, null, ZEROES); - + // Scan over each meta region ScannerCallable callable; - int rows = configuration.getInt("hbase.meta.scanner.caching", 100); + int rows = configuration.getInt("hbase.meta.scanner.caching", 100); do { Scan scan = new Scan(startRow).addFamily(CATALOG_FAMILY); callable = new ScannerCallable(connection, META_TABLE_NAME, scan); @@ -76,7 +76,7 @@ class MetaScanner implements HConstants { try { callable.setCaching(rows); done: do { - //we have all the rows here + //we have all the rows here Result [] rrs = connection.getRegionServerWithRetries(callable); if (rrs == null || rrs.length == 0 || rrs[0].size() == 0) { break; //exit completely @@ -105,7 +105,7 @@ class MetaScanner implements HConstants { * Visitor method that accepts a RowResult and the meta region location. * Implementations can return false to stop the region's loop if it becomes * unnecessary for some reason. 
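[Editor's note] The HTablePool javadoc above distinguishes putTable() (return one instance to the pool) from closeTablePool() (shut down the whole per-table pool). A usage sketch follows; the (Configuration, int) constructor, the pool size and the table name are assumptions.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.client.HTablePool;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PoolSketch {
      static void writeOne(Configuration conf) throws IOException {
        HTablePool pool = new HTablePool(conf, 10);      // keep at most 10 HTables per table name
        HTableInterface t = pool.getTable("t1");         // hypothetical table name
        try {
          Put p = new Put(Bytes.toBytes("row-0001"));
          p.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          t.put(p);
        } finally {
          pool.putTable(t);                              // hand the instance back, do not close it
        }
        pool.closeTablePool("t1");                       // 'shutdown' of this table's pool
      }
    }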
- * + * * @param rowResult result * @return A boolean to know if it should continue to loop in the region * @throws IOException e diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/MultiPut.java b/core/src/main/java/org/apache/hadoop/hbase/client/MultiPut.java index 38bc0a3..8ffd100 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/MultiPut.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/MultiPut.java @@ -62,7 +62,7 @@ public class MultiPut implements Writable { } return size; } - + public void add(byte[] regionName, Put aPut) { List rsput = puts.get(regionName); if (rsput == null) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/MultiPutResponse.java b/core/src/main/java/org/apache/hadoop/hbase/client/MultiPutResponse.java index 787c955..0b7a6c6 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/MultiPutResponse.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/MultiPutResponse.java @@ -47,7 +47,7 @@ public class MultiPutResponse implements Writable { public Integer getAnswer(byte[] region) { return answers.get(region); } - + @Override public void write(DataOutput out) throws IOException { out.writeInt(answers.size()); diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/Put.java b/core/src/main/java/org/apache/hadoop/hbase/client/Put.java index bb03a1d..2479b80 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/Put.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -37,7 +37,7 @@ import java.util.Map; import java.util.TreeMap; -/** +/** * Used to perform Put operations for a single row. *

* To perform a Put, instantiate a Put object with the row to insert to and @@ -51,18 +51,18 @@ public class Put implements HeapSize, Writable, Row, Comparable { private long timestamp = HConstants.LATEST_TIMESTAMP; private long lockId = -1L; private boolean writeToWAL = true; - + private Map> familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); - + private static final long OVERHEAD = ClassSize.align( - ClassSize.OBJECT + ClassSize.REFERENCE + - 2 * Bytes.SIZEOF_LONG + Bytes.SIZEOF_BOOLEAN + + ClassSize.OBJECT + ClassSize.REFERENCE + + 2 * Bytes.SIZEOF_LONG + Bytes.SIZEOF_BOOLEAN + ClassSize.REFERENCE + ClassSize.TREEMAP); - + /** Constructor for Writable. DO NOT USE */ public Put() {} - + /** * Create a Put operation for the specified row. * @param row row key @@ -82,7 +82,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { /** * Create a Put operation for the specified row, using a given timestamp. - * + * * @param row row key * @param ts timestamp */ @@ -113,7 +113,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { */ public Put(Put putToCopy) { this(putToCopy.getRow(), putToCopy.timestamp, putToCopy.getRowLock()); - this.familyMap = + this.familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); for(Map.Entry> entry : putToCopy.getFamilyMap().entrySet()) { @@ -134,7 +134,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { } /** - * Add the specified column and value, with the specified timestamp as + * Add the specified column and value, with the specified timestamp as * its version to this Put operation. * @param family family name * @param qualifier column qualifier @@ -149,9 +149,9 @@ public class Put implements HeapSize, Writable, Row, Comparable { familyMap.put(kv.getFamily(), list); return this; } - + /** - * Add the specified KeyValue to this Put operation. Operation assumes that + * Add the specified KeyValue to this Put operation. Operation assumes that * the passed KeyValue is immutable and its backing array will not be modified * for the duration of this Put. * @param kv individual KeyValue @@ -162,12 +162,12 @@ public class Put implements HeapSize, Writable, Row, Comparable { byte [] family = kv.getFamily(); List list = getKeyValueList(family); //Checking that the row of the kv is the same as the put - int res = Bytes.compareTo(this.row, 0, row.length, + int res = Bytes.compareTo(this.row, 0, row.length, kv.getBuffer(), kv.getRowOffset(), kv.getRowLength()); if(res != 0) { - throw new IOException("The row in the recently added KeyValue " + - Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(), - kv.getRowLength()) + " doesn't match the original one " + + throw new IOException("The row in the recently added KeyValue " + + Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(), + kv.getRowLength()) + " doesn't match the original one " + Bytes.toStringBinary(this.row)); } list.add(kv); @@ -177,20 +177,20 @@ public class Put implements HeapSize, Writable, Row, Comparable { /* * Create a KeyValue with this objects row key and the Put identifier. - * + * * @return a KeyValue with this objects row key and the Put identifier. 
*/ private KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value) { - return new KeyValue(this.row, family, qualifier, ts, KeyValue.Type.Put, + return new KeyValue(this.row, family, qualifier, ts, KeyValue.Type.Put, value); } - + /** - * A convenience method to determine if this object's familyMap contains + * A convenience method to determine if this object's familyMap contains * a value assigned to the given family & qualifier. * Both given arguments must match the KeyValue object to return true. - * + * * @param family column family * @param qualifier column qualifier * @return returns true if the given family and qualifier already has an @@ -199,12 +199,12 @@ public class Put implements HeapSize, Writable, Row, Comparable { public boolean has(byte [] family, byte [] qualifier) { return has(family, qualifier, this.timestamp, new byte[0], true, true); } - + /** - * A convenience method to determine if this object's familyMap contains + * A convenience method to determine if this object's familyMap contains * a value assigned to the given family, qualifier and timestamp. * All 3 given arguments must match the KeyValue object to return true. - * + * * @param family column family * @param qualifier column qualifier * @param ts timestamp @@ -214,12 +214,12 @@ public class Put implements HeapSize, Writable, Row, Comparable { public boolean has(byte [] family, byte [] qualifier, long ts) { return has(family, qualifier, ts, new byte[0], false, true); } - + /** - * A convenience method to determine if this object's familyMap contains + * A convenience method to determine if this object's familyMap contains * a value assigned to the given family, qualifier and timestamp. * All 3 given arguments must match the KeyValue object to return true. - * + * * @param family column family * @param qualifier column qualifier * @param value value to check @@ -229,38 +229,38 @@ public class Put implements HeapSize, Writable, Row, Comparable { public boolean has(byte [] family, byte [] qualifier, byte [] value) { return has(family, qualifier, this.timestamp, value, true, false); } - + /** - * A convenience method to determine if this object's familyMap contains + * A convenience method to determine if this object's familyMap contains * the given value assigned to the given family, qualifier and timestamp. * All 4 given arguments must match the KeyValue object to return true. - * + * * @param family column family * @param qualifier column qualifier * @param ts timestamp * @param value value to check - * @return returns true if the given family, qualifier timestamp and value + * @return returns true if the given family, qualifier timestamp and value * already has an existing KeyValue object in the family map. */ public boolean has(byte [] family, byte [] qualifier, long ts, byte [] value) { return has(family, qualifier, ts, value, false, false); } - + /* - * Private method to determine if this object's familyMap contains + * Private method to determine if this object's familyMap contains * the given value assigned to the given family, qualifier and timestamp * respecting the 2 boolean arguments - * + * * @param family * @param qualifier * @param ts * @param value * @param ignoreTS * @param ignoreValue - * @return returns true if the given family, qualifier timestamp and value + * @return returns true if the given family, qualifier timestamp and value * already has an existing KeyValue object in the family map. 
*/ - private boolean has(byte [] family, byte [] qualifier, long ts, byte [] value, + private boolean has(byte [] family, byte [] qualifier, long ts, byte [] value, boolean ignoreTS, boolean ignoreValue) { List list = getKeyValueList(family); if (list.size() == 0) { @@ -292,13 +292,13 @@ public class Put implements HeapSize, Writable, Row, Comparable { } return false; } - + /** * Returns a list of all KeyValue objects with matching column family and qualifier. - * + * * @param family column family * @param qualifier column qualifier - * @return a list of KeyValue objects with the matching family and qualifier, + * @return a list of KeyValue objects with the matching family and qualifier, * returns an empty list if one doesnt exist for the given family. */ public List get(byte[] family, byte[] qualifier) { @@ -314,7 +314,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { /** * Creates an empty list if one doesnt exist for the given column family * or else it returns the associated list of KeyValue objects. - * + * * @param family column family * @return a list of KeyValue objects, returns an empty list if one doesnt exist. */ @@ -325,7 +325,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { } return list; } - + /** * Method for retrieving the put's familyMap * @return familyMap @@ -333,15 +333,15 @@ public class Put implements HeapSize, Writable, Row, Comparable { public Map> getFamilyMap() { return this.familyMap; } - + /** * Method for retrieving the put's row - * @return row + * @return row */ public byte [] getRow() { return this.row; } - + /** * Method for retrieving the put's RowLock * @return RowLock @@ -349,7 +349,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { public RowLock getRowLock() { return new RowLock(this.row, this.lockId); } - + /** * Method for retrieving the put's lockId * @return lockId @@ -357,7 +357,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { public long getLockId() { return this.lockId; } - + /** * Method to check if the familyMap is empty * @return true if empty, false otherwise @@ -365,16 +365,16 @@ public class Put implements HeapSize, Writable, Row, Comparable { public boolean isEmpty() { return familyMap.isEmpty(); } - + /** * @return Timestamp */ public long getTimeStamp() { return this.timestamp; } - + /** - * @return the number of different families included in this put + * @return the number of different families included in this put */ public int numFamilies() { return familyMap.size(); @@ -390,14 +390,14 @@ public class Put implements HeapSize, Writable, Row, Comparable { } return size; } - + /** * @return true if edits should be applied to WAL, false if not */ public boolean getWriteToWAL() { return this.writeToWAL; } - + /** * Set whether this Put should be written to the WAL or not. * Not writing the WAL means you may lose edits on server crash. 
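[Editor's note] A short sketch exercising the Put surface documented above; the row, family and qualifiers are made up, and, as noted, add(KeyValue) insists that the KeyValue's row match the Put's row.

    import java.io.IOException;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutSketch {
      static Put buildPut() throws IOException {
        byte [] row = Bytes.toBytes("row-0001");
        byte [] cf  = Bytes.toBytes("cf");

        Put p = new Put(row);
        p.add(cf, Bytes.toBytes("q1"), Bytes.toBytes("v1"));          // latest timestamp
        p.add(cf, Bytes.toBytes("q2"), 42L, Bytes.toBytes("v2"));     // explicit timestamp

        // add(KeyValue) requires the KeyValue's row to match the Put's row.
        p.add(new KeyValue(row, cf, Bytes.toBytes("q3"), 42L,
            KeyValue.Type.Put, Bytes.toBytes("v3")));

        if (!p.has(cf, Bytes.toBytes("q1"))) {           // has() checks the familyMap
          throw new IllegalStateException("q1 should be present");
        }
        p.setWriteToWAL(false);   // skip the WAL: faster, but edits may be lost on server crash
        return p;
      }
    }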
@@ -406,9 +406,9 @@ public class Put implements HeapSize, Writable, Row, Comparable { public void setWriteToWAL(boolean write) { this.writeToWAL = write; } - + /** - * @return String + * @return String */ @Override public String toString() { @@ -440,40 +440,40 @@ public class Put implements HeapSize, Writable, Row, Comparable { sb.append("}"); return sb.toString(); } - + public int compareTo(Row p) { return Bytes.compareTo(this.getRow(), p.getRow()); } - + //HeapSize public long heapSize() { long heapsize = OVERHEAD; //Adding row heapsize += ClassSize.align(ClassSize.ARRAY + this.row.length); - + //Adding map overhead - heapsize += + heapsize += ClassSize.align(this.familyMap.size() * ClassSize.MAP_ENTRY); for(Map.Entry> entry : this.familyMap.entrySet()) { //Adding key overhead - heapsize += + heapsize += ClassSize.align(ClassSize.ARRAY + entry.getKey().length); - + //This part is kinds tricky since the JVM can reuse references if you //store the same value, but have a good match with SizeOf at the moment //Adding value overhead heapsize += ClassSize.align(ClassSize.ARRAYLIST); int size = entry.getValue().size(); - heapsize += ClassSize.align(ClassSize.ARRAY + + heapsize += ClassSize.align(ClassSize.ARRAY + size * ClassSize.REFERENCE); - + for(KeyValue kv : entry.getValue()) { heapsize += kv.heapSize(); } } return ClassSize.align((int)heapsize); } - + //Writable public void readFields(final DataInput in) throws IOException { @@ -503,7 +503,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { this.familyMap.put(family, keys); } } - + public void write(final DataOutput out) throws IOException { out.writeByte(PUT_VERSION); diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/Result.java b/core/src/main/java/org/apache/hadoop/hbase/client/Result.java index f9c1113..02ddb4d 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -39,34 +39,34 @@ import java.util.TreeMap; /** * Single row result of a {@link Get} or {@link Scan} query.

- * + * * Convenience methods are available that return various {@link Map} * structures and values directly.

- * - * To get a complete mapping of all cells in the Result, which can include + * + * To get a complete mapping of all cells in the Result, which can include * multiple families and multiple versions, use {@link #getMap()}.

- * - * To get a mapping of each family to its columns (qualifiers and values), + * + * To get a mapping of each family to its columns (qualifiers and values), * including only the latest version of each, use {@link #getNoVersionMap()}. - * - * To get a mapping of qualifiers to latest values for an individual family use + * + * To get a mapping of qualifiers to latest values for an individual family use * {@link #getFamilyMap(byte[])}.

- * + * * To get the latest value for a specific family and qualifier use {@link #getValue(byte[], byte[])}. * * A Result is backed by an array of {@link KeyValue} objects, each representing * an HBase cell defined by the row, family, qualifier, timestamp, and value.
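[Editor's note] On the read side, a sketch of pulling values back out of a Result with the accessors named above; the table handle, family and qualifier are assumptions.

    import java.io.IOException;
    import java.util.NavigableMap;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ResultSketch {
      static void readRow(HTable t) throws IOException {
        byte [] cf = Bytes.toBytes("cf");
        Result r = t.get(new Get(Bytes.toBytes("row-0001")));
        if (r.isEmpty()) {
          return;                                                // row not found
        }
        // Latest value of one cell.
        byte [] v = r.getValue(cf, Bytes.toBytes("q1"));
        // Latest value per qualifier for one family.
        NavigableMap<byte [], byte []> latestByQualifier = r.getFamilyMap(cf);
        // Every version of every cell: family -> qualifier -> timestamp -> value.
        NavigableMap<byte [], NavigableMap<byte [], NavigableMap<Long, byte []>>> all = r.getMap();
        System.out.println(Bytes.toString(v) + " / " + latestByQualifier.size()
            + " qualifiers / " + all.size() + " families");
      }
    }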

- * + * * The underlying {@link KeyValue} objects can be accessed through the methods * {@link #sorted()} and {@link #list()}. Each KeyValue can then be accessed - * through {@link KeyValue#getRow()}, {@link KeyValue#getFamily()}, {@link KeyValue#getQualifier()}, + * through {@link KeyValue#getRow()}, {@link KeyValue#getFamily()}, {@link KeyValue#getQualifier()}, * {@link KeyValue#getTimestamp()}, and {@link KeyValue#getValue()}. */ public class Result implements Writable { private static final byte RESULT_VERSION = (byte)1; private KeyValue [] kvs = null; - private NavigableMap>> familyMap = null; // We're not using java serialization. Transient here is just a marker to say // that this is where we cache row if we're ever asked for it. @@ -95,7 +95,7 @@ public class Result implements Writable { public Result(List kvs) { this(kvs.toArray(new KeyValue[0])); } - + /** * Instantiate a Result from the specified raw binary format. * @param bytes raw binary format of Result @@ -131,7 +131,7 @@ public class Result implements Writable { /** * Create a sorted list of the KeyValue's in this result. - * + * * @return The sorted list of KeyValue's. */ public List list() { @@ -159,10 +159,10 @@ public class Result implements Writable { /** * Map of families to all versions of its qualifiers and values. *

- * Returns a three level Map of the form: + * Returns a three level Map of the form: * Map>> *

- * Note: All other map returning methods make use of this map internally. + * Note: All other map returning methods make use of this map internally. * @return map from families to qualifiers to versions */ public NavigableMap>> getMap() { @@ -178,7 +178,7 @@ public class Result implements Writable { for(KeyValue kv : this.kvs) { SplitKeyValue splitKV = kv.split(); byte [] family = splitKV.getFamily(); - NavigableMap> columnMap = + NavigableMap> columnMap = familyMap.get(family); if(columnMap == null) { columnMap = new TreeMap> @@ -217,15 +217,15 @@ public class Result implements Writable { if(isEmpty()) { return null; } - NavigableMap> returnMap = + NavigableMap> returnMap = new TreeMap>(Bytes.BYTES_COMPARATOR); - for(Map.Entry>> + for(Map.Entry>> familyEntry : familyMap.entrySet()) { - NavigableMap qualifierMap = + NavigableMap qualifierMap = new TreeMap(Bytes.BYTES_COMPARATOR); for(Map.Entry> qualifierEntry : familyEntry.getValue().entrySet()) { - byte [] value = + byte [] value = qualifierEntry.getValue().get(qualifierEntry.getValue().firstKey()); qualifierMap.put(qualifierEntry.getKey(), value); } @@ -248,16 +248,16 @@ public class Result implements Writable { if(isEmpty()) { return null; } - NavigableMap returnMap = + NavigableMap returnMap = new TreeMap(Bytes.BYTES_COMPARATOR); - NavigableMap> qualifierMap = + NavigableMap> qualifierMap = familyMap.get(family); if(qualifierMap == null) { return returnMap; } - for(Map.Entry> entry : + for(Map.Entry> entry : qualifierMap.entrySet()) { - byte [] value = + byte [] value = entry.getValue().get(entry.getValue().firstKey()); returnMap.put(entry.getKey(), value); } @@ -294,13 +294,13 @@ public class Result implements Writable { } return versionMap.firstEntry(); } - + private NavigableMap getVersionMap( NavigableMap> qualifierMap, byte [] qualifier) { return qualifier != null? qualifierMap.get(qualifier): qualifierMap.get(new byte[0]); } - + /** * Checks for existence of the specified column. * @param family family name @@ -322,7 +322,7 @@ public class Result implements Writable { NavigableMap versionMap = getVersionMap(qualifierMap, qualifier); return versionMap != null; } - + /** * Returns the value of the first column in the Result. * @return value of the first column @@ -333,19 +333,19 @@ public class Result implements Writable { } return kvs[0].getValue(); } - + /** * Returns the raw binary encoding of this Result.

- * + * * Please note, there may be an offset into the underlying byte array of the - * returned ImmutableBytesWritable. Be sure to use both + * returned ImmutableBytesWritable. Be sure to use both * {@link ImmutableBytesWritable#get()} and {@link ImmutableBytesWritable#getOffset()} * @return pointer to raw binary of Result */ public ImmutableBytesWritable getBytes() { return this.bytes; } - + /** * Check if the underlying KeyValue [] is empty or not * @return true if empty @@ -356,7 +356,7 @@ public class Result implements Writable { } return this.kvs == null || this.kvs.length == 0; } - + /** * @return the size of the underlying KeyValue [] */ @@ -366,7 +366,7 @@ public class Result implements Writable { } return this.kvs == null? 0: this.kvs.length; } - + /** * @return String */ @@ -391,7 +391,7 @@ public class Result implements Writable { sb.append("}"); return sb.toString(); } - + //Writable public void readFields(final DataInput in) throws IOException { @@ -407,7 +407,7 @@ public class Result implements Writable { in.readFully(raw, 0, totalBuffer); bytes = new ImmutableBytesWritable(raw, 0, totalBuffer); } - + //Create KeyValue[] when needed private void readFields() { if (bytes == null) { @@ -426,7 +426,7 @@ public class Result implements Writable { } this.kvs = kvs.toArray(new KeyValue[kvs.size()]); } - + public void write(final DataOutput out) throws IOException { if(isEmpty()) { @@ -443,7 +443,7 @@ public class Result implements Writable { } } } - + public static void writeArray(final DataOutput out, Result [] results) throws IOException { // Write version when writing array form. @@ -479,7 +479,7 @@ public class Result implements Writable { } } } - + public static Result [] readArray(final DataInput in) throws IOException { // Read version for array form. @@ -514,7 +514,7 @@ public class Result implements Writable { offset += keyLen; } int totalLength = offset - initialOffset; - results[i] = new Result(new ImmutableBytesWritable(buf, initialOffset, + results[i] = new Result(new ImmutableBytesWritable(buf, initialOffset, totalLength)); } return results; diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java b/core/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java index 707d2ad..6843018 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java @@ -33,18 +33,18 @@ public interface ResultScanner extends Closeable, Iterable { * @return Result object if there is another row, null if the scanner is * exhausted. 
* @throws IOException e - */ + */ public Result next() throws IOException; - + /** * @param nbRows number of rows to return * @return Between zero and nbRows Results * @throws IOException e */ public Result [] next(int nbRows) throws IOException; - + /** * Closes the scanner and releases any resources it has allocated */ - public void close(); + public void close(); } \ No newline at end of file diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java b/core/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java index 1c71c7f..b7cfc78 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java @@ -20,10 +20,10 @@ import org.apache.hadoop.hbase.util.Bytes; import java.io.IOException; import java.util.List; -/** +/** * Exception thrown by HTable methods when an attempt to do something (like - * commit changes) fails after a bunch of retries. - */ + * commit changes) fails after a bunch of retries. + */ public class RetriesExhaustedException extends IOException { private static final long serialVersionUID = 1876775844L; @@ -31,14 +31,14 @@ public class RetriesExhaustedException extends IOException { super(msg); } - /** + /** * Create a new RetriesExhaustedException from the list of prior failures. * @param serverName name of HRegionServer * @param regionName name of region * @param row The row we were pursuing when we ran out of retries * @param numTries The number of tries we made * @param exceptions List of exceptions that failed before giving up - */ + */ public RetriesExhaustedException(String serverName, final byte [] regionName, final byte [] row, int numTries, List exceptions) { super(getMessage(serverName, regionName, row, numTries, exceptions)); diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/RowLock.java b/core/src/main/java/org/apache/hadoop/hbase/client/RowLock.java index 794aad3..56b0787 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/RowLock.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/RowLock.java @@ -35,7 +35,7 @@ public class RowLock { this.row = row; this.lockId = lockId; } - + /** * Creates a RowLock with only a lock id * @param lockId lock id diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/core/src/main/java/org/apache/hadoop/hbase/client/Scan.java index f8bb1e2..8bb83fb 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -49,7 +49,7 @@ import java.util.TreeSet; *

* To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}. *

- * To further define the scope of what to get when scanning, perform additional + * To further define the scope of what to get when scanning, perform additional * methods as outlined below. *

* To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} @@ -72,7 +72,7 @@ import java.util.TreeSet; *

* To add a filter, execute {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}. *

- * Expert: To explicitly disable server-side block caching for this scan, + * Expert: To explicitly disable server-side block caching for this scan, * execute {@link #setCacheBlocks(boolean)}. */ public class Scan implements Writable { @@ -87,7 +87,7 @@ public class Scan implements Writable { private TimeRange tr = new TimeRange(); private Map> familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); - + /** * Create a Scan operation across all rows. */ @@ -97,7 +97,7 @@ public class Scan implements Writable { this(startRow); this.filter = filter; } - + /** * Create a Scan operation starting at the specified row. *

@@ -108,7 +108,7 @@ public class Scan implements Writable { public Scan(byte [] startRow) { this.startRow = startRow; } - + /** * Create a Scan operation for the range of rows specified. * @param startRow row to start scanner at or after (inclusive) @@ -118,10 +118,10 @@ public class Scan implements Writable { this.startRow = startRow; this.stopRow = stopRow; } - + /** * Creates a new instance of this class while copying all values. - * + * * @param scan The scan instance to copy from. * @throws IOException When copying the values fails. */ @@ -161,7 +161,7 @@ public class Scan implements Writable { familyMap.put(family, null); return this; } - + /** * Get the column from the specified family with the specified qualifier. *

@@ -180,7 +180,7 @@ public class Scan implements Writable { return this; } - + /** * Get versions of columns only within the specified timestamp range, * [minStamp, maxStamp). Note, default maximum versions to return is 1. If @@ -198,7 +198,7 @@ public class Scan implements Writable { tr = new TimeRange(minStamp, maxStamp); return this; } - + /** * Get versions of columns with the specified timestamp. Note, default maximum * versions to return is 1. If your time range spans more than one version @@ -227,7 +227,7 @@ public class Scan implements Writable { this.startRow = startRow; return this; } - + /** * Set the stop row. * @param stopRow row to end at (exclusive) @@ -237,7 +237,7 @@ public class Scan implements Writable { this.stopRow = stopRow; return this; } - + /** * Get all available versions. * @return this @@ -294,7 +294,7 @@ public class Scan implements Writable { this.familyMap = familyMap; return this; } - + /** * Getting the familyMap * @return familyMap @@ -302,7 +302,7 @@ public class Scan implements Writable { public Map> getFamilyMap() { return this.familyMap; } - + /** * @return the number of families in familyMap */ @@ -319,7 +319,7 @@ public class Scan implements Writable { public boolean hasFamilies() { return !this.familyMap.isEmpty(); } - + /** * @return the keys of the familyMap */ @@ -329,7 +329,7 @@ public class Scan implements Writable { } return null; } - + /** * @return the startrow */ @@ -343,13 +343,13 @@ public class Scan implements Writable { public byte [] getStopRow() { return this.stopRow; } - + /** * @return the max number of versions to fetch */ public int getMaxVersions() { return this.maxVersions; - } + } /** * @return maximum number of values to return for a single call to next() @@ -363,15 +363,15 @@ public class Scan implements Writable { */ public int getCaching() { return this.caching; - } + } /** * @return TimeRange */ public TimeRange getTimeRange() { return this.tr; - } - + } + /** * @return RowFilter */ @@ -385,21 +385,21 @@ public class Scan implements Writable { public boolean hasFilter() { return filter != null; } - + /** * Set whether blocks should be cached for this Scan. *

* This is true by default. When true, default settings of the table and * family are used (this will never override caching blocks if the block * cache is disabled for that family or entirely). - * + * * @param cacheBlocks if false, default settings are overridden and blocks * will not be cached */ public void setCacheBlocks(boolean cacheBlocks) { this.cacheBlocks = cacheBlocks; } - + /** * Get whether blocks should be cached for this Scan. * @return true if default caching should be used, false if blocks should not @@ -408,7 +408,7 @@ public class Scan implements Writable { public boolean getCacheBlocks() { return cacheBlocks; } - + /** * @return String */ @@ -465,7 +465,7 @@ public class Scan implements Writable { sb.append("}"); return sb.toString(); } - + @SuppressWarnings("unchecked") private Writable createForName(String className) { try { @@ -474,9 +474,9 @@ public class Scan implements Writable { return WritableFactories.newInstance(clazz, new Configuration()); } catch (ClassNotFoundException e) { throw new RuntimeException("Can't find class " + className); - } + } } - + //Writable public void readFields(final DataInput in) throws IOException { @@ -497,7 +497,7 @@ public class Scan implements Writable { this.tr = new TimeRange(); tr.readFields(in); int numFamilies = in.readInt(); - this.familyMap = + this.familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); for(int i=0; i { super(connection, tableName, scan.getStartRow()); this.scan = scan; } - + /** * @param reload force reload of server location * @throws IOException @@ -95,7 +95,7 @@ public class ScannerCallable extends ServerCallable { } return null; } - + private void close() { if (this.scannerId == -1L) { return; @@ -112,18 +112,18 @@ public class ScannerCallable extends ServerCallable { return this.server.openScanner(this.location.getRegionInfo().getRegionName(), this.scan); } - + protected Scan getScan() { return scan; } - + /** * Call this when the next invocation of call should close the scanner */ public void setClose() { this.closed = true; } - + /** * @return the HRegionInfo for the current region */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java b/core/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java index f8eb470..5a10b0e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java @@ -23,7 +23,7 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.DoNotRetryIOException; /** - * Thrown when a scanner has timed out. + * Thrown when a scanner has timed out. 
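[Editor's note] Tying the Scan options above to the scanner classes that follow (ScannerCallable performs the per-region RPCs, and a client that pauses too long between next() calls can see ScannerTimeoutException), here is a hedged client-side sketch with a made-up row range and family.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanSketch {
      static int countRows(HTable t) throws IOException {
        Scan scan = new Scan(Bytes.toBytes("row-0000"), Bytes.toBytes("row-9999"));
        scan.addFamily(Bytes.toBytes("cf"));
        scan.setCaching(100);        // rows fetched per RPC; the lease must be renewed between batches
        scan.setCacheBlocks(false);  // don't churn the server block cache for a one-off scan
        ResultScanner scanner = t.getScanner(scan);
        int rows = 0;
        try {
          for (Result r : scanner) { // ResultScanner is Iterable; next() returns null at the end
            rows++;
          }
        } finally {
          scanner.close();
        }
        return rows;
      }
    }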
*/ public class ScannerTimeoutException extends DoNotRetryIOException { diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java b/core/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java index 2ed5c67..6f22123 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java @@ -47,9 +47,9 @@ public abstract class ServerCallable implements Callable { this.tableName = tableName; this.row = row; } - + /** - * + * * @param reload set this to true if connection should re-find the region * @throws IOException e */ @@ -65,7 +65,7 @@ public abstract class ServerCallable implements Callable { } return location.getServerAddress().toString(); } - + /** @return the region name */ public byte[] getRegionName() { if (location == null) { @@ -73,7 +73,7 @@ public abstract class ServerCallable implements Callable { } return location.getRegionInfo().getRegionName(); } - + /** @return the row */ public byte [] getRow() { return row; diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/ServerConnection.java b/core/src/main/java/org/apache/hadoop/hbase/client/ServerConnection.java index ae09835..4cd35b3 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/ServerConnection.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/ServerConnection.java @@ -32,9 +32,9 @@ public interface ServerConnection extends HConnection { * @param rootRegion region location for root region */ public void setRootRegionLocation(HRegionLocation rootRegion); - + /** - * Unset the root region location in the connection. Called by + * Unset the root region location in the connection. Called by * ServerManager.processRegionClose. */ public void unsetRootRegionLocation(); diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java b/core/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java index fa49c5f..23e7a6b 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java @@ -25,14 +25,14 @@ import org.apache.hadoop.hbase.HRegionInfo; class UnmodifyableHRegionInfo extends HRegionInfo { /* * Creates an unmodifyable copy of an HRegionInfo - * + * * @param info */ UnmodifyableHRegionInfo(HRegionInfo info) { super(info); this.tableDesc = new UnmodifyableHTableDescriptor(info.getTableDesc()); } - + /** * @param split set split status */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java b/core/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java index a6972c4..27d1faa 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java +++ b/core/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java @@ -39,8 +39,8 @@ public class UnmodifyableHTableDescriptor extends HTableDescriptor { UnmodifyableHTableDescriptor(final HTableDescriptor desc) { super(desc.getName(), getUnmodifyableFamilies(desc), desc.getValues()); } - - + + /* * @param desc * @return Families as unmodifiable array. 
@@ -73,7 +73,7 @@ public class UnmodifyableHTableDescriptor extends HTableDescriptor { public HColumnDescriptor removeFamily(final byte [] column) { throw new UnsupportedOperationException("HTableDescriptor is read-only"); } - + /** * @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean) */ @@ -119,6 +119,6 @@ public class UnmodifyableHTableDescriptor extends HTableDescriptor { // */ // @Override // public void addIndex(IndexSpecification index) { -// throw new UnsupportedOperationException("HTableDescriptor is read-only"); +// throw new UnsupportedOperationException("HTableDescriptor is read-only"); // } } diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java b/core/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java index 236d66b..dd4cc26 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java @@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.filter; /** - * A binary comparator which lexicographically compares against the specified + * A binary comparator which lexicographically compares against the specified * byte array using {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. */ public class BinaryComparator extends WritableByteArrayComparable { diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java b/core/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java index c31c4c7..f345ec6 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.util.Bytes; * A comparator which compares against a specified byte array, but only compares * up to the length of this byte array. For the rest it is similar to * {@link BinaryComparator}. - */ + */ public class BinaryPrefixComparator extends WritableByteArrayComparable { /** Nullary constructor for Writable, do not use */ @@ -42,7 +42,7 @@ public class BinaryPrefixComparator extends WritableByteArrayComparable { @Override public int compareTo(byte [] value) { - return Bytes.compareTo(this.value, 0, this.value.length, value, 0, + return Bytes.compareTo(this.value, 0, this.value.length, value, 0, this.value.length); } diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java b/core/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java index 4d12b55..d050bf7 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.KeyValue; /** * A filter, based on the ColumnCountGetFilter, takes two arguments: limit and offset. - * This filter can be used for row-based indexing, where references to other tables are stored across many columns, + * This filter can be used for row-based indexing, where references to other tables are stored across many columns, * in order to efficient lookups and paginated results for end users. */ public class ColumnPaginationFilter implements Filter @@ -57,11 +57,11 @@ public class ColumnPaginationFilter implements Filter public ReturnCode filterKeyValue(KeyValue v) { - if(count >= offset + limit) + if(count >= offset + limit) { return ReturnCode.NEXT_ROW; } - + ReturnCode code = count < offset ? 
ReturnCode.SKIP : ReturnCode.INCLUDE; count++; return code; diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java b/core/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java index 8786f9d..46cf822 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java @@ -29,7 +29,7 @@ import java.io.IOException; import java.util.Arrays; /** - * This is a generic filter to be used to filter by comparison. It takes an + * This is a generic filter to be used to filter by comparison. It takes an * operator (equal, greater, not equal, etc) and a byte [] comparator. *

* To filter by row key, use {@link RowFilter}. @@ -60,7 +60,7 @@ public abstract class CompareFilter implements Filter { /** greater than */ GREATER, } - + protected CompareOp compareOp; protected WritableByteArrayComparable comparator; @@ -75,7 +75,7 @@ public abstract class CompareFilter implements Filter { * @param compareOp the compare op for row matching * @param comparator the comparator for row matching */ - public CompareFilter(final CompareOp compareOp, + public CompareFilter(final CompareOp compareOp, final WritableByteArrayComparable comparator) { this.compareOp = compareOp; this.comparator = comparator; @@ -101,7 +101,7 @@ public abstract class CompareFilter implements Filter { public ReturnCode filterKeyValue(KeyValue v) { return ReturnCode.INCLUDE; } - + public boolean filterRowKey(byte[] data, int offset, int length) { return false; } @@ -109,7 +109,7 @@ public abstract class CompareFilter implements Filter { public boolean filterRow() { return false; } - + public boolean filterAllRemaining() { return false; } @@ -117,8 +117,8 @@ public abstract class CompareFilter implements Filter { protected boolean doCompare(final CompareOp compareOp, final WritableByteArrayComparable comparator, final byte [] data, final int offset, final int length) { - int compareResult = - comparator.compareTo(Arrays.copyOfRange(data, offset, + int compareResult = + comparator.compareTo(Arrays.copyOfRange(data, offset, offset + length)); switch (compareOp) { case LESS: diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/core/src/main/java/org/apache/hadoop/hbase/filter/Filter.java index c3cb9e9..c5cc7db 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/Filter.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/Filter.java @@ -29,7 +29,7 @@ import org.apache.hadoop.io.Writable; *

 * <ul>
 * <li>{@link #reset()}</li>
 * <li>{@link #filterAllRemaining()} -> true indicates scan is over, false, keep going on.</li>
- * <li>{@link #filterRowKey(byte[],int,int)} -> true to drop this row, 
+ * <li>{@link #filterRowKey(byte[],int,int)} -> true to drop this row,
 * if false, we will also call</li>
 * <li>{@link #filterKeyValue(KeyValue)} -> true to drop this key/value</li>
  • {@link #filterRow()} -> last chance to drop entire row based on the sequence of diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/core/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java index e7585c6..6905e95 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java @@ -33,12 +33,12 @@ import java.util.List; /** * Implementation of {@link Filter} that represents an ordered List of Filters - * which will be evaluated with a specified boolean operator {@link Operator#MUST_PASS_ALL} + * which will be evaluated with a specified boolean operator {@link Operator#MUST_PASS_ALL} * (!AND) or {@link Operator#MUST_PASS_ONE} (!OR). * Since you can use Filter Lists as children of Filter Lists, you can create a * hierarchy of filters to be evaluated. * Defaults to {@link Operator#MUST_PASS_ALL}. - *

    TODO: Fix creation of Configuration on serialization and deserialization. + *

    TODO: Fix creation of Configuration on serialization and deserialization. */ public class FilterList implements Filter { /** set operator */ @@ -64,7 +64,7 @@ public class FilterList implements Filter { /** * Constructor that takes a set of {@link Filter}s. The default operator * MUST_PASS_ALL is assumed. - * + * * @param rowFilters list of filters */ public FilterList(final List rowFilters) { @@ -73,7 +73,7 @@ public class FilterList implements Filter { /** * Constructor that takes an operator. - * + * * @param operator Operator to process filter set with. */ public FilterList(final Operator operator) { @@ -82,7 +82,7 @@ public class FilterList implements Filter { /** * Constructor that takes a set of {@link Filter}s and an operator. - * + * * @param operator Operator to process filter set with. * @param rowFilters Set of row filters. */ @@ -93,7 +93,7 @@ public class FilterList implements Filter { /** * Get the operator. - * + * * @return operator */ public Operator getOperator() { @@ -102,7 +102,7 @@ public class FilterList implements Filter { /** * Get the filters. - * + * * @return filters */ public List getFilters() { @@ -111,7 +111,7 @@ public class FilterList implements Filter { /** * Add a filter. - * + * * @param filter another filter */ public void addFilter(Filter filter) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java b/core/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java index c623510..7e7fcce 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java @@ -64,7 +64,7 @@ public class InclusiveStopFilter implements Filter { // if stopRowKey is <= buffer, then true, filter row. int cmp = Bytes.compareTo(stopRowKey, 0, stopRowKey.length, buffer, offset, length); - + if(cmp < 0) { done = true; } diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java b/core/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java index 97932e3..d10bd4f 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java @@ -50,7 +50,7 @@ public class PageFilter implements Filter { /** * Constructor that takes a maximum page size. - * + * * @param pageSize Maximum result size. */ public PageFilter(final long pageSize) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java b/core/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java index aa52e32..2625eb1 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java @@ -24,8 +24,8 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Get; /** - * This filter is used to filter based on the column qualifier. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator for the + * This filter is used to filter based on the column qualifier. It takes an + * operator (equal, greater, not equal, etc) and a byte [] comparator for the * column qualifier portion of a key. *

    * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter} @@ -58,7 +58,7 @@ public class QualifierFilter extends CompareFilter { public ReturnCode filterKeyValue(KeyValue v) { int qualifierLength = v.getQualifierLength(); if (qualifierLength > 0) { - if (doCompare(this.compareOp, this.comparator, v.getBuffer(), + if (doCompare(this.compareOp, this.comparator, v.getBuffer(), v.getQualifierOffset(), qualifierLength)) { return ReturnCode.SKIP; } diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java b/core/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java index b260a1d..d1f8ba0 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.client.Scan; /** * This filter is used to filter based on the key. It takes an operator - * (equal, greater, not equal, etc) and a byte [] comparator for the row, + * (equal, greater, not equal, etc) and a byte [] comparator for the row, * and column qualifier portions of a key. *

    * This filter can be wrapped with {@link WhileMatchFilter} to add more control. @@ -51,7 +51,7 @@ public class RowFilter extends CompareFilter { * @param rowCompareOp the compare op for row matching * @param rowComparator the comparator for row matching */ - public RowFilter(final CompareOp rowCompareOp, + public RowFilter(final CompareOp rowCompareOp, final WritableByteArrayComparable rowComparator) { super(rowCompareOp, rowComparator); } @@ -68,7 +68,7 @@ public class RowFilter extends CompareFilter { } return ReturnCode.INCLUDE; } - + @Override public boolean filterRowKey(byte[] data, int offset, int length) { if(doCompare(this.compareOp, this.comparator, data, offset, length)) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java b/core/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java index c110271..cda1ccf 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java @@ -43,7 +43,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter { * column is found and the condition passes, all columns of the row will be * emitted; except for the tested column value. If the column is not found or * the condition fails, the row will not be emitted. - * + * * @param family name of column family * @param qualifier name of column qualifier * @param compareOp operator @@ -63,7 +63,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter { * Use the filterIfColumnMissing flag to set whether the rest of the columns * in a row will be emitted if the specified column to check is not found in * the row. - * + * * @param family name of column family * @param qualifier name of column qualifier * @param compareOp operator diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/core/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java index 63469cb..0b08e9f 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java @@ -34,19 +34,19 @@ import java.io.IOException; import java.util.Arrays; /** - * This filter is used to filter cells based on value. It takes a {@link CompareFilter.CompareOp} - * operator (equal, greater, not equal, etc), and either a byte [] value or + * This filter is used to filter cells based on value. It takes a {@link CompareFilter.CompareOp} + * operator (equal, greater, not equal, etc), and either a byte [] value or * a WritableByteArrayComparable. *

    - * If we have a byte [] value then we just do a lexicographic compare. For - * example, if passed value is 'b' and cell has 'a' and the compare operator - * is LESS, then we will filter out this cell (return true). If this is not - * sufficient (eg you want to deserialize a long and then compare it to a fixed + * If we have a byte [] value then we just do a lexicographic compare. For + * example, if passed value is 'b' and cell has 'a' and the compare operator + * is LESS, then we will filter out this cell (return true). If this is not + * sufficient (eg you want to deserialize a long and then compare it to a fixed * long value), then you can pass in your own comparator instead. *

    - * You must also specify a family and qualifier. Only the value of this column - * will be tested. When using this filter on a {@link Scan} with specified - * inputs, the column to be tested should also be added as input (otherwise + * You must also specify a family and qualifier. Only the value of this column + * will be tested. When using this filter on a {@link Scan} with specified + * inputs, the column to be tested should also be added as input (otherwise * the filter will regard the column as missing). *

    * To prevent the entire row from being emitted if the column is not found @@ -64,7 +64,7 @@ public class SingleColumnValueFilter implements Filter { static final Log LOG = LogFactory.getLog(SingleColumnValueFilter.class); protected byte [] columnFamily; - protected byte [] columnQualifier; + protected byte [] columnQualifier; private CompareOp compareOp; private WritableByteArrayComparable comparator; private boolean foundColumn = false; @@ -77,13 +77,13 @@ public class SingleColumnValueFilter implements Filter { */ public SingleColumnValueFilter() { } - + /** * Constructor for binary compare of the value of a single column. If the * column is found and the condition passes, all columns of the row will be * emitted. If the column is not found or the condition fails, the row will * not be emitted. - * + * * @param family name of column family * @param qualifier name of column qualifier * @param compareOp operator @@ -102,7 +102,7 @@ public class SingleColumnValueFilter implements Filter { * Use the filterIfColumnMissing flag to set whether the rest of the columns * in a row will be emitted if the specified column to check is not found in * the row. - * + * * @param family name of column family * @param qualifier name of column qualifier * @param compareOp operator diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java b/core/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java index 399f7a7..e46b09b 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java @@ -27,7 +27,7 @@ import java.io.DataOutput; import java.io.IOException; /** - * A wrapper filter that filters an entire row if any of the KeyValue checks do + * A wrapper filter that filters an entire row if any of the KeyValue checks do * not pass. *

    * For example, if all columns in a row represent weights of different things, @@ -41,7 +41,7 @@ import java.io.IOException; * new BinaryComparator(Bytes.toBytes(0)))); * * Any row which contained a column whose value was 0 will be filtered out. - * Without this filter, the other non-zero valued columns in the row would still + * Without this filter, the other non-zero valued columns in the row would still * be emitted. */ public class SkipFilter implements Filter { diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java b/core/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java index 60e3409..4869dd1 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java @@ -30,7 +30,7 @@ import java.io.IOException; * the value of a given column. Use it to test if a given substring appears * in a cell value in the column. The comparison is case insensitive. *

    - * Only EQUAL or NOT_EQUAL tests are valid with this comparator. + * Only EQUAL or NOT_EQUAL tests are valid with this comparator. *

    * For example: *

    diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java b/core/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java index 0f53389..53cf4ce 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java @@ -23,8 +23,8 @@ package org.apache.hadoop.hbase.filter; import org.apache.hadoop.hbase.KeyValue; /** - * This filter is used to filter based on column value. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator for the + * This filter is used to filter based on column value. It takes an + * operator (equal, greater, not equal, etc) and a byte [] comparator for the * cell value. *

    * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter} @@ -55,7 +55,7 @@ public class ValueFilter extends CompareFilter { @Override public ReturnCode filterKeyValue(KeyValue v) { - if (doCompare(this.compareOp, this.comparator, v.getBuffer(), + if (doCompare(this.compareOp, this.comparator, v.getBuffer(), v.getValueOffset(), v.getValueLength())) { return ReturnCode.SKIP; } diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java b/core/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java index 6a922bd..7c1f404 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java @@ -30,7 +30,7 @@ import java.io.IOException; * A wrapper filter that returns true from {@link #filterAllRemaining()} as soon * as the wrapped filters {@link Filter#filterRowKey(byte[], int, int)}, * {@link Filter#filterKeyValue(org.apache.hadoop.hbase.KeyValue)}, - * {@link org.apache.hadoop.hbase.filter.Filter#filterRow()} or + * {@link org.apache.hadoop.hbase.filter.Filter#filterRow()} or * {@link org.apache.hadoop.hbase.filter.Filter#filterAllRemaining()} methods * returns true. */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/filter/package-info.java b/core/src/main/java/org/apache/hadoop/hbase/filter/package-info.java index cfe8b1c..73ccef8 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/filter/package-info.java +++ b/core/src/main/java/org/apache/hadoop/hbase/filter/package-info.java @@ -20,7 +20,7 @@ /** * Provides row-level filters applied to HRegion scan results during calls to - * {@link org.apache.hadoop.hbase.client.ResultScanner#next()}. + * {@link org.apache.hadoop.hbase.client.ResultScanner#next()}.

    Filters run the extent of a table unless you wrap your filter in a diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/CodeToClassAndBack.java b/core/src/main/java/org/apache/hadoop/hbase/io/CodeToClassAndBack.java index 2e2bcaf..61a1c5e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/CodeToClassAndBack.java +++ b/core/src/main/java/org/apache/hadoop/hbase/io/CodeToClassAndBack.java @@ -41,12 +41,12 @@ public interface CodeToClassAndBack { */ public static final Map, Byte> CLASS_TO_CODE = new HashMap, Byte>(); - + /** * Class list for supported classes */ public Class[] classList = {byte[].class}; - + /** * The static loader that is used instead of the static constructor in * HbaseMapWritable. @@ -55,7 +55,7 @@ public interface CodeToClassAndBack { new InternalStaticLoader(classList, CODE_TO_CLASS, CLASS_TO_CODE); /** - * Class that loads the static maps with their values. + * Class that loads the static maps with their values. */ public class InternalStaticLoader{ InternalStaticLoader(Class[] classList, diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/HalfHFileReader.java b/core/src/main/java/org/apache/hadoop/hbase/io/HalfHFileReader.java index 0485d5f..c657630 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/HalfHFileReader.java +++ b/core/src/main/java/org/apache/hadoop/hbase/io/HalfHFileReader.java @@ -40,10 +40,10 @@ import org.apache.hadoop.hbase.util.Bytes; * of the file with keys that sort greater than those of the bottom half. * The top includes the split files midkey, of the key that follows if it does * not exist in the file. - * + * *

    This type works in tandem with the {@link Reference} type. This class * is used reading while Reference is used writing. - * + * *

    This file is not splitable. Calls to {@link #midkey()} return null. */ public class HalfHFileReader extends HFile.Reader { @@ -116,7 +116,7 @@ public class HalfHFileReader extends HFile.Reader { public boolean next() throws IOException { if (atEnd) return false; - + boolean b = delegate.next(); if (!b) { return b; @@ -215,7 +215,7 @@ public class HalfHFileReader extends HFile.Reader { @Override public byte[] getLastKey() { if (top) { - return super.getLastKey(); + return super.getLastKey(); } // Get a scanner that caches the block and that uses pread. HFileScanner scanner = getScanner(true, true); diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java b/core/src/main/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java index a549913..45eb495 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java +++ b/core/src/main/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java @@ -59,15 +59,15 @@ implements SortedMap, Configurable, Writable, CodeToClassAndBack{ /** * Contructor where another SortedMap can be used - * - * @param map the SortedMap to be used + * + * @param map the SortedMap to be used */ public HbaseMapWritable(SortedMap map){ conf = new AtomicReference(); instance = map; } - - + + /** @return the conf */ public Configuration getConf() { return conf.get(); @@ -97,7 +97,7 @@ implements SortedMap, Configurable, Writable, CodeToClassAndBack{ public V get(Object key) { return instance.get(key); } - + public boolean isEmpty() { return instance.isEmpty(); } @@ -149,7 +149,7 @@ implements SortedMap, Configurable, Writable, CodeToClassAndBack{ public SortedMap tailMap(byte[] fromKey) { return this.instance.tailMap(fromKey); } - + // Writable /** @return the Class class for the specified id */ @@ -167,7 +167,7 @@ implements SortedMap, Configurable, Writable, CodeToClassAndBack{ } return b; } - + /** * @see java.lang.Object#toString() */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java b/core/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java index 579bd29..3cc5e0f 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java +++ b/core/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java @@ -62,7 +62,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.util.Bytes; -/** +/** * This is a customized version of the polymorphic hadoop * {@link ObjectWritable}. It removes UTF8 (HADOOP-414). * Using {@link Text} intead of UTF-8 saves ~2% CPU between reading and writing @@ -79,7 +79,7 @@ import org.apache.hadoop.hbase.util.Bytes; */ public class HbaseObjectWritable implements Writable, Configurable { protected final static Log LOG = LogFactory.getLog(HbaseObjectWritable.class); - + // Here we maintain two static maps of classes to code and vice versa. // Add new classes+codes as wanted or figure way to auto-generate these // maps from the HMasterInterface. 
@@ -102,12 +102,12 @@ public class HbaseObjectWritable implements Writable, Configurable { addToMap(Float.TYPE, code++); addToMap(Double.TYPE, code++); addToMap(Void.TYPE, code++); - + // Other java types addToMap(String.class, code++); addToMap(byte [].class, code++); addToMap(byte [][].class, code++); - + // Hadoop types addToMap(Text.class, code++); addToMap(Writable.class, code++); @@ -128,7 +128,7 @@ public class HbaseObjectWritable implements Writable, Configurable { addToMap(HServerInfo.class, code++); addToMap(HTableDescriptor.class, code++); addToMap(MapWritable.class, code++); - + // // HBASE-880 // @@ -167,11 +167,11 @@ public class HbaseObjectWritable implements Writable, Configurable { addToMap(HLog.Entry.class, code++); addToMap(HLog.Entry[].class, code++); addToMap(HLogKey.class, code++); - - // List + + // List addToMap(List.class, code++); } - + private Class declaredClass; private Object instance; private Configuration conf; @@ -180,7 +180,7 @@ public class HbaseObjectWritable implements Writable, Configurable { public HbaseObjectWritable() { super(); } - + /** * @param instance */ @@ -199,10 +199,10 @@ public class HbaseObjectWritable implements Writable, Configurable { /** @return the instance, or null if none. */ public Object get() { return instance; } - + /** @return the class this is meant to be. */ public Class getDeclaredClass() { return declaredClass; } - + /** * Reset the instance. * @param instance @@ -220,11 +220,11 @@ public class HbaseObjectWritable implements Writable, Configurable { return "OW[class=" + declaredClass + ",value=" + instance + "]"; } - + public void readFields(DataInput in) throws IOException { readObject(in, this, this.conf); } - + public void write(DataOutput out) throws IOException { writeObject(out, instance, declaredClass, conf); } @@ -234,7 +234,7 @@ public class HbaseObjectWritable implements Writable, Configurable { /** default constructor for writable */ @SuppressWarnings("unused") public NullInstance() { super(null); } - + /** * @param declaredClass * @param conf @@ -243,16 +243,16 @@ public class HbaseObjectWritable implements Writable, Configurable { super(conf); this.declaredClass = declaredClass; } - + public void readFields(DataInput in) throws IOException { this.declaredClass = CODE_TO_CLASS.get(in.readByte()); } - + public void write(DataOutput out) throws IOException { writeClassCode(out, this.declaredClass); } } - + /** * Write out the code byte for passed Class. * @param out @@ -291,13 +291,13 @@ public class HbaseObjectWritable implements Writable, Configurable { */ @SuppressWarnings("unchecked") public static void writeObject(DataOutput out, Object instance, - Class declaredClass, + Class declaredClass, Configuration conf) throws IOException { Object instanceObj = instance; Class declClass = declaredClass; - + if (instanceObj == null) { // null instanceObj = new NullInstance(declClass, conf); declClass = Writable.class; @@ -365,8 +365,8 @@ public class HbaseObjectWritable implements Writable, Configurable { throw new IOException("Can't write: "+instanceObj+" as "+declClass); } } - - + + /** * Read a {@link Writable}, {@link String}, primitive type, or an array of * the preceding. @@ -379,7 +379,7 @@ public class HbaseObjectWritable implements Writable, Configurable { throws IOException { return readObject(in, null, conf); } - + /** * Read a {@link Writable}, {@link String}, primitive type, or an array of * the preceding. 
@@ -469,7 +469,7 @@ public class HbaseObjectWritable implements Writable, Configurable { } @SuppressWarnings("unchecked") - private static Class getClassByName(Configuration conf, String className) + private static Class getClassByName(Configuration conf, String className) throws ClassNotFoundException { if(conf != null) { return conf.getClassByName(className); @@ -480,7 +480,7 @@ public class HbaseObjectWritable implements Writable, Configurable { } return Class.forName(className, true, cl); } - + private static void addToMap(final Class clazz, final byte code) { CLASS_TO_CODE.put(clazz, code); CODE_TO_CLASS.put(code, clazz); diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/HeapSize.java b/core/src/main/java/org/apache/hadoop/hbase/io/HeapSize.java index d7b737c..bd78846 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/HeapSize.java +++ b/core/src/main/java/org/apache/hadoop/hbase/io/HeapSize.java @@ -31,7 +31,7 @@ package org.apache.hadoop.hbase.io; * For example: *

      * public class SampleObject implements HeapSize {
    - *   
    + *
      *   int [] numbers;
      *   int x;
      * }
    @@ -43,5 +43,5 @@ public interface HeapSize {
        * count of payload and hosting object sizings.
       */
       public long heapSize();
    -  
    +
     }
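For context, the SampleObject shown in the HeapSize javadoc above is only an illustration; a minimal sketch of how such a class might implement heapSize(), using the ClassSize and Bytes constants that appear in other hunks of this patch (ClassSize.OBJECT, ClassSize.ARRAY, ClassSize.REFERENCE, Bytes.SIZEOF_INT), could look like this. SampleObject and its fields are hypothetical, not a class in core/src.

    import org.apache.hadoop.hbase.io.HeapSize;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.ClassSize;

    // Illustrative only: SampleObject comes from the HeapSize javadoc, not from core/src.
    public class SampleObject implements HeapSize {
      int [] numbers = new int[16];
      int x;

      public long heapSize() {
        // Fixed part: object header, one array reference, one int field.
        long size = ClassSize.align(ClassSize.OBJECT + ClassSize.REFERENCE + Bytes.SIZEOF_INT);
        // Variable part: the backing int[] (array header plus its elements).
        size += ClassSize.align(ClassSize.ARRAY + numbers.length * Bytes.SIZEOF_INT);
        return size;
      }
    }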
    diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java b/core/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
    index 0617aa4..0a9ec4b 100644
    --- a/core/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
    +++ b/core/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
    @@ -29,7 +29,7 @@ import org.apache.hadoop.io.BytesWritable;
     import org.apache.hadoop.io.WritableComparable;
     import org.apache.hadoop.io.WritableComparator;
     
    -/** 
    +/**
      * A byte sequence that is usable as a key or value.  Based on
      * {@link org.apache.hadoop.io.BytesWritable} only this class is NOT resizable
 * and DOES NOT distinguish between the size of the sequence and the current
    @@ -43,14 +43,14 @@ implements WritableComparable {
       private byte[] bytes;
       private int offset;
       private int length;
    -  
    +
       /**
        * Create a zero-size sequence.
        */
       public ImmutableBytesWritable() {
         super();
       }
    -  
    +
       /**
        * Create a ImmutableBytesWritable using the byte array as the initial value.
        * @param bytes This array becomes the backing storage for the object.
    @@ -58,7 +58,7 @@ implements WritableComparable {
       public ImmutableBytesWritable(byte[] bytes) {
         this(bytes, 0, bytes.length);
       }
    -  
    +
       /**
        * Set the new ImmutableBytesWritable to the contents of the passed
        * ibw.
    @@ -67,7 +67,7 @@ implements WritableComparable {
       public ImmutableBytesWritable(final ImmutableBytesWritable ibw) {
         this(ibw.get(), 0, ibw.getSize());
       }
    -  
    +
       /**
        * Set the value to a given byte range
        * @param bytes the new byte range to set to
    @@ -80,7 +80,7 @@ implements WritableComparable {
         this.offset = offset;
         this.length = length;
       }
    -  
    +
       /**
        * Get the data from the BytesWritable.
        * @return The data is only valid between 0 and getSize() - 1.
    @@ -92,7 +92,7 @@ implements WritableComparable {
         }
         return this.bytes;
       }
    -  
    +
       /**
        * @param b Use passed bytes as backing array for this instance.
        */
    @@ -110,7 +110,7 @@ implements WritableComparable {
         this.offset = offset;
         this.length = length;
       }
    -  
    +
       /**
        * @return the current size of the buffer.
        */
    @@ -121,7 +121,7 @@ implements WritableComparable {
         }
         return this.length;
       }
    - 
    +
       /**
        * @return the current length of the buffer. same as getSize()
        */
    @@ -134,7 +134,7 @@ implements WritableComparable {
         }
         return this.length;
       }
    -  
    +
       /**
        * @return offset
        */
    @@ -148,19 +148,19 @@ implements WritableComparable {
         in.readFully(this.bytes, 0, this.length);
         this.offset = 0;
       }
    -  
    +
       public void write(final DataOutput out) throws IOException {
         out.writeInt(this.length);
         out.write(this.bytes, this.offset, this.length);
       }
    -  
    +
       // Below methods copied from BytesWritable
     
       @Override
       public int hashCode() {
         return WritableComparator.hashBytes(bytes, this.length);
       }
    -  
    +
       /**
        * Define the sort order of the BytesWritable.
        * @param right_obj The other bytes writable
    @@ -170,7 +170,7 @@ implements WritableComparable {
       public int compareTo(ImmutableBytesWritable right_obj) {
         return compareTo(right_obj.get());
       }
    -  
    +
       /**
        * Compares the bytes in this object to the specified byte array
        * @param that
    @@ -200,7 +200,7 @@ implements WritableComparable {
        * @see java.lang.Object#toString()
        */
       @Override
    -  public String toString() { 
    +  public String toString() {
         StringBuilder sb = new StringBuilder(3*this.bytes.length);
         for (int idx = 0; idx < this.bytes.length; idx++) {
           // if not the first, put a blank separator in
    @@ -218,7 +218,7 @@ implements WritableComparable {
       }
     
       /** A Comparator optimized for ImmutableBytesWritable.
    -   */ 
    +   */
       public static class Comparator extends WritableComparator {
         private BytesWritable.Comparator comparator =
           new BytesWritable.Comparator();
    @@ -236,11 +236,11 @@ implements WritableComparable {
           return comparator.compare(b1, s1, l1, b2, s2, l2);
         }
       }
    -  
    +
       static { // register this comparator
         WritableComparator.define(ImmutableBytesWritable.class, new Comparator());
       }
    -  
    +
       /**
        * @param array List of byte [].
        * @return Array of byte [].
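The javadoc above describes ImmutableBytesWritable as a fixed-size byte sequence usable as a key or value; a brief usage sketch, based only on the constructors and accessors visible in these hunks (the surrounding example class and main() are illustrative):

    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ImmutableBytesWritableExample {
      public static void main(String[] args) {
        // The passed arrays become the backing storage; the instance is not resizable.
        ImmutableBytesWritable a = new ImmutableBytesWritable(Bytes.toBytes("row-1"));
        ImmutableBytesWritable b = new ImmutableBytesWritable(Bytes.toBytes("row-2"));

        System.out.println(a.getSize());     // 5: length of the wrapped bytes
        System.out.println(a.compareTo(b));  // negative: "row-1" sorts before "row-2"
        System.out.println(a);               // blank-separated rendering of the backing bytes
      }
    }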
    diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/core/src/main/java/org/apache/hadoop/hbase/io/Reference.java
    index 4b893d7..219203c 100644
    --- a/core/src/main/java/org/apache/hadoop/hbase/io/Reference.java
    +++ b/core/src/main/java/org/apache/hadoop/hbase/io/Reference.java
    @@ -35,7 +35,7 @@ import org.apache.hadoop.io.Writable;
     /**
      * A reference to the top or bottom half of a store file.  The file referenced
      * lives under a different region.  References are made at region split time.
    - * 
    + *
      * 

    References work with a special half store file type. References know how * to write out the reference format in the file system and are whats juggled * when references are mixed in with direct store files. The half store file @@ -52,7 +52,7 @@ public class Reference implements Writable { private byte [] splitkey; private Range region; - /** + /** * For split HStoreFiles, it specifies if the file covers the lower half or * the upper half of the key range */ @@ -82,7 +82,7 @@ public class Reference implements Writable { } /** - * + * * @return Range */ public Range getFileRegion() { diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/core/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java index d266791..7901de1 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java +++ b/core/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java @@ -48,7 +48,7 @@ public class TimeRange implements Writable { public TimeRange() { allTime = true; } - + /** * Represents interval [minStamp, Long.MAX_VALUE) * @param minStamp the minimum timestamp value, inclusive @@ -56,7 +56,7 @@ public class TimeRange implements Writable { public TimeRange(long minStamp) { this.minStamp = minStamp; } - + /** * Represents interval [minStamp, Long.MAX_VALUE) * @param minStamp the minimum timestamp value, inclusive @@ -64,9 +64,9 @@ public class TimeRange implements Writable { public TimeRange(byte [] minStamp) { this.minStamp = Bytes.toLong(minStamp); } - + /** - * Represents interval [minStamp, maxStamp) + * Represents interval [minStamp, maxStamp) * @param minStamp the minimum timestamp, inclusive * @param maxStamp the maximum timestamp, exclusive * @throws IOException @@ -81,7 +81,7 @@ public class TimeRange implements Writable { } /** - * Represents interval [minStamp, maxStamp) + * Represents interval [minStamp, maxStamp) * @param minStamp the minimum timestamp, inclusive * @param maxStamp the maximum timestamp, exclusive * @throws IOException @@ -90,7 +90,7 @@ public class TimeRange implements Writable { throws IOException { this(Bytes.toLong(minStamp), Bytes.toLong(maxStamp)); } - + /** * @return the smallest timestamp that should be considered */ @@ -104,11 +104,11 @@ public class TimeRange implements Writable { public long getMax() { return maxStamp; } - + /** * Check if the specified timestamp is within this TimeRange. *

    - * Returns true if within interval [minStamp, maxStamp), false + * Returns true if within interval [minStamp, maxStamp), false * if not. * @param bytes timestamp to check * @param offset offset into the bytes @@ -118,11 +118,11 @@ public class TimeRange implements Writable { if(allTime) return true; return withinTimeRange(Bytes.toLong(bytes, offset)); } - + /** * Check if the specified timestamp is within this TimeRange. *

    - * Returns true if within interval [minStamp, maxStamp), false + * Returns true if within interval [minStamp, maxStamp), false * if not. * @param timestamp timestamp to check * @return true if within TimeRange, false if not @@ -132,11 +132,11 @@ public class TimeRange implements Writable { // check if >= minStamp return (minStamp <= timestamp && timestamp < maxStamp); } - + /** * Check if the specified timestamp is within this TimeRange. *

    - * Returns true if within interval [minStamp, maxStamp), false + * Returns true if within interval [minStamp, maxStamp), false * if not. * @param timestamp timestamp to check * @return true if within TimeRange, false if not @@ -146,7 +146,7 @@ public class TimeRange implements Writable { // check if >= minStamp return (timestamp >= minStamp); } - + @Override public String toString() { StringBuilder sb = new StringBuilder(); @@ -156,14 +156,14 @@ public class TimeRange implements Writable { sb.append(this.minStamp); return sb.toString(); } - + //Writable public void readFields(final DataInput in) throws IOException { this.minStamp = in.readLong(); this.maxStamp = in.readLong(); this.allTime = in.readBoolean(); } - + public void write(final DataOutput out) throws IOException { out.writeLong(minStamp); out.writeLong(maxStamp); diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index 18392cb..a41343e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -33,14 +33,14 @@ public interface BlockCache { * @param inMemory Whether block should be treated as in-memory */ public void cacheBlock(String blockName, ByteBuffer buf, boolean inMemory); - + /** * Add block to cache (defaults to not in-memory). * @param blockName Zero-based file block number. * @param buf The block contents wrapped in a ByteBuffer. */ public void cacheBlock(String blockName, ByteBuffer buf); - + /** * Fetch block from cache. * @param blockName Block number to fetch. diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java index 6657107..9300c13 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java +++ b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java @@ -27,49 +27,49 @@ import org.apache.hadoop.hbase.util.ClassSize; /** * Represents an entry in the {@link LruBlockCache}. - * + * *

    Makes the block memory-aware with {@link HeapSize} and Comparable * to sort by access time for the LRU. It also takes care of priority by * either instantiating as in-memory or handling the transition from single * to multiple access. */ public class CachedBlock implements HeapSize, Comparable { - + public final static long PER_BLOCK_OVERHEAD = ClassSize.align( ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG) + ClassSize.STRING + ClassSize.BYTE_BUFFER); - - static enum BlockPriority { + + static enum BlockPriority { /** * Accessed a single time (used for scan-resistance) */ - SINGLE, + SINGLE, /** * Accessed multiple times */ - MULTI, + MULTI, /** * Block from in-memory store */ MEMORY }; - + private final String blockName; private final ByteBuffer buf; private volatile long accessTime; private long size; private BlockPriority priority; - + public CachedBlock(String blockName, ByteBuffer buf, long accessTime) { this(blockName, buf, accessTime, false); } - + public CachedBlock(String blockName, ByteBuffer buf, long accessTime, boolean inMemory) { this.blockName = blockName; this.buf = buf; this.accessTime = accessTime; - this.size = ClassSize.align(blockName.length()) + + this.size = ClassSize.align(blockName.length()) + ClassSize.align(buf.capacity()) + PER_BLOCK_OVERHEAD; if(inMemory) { this.priority = BlockPriority.MEMORY; @@ -77,7 +77,7 @@ public class CachedBlock implements HeapSize, Comparable { this.priority = BlockPriority.SINGLE; } } - + /** * Block has been accessed. Update its local access time. */ @@ -87,7 +87,7 @@ public class CachedBlock implements HeapSize, Comparable { this.priority = BlockPriority.MULTI; } } - + public long heapSize() { return size; } @@ -96,15 +96,15 @@ public class CachedBlock implements HeapSize, Comparable { if(this.accessTime == that.accessTime) return 0; return this.accessTime < that.accessTime ? 1 : -1; } - + public ByteBuffer getBuffer() { return this.buf; } - + public String getName() { return this.blockName; } - + public BlockPriority getPriority() { return this.priority; } diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java index 763c531..ab96ac1 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java +++ b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java @@ -28,22 +28,22 @@ import org.apache.hadoop.hbase.io.HeapSize; * A memory-bound queue that will grow until an element brings * total size >= maxSize. From then on, only entries that are sorted larger * than the smallest current entry will be inserted/replaced. - * + * *

    Use this when you want to find the largest elements (according to their * ordering, not their heap size) that consume as close to the specified * maxSize as possible. Default behavior is to grow just above rather than * just below specified max. - * + * *

    Object used in this queue must implement {@link HeapSize} as well as * {@link Comparable}. */ public class CachedBlockQueue implements HeapSize { - + private PriorityQueue queue; - + private long heapSize; private long maxSize; - + /** * @param maxSize the target size of elements in the queue * @param blockSize expected average size of blocks @@ -55,10 +55,10 @@ public class CachedBlockQueue implements HeapSize { heapSize = 0; this.maxSize = maxSize; } - + /** * Attempt to add the specified cached block to this queue. - * + * *

    If the queue is smaller than the max size, or if the specified element * is ordered before the smallest element in the queue, the element will be * added to the queue. Otherwise, there is no side effect of this call. @@ -82,7 +82,7 @@ public class CachedBlockQueue implements HeapSize { } } } - + /** * Get a sorted List of all elements in this queue, in descending order. * @return list of cached elements in descending order @@ -94,7 +94,7 @@ public class CachedBlockQueue implements HeapSize { } return blocks.toArray(new CachedBlock[blocks.size()]); } - + /** * Total size of all elements in this queue. * @return size of all elements currently in queue, in bytes diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/Compression.java b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/Compression.java index 702ca56..3a429c0 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/Compression.java +++ b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/Compression.java @@ -5,9 +5,9 @@ * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index a2b35cf..4488ccc 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -102,7 +102,7 @@ import org.apache.hadoop.io.compress.Decompressor; * compression ratio over "lzo" but requires 4x CPU to compress and 2x CPU to * decompress, comparing to "lzo". *

- * + * * For more on the background behind HFile, see HBASE-61. *

@@ -124,7 +124,7 @@ import org.apache.hadoop.io.compress.Decompressor; public class HFile { static final Log LOG = LogFactory.getLog(HFile.class); - /* These values are more or less arbitrary, and they are used as a + /* These values are more or less arbitrary, and they are used as a * form of check to make sure the file isn't completely corrupt. */ final static byte [] DATABLOCKMAGIC = @@ -133,14 +133,14 @@ public class HFile { { 'I', 'D', 'X', 'B', 'L', 'K', 41, 43 }; final static byte [] METABLOCKMAGIC = { 'M', 'E', 'T', 'A', 'B', 'L', 'K', 99 }; - final static byte [] TRAILERBLOCKMAGIC = + final static byte [] TRAILERBLOCKMAGIC = { 'T', 'R', 'A', 'B', 'L', 'K', 34, 36 }; /** * Maximum length of key in HFile. */ public final static int MAXIMUM_KEY_LENGTH = Integer.MAX_VALUE; - + /** * Default blocksize for hfile. */ @@ -269,7 +269,7 @@ public class HFile { * @param blocksize * @param compress * @param comparator - * @throws IOException + * @throws IOException * @throws IOException */ public Writer(FileSystem fs, Path path, int blocksize, @@ -314,7 +314,7 @@ public class HFile { this(ostream, blocksize, Compression.getCompressionAlgorithmByName(compress), c); } - + /** * Constructor that takes a stream. * @param ostream Stream to use. @@ -399,13 +399,13 @@ public class HFile { this.compressor, 0); return new DataOutputStream(os); } - + /* * Let go of block compressor and compressing stream gotten in call * {@link #getCompressingStream}. * @param dos * @return How much was written on this stream since it was taken out. - * @see #getCompressingStream() + * @see #getCompressingStream() * @throws IOException */ private int releaseCompressingStream(final DataOutputStream dos) @@ -435,7 +435,7 @@ public class HFile { * from {@link Reader#loadFileInfo()}. * @param k Key * @param v Value - * @throws IOException + * @throws IOException */ public void appendFileInfo(final byte [] k, final byte [] v) throws IOException { @@ -543,7 +543,7 @@ public class HFile { private boolean checkKey(final byte [] key, final int offset, final int length) throws IOException { boolean dupKey = false; - + if (key == null || length <= 0) { throw new IOException("Key cannot be null or empty"); } @@ -553,7 +553,7 @@ public class HFile { } if (this.lastKeyBuffer != null) { int keyComp = this.comparator.compare(this.lastKeyBuffer, this.lastKeyOffset, - this.lastKeyLength, key, offset, length); + this.lastKeyLength, key, offset, length); if (keyComp > 0) { throw new IOException("Added a key not lexically larger than" + " previous key=" + Bytes.toString(key, offset, length) + @@ -587,7 +587,7 @@ public class HFile { finishBlock(); FixedFileTrailer trailer = new FixedFileTrailer(); - + // Write out the metadata blocks if any. ArrayList metaOffsets = null; ArrayList metaDataSizes = null; @@ -618,10 +618,10 @@ public class HFile { // Now finish off the trailer. trailer.dataIndexCount = blockKeys.size(); trailer.metaIndexCount = metaNames.size(); - + trailer.totalUncompressedBytes = totalBytes; trailer.entryCount = entryCount; - + trailer.compressionCodec = this.compressAlgo.ordinal(); trailer.serialize(outputStream); @@ -690,7 +690,7 @@ public class HFile { private BlockIndex metaIndex; FixedFileTrailer trailer; private volatile boolean fileInfoLoaded = false; - + // Filled when we read in the trailer. private Compression.Algorithm compressAlgo; @@ -699,7 +699,7 @@ public class HFile { // Stats read in when we load file info. private int avgKeyLen = -1; private int avgValueLen = -1; - + // Used to ensure we seek correctly. 
RawComparator comparator; @@ -710,7 +710,7 @@ public class HFile { private final BlockCache cache; public int cacheHits = 0; public int blockLoads = 0; - + // Whether file is from in-memory store private boolean inMemory = false; @@ -727,8 +727,8 @@ public class HFile { this(null, -1, null, false); } - /** - * Opens a HFile. You must load the file info before you can + /** + * Opens a HFile. You must load the file info before you can * use it by calling {@link #loadFileInfo()}. * * @param fs filesystem to load from @@ -743,8 +743,8 @@ public class HFile { this.name = path.toString(); } - /** - * Opens a HFile. You must load the index before you can + /** + * Opens a HFile. You must load the index before you can * use it by calling {@link #loadFileInfo()}. * * @param fsdis input stream. Caller is responsible for closing the passed @@ -788,7 +788,7 @@ public class HFile { public long length() { return this.fileSize; } - + public boolean inMemory() { return this.inMemory; } @@ -909,7 +909,7 @@ public class HFile { } else { blockSize = metaIndex.blockOffsets[block+1] - metaIndex.blockOffsets[block]; } - + ByteBuffer buf = decompress(metaIndex.blockOffsets[block], longToInt(blockSize), metaIndex.blockDataSizes[block], true); byte [] magic = new byte[METABLOCKMAGIC.length]; @@ -1005,18 +1005,18 @@ public class HFile { * @param offset * @param compressedSize * @param decompressedSize - * + * * @return * @throws IOException */ private ByteBuffer decompress(final long offset, final int compressedSize, - final int decompressedSize, final boolean pread) + final int decompressedSize, final boolean pread) throws IOException { Decompressor decompressor = null; ByteBuffer buf = null; try { decompressor = this.compressAlgo.getDecompressor(); - // My guess is that the bounded range fis is needed to stop the + // My guess is that the bounded range fis is needed to stop the // decompressor reading into next block -- IIRC, it just grabs a // bunch of data w/o regard to whether decompressor is coming to end of a // decompression. @@ -1026,15 +1026,15 @@ public class HFile { decompressor, 0); buf = ByteBuffer.allocate(decompressedSize); IOUtils.readFully(is, buf.array(), 0, buf.capacity()); - is.close(); + is.close(); } finally { if (null != decompressor) { - this.compressAlgo.returnDecompressor(decompressor); + this.compressAlgo.returnDecompressor(decompressor); } } return buf; } - + /** * @return First key in the file. May be null if file has no entries. */ @@ -1076,7 +1076,7 @@ public class HFile { return (this.blockIndex != null? this.blockIndex.heapSize(): 0) + ((this.metaIndex != null)? this.metaIndex.heapSize(): 0); } - + /** * @return Midkey for this file. We work with block boundaries only so * returned midkey is an approximation only. @@ -1103,7 +1103,7 @@ public class HFile { private final Reader reader; private ByteBuffer block; private int currBlock; - + private final boolean cacheBlocks; private final boolean pread; @@ -1117,7 +1117,7 @@ public class HFile { this.cacheBlocks = cacheBlocks; this.pread = pread; } - + public KeyValue getKeyValue() { if(this.block == null) { return null; @@ -1179,25 +1179,25 @@ public class HFile { currValueLen = block.getInt(); return true; } - + public int seekTo(byte [] key) throws IOException { return seekTo(key, 0, key.length); } - + public int seekTo(byte[] key, int offset, int length) throws IOException { int b = reader.blockContainingKey(key, offset, length); if (b < 0) return -1; // falls before the beginning of the file! 
:-( // Avoid re-reading the same block (that'd be dumb). loadBlock(b); - + return blockSeek(key, offset, length, false); } /** * Within a loaded block, seek looking for the first key * that is smaller than (or equal to?) the key we are interested in. - * + * * A note on the seekBefore - if you have seekBefore = true, AND the * first key in the block = key, then you'll get thrown exceptions. * @param key to find @@ -1245,7 +1245,7 @@ public class HFile { public boolean seekBefore(byte [] key) throws IOException { return seekBefore(key, 0, key.length); } - + public boolean seekBefore(byte[] key, int offset, int length) throws IOException { int b = reader.blockContainingKey(key, offset, length); @@ -1304,7 +1304,7 @@ public class HFile { blockFetches++; return true; } - + private void loadBlock(int bloc) throws IOException { if (block == null) { block = reader.readBlock(bloc, this.cacheBlocks, this.pread); @@ -1327,7 +1327,7 @@ public class HFile { return trailer.toString(); } } - + /* * The RFile has a fixed trailer which contains offsets to other variable * parts of the file. Also includes basic metadata on this file. @@ -1347,14 +1347,14 @@ public class HFile { int entryCount; int compressionCodec; int version = 1; - + FixedFileTrailer() { super(); } static int trailerSize() { // Keep this up to date... - return + return ( Bytes.SIZEOF_INT * 5 ) + ( Bytes.SIZEOF_LONG * 4 ) + TRAILERBLOCKMAGIC.length; @@ -1386,7 +1386,7 @@ public class HFile { metaIndexOffset = inputStream.readLong(); metaIndexCount = inputStream.readInt(); - + totalUncompressedBytes = inputStream.readLong(); entryCount = inputStream.readInt(); compressionCodec = inputStream.readInt(); @@ -1425,7 +1425,7 @@ public class HFile { /* Needed doing lookup on blocks. */ final RawComparator comparator; - + /* * Shutdown default constructor */ @@ -1453,7 +1453,7 @@ public class HFile { /** * Adds a new entry in the block index. - * + * * @param key Last key in the block * @param offset file offset where the block is stored * @param dataSize the uncompressed data size @@ -1484,13 +1484,13 @@ public class HFile { // the block with a firstKey < key. This means the value we want is potentially // in the next block. pos --; // in previous block. - + return pos; } // wow, a perfect hit, how unlikely? return pos; } - + /* * @return File midkey. Inexact. Operates on block boundaries. Does * not go into blocks. @@ -1581,12 +1581,12 @@ public class HFile { } public long heapSize() { - long heapsize = ClassSize.align(ClassSize.OBJECT + + long heapsize = ClassSize.align(ClassSize.OBJECT + 2 * Bytes.SIZEOF_INT + (3 + 1) * ClassSize.REFERENCE); - //Calculating the size of blockKeys + //Calculating the size of blockKeys if(blockKeys != null) { //Adding array + references overhead - heapsize += ClassSize.align(ClassSize.ARRAY + + heapsize += ClassSize.align(ClassSize.ARRAY + blockKeys.length * ClassSize.REFERENCE); //Adding bytes for(byte [] bs : blockKeys) { @@ -1594,17 +1594,17 @@ public class HFile { } } if(blockOffsets != null) { - heapsize += ClassSize.align(ClassSize.ARRAY + + heapsize += ClassSize.align(ClassSize.ARRAY + blockOffsets.length * Bytes.SIZEOF_LONG); } if(blockDataSizes != null) { - heapsize += ClassSize.align(ClassSize.ARRAY + + heapsize += ClassSize.align(ClassSize.ARRAY + blockDataSizes.length * Bytes.SIZEOF_INT); } - + return ClassSize.align(heapsize); } - + } /* @@ -1631,7 +1631,7 @@ public class HFile { /** * Get names of supported compression algorithms. The names are acceptable by * HFile.Writer. 
- * + * * @return Array of strings, each represents a supported compression * algorithm. Currently, the following compression algorithms are * supported. @@ -1658,13 +1658,13 @@ public class HFile { /** * Returns all files belonging to the given region directory. Could return an * empty list. - * + * * @param fs The file system reference. * @param regionDir The region directory to scan. * @return The list of files found. * @throws IOException When scanning the files fails. */ - static List getStoreFiles(FileSystem fs, Path regionDir) + static List getStoreFiles(FileSystem fs, Path regionDir) throws IOException { List res = new ArrayList(); PathFilter dirFilter = new FSUtils.DirFilter(fs); @@ -1679,7 +1679,7 @@ public class HFile { } return res; } - + public static void main(String []args) throws IOException { try { // create options @@ -1725,7 +1725,7 @@ public class HFile { Path regionDir = new Path(tableDir, Integer.toString(enc)); if (verbose) System.out.println("region dir -> " + regionDir); List regionFiles = getStoreFiles(fs, regionDir); - if (verbose) System.out.println("Number of region files found -> " + + if (verbose) System.out.println("Number of region files found -> " + regionFiles.size()); if (verbose) { int i = 1; @@ -1742,7 +1742,7 @@ public class HFile { System.err.println("ERROR, file doesnt exist: " + file); continue; } - // create reader and load file info + // create reader and load file info HFile.Reader reader = new HFile.Reader(fs, file, null, false); Map fileInfo = reader.loadFileInfo(); // scan over file and read key/value's and check if requested @@ -1760,9 +1760,9 @@ public class HFile { // check if rows are in order if (checkRow && pkv != null) { if (Bytes.compareTo(pkv.getRow(), kv.getRow()) > 0) { - System.err.println("WARNING, previous row is greater then" + - " current row\n\tfilename -> " + file + - "\n\tprevious -> " + Bytes.toStringBinary(pkv.getKey()) + + System.err.println("WARNING, previous row is greater then" + + " current row\n\tfilename -> " + file + + "\n\tprevious -> " + Bytes.toStringBinary(pkv.getKey()) + "\n\tcurrent -> " + Bytes.toStringBinary(kv.getKey())); } } @@ -1770,14 +1770,14 @@ public class HFile { if (checkFamily) { String fam = Bytes.toString(kv.getFamily()); if (!file.toString().contains(fam)) { - System.err.println("WARNING, filename does not match kv family," + - "\n\tfilename -> " + file + + System.err.println("WARNING, filename does not match kv family," + + "\n\tfilename -> " + file + "\n\tkeyvalue -> " + Bytes.toStringBinary(kv.getKey())); } if (pkv != null && Bytes.compareTo(pkv.getFamily(), kv.getFamily()) != 0) { System.err.println("WARNING, previous kv has different family" + - " compared to current key\n\tfilename -> " + file + - "\n\tprevious -> " + Bytes.toStringBinary(pkv.getKey()) + + " compared to current key\n\tfilename -> " + file + + "\n\tprevious -> " + Bytes.toStringBinary(pkv.getKey()) + "\n\tcurrent -> " + Bytes.toStringBinary(kv.getKey())); } } @@ -1787,7 +1787,7 @@ public class HFile { if (verbose || printKeyValue) { System.out.println("Scanned kv count -> " + count); } - // print meta data + // print meta data if (printMeta) { System.out.println("Block index size as per heapsize: " + reader.indexSize()); System.out.println(reader.toString()); diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java index 6b9673d..9d891c6 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java +++ 
b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.KeyValue; /** * A scanner allows you to position yourself within a HFile and * scan through it. It allows you to reposition yourself as well. - * + * *

A scanner doesn't always have a key/value that it is pointing to * when it is first created and before * {@link #seekTo()}/{@link #seekTo(byte[])} are called. @@ -40,7 +40,7 @@ public interface HFileScanner { /** * SeekTo or just before the passed key. Examine the return * code to figure whether we found the key or not. - * Consider the key stream of all the keys in the file, + * Consider the key stream of all the keys in the file, * k[0] .. k[n], where there are n keys in the file. * @param key Key to find. * @return -1, if key < k[0], no position; @@ -53,7 +53,7 @@ public interface HFileScanner { public int seekTo(byte[] key) throws IOException; public int seekTo(byte[] key, int offset, int length) throws IOException; /** - * Consider the key stream of all the keys in the file, + * Consider the key stream of all the keys in the file, * k[0] .. k[n], where there are n keys in the file. * @param key Key to find * @return false if key <= k[0] or true with scanner in position 'i' such @@ -87,7 +87,7 @@ public interface HFileScanner { /** * Gets a buffer view to the current value. You must call * {@link #seekTo(byte[])} before this method. - * + * * @return byte buffer for the value. The limit is set to the value size, and * the position is 0, the start of the buffer view. */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 897983c..cef5d9d 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -40,35 +40,35 @@ import org.apache.hadoop.hbase.util.ClassSize; * memory-bound using an LRU eviction algorithm, and concurrent: backed by a * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving * constant-time {@link #cacheBlock} and {@link #getBlock} operations.
<p>
- * + * * Contains three levels of block priority to allow for * scan-resistance and in-memory families. A block is added with an inMemory * flag if necessary, otherwise a block becomes a single access priority. Once * a blocked is accessed again, it changes to multiple access. This is used * to prevent scans from thrashing the cache, adding a least-frequently-used * element to the eviction algorithm.
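[Editorial aside: the paragraph above describes the three block priorities (single-access, multi-access, in-memory) and the promotion that gives the cache its scan resistance. A minimal stand-alone sketch of that policy follows; the enum and tracker class are illustrative stand-ins, not the CachedBlock code this patch touches.]

    // Illustrative stand-ins only; the real logic lives in CachedBlock/LruBlockCache.
    enum BlockPriority { SINGLE, MULTI, MEMORY }

    class PriorityTracker {
      private BlockPriority priority;

      PriorityTracker(boolean inMemory) {
        // blocks from in-memory column families get their own bucket up front
        this.priority = inMemory ? BlockPriority.MEMORY : BlockPriority.SINGLE;
      }

      // A second access promotes a single-access block to the multi-access bucket,
      // so one pass of a scan cannot flush frequently reused blocks.
      void access() {
        if (priority == BlockPriority.SINGLE) {
          priority = BlockPriority.MULTI;
        }
      }

      BlockPriority getPriority() {
        return priority;
      }
    }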
<p>
- * + * * Each priority is given its own chunk of the total cache to ensure * fairness during eviction. Each priority will retain close to its maximum * size, however, if any priority is not using its entire chunk the others * are able to grow beyond their chunk size.
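[Editorial aside: the fairness chunks described above are simply fractions of the maximum cache size; the defaults that appear a few hunks below are 0.25 (single), 0.50 (multi) and 0.25 (memory), and the configurable constructor rejects factors that do not total 1.0. The arithmetic below is a sketch of how those chunks and per-bucket overflow relate, not the exact expression used in the class.]

    public class ChunkMath {
      public static void main(String[] args) {
        long maxSize = 256L * 1024 * 1024;                 // 256 MB cache
        float single = 0.25f, multi = 0.50f, memory = 0.25f;

        // the configurable constructor throws if these do not sum to 1.0
        if (single + multi + memory != 1.0f) {
          throw new IllegalArgumentException("factors should total 1.0");
        }

        long singleChunk = (long) (maxSize * single);      //  64 MB
        long multiChunk  = (long) (maxSize * multi);       // 128 MB
        long memoryChunk = (long) (maxSize * memory);      //  64 MB

        // a bucket may grow past its chunk while others are under-used; during
        // eviction, overflow = totalSize - bucketSize decides which bucket frees first
        long multiInUse = 150L * 1024 * 1024;
        long multiOverflow = multiInUse - multiChunk;      // 22 MB over its chunk
        System.out.println(singleChunk + " " + multiChunk + " " + memoryChunk
            + " overflow=" + multiOverflow);
      }
    }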
<p>
- * + * * Instantiated at a minimum with the total size and average block size. - * All sizes are in bytes. The block size is not especially important as this + * All sizes are in bytes. The block size is not especially important as this * cache is fully dynamic in its sizing of blocks. It is only used for * pre-allocating data structures and in initial heap estimation of the map.
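[Editorial aside: as the paragraph above says, the block size only feeds pre-allocation and the initial heap estimate; the public calculateOverhead() shown later in this file sizes the map at roughly ceil(maxSize * 1.2 / blockSize) entries plus fixed and per-segment costs. A small usage sketch, assuming the HBase classes are on the classpath; the sizes are made up.]

    import org.apache.hadoop.hbase.io.hfile.LruBlockCache;

    public class OverheadSketch {
      public static void main(String[] args) {
        long maxSize = 512L * 1024 * 1024;   // total cache size in bytes
        long blockSize = 64 * 1024;          // expected average block size in bytes
        int concurrency = 16;                // matches DEFAULT_CONCURRENCY_LEVEL in this class

        // ceil(512 MB * 1.2 / 64 KB) = 9831 map entries are assumed up front
        long overhead = LruBlockCache.calculateOverhead(maxSize, blockSize, concurrency);
        System.out.println("estimated map + cache overhead: " + overhead + " bytes");
      }
    }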
<p>
- * + * * The detailed constructor defines the sizes for the three priorities (they * should total to the maximum size defined). It also sets the levels that * trigger and control the eviction thread.
<p>
- * + * * The acceptable size is the cache size level which triggers the eviction * process to start. It evicts enough blocks to get the size below the * minimum size specified.
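[Editorial aside: with the default factors defined in this class (min 0.75, acceptable 0.85), the trigger-and-target behaviour described above works out as in the sketch below: eviction starts once usage passes the acceptable level and frees enough to drop back under the minimum level, i.e. bytesToFree = current size minus minimum size, as in evict() later in this file. The numbers are illustrative.]

    public class ThresholdSketch {
      public static void main(String[] args) {
        long maxSize = 1024L * 1024 * 1024;       // 1 GB cache
        float minFactor = 0.75f;                  // DEFAULT_MIN_FACTOR
        float acceptableFactor = 0.85f;           // DEFAULT_ACCEPTABLE_FACTOR

        long acceptableSize = (long) Math.floor(maxSize * acceptableFactor); // ~870 MB
        long minSize = (long) Math.floor(maxSize * minFactor);               //  768 MB

        long currentSize = 900L * 1024 * 1024;    // pretend the cache holds 900 MB
        if (currentSize > acceptableSize) {
          // evict enough to land back below the minimum watermark
          long bytesToFree = currentSize - minSize;
          System.out.println("freeing " + bytesToFree + " bytes (~132 MB)");
        }
      }
    }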
<p>
- * + * * Eviction happens in a separate thread and involves a single full-scan * of the map. It determines how many bytes must be freed to reach the minimum - * size, and then while scanning determines the fewest least-recently-used + * size, and then while scanning determines the fewest least-recently-used * blocks necessary from each of the three priorities (would be 3 times bytes * to free). It then uses the priority chunk sizes to evict fairly according * to the relative sizes and usage. @@ -76,81 +76,81 @@ import org.apache.hadoop.hbase.util.ClassSize; public class LruBlockCache implements BlockCache, HeapSize { static final Log LOG = LogFactory.getLog(LruBlockCache.class); - + /** Default Configuration Parameters*/ - + /** Backing Concurrent Map Configuration */ static final float DEFAULT_LOAD_FACTOR = 0.75f; static final int DEFAULT_CONCURRENCY_LEVEL = 16; - + /** Eviction thresholds */ static final float DEFAULT_MIN_FACTOR = 0.75f; static final float DEFAULT_ACCEPTABLE_FACTOR = 0.85f; - + /** Priority buckets */ static final float DEFAULT_SINGLE_FACTOR = 0.25f; static final float DEFAULT_MULTI_FACTOR = 0.50f; static final float DEFAULT_MEMORY_FACTOR = 0.25f; - + /** Statistics thread */ static final int statThreadPeriod = 60; - + /** Concurrent map (the cache) */ private final ConcurrentHashMap map; - + /** Eviction lock (locked when eviction in process) */ private final ReentrantLock evictionLock = new ReentrantLock(true); - + /** Volatile boolean to track if we are in an eviction process or not */ private volatile boolean evictionInProgress = false; - + /** Eviction thread */ private final EvictionThread evictionThread; - + /** Statistics thread schedule pool (for heavy debugging, could remove) */ private final ScheduledExecutorService scheduleThreadPool = Executors.newScheduledThreadPool(1); - + /** Current size of cache */ private final AtomicLong size; - + /** Current number of cached elements */ private final AtomicLong elements; - + /** Cache access count (sequential ID) */ private final AtomicLong count; - + /** Cache statistics */ private final CacheStats stats; - + /** Maximum allowable size of cache (block put if size > max, evict) */ private long maxSize; /** Approximate block size */ private long blockSize; - + /** Acceptable size of cache (no evictions if size < acceptable) */ private float acceptableFactor; - + /** Minimum threshold of cache (when evicting, evict until size < min) */ private float minFactor; - + /** Single access bucket size */ private float singleFactor; - + /** Multiple access bucket size */ private float multiFactor; - + /** In-memory bucket size */ private float memoryFactor; - + /** Overhead of the structure itself */ private long overhead; - + /** * Default constructor. Specify maximum size and expected average block * size (approximation is fine). - * + * *
<p>
All other factors will be calculated based on defaults specified in * this class. * @param maxSize maximum size of cache, in bytes @@ -159,7 +159,7 @@ public class LruBlockCache implements BlockCache, HeapSize { public LruBlockCache(long maxSize, long blockSize) { this(maxSize, blockSize, true); } - + /** * Constructor used for testing. Allows disabling of the eviction thread. */ @@ -171,7 +171,7 @@ public class LruBlockCache implements BlockCache, HeapSize { DEFAULT_SINGLE_FACTOR, DEFAULT_MULTI_FACTOR, DEFAULT_MEMORY_FACTOR); } - + /** * Configurable constructor. Use this constructor if not using defaults. * @param maxSize maximum size of this cache, in bytes @@ -191,7 +191,7 @@ public class LruBlockCache implements BlockCache, HeapSize { float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor) { if(singleFactor + multiFactor + memoryFactor != 1) { - throw new IllegalArgumentException("Single, multi, and memory factors " + + throw new IllegalArgumentException("Single, multi, and memory factors " + " should total 1.0"); } if(minFactor >= acceptableFactor) { @@ -223,16 +223,16 @@ public class LruBlockCache implements BlockCache, HeapSize { this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), statThreadPeriod, statThreadPeriod, TimeUnit.SECONDS); } - + public void setMaxSize(long maxSize) { this.maxSize = maxSize; if(this.size.get() > acceptableSize() && !evictionInProgress) { runEviction(); } } - + // BlockCache implementation - + /** * Cache the block with the specified name and buffer. *
<p>
@@ -295,7 +295,7 @@ public class LruBlockCache implements BlockCache, HeapSize { stats.evicted(); return block.heapSize(); } - + /** * Multi-threaded call to run the eviction process. */ @@ -306,7 +306,7 @@ public class LruBlockCache implements BlockCache, HeapSize { evictionThread.evict(); } } - + /** * Eviction method. */ @@ -314,25 +314,25 @@ public class LruBlockCache implements BlockCache, HeapSize { // Ensure only one eviction at a time if(!evictionLock.tryLock()) return; - + try { evictionInProgress = true; - + long bytesToFree = size.get() - minSize(); - - LOG.debug("Block cache LRU eviction started. Attempting to free " + + + LOG.debug("Block cache LRU eviction started. Attempting to free " + bytesToFree + " bytes"); - + if(bytesToFree <= 0) return; - + // Instantiate priority buckets - BlockBucket bucketSingle = new BlockBucket(bytesToFree, blockSize, + BlockBucket bucketSingle = new BlockBucket(bytesToFree, blockSize, singleSize()); - BlockBucket bucketMulti = new BlockBucket(bytesToFree, blockSize, + BlockBucket bucketMulti = new BlockBucket(bytesToFree, blockSize, multiSize()); - BlockBucket bucketMemory = new BlockBucket(bytesToFree, blockSize, + BlockBucket bucketMemory = new BlockBucket(bytesToFree, blockSize, memorySize()); - + // Scan entire map putting into appropriate buckets for(CachedBlock cachedBlock : map.values()) { switch(cachedBlock.getPriority()) { @@ -350,17 +350,17 @@ public class LruBlockCache implements BlockCache, HeapSize { } } } - - PriorityQueue bucketQueue = + + PriorityQueue bucketQueue = new PriorityQueue(3); - + bucketQueue.add(bucketSingle); bucketQueue.add(bucketMulti); bucketQueue.add(bucketMemory); - + int remainingBuckets = 3; long bytesFreed = 0; - + BlockBucket bucket; while((bucket = bucketQueue.poll()) != null) { long overflow = bucket.overflow(); @@ -368,28 +368,28 @@ public class LruBlockCache implements BlockCache, HeapSize { long bucketBytesToFree = Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets); bytesFreed += bucket.free(bucketBytesToFree); - } + } remainingBuckets--; } - + float singleMB = ((float)bucketSingle.totalSize())/((float)(1024*1024)); float multiMB = ((float)bucketMulti.totalSize())/((float)(1024*1024)); float memoryMB = ((float)bucketMemory.totalSize())/((float)(1024*1024)); - - LOG.debug("Block cache LRU eviction completed. " + + + LOG.debug("Block cache LRU eviction completed. " + "Freed " + bytesFreed + " bytes. " + "Priority Sizes: " + "Single=" + singleMB + "MB (" + bucketSingle.totalSize() + "), " + "Multi=" + multiMB + "MB (" + bucketMulti.totalSize() + ")," + "Memory=" + memoryMB + "MB (" + bucketMemory.totalSize() + ")"); - + } finally { stats.evict(); evictionInProgress = false; evictionLock.unlock(); } } - + /** * Used to group blocks into priority buckets. There will be a BlockBucket * for each priority (single, multi, memory). 
Once bucketed, the eviction @@ -401,18 +401,18 @@ public class LruBlockCache implements BlockCache, HeapSize { private CachedBlockQueue queue; private long totalSize = 0; private long bucketSize; - + public BlockBucket(long bytesToFree, long blockSize, long bucketSize) { this.bucketSize = bucketSize; queue = new CachedBlockQueue(bytesToFree, blockSize); totalSize = 0; } - + public void add(CachedBlock block) { totalSize += block.heapSize(); queue.add(block); } - + public long free(long toFree) { CachedBlock [] blocks = queue.get(); long freedBytes = 0; @@ -424,21 +424,21 @@ public class LruBlockCache implements BlockCache, HeapSize { } return freedBytes; } - + public long overflow() { return totalSize - bucketSize; } - + public long totalSize() { return totalSize; } - + public int compareTo(BlockBucket that) { if(this.overflow() == that.overflow()) return 0; return this.overflow() > that.overflow() ? 1 : -1; } } - + /** * Get the maximum size of this cache. * @return max size in bytes @@ -446,7 +446,7 @@ public class LruBlockCache implements BlockCache, HeapSize { public long getMaxSize() { return this.maxSize; } - + /** * Get the current size of this cache. * @return current size in bytes @@ -454,7 +454,7 @@ public class LruBlockCache implements BlockCache, HeapSize { public long getCurrentSize() { return this.size.get(); } - + /** * Get the current size of this cache. * @return current size in bytes @@ -462,7 +462,7 @@ public class LruBlockCache implements BlockCache, HeapSize { public long getFreeSize() { return getMaxSize() - getCurrentSize(); } - + /** * Get the size of this cache (number of cached blocks) * @return number of cached blocks @@ -470,14 +470,14 @@ public class LruBlockCache implements BlockCache, HeapSize { public long size() { return this.elements.get(); } - + /** * Get the number of eviction runs that have occurred */ public long getEvictionCount() { return this.stats.getEvictionCount(); } - + /** * Get the number of blocks that have been evicted during the lifetime * of this cache. @@ -485,22 +485,22 @@ public class LruBlockCache implements BlockCache, HeapSize { public long getEvictedCount() { return this.stats.getEvictedCount(); } - + /* * Eviction thread. Sits in waiting state until an eviction is triggered * when the cache size grows above the acceptable level.
<p>
- * + * * Thread is triggered into action by {@link LruBlockCache#runEviction()} */ private static class EvictionThread extends Thread { private WeakReference cache; - + public EvictionThread(LruBlockCache cache) { super("LruBlockCache.EvictionThread"); setDaemon(true); this.cache = new WeakReference(cache); } - + @Override public void run() { while(true) { @@ -520,7 +520,7 @@ public class LruBlockCache implements BlockCache, HeapSize { } } } - + /* * Statistics thread. Periodically prints the cache statistics to the log. */ @@ -537,7 +537,7 @@ public class LruBlockCache implements BlockCache, HeapSize { lru.logStats(); } } - + public void logStats() { // Log size long totalSize = heapSize(); @@ -545,7 +545,7 @@ public class LruBlockCache implements BlockCache, HeapSize { float sizeMB = ((float)totalSize)/((float)(1024*1024)); float freeMB = ((float)freeSize)/((float)(1024*1024)); float maxMB = ((float)maxSize)/((float)(1024*1024)); - LruBlockCache.LOG.debug("Cache Stats: Sizes: " + + LruBlockCache.LOG.debug("Cache Stats: Sizes: " + "Total=" + sizeMB + "MB (" + totalSize + "), " + "Free=" + freeMB + "MB (" + freeSize + "), " + "Max=" + maxMB + "MB (" + maxSize +")" + @@ -561,46 +561,46 @@ public class LruBlockCache implements BlockCache, HeapSize { "Miss Ratio=" + stats.getMissRatio()*100 + "%, " + "Evicted/Run=" + stats.evictedPerEviction()); } - + /** * Get counter statistics for this cache. - * + * *
<p>
Includes: total accesses, hits, misses, evicted blocks, and runs * of the eviction processes. */ public CacheStats getStats() { return this.stats; } - + public static class CacheStats { private final AtomicLong accessCount = new AtomicLong(0); private final AtomicLong hitCount = new AtomicLong(0); private final AtomicLong missCount = new AtomicLong(0); private final AtomicLong evictionCount = new AtomicLong(0); private final AtomicLong evictedCount = new AtomicLong(0); - + public void miss() { missCount.incrementAndGet(); accessCount.incrementAndGet(); } - + public void hit() { hitCount.incrementAndGet(); accessCount.incrementAndGet(); } - + public void evict() { evictionCount.incrementAndGet(); } - + public void evicted() { evictedCount.incrementAndGet(); } - + public long getRequestCount() { return accessCount.get(); } - + public long getMissCount() { return missCount.get(); } @@ -608,48 +608,48 @@ public class LruBlockCache implements BlockCache, HeapSize { public long getHitCount() { return hitCount.get(); } - + public long getEvictionCount() { return evictionCount.get(); } - + public long getEvictedCount() { return evictedCount.get(); } - + public double getHitRatio() { return ((float)getHitCount()/(float)getRequestCount()); } - + public double getMissRatio() { return ((float)getMissCount()/(float)getRequestCount()); } - + public double evictedPerEviction() { return (float)((float)getEvictedCount()/(float)getEvictionCount()); } } - + public final static long CACHE_FIXED_OVERHEAD = ClassSize.align( - (3 * Bytes.SIZEOF_LONG) + (8 * ClassSize.REFERENCE) + + (3 * Bytes.SIZEOF_LONG) + (8 * ClassSize.REFERENCE) + (5 * Bytes.SIZEOF_FLOAT) + Bytes.SIZEOF_BOOLEAN + ClassSize.OBJECT); - + // HeapSize implementation public long heapSize() { return getCurrentSize(); } - + public static long calculateOverhead(long maxSize, long blockSize, int concurrency){ // FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP + - ((long)Math.ceil(maxSize*1.2/blockSize) + ((long)Math.ceil(maxSize*1.2/blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY) + (concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT); } - + // Simple calculators of sizes given factors and maxSize - + private long acceptableSize() { return (long)Math.floor(this.maxSize * this.acceptableFactor); } diff --git a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java index d87afee..dc71f64 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java +++ b/core/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java @@ -37,19 +37,19 @@ public class SimpleBlockCache implements BlockCache { this.blockId = blockId; } } - private Map cache = + private Map cache = new HashMap(); private ReferenceQueue q = new ReferenceQueue(); public int dumps = 0; - + /** * Constructor */ public SimpleBlockCache() { super(); } - + void processQueue() { Ref r; while ( (r = (Ref)q.poll()) != null) { @@ -78,7 +78,7 @@ public class SimpleBlockCache implements BlockCache { cache.put(blockName, new Ref(blockName, buf, q)); } - public synchronized void cacheBlock(String blockName, ByteBuffer buf, + public synchronized void cacheBlock(String blockName, ByteBuffer buf, boolean inMemory) { cache.put(blockName, new Ref(blockName, buf, q)); } diff --git a/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java b/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java index 
577b50c..c7083f6 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java +++ b/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java @@ -55,14 +55,14 @@ import java.util.concurrent.atomic.AtomicLong; /** A client for an IPC service. IPC calls take a single {@link Writable} as a * parameter, and return a {@link Writable} as their value. A service runs on * a port and is defined by a parameter class and a value class. - * + * *
<p>
This is the org.apache.hadoop.ipc.Client renamed as HBaseClient and * moved into this package so can access package-private methods. - * + * * @see HBaseServer */ public class HBaseClient { - + private static final Log LOG = LogFactory.getLog("org.apache.hadoop.ipc.HBaseClient"); protected final Hashtable connections = @@ -82,14 +82,14 @@ public class HBaseClient { protected final SocketFactory socketFactory; // how to create sockets private int refCount = 1; - + final private static String PING_INTERVAL_NAME = "ipc.ping.interval"; final static int DEFAULT_PING_INTERVAL = 60000; // 1 min final static int PING_CALL_ID = -1; - + /** * set the ping interval value in configuration - * + * * @param conf Configuration * @param pingInterval the ping interval */ @@ -101,14 +101,14 @@ public class HBaseClient { /** * Get the ping interval from configuration; * If not set in the configuration, return the default value. - * + * * @param conf Configuration * @return the ping interval */ static int getPingInterval(Configuration conf) { return conf.getInt(PING_INTERVAL_NAME, DEFAULT_PING_INTERVAL); } - + /** * Increment this client's reference count * @@ -116,7 +116,7 @@ public class HBaseClient { synchronized void incCount() { refCount++; } - + /** * Decrement this client's reference count * @@ -124,10 +124,10 @@ public class HBaseClient { synchronized void decCount() { refCount--; } - + /** * Return if this client has no reference - * + * * @return true if this client has no reference; false otherwise */ synchronized boolean isZeroReference() { @@ -158,17 +158,17 @@ public class HBaseClient { /** Set the exception when there is an error. * Notify the caller the call is done. - * + * * @param error exception thrown by the call; either local or remote */ public synchronized void setException(IOException error) { this.error = error; callComplete(); } - - /** Set the return value when there is no error. + + /** Set the return value when there is no error. * Notify the caller the call is done. - * + * * @param value return value of the call. */ public synchronized void setValue(Writable value) { @@ -185,7 +185,7 @@ public class HBaseClient { private Socket socket = null; // connected socket private DataInputStream in; private DataOutputStream out; - + // currently active calls private final Hashtable calls = new Hashtable(); private final AtomicLong lastActivity = new AtomicLong();// last I/O activity time @@ -195,10 +195,10 @@ public class HBaseClient { public Connection(InetSocketAddress address) throws IOException { this(new ConnectionId(address, null)); } - + public Connection(ConnectionId remoteId) throws IOException { if (remoteId.getAddress().isUnresolved()) { - throw new UnknownHostException("unknown host: " + + throw new UnknownHostException("unknown host: " + remoteId.getAddress().getHostName()); } this.remoteId = remoteId; @@ -249,7 +249,7 @@ public class HBaseClient { } sendPing(); } - + /** Read a byte from the stream. * Send a ping if timeout on read. Retries if no failure is detected * until a byte is read. @@ -269,7 +269,7 @@ public class HBaseClient { /** Read bytes into a buffer starting from offset off * Send a ping if timeout on read. Retries if no failure is detected * until a byte is read. - * + * * @return the total number of bytes read; -1 if the connection is closed. */ @Override @@ -283,7 +283,7 @@ public class HBaseClient { } while (true); } } - + /** Connect to the server and set up the I/O streams. 
It then sends * a header to the server and starts * the connection thread that waits for responses. @@ -293,7 +293,7 @@ public class HBaseClient { if (socket != null || shouldCloseConnection.get()) { return; } - + short ioFailures = 0; short timeoutFailures = 0; try { @@ -371,8 +371,8 @@ public class HBaseClient { try { Thread.sleep(failureSleep); } catch (InterruptedException ignored) {} - - LOG.info("Retrying connect to server: " + remoteId.getAddress() + + + LOG.info("Retrying connect to server: " + remoteId.getAddress() + " after sleeping " + failureSleep + "ms. Already tried " + curRetries + " time(s)."); } @@ -385,17 +385,17 @@ public class HBaseClient { out.write(HBaseServer.CURRENT_VERSION); //When there are more fields we can have ConnectionHeader Writable. DataOutputBuffer buf = new DataOutputBuffer(); - ObjectWritable.writeObject(buf, remoteId.getTicket(), + ObjectWritable.writeObject(buf, remoteId.getTicket(), UserGroupInformation.class, conf); int bufLen = buf.getLength(); out.writeInt(bufLen); out.write(buf.getData(), 0, bufLen); } - + /* wait till someone signals us to start reading RPC response or - * it is idle too long, it is marked as to be closed, + * it is idle too long, it is marked as to be closed, * or the client is marked as not running. - * + * * Return true if it is time to read a response; false otherwise. */ @SuppressWarnings({"ThrowableInstanceNeverThrown"}) @@ -409,7 +409,7 @@ public class HBaseClient { } catch (InterruptedException ignored) {} } } - + if (!calls.isEmpty() && !shouldCloseConnection.get() && running.get()) { return true; } else if (shouldCloseConnection.get()) { @@ -417,7 +417,7 @@ public class HBaseClient { } else if (calls.isEmpty()) { // idle connection closed or stopped markClosed(null); return false; - } else { // get stopped but there are still pending requests + } else { // get stopped but there are still pending requests markClosed((IOException)new IOException().initCause( new InterruptedException())); return false; @@ -428,7 +428,7 @@ public class HBaseClient { return remoteId.getAddress(); } - /* Send a ping to the server if the time elapsed + /* Send a ping to the server if the time elapsed * since last I/O activity is equal to or greater than the ping interval */ protected synchronized void sendPing() throws IOException { @@ -446,7 +446,7 @@ public class HBaseClient { @Override public void run() { if (LOG.isDebugEnabled()) - LOG.debug(getName() + ": starting, having connections " + LOG.debug(getName() + ": starting, having connections " + connections.size()); try { @@ -459,7 +459,7 @@ public class HBaseClient { } close(); - + if (LOG.isDebugEnabled()) LOG.debug(getName() + ": stopped, remaining connections " + connections.size()); @@ -480,7 +480,7 @@ public class HBaseClient { synchronized (this.out) { // FindBugs IS2_INCONSISTENT_SYNC if (LOG.isDebugEnabled()) LOG.debug(getName() + " sending #" + call.id); - + //for serializing the //data to be written d = new DataOutputBuffer(); @@ -499,7 +499,7 @@ public class HBaseClient { // close early IOUtils.closeStream(d); } - } + } /* Receive a response. * Because only one receiver, so no synchronization on in. @@ -509,7 +509,7 @@ public class HBaseClient { return; } touch(); - + try { int id = in.readInt(); // try to read an id @@ -533,14 +533,14 @@ public class HBaseClient { markClosed(e); } } - + private synchronized void markClosed(IOException e) { if (shouldCloseConnection.compareAndSet(false, true)) { closeException = e; notifyAll(); } } - + /** Close the connection. 
*/ private synchronized void close() { if (!shouldCloseConnection.get()) { @@ -583,14 +583,14 @@ public class HBaseClient { if (LOG.isDebugEnabled()) LOG.debug(getName() + ": closed"); } - + /* Cleanup all calls and mark them as done */ private void cleanupCalls() { Iterator> itor = calls.entrySet().iterator() ; while (itor.hasNext()) { - Call c = itor.next().getValue(); + Call c = itor.next().getValue(); c.setException(closeException); // local exception - itor.remove(); + itor.remove(); } } } @@ -599,7 +599,7 @@ public class HBaseClient { private class ParallelCall extends Call { private final ParallelResults results; protected final int index; - + public ParallelCall(Writable param, ParallelResults results, int index) { super(param); this.results = results; @@ -643,10 +643,10 @@ public class HBaseClient { * @param conf configuration * @param factory socket factory */ - public HBaseClient(Class valueClass, Configuration conf, + public HBaseClient(Class valueClass, Configuration conf, SocketFactory factory) { this.valueClass = valueClass; - this.maxIdleTime = + this.maxIdleTime = conf.getInt("hbase.ipc.client.connection.maxidletime", 10000); //10s this.maxRetries = conf.getInt("hbase.ipc.client.connect.max.retries", 0); this.failureSleep = conf.getInt("hbase.client.pause", 2000); @@ -668,7 +668,7 @@ public class HBaseClient { public HBaseClient(Class valueClass, Configuration conf) { this(valueClass, conf, NetUtils.getDefaultSocketFactory(conf)); } - + /** Return the socket factory of this client * * @return this client's socket factory @@ -687,14 +687,14 @@ public class HBaseClient { if (!running.compareAndSet(true, false)) { return; } - + // wake up all connections synchronized (connections) { for (Connection conn : connections.values()) { conn.interrupt(); } } - + // wait until all connections are closed while (!connections.isEmpty()) { try { @@ -706,7 +706,7 @@ public class HBaseClient { /** Make a call, passing param, to the IPC server running at * address, returning the value. Throws exceptions if there are - * network problems or if the remote code threw an exception. + * network problems or if the remote code threw an exception. * @param param writable parameter * @param address network address * @return Writable @@ -716,9 +716,9 @@ public class HBaseClient { throws IOException { return call(param, address, null); } - - public Writable call(Writable param, InetSocketAddress addr, - UserGroupInformation ticket) + + public Writable call(Writable param, InetSocketAddress addr, + UserGroupInformation ticket) throws IOException { Call call = new Call(param); Connection connection = getConnection(addr, ticket, call); @@ -755,11 +755,11 @@ public class HBaseClient { /** * Take an IOException and the address we were trying to connect to * and return an IOException with the input exception as the cause. - * The new exception provides the stack trace of the place where + * The new exception provides the stack trace of the place where * the exception is thrown and some extra diagnostics information. - * If the exception is ConnectException or SocketTimeoutException, + * If the exception is ConnectException or SocketTimeoutException, * return a new one of the same type; Otherwise return an IOException. - * + * * @param addr target address * @param exception the relevant exception * @return an exception to throw @@ -787,7 +787,7 @@ public class HBaseClient { /** Makes a set of calls in parallel. Each parameter is sent to the * corresponding address. 
When all values are available, or have timed out * or errored, the collected results are returned in an array. The array - * contains nulls for calls that timed out or errored. + * contains nulls for calls that timed out or errored. * @param params writable parameters * @param addresses socket addresses * @return Writable[] @@ -808,7 +808,7 @@ public class HBaseClient { connection.sendParam(call); // send each parameter } catch (IOException e) { // log errors - LOG.info("Calling "+addresses[i]+" caught: " + + LOG.info("Calling "+addresses[i]+" caught: " + e.getMessage(),e); results.size--; // wait for one fewer result } @@ -825,7 +825,7 @@ public class HBaseClient { /* Get a connection from the pool, or create a new one and add it to the * pool. Connections to a given host/port are reused. */ - private Connection getConnection(InetSocketAddress addr, + private Connection getConnection(InetSocketAddress addr, UserGroupInformation ticket, Call call) throws IOException { @@ -834,7 +834,7 @@ public class HBaseClient { throw new IOException("The client is stopped"); } Connection connection; - /* we could avoid this allocation for each RPC by having a + /* we could avoid this allocation for each RPC by having a * connectionsId object and with set() method. We need to manage the * refs for keys in HashMap properly. For now its ok. */ @@ -848,7 +848,7 @@ public class HBaseClient { } } } while (!connection.addCall(call)); - + //we don't invoke the method below inside "synchronized (connections)" //block above. The reason for that is if the server happens to be slow, //it will take longer to establish a connection and that will slow the @@ -864,19 +864,19 @@ public class HBaseClient { private static class ConnectionId { final InetSocketAddress address; final UserGroupInformation ticket; - + ConnectionId(InetSocketAddress address, UserGroupInformation ticket) { this.address = address; this.ticket = ticket; } - + InetSocketAddress getAddress() { return address; } UserGroupInformation getTicket() { return ticket; } - + @Override public boolean equals(Object obj) { if (obj instanceof ConnectionId) { @@ -886,10 +886,10 @@ public class HBaseClient { } return false; } - + @Override public int hashCode() { return address.hashCode() ^ System.identityHashCode(ticket); } - } + } } diff --git a/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java b/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java index 3b63fdf..9873172 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java +++ b/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java @@ -53,7 +53,7 @@ import java.util.Map; * optimizations like using our own version of ObjectWritable. Class has been * renamed to avoid confusing it w/ hadoop versions. *
<p>
- * + * * * A protocol is a Java interface. All parameters and return types must * be one of: @@ -167,9 +167,9 @@ public class HBaseRPC { protected ClientCache() {} /** - * Construct & cache an IPC client with the user-provided SocketFactory + * Construct & cache an IPC client with the user-provided SocketFactory * if no cached client exists. - * + * * @param conf Configuration * @param factory socket factory * @return an IPC client @@ -193,9 +193,9 @@ public class HBaseRPC { } /** - * Construct & cache an IPC client with the default SocketFactory + * Construct & cache an IPC client with the default SocketFactory * if no cached client exists. - * + * * @param conf Configuration * @return an IPC client */ @@ -204,7 +204,7 @@ public class HBaseRPC { } /** - * Stop a RPC client connection + * Stop a RPC client connection * A RPC client is closed only when its reference count becomes zero. * @param client client to stop */ @@ -222,7 +222,7 @@ public class HBaseRPC { } protected final static ClientCache CLIENTS = new ClientCache(); - + private static class Invoker implements InvocationHandler { private InetSocketAddress address; private UserGroupInformation ticket; @@ -235,7 +235,7 @@ public class HBaseRPC { * @param conf configuration * @param factory socket factory */ - public Invoker(InetSocketAddress address, UserGroupInformation ticket, + public Invoker(InetSocketAddress address, UserGroupInformation ticket, Configuration conf, SocketFactory factory) { this.address = address; this.ticket = ticket; @@ -257,8 +257,8 @@ public class HBaseRPC { } return value.get(); } - - /* close the IPC client that's responsible for this invoker's RPCs */ + + /* close the IPC client that's responsible for this invoker's RPCs */ synchronized protected void close() { if (!isClosed) { isClosed = true; @@ -275,7 +275,7 @@ public class HBaseRPC { private String interfaceName; private long clientVersion; private long serverVersion; - + /** * Create a version mismatch exception * @param interfaceName the name of the protocol mismatch @@ -290,23 +290,23 @@ public class HBaseRPC { this.clientVersion = clientVersion; this.serverVersion = serverVersion; } - + /** * Get the interface name - * @return the java class name + * @return the java class name * (eg. org.apache.hadoop.mapred.InterTrackerProtocol) */ public String getInterfaceName() { return interfaceName; } - + /** * @return the client's preferred version */ public long getClientVersion() { return clientVersion; } - + /** * @return the server's agreed to version. */ @@ -314,7 +314,7 @@ public class HBaseRPC { return serverVersion; } } - + /** * @param protocol protocol interface * @param clientVersion which client version we expect @@ -383,7 +383,7 @@ public class HBaseRPC { SocketFactory factory) throws IOException { return getProxy(protocol, clientVersion, addr, null, conf, factory); } - + /** * Construct a client-side proxy object that implements the named protocol, * talking to a server at the named address. 
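[Editorial aside: the Invoker and getProxy hunks above use java.lang.reflect.Proxy so that calls on the generated stub become client.call() RPCs, followed by a protocol-version check. The generic sketch below shows the same pattern with a toy interface and an in-process handler; EchoProtocol and the version values are made up for illustration.]

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.Method;
    import java.lang.reflect.Proxy;

    public class ProxySketch {
      public interface EchoProtocol {
        long getProtocolVersion();
        String echo(String msg);
      }

      public static void main(String[] args) {
        final long serverVersion = 3L;            // pretend this came back from the server
        InvocationHandler invoker = new InvocationHandler() {
          public Object invoke(Object proxy, Method method, Object[] a) {
            // a real invoker would serialize the method name and arguments and
            // ship them over the IPC client; here we answer locally
            if (method.getName().equals("getProtocolVersion")) {
              return serverVersion;
            }
            return "echo:" + a[0];
          }
        };

        EchoProtocol stub = (EchoProtocol) Proxy.newProxyInstance(
            EchoProtocol.class.getClassLoader(),
            new Class[] { EchoProtocol.class }, invoker);

        long clientVersion = 3L;
        if (stub.getProtocolVersion() != clientVersion) {
          // getProxy() raises VersionMismatch in the same situation
          throw new RuntimeException("version mismatch");
        }
        System.out.println(stub.echo("hello"));
      }
    }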
@@ -400,23 +400,23 @@ public class HBaseRPC { public static VersionedProtocol getProxy(Class protocol, long clientVersion, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory) - throws IOException { + throws IOException { VersionedProtocol proxy = (VersionedProtocol) Proxy.newProxyInstance( protocol.getClassLoader(), new Class[] { protocol }, new Invoker(addr, ticket, conf, factory)); - long serverVersion = proxy.getProtocolVersion(protocol.getName(), + long serverVersion = proxy.getProtocolVersion(protocol.getName(), clientVersion); if (serverVersion == clientVersion) { return proxy; } - throw new VersionMismatch(protocol.getName(), clientVersion, + throw new VersionMismatch(protocol.getName(), clientVersion, serverVersion); } /** * Construct a client-side proxy object with the default SocketFactory - * + * * @param protocol interface * @param clientVersion version we are expecting * @param addr remote address @@ -462,7 +462,7 @@ public class HBaseRPC { HBaseClient client = CLIENTS.getClient(conf); try { Writable[] wrappedValues = client.call(invocations, addrs); - + if (method.getReturnType() == Void.TYPE) { return null; } @@ -472,7 +472,7 @@ public class HBaseRPC { for (int i = 0; i < values.length; i++) if (wrappedValues[i] != null) values[i] = ((HbaseObjectWritable)wrappedValues[i]).get(); - + return values; } finally { CLIENTS.stopClient(client); @@ -490,7 +490,7 @@ public class HBaseRPC { * @return Server * @throws IOException e */ - public static Server getServer(final Object instance, final String bindAddress, final int port, Configuration conf) + public static Server getServer(final Object instance, final String bindAddress, final int port, Configuration conf) throws IOException { return getServer(instance, bindAddress, port, 1, false, conf); } @@ -510,7 +510,7 @@ public class HBaseRPC { */ public static Server getServer(final Object instance, final String bindAddress, final int port, final int numHandlers, - final boolean verbose, Configuration conf) + final boolean verbose, Configuration conf) throws IOException { return new Server(instance, conf, bindAddress, port, numHandlers, verbose); } @@ -529,11 +529,11 @@ public class HBaseRPC { * @param port the port to listen for connections on * @throws IOException e */ - public Server(Object instance, Configuration conf, String bindAddress, int port) + public Server(Object instance, Configuration conf, String bindAddress, int port) throws IOException { this(instance, conf, bindAddress, port, 1, false); } - + private static String classNameBase(String className) { String[] names = className.split("\\.", -1); if (names == null || names.length == 0) { @@ -541,7 +541,7 @@ public class HBaseRPC { } return names[names.length-1]; } - + /** Construct an RPC server. * @param instance the instance whose methods will be called * @param conf the configuration to use diff --git a/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCProtocolVersion.java b/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCProtocolVersion.java index 78dad71..58b29ab 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCProtocolVersion.java +++ b/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCProtocolVersion.java @@ -29,7 +29,7 @@ import org.apache.hadoop.ipc.VersionedProtocol; public interface HBaseRPCProtocolVersion extends VersionedProtocol { /** * Interface version. - * + * * HMasterInterface version history: *

    *
  • Version was incremented to 2 when we brought the hadoop RPC local to diff --git a/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCStatistics.java b/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCStatistics.java index e511cd9..c9b0257 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCStatistics.java +++ b/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCStatistics.java @@ -38,7 +38,7 @@ public class HBaseRPCStatistics extends MetricsDynamicMBeanBase { String hostName, String port) { super(registry, "HBaseRPCStatistics"); - String name = String.format("RPCStatistics-%s", + String name = String.format("RPCStatistics-%s", (port != null ? port : "unknown")); mbeanName = MBeanUtil.registerMBean("HBase", name, this); diff --git a/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java b/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java index 1d9900c..d88c12d 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java +++ b/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java @@ -30,7 +30,7 @@ import org.apache.hadoop.metrics.util.MetricsRegistry; import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; /** - * + * * This class is for maintaining the various RPC statistics * and publishing them through the metrics interfaces. * This also registers the JMX MBean for RPC. @@ -45,22 +45,22 @@ public class HBaseRpcMetrics implements Updater { private MetricsRecord metricsRecord; private static Log LOG = LogFactory.getLog(HBaseRpcMetrics.class); private final HBaseRPCStatistics rpcStatistics; - + public HBaseRpcMetrics(String hostName, String port) { MetricsContext context = MetricsUtil.getContext("rpc"); metricsRecord = MetricsUtil.createRecord(context, "metrics"); metricsRecord.setTag("port", port); - LOG.info("Initializing RPC Metrics with hostName=" + LOG.info("Initializing RPC Metrics with hostName=" + hostName + ", port=" + port); context.registerUpdater(this); - + rpcStatistics = new HBaseRPCStatistics(this.registry, hostName, port); } - - + + /** * The metrics variables are public: * - they can be set directly by calling their set/inc methods diff --git a/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java b/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java index 48fef92..6c51781 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java +++ b/core/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java @@ -63,28 +63,28 @@ import java.util.concurrent.LinkedBlockingQueue; /** An abstract IPC service. IPC calls take a single {@link Writable} as a * parameter, and return a {@link Writable} as their value. A service runs on * a port and is defined by a parameter class and a value class. - * - * + * + * *
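[Editorial aside: the HBaseServer hunks that follow define the 4-byte "hrpc" HEADER and CURRENT_VERSION = 3, and Connection.readAndProcess() rejects any connection that does not start with them; the matching client-side write of HBaseServer.CURRENT_VERSION appears in the HBaseClient hunk earlier in this patch. The toy sketch below shows that framing end to end with in-memory streams; the stream wiring is illustrative, not HBase code.]

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.IOException;
    import java.util.Arrays;

    public class PreambleDemo {
      static final byte[] MAGIC = "hrpc".getBytes();
      static final byte VERSION = 3;

      public static void main(String[] args) throws IOException {
        // client side: write the preamble before any call data
        ByteArrayOutputStream wire = new ByteArrayOutputStream();
        wire.write(MAGIC);
        wire.write(VERSION);

        // server side: reject anything that does not start with the expected preamble
        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(wire.toByteArray()));
        byte[] magic = new byte[4];
        in.readFully(magic);
        byte version = in.readByte();
        if (!Arrays.equals(magic, MAGIC) || version != VERSION) {
          throw new IOException("Incorrect header or version mismatch");
        }
        System.out.println("preamble accepted, version " + version);
      }
    }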
<p>
    Copied local so can fix HBASE-900. - * + * * @see HBaseClient */ public abstract class HBaseServer { - + /** * The first four bytes of Hadoop RPC connections */ public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes()); - + // 1 : Introduce ping and server does not throw away RPCs - // 3 : RPC was refactored in 0.19 + // 3 : RPC was refactored in 0.19 public static final byte CURRENT_VERSION = 3; - + /** * How many calls/handler are allowed in the queue. */ private static final int MAX_QUEUE_SIZE_PER_HANDLER = 100; - + public static final Log LOG = LogFactory.getLog("org.apache.hadoop.ipc.HBaseServer"); @@ -100,13 +100,13 @@ public abstract class HBaseServer { public static HBaseServer get() { return SERVER.get(); } - + /** This is set to Call object before Handler invokes an RPC and reset * after the call returns. */ protected static final ThreadLocal CurCall = new ThreadLocal(); - - /** Returns the remote side ip address when invoked inside an RPC + + /** Returns the remote side ip address when invoked inside an RPC * Returns null incase of an error. * @return InetAddress */ @@ -126,23 +126,23 @@ public abstract class HBaseServer { return (addr == null) ? null : addr.getHostAddress(); } - protected String bindAddress; + protected String bindAddress; protected int port; // port we listen on private int handlerCount; // number of handler threads protected Class paramClass; // class of call parameters - protected int maxIdleTime; // the maximum idle time after + protected int maxIdleTime; // the maximum idle time after // which a client may be // disconnected protected int thresholdIdleConnections; // the number of idle - // connections after which we - // will start cleaning up idle + // connections after which we + // will start cleaning up idle // connections - int maxConnectionsToNuke; // the max number of + int maxConnectionsToNuke; // the max number of // connections to nuke // during a cleanup - + protected HBaseRpcMetrics rpcMetrics; - + protected Configuration conf; @SuppressWarnings({"FieldCanBeLocal"}) @@ -165,7 +165,7 @@ public abstract class HBaseServer { protected HBaseRPCErrorHandler errorHandler = null; /** - * A convenience method to bind to a given address and report + * A convenience method to bind to a given address and report * better exceptions if the address is not a valid host. * @param socket the socket to bind * @param address the address to bind to @@ -174,13 +174,13 @@ public abstract class HBaseServer { * @throws UnknownHostException if the address isn't a valid host name * @throws IOException other random errors from bind */ - public static void bind(ServerSocket socket, InetSocketAddress address, + public static void bind(ServerSocket socket, InetSocketAddress address, int backlog) throws IOException { try { socket.bind(address, backlog); } catch (BindException e) { BindException bindException = - new BindException("Problem binding to " + address + " : " + + new BindException("Problem binding to " + address + " : " + e.getMessage()); bindException.initCause(e); throw bindException; @@ -188,7 +188,7 @@ public abstract class HBaseServer { // If they try to bind to a different host's address, give a better // error message. 
if ("Unresolved address".equals(e.getMessage())) { - throw new UnknownHostException("Invalid hostname for server: " + + throw new UnknownHostException("Invalid hostname for server: " + address.getHostName()); } throw e; @@ -211,7 +211,7 @@ public abstract class HBaseServer { this.timestamp = System.currentTimeMillis(); this.response = null; } - + @Override public String toString() { return param.toString() + " from " + connection.toString(); @@ -224,17 +224,17 @@ public abstract class HBaseServer { /** Listens on the socket. Creates jobs for the handler threads*/ private class Listener extends Thread { - + private ServerSocketChannel acceptChannel = null; //the accept channel private Selector selector = null; //the selector that we use for the server private InetSocketAddress address; //the address we bind at private Random rand = new Random(); private long lastCleanupRunTime = 0; //the last time when a cleanup connec- //-tion (for idle connections) ran - private long cleanupInterval = 10000; //the minimum interval between + private long cleanupInterval = 10000; //the minimum interval between //two cleanup runs private int backlogLength = conf.getInt("ipc.server.listen.queue.size", 128); - + public Listener() throws IOException { address = new InetSocketAddress(bindAddress, port); // Create a new server socket and set to non blocking mode @@ -255,7 +255,7 @@ public abstract class HBaseServer { /** cleanup connections from connectionList. Choose a random range * to scan and also have a limit on the number of the connections * that will be cleanedup per run. The criteria for cleanup is the time - * for which the connection was idle. If 'force' is true then all + * for which the connection was idle. If 'force' is true then all * connections will be looked at for the cleanup. * @param force all connections will be looked at for cleanup */ @@ -336,7 +336,7 @@ public abstract class HBaseServer { } } else { // we can run out of memory if we have too many threads - // log the event and sleep for a minute and give + // log the event and sleep for a minute and give // some thread(s) a chance to finish LOG.warn("Out of Memory in server select", e); closeCurrentConnection(key); @@ -363,7 +363,7 @@ public abstract class HBaseServer { selector= null; acceptChannel= null; - + // clean up all connections while (!connectionList.isEmpty()) { closeConnection(connectionList.remove(0)); @@ -385,7 +385,7 @@ public abstract class HBaseServer { InetSocketAddress getAddress() { return (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress(); } - + void doAccept(SelectionKey key) throws IOException, OutOfMemoryError { Connection c; ServerSocketChannel server = (ServerSocketChannel) key.channel(); @@ -415,10 +415,10 @@ public abstract class HBaseServer { int count = 0; Connection c = (Connection)key.attachment(); if (c == null) { - return; + return; } c.setLastContact(System.currentTimeMillis()); - + try { count = c.readAndProcess(); } catch (InterruptedException ieo) { @@ -429,7 +429,7 @@ public abstract class HBaseServer { } if (count < 0) { if (LOG.isDebugEnabled()) - LOG.debug(getName() + ": disconnecting client " + + LOG.debug(getName() + ": disconnecting client " + c.getHostAddress() + ". 
Number of active connections: "+ numConnections); closeConnection(c); @@ -438,7 +438,7 @@ public abstract class HBaseServer { else { c.setLastContact(System.currentTimeMillis()); } - } + } synchronized void doStop() { if (selector != null) { @@ -459,7 +459,7 @@ public abstract class HBaseServer { private class Responder extends Thread { private Selector writeSelector; private int pending; // connections waiting to register - + final static int PURGE_INTERVAL = 900000; // 15mins Responder() throws IOException { @@ -502,7 +502,7 @@ public abstract class HBaseServer { // LOG.debug("Checking for old call responses."); ArrayList calls; - + // get the list of channels from list of keys. synchronized (writeSelector.keys()) { calls = new ArrayList(writeSelector.keys().size()); @@ -510,12 +510,12 @@ public abstract class HBaseServer { while (iter.hasNext()) { SelectionKey key = iter.next(); Call call = (Call)key.attachment(); - if (call != null && key.channel() == call.connection.channel) { + if (call != null && key.channel() == call.connection.channel) { calls.add(call); } } } - + for(Call call : calls) { doPurge(call, now); } @@ -535,7 +535,7 @@ public abstract class HBaseServer { try { Thread.sleep(60000); } catch (Exception ignored) {} } } catch (Exception e) { - LOG.warn("Exception in Responder " + + LOG.warn("Exception in Responder " + StringUtils.stringifyException(e)); } } @@ -568,7 +568,7 @@ public abstract class HBaseServer { } // - // Remove calls that have been pending in the responseQueue + // Remove calls that have been pending in the responseQueue // for a long time. // private void doPurge(Call call, long now) { @@ -635,18 +635,18 @@ public abstract class HBaseServer { } } else { // - // If we were unable to write the entire response out, then - // insert in Selector queue. + // If we were unable to write the entire response out, then + // insert in Selector queue. // call.connection.responseQueue.addFirst(call); - + if (inHandler) { // set the serve time when the response has to be sent later call.timestamp = System.currentTimeMillis(); - + incPending(); try { - // Wakeup the thread blocked on select, only then can the call + // Wakeup the thread blocked on select, only then can the call // to channel.register() complete. writeSelector.wakeup(); channel.register(writeSelector, SelectionKey.OP_WRITE, call); @@ -659,7 +659,7 @@ public abstract class HBaseServer { } if (LOG.isDebugEnabled()) { LOG.debug(getName() + ": responding to #" + call.id + " from " + - call.connection + " Wrote partial " + numBytes + + call.connection + " Wrote partial " + numBytes + " bytes."); } } @@ -717,7 +717,7 @@ public abstract class HBaseServer { private long lastContact; private int dataLength; protected Socket socket; - // Cache the remote host & port info so that even if the socket is + // Cache the remote host & port info so that even if the socket is // disconnected, we can say where it used to connect to. 
private String hostAddress; private int remotePort; @@ -745,13 +745,13 @@ public abstract class HBaseServer { socketSendBufferSize); } } - } + } @Override public String toString() { - return getHostAddress() + ":" + remotePort; + return getHostAddress() + ":" + remotePort; } - + public String getHostAddress() { return hostAddress; } @@ -768,17 +768,17 @@ public abstract class HBaseServer { private boolean isIdle() { return rpcCount == 0; } - + /* Decrement the outstanding RPC count */ protected void decRpcCount() { rpcCount--; } - + /* Increment the outstanding RPC count */ private void incRpcCount() { rpcCount++; } - + protected boolean timedOut(long currentTime) { return isIdle() && currentTime - lastContact > maxIdleTime; } @@ -787,14 +787,14 @@ public abstract class HBaseServer { while (true) { /* Read at most one RPC. If the header is not read completely yet * then iterate until we read first RPC or until there is no data left. - */ + */ int count; if (dataLengthBuffer.remaining() > 0) { - count = channelRead(channel, dataLengthBuffer); - if (count < 0 || dataLengthBuffer.remaining() > 0) + count = channelRead(channel, dataLengthBuffer); + if (count < 0 || dataLengthBuffer.remaining() > 0) return count; } - + if (!versionRead) { //Every connection is expected to send the header. ByteBuffer versionBuffer = ByteBuffer.allocate(1); @@ -803,13 +803,13 @@ public abstract class HBaseServer { return count; } int version = versionBuffer.get(0); - - dataLengthBuffer.flip(); + + dataLengthBuffer.flip(); if (!HEADER.equals(dataLengthBuffer) || version != CURRENT_VERSION) { //Warning is ok since this is not supposed to happen. - LOG.warn("Incorrect header or version mismatch from " + + LOG.warn("Incorrect header or version mismatch from " + hostAddress + ":" + remotePort + - " got version " + version + + " got version " + version + " expected version " + CURRENT_VERSION); return -1; } @@ -817,11 +817,11 @@ public abstract class HBaseServer { versionRead = true; continue; } - + if (data == null) { dataLengthBuffer.flip(); dataLength = dataLengthBuffer.getInt(); - + if (dataLength == HBaseClient.PING_CALL_ID) { dataLengthBuffer.clear(); return 0; //ping message @@ -829,9 +829,9 @@ public abstract class HBaseServer { data = ByteBuffer.allocate(dataLength); incRpcCount(); // Increment the rpc count } - + count = channelRead(channel, data); - + if (data.remaining() == 0) { dataLengthBuffer.clear(); data.flip(); @@ -844,7 +844,7 @@ public abstract class HBaseServer { headerRead = true; data = null; continue; - } + } return count; } } @@ -858,18 +858,18 @@ public abstract class HBaseServer { new DataInputStream(new ByteArrayInputStream(data.array())); ticket = (UserGroupInformation) ObjectWritable.readObject(in, conf); } - + private void processData() throws IOException, InterruptedException { DataInputStream dis = new DataInputStream(new ByteArrayInputStream(data.array())); int id = dis.readInt(); // try to read an id - + if (LOG.isDebugEnabled()) LOG.debug(" got #" + id); - + Writable param = ReflectionUtils.newInstance(paramClass, conf); // read param - param.readFields(dis); - + param.readFields(dis); + Call call = new Call(id, param, this); callQueue.put(call); // queue the call; maybe blocked here } @@ -907,11 +907,11 @@ public abstract class HBaseServer { if (LOG.isDebugEnabled()) LOG.debug(getName() + ": has #" + call.id + " from " + call.connection); - + String errorClass = null; String error = null; Writable value = null; - + CurCall.set(call); UserGroupInformation previous = 
UserGroupInformation.getCurrentUGI(); UserGroupInformation.setCurrentUser(call.connection.ticket); @@ -969,22 +969,22 @@ public abstract class HBaseServer { } } - + protected HBaseServer(String bindAddress, int port, - Class paramClass, int handlerCount, + Class paramClass, int handlerCount, Configuration conf) - throws IOException + throws IOException { this(bindAddress, port, paramClass, handlerCount, conf, Integer.toString(port)); } /* Constructs a server listening on the named port and address. Parameters passed must * be of the named class. The handlerCount determines * the number of handler threads that will be used to process calls. - * + * */ - protected HBaseServer(String bindAddress, int port, - Class paramClass, int handlerCount, - Configuration conf, String serverName) + protected HBaseServer(String bindAddress, int port, + Class paramClass, int handlerCount, + Configuration conf, String serverName) throws IOException { this.bindAddress = bindAddress; this.conf = conf; @@ -993,14 +993,14 @@ public abstract class HBaseServer { this.handlerCount = handlerCount; this.socketSendBufferSize = 0; this.maxQueueSize = handlerCount * MAX_QUEUE_SIZE_PER_HANDLER; - this.callQueue = new LinkedBlockingQueue(maxQueueSize); + this.callQueue = new LinkedBlockingQueue(maxQueueSize); this.maxIdleTime = 2*conf.getInt("ipc.client.connection.maxidletime", 1000); this.maxConnectionsToNuke = conf.getInt("ipc.client.kill.max", 10); this.thresholdIdleConnections = conf.getInt("ipc.client.idlethreshold", 4000); - + // Start the listener here and let it bind to the port listener = new Listener(); - this.port = listener.getAddress().getPort(); + this.port = listener.getAddress().getPort(); this.rpcMetrics = new HBaseRpcMetrics(serverName, Integer.toString(this.port)); this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false); @@ -1017,7 +1017,7 @@ public abstract class HBaseServer { } connection.close(); } - + /** Sets the socket buffer size used for responding to RPCs. * @param size send size */ @@ -1028,7 +1028,7 @@ public abstract class HBaseServer { responder.start(); listener.start(); handlers = new Handler[handlerCount]; - + for (int i = 0; i < handlerCount; i++) { handlers[i] = new Handler(i); handlers[i].start(); @@ -1073,11 +1073,11 @@ public abstract class HBaseServer { public synchronized InetSocketAddress getListenerAddress() { return listener.getAddress(); } - - /** Called for each call. + + /** Called for each call. * @param param writable parameter * @param receiveTime time - * @return Writable + * @return Writable * @throws IOException e */ public abstract Writable call(Writable param, long receiveTime) @@ -1090,7 +1090,7 @@ public abstract class HBaseServer { public int getNumOpenConnections() { return numConnections; } - + /** * The number of rpc calls in the queue. * @return The number of rpc calls in the queue. @@ -1105,22 +1105,22 @@ public abstract class HBaseServer { */ public void setErrorHandler(HBaseRPCErrorHandler handler) { this.errorHandler = handler; - } + } /** - * When the read or write buffer size is larger than this limit, i/o will be + * When the read or write buffer size is larger than this limit, i/o will be * done in chunks of this size. Most RPC requests and responses would be * be smaller. */ private static int NIO_BUFFER_LIMIT = 8*1024; //should not be more than 64KB. - + /** * This is a wrapper around {@link WritableByteChannel#write(ByteBuffer)}. - * If the amount of data is large, it writes to channel in smaller chunks. 
- * This is to avoid jdk from creating many direct buffers as the size of + * If the amount of data is large, it writes to channel in smaller chunks. + * This is to avoid jdk from creating many direct buffers as the size of * buffer increases. This also minimizes extra copies in NIO layer - * as a result of multiple write operations required to write a large - * buffer. + * as a result of multiple write operations required to write a large + * buffer. * * @param channel writable byte channel to write to * @param buffer buffer to write @@ -1128,7 +1128,7 @@ public abstract class HBaseServer { * @throws java.io.IOException e * @see WritableByteChannel#write(ByteBuffer) */ - protected static int channelWrite(WritableByteChannel channel, + protected static int channelWrite(WritableByteChannel channel, ByteBuffer buffer) throws IOException { return (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.write(buffer) : channelIO(null, channel, buffer); @@ -1136,17 +1136,17 @@ public abstract class HBaseServer { /** * This is a wrapper around {@link ReadableByteChannel#read(ByteBuffer)}. - * If the amount of data is large, it writes to channel in smaller chunks. - * This is to avoid jdk from creating many direct buffers as the size of + * If the amount of data is large, it writes to channel in smaller chunks. + * This is to avoid jdk from creating many direct buffers as the size of * ByteBuffer increases. There should not be any performance degredation. - * + * * @param channel writable byte channel to write on * @param buffer buffer to write * @return number of bytes written * @throws java.io.IOException e * @see ReadableByteChannel#read(ByteBuffer) */ - protected static int channelRead(ReadableByteChannel channel, + protected static int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException { return (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.read(buffer) : channelIO(channel, null, buffer); @@ -1156,7 +1156,7 @@ public abstract class HBaseServer { * Helper for {@link #channelRead(ReadableByteChannel, ByteBuffer)} * and {@link #channelWrite(WritableByteChannel, ByteBuffer)}. Only * one of readCh or writeCh should be non-null. - * + * * @param readCh read channel * @param writeCh write channel * @param buf buffer to read or write into/out of @@ -1165,31 +1165,31 @@ public abstract class HBaseServer { * @see #channelRead(ReadableByteChannel, ByteBuffer) * @see #channelWrite(WritableByteChannel, ByteBuffer) */ - private static int channelIO(ReadableByteChannel readCh, + private static int channelIO(ReadableByteChannel readCh, WritableByteChannel writeCh, ByteBuffer buf) throws IOException { - + int originalLimit = buf.limit(); int initialRemaining = buf.remaining(); int ret = 0; - + while (buf.remaining() > 0) { try { int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT); buf.limit(buf.position() + ioSize); - - ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf); - + + ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf); + if (ret < ioSize) { break; } } finally { - buf.limit(originalLimit); + buf.limit(originalLimit); } } - int nBytes = initialRemaining - buf.remaining(); + int nBytes = initialRemaining - buf.remaining(); return (nBytes > 0) ? 
nBytes : ret; - } + } } diff --git a/core/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java b/core/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java index d6141cf..172ef5d 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java +++ b/core/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java @@ -31,16 +31,16 @@ import java.io.IOException; * Clients interact with the HMasterInterface to gain access to meta-level * HBase functionality, like finding an HRegionServer and creating/destroying * tables. - * + * *
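The channelWrite/channelRead/channelIO helpers in the HBaseServer hunk above cap every NIO read or write at NIO_BUFFER_LIMIT (8 KB) by temporarily shrinking the buffer's limit, which keeps the JDK from allocating one large direct buffer per call. A minimal standalone sketch of the same chunking idea, using only java.nio (an illustration of the technique, not the HBaseServer code itself):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.WritableByteChannel;

    public class ChunkedWrite {
      private static final int CHUNK = 8 * 1024;   // mirrors NIO_BUFFER_LIMIT above

      /** Writes buf to channel in slices of at most CHUNK bytes. */
      public static int write(WritableByteChannel channel, ByteBuffer buf)
          throws IOException {
        int originalLimit = buf.limit();
        int initialRemaining = buf.remaining();
        while (buf.remaining() > 0) {
          try {
            int ioSize = Math.min(buf.remaining(), CHUNK);
            buf.limit(buf.position() + ioSize);    // expose one chunk only
            int written = channel.write(buf);
            if (written < ioSize) {
              break;                               // channel not accepting more right now
            }
          } finally {
            buf.limit(originalLimit);              // always restore the caller's limit
          }
        }
        return initialRemaining - buf.remaining(); // bytes actually written
      }
    }

The real helpers additionally short-circuit to a single write or read when the buffer is already at or below the limit.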

    NOTE: if you change the interface, you must change the RPC version * number in HBaseRPCProtocolVersion - * + * */ public interface HMasterInterface extends HBaseRPCProtocolVersion { /** @return true if master is available */ public boolean isMasterRunning(); - + // Admin tools would use these cmds /** @@ -56,7 +56,7 @@ public interface HMasterInterface extends HBaseRPCProtocolVersion { * @throws IOException e */ public void deleteTable(final byte [] tableName) throws IOException; - + /** * Adds a column to the specified table * @param tableName table to modify @@ -73,8 +73,8 @@ public interface HMasterInterface extends HBaseRPCProtocolVersion { * @param descriptor new column descriptor * @throws IOException e */ - public void modifyColumn(final byte [] tableName, final byte [] columnName, - HColumnDescriptor descriptor) + public void modifyColumn(final byte [] tableName, final byte [] columnName, + HColumnDescriptor descriptor) throws IOException; @@ -86,17 +86,17 @@ public interface HMasterInterface extends HBaseRPCProtocolVersion { */ public void deleteColumn(final byte [] tableName, final byte [] columnName) throws IOException; - + /** * Puts the table on-line (only needed if table has been previously taken offline) * @param tableName table to enable * @throws IOException e */ public void enableTable(final byte [] tableName) throws IOException; - + /** * Take table offline - * + * * @param tableName table to take offline * @throws IOException e */ @@ -104,7 +104,7 @@ public interface HMasterInterface extends HBaseRPCProtocolVersion { /** * Modify a table's metadata - * + * * @param tableName table to modify * @param op the operation to do * @param args arguments for operation diff --git a/core/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java b/core/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java index 6188e69..71a0447 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java +++ b/core/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java @@ -27,13 +27,13 @@ import org.apache.hadoop.io.MapWritable; import java.io.IOException; /** - * HRegionServers interact with the HMasterRegionInterface to report on local + * HRegionServers interact with the HMasterRegionInterface to report on local * goings-on and to obtain data-handling instructions from the HMaster. *
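The HMasterInterface hunk above enumerates the master's admin operations (createTable, deleteTable, add/modify/deleteColumn, enableTable, disableTable, modifyTable). Application code normally reaches them through HBaseAdmin rather than this RPC interface directly. A hedged sketch against the 0.20-era client API in this tree; the table and family names are invented:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class AdminSketch {
      public static void main(String[] args) throws Exception {
        HBaseConfiguration conf = new HBaseConfiguration();   // reads hbase-site.xml
        HBaseAdmin admin = new HBaseAdmin(conf);

        // createTable: one column family to start with.
        HTableDescriptor desc = new HTableDescriptor("demo_table");
        desc.addFamily(new HColumnDescriptor("cf"));
        if (!admin.tableExists("demo_table")) {
          admin.createTable(desc);
        }

        // Schema changes go disable -> alter -> enable.
        admin.disableTable("demo_table");
        admin.addColumn("demo_table", new HColumnDescriptor("cf2"));
        admin.enableTable("demo_table");
      }
    }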

    Changes here need to be reflected in HbaseObjectWritable HbaseRPC#Invoker. - * + * *

    NOTE: if you change the interface, you must change the RPC version * number in HBaseRPCProtocolVersion - * + * */ public interface HMasterRegionInterface extends HBaseRPCProtocolVersion { @@ -49,16 +49,16 @@ public interface HMasterRegionInterface extends HBaseRPCProtocolVersion { /** * Called to renew lease, tell master what the region server is doing and to * receive new instructions from the master - * + * * @param info server's address and start code * @param msgs things the region server wants to tell the master - * @param mostLoadedRegions Array of HRegionInfos that should contain the + * @param mostLoadedRegions Array of HRegionInfos that should contain the * reporting server's most loaded regions. These are candidates for being * rebalanced. * @return instructions from the master to the region server * @throws IOException e */ - public HMsg[] regionServerReport(HServerInfo info, HMsg msgs[], + public HMsg[] regionServerReport(HServerInfo info, HMsg msgs[], HRegionInfo mostLoadedRegions[]) throws IOException; } \ No newline at end of file diff --git a/core/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java b/core/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java index f9e2793..4cbe52a 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java +++ b/core/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java @@ -35,26 +35,26 @@ import java.io.IOException; /** * Clients interact with HRegionServers using a handle to the HRegionInterface. - * + * *
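The HRegionInterface methods in the hunks below (get, put, multi-put, delete, checkAndPut, incrementColumnValue, and the openScanner/next/close cycle) are what HTable invokes region by region. A hedged client-side sketch of the same operations through the public HTable API, with invented table, row, and family names:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DataPathSketch {
      public static void main(String[] args) throws Exception {
        HBaseConfiguration conf = new HBaseConfiguration();
        HTable table = new HTable(conf, "demo_table");
        byte[] row = Bytes.toBytes("row1");
        byte[] cf = Bytes.toBytes("cf");
        byte[] qual = Bytes.toBytes("q");

        // put -> HRegionInterface.put(regionName, put)
        Put put = new Put(row);
        put.add(cf, qual, Bytes.toBytes("v1"));
        table.put(put);
        table.flushCommits();

        // get -> HRegionInterface.get(regionName, get)
        Result r = table.get(new Get(row));
        byte[] current = r.getValue(cf, qual);

        // checkAndPut: applied only if the stored value still equals 'current'
        Put update = new Put(row);
        update.add(cf, qual, Bytes.toBytes("v2"));
        boolean applied = table.checkAndPut(row, cf, qual, current, update);

        // incrementColumnValue on a long-encoded counter cell
        long hits = table.incrementColumnValue(row, cf, Bytes.toBytes("hits"), 1);
        System.out.println("checkAndPut applied=" + applied + ", hits=" + hits);

        // openScanner/next/close surface as a ResultScanner on the client
        ResultScanner scanner = table.getScanner(new Scan());
        try {
          for (Result res : scanner) {
            // process each row
          }
        } finally {
          scanner.close();
        }
      }
    }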

    NOTE: if you change the interface, you must change the RPC version * number in HBaseRPCProtocolVersion */ public interface HRegionInterface extends HBaseRPCProtocolVersion { - /** + /** * Get metainfo about an HRegion - * + * * @param regionName name of the region * @return HRegionInfo object for region * @throws NotServingRegionException e */ public HRegionInfo getRegionInfo(final byte [] regionName) throws NotServingRegionException; - + /** - * Return all the data for the row that matches row exactly, + * Return all the data for the row that matches row exactly, * or the one that immediately preceeds it. - * + * * @param regionName region name * @param row row key * @param family Column family to look for row in. @@ -66,11 +66,11 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { throws IOException; /** - * + * * @return the regions served by this regionserver */ public HRegion [] getOnlineRegionsAsArray(); - + /** * Perform Get operation. * @param regionName name of region to get from @@ -90,17 +90,17 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { public boolean exists(byte [] regionName, Get get) throws IOException; /** - * Put data into the specified region + * Put data into the specified region * @param regionName region name * @param put the data to be put * @throws IOException e */ public void put(final byte [] regionName, final Put put) throws IOException; - + /** * Put an array of puts into the specified region - * + * * @param regionName region name * @param puts array of puts to execute * @return The number of processed put's. Returns -1 if all Puts @@ -111,7 +111,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { throws IOException; /** - * Deletes all the KeyValues that match those found in the Delete object, + * Deletes all the KeyValues that match those found in the Delete object, * if their ts <= to the Delete. In case of a delete with a specific ts it * only deletes that specific KeyValue. * @param regionName region name @@ -123,7 +123,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { /** * Put an array of deletes into the specified region - * + * * @param regionName region name * @param deletes delete array to execute * @return The number of processed deletes. Returns -1 if all Deletes @@ -137,7 +137,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { * Atomically checks if a row/family/qualifier value match the expectedValue. * If it does, it adds the put. If passed expected value is null, then the * check is for non-existance of the row/column. - * + * * @param regionName region name * @param row row to check * @param family column family @@ -147,16 +147,16 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { * @throws IOException e * @return true if the new put was execute, false otherwise */ - public boolean checkAndPut(final byte[] regionName, final byte [] row, + public boolean checkAndPut(final byte[] regionName, final byte [] row, final byte [] family, final byte [] qualifier, final byte [] value, final Put put) throws IOException; - + /** * Atomically increments a column value. If the column value isn't long-like, * this could throw an exception. If passed expected value is null, then the * check is for non-existance of the row/column. 
- * + * * @param regionName region name * @param row row to check * @param family column family @@ -166,18 +166,18 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { * @return new incremented column value * @throws IOException e */ - public long incrementColumnValue(byte [] regionName, byte [] row, + public long incrementColumnValue(byte [] regionName, byte [] row, byte [] family, byte [] qualifier, long amount, boolean writeToWAL) throws IOException; - - + + // // remote scanner interface // /** * Opens a remote scanner with a RowFilter. - * + * * @param regionName name of region to scan * @param scan configured scan object * @return scannerId scanner identifier used in other calls @@ -185,7 +185,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { */ public long openScanner(final byte [] regionName, final Scan scan) throws IOException; - + /** * Get the next set of values * @param scannerId clientId passed to openScanner @@ -193,7 +193,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { * @throws IOException e */ public Result next(long scannerId) throws IOException; - + /** * Get the next set of values * @param scannerId clientId passed to openScanner @@ -204,10 +204,10 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { * @throws IOException e */ public Result [] next(long scannerId, int numberOfRows) throws IOException; - + /** * Close a scanner - * + * * @param scannerId the scanner id returned by openScanner * @throws IOException e */ @@ -233,15 +233,15 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { */ public void unlockRow(final byte [] regionName, final long lockId) throws IOException; - - + + /** * Method used when a master is taking the place of another failed one. * @return All regions assigned on this region server * @throws IOException e */ public HRegionInfo[] getRegionsAssignment() throws IOException; - + /** * Method used when a master is taking the place of another failed one. * @return The HSI diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java b/core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java index a6a8995..7133860 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java @@ -35,7 +35,7 @@ public class TableRecordReader implements RecordReader { private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl(); - + /** * Restart from survivable exceptions by creating a new scanner. * @@ -114,7 +114,7 @@ implements RecordReader { } public long getPos() { - + // This should be the ordinal tuple in the range; // not clear how to calculate... 
return this.recordReaderImpl.getPos(); diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java b/core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java index f6f99e2..30174e2 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java @@ -92,7 +92,7 @@ public class TableRecordReaderImpl { restart(startRow); } - byte[] getStartRow() { + byte[] getStartRow() { return this.startRow; } /** diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java index fd5b3b0..3d40695 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java @@ -28,7 +28,7 @@ import org.apache.hadoop.util.ProgramDriver; public class Driver { /** * @param args - * @throws Throwable + * @throws Throwable */ public static void main(String[] args) throws Throwable { ProgramDriver pgd = new ProgramDriver(); diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java index f267758..7b08134 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java @@ -51,7 +51,7 @@ public class Export { * @param value The columns. * @param context The current context. * @throws IOException When something is broken with the data. - * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, + * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, * org.apache.hadoop.mapreduce.Mapper.Context) */ @Override @@ -68,7 +68,7 @@ public class Export { /** * Sets up the actual job. - * + * * @param conf The current configuration. * @param args The command line parameters. * @return The newly created job. @@ -115,7 +115,7 @@ public class Export { /** * Main entry point. - * + * * @param args The command line parameters. * @throws Exception When running the job fails. */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java index 1e4f0fb..c38337b 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java @@ -40,31 +40,31 @@ public class GroupingTableMapper extends TableMapper implements Configurable { /** - * JobConf parameter to specify the columns used to produce the key passed to + * JobConf parameter to specify the columns used to produce the key passed to * collect from the map phase. */ public static final String GROUP_COLUMNS = "hbase.mapred.groupingtablemap.columns"; - + /** The grouping columns. */ protected byte [][] columns; /** The current configuration. */ private Configuration conf = null; - + /** - * Use this before submitting a TableMap job. It will appropriately set up + * Use this before submitting a TableMap job. It will appropriately set up * the job. * * @param table The table to be processed. * @param scan The scan with the columns etc. - * @param groupColumns A space separated list of columns used to form the + * @param groupColumns A space separated list of columns used to form the * key used in collect. * @param mapper The mapper class. * @param job The current job. 
* @throws IOException When setting up the job fails. */ @SuppressWarnings("unchecked") - public static void initJob(String table, Scan scan, String groupColumns, + public static void initJob(String table, Scan scan, String groupColumns, Class mapper, Job job) throws IOException { TableMapReduceUtil.initTableMapperJob(table, scan, mapper, ImmutableBytesWritable.class, Result.class, job); @@ -72,18 +72,18 @@ extends TableMapper implements Configurable { } /** - * Extract the grouping columns from value to construct a new key. Pass the - * new key and value to reduce. If any of the grouping columns are not found + * Extract the grouping columns from value to construct a new key. Pass the + * new key and value to reduce. If any of the grouping columns are not found * in the value, the record is skipped. - * - * @param key The current key. + * + * @param key The current key. * @param value The current value. - * @param context The current context. + * @param context The current context. * @throws IOException When writing the record fails. * @throws InterruptedException When the job is aborted. */ @Override - public void map(ImmutableBytesWritable key, Result value, Context context) + public void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException { byte[][] keyVals = extractKeyValues(value); if(keyVals != null) { @@ -97,7 +97,7 @@ extends TableMapper implements Configurable { * null if any of the columns are not found. *
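GroupingTableMapper.initJob, whose signature appears in the hunk above, wires the grouping mapper into a job: the values of the listed columns are concatenated into the map output key, and rows missing any of them are skipped. A hedged driver sketch with invented table and column names (columns are given as a space-separated "family:qualifier" list, as the javadoc describes):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.mapreduce.GroupingTableMapper;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

    public class GroupingJobSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HBaseConfiguration();
        Job job = new Job(conf, "grouping-sketch");
        job.setJarByClass(GroupingJobSketch.class);

        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("cf"));

        // Key each map output by the concatenation of cf:a and cf:b.
        GroupingTableMapper.initJob("demo_table", scan, "cf:a cf:b",
            GroupingTableMapper.class, job);

        // A real job would add a reducer keyed on the grouped values;
        // this sketch simply discards the output.
        job.setOutputFormatClass(NullOutputFormat.class);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }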

    * Override this method if you want to deal with nulls differently. - * + * * @param r The current values. * @return Array of byte values. */ @@ -107,7 +107,7 @@ extends TableMapper implements Configurable { int numCols = columns.length; if (numCols > 0) { for (KeyValue value: r.list()) { - byte [] column = KeyValue.makeColumn(value.getFamily(), + byte [] column = KeyValue.makeColumn(value.getFamily(), value.getQualifier()); for (int i = 0; i < numCols; i++) { if (Bytes.equals(column, columns[i])) { @@ -125,9 +125,9 @@ extends TableMapper implements Configurable { /** * Create a key by concatenating multiple column values. - *

    + *

    * Override this function in order to produce different types of keys. - * + * * @param vals The current key/values. * @return A key generated by concatenating multiple column values. */ @@ -151,7 +151,7 @@ extends TableMapper implements Configurable { /** * Returns the current configuration. - * + * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -162,7 +162,7 @@ extends TableMapper implements Configurable { /** * Sets the configuration. This is used to set up the grouping details. - * + * * @param configuration The configuration to set. * @see org.apache.hadoop.conf.Configurable#setConf( * org.apache.hadoop.conf.Configuration) @@ -176,5 +176,5 @@ extends TableMapper implements Configurable { columns[i] = Bytes.toBytes(cols[i]); } } - + } diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java index fefb6bc..2c81723 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java @@ -50,7 +50,7 @@ import org.mortbay.log.Log; public class HFileOutputFormat extends FileOutputFormat { public RecordWriter getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException { - // Get the path of the temporary output file + // Get the path of the temporary output file final Path outputPath = FileOutputFormat.getOutputPath(context); final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath(); Configuration conf = context.getConfiguration(); @@ -127,7 +127,7 @@ public class HFileOutputFormat extends FileOutputFormat The type of the key. * @param The type of the value. */ -public class HRegionPartitioner +public class HRegionPartitioner extends Partitioner implements Configurable { - + private final Log LOG = LogFactory.getLog(TableInputFormat.class); private Configuration conf = null; private HTable table; - private byte[][] startKeys; - + private byte[][] startKeys; + /** - * Gets the partition number for a given key (hence record) given the total + * Gets the partition number for a given key (hence record) given the total * number of partitions i.e. number of reduce-tasks for the job. - * + * *

    Typically a hash function on all or a subset of the key.
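HRegionPartitioner, covered in this hunk, keys each reduce input to the reducer that owns the matching region of the output table, falling back to hashing when there are fewer reducers than regions. It reads the output table name from TableOutputFormat.OUTPUT_TABLE, which the usual TableMapReduceUtil reducer setup is expected to have placed in the configuration. A small hedged helper showing how a job would opt in:

    import java.io.IOException;
    import org.apache.hadoop.hbase.mapreduce.HRegionPartitioner;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class RegionPartitioning {
      /**
       * Routes reduce input by region of the output table and caps the number
       * of reducers at the table's region count. Call this after the reducer
       * side of the job (output table included) has been configured.
       */
      public static void useRegionPartitioner(Job job, String outputTable)
          throws IOException {
        job.setPartitionerClass(HRegionPartitioner.class);
        TableMapReduceUtil.limitNumReduceTasks(outputTable, job);
      }
    }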

    * * @param key The key to be partitioned. @@ -80,7 +80,7 @@ implements Configurable { if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){ if (i >= numPartitions-1){ // cover if we have less reduces then regions. - return (Integer.toString(i).hashCode() + return (Integer.toString(i).hashCode() & Integer.MAX_VALUE) % numPartitions; } return i; @@ -92,7 +92,7 @@ implements Configurable { /** * Returns the current configuration. - * + * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -104,7 +104,7 @@ implements Configurable { /** * Sets the configuration. This is used to determine the start keys for the * given table. - * + * * @param configuration The configuration to set. * @see org.apache.hadoop.conf.Configurable#setConf( * org.apache.hadoop.conf.Configuration) @@ -114,7 +114,7 @@ implements Configurable { this.conf = configuration; try { HBaseConfiguration.addHbaseResources(conf); - this.table = new HTable(this.conf, + this.table = new HTable(this.conf, configuration.get(TableOutputFormat.OUTPUT_TABLE)); } catch (IOException e) { LOG.error(e); diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java index a8cd6e3..fd5d8fe 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java @@ -33,9 +33,9 @@ public class IdentityTableMapper extends TableMapper { /** - * Use this before submitting a TableMap job. It will appropriately set up + * Use this before submitting a TableMap job. It will appropriately set up * the job. - * + * * @param table The table name. * @param scan The scan with the columns to scan. * @param mapper The mapper class. @@ -51,16 +51,16 @@ extends TableMapper { /** * Pass the key, value to reduce. - * - * @param key The current key. + * + * @param key The current key. * @param value The current value. - * @param context The current context. + * @param context The current context. * @throws IOException When writing the record fails. * @throws InterruptedException When the job is aborted. */ - public void map(ImmutableBytesWritable key, Result value, Context context) + public void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException { context.write(key, value); } - + } diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java index eb7609c..90c0a8e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java @@ -27,44 +27,44 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.OutputFormat; /** - * Convenience class that simply writes all values (which must be - * {@link org.apache.hadoop.hbase.client.Put Put} or + * Convenience class that simply writes all values (which must be + * {@link org.apache.hadoop.hbase.client.Put Put} or * {@link org.apache.hadoop.hbase.client.Delete Delete} instances) - * passed to it out to the configured HBase table. This works in combination + * passed to it out to the configured HBase table. This works in combination * with {@link TableOutputFormat} which actually does the writing to HBase.

    - * + * * Keys are passed along but ignored in TableOutputFormat. However, they can * be used to control how your values will be divided up amongst the specified * number of reducers.
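The one-step setup quoted just below (TableMapReduceUtil.initTableReducerJob with IdentityTableReducer) is typically paired with a mapper that turns each scanned Result into a Put, the same shape as the Import mapper elsewhere in this patch. A hedged copy-table sketch with invented source and target table names:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.IdentityTableReducer;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.mapreduce.Job;

    public class CopyTableSketch {

      /** Re-emits every cell of the scanned row as a Put for the reducer. */
      static class CopyMapper extends TableMapper<ImmutableBytesWritable, Put> {
        @Override
        public void map(ImmutableBytesWritable row, Result value, Context context)
            throws IOException, InterruptedException {
          Put put = new Put(row.get());
          for (KeyValue kv : value.raw()) {
            put.add(kv);
          }
          context.write(row, put);
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new HBaseConfiguration();
        Job job = new Job(conf, "copy-table-sketch");
        job.setJarByClass(CopyTableSketch.class);

        TableMapReduceUtil.initTableMapperJob("source_table", new Scan(),
            CopyMapper.class, ImmutableBytesWritable.class, Put.class, job);
        TableMapReduceUtil.initTableReducerJob("target_table",
            IdentityTableReducer.class, job);
        TableMapReduceUtil.setScannerCaching(job, 500);  // more rows per scanner RPC

        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }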

    - * - * You can also use the {@link TableMapReduceUtil} class to set up the two + * + * You can also use the {@link TableMapReduceUtil} class to set up the two * classes in one step: *

    * TableMapReduceUtil.initTableReducerJob("table", IdentityTableReducer.class, job); *
    * This will also set the proper {@link TableOutputFormat} which is given the - * table parameter. The - * {@link org.apache.hadoop.hbase.client.Put Put} or + * table parameter. The + * {@link org.apache.hadoop.hbase.client.Put Put} or * {@link org.apache.hadoop.hbase.client.Delete Delete} define the * row and columns implicitly. */ -public class IdentityTableReducer +public class IdentityTableReducer extends TableReducer { @SuppressWarnings("unused") private static final Log LOG = LogFactory.getLog(IdentityTableReducer.class); - + /** - * Writes each given record, consisting of the row key and the given values, - * to the configured {@link OutputFormat}. It is emitting the row key and each - * {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs. - * - * @param key The current row key. - * @param values The {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given + * Writes each given record, consisting of the row key and the given values, + * to the configured {@link OutputFormat}. It is emitting the row key and each + * {@link org.apache.hadoop.hbase.client.Put Put} or + * {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs. + * + * @param key The current row key. + * @param values The {@link org.apache.hadoop.hbase.client.Put Put} or + * {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given * row. - * @param context The context of the reduce. + * @param context The context of the reduce. * @throws IOException When writing the record fails. * @throws InterruptedException When the job gets interrupted. */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 2588c3b..653de67 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -49,7 +49,7 @@ public class Import { * @param value The columns. * @param context The current context. * @throws IOException When something is broken with the data. - * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, + * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, * org.apache.hadoop.mapreduce.Mapper.Context) */ @Override @@ -63,7 +63,7 @@ public class Import { } } - private static Put resultToPut(ImmutableBytesWritable key, Result result) + private static Put resultToPut(ImmutableBytesWritable key, Result result) throws IOException { Put put = new Put(key.get()); for (KeyValue kv : result.raw()) { @@ -75,13 +75,13 @@ public class Import { /** * Sets up the actual job. - * + * * @param conf The current configuration. * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ - public static Job createSubmittableJob(Configuration conf, String[] args) + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { String tableName = args[0]; Path inputDir = new Path(args[1]); @@ -109,7 +109,7 @@ public class Import { /** * Main entry point. - * + * * @param args The command line parameters. * @throws Exception When running the job fails. 
*/ diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java index 3cf58eb..81d2746 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java @@ -46,7 +46,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; * {@link Put} or a {@link Delete} instance. All tables must already exist, and * all Puts and Deletes must reference only valid column families. *

    - * + * *
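MultiTableOutputFormat, whose javadoc this hunk touches, routes each Put or Delete to the table named by the record's output key; write-ahead logging can be turned off through WAL_PROPERTY, as noted below. A hedged reducer sketch that writes derived rows to two tables (table, family, and column names are invented, and the key-is-table-name convention is assumed from the class's normal usage):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;

    public class TwoTableReducer
        extends Reducer<Text, LongWritable, ImmutableBytesWritable, Put> {

      private static final ImmutableBytesWritable EVENTS =
          new ImmutableBytesWritable(Bytes.toBytes("events"));
      private static final ImmutableBytesWritable TOTALS =
          new ImmutableBytesWritable(Bytes.toBytes("totals"));

      @Override
      protected void reduce(Text key, Iterable<LongWritable> values, Context context)
          throws IOException, InterruptedException {
        long sum = 0;
        for (LongWritable v : values) {
          sum += v.get();
        }
        byte[] row = Bytes.toBytes(key.toString());

        Put detail = new Put(row);                 // destined for the "events" table
        detail.add(Bytes.toBytes("cf"), Bytes.toBytes("count"), Bytes.toBytes(sum));
        context.write(EVENTS, detail);

        Put total = new Put(row);                  // destined for the "totals" table
        total.add(Bytes.toBytes("cf"), Bytes.toBytes("total"), Bytes.toBytes(sum));
        context.write(TOTALS, total);
      }
    }

The driver would set job.setOutputFormatClass(MultiTableOutputFormat.class) and, per the javadoc below, can disable the WAL through the WAL_PROPERTY configuration key.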

    * Write-ahead logging (HLog) for Puts can be disabled by setting * {@link #WAL_PROPERTY} to {@link #WAL_OFF}. Default value is {@link #WAL_ON}. @@ -114,7 +114,7 @@ public class MultiTableOutputFormat extends OutputFormat { - + /** Counter enumeration to count the actual rows. */ public static enum Counters {ROWS} /** * Maps the data. - * + * * @param row The current table row key. * @param values The columns. * @param context The current context. * @throws IOException When something is broken with the data. - * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, + * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, * org.apache.hadoop.mapreduce.Mapper.Context) */ @Override @@ -76,13 +76,13 @@ public class RowCounter { /** * Sets up the actual job. - * + * * @param conf The current configuration. * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ - public static Job createSubmittableJob(Configuration conf, String[] args) + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { String tableName = args[0]; Job job = new Job(conf, NAME + "_" + tableName); @@ -107,7 +107,7 @@ public class RowCounter { scan.addColumn(Bytes.toBytes(fields[0]), Bytes.toBytes(fields[1])); } } - } + } // Second argument is the table name. job.setOutputFormatClass(NullOutputFormat.class); TableMapReduceUtil.initTableMapperJob(tableName, scan, @@ -118,7 +118,7 @@ public class RowCounter { /** * Main entry point. - * + * * @param args The command line parameters. * @throws Exception When running the job fails. */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java index 5c7af50..af3d588 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java @@ -52,7 +52,7 @@ implements Configurable { private byte [] endkey; private byte [][] splits; private int lastReduces = -1; - + @Override public int getPartition(final ImmutableBytesWritable key, final VALUE value, final int reduces) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java index 96ac3eb..89674a6 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java @@ -34,11 +34,11 @@ import org.apache.hadoop.util.StringUtils; /** * Convert HBase tabular data into a format that is consumable by Map/Reduce. */ -public class TableInputFormat extends TableInputFormatBase +public class TableInputFormat extends TableInputFormatBase implements Configurable { - + private final Log LOG = LogFactory.getLog(TableInputFormat.class); - + /** Job parameter that specifies the input table. */ public static final String INPUT_TABLE = "hbase.mapreduce.inputtable"; /** Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified. @@ -61,13 +61,13 @@ implements Configurable { public static final String SCAN_CACHEBLOCKS = "hbase.mapreduce.scan.cacheblocks"; /** The number of rows for caching that will be passed to scanners. */ public static final String SCAN_CACHEDROWS = "hbase.mapreduce.scan.cachedrows"; - + /** The configuration. 
*/ private Configuration conf = null; /** * Returns the current configuration. - * + * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -79,7 +79,7 @@ implements Configurable { /** * Sets the configuration. This is used to set the details for the table to * be scanned. - * + * * @param configuration The configuration to set. * @see org.apache.hadoop.conf.Configurable#setConf( * org.apache.hadoop.conf.Configuration) @@ -93,9 +93,9 @@ implements Configurable { } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); } - + Scan scan = null; - + if (conf.get(SCAN) != null) { try { scan = TableMapReduceUtil.convertStringToScan(conf.get(SCAN)); @@ -105,22 +105,22 @@ implements Configurable { } else { try { scan = new Scan(); - + if (conf.get(SCAN_COLUMNS) != null) { scan.addColumns(conf.get(SCAN_COLUMNS)); } - - if (conf.get(SCAN_COLUMN_FAMILY) != null) { + + if (conf.get(SCAN_COLUMN_FAMILY) != null) { scan.addFamily(Bytes.toBytes(conf.get(SCAN_COLUMN_FAMILY))); } - + if (conf.get(SCAN_TIMESTAMP) != null) { scan.setTimeStamp(Long.parseLong(conf.get(SCAN_TIMESTAMP))); } - + if (conf.get(SCAN_TIMERANGE_START) != null && conf.get(SCAN_TIMERANGE_END) != null) { scan.setTimeRange( - Long.parseLong(conf.get(SCAN_TIMERANGE_START)), + Long.parseLong(conf.get(SCAN_TIMERANGE_START)), Long.parseLong(conf.get(SCAN_TIMERANGE_END))); } @@ -141,5 +141,5 @@ implements Configurable { setScan(scan); } - + } diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java index 58145ee..b6865de 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java @@ -40,8 +40,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.util.StringUtils; /** - * A base for {@link TableInputFormat}s. Receives a {@link HTable}, an - * {@link Scan} instance that defines the input columns etc. Subclasses may use + * A base for {@link TableInputFormat}s. Receives a {@link HTable}, an + * {@link Scan} instance that defines the input columns etc. Subclasses may use * other TableRecordReader implementations. *
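TableInputFormat (previous hunk) is driven entirely by job configuration: either a serialized Scan under SCAN, or the individual SCAN_* keys listed above. A hedged sketch that configures a family-restricted scan through the public constants (table and family names are invented; TableMapReduceUtil.initTableMapperJob remains the more common route):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
    import org.apache.hadoop.mapreduce.Job;

    public class InputConfigSketch {
      public static Job newScanJob() throws Exception {
        Configuration conf = new HBaseConfiguration();
        conf.set(TableInputFormat.INPUT_TABLE, "demo_table");
        conf.set(TableInputFormat.SCAN_COLUMN_FAMILY, "cf");
        conf.set(TableInputFormat.SCAN_CACHEDROWS, "500");    // rows fetched per scanner next()
        conf.set(TableInputFormat.SCAN_CACHEBLOCKS, "false"); // don't pollute the block cache

        Job job = new Job(conf, "scan-demo-table");
        job.setInputFormatClass(TableInputFormat.class);
        return job;
      }
    }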

    * An example of a subclass: @@ -69,7 +69,7 @@ import org.apache.hadoop.util.StringUtils; */ public abstract class TableInputFormatBase extends InputFormat { - + final Log LOG = LogFactory.getLog(TableInputFormatBase.class); /** Holds the details for the internal scanner. */ @@ -79,17 +79,17 @@ extends InputFormat { /** The reader scanning the table, can be a custom one. */ private TableRecordReader tableRecordReader = null; - + /** * Builds a TableRecordReader. If no TableRecordReader was provided, uses * the default. - * + * * @param split The split to work with. * @param context The current context. * @return The newly created record reader. * @throws IOException When creating the reader fails. * @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader( - * org.apache.hadoop.mapreduce.InputSplit, + * org.apache.hadoop.mapreduce.InputSplit, * org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override @@ -124,7 +124,7 @@ extends InputFormat { @Override public List getSplits(JobContext context) throws IOException { Pair keys = table.getStartEndKeys(); - if (keys == null || keys.getFirst() == null || + if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) { throw new IOException("Expecting at least one region."); } @@ -132,7 +132,7 @@ extends InputFormat { throw new IOException("No table was provided."); } int count = 0; - List splits = new ArrayList(keys.getFirst().length); + List splits = new ArrayList(keys.getFirst().length); for (int i = 0; i < keys.getFirst().length; i++) { if ( !includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) { continue; @@ -144,19 +144,19 @@ extends InputFormat { // determine if the given start an stop key fall into the region if ((startRow.length == 0 || keys.getSecond()[i].length == 0 || Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) && - (stopRow.length == 0 || + (stopRow.length == 0 || Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) { - byte[] splitStart = startRow.length == 0 || - Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ? + byte[] splitStart = startRow.length == 0 || + Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ? keys.getFirst()[i] : startRow; - byte[] splitStop = (stopRow.length == 0 || + byte[] splitStop = (stopRow.length == 0 || Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) && - keys.getSecond()[i].length > 0 ? + keys.getSecond()[i].length > 0 ? keys.getSecond()[i] : stopRow; InputSplit split = new TableSplit(table.getTableName(), splitStart, splitStop, regionLocation); splits.add(split); - if (LOG.isDebugEnabled()) + if (LOG.isDebugEnabled()) LOG.debug("getSplits: split -> " + (count++) + " -> " + split); } } @@ -209,7 +209,7 @@ extends InputFormat { /** * Gets the scan defining the actual details like columns etc. - * + * * @return The internal scan instance. */ public Scan getScan() { @@ -219,7 +219,7 @@ extends InputFormat { /** * Sets the scan defining the actual details like columns etc. - * + * * @param scan The scan to set. */ public void setScan(Scan scan) { @@ -229,7 +229,7 @@ extends InputFormat { /** * Allows subclasses to set the {@link TableRecordReader}. * - * @param tableRecordReader A different {@link TableRecordReader} + * @param tableRecordReader A different {@link TableRecordReader} * implementation. 
*/ protected void setTableRecordReader(TableRecordReader tableRecordReader) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index e4d93ca..b332280 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -41,11 +41,11 @@ import org.apache.hadoop.conf.Configuration; */ @SuppressWarnings("unchecked") public class TableMapReduceUtil { - + /** - * Use this before submitting a TableMap job. It will appropriately set up + * Use this before submitting a TableMap job. It will appropriately set up * the job. - * + * * @param table The table name to read from. * @param scan The scan instance with the columns, time range etc. * @param mapper The mapper class to use. @@ -55,8 +55,8 @@ public class TableMapReduceUtil { * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, + Class mapper, + Class outputKeyClass, Class outputValueClass, Job job) throws IOException { job.setInputFormatClass(TableInputFormat.class); if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass); @@ -69,13 +69,13 @@ public class TableMapReduceUtil { /** * Writes the given scan into a Base64 encoded string. - * + * * @param scan The scan to write out. * @return The scan saved in a Base64 encoded string. * @throws IOException When writing the scan fails. */ static String convertScanToString(Scan scan) throws IOException { - ByteArrayOutputStream out = new ByteArrayOutputStream(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(out); scan.write(dos); return Base64.encodeBytes(out.toByteArray()); @@ -83,7 +83,7 @@ public class TableMapReduceUtil { /** * Converts the given Base64 string back into a Scan instance. - * + * * @param base64 The scan details. * @return The newly created Scan instance. * @throws IOException When reading the scan instance fails. @@ -95,15 +95,15 @@ public class TableMapReduceUtil { scan.readFields(dis); return scan; } - + /** * Use this before submitting a TableReduce job. It will * appropriately set up the JobConf. - * + * * @param table The output table. * @param reducer The reducer class to use. * @param job The current job to adjust. - * @throws IOException When determining the region count fails. + * @throws IOException When determining the region count fails. */ public static void initTableReducerJob(String table, Class reducer, Job job) @@ -131,16 +131,16 @@ public class TableMapReduceUtil { /** * Use this before submitting a TableReduce job. It will * appropriately set up the JobConf. - * + * * @param table The output table. * @param reducer The reducer class to use. * @param job The current job to adjust. - * @param partitioner Partitioner to use. Pass null to use + * @param partitioner Partitioner to use. Pass null to use * default partitioner. * @param quorumAddress Distant cluster to write to * @param serverClass redefined hbase.regionserver.class * @param serverImpl redefined hbase.regionserver.impl - * @throws IOException When determining the region count fails. + * @throws IOException When determining the region count fails. 
*/ public static void initTableReducerJob(String table, Class reducer, Job job, @@ -177,17 +177,17 @@ public class TableMapReduceUtil { job.setPartitionerClass(partitioner); } } - + /** - * Ensures that the given number of reduce tasks for the given job - * configuration does not exceed the number of regions for the given table. - * + * Ensures that the given number of reduce tasks for the given job + * configuration does not exceed the number of regions for the given table. + * * @param table The table to get the region count for. * @param job The current job to adjust. * @throws IOException When retrieving the table details fails. */ - public static void limitNumReduceTasks(String table, Job job) - throws IOException { + public static void limitNumReduceTasks(String table, Job job) + throws IOException { HTable outputTable = new HTable(job.getConfiguration(), table); int regions = outputTable.getRegionsInfo().size(); if (job.getNumReduceTasks() > regions) @@ -195,25 +195,25 @@ public class TableMapReduceUtil { } /** - * Sets the number of reduce tasks for the given job configuration to the - * number of regions the given table has. - * + * Sets the number of reduce tasks for the given job configuration to the + * number of regions the given table has. + * * @param table The table to get the region count for. * @param job The current job to adjust. * @throws IOException When retrieving the table details fails. */ - public static void setNumReduceTasks(String table, Job job) - throws IOException { + public static void setNumReduceTasks(String table, Job job) + throws IOException { HTable outputTable = new HTable(job.getConfiguration(), table); int regions = outputTable.getRegionsInfo().size(); job.setNumReduceTasks(regions); } - + /** * Sets the number of rows to return and cache with each scanner iteration. * Higher caching values will enable faster mapreduce jobs at the expense of * requiring more heap to contain the cached rows. - * + * * @param job The current job to adjust. * @param batchSize The number of rows to return in batch with each scanner * iteration. @@ -221,5 +221,5 @@ public class TableMapReduceUtil { public static void setScannerCaching(Job job, int batchSize) { job.getConfiguration().setInt("hbase.client.scanner.caching", batchSize); } - + } \ No newline at end of file diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java index ae0af5b..bbceb63 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java @@ -24,9 +24,9 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.Mapper; /** - * Extends the base Mapper class to add the required input key + * Extends the base Mapper class to add the required input key * and value classes. - * + * * @param The type of the key. * @param The type of the value. * @see org.apache.hadoop.mapreduce.Mapper diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java index 41fe3f9..47453e0 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java @@ -38,9 +38,9 @@ import org.apache.hadoop.conf.Configuration; /** * Convert Map/Reduce output and write it to an HBase table. 
The KEY is ignored - * while the output value must be either a {@link Put} or a - * {@link Delete} instance. - * + * while the output value must be either a {@link Put} or a + * {@link Delete} instance. + * * @param The type of the key. Ignored in this class. */ public class TableOutputFormat extends OutputFormat { @@ -59,18 +59,18 @@ public class TableOutputFormat extends OutputFormat { /** * Writes the reducer output to an HBase table. - * + * * @param The type of the key. */ - protected static class TableRecordWriter + protected static class TableRecordWriter extends RecordWriter { - + /** The table to write to. */ private HTable table; /** * Instantiate a TableRecordWriter with the HBase HClient for writing. - * + * * @param table The table to write to. */ public TableRecordWriter(HTable table) { @@ -79,37 +79,37 @@ public class TableOutputFormat extends OutputFormat { /** * Closes the writer, in this case flush table commits. - * + * * @param context The context. * @throws IOException When closing the writer fails. * @see org.apache.hadoop.mapreduce.RecordWriter#close(org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public void close(TaskAttemptContext context) + public void close(TaskAttemptContext context) throws IOException { table.flushCommits(); } /** * Writes a key/value pair into the table. - * + * * @param key The key. * @param value The value. * @throws IOException When writing fails. * @see org.apache.hadoop.mapreduce.RecordWriter#write(java.lang.Object, java.lang.Object) */ @Override - public void write(KEY key, Writable value) + public void write(KEY key, Writable value) throws IOException { if (value instanceof Put) this.table.put(new Put((Put)value)); else if (value instanceof Delete) this.table.delete(new Delete((Delete)value)); else throw new IOException("Pass a Delete or a Put"); } } - + /** * Creates a new record writer. - * + * * @param context The current task context. * @return The newly created writer instance. * @throws IOException When creating the writer fails. @@ -118,7 +118,7 @@ public class TableOutputFormat extends OutputFormat { */ @Override public RecordWriter getRecordWriter( - TaskAttemptContext context) + TaskAttemptContext context) throws IOException, InterruptedException { // expecting exactly one path Configuration conf = new Configuration(context.getConfiguration()); @@ -150,9 +150,9 @@ public class TableOutputFormat extends OutputFormat { /** * Checks if the output target exists. - * + * * @param context The current context. - * @throws IOException When the check fails. + * @throws IOException When the check fails. * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.OutputFormat#checkOutputSpecs(org.apache.hadoop.mapreduce.JobContext) */ @@ -160,12 +160,12 @@ public class TableOutputFormat extends OutputFormat { public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException { // TODO Check if the table exists? - + } /** * Returns the output committer. - * + * * @param context The current context. * @return The committer. * @throws IOException When creating the committer fails. 
@@ -173,9 +173,9 @@ public class TableOutputFormat extends OutputFormat { * @see org.apache.hadoop.mapreduce.OutputFormat#getOutputCommitter(org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public OutputCommitter getOutputCommitter(TaskAttemptContext context) + public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException, InterruptedException { return new TableOutputCommitter(); } - + } diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java index 903b4a8..fa7de8f 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java @@ -30,14 +30,14 @@ import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; /** - * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) + * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) * pairs. */ public class TableRecordReader extends RecordReader { - + private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl(); - + /** * Restart from survivable exceptions by creating a new scanner. * @@ -51,7 +51,7 @@ extends RecordReader { /** * Build the scanner. Not done in constructor to allow for extension. * - * @throws IOException When restarting the scan fails. + * @throws IOException When restarting the scan fails. */ public void init() throws IOException { this.recordReaderImpl.init(); @@ -59,7 +59,7 @@ extends RecordReader { /** * Sets the HBase table. - * + * * @param htable The {@link HTable} to scan. */ public void setHTable(HTable htable) { @@ -68,7 +68,7 @@ extends RecordReader { /** * Sets the scan defining the actual details like columns etc. - * + * * @param scan The scan to set. */ public void setScan(Scan scan) { @@ -77,7 +77,7 @@ extends RecordReader { /** * Closes the split. - * + * * @see org.apache.hadoop.mapreduce.RecordReader#close() */ @Override @@ -87,7 +87,7 @@ extends RecordReader { /** * Returns the current key. - * + * * @return The current key. * @throws IOException * @throws InterruptedException When the job is aborted. @@ -101,7 +101,7 @@ extends RecordReader { /** * Returns the current value. - * + * * @return The current value. * @throws IOException When the value is faulty. * @throws InterruptedException When the job is aborted. @@ -114,13 +114,13 @@ extends RecordReader { /** * Initializes the reader. - * + * * @param inputsplit The split to work with. * @param context The current task context. * @throws IOException When setting up the reader fails. * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.RecordReader#initialize( - * org.apache.hadoop.mapreduce.InputSplit, + * org.apache.hadoop.mapreduce.InputSplit, * org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override @@ -131,7 +131,7 @@ extends RecordReader { /** * Positions the record reader to the next record. - * + * * @return true if there was another record. * @throws IOException When reading the record failed. * @throws InterruptedException When the job was aborted. @@ -144,7 +144,7 @@ extends RecordReader { /** * The current progress of the record reader through its data. - * + * * @return A number between 0.0 and 1.0, the fraction of the data read. 
* @see org.apache.hadoop.mapreduce.RecordReader#getProgress() */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java index 6a2d8bb..c1803df 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.util.StringUtils; /** - * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) + * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) * pairs. */ public class TableRecordReaderImpl { @@ -61,7 +61,7 @@ public class TableRecordReaderImpl { /** * Build the scanner. Not done in constructor to allow for extension. * - * @throws IOException When restarting the scan fails. + * @throws IOException When restarting the scan fails. */ public void init() throws IOException { restart(scan.getStartRow()); @@ -69,7 +69,7 @@ public class TableRecordReaderImpl { /** * Sets the HBase table. - * + * * @param htable The {@link HTable} to scan. */ public void setHTable(HTable htable) { @@ -78,7 +78,7 @@ public class TableRecordReaderImpl { /** * Sets the scan defining the actual details like columns etc. - * + * * @param scan The scan to set. */ public void setScan(Scan scan) { @@ -87,8 +87,8 @@ public class TableRecordReaderImpl { /** * Closes the split. - * - * + * + * */ public void close() { this.scanner.close(); @@ -96,7 +96,7 @@ public class TableRecordReaderImpl { /** * Returns the current key. - * + * * @return The current key. * @throws IOException * @throws InterruptedException When the job is aborted. @@ -108,7 +108,7 @@ public class TableRecordReaderImpl { /** * Returns the current value. - * + * * @return The current value. * @throws IOException When the value is faulty. * @throws InterruptedException When the job is aborted. @@ -120,7 +120,7 @@ public class TableRecordReaderImpl { /** * Positions the record reader to the next record. - * + * * @return true if there was another record. * @throws IOException When reading the record failed. * @throws InterruptedException When the job was aborted. @@ -131,7 +131,7 @@ public class TableRecordReaderImpl { try { value = this.scanner.next(); } catch (IOException e) { - LOG.debug("recovered from " + StringUtils.stringifyException(e)); + LOG.debug("recovered from " + StringUtils.stringifyException(e)); restart(lastRow); scanner.next(); // skip presumed already mapped row value = scanner.next(); @@ -146,12 +146,12 @@ public class TableRecordReaderImpl { /** * The current progress of the record reader through its data. - * + * * @return A number between 0.0 and 1.0, the fraction of the data read. */ public float getProgress() { // Depends on the total number of tuples return 0; } - + } diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java index 64540ac..d087f85 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java @@ -24,16 +24,16 @@ import org.apache.hadoop.mapreduce.Reducer; /** * Extends the basic Reducer class to add the required key and - * value input/output classes. 
While the input key and value as well as the - * output key can be anything handed in from the previous map phase the output - * value must be either a {@link org.apache.hadoop.hbase.client.Put Put} + * value input/output classes. While the input key and value as well as the + * output key can be anything handed in from the previous map phase the output + * value must be either a {@link org.apache.hadoop.hbase.client.Put Put} * or a {@link org.apache.hadoop.hbase.client.Delete Delete} instance when * using the {@link TableOutputFormat} class. *
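Concretely, a TableReducer subclass can take any map output types but must emit Put or Delete values for TableOutputFormat to persist. A hedged word-count-style sink with invented table, family, and qualifier names; it would be wired in with TableMapReduceUtil.initTableReducerJob("counts", CountSinkReducer.class, job):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableReducer;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;

    public class CountSinkReducer
        extends TableReducer<Text, IntWritable, ImmutableBytesWritable> {

      @Override
      protected void reduce(Text key, Iterable<IntWritable> values, Context context)
          throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable v : values) {
          sum += v.get();
        }
        byte[] row = Bytes.toBytes(key.toString());
        Put put = new Put(row);
        put.add(Bytes.toBytes("cf"), Bytes.toBytes("count"), Bytes.toBytes(sum));
        // The emitted key is passed through but ignored by TableOutputFormat;
        // the Put itself defines the row and columns.
        context.write(new ImmutableBytesWritable(row), put);
      }
    }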

    - * This class is extended by {@link IdentityTableReducer} but can also be + * This class is extended by {@link IdentityTableReducer} but can also be * subclassed to implement similar features or any custom code needed. It has - * the advantage to enforce the output value to a specific basic type. - * + * the advantage to enforce the output value to a specific basic type. + * * @param The type of the input key. * @param The type of the input value. * @param The type of the output key. diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java index 55fb933..082c931 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java @@ -32,9 +32,9 @@ import org.apache.hadoop.mapreduce.InputSplit; * A table split corresponds to a key range (low, high). All references to row * below refer to the key of the row. */ -public class TableSplit extends InputSplit +public class TableSplit extends InputSplit implements Writable, Comparable { - + private byte [] tableName; private byte [] startRow; private byte [] endRow; @@ -48,7 +48,7 @@ implements Writable, Comparable { /** * Creates a new instance while assigning all variables. - * + * * @param tableName The name of the current table. * @param startRow The start row of the split. * @param endRow The end row of the split. @@ -64,8 +64,8 @@ implements Writable, Comparable { /** * Returns the table name. - * - * @return The table name. + * + * @return The table name. */ public byte [] getTableName() { return tableName; @@ -73,26 +73,26 @@ implements Writable, Comparable { /** * Returns the start row. - * + * * @return The start row. - */ + */ public byte [] getStartRow() { return startRow; } /** * Returns the end row. - * - * @return The end row. + * + * @return The end row. */ public byte [] getEndRow() { return endRow; } - /** + /** * Returns the region location. - * - * @return The region's location. + * + * @return The region's location. */ public String getRegionLocation() { return regionLocation; @@ -100,7 +100,7 @@ implements Writable, Comparable { /** * Returns the region's location as an array. - * + * * @return The array containing the region location. * @see org.apache.hadoop.mapreduce.InputSplit#getLocations() */ @@ -111,7 +111,7 @@ implements Writable, Comparable { /** * Returns the length of the split. - * + * * @return The length of the split. * @see org.apache.hadoop.mapreduce.InputSplit#getLength() */ @@ -123,7 +123,7 @@ implements Writable, Comparable { /** * Reads the values of each field. - * + * * @param in The input to read from. * @throws IOException When reading the input fails. */ @@ -137,7 +137,7 @@ implements Writable, Comparable { /** * Writes the field values to the output. - * + * * @param out The output to write to. * @throws IOException When writing the values to the output fails. */ @@ -151,7 +151,7 @@ implements Writable, Comparable { /** * Returns the details about this instance as a string. - * + * * @return The values of this instance as a string. * @see java.lang.Object#toString() */ @@ -163,7 +163,7 @@ implements Writable, Comparable { /** * Compares this split against the given one. - * + * * @param split The split to compare to. * @return The result of the comparison. 
* @see java.lang.Comparable#compareTo(java.lang.Object) @@ -172,7 +172,7 @@ implements Writable, Comparable { public int compareTo(TableSplit split) { return Bytes.compareTo(getStartRow(), split.getStartRow()); } - + @Override public boolean equals(Object o) { if (o == null || !(o instanceof TableSplit)) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java index 9c25f07..62537ef 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java +++ b/core/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java @@ -99,7 +99,7 @@ below. If running the reduce step makes sense in your case, its usually better to have lots of reducers so load is spread across the hbase cluster.

    There is also a new hbase partitioner that will run as many reducers as -currently existing regions. The +currently existing regions. The {@link org.apache.hadoop.hbase.mapreduce.HRegionPartitioner} is suitable when your table is large and your upload is not such that it will greatly alter the number of existing regions when done; otherwise use the default @@ -119,7 +119,7 @@ The row id must be formatted as a {@link org.apache.hadoop.hbase.io.ImmutableByt value as a {@link org.apache.hadoop.hbase.KeyValue} (A KeyValue holds the value for a cell and its coordinates; row/family/qualifier/timestamp, etc.). Note that you must specify a timestamp when you create the KeyValue in your map task -otherwise the KeyValue will be created with the default LATEST_TIMESTAMP (Long.MAX_VALUE). +otherwise the KeyValue will be created with the default LATEST_TIMESTAMP (Long.MAX_VALUE). Use System.currentTimeMillis() if your data does not inherently bear a timestamp. Your reduce task will also need to emit the KeyValues in order. See {@link org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer} @@ -143,7 +143,7 @@ that is intimate with your tables key namespace and that knows how to distribute keys among the reducers so a total order is maintained. If your keys are distributed with some regularity across a defined key space -- i.e. you know the start and end keys -- then the - {@link org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner} + {@link org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner} may be all you need.
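A minimal sketch of the map side of such a bulk load (the input format, column family, and qualifier below are assumptions, not taken from this patch); the point is the explicit timestamp passed to the KeyValue constructor so the cell is not stamped with the default LATEST_TIMESTAMP:

import java.io.IOException;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/** Sketch of a map task feeding HFileOutputFormat with timestamped KeyValues. */
public class TimestampedKeyValueMapper
    extends Mapper<LongWritable, Text, ImmutableBytesWritable, KeyValue> {

  private static final byte[] FAMILY = Bytes.toBytes("f");     // assumed family
  private static final byte[] QUALIFIER = Bytes.toBytes("q");  // assumed qualifier

  @Override
  protected void map(LongWritable offset, Text line, Context context)
      throws IOException, InterruptedException {
    // Assumes tab-separated "rowkey<TAB>value" text input.
    String[] fields = line.toString().split("\t", 2);
    if (fields.length < 2) {
      return;                                   // skip malformed lines
    }
    byte[] row = Bytes.toBytes(fields[0]);
    long now = System.currentTimeMillis();      // explicit timestamp, per the note above
    KeyValue kv =
        new KeyValue(row, FAMILY, QUALIFIER, now, Bytes.toBytes(fields[1]));
    context.write(new ImmutableBytesWritable(row), kv);
  }
}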

    See org.apache.hadoop.hbase.mapreduce.TestHFileOutputFormat for an example diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/AddColumn.java b/core/src/main/java/org/apache/hadoop/hbase/master/AddColumn.java index 51c8dd2..686521b 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/AddColumn.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/AddColumn.java @@ -29,8 +29,8 @@ import java.io.IOException; class AddColumn extends ColumnOperation { private final HColumnDescriptor newColumn; - AddColumn(final HMaster master, final byte [] tableName, - final HColumnDescriptor newColumn) + AddColumn(final HMaster master, final byte [] tableName, + final HColumnDescriptor newColumn) throws IOException { super(master, tableName); this.newColumn = newColumn; diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/BaseScanner.java b/core/src/main/java/org/apache/hadoop/hbase/master/BaseScanner.java index e2a468d..57f745e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/BaseScanner.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/BaseScanner.java @@ -56,11 +56,11 @@ import java.util.concurrent.atomic.AtomicBoolean; /** * Base HRegion scanner class. Holds utilty common to ROOT and * META HRegion scanners. - * + * *

 * <p>How do we know if all regions are assigned? After the initial scan of
 * the ROOT and META regions, all regions known at
 * that time will have been or are in the process of being assigned.</p>
 *
 * <p>When a region is split the region server notifies the master of the
 * split and the new regions are assigned. But suppose the master loses the
 * split message? We need to periodically rescan the ROOT and
@@ -69,34 +69,34 @@ import java.util.concurrent.atomic.AtomicBoolean;
 * META regions.
 *   <ul>
 *   <li>If we rescan, any regions that are new but not assigned will have
 *   no server info. Any regions that are not being served by the same
 *   server will get re-assigned.</li>
 *
 *   <li>Thus a periodic rescan of the root region will find any new
 *   META regions where we missed the META split
 *   message or we failed to detect a server death and consequently need to
 *   assign the region to a new server.</li>
 *
 *   <li>if we keep track of all the known META regions, then
 *   we can rescan them periodically. If we do this then we can detect any
 *   regions for which we missed a region split message.</li>
 *   </ul>
 *
 * Thus just keeping track of all the META regions permits
 * periodic rescanning which will detect unassigned regions (new or
 * otherwise) without the need to keep track of every region.</p>
 *
 * <p>So the ROOT region scanner needs to wake up:
 * <ol>
 * <li>when the master receives notification that the ROOT
 * region has been opened.</li>
 * <li>periodically after the first scan</li>
 * </ol>
 *
 * The META scanner needs to wake up:
 * <ol>
 * <li>when a META region comes on line</li>
 * <li>periodically to rescan the online META regions</li>
 * </ol>
 *
A META region is not 'online' until it has been scanned * once. */ @@ -120,16 +120,16 @@ abstract class BaseScanner extends Chore implements HConstants { } private final boolean rootRegion; protected final HMaster master; - + protected boolean initialScanComplete; - + protected abstract boolean initialScan(); protected abstract void maintenanceScan(); - - // will use this variable to synchronize and make sure we aren't interrupted + + // will use this variable to synchronize and make sure we aren't interrupted // mid-scan final Object scannerLock = new Object(); - + BaseScanner(final HMaster master, final boolean rootRegion, final AtomicBoolean stop) { super(master.getConfiguration(). @@ -138,17 +138,17 @@ abstract class BaseScanner extends Chore implements HConstants { this.master = master; this.initialScanComplete = false; } - + /** @return true if initial scan completed successfully */ public boolean isInitialScanComplete() { return initialScanComplete; } - + @Override protected boolean initialChore() { return initialScan(); } - + @Override protected void chore() { maintenanceScan(); @@ -205,7 +205,7 @@ abstract class BaseScanner extends Chore implements HConstants { e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e); if (e instanceof UnknownScannerException) { // Reset scannerId so we do not try closing a scanner the other side - // has lost account of: prevents duplicated stack trace out of the + // has lost account of: prevents duplicated stack trace out of the // below close in the finally. scannerId = -1L; } @@ -223,7 +223,7 @@ abstract class BaseScanner extends Chore implements HConstants { } // Scan is finished. - + // First clean up any meta region rows which had null HRegionInfos if (emptyRows.size() > 0) { LOG.warn("Found " + emptyRows.size() + " rows with empty HRegionInfo " + @@ -284,7 +284,7 @@ abstract class BaseScanner extends Chore implements HConstants { * the filesystem, then a daughters was not added to .META. -- must have been * a crash before their addition. Add them here. * @param metaRegionName Meta region name: e.g. .META.,,1 - * @param server HRegionInterface of meta server to talk to + * @param server HRegionInterface of meta server to talk to * @param parent HRegionInfo of split offlined parent * @param rowContent Content of parent row in * metaRegionName @@ -292,7 +292,7 @@ abstract class BaseScanner extends Chore implements HConstants { * the filesystem. * @throws IOException */ - private boolean cleanupAndVerifySplits(final byte [] metaRegionName, + private boolean cleanupAndVerifySplits(final byte [] metaRegionName, final HRegionInterface srvr, final HRegionInfo parent, Result rowContent) throws IOException { @@ -315,7 +315,7 @@ abstract class BaseScanner extends Chore implements HConstants { return result; } - + /* * See if the passed daughter has references in the filesystem to the parent * and if not, remove the note of daughter region in the parent row: its @@ -331,7 +331,7 @@ abstract class BaseScanner extends Chore implements HConstants { * @return True if this daughter still has references to the parent. 
* @throws IOException */ - private boolean checkDaughter(final byte [] metaRegionName, + private boolean checkDaughter(final byte [] metaRegionName, final HRegionInterface srvr, final HRegionInfo parent, final Result rowContent, final byte [] qualifier) throws IOException { @@ -397,7 +397,7 @@ abstract class BaseScanner extends Chore implements HConstants { * @param daughter * @throws IOException */ - private void addDaughterRowChecked(final byte [] metaRegionName, + private void addDaughterRowChecked(final byte [] metaRegionName, final HRegionInterface srvr, final byte [] parent, final HRegionInfo split, final byte [] daughter) throws IOException { @@ -460,7 +460,7 @@ abstract class BaseScanner extends Chore implements HConstants { * @param qualifier * @throws IOException */ - private void removeDaughterFromParent(final byte [] metaRegionName, + private void removeDaughterFromParent(final byte [] metaRegionName, final HRegionInterface srvr, final HRegionInfo parent, final HRegionInfo split, final byte [] qualifier) throws IOException { @@ -473,20 +473,20 @@ abstract class BaseScanner extends Chore implements HConstants { srvr.delete(metaRegionName, delete); } - /* + /* * Checks if a daughter region -- either splitA or splitB -- still holds * references to parent. If not, removes reference to the split from * the parent meta region row so we don't check it any more. * @param metaRegionName Name of meta region to look in. * @param srvr Where region resides. - * @param parent Parent region name. + * @param parent Parent region name. * @param rowContent Keyed content of the parent row in meta region. * @param split Which column family. * @param qualifier Which of the daughters to look at, splitA or splitB. * @return True if still has references to parent. * @throws IOException */ - private boolean hasReferences(final byte [] metaRegionName, + private boolean hasReferences(final byte [] metaRegionName, final HRegionInterface srvr, final HRegionInfo parent, Result rowContent, final HRegionInfo split, byte [] qualifier) throws IOException { @@ -532,13 +532,13 @@ abstract class BaseScanner extends Chore implements HConstants { */ protected void checkAssigned(final HRegionInterface regionServer, final MetaRegion meta, final HRegionInfo info, - final String serverAddress, final long startCode) + final String serverAddress, final long startCode) throws IOException { String serverName = null; String sa = serverAddress; long sc = startCode; if (sa == null || sa.length() <= 0) { - // Scans are sloppy. They don't respect row locks and they get and + // Scans are sloppy. They don't respect row locks and they get and // cache a row internally so may have data that is a little stale. Make // sure that for sure this serverAddress is null. We are trying to // avoid double-assignments. See hbase-1784. 
Will have to wait till diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/ChangeTableState.java b/core/src/main/java/org/apache/hadoop/hbase/master/ChangeTableState.java index aa4e175..5c1bd4e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/ChangeTableState.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/ChangeTableState.java @@ -45,8 +45,8 @@ class ChangeTableState extends TableOperation { new TreeMap>(); protected long lockid; - ChangeTableState(final HMaster master, final byte [] tableName, - final boolean onLine) + ChangeTableState(final HMaster master, final byte [] tableName, + final boolean onLine) throws IOException { super(master, tableName); this.online = onLine; diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/ColumnOperation.java b/core/src/main/java/org/apache/hadoop/hbase/master/ColumnOperation.java index 92faf39..59379bc 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/ColumnOperation.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/ColumnOperation.java @@ -31,8 +31,8 @@ import java.io.IOException; abstract class ColumnOperation extends TableOperation { private final Log LOG = LogFactory.getLog(this.getClass()); - - protected ColumnOperation(final HMaster master, final byte [] tableName) + + protected ColumnOperation(final HMaster master, final byte [] tableName) throws IOException { super(master, tableName); } diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/DeleteColumn.java b/core/src/main/java/org/apache/hadoop/hbase/master/DeleteColumn.java index db06c8c..1d58668 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/DeleteColumn.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/DeleteColumn.java @@ -30,8 +30,8 @@ import java.io.IOException; class DeleteColumn extends ColumnOperation { private final byte [] columnName; - DeleteColumn(final HMaster master, final byte [] tableName, - final byte [] columnName) + DeleteColumn(final HMaster master, final byte [] tableName, + final byte [] columnName) throws IOException { super(master, tableName); this.columnName = columnName; diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/core/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 5bc98b9..187bd12 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -103,7 +103,7 @@ import java.util.concurrent.locks.ReentrantLock; * @see HMasterRegionInterface * @see Watcher */ -public class HMaster extends Thread implements HConstants, HMasterInterface, +public class HMaster extends Thread implements HConstants, HMasterInterface, HMasterRegionInterface, Watcher { // MASTER is name of the webapp and the attribute name used stuffing this //instance into web context. @@ -121,9 +121,9 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, private final Configuration conf; private final Path rootdir; private InfoServer infoServer; - private final int threadWakeFrequency; + private final int threadWakeFrequency; private final int numRetries; - + // Metrics is set when we call run. 
private final MasterMetrics metrics; @@ -152,8 +152,8 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, private long lastFragmentationQuery = -1L; private Map fragmentation = null; private final RegionServerOperationQueue regionServerOperationQueue; - - /** + + /** * Constructor * @param conf configuration * @throws IOException @@ -199,10 +199,10 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, this.zkMasterAddressWatcher.writeAddressToZooKeeper(this.address, true); this.regionServerOperationQueue = new RegionServerOperationQueue(this.conf, this.closed); - + serverManager = new ServerManager(this); regionManager = new RegionManager(this); - + setName(MASTER); this.metrics = new MasterMetrics(MASTER); // We're almost open for business @@ -327,7 +327,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, /** * @return HBase root dir. - * @throws IOException + * @throws IOException */ public Path getRootDir() { return this.rootdir; @@ -463,7 +463,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, LOG.fatal("Unhandled exception. Starting shutdown.", t); this.closed.set(true); } - + // Wait for all the remaining region servers to report in. this.serverManager.letRegionServersShutdown(); @@ -536,7 +536,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, assignedRegions.put(r.getRegionName(), r); } } - LOG.info("Inspection found " + assignedRegions.size() + " regions, " + + LOG.info("Inspection found " + assignedRegions.size() + " regions, " + (isRootRegionAssigned ? "with -ROOT-" : "but -ROOT- was MIA")); splitLogAfterStartup(); } @@ -667,7 +667,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, return mw; } - public HMsg [] regionServerReport(HServerInfo serverInfo, HMsg msgs[], + public HMsg [] regionServerReport(HServerInfo serverInfo, HMsg msgs[], HRegionInfo[] mostLoadedRegions) throws IOException { return adornRegionServerAnswer(serverInfo, @@ -677,7 +677,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, /** * Override if you'd add messages to return to regionserver hsi * @param messages Messages to add to - * @return Messages to return to + * @return Messages to return to */ protected HMsg [] adornRegionServerAnswer(final HServerInfo hsi, final HMsg [] msgs) { @@ -695,7 +695,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, } public void createTable(HTableDescriptor desc) - throws IOException { + throws IOException { if (!isMasterRunning()) { throw new MasterNotRunningException(); } @@ -724,7 +724,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, } } - private synchronized void createTable(final HRegionInfo newRegion) + private synchronized void createTable(final HRegionInfo newRegion) throws IOException { String tableName = newRegion.getTableDesc().getNameAsString(); // 1. Check to see if table already exists. 
Get meta region where @@ -763,11 +763,11 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, } public void addColumn(byte [] tableName, HColumnDescriptor column) - throws IOException { + throws IOException { new AddColumn(this, tableName, column).process(); } - public void modifyColumn(byte [] tableName, byte [] columnName, + public void modifyColumn(byte [] tableName, byte [] columnName, HColumnDescriptor descriptor) throws IOException { new ModifyColumn(this, tableName, columnName, descriptor).process(); @@ -899,7 +899,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, } return null; } - + /** * Get row from meta table. * @param row @@ -915,7 +915,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, get.addFamily(family); return srvr.get(meta.getRegionName(), get); } - + /* * @param meta * @return Server connection to meta .META. region. @@ -926,12 +926,12 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, return this.connection.getHRegionConnection(meta.getServer()); } - public void modifyTable(final byte[] tableName, HConstants.Modify op, + public void modifyTable(final byte[] tableName, HConstants.Modify op, Writable[] args) throws IOException { switch (op) { case TABLE_SET_HTD: - if (args == null || args.length < 1 || + if (args == null || args.length < 1 || !(args[0] instanceof HTableDescriptor)) throw new IOException("SET_HTD request requires an HTableDescriptor"); HTableDescriptor htd = (HTableDescriptor) args[0]; @@ -978,12 +978,12 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, if (args.length == 2) { servername = Bytes.toString(((ImmutableBytesWritable)args[1]).get()); } - // Need hri + // Need hri Result rr = getFromMETA(regionname, HConstants.CATALOG_FAMILY); HRegionInfo hri = getHRegionInfo(rr.getRow(), rr); if (servername == null) { // Get server from the .META. if it wasn't passed as argument - servername = + servername = Bytes.toString(rr.getValue(CATALOG_FAMILY, SERVER_QUALIFIER)); } // Take region out of the intransistions in case it got stuck there doing @@ -1056,12 +1056,12 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, /* * When we find rows in a meta region that has an empty HRegionInfo, we * clean them up here. 
- * + * * @param s connection to server serving meta region * @param metaRegionName name of the meta region we scanned * @param emptyRows the row keys that had empty HRegionInfos */ - protected void deleteEmptyMetaRows(HRegionInterface s, + protected void deleteEmptyMetaRows(HRegionInterface s, byte [] metaRegionName, List emptyRows) { for (byte [] regionName: emptyRows) { @@ -1083,10 +1083,10 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, @Override public void process(WatchedEvent event) { LOG.debug(("Event " + event.getType() + " with path " + event.getPath())); - // Master should kill itself if its session expired or if its + // Master should kill itself if its session expired or if its // znode was deleted manually (usually for testing purposes) - if(event.getState() == KeeperState.Expired || - (event.getType().equals(EventType.NodeDeleted) && + if(event.getState() == KeeperState.Expired || + (event.getType().equals(EventType.NodeDeleted) && event.getPath().equals(this.zooKeeperWrapper.getMasterElectionZNode())) && !shutdownRequested.get()) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/MetaRegion.java b/core/src/main/java/org/apache/hadoop/hbase/master/MetaRegion.java index 0d326b7..8d9a2db 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/MetaRegion.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/MetaRegion.java @@ -39,7 +39,7 @@ public class MetaRegion implements Comparable { } this.regionInfo = regionInfo; } - + @Override public String toString() { return "{server: " + this.server.toString() + ", regionname: " + @@ -62,13 +62,13 @@ public class MetaRegion implements Comparable { return regionInfo.getStartKey(); } - + /** @return the endKey */ public byte [] getEndKey() { return regionInfo.getEndKey(); } - + public HRegionInfo getRegionInfo() { return regionInfo; } diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/MetaScanner.java b/core/src/main/java/org/apache/hadoop/hbase/master/MetaScanner.java index c90ac9b..e6434ba 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/MetaScanner.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/MetaScanner.java @@ -30,24 +30,24 @@ import java.util.concurrent.TimeUnit; /** * MetaScanner META table. - * + * * When a META server comes on line, a MetaRegion object is * queued up by regionServerReport() and this thread wakes up. * - * It's important to do this work in a separate thread, or else the blocking + * It's important to do this work in a separate thread, or else the blocking * action would prevent other work from getting done. */ class MetaScanner extends BaseScanner { /** Initial work for the meta scanner is queued up here */ private volatile BlockingQueue metaRegionsToScan = new LinkedBlockingQueue(); - + private final List metaRegionsToRescan = new ArrayList(); - + /** * Constructor - * + * * @param master */ public MetaScanner(HMaster master) { @@ -88,7 +88,7 @@ class MetaScanner extends BaseScanner { // Make sure the file system is still available this.master.checkFileSystem(); } catch (Exception e) { - // If for some reason we get some other kind of exception, + // If for some reason we get some other kind of exception, // at least log it rather than go out silently. 
LOG.error("Unexpected exception", e); } @@ -102,7 +102,7 @@ class MetaScanner extends BaseScanner { (region == null && metaRegionsToScan.size() > 0) && !metaRegionsScanned()) { try { - region = metaRegionsToScan.poll(this.master.getThreadWakeFrequency(), + region = metaRegionsToScan.poll(this.master.getThreadWakeFrequency(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { // continue @@ -134,7 +134,7 @@ class MetaScanner extends BaseScanner { } /* - * Called by the meta scanner when it has completed scanning all meta + * Called by the meta scanner when it has completed scanning all meta * regions. This wakes up any threads that were waiting for this to happen. * @param totalRows Total rows scanned. * @param regionCount Count of regions in .META. table. @@ -171,10 +171,10 @@ class MetaScanner extends BaseScanner { } return this.master.isClosed(); } - + /** * Add another meta region to scan to the queue. - */ + */ void addMetaRegionToScan(MetaRegion m) { metaRegionsToScan.add(m); } diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/ModifyColumn.java b/core/src/main/java/org/apache/hadoop/hbase/master/ModifyColumn.java index 2c3cd45..2099444 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/ModifyColumn.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/ModifyColumn.java @@ -30,9 +30,9 @@ import java.io.IOException; class ModifyColumn extends ColumnOperation { private final HColumnDescriptor descriptor; private final byte [] columnName; - - ModifyColumn(final HMaster master, final byte [] tableName, - final byte [] columnName, HColumnDescriptor descriptor) + + ModifyColumn(final HMaster master, final byte [] tableName, + final byte [] columnName, HColumnDescriptor descriptor) throws IOException { super(master, tableName); this.descriptor = descriptor; @@ -48,7 +48,7 @@ class ModifyColumn extends ColumnOperation { updateRegionInfo(server, m.getRegionName(), i); } else { // otherwise, we have an error. throw new InvalidColumnNameException("Column family '" + - Bytes.toString(columnName) + + Bytes.toString(columnName) + "' doesn't exist, so cannot be modified."); } } diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java b/core/src/main/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java index 24e98a1..22beb3d 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java @@ -38,8 +38,8 @@ class ModifyTableMeta extends TableOperation { private HTableDescriptor desc; - ModifyTableMeta(final HMaster master, final byte [] tableName, - HTableDescriptor desc) + ModifyTableMeta(final HMaster master, final byte [] tableName, + HTableDescriptor desc) throws IOException { super(master, tableName); this.desc = desc; diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java b/core/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java index 7a50fb5..56b819b 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java @@ -26,10 +26,10 @@ import java.io.IOException; /** * ProcessRegionClose is the way we do post-processing on a closed region. We - * only spawn one of these asynchronous tasks when the region needs to be + * only spawn one of these asynchronous tasks when the region needs to be * either offlined or deleted. 
We used to create one of these tasks whenever * a region was closed, but since closing a region that isn't being offlined - * or deleted doesn't actually require post processing, it's no longer + * or deleted doesn't actually require post processing, it's no longer * necessary. */ class ProcessRegionClose extends ProcessRegionStatusChange { @@ -42,7 +42,7 @@ class ProcessRegionClose extends ProcessRegionStatusChange { * @param offlineRegion if true, set the region to offline in meta * @param reassignRegion if true, region is to be reassigned */ - public ProcessRegionClose(HMaster master, HRegionInfo regionInfo, + public ProcessRegionClose(HMaster master, HRegionInfo regionInfo, boolean offlineRegion, boolean reassignRegion) { super(master, regionInfo); @@ -74,7 +74,7 @@ class ProcessRegionClose extends ProcessRegionStatusChange { // We can't proceed unless the meta region we are going to update // is online. metaRegionAvailable() will put this operation on the - // delayedToDoQueue, so return true so the operation is not put + // delayedToDoQueue, so return true so the operation is not put // back on the toDoQueue if (metaRegionAvailable()) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java b/core/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java index 8eba4ee..14433b8 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java @@ -28,9 +28,9 @@ import org.apache.hadoop.hbase.util.Bytes; import java.io.IOException; -/** +/** * ProcessRegionOpen is instantiated when a region server reports that it is - * serving a region. This applies to all meta and user regions except the + * serving a region. This applies to all meta and user regions except the * root region which is handled specially. */ class ProcessRegionOpen extends ProcessRegionStatusChange { diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java b/core/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java index dafb6bb..0073c7e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java @@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.HRegionInfo; /** - * Abstract class that performs common operations for + * Abstract class that performs common operations for * @see ProcessRegionClose and @see ProcessRegionOpen */ abstract class ProcessRegionStatusChange extends RegionServerOperation { @@ -41,7 +41,7 @@ abstract class ProcessRegionStatusChange extends RegionServerOperation { this.regionInfo = regionInfo; this.isMetaTable = regionInfo.isMetaTable(); } - + protected boolean metaRegionAvailable() { boolean available = true; if (isMetaTable) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java b/core/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java index ff926ee..69c7d9b 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java @@ -39,7 +39,7 @@ import java.util.List; import java.util.Map; import java.util.Set; -/** +/** * Instantiated when a server's lease has expired, meaning it has crashed. 
* The region server's log file needs to be split up for each region it was * serving, and the regions need to get reassigned. @@ -119,19 +119,19 @@ class ProcessServerShutdown extends RegionServerOperation { return this.deadServerAddress; } - private void closeRegionsInTransition() { + private void closeRegionsInTransition() { Map inTransition = master.getRegionManager().getRegionsInTransitionOnServer(deadServer); for (Map.Entry entry : inTransition.entrySet()) { String regionName = entry.getKey(); RegionState state = entry.getValue(); - + LOG.info("Region " + regionName + " was in transition " + state + " on dead server " + deadServer + " - marking unassigned"); master.getRegionManager().setUnassigned(state.getRegionInfo(), true); } } - + @Override public String toString() { return "ProcessServerShutdown of " + this.deadServer; @@ -227,7 +227,7 @@ class ProcessServerShutdown extends RegionServerOperation { } // Scan complete. Remove any rows which had empty HRegionInfos - + if (emptyRows.size() > 0) { LOG.warn("Found " + emptyRows.size() + " rows with empty HRegionInfo while scanning meta region " + @@ -271,7 +271,7 @@ class ProcessServerShutdown extends RegionServerOperation { ScanMetaRegions(MetaRegion m, HMaster master) { super(m, master); } - + public Boolean call() throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("process server shutdown scanning " + @@ -291,9 +291,9 @@ class ProcessServerShutdown extends RegionServerOperation { LOG.info("process shutdown of server " + this.deadServer + ": logSplit: " + logSplit + ", rootRescanned: " + rootRescanned + - ", numberOfMetaRegions: " + + ", numberOfMetaRegions: " + master.getRegionManager().numMetaRegions() + - ", onlineMetaRegions.size(): " + + ", onlineMetaRegions.size(): " + master.getRegionManager().numOnlineMetaRegions()); if (!logSplit) { // Process the old log file @@ -348,7 +348,7 @@ class ProcessServerShutdown extends RegionServerOperation { if (LOG.isDebugEnabled()) { LOG.debug("process server shutdown scanning root region on " + - master.getRegionManager().getRootRegionLocation().getBindAddress() + + master.getRegionManager().getRootRegionLocation().getBindAddress() + " finished " + Thread.currentThread().getName()); } rootRescanned = true; @@ -371,7 +371,7 @@ class ProcessServerShutdown extends RegionServerOperation { Bytes.toString(r.getRegionName()) + " on " + r.getServer()); } } - + closeRegionsInTransition(); // Remove this server from dead servers list. Finished splitting logs. diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java b/core/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java index 4fd53f6..d6d59a4 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java @@ -58,16 +58,16 @@ import java.util.concurrent.atomic.AtomicReference; /** * Class to manage assigning regions to servers, state of root and meta, etc. 
- */ + */ public class RegionManager implements HConstants { protected static final Log LOG = LogFactory.getLog(RegionManager.class); - + private AtomicReference rootRegionLocation = new AtomicReference(null); private final RootScanner rootScannerThread; final MetaScanner metaScannerThread; - + /** Set by root scanner to indicate the number of meta regions */ private final AtomicInteger numberOfMetaRegions = new AtomicInteger(); @@ -76,20 +76,20 @@ public class RegionManager implements HConstants { new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR); private static final byte[] OVERLOADED = Bytes.toBytes("Overloaded"); - + private static final byte [] META_REGION_PREFIX = Bytes.toBytes(".META.,"); /** * Map of region name to RegionState for regions that are in transition such as - * + * * unassigned -> pendingOpen -> open * closing -> pendingClose -> closed; if (closed && !offline) -> unassigned - * + * * At the end of a transition, removeRegion is used to remove the region from * the map (since it is no longer in transition) - * + * * Note: Needs to be SortedMap so we can specify a comparator - * + * * @see RegionState inner-class below */ final SortedMap regionsInTransition = @@ -147,7 +147,7 @@ public class RegionManager implements HConstants { Threads.setDaemonThreadRunning(rootScannerThread, "RegionManager.rootScanner"); Threads.setDaemonThreadRunning(metaScannerThread, - "RegionManager.metaScanner"); + "RegionManager.metaScanner"); } void unsetRootRegion() { @@ -158,7 +158,7 @@ public class RegionManager implements HConstants { LOG.info("-ROOT- region unset (but not set to be reassigned)"); } } - + void reassignRootRegion() { unsetRootRegion(); if (!master.getShutdownRequested().get()) { @@ -171,12 +171,12 @@ public class RegionManager implements HConstants { } } } - + /* * Assigns regions to region servers attempting to balance the load across - * all region servers. Note that no synchronization is necessary as the caller + * all region servers. Note that no synchronization is necessary as the caller * (ServerManager.processMsgs) already owns the monitor for the RegionManager. - * + * * @param info * @param mostLoadedRegions * @param returnMsgs @@ -198,21 +198,21 @@ public class RegionManager implements HConstants { if (isSingleServer) { assignRegionsToOneServer(regionsToAssign, info, returnMsgs); } else { - // otherwise, give this server a few regions taking into account the + // otherwise, give this server a few regions taking into account the // load of all the other servers. assignRegionsToMultipleServers(thisServersLoad, regionsToAssign, info, returnMsgs); } } } - + /* * Make region assignments taking into account multiple servers' loads. * * Note that no synchronization is needed while we iterate over * regionsInTransition because this method is only called by assignRegions * whose caller owns the monitor for RegionManager - * + * * TODO: This code is unintelligible. REWRITE. Add TESTS! St.Ack 09/30/2009 * @param thisServersLoad * @param regionsToAssign @@ -220,7 +220,7 @@ public class RegionManager implements HConstants { * @param returnMsgs */ private void assignRegionsToMultipleServers(final HServerLoad thisServersLoad, - final Set regionsToAssign, final HServerInfo info, + final Set regionsToAssign, final HServerInfo info, final ArrayList returnMsgs) { boolean isMetaAssign = false; for (RegionState s : regionsToAssign) { @@ -300,11 +300,11 @@ public class RegionManager implements HConstants { /* * Assign all to the only server. An unlikely case but still possible. 
- * + * * Note that no synchronization is needed on regionsInTransition while - * iterating on it because the only caller is assignRegions whose caller owns + * iterating on it because the only caller is assignRegions whose caller owns * the monitor for RegionManager - * + * * @param regionsToAssign * @param serverName * @param returnMsgs @@ -366,11 +366,11 @@ public class RegionManager implements HConstants { /* * Get the set of regions that should be assignable in this pass. - * + * * Note that no synchronization on regionsInTransition is needed because the * only caller (assignRegions, whose caller is ServerManager.processMsgs) owns * the monitor for RegionManager - */ + */ private Set regionsAwaitingAssignment(HServerAddress addr, boolean isSingleServer) { // set of regions we want to assign to this server @@ -413,7 +413,7 @@ public class RegionManager implements HConstants { // and are on-line continue; } - if (!i.isMetaRegion() && + if (!i.isMetaRegion() && !master.getServerManager().canAssignUserRegions()) { LOG.debug("user region " + i.getRegionNameAsString() + " is in transition but not enough servers yet"); @@ -422,18 +422,18 @@ public class RegionManager implements HConstants { if (s.isUnassigned()) { regionsToAssign.add(s); } - } + } } return regionsToAssign; } - + /* * Figure out the load that is next highest amongst all regionservers. Also, - * return how many servers exist at that load. + * return how many servers exist at that load. */ - private int computeNextHeaviestLoad(HServerLoad referenceLoad, + private int computeNextHeaviestLoad(HServerLoad referenceLoad, HServerLoad heavierLoad) { - + SortedMap> heavyServers = new TreeMap>(); synchronized (master.getLoadToServers()) { @@ -464,14 +464,14 @@ public class RegionManager implements HConstants { * some or all of its most loaded regions, allowing it to reduce its load. * The closed regions will then get picked up by other underloaded machines. * - * Note that no synchronization is needed because the only caller + * Note that no synchronization is needed because the only caller * (assignRegions) whose caller owns the monitor for RegionManager */ - void unassignSomeRegions(final HServerInfo info, + void unassignSomeRegions(final HServerInfo info, int numRegionsToClose, final HRegionInfo[] mostLoadedRegions, ArrayList returnMsgs) { - LOG.debug("Choosing to reassign " + numRegionsToClose - + " regions. mostLoadedRegions has " + mostLoadedRegions.length + LOG.debug("Choosing to reassign " + numRegionsToClose + + " regions. mostLoadedRegions has " + mostLoadedRegions.length + " regions in it."); int regionIdx = 0; int regionsClosed = 0; @@ -516,23 +516,23 @@ public class RegionManager implements HConstants { return !pathname.equals(HLog.HREGION_LOGDIR_NAME) && !pathname.equals(VERSION_FILE_NAME); } - + } /* * PathFilter that accepts all but compaction.dir names. */ static class RegionDirFilter implements PathFilter { - public boolean accept(Path path) { + public boolean accept(Path path) { return !path.getName().equals(HREGION_COMPACTIONDIR_NAME); } } /** * @return the rough number of the regions on fs - * Note: this method simply counts the regions on fs by accumulating all the dirs + * Note: this method simply counts the regions on fs by accumulating all the dirs * in each table dir (${HBASE_ROOT}/$TABLE) and skipping logfiles, compaction dirs. 
- * @throws IOException + * @throws IOException */ public int countRegionsOnFS() throws IOException { int regions = 0; @@ -569,7 +569,7 @@ public class RegionManager implements HConstants { } return false; } - + /** * Return a map of the regions in transition on a server. * Returned map entries are region name -> RegionState @@ -604,9 +604,9 @@ public class RegionManager implements HConstants { LOG.debug("meta and root scanners notified"); } } - + /** Stop the region assigner */ - public void stop() { + public void stop() { try { if (rootScannerThread.isAlive()) { rootScannerThread.join(); // Wait for the root scanner to finish. @@ -624,7 +624,7 @@ public class RegionManager implements HConstants { master.getZooKeeperWrapper().clearRSDirectory(); master.getZooKeeperWrapper().close(); } - + /** * Block until meta regions are online or we're shutting down. * @return true if we found meta regions, false if we're closing. @@ -635,9 +635,9 @@ public class RegionManager implements HConstants { numberOfMetaRegions.get() == onlineMetaRegions.size()); } } - + /** - * Search our map of online meta regions to find the first meta region that + * Search our map of online meta regions to find the first meta region that * should contain a pointer to newRegion. * @param newRegion * @return MetaRegion where the newRegion should live @@ -651,13 +651,13 @@ public class RegionManager implements HConstants { } else { if (onlineMetaRegions.containsKey(newRegion.getRegionName())) { return onlineMetaRegions.get(newRegion.getRegionName()); - } + } return onlineMetaRegions.get(onlineMetaRegions.headMap( newRegion.getRegionName()).lastKey()); } } } - + /** * Get a set of all the meta regions that contain info about a given table. * @param tableName Table you need to know all the meta regions for @@ -724,8 +724,8 @@ public class RegionManager implements HConstants { * written * @throws IOException */ - public void createRegion(HRegionInfo newRegion, HRegionInterface server, - byte [] metaRegionName) + public void createRegion(HRegionInfo newRegion, HRegionInterface server, + byte [] metaRegionName) throws IOException { // 2. Create the HRegion HRegion region = HRegion.createHRegion(newRegion, this.master.getRootDir(), @@ -734,11 +734,11 @@ public class RegionManager implements HConstants { // 3. Insert into meta HRegionInfo info = region.getRegionInfo(); byte [] regionName = region.getRegionName(); - + Put put = new Put(regionName); put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(info)); server.put(metaRegionName, put); - + // 4. Close the new region to flush it to disk. Close its log file too. region.close(); region.getLog().closeAndDelete(); @@ -746,17 +746,17 @@ public class RegionManager implements HConstants { // 5. Get it assigned to a server setUnassigned(info, true); } - - /** - * Set a MetaRegion as online. - * @param metaRegion + + /** + * Set a MetaRegion as online. 
+ * @param metaRegion */ public void putMetaRegionOnline(MetaRegion metaRegion) { onlineMetaRegions.put(metaRegion.getStartKey(), metaRegion); } - /** - * Get a list of online MetaRegions + /** + * Get a list of online MetaRegions * @return list of MetaRegion objects */ public List getListOfOnlineMetaRegions() { @@ -766,26 +766,26 @@ public class RegionManager implements HConstants { } return regions; } - - /** - * Count of online meta regions + + /** + * Count of online meta regions * @return count of online meta regions */ public int numOnlineMetaRegions() { return onlineMetaRegions.size(); } - - /** - * Check if a meta region is online by its name + + /** + * Check if a meta region is online by its name * @param startKey name of the meta region to check * @return true if the region is online, false otherwise */ public boolean isMetaRegionOnline(byte [] startKey) { return onlineMetaRegions.containsKey(startKey); } - - /** - * Set an online MetaRegion offline - remove it from the map. + + /** + * Set an online MetaRegion offline - remove it from the map. * @param startKey region name * @return the MetaRegion that was taken offline. */ @@ -845,7 +845,7 @@ public class RegionManager implements HConstants { // Has an outstanding meta region to be assigned. return true; } - } + } } return false; } @@ -931,7 +931,7 @@ public class RegionManager implements HConstants { /** * Remove a region from the region state map. - * + * * @param info */ public void removeRegion(HRegionInfo info) { @@ -939,7 +939,7 @@ public class RegionManager implements HConstants { this.regionsInTransition.remove(info.getRegionNameAsString()); } } - + /** * @param regionName * @return true if the named region is in a transition state @@ -964,8 +964,8 @@ public class RegionManager implements HConstants { return false; } - /** - * Set a region to unassigned + /** + * Set a region to unassigned * @param info Region to set unassigned * @param force if true mark region unassigned whatever its current state */ @@ -982,8 +982,8 @@ public class RegionManager implements HConstants { s.setUnassigned(); } } - - /** + + /** * Check if a region is on the unassigned list * @param info HRegionInfo to check for * @return true if on the unassigned list, false if it isn't. Note that this @@ -999,11 +999,11 @@ public class RegionManager implements HConstants { } return false; } - + /** * Check if a region has been assigned and we're waiting for a response from * the region server. - * + * * @param regionName name of the region * @return true if open, false otherwise */ @@ -1029,7 +1029,7 @@ public class RegionManager implements HConstants { } } } - + /** * @param regionName * @return true if region is marked to be offlined. 
@@ -1044,8 +1044,8 @@ public class RegionManager implements HConstants { return false; } - /** - * Mark a region as closing + /** + * Mark a region as closing * @param serverName * @param regionInfo * @param setOffline @@ -1067,11 +1067,11 @@ public class RegionManager implements HConstants { this.regionsInTransition.put(regionInfo.getRegionNameAsString(), s); } } - - /** - * Remove the map of region names to region infos waiting to be offlined for a + + /** + * Remove the map of region names to region infos waiting to be offlined for a * given server - * + * * @param serverName * @return set of infos to close */ @@ -1087,10 +1087,10 @@ public class RegionManager implements HConstants { } return result; } - + /** * Called when we have told a region server to close the region - * + * * @param regionName */ public void setPendingClose(String regionName) { @@ -1101,7 +1101,7 @@ public class RegionManager implements HConstants { } } } - + /** * @param regionName */ @@ -1120,8 +1120,8 @@ public class RegionManager implements HConstants { public void addMetaRegionToScan(MetaRegion m) { metaScannerThread.addMetaRegionToScan(m); } - - /** + + /** * Check if the initial root scan has been completed. * @return true if scan completed, false otherwise */ @@ -1129,22 +1129,22 @@ public class RegionManager implements HConstants { return rootScannerThread.isInitialScanComplete(); } - /** + /** * Check if the initial meta scan has been completed. * @return true if meta completed, false otherwise - */ + */ public boolean isInitialMetaScanComplete() { return metaScannerThread.isInitialScanComplete(); } - /** + /** * Get the root region location. * @return HServerAddress describing root region server. */ public HServerAddress getRootRegionLocation() { return rootRegionLocation.get(); } - + /** * Block until either the root region location is available or we're shutting * down. @@ -1164,7 +1164,7 @@ public class RegionManager implements HConstants { } } } - + /** * Return the number of meta regions. * @return number of meta regions @@ -1172,7 +1172,7 @@ public class RegionManager implements HConstants { public int numMetaRegions() { return numberOfMetaRegions.get(); } - + /** * Bump the count of meta regions up one */ @@ -1221,9 +1221,9 @@ public class RegionManager implements HConstants { synchronized (rootRegionLocation) { rootRegionLocation.set(new HServerAddress(address)); rootRegionLocation.notifyAll(); - } + } } - + /** * Set the number of meta regions. * @param num Number of meta regions @@ -1317,7 +1317,7 @@ public class RegionManager implements HConstants { applyActions(serverInfo, returnMsgs, this.regionsToMajorCompact, HMsg.Type.MSG_REGION_MAJOR_COMPACT); } - + private void applyActions(final HServerInfo serverInfo, final ArrayList returnMsgs, final SortedMap> map, @@ -1342,28 +1342,28 @@ public class RegionManager implements HConstants { * Class to balance region servers load. * It keeps Region Servers load in slop range by unassigning Regions * from most loaded servers. - * + * * Equilibrium is reached when load of all serves are in slop range - * [avgLoadMinusSlop, avgLoadPlusSlop], where + * [avgLoadMinusSlop, avgLoadPlusSlop], where * avgLoadPlusSlop = Math.ceil(avgLoad * (1 + this.slop)), and * avgLoadMinusSlop = Math.floor(avgLoad * (1 - this.slop)) - 1. 
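A quick worked check of that band as a standalone snippet (the numbers are assumed for illustration; the constructor below defaults "hbase.regions.slop" to 0.3):

/** Worked check of the slop band: assumed avgLoad = 10 regions/server, slop = 0.25. */
public class SlopBandCheck {
  public static void main(String[] args) {
    double avgLoad = 10.0;
    double slop = 0.25;
    int avgLoadPlusSlop  = (int) Math.ceil(avgLoad * (1 + slop));       // ceil(12.5)     = 13
    int avgLoadMinusSlop = (int) Math.floor(avgLoad * (1 - slop)) - 1;  // floor(7.5) - 1 = 6
    // Servers holding 6..13 regions sit inside the band and are left alone;
    // a heavier server has regions unassigned until it falls back into range.
    System.out.println("balanced band: [" + avgLoadMinusSlop + ", " + avgLoadPlusSlop + "]");
  }
}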
*/ private class LoadBalancer { private float slop; // hbase.regions.slop private final int maxRegToClose; // hbase.regions.close.max - + LoadBalancer(Configuration conf) { this.slop = conf.getFloat("hbase.regions.slop", (float)0.3); if (this.slop <= 0) this.slop = 1; //maxRegToClose to constrain balance closing per one iteration - // -1 to turn off + // -1 to turn off // TODO: change default in HBASE-862, need a suggestion this.maxRegToClose = conf.getInt("hbase.regions.close.max", -1); } /** * Balance server load by unassigning some regions. - * + * * @param info - server info * @param mostLoadedRegions - array of most loaded regions * @param returnMsgs - array of return massages @@ -1377,27 +1377,27 @@ public class RegionManager implements HConstants { if(servLoad.getLoad() <= Math.ceil(avg) || avg <= 2.0) { return; } - + // check if current server is overloaded int numRegionsToClose = balanceFromOverloaded(servLoad, avg); - + // check if we can unload server by low loaded servers if(numRegionsToClose <= 0) { - numRegionsToClose = balanceToLowloaded(info.getServerName(), servLoad, + numRegionsToClose = balanceToLowloaded(info.getServerName(), servLoad, avg); } - + if(maxRegToClose > 0) { numRegionsToClose = Math.min(numRegionsToClose, maxRegToClose); } - + if(numRegionsToClose > 0) { - unassignSomeRegions(info, numRegionsToClose, mostLoadedRegions, + unassignSomeRegions(info, numRegionsToClose, mostLoadedRegions, returnMsgs); } } - /* + /* * Check if server load is not overloaded (with load > avgLoadPlusSlop). * @return number of regions to unassign. */ @@ -1414,12 +1414,12 @@ public class RegionManager implements HConstants { return 0; } - /* - * Check if server is most loaded and can be unloaded to + /* + * Check if server is most loaded and can be unloaded to * low loaded servers (with load < avgLoadMinusSlop). * @return number of regions to unassign. 
*/ - private int balanceToLowloaded(String srvName, HServerLoad srvLoad, + private int balanceToLowloaded(String srvName, HServerLoad srvLoad, double avgLoad) { SortedMap> loadToServers = @@ -1427,18 +1427,18 @@ public class RegionManager implements HConstants { // check if server most loaded if (!loadToServers.get(loadToServers.lastKey()).contains(srvName)) return 0; - + // this server is most loaded, we will try to unload it by lowest // loaded servers int avgLoadMinusSlop = (int)Math.floor(avgLoad * (1 - this.slop)) - 1; int lowestLoad = loadToServers.firstKey().getNumberOfRegions(); - + if(lowestLoad >= avgLoadMinusSlop) return 0; // there is no low loaded servers - + int lowSrvCount = loadToServers.get(loadToServers.firstKey()).size(); int numRegionsToClose = 0; - + int numSrvRegs = srvLoad.getNumberOfRegions(); int numMoveToLowLoaded = (avgLoadMinusSlop - lowestLoad) * lowSrvCount; numRegionsToClose = numSrvRegs - (int)Math.ceil(avgLoad); @@ -1494,7 +1494,7 @@ public class RegionManager implements HConstants { */ static class RegionState implements Comparable { private final HRegionInfo regionInfo; - + enum State { UNASSIGNED, // awaiting a server to be assigned PENDING_OPEN, // told a server to open, hasn't opened yet @@ -1502,13 +1502,13 @@ public class RegionManager implements HConstants { CLOSING, // a msg has been enqueued to close ths region, but not delivered to RS yet PENDING_CLOSE, // msg has been delivered to RS to close this region CLOSED // region has been closed but not yet marked in meta - + } - + private State state; - + private boolean isOfflined; - + /* Set when region is assigned or closing */ private String serverName = null; @@ -1517,11 +1517,11 @@ public class RegionManager implements HConstants { this.regionInfo = info; this.state = state; } - + synchronized HRegionInfo getRegionInfo() { return this.regionInfo; } - + synchronized byte [] getRegionName() { return this.regionInfo.getRegionName(); } @@ -1537,7 +1537,7 @@ public class RegionManager implements HConstants { * @return true if the region is being opened */ synchronized boolean isOpening() { - return state == State.UNASSIGNED || + return state == State.UNASSIGNED || state == State.PENDING_OPEN || state == State.OPEN; } @@ -1550,7 +1550,7 @@ public class RegionManager implements HConstants { } /* - * Note: callers of this method (reassignRootRegion, + * Note: callers of this method (reassignRootRegion, * regionsAwaitingAssignment, setUnassigned) ensure that this method is not * called unless it is safe to do so. 
*/ @@ -1596,7 +1596,7 @@ public class RegionManager implements HConstants { this.serverName = serverName; this.isOfflined = setOffline; } - + synchronized boolean isPendingClose() { return state == State.PENDING_CLOSE; } @@ -1612,7 +1612,7 @@ public class RegionManager implements HConstants { synchronized boolean isClosed() { return state == State.CLOSED; } - + synchronized void setClosed() { if (state != State.PENDING_CLOSE && state != State.PENDING_OPEN && @@ -1623,7 +1623,7 @@ public class RegionManager implements HConstants { } state = State.CLOSED; } - + synchronized boolean isOfflined() { return (state == State.CLOSING || state == State.PENDING_CLOSE) && isOfflined; @@ -1634,7 +1634,7 @@ public class RegionManager implements HConstants { return ("name=" + Bytes.toString(getRegionName()) + ", state=" + this.state); } - + @Override public boolean equals(Object o) { if (this == o) { @@ -1645,12 +1645,12 @@ public class RegionManager implements HConstants { } return this.compareTo((RegionState) o) == 0; } - + @Override public int hashCode() { return Bytes.toString(getRegionName()).hashCode(); } - + public int compareTo(RegionState o) { if (o == null) { return 1; diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/RegionServerOperation.java b/core/src/main/java/org/apache/hadoop/hbase/master/RegionServerOperation.java index eb3cb7d..7556389 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/RegionServerOperation.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/RegionServerOperation.java @@ -28,13 +28,13 @@ import java.util.concurrent.Delayed; import java.util.concurrent.TimeUnit; abstract class RegionServerOperation implements Delayed, HConstants { - protected static final Log LOG = + protected static final Log LOG = LogFactory.getLog(RegionServerOperation.class.getName()); - + private long expire; protected final HMaster master; private int delay; - + protected RegionServerOperation(HMaster master) { this.master = master; this.delay = this.master.getConfiguration(). @@ -63,12 +63,12 @@ abstract class RegionServerOperation implements Delayed, HConstants { void setDelay(final int d) { this.delay = d; } - + public int compareTo(Delayed o) { return Long.valueOf(getDelay(TimeUnit.MILLISECONDS) - o.getDelay(TimeUnit.MILLISECONDS)).intValue(); } - + protected void requeue() { this.master.getRegionServerOperationQueue().putOnDelayQueue(this); } @@ -97,9 +97,9 @@ abstract class RegionServerOperation implements Delayed, HConstants { // in the run queue, put this request on the delay queue to give // other threads the opportunity to get the meta regions on-line. if (LOG.isDebugEnabled()) { - LOG.debug("numberOfMetaRegions: " + + LOG.debug("numberOfMetaRegions: " + master.getRegionManager().numMetaRegions() + - ", onlineMetaRegions.size(): " + + ", onlineMetaRegions.size(): " + master.getRegionManager().numOnlineMetaRegions()); LOG.debug("Requeuing because not all meta regions are online"); } diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java b/core/src/main/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java index c85c141..ea8864a 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java @@ -51,7 +51,7 @@ import org.apache.hadoop.ipc.RemoteException; public class RegionServerOperationQueue { // TODO: Build up the junit test of this class. 
private final Log LOG = LogFactory.getLog(this.getClass()); - + /** * Enums returned by {@link RegionServerOperationQueue#process()}; */ @@ -114,7 +114,7 @@ public class RegionServerOperationQueue { * @return {@link ProcessingResultCode#PROCESSED}, * {@link ProcessingResultCode#REQUEUED}, * {@link ProcessingResultCode#REQUEUED_BUT_PROBLEM} - */ + */ public synchronized ProcessingResultCode process(final HServerAddress rootRegionLocation) { RegionServerOperation op = null; // Only process the delayed queue if root region is online. If offline, diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java b/core/src/main/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java index 80745db..7b66785 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java @@ -44,16 +44,16 @@ abstract class RetryableMetaOperation implements Callable { protected final Sleeper sleeper; protected final MetaRegion m; protected final HMaster master; - + protected HRegionInterface server; - + protected RetryableMetaOperation(MetaRegion m, HMaster master) { this.m = m; this.master = master; this.sleeper = new Sleeper(this.master.getThreadWakeFrequency(), this.master.getClosed()); } - + protected T doWithRetries() throws IOException, RuntimeException { List exceptions = new ArrayList(); @@ -77,7 +77,7 @@ abstract class RetryableMetaOperation implements Callable { if (tries == this.master.getNumRetries() - 1) { if (LOG.isDebugEnabled()) { StringBuilder message = new StringBuilder( - "Trying to contact region server for regionName '" + + "Trying to contact region server for regionName '" + Bytes.toString(m.getRegionName()) + "', but failed after " + (tries + 1) + " attempts.\n"); int i = 1; @@ -98,6 +98,6 @@ abstract class RetryableMetaOperation implements Callable { } this.sleeper.sleep(); } - return null; + return null; } } \ No newline at end of file diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/RootScanner.java b/core/src/main/java/org/apache/hadoop/hbase/master/RootScanner.java index 1c14fa0..7547928 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/RootScanner.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/RootScanner.java @@ -61,7 +61,7 @@ class RootScanner extends BaseScanner { // Make sure the file system is still available master.checkFileSystem(); } catch (Exception e) { - // If for some reason we get some other kind of exception, + // If for some reason we get some other kind of exception, // at least log it rather than go out silently. LOG.error("Unexpected exception", e); } diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/core/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index f32ffa2..561b60a 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -54,7 +54,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; /** - * The ServerManager class manages info about region servers - HServerInfo, + * The ServerManager class manages info about region servers - HServerInfo, * load numbers, dying servers, etc. 
*/ public class ServerManager implements HConstants { @@ -84,10 +84,10 @@ public class ServerManager implements HConstants { Collections.synchronizedSortedMap(new TreeMap>()); // Map of server names -> server load private final Map serversToLoad = - new ConcurrentHashMap(); + new ConcurrentHashMap(); private HMaster master; - + /* The regionserver will not be assigned or asked close regions if it * is currently opening >= this many regions. */ @@ -127,7 +127,7 @@ public class ServerManager implements HConstants { sb.append("]"); deadServersList = sb.toString(); } - LOG.info(numServers + " region servers, " + numDeadServers + + LOG.info(numServers + " region servers, " + numDeadServers + " dead, average load " + averageLoad + (deadServersList != null? deadServers: "")); } @@ -161,7 +161,7 @@ public class ServerManager implements HConstants { * @param serverInfo * @throws Leases.LeaseStillHeldException */ - void regionServerStartup(final HServerInfo serverInfo) + void regionServerStartup(final HServerInfo serverInfo) throws Leases.LeaseStillHeldException { HServerInfo info = new HServerInfo(serverInfo); String serverName = info.getServerName(); @@ -241,14 +241,14 @@ public class ServerManager implements HConstants { /** * Called to process the messages sent from the region server to the master * along with the heart beat. - * + * * @param serverInfo * @param msgs * @param mostLoadedRegions Array of regions the region server is submitting * as candidates to be rebalanced, should it be overloaded * @return messages from master to region server indicating what region * server should do. - * + * * @throws IOException */ HMsg [] regionServerReport(final HServerInfo serverInfo, @@ -305,12 +305,12 @@ public class ServerManager implements HConstants { // This state is reachable if: // // 1) RegionServer A started - // 2) RegionServer B started on the same machine, then + // 2) RegionServer B started on the same machine, then // clobbered A in regionServerStartup. // 3) RegionServer A returns, expecting to work as usual. // // The answer is to ask A to shut down for good. - + if (LOG.isDebugEnabled()) { LOG.debug("region server race condition detected: " + info.getServerName()); @@ -320,7 +320,7 @@ public class ServerManager implements HConstants { removeServerInfo(info.getServerName(), info.getServerAddress()); this.serversToServerInfo.notifyAll(); } - + return new HMsg[] {HMsg.REGIONSERVER_STOP}; } else { return processRegionServerAllsWell(info, mostLoadedRegions, msgs); @@ -329,9 +329,9 @@ public class ServerManager implements HConstants { /* * Region server is exiting with a clean shutdown. - * + * * In this case, the server sends MSG_REPORT_EXITING in msgs[0] followed by - * a MSG_REPORT_CLOSE for each region it was serving. + * a MSG_REPORT_CLOSE for each region it was serving. 
* @param serverInfo * @param msgs */ @@ -367,7 +367,7 @@ public class ServerManager implements HConstants { } } } - + // There should not be any regions in transition for this server - the // server should finish transitions itself before closing Map inTransition = @@ -459,7 +459,7 @@ public class ServerManager implements HConstants { case MSG_REPORT_PROCESS_OPEN: openingCount++; break; - + case MSG_REPORT_OPEN: processRegionOpen(serverInfo, region, returnMsgs); break; @@ -472,7 +472,7 @@ public class ServerManager implements HConstants { processSplitRegion(region, incomingMsgs[++i].getRegionInfo(), incomingMsgs[++i].getRegionInfo()); break; - + case MSG_REPORT_SPLIT_INCLUDES_DAUGHTERS: processSplitRegion(region, incomingMsgs[i].getDaughterA(), incomingMsgs[i].getDaughterB()); @@ -494,7 +494,7 @@ public class ServerManager implements HConstants { } // Figure out what the RegionServer ought to do, and write back. - + // Should we tell it close regions because its overloaded? If its // currently opening regions, leave it alone till all are open. if (openingCount < this.nobalancingCount) { @@ -520,7 +520,7 @@ public class ServerManager implements HConstants { synchronized (master.getRegionManager()) { // Cancel any actions pending for the affected region. // This prevents the master from sending a SPLIT message if the table - // has already split by the region server. + // has already split by the region server. this.master.getRegionManager().endActions(region.getRegionName()); assignSplitDaughter(a); assignSplitDaughter(b); @@ -565,7 +565,7 @@ public class ServerManager implements HConstants { * @param region * @param returnMsgs */ - private void processRegionOpen(HServerInfo serverInfo, + private void processRegionOpen(HServerInfo serverInfo, HRegionInfo region, ArrayList returnMsgs) { boolean duplicateAssignment = false; synchronized (master.getRegionManager()) { @@ -586,7 +586,7 @@ public class ServerManager implements HConstants { } } else { // Not root region. If it is not a pending region, then we are - // going to treat it as a duplicate assignment, although we can't + // going to treat it as a duplicate assignment, although we can't // tell for certain that's the case. if (this.master.getRegionManager().isPendingOpen( region.getRegionNameAsString())) { @@ -596,20 +596,20 @@ public class ServerManager implements HConstants { duplicateAssignment = true; } } - + if (duplicateAssignment) { LOG.warn("region server " + serverInfo.getServerAddress().toString() + " should not have opened region " + Bytes.toString(region.getRegionName())); // This Region should not have been opened. - // Ask the server to shut it down, but don't report it as closed. - // Otherwise the HMaster will think the Region was closed on purpose, + // Ask the server to shut it down, but don't report it as closed. + // Otherwise the HMaster will think the Region was closed on purpose, // and then try to reopen it elsewhere; that's not what we want. returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_CLOSE_WITHOUT_REPORT, region, "Duplicate assignment".getBytes())); } else { if (region.isRootRegion()) { - // it was assigned, and it's not a duplicate assignment, so take it out + // it was assigned, and it's not a duplicate assignment, so take it out // of the unassigned list. 
this.master.getRegionManager().removeRegion(region); @@ -666,7 +666,7 @@ public class ServerManager implements HConstants { this.master.getRegionServerOperationQueue().put(op); } } - + /** Update a server load information because it's shutting down*/ private boolean removeServerInfo(final String serverName, final HServerAddress serverAddress) { @@ -713,9 +713,9 @@ public class ServerManager implements HConstants { } } - /** - * Compute the average load across all region servers. - * Currently, this uses a very naive computation - just uses the number of + /** + * Compute the average load across all region servers. + * Currently, this uses a very naive computation - just uses the number of * regions being served, ignoring stats about number of requests. * @return the average load */ @@ -813,7 +813,7 @@ public class ServerManager implements HConstants { } } } - + /** Watcher triggered when a RS znode is deleted */ private class ServerExpirer implements Watcher { private String server; diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/TableDelete.java b/core/src/main/java/org/apache/hadoop/hbase/master/TableDelete.java index 51000e6..1153e62 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/TableDelete.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/TableDelete.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.util.Bytes; import java.io.IOException; -/** +/** * Instantiated to delete a table. Table must be offline. */ class TableDelete extends TableOperation { @@ -63,13 +63,13 @@ class TableDelete extends TableOperation { HRegion.removeRegionFromMETA(server, m.getRegionName(), i.getRegionName()); HRegion.deleteRegion(this.master.getFileSystem(), this.master.getRootDir(), i); - + } catch (IOException e) { LOG.error("failed to delete region " + Bytes.toString(i.getRegionName()), RemoteExceptionHandler.checkIOException(e)); } } - + // delete the table's folder from fs. this.master.getFileSystem().delete(new Path(this.master.getRootDir(), Bytes.toString(this.tableName)), true); diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java b/core/src/main/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java index 07ea934..da78fe2 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java @@ -71,7 +71,7 @@ class ZKMasterAddressWatcher implements Watcher { LOG.debug("Master address ZNode deleted, notifying waiting masters"); notifyAll(); } - } else if(type.equals(EventType.NodeCreated) && + } else if(type.equals(EventType.NodeCreated) && event.getPath().equals(this.zookeeper.clusterStateZNode)) { LOG.debug("Resetting watch on cluster state node."); this.zookeeper.setClusterStateWatch(this); diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java b/core/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java index af101c6..9bea50f 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java @@ -28,7 +28,7 @@ import org.apache.hadoop.metrics.jvm.JvmMetrics; import org.apache.hadoop.metrics.util.MetricsRegistry; -/** +/** * This class is for maintaining the various master statistics * and publishing them through the metrics interfaces. *

@@ -43,7 +43,7 @@ public class MasterMetrics implements Updater { /* * Count of requests to the cluster since last call to metrics update */ - private final MetricsRate cluster_requests = + private final MetricsRate cluster_requests = new MetricsRate("cluster_requests", registry); public MasterMetrics(final String name) { @@ -58,16 +58,16 @@ public class MasterMetrics implements Updater { LOG.info("Initialized"); } - + public void shutdown() { if (masterStatistics != null) masterStatistics.shutdown(); } - + /** * Since this object is a registered updater, this method will be called * periodically, e.g. every 5 seconds. - * @param unused + * @param unused */ public void doUpdates(MetricsContext unused) { synchronized (this) { @@ -75,7 +75,7 @@ public class MasterMetrics implements Updater { } this.metricsRecord.update(); } - + public void resetAllMinMax() { // Nothing to do } @@ -86,7 +86,7 @@ public class MasterMetrics implements Updater { public float getRequests() { return this.cluster_requests.getPreviousIntervalValue(); } - + /** * @param inc How much to add to requests. */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java b/core/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java index ec3e0de..d885348 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java +++ b/core/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java @@ -32,7 +32,7 @@ public class MasterStatistics extends MetricsMBeanBase { public MasterStatistics(MetricsRegistry registry) { super(registry, "MasterStatistics"); - mbeanName = MBeanUtil.registerMBean("Master", "MasterStatistics", this); + mbeanName = MBeanUtil.registerMBean("Master", "MasterStatistics", this); } public void shutdown() { diff --git a/core/src/main/java/org/apache/hadoop/hbase/metrics/MetricsMBeanBase.java b/core/src/main/java/org/apache/hadoop/hbase/metrics/MetricsMBeanBase.java index 1f852c1..3d09f95 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/metrics/MetricsMBeanBase.java +++ b/core/src/main/java/org/apache/hadoop/hbase/metrics/MetricsMBeanBase.java @@ -38,8 +38,8 @@ import org.apache.hadoop.metrics.util.MetricsRegistry; /** * Extends the Hadoop MetricsDynamicMBeanBase class to provide JMX support for - * custom HBase MetricsBase implementations. MetricsDynamicMBeanBase ignores - * registered MetricsBase instance that are not instances of one of the + * custom HBase MetricsBase implementations. MetricsDynamicMBeanBase ignores + * registered MetricsBase instance that are not instances of one of the * org.apache.hadoop.metrics.util implementations. 
* */ @@ -50,13 +50,13 @@ public class MetricsMBeanBase extends MetricsDynamicMBeanBase { protected final MetricsRegistry registry; protected final String description; protected int registryLength; - /** HBase MetricsBase implementations that MetricsDynamicMBeanBase does - * not understand + /** HBase MetricsBase implementations that MetricsDynamicMBeanBase does + * not understand */ - protected Map extendedAttributes = + protected Map extendedAttributes = new HashMap(); protected MBeanInfo extendedInfo; - + protected MetricsMBeanBase( MetricsRegistry mr, String description ) { super(copyMinusHBaseMetrics(mr), description); this.registry = mr; @@ -87,45 +87,45 @@ public class MetricsMBeanBase extends MetricsDynamicMBeanBase { attributes.add(attr); parentAttributes.add(attr.getName()); } - + this.registryLength = this.registry.getMetricsList().size(); - + for (MetricsBase metric : this.registry.getMetricsList()) { if (metric.getName() == null || parentAttributes.contains(metric.getName())) continue; - + // add on custom HBase metric types if (metric instanceof org.apache.hadoop.hbase.metrics.MetricsRate) { - attributes.add( new MBeanAttributeInfo(metric.getName(), + attributes.add( new MBeanAttributeInfo(metric.getName(), "java.lang.Float", metric.getDescription(), true, false, false) ); extendedAttributes.put(metric.getName(), metric); } // else, its probably a hadoop metric already registered. Skip it. } - this.extendedInfo = new MBeanInfo( this.getClass().getName(), - this.description, attributes.toArray( new MBeanAttributeInfo[0] ), - parentInfo.getConstructors(), parentInfo.getOperations(), + this.extendedInfo = new MBeanInfo( this.getClass().getName(), + this.description, attributes.toArray( new MBeanAttributeInfo[0] ), + parentInfo.getConstructors(), parentInfo.getOperations(), parentInfo.getNotifications() ); } private void checkAndUpdateAttributes() { - if (this.registryLength != this.registry.getMetricsList().size()) + if (this.registryLength != this.registry.getMetricsList().size()) this.init(); } - + @Override public Object getAttribute( String name ) throws AttributeNotFoundException, MBeanException, ReflectionException { - + if (name == null) { throw new IllegalArgumentException("Attribute name is NULL"); } /* * Ugly. Since MetricsDynamicMBeanBase implementation is private, - * we need to first check the parent class for the attribute. + * we need to first check the parent class for the attribute. * In case that the MetricsRegistry contents have changed, this will * allow the parent to update it's internal structures (which we rely on * to update our own. 
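The MetricsRate hunks a little further down rely on a simple pattern: callers bump a counter, and each push converts whatever accumulated since the last push into a per-second rate before resetting. A minimal thread-safe sketch of that idea, using made-up names rather than the Hadoop metrics API:

    public class RateSketch {
      private int value;          // events accumulated since the last snapshot
      private float prevRate;     // events per second over the previous interval
      private long lastTs = System.currentTimeMillis();

      public synchronized void inc() {
        value++;
      }

      // Convert the accumulated count into a per-second rate and reset the counter;
      // this loosely mirrors MetricsRate.intervalHeartBeat() in the hunks below.
      public synchronized void snapshot() {
        long now = System.currentTimeMillis();
        long seconds = (now - lastTs) / 1000;
        if (seconds == 0) {
          seconds = 1;            // guard for back-to-back snapshots
        }
        prevRate = (float) value / seconds;
        value = 0;
        lastTs = now;
      }

      public synchronized float previousIntervalRate() {
        return prevRate;
      }

      public static void main(String[] args) throws InterruptedException {
        RateSketch clusterRequests = new RateSketch();
        for (int i = 0; i < 50; i++) {
          clusterRequests.inc();
        }
        Thread.sleep(1000);
        clusterRequests.snapshot();
        System.out.println("~" + clusterRequests.previousIntervalRate() + " req/s");
      }
    }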
@@ -133,9 +133,9 @@ public class MetricsMBeanBase extends MetricsDynamicMBeanBase { try { return super.getAttribute(name); } catch (AttributeNotFoundException ex) { - + checkAndUpdateAttributes(); - + MetricsBase metric = this.extendedAttributes.get(name); if (metric != null) { if (metric instanceof MetricsRate) { @@ -146,7 +146,7 @@ public class MetricsMBeanBase extends MetricsDynamicMBeanBase { } } } - + throw new AttributeNotFoundException(); } diff --git a/core/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java b/core/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java index 13fe5ef..fc1dc36 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java +++ b/core/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java @@ -30,12 +30,12 @@ import org.apache.hadoop.util.StringUtils; */ public class MetricsRate extends MetricsBase { private static final Log LOG = LogFactory.getLog("org.apache.hadoop.hbase.metrics"); - + private int value; private float prevRate; private long ts; - - public MetricsRate(final String name, final MetricsRegistry registry, + + public MetricsRate(final String name, final MetricsRegistry registry, final String description) { super(name, description); this.value = 0; @@ -43,19 +43,19 @@ public class MetricsRate extends MetricsBase { this.ts = System.currentTimeMillis(); registry.add(name, this); } - + public MetricsRate(final String name, final MetricsRegistry registry) { this(name, registry, NO_DESCRIPTION); } - + public synchronized void inc(final int incr) { value += incr; } - + public synchronized void inc() { value++; } - + private synchronized void intervalHeartBeat() { long now = System.currentTimeMillis(); long diff = (now-ts)/1000; @@ -64,18 +64,18 @@ public class MetricsRate extends MetricsBase { this.value = 0; this.ts = now; } - + @Override public synchronized void pushMetric(final MetricsRecord mr) { intervalHeartBeat(); try { mr.setMetric(getName(), getPreviousIntervalValue()); } catch (Exception e) { - LOG.info("pushMetric failed for " + getName() + "\n" + + LOG.info("pushMetric failed for " + getName() + "\n" + StringUtils.stringifyException(e)); } } - + public synchronized float getPreviousIntervalValue() { return this.prevRate; } diff --git a/core/src/main/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java b/core/src/main/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java index a5ffc6e..000e0d3 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java +++ b/core/src/main/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java @@ -40,7 +40,7 @@ public class TimeStampingFileContext extends FileContext { private File file = null; private PrintWriter writer = null; private final SimpleDateFormat sdf; - + public TimeStampingFileContext() { super(); this.sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java index 549b1fe..1be0280 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java @@ -29,7 +29,7 @@ public class ColumnCount { private final int offset; private final int length; private int count; - + /** * Constructor * @param column the qualifier to count the versions for @@ -37,7 +37,7 @@ public class ColumnCount { public ColumnCount(byte [] 
column) { this(column, 0); } - + /** * Constructor * @param column the qualifier to count the versions for @@ -46,7 +46,7 @@ public class ColumnCount { public ColumnCount(byte [] column, int count) { this(column, 0, column.length, count); } - + /** * Constuctor * @param column the qualifier to count the versions for @@ -60,28 +60,28 @@ public class ColumnCount { this.length = length; this.count = count; } - + /** * @return the buffer */ public byte [] getBuffer(){ return this.bytes; } - + /** * @return the offset */ public int getOffset(){ return this.offset; } - + /** * @return the length */ public int getLength(){ return this.length; - } - + } + /** * Decrement the current version count * @return current count @@ -104,9 +104,9 @@ public class ColumnCount { */ public void setCount(int count) { this.count = count; - } + } + - /** * Check to see if needed to fetch more versions * @param max diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java index dfb3026..35b03a1 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode; /** * Implementing classes of this interface will be used for the tracking - * and enforcement of columns and numbers of versions during the course of a + * and enforcement of columns and numbers of versions during the course of a * Get or Scan operation. *

* Currently there are two different types of Store/Family-level queries. @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode; * what action should be taken. *

  • {@link #update} is called at the end of every StoreFile or memstore. *

    - * This class is NOT thread-safe as queries are never multi-threaded + * This class is NOT thread-safe as queries are never multi-threaded */ public interface ColumnTracker { /** @@ -49,19 +49,19 @@ public interface ColumnTracker { * @return The match code instance. */ public MatchCode checkColumn(byte [] bytes, int offset, int length); - + /** * Updates internal variables in between files */ public void update(); - + /** * Resets the Matcher */ public void reset(); - + /** - * + * * @return true when done. */ public boolean done(); diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 0bc0513..41509e2 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -37,23 +37,23 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantLock; -/** +/** * Compact region on request and then run split if appropriate */ class CompactSplitThread extends Thread implements HConstants { static final Log LOG = LogFactory.getLog(CompactSplitThread.class); - + private HTable root = null; private HTable meta = null; private final long frequency; private final ReentrantLock lock = new ReentrantLock(); - + private final HRegionServer server; private final Configuration conf; - + private final BlockingQueue compactionQueue = new LinkedBlockingQueue(); - + private final HashSet regionsInQueue = new HashSet(); /** @param server */ @@ -65,7 +65,7 @@ class CompactSplitThread extends Thread implements HConstants { conf.getLong("hbase.regionserver.thread.splitcompactcheckfrequency", 20 * 1000); } - + @Override public void run() { int count = 0; @@ -144,7 +144,7 @@ class CompactSplitThread extends Thread implements HConstants { } } } - + private void split(final HRegion region, final byte [] midKey) throws IOException { final HRegionInfo oldRegionInfo = region.getRegionInfo(); @@ -154,7 +154,7 @@ class CompactSplitThread extends Thread implements HConstants { // Didn't need to be split return; } - + // When a region is split, the META table needs to updated if we're // splitting a 'normal' region, and the ROOT table needs to be // updated if we are splitting a META region. @@ -181,14 +181,14 @@ class CompactSplitThread extends Thread implements HConstants { this.server.removeFromOnlineRegions(oldRegionInfo); Put put = new Put(oldRegionInfo.getRegionName()); - put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, + put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(oldRegionInfo)); put.add(CATALOG_FAMILY, SPLITA_QUALIFIER, Writables.getBytes(newRegions[0].getRegionInfo())); put.add(CATALOG_FAMILY, SPLITB_QUALIFIER, Writables.getBytes(newRegions[1].getRegionInfo())); t.put(put); - + // If we crash here, then the daughters will not be added and we'll have // and offlined parent but no daughters to take up the slack. hbase-2244 // adds fixup to the metascanners. @@ -200,7 +200,7 @@ class CompactSplitThread extends Thread implements HConstants { newRegions[i].getRegionInfo())); t.put(put); } - + // If we crash here, the master will not know of the new daughters and they // will not be assigned. The metascanner when it runs will notice and take // care of assigning the new daughters. 
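CompactSplitThread, in the hunks above, pairs a BlockingQueue of compaction requests with a HashSet of the regions currently queued (regionsInQueue), which is the usual way to keep a region from being enqueued twice. A small stand-alone sketch of that queue-plus-membership-set pattern; the names are made up, and the timed poll is just one way to let the worker notice a stop flag:

    import java.util.HashSet;
    import java.util.Set;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class CompactionQueueSketch {
      private final BlockingQueue<String> queue = new LinkedBlockingQueue<>();
      private final Set<String> inQueue = new HashSet<>();   // regions with a pending request
      private volatile boolean stopped = false;

      // Enqueue a region unless a request for it is already pending.
      public synchronized void request(String regionName) {
        if (inQueue.add(regionName)) {
          queue.add(regionName);
        }
      }

      // Worker loop: a timed poll lets the thread re-check the stop flag regularly.
      public void work() throws InterruptedException {
        while (!stopped) {
          String region = queue.poll(1, TimeUnit.SECONDS);
          if (region == null) {
            continue;
          }
          synchronized (this) {
            inQueue.remove(region);
          }
          System.out.println("compacting " + region);   // real compaction work would go here
        }
      }

      public void stop() {
        stopped = true;
      }

      public static void main(String[] args) throws InterruptedException {
        CompactionQueueSketch sketch = new CompactionQueueSketch();
        sketch.request("region-a");
        sketch.request("region-a");   // dropped: a request for region-a is already pending
        sketch.request("region-b");

        Thread worker = new Thread(() -> {
          try {
            sketch.work();
          } catch (InterruptedException ignored) {
            // exit quietly
          }
        });
        worker.start();
        Thread.sleep(500);
        sketch.stop();
        worker.join();
      }
    }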
@@ -218,20 +218,20 @@ class CompactSplitThread extends Thread implements HConstants { /** * Only interrupt once it's done with a run through the work loop. - */ + */ void interruptIfNecessary() { if (lock.tryLock()) { this.interrupt(); } } - + /** - * Returns the current size of the queue containing regions that are - * processed. + * Returns the current size of the queue containing regions that are + * processed. * * @return The current size of the regions queue. */ public int getCompactionQueueSize() { - return compactionQueue.size(); - } + return compactionQueue.size(); + } } \ No newline at end of file diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java index 16ca9ae..11da530 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java @@ -25,10 +25,10 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * Class that provides static method needed when putting deletes into memstore + * Class that provides static method needed when putting deletes into memstore */ public class DeleteCompare { - + /** * Return codes from deleteCompare. */ @@ -37,12 +37,12 @@ public class DeleteCompare { * Do nothing. Move to next KV in memstore */ SKIP, - + /** * Add to the list of deletes. */ DELETE, - + /** * Stop looking at KVs in memstore. Finalize. */ @@ -134,5 +134,5 @@ public class DeleteCompare { } else { return DeleteCode.SKIP; } - } + } } diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java index 2c86caf..b425bf2 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java @@ -26,10 +26,10 @@ package org.apache.hadoop.hbase.regionserver; * This class is utilized through three methods: *

    • {@link #add} when encountering a Delete *
    • {@link #isDeleted} when checking if a Put KeyValue has been deleted - *
    • {@link #update} when reaching the end of a StoreFile + *
    • {@link #update} when reaching the end of a StoreFile */ public interface DeleteTracker { - + /** * Add the specified KeyValue to the list of deletes to check against for * this row operation. @@ -43,7 +43,7 @@ public interface DeleteTracker { */ public void add(byte [] buffer, int qualifierOffset, int qualifierLength, long timestamp, byte type); - + /** * Check if the specified KeyValue buffer has been deleted by a previously * seen delete. @@ -55,12 +55,12 @@ public interface DeleteTracker { */ public boolean isDeleted(byte [] buffer, int qualifierOffset, int qualifierLength, long timestamp); - + /** * @return true if there are no current delete, false otherwise */ public boolean isEmpty(); - + /** * Called at the end of every StoreFile. *

      @@ -68,14 +68,14 @@ public interface DeleteTracker { * when the end of each StoreFile is reached. */ public void update(); - + /** * Called between rows. *

      * This clears everything as if a new DeleteTracker was instantiated. */ public void reset(); - + /** * Return codes for comparison of two Deletes. @@ -85,7 +85,7 @@ public interface DeleteTracker { * INCLUDE means add the specified Delete to the merged list. * NEXT means move to the next element in the specified list(s). */ - enum DeleteCompare { + enum DeleteCompare { INCLUDE_OLD_NEXT_OLD, INCLUDE_OLD_NEXT_BOTH, INCLUDE_NEW_NEXT_NEW, @@ -93,5 +93,5 @@ public interface DeleteTracker { NEXT_OLD, NEXT_NEW } - + } diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java index d814f90..fd07ed5 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode; import org.apache.hadoop.hbase.util.Bytes; /** - * This class is used for the tracking and enforcement of columns and numbers + * This class is used for the tracking and enforcement of columns and numbers * of versions during the course of a Get or Scan operation, when explicit * column qualifiers have been asked for in the query. * @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes; * for both scans and gets. The main difference is 'next' and 'done' collapse * for the scan case (since we see all columns in order), and we only reset * between rows. - * + * *

      * This class is utilized by {@link QueryMatcher} through two methods: *

      • {@link #checkColumn} is called when a Put satisfies all other @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.util.Bytes; * what action should be taken. *
      • {@link #update} is called at the end of every StoreFile or memstore. *
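The ColumnTracker/ExplicitColumnTracker javadoc in the surrounding hunks describes a small state machine: checkColumn is handed each candidate qualifier and answers with a match code (include it, skip it, or signal that the query is done), while update/reset clear per-file and per-row state. A toy version of that idea over sorted byte[] qualifiers, with an invented MatchCode enum standing in for QueryMatcher.MatchCode:

    import java.util.Arrays;
    import java.util.List;

    public class ColumnTrackerSketch {
      enum MatchCode { INCLUDE, SKIP, DONE }   // stand-in for QueryMatcher.MatchCode

      private final List<byte[]> wanted;       // columns the query asked for, in sorted order
      private int index;                       // next wanted column to look for

      ColumnTrackerSketch(List<byte[]> sortedQualifiers) {
        this.wanted = sortedQualifiers;
      }

      // Decide what to do with the qualifier the scan is currently positioned on.
      MatchCode checkColumn(byte[] qualifier) {
        while (index < wanted.size()) {
          // Lexicographic compare (Java 9+); Bytes.compareTo plays this role in the real code.
          int cmp = Arrays.compare(wanted.get(index), qualifier);
          if (cmp == 0) return MatchCode.INCLUDE;   // exactly the column we want
          if (cmp > 0) return MatchCode.SKIP;       // not yet at the next wanted column
          index++;                                  // wanted column was passed; try the next one
        }
        return MatchCode.DONE;                      // nothing left to match: caller can stop
      }

      boolean done() { return index >= wanted.size(); }

      void reset() { index = 0; }                   // called between rows

      public static void main(String[] args) {
        ColumnTrackerSketch tracker = new ColumnTrackerSketch(
            Arrays.asList("b".getBytes(), "d".getBytes()));
        for (String q : new String[] {"a", "b", "c", "d", "e"}) {
          System.out.println(q + " -> " + tracker.checkColumn(q.getBytes()));
        }
      }
    }

For a query asking for qualifiers b and d, the loop above prints SKIP, INCLUDE, SKIP, INCLUDE, DONE, which is the collapse of "next" and "done" into the scan ordering that the javadoc describes.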

        - * This class is NOT thread-safe as queries are never multi-threaded + * This class is NOT thread-safe as queries are never multi-threaded */ public class ExplicitColumnTracker implements ColumnTracker { @@ -51,7 +51,7 @@ public class ExplicitColumnTracker implements ColumnTracker { private final List columnsToReuse; private int index; private ColumnCount column; - + /** * Default constructor. * @param columns columns specified user in query @@ -66,7 +66,7 @@ public class ExplicitColumnTracker implements ColumnTracker { } reset(); } - + /** * Done when there are no more columns to match against. */ @@ -77,7 +77,7 @@ public class ExplicitColumnTracker implements ColumnTracker { public ColumnCount getColumnHint() { return this.column; } - + /** * Checks against the parameters of the query and the columns which have * already been processed by this query. @@ -135,7 +135,7 @@ public class ExplicitColumnTracker implements ColumnTracker { } } while(true); } - + /** * Called at the end of every StoreFile or memstore. */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java index 86e0d2d..38ac209 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java @@ -29,7 +29,7 @@ package org.apache.hadoop.hbase.regionserver; public interface FlushRequester { /** * Tell the listener the cache needs to be flushed. - * + * * @param region the HRegion requesting the cache flush */ void request(HRegion region); diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java index ecd44f7..52fbbd3 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java @@ -129,7 +129,7 @@ class GetClosestRowBeforeTracker { return isDeleted(kv, rowdeletes); } - /** + /** * Check if the specified KeyValue buffer has been deleted by a previously * seen delete. * @param kv @@ -237,4 +237,4 @@ class GetClosestRowBeforeTracker { this.tablenamePlusDelimiterLength, kv.getBuffer(), kv.getRowOffset(), this.tablenamePlusDelimiterLength) == 0; } -} \ No newline at end of file +} \ No newline at end of file diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java index b865f50..8e68d65 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.util.Bytes; *

      • {@link #isDeleted} when checking if a Put KeyValue has been deleted *
      • {@link #update} when reaching the end of a StoreFile *

        - * This class is NOT thread-safe as queries are never multi-threaded + * This class is NOT thread-safe as queries are never multi-threaded */ public class GetDeleteTracker implements DeleteTracker { private static long UNSET = -1L; @@ -76,7 +76,7 @@ public class GetDeleteTracker implements DeleteTracker { } } - /** + /** * Check if the specified KeyValue buffer has been deleted by a previously * seen delete. * @param buffer KeyValue buffer @@ -100,7 +100,7 @@ public class GetDeleteTracker implements DeleteTracker { // Check column int ret = Bytes.compareTo(buffer, qualifierOffset, qualifierLength, - this.delete.buffer, this.delete.qualifierOffset, + this.delete.buffer, this.delete.qualifierOffset, this.delete.qualifierLength); while (ret != 0) { if (ret <= -1) { @@ -120,7 +120,7 @@ public class GetDeleteTracker implements DeleteTracker { } } - + // Check Timestamp if(timestamp > this.delete.timestamp) { return false; @@ -186,7 +186,7 @@ public class GetDeleteTracker implements DeleteTracker { } // Merge previous deletes with new deletes - List mergeDeletes = + List mergeDeletes = new ArrayList(this.newDeletes.size()); int oldIndex = 0; int newIndex = 0; @@ -295,7 +295,7 @@ public class GetDeleteTracker implements DeleteTracker { } } - private void mergeDown(List mergeDeletes, List srcDeletes, + private void mergeDown(List mergeDeletes, List srcDeletes, int srcIndex) { int index = srcIndex; while(index < srcDeletes.size()) { @@ -335,7 +335,7 @@ public class GetDeleteTracker implements DeleteTracker { } if(oldDelete.timestamp < newDelete.timestamp) { return DeleteCompare.INCLUDE_NEW_NEXT_BOTH; - } + } return DeleteCompare.INCLUDE_OLD_NEXT_BOTH; } diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 9f5a315..35fef19 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -77,9 +77,9 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; * for each row. A given table consists of one or more HRegions. * *

        We maintain multiple HStores for a single HRegion. - * + * *

        An Store is a set of rows with some column data; together, - * they make up all the data for the rows. + * they make up all the data for the rows. * *

        Each HRegion has a 'startKey' and 'endKey'. *

        The first is inclusive, the second is exclusive (except for @@ -95,15 +95,15 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; * constructed, it holds a read lock until it is closed. A close takes out a * write lock and consequently will block for ongoing operations and will block * new operations from starting while the close is in progress. - * + * *

        An HRegion is defined by its table and its key extent. - * + * *

        It consists of at least one Store. The number of Stores should be * configurable, so that data which is accessed together is stored in the same - * Store. Right now, we approximate that by building a single Store for - * each column family. (This config info will be communicated via the + * Store. Right now, we approximate that by building a single Store for + * each column family. (This config info will be communicated via the * tabledesc.) - * + * *

        The HTableDescriptor contains metainfo about the HRegion's table. * regionName is a unique identifier for this HRegion. (startKey, endKey] * defines the keyspace for this HRegion. @@ -113,8 +113,8 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ static final String SPLITDIR = "splits"; static final String MERGEDIR = "merges"; final AtomicBoolean closed = new AtomicBoolean(false); - /* Closing can take some time; use the closing flag if there is stuff we don't - * want to do while in closing state; e.g. like offer this region up to the + /* Closing can take some time; use the closing flag if there is stuff we don't + * want to do while in closing state; e.g. like offer this region up to the * master as a region to close if the carrying regionserver is overloaded. * Once set, it is never cleared. */ @@ -133,13 +133,13 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ protected final Map stores = new ConcurrentSkipListMap(Bytes.BYTES_RAWCOMPARATOR); - + //These variable are just used for getting data out of the region, to test on //client side // private int numStores = 0; // private int [] storeSize = null; // private byte [] name = null; - + final AtomicLong memstoreSize = new AtomicLong(0); // This is the table subdirectory. @@ -181,7 +181,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ this.writesEnabled = !onOff; this.readOnly = onOff; } - + boolean isReadOnly() { return this.readOnly; } @@ -201,7 +201,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // Used to guard splits and closes private final ReentrantReadWriteLock splitsAndClosesLock = new ReentrantReadWriteLock(); - private final ReentrantReadWriteLock newScannerLock = + private final ReentrantReadWriteLock newScannerLock = new ReentrantReadWriteLock(); // Stop updates lock @@ -210,7 +210,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ private final Object splitLock = new Object(); private long minSequenceId; private boolean splitRequest; - + /** * Name of the region info file that resides just under the region directory. */ @@ -238,7 +238,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ this.regionInfo = null; this.threadWakeFrequency = 0L; } - + /** * HRegion constructor. * @@ -251,7 +251,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * appropriate log info for this HRegion. If there is a previous log file * (implying that the HRegion has been written-to before), then read it from * the supplied path. - * @param fs is the filesystem. + * @param fs is the filesystem. * @param conf is global configuration settings. * @param regionInfo - HRegionInfo that describes the region * is new), then read them from the supplied path. @@ -259,7 +259,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * making progress to master -- otherwise master might think region deploy * failed. Can be null. */ - public HRegion(Path basedir, HLog log, FileSystem fs, Configuration conf, + public HRegion(Path basedir, HLog log, FileSystem fs, Configuration conf, HRegionInfo regionInfo, FlushRequester flushListener) { this.basedir = basedir; this.comparator = regionInfo.getComparator(); @@ -291,7 +291,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Initialize this region and get it ready to roll. * Called after construction. 
- * + * * @param initialFiles * @param reporter * @throws IOException @@ -308,7 +308,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // Load in all the HStores. long maxSeqId = -1; long minSeqIdToRecover = Integer.MAX_VALUE; - + for (HColumnDescriptor c : this.regionInfo.getTableDesc().getFamilies()) { Store store = instantiateHStore(this.basedir, c, oldLogFile, reporter); this.stores.put(c.getName(), store); @@ -316,7 +316,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ if (storeSeqId > maxSeqId) { maxSeqId = storeSeqId; } - + long storeSeqIdBeforeRecovery = store.getMaxSeqIdBeforeLogRecovery(); if (storeSeqIdBeforeRecovery < minSeqIdToRecover) { minSeqIdToRecover = storeSeqIdBeforeRecovery; @@ -331,7 +331,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } fs.delete(oldLogFile, false); } - + // Add one to the current maximum sequence id so new edits are beyond. this.minSequenceId = maxSeqId + 1; @@ -390,7 +390,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // Name of this file has two leading and trailing underscores so it doesn't // clash w/ a store/family name. There is possibility, but assumption is // that its slim (don't want to use control character in filename because - // + // Path regioninfo = new Path(this.regiondir, REGIONINFO_FILE); if (this.fs.exists(regioninfo) && this.fs.getFileStatus(regioninfo).getLen() > 0) { @@ -414,7 +414,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ long getMinSequenceId() { return this.minSequenceId; } - + /** @return a HRegionInfo object for this region */ public HRegionInfo getRegionInfo() { return this.regionInfo; @@ -424,7 +424,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ public boolean isClosed() { return this.closed.get(); } - + /** * @return True if closing process has started. */ @@ -433,16 +433,16 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } /** - * Close down this HRegion. Flush the cache, shut down each HStore, don't + * Close down this HRegion. Flush the cache, shut down each HStore, don't * service any more calls. * - *

        This method could take some time to execute, so don't call it from a + *

        This method could take some time to execute, so don't call it from a * time-sensitive thread. - * - * @return Vector of all the storage files that the HRegion's component + * + * @return Vector of all the storage files that the HRegion's component * HStores make use of. It's a list of all HStoreFile objects. Returns empty * vector if already closed and null if judged that it should not close. - * + * * @throws IOException */ public List close() throws IOException { @@ -453,14 +453,14 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * Close down this HRegion. Flush the cache unless abort parameter is true, * Shut down each HStore, don't service any more calls. * - * This method could take some time to execute, so don't call it from a + * This method could take some time to execute, so don't call it from a * time-sensitive thread. - * + * * @param abort true if server is aborting (only during testing) - * @return Vector of all the storage files that the HRegion's component + * @return Vector of all the storage files that the HRegion's component * HStores make use of. It's a list of HStoreFile objects. Can be null if * we are not to close at this time or we are already closed. - * + * * @throws IOException */ public List close(final boolean abort) throws IOException { @@ -508,12 +508,12 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // outstanding updates. waitOnRowLocks(); LOG.debug("No more row locks outstanding on region " + this); - + // Don't flush the cache if we are aborting if (!abort) { internalFlushcache(); } - + List result = new ArrayList(); for (Store store: stores.values()) { result.addAll(store.close()); @@ -596,11 +596,11 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ public long getLastFlushTime() { return this.lastFlushTime; } - + ////////////////////////////////////////////////////////////////////////////// - // HRegion maintenance. + // HRegion maintenance. // - // These methods are meant to be called periodically by the HRegionServer for + // These methods are meant to be called periodically by the HRegionServer for // upkeep. ////////////////////////////////////////////////////////////////////////////// @@ -719,11 +719,11 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } return d; } - + protected void prepareToSplit() { // nothing } - + /* * @param dir * @return compaction directory for the passed in dir @@ -740,7 +740,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ private void doRegionCompactionPrep() throws IOException { doRegionCompactionCleanup(); } - + /* * Removes the compaction directory for this Store. * @throws IOException @@ -761,13 +761,13 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * Called by compaction thread and after region is opened to compact the * HStores if necessary. * - *

        This operation could block for a long time, so don't call it from a + *

        This operation could block for a long time, so don't call it from a * time-sensitive thread. * * Note that no locking is necessary at this level because compaction only * conflicts with a region split, and that cannot happen because the region * server does them sequentially and not in parallel. - * + * * @return mid key if split is needed * @throws IOException */ @@ -781,13 +781,13 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * Called by compaction thread and after region is opened to compact the * HStores if necessary. * - *

        This operation could block for a long time, so don't call it from a + *

        This operation could block for a long time, so don't call it from a * time-sensitive thread. * * Note that no locking is necessary at this level because compaction only * conflicts with a region split, and that cannot happen because the region * server does them sequentially and not in parallel. - * + * * @param majorCompaction True to force a major compaction regardless of thresholds * @return split row if split is needed * @throws IOException @@ -815,7 +815,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ return splitRow; } } - LOG.info("Starting" + (majorCompaction? " major " : " ") + + LOG.info("Starting" + (majorCompaction? " major " : " ") + "compaction on region " + this); long startTime = System.currentTimeMillis(); doRegionCompactionPrep(); @@ -828,7 +828,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } doRegionCompactionCleanup(); - String timeTaken = StringUtils.formatTimeDiff(System.currentTimeMillis(), + String timeTaken = StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime); LOG.info("compaction completed on region " + this + " in " + timeTaken); } finally { @@ -845,7 +845,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Flush the cache. - * + * * When this method is called the cache will be flushed unless: *

          *
        1. the cache is empty
        2. @@ -854,11 +854,11 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ *
        3. writes are disabled
        4. *
        * - *

        This method may block for some time, so it should not be called from a + *

        This method may block for some time, so it should not be called from a * time-sensitive thread. - * + * * @return true if cache was flushed - * + * * @throws IOException * @throws DroppedSnapshotException Thrown when replay of hlog is required * because a Snapshot was not properly persisted. @@ -877,7 +877,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ writestate.flushing + ", writesEnabled=" + writestate.writesEnabled); } - return false; + return false; } } try { @@ -906,25 +906,25 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * flushed. (That way, during recovery, we know when we can rely on the * on-disk flushed structures and when we have to recover the memstore from * the log.) - * + * *

        So, we have a three-step process: - * + * *

        • A. Flush the memstore to the on-disk stores, noting the current * sequence ID for the log.
        • - * + * *
        • B. Write a FLUSHCACHE-COMPLETE message to the log, using the sequence * ID that was current at the time of memstore-flush.
        • - * + * *
        • C. Get rid of the memstore structures that are now redundant, as * they've been flushed to the on-disk HStores.
        • *
        *

        This method is protected, but can be accessed via several public * routes. - * + * *

        This method may block for some time. - * + * * @return true if the region needs compacting - * + * * @throws IOException * @throws DroppedSnapshotException Thrown when replay of hlog is required * because a Snapshot was not properly persisted. @@ -1002,7 +1002,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // B. Write a FLUSHCACHE-COMPLETE message to the log. // This tells future readers that the HStores were emitted correctly, - // and that all updates to the log for this regionName that have lower + // and that all updates to the log for this regionName that have lower // log-sequence-ids can be safely ignored. this.log.completeCacheFlush(getRegionName(), regionInfo.getTableDesc().getName(), completeSequenceId, @@ -1013,7 +1013,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ synchronized (this) { notifyAll(); // FindBugs NN_NAKED_NOTIFY } - + if (LOG.isDebugEnabled()) { long now = System.currentTimeMillis(); LOG.debug("Finished memstore flush of ~" + @@ -1023,27 +1023,27 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } return compactionRequested; } - + /** * Get the sequence number to be associated with this cache flush. Used by * TransactionalRegion to not complete pending transactions. - * - * + * + * * @param currentSequenceId * @return sequence id to complete the cache flush with - */ + */ protected long getCompleteCacheFlushSequenceId(long currentSequenceId) { return currentSequenceId; } - + ////////////////////////////////////////////////////////////////////////////// // get() methods for client use. ////////////////////////////////////////////////////////////////////////////// /** - * Return all the data for the row that matches row exactly, - * or the one that immediately preceeds it, at or immediately before + * Return all the data for the row that matches row exactly, + * or the one that immediately preceeds it, at or immediately before * ts. - * + * * @param row row key * @return map of values * @throws IOException @@ -1054,10 +1054,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } /** - * Return all the data for the row that matches row exactly, - * or the one that immediately preceeds it, at or immediately before + * Return all the data for the row that matches row exactly, + * or the one that immediately preceeds it, at or immediately before * ts. - * + * * @param row row key * @param family * @return map of values @@ -1089,7 +1089,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } /** - * Return an iterator that scans over the HRegion, returning the indicated + * Return an iterator that scans over the HRegion, returning the indicated * columns and rows specified by the {@link Scan}. *

        * This Iterator must be closed by the caller. @@ -1102,7 +1102,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ throws IOException { return getScanner(scan, null); } - + protected InternalScanner getScanner(Scan scan, List additionalScanners) throws IOException { newScannerLock.readLock().lock(); try { @@ -1120,7 +1120,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } return new RegionScanner(scan, additionalScanners); - + } finally { newScannerLock.readLock().unlock(); } @@ -1169,8 +1169,8 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ splitsAndClosesLock.readLock().unlock(); } } - - + + /** * @param familyMap map of family to edits for the given family. * @param writeToWAL @@ -1187,10 +1187,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ for (Map.Entry> e : familyMap.entrySet()) { - byte[] family = e.getKey(); + byte[] family = e.getKey(); List kvs = e.getValue(); Map kvCount = new TreeMap(Bytes.BYTES_COMPARATOR); - + Store store = getStore(family); for (KeyValue kv: kvs) { // Check if time is LATEST, change to time of most recent addition if so @@ -1262,9 +1262,9 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // for (Map.Entry> e : familyMap.entrySet()) { - byte[] family = e.getKey(); + byte[] family = e.getKey(); List kvs = e.getValue(); - + Store store = getStore(family); for (KeyValue kv: kvs) { size = this.memstoreSize.addAndGet(store.delete(kv)); @@ -1279,7 +1279,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ requestFlush(); } } - + /** * @param put * @throws IOException @@ -1287,7 +1287,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ public void put(Put put) throws IOException { this.put(put, null, put.getWriteToWAL()); } - + /** * @param put * @param writeToWAL @@ -1317,7 +1317,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ checkReadOnly(); // Do a rough check that we have resources to accept a write. The check is - // 'rough' in that between the resource check and the call to obtain a + // 'rough' in that between the resource check and the call to obtain a // read lock, resources may run out. For now, the thought is that this // will be extremely rare; we'll deal with it when it happens. checkResources(); @@ -1343,13 +1343,13 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } - - //TODO, Think that gets/puts and deletes should be refactored a bit so that + + //TODO, Think that gets/puts and deletes should be refactored a bit so that //the getting of the lock happens before, so that you would just pass it into - //the methods. So in the case of checkAndPut you could just do lockRow, + //the methods. 
So in the case of checkAndPut you could just do lockRow, //get, put, unlockRow or something /** - * + * * @param row * @param family * @param qualifier @@ -1361,10 +1361,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * @return true if the new put was execute, false otherwise */ public boolean checkAndPut(byte [] row, byte [] family, byte [] qualifier, - byte [] expectedValue, Put put, Integer lockId, boolean writeToWAL) + byte [] expectedValue, Put put, Integer lockId, boolean writeToWAL) throws IOException{ checkReadOnly(); - //TODO, add check for value length or maybe even better move this to the + //TODO, add check for value length or maybe even better move this to the //client if this becomes a global setting checkResources(); splitsAndClosesLock.readLock().lock(); @@ -1376,7 +1376,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ byte [] now = Bytes.toBytes(System.currentTimeMillis()); // Lock row - Integer lid = getLock(lockId, get.getRow()); + Integer lid = getLock(lockId, get.getRow()); List result = new ArrayList(); try { //Getting data @@ -1396,7 +1396,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ if (matches) { // All edits for the given row (across all column families) must happen atomically. put(put.getFamilyMap(), writeToWAL); - return true; + return true; } return false; } finally { @@ -1404,10 +1404,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } finally { splitsAndClosesLock.readLock().unlock(); - } + } } - - + + /** * Checks if any stamps is Long.MAX_VALUE. If so, sets them to now. *

        @@ -1427,10 +1427,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } return true; } - + // /* -// * Utility method to verify values length. +// * Utility method to verify values length. // * @param batchUpdate The update to verify // * @throws IOException Thrown if a value is too long // */ @@ -1438,7 +1438,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // throws IOException { // Map> families = put.getFamilyMap(); // for(Map.Entry> entry : families.entrySet()) { -// HColumnDescriptor hcd = +// HColumnDescriptor hcd = // this.regionInfo.getTableDesc().getFamily(entry.getKey()); // int maxLen = hcd.getMaxValueLength(); // for(KeyValue kv : entry.getValue()) { @@ -1453,7 +1453,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /* * Check if resources to support an update. - * + * * Here we synchronize on HRegion, a broad scoped lock. Its appropriate * given we're figuring in here whether this region is able to take on * writes. This is only method with a synchronize (at time of writing), @@ -1500,7 +1500,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } - /** + /** * Add updates first to the hlog and then add values to memstore. * Warning: Assumption is caller has lock on passed in row. * @param edits Cell updates by column @@ -1514,7 +1514,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ this.put(familyMap, true); } - /** + /** * Add updates first to the hlog (if writeToWal) and then add values to memstore. * Warning: Assumption is caller has lock on passed in row. * @param familyMap map of family to edits for the given family. @@ -1530,7 +1530,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ this.updatesLock.readLock().lock(); try { WALEdit walEdit = new WALEdit(); - + // check if column families are valid; // check if any timestampupdates are needed; // and if writeToWAL is set, then also collapse edits into a single list. @@ -1544,7 +1544,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // update timestamp on keys if required. if (updateKeys(edits, byteNow)) { if (writeToWAL) { - // bunch up all edits across all column families into a + // bunch up all edits across all column families into a // single WALEdit. for (KeyValue kv : edits) { walEdit.add(kv); @@ -1570,7 +1570,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ long size = 0; // now make changes to the memstore for (Map.Entry> e : familyMap.entrySet()) { - byte[] family = e.getKey(); + byte[] family = e.getKey(); List edits = e.getValue(); Store store = getStore(family); @@ -1621,7 +1621,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // Used by subclasses; e.g. THBase. } - protected Store instantiateHStore(Path baseDir, + protected Store instantiateHStore(Path baseDir, HColumnDescriptor c, Path oldLogFile, Progressable reporter) throws IOException { return new Store(baseDir, this, c, this.fs, oldLogFile, @@ -1636,7 +1636,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * TODO: Make this lookup faster. 
*/ public Store getStore(final byte [] column) { - return this.stores.get(column); + return this.stores.get(column); } ////////////////////////////////////////////////////////////////////////////// @@ -1669,10 +1669,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * But it acts as a guard on the client; a miswritten client just can't * submit the name of a row and start writing to it; it must know the correct * lockid, which matches the lock list in memory. - * - *
It would be more memory-efficient to assume a correctly-written client, + * + *
        It would be more memory-efficient to assume a correctly-written client, * which maybe we'll do in the future. - * + * * @param row Name of row to lock. * @throws IOException * @return The id of the held lock. @@ -1695,7 +1695,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // generate a new lockid. Attempt to insert the new [lockid, row]. // if this lockid already exists in the map then revert and retry // We could have first done a lockIds.get, and if it does not exist only - // then do a lockIds.put, but the hope is that the lockIds.put will + // then do a lockIds.put, but the hope is that the lockIds.put will // mostly return null the first time itself because there won't be // too many lockId collisions. byte [] prev = null; @@ -1717,7 +1717,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ splitsAndClosesLock.readLock().unlock(); } } - + /** * Used by unit tests. * @param lockid @@ -1728,8 +1728,8 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ return lockIds.get(lockid); } } - - /** + + /** * Release the row lock! * @param lockid The lock ID to release. */ @@ -1740,7 +1740,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ lockedRows.notifyAll(); } } - + /** * See if row is currently locked. * @param lockid @@ -1754,14 +1754,14 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ return false; } } - + /** * Returns existing row lock if found, otherwise * obtains a new row lock and returns it. * @param lockid * @return lockid */ - private Integer getLock(Integer lockid, byte [] row) + private Integer getLock(Integer lockid, byte [] row) throws IOException { Integer lid = null; if (lockid == null) { @@ -1774,7 +1774,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } return lid; } - + private void waitOnRowLocks() { synchronized (lockedRows) { while (!this.lockedRows.isEmpty()) { @@ -1789,7 +1789,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } } - + @Override public boolean equals(Object o) { if (!(o instanceof HRegion)) { @@ -1797,12 +1797,12 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } return this.hashCode() == ((HRegion)o).hashCode(); } - + @Override public int hashCode() { return Bytes.hashCode(this.regionInfo.getRegionName()); } - + @Override public String toString() { return this.regionInfo.getRegionNameAsString(); @@ -1834,20 +1834,20 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } else { this.stopRow = scan.getStopRow(); } - + List scanners = new ArrayList(); if (additionalScanners != null) { scanners.addAll(additionalScanners); } - for (Map.Entry> entry : + for (Map.Entry> entry : scan.getFamilyMap().entrySet()) { Store store = stores.get(entry.getKey()); scanners.add(store.getScanner(scan, entry.getValue())); } - this.storeHeap = + this.storeHeap = new KeyValueHeap(scanners.toArray(new KeyValueScanner[0]), comparator); } - + RegionScanner(Scan scan) { this(scan, null); } @@ -1912,7 +1912,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ filterCurrentRow = false; // See if we passed stopRow if (this.stopRow != null && - comparator.compareRows(this.stopRow, 0, this.stopRow.length, + comparator.compareRows(this.stopRow, 0, this.stopRow.length, currentRow, 0, currentRow.length) <= 0) { return false; } @@ -1960,7 +1960,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } /** - * + * * @param 
scanner to be closed */ public void close(KeyValueScanner scanner) { @@ -1968,7 +1968,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ scanner.close(); } catch(NullPointerException npe) {} } - + /** * @return the current storeHeap */ @@ -1976,7 +1976,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ return this.storeHeap; } } - + // Utility methods /** @@ -1989,7 +1989,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * @param rootDir Root directory for HBase instance * @param conf * @return new HRegion - * + * * @throws IOException */ public static HRegion createHRegion(final HRegionInfo info, final Path rootDir, @@ -2007,7 +2007,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ region.initialize(null, null); return region; } - + /** * Convenience method to open a HRegion outside of an HRegionServer context. * @param info Info for region to be opened. @@ -2018,7 +2018,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * up. HRegionStore does this every time it opens a new region. * @param conf * @return new HRegion - * + * * @throws IOException */ public static HRegion openHRegion(final HRegionInfo info, final Path rootDir, @@ -2039,18 +2039,18 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } return r; } - + /** * Inserts a new region's meta information into the passed * meta region. Used by the HMaster bootstrap code adding * new table to ROOT table. - * + * * @param meta META HRegion to be updated * @param r HRegion to add to meta * * @throws IOException */ - public static void addRegionToMETA(HRegion meta, HRegion r) + public static void addRegionToMETA(HRegion meta, HRegion r) throws IOException { meta.checkResources(); // The row key is the region name @@ -2103,7 +2103,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ srvr.put(metaRegionName, put); cleanRegionInMETA(srvr, metaRegionName, info); } - + /** * Clean COL_SERVER and COL_STARTCODE for passed info in * .META. 
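The row-locking code a little further up (obtainRowLock/getLock) hands the client an opaque lock id that must accompany later operations; it guards against a miswritten client writing to a row it never locked. A client-side sketch of holding an explicit row lock across a read-modify-write, assuming the RowLock/lockRow client API of this era (table and column names are made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.RowLock;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowLockExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "mytable");     // hypothetical table name
        byte[] row = Bytes.toBytes("row1");

        RowLock lock = table.lockRow(row);              // obtainRowLock on the region server
        try {
          Get get = new Get(row, lock);                 // both operations carry the same lock id
          Result current = table.get(get);
          Put put = new Put(row, lock);
          put.add(Bytes.toBytes("f"), Bytes.toBytes("q"),
              Bytes.toBytes("seen-" + current.size()));
          table.put(put);
        } finally {
          table.unlockRow(lock);                        // release before the lease expires
        }
      }
    }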
@@ -2123,7 +2123,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Deletes all the files for a HRegion - * + * * @param fs the file system object * @param rootdir qualified path of HBase root directory * @param info HRegionInfo for region to be deleted @@ -2146,7 +2146,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Computes the Path of the HRegion - * + * * @param tabledir qualified path for table * @param name ENCODED region name * @return Path of HRegion directory @@ -2154,10 +2154,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ public static Path getRegionDir(final Path tabledir, final int name) { return new Path(tabledir, Integer.toString(name)); } - + /** * Computes the Path of the HRegion - * + * * @param rootdir qualified path of HBase root directory * @param info HRegionInfo for the region * @return qualified path of region directory @@ -2171,7 +2171,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Determines if the specified row is within the row range specified by the * specified HRegionInfo - * + * * @param info HRegionInfo that specifies the row range * @param row row to be checked * @return true if the row is within the range specified by the HRegionInfo @@ -2185,7 +2185,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Make the directories for a specific column family - * + * * @param fs the file system * @param tabledir base directory where region will live (usually the table dir) * @param hri @@ -2203,7 +2203,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Merge two HRegions. The regions must be adjacent and must not overlap. - * + * * @param srcA * @param srcB * @return new merged HRegion @@ -2235,7 +2235,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Merge two regions whether they are adjacent or not. - * + * * @param a region a * @param b region b * @return new merged region @@ -2250,12 +2250,12 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ FileSystem fs = a.getFilesystem(); // Make sure each region's cache is empty - + a.flushcache(); b.flushcache(); - + // Compact each region so we only have one store file per family - + a.compactStores(true); if (LOG.isDebugEnabled()) { LOG.debug("Files for region: " + a); @@ -2266,12 +2266,12 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ LOG.debug("Files for region: " + b); listPaths(fs, b.getRegionDir()); } - + Configuration conf = a.getConf(); HTableDescriptor tabledesc = a.getTableDesc(); HLog log = a.getLog(); Path basedir = a.getBaseDir(); - // Presume both are of same region type -- i.e. both user or catalog + // Presume both are of same region type -- i.e. both user or catalog // table regions. This way can use comparator. final byte [] startKey = a.comparator.matchingRows(a.getStartKey(), 0, a.getStartKey().length, @@ -2279,7 +2279,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ b.comparator.matchingRows(b.getStartKey(), 0, b.getStartKey().length, EMPTY_BYTE_ARRAY, 0, EMPTY_BYTE_ARRAY.length)? EMPTY_BYTE_ARRAY: - a.comparator.compareRows(a.getStartKey(), 0, a.getStartKey().length, + a.comparator.compareRows(a.getStartKey(), 0, a.getStartKey().length, b.getStartKey(), 0, b.getStartKey().length) <= 0? 
a.getStartKey(): b.getStartKey(); final byte [] endKey = a.comparator.matchingRows(a.getEndKey(), 0, @@ -2293,7 +2293,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ HRegionInfo newRegionInfo = new HRegionInfo(tabledesc, startKey, endKey); LOG.info("Creating new region " + newRegionInfo.toString()); - int encodedName = newRegionInfo.getEncodedName(); + int encodedName = newRegionInfo.getEncodedName(); Path newRegionDir = HRegion.getRegionDir(a.getBaseDir(), encodedName); if(fs.exists(newRegionDir)) { throw new IOException("Cannot merge; target file collision at " + @@ -2353,7 +2353,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } /* - * Fills a map with a vector of store files keyed by column family. + * Fills a map with a vector of store files keyed by column family. * @param byFamily Map to fill. * @param storeFiles Store files to process. * @param family @@ -2375,7 +2375,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * @return True if needs a mojor compaction. - * @throws IOException + * @throws IOException */ boolean isMajorCompaction() throws IOException { for (Store store: this.stores.values()) { @@ -2388,7 +2388,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /* * List the files under the specified directory - * + * * @param fs * @param dir * @throws IOException @@ -2411,7 +2411,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } - + // // HBASE-880 // @@ -2433,7 +2433,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } // Lock row - Integer lid = getLock(lockid, get.getRow()); + Integer lid = getLock(lockid, get.getRow()); List result = new ArrayList(); try { for (Map.Entry> entry: @@ -2453,7 +2453,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } /** - * + * * @param row * @param family * @param qualifier @@ -2519,13 +2519,13 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ return result; } - - + + // // New HBASE-880 Helpers // - - private void checkFamily(final byte [] family) + + private void checkFamily(final byte [] family) throws NoSuchColumnFamilyException { if(!regionInfo.getTableDesc().hasFamily(family)) { throw new NoSuchColumnFamilyException("Column family " + @@ -2537,9 +2537,9 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ public static final long FIXED_OVERHEAD = ClassSize.align( (5 * Bytes.SIZEOF_LONG) + Bytes.SIZEOF_BOOLEAN + (20 * ClassSize.REFERENCE) + ClassSize.OBJECT + Bytes.SIZEOF_INT); - + public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD + - ClassSize.OBJECT + (2 * ClassSize.ATOMIC_BOOLEAN) + + ClassSize.OBJECT + (2 * ClassSize.ATOMIC_BOOLEAN) + ClassSize.ATOMIC_LONG + ClassSize.ATOMIC_INTEGER + // Using TreeMap for TreeSet @@ -2552,7 +2552,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ ClassSize.align(ClassSize.OBJECT + (5 * Bytes.SIZEOF_BOOLEAN)) + (3 * ClassSize.REENTRANT_LOCK)); - + public long heapSize() { long heapSize = DEEP_OVERHEAD; for(Store store : this.stores.values()) { @@ -2661,7 +2661,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion * * @param args - * @throws IOException + * @throws IOException */ public static void main(String[] args) throws IOException { if (args.length < 1) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java 
b/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index a830a5b..135f11f 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -126,7 +126,7 @@ public class HRegionServer implements HConstants, HRegionInterface, // plain boolean so we can pass a reference to Chore threads. Otherwise, // Chore threads need to know about the hosting class. protected final AtomicBoolean stopRequested = new AtomicBoolean(false); - + protected final AtomicBoolean quiesced = new AtomicBoolean(false); // Go down hard. Used if file system becomes unavailable and also in @@ -135,7 +135,7 @@ public class HRegionServer implements HConstants, HRegionInterface, // If false, the file system has become unavailable protected volatile boolean fsOk; - + protected HServerInfo serverInfo; protected final Configuration conf; @@ -162,7 +162,7 @@ public class HRegionServer implements HConstants, HRegionInterface, protected final int numRegionsToReport; private final long maxScannerResultSize; - + // Remote HMaster private HMasterRegionInterface hbaseMaster; @@ -172,7 +172,7 @@ public class HRegionServer implements HConstants, HRegionInterface, // Leases private Leases leases; - + // Request counter private volatile AtomicInteger requestCount = new AtomicInteger(); @@ -180,25 +180,25 @@ public class HRegionServer implements HConstants, HRegionInterface, // is name of the webapp and the attribute name used stuffing this instance // into web context. InfoServer infoServer; - + /** region server process name */ public static final String REGIONSERVER = "regionserver"; - + /* * Space is reserved in HRS constructor and then released when aborting * to recover from an OOME. See HBASE-706. TODO: Make this percentage of the * heap or a minimum. */ private final LinkedList reservedSpace = new LinkedList(); - + private RegionServerMetrics metrics; // Compactions CompactSplitThread compactSplitThread; - // Cache flushing + // Cache flushing MemStoreFlusher cacheFlusher; - + /* Check for major compactions. */ Chore majorCompactionChecker; @@ -244,7 +244,7 @@ public class HRegionServer implements HConstants, HRegionInterface, machineName = DNS.getDefaultHost( conf.get("hbase.regionserver.dns.interface","default"), conf.get("hbase.regionserver.dns.nameserver","default")); - String addressStr = machineName + ":" + + String addressStr = machineName + ":" + conf.get(REGIONSERVER_PORT, Integer.toString(DEFAULT_REGIONSERVER_PORT)); // This is not necessarily the address we will run with. The address we // use will be in #serverInfo data member. 
For example, we may have been @@ -259,7 +259,7 @@ public class HRegionServer implements HConstants, HRegionInterface, this.connection = ServerConnectionManager.getConnection(conf); this.isOnline = false; - + // Config'ed params this.numRetries = conf.getInt("hbase.client.retries.number", 2); this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000); @@ -268,14 +268,14 @@ public class HRegionServer implements HConstants, HRegionInterface, sleeper = new Sleeper(this.msgInterval, this.stopRequested); this.maxScannerResultSize = conf.getLong( - HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, + HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); - + // Task thread to process requests from Master this.worker = new Worker(); - this.numRegionsToReport = - conf.getInt("hbase.regionserver.numregionstoreport", 10); + this.numRegionsToReport = + conf.getInt("hbase.regionserver.numregionstoreport", 10); this.rpcTimeout = conf.getLong(HBASE_REGIONSERVER_LEASE_PERIOD_KEY, DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD); @@ -294,7 +294,7 @@ public class HRegionServer implements HConstants, HRegionInterface, this.shutdownHDFS.set(true); // Server to handle client requests - this.server = HBaseRPC.getServer(this, address.getBindAddress(), + this.server = HBaseRPC.getServer(this, address.getBindAddress(), address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10), false, conf); this.server.setErrorHandler(this); @@ -326,13 +326,13 @@ public class HRegionServer implements HConstants, HRegionInterface, // Cache flushing thread. this.cacheFlusher = new MemStoreFlusher(conf, this); - + // Compaction thread this.compactSplitThread = new CompactSplitThread(this); - + // Log rolling thread this.hlogRoller = new LogRoller(this); - + // Background thread to check for major compactions; needed if region // has not gotten updates in a while. Make it run at a lesser frequency. int multiplier = this.conf.getInt(THREAD_WAKE_FREQUENCY + @@ -417,7 +417,7 @@ public class HRegionServer implements HConstants, HRegionInterface, /** * The HRegionServer sticks in this loop until closed. It repeatedly checks - * in with the HMaster, sending heartbeats & reports, and receiving HRegion + * in with the HMaster, sending heartbeats & reports, and receiving HRegion * load/unload instructions. */ public void run() { @@ -510,7 +510,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } catch (IOException e) { this.abortRequested = true; this.stopRequested.set(true); - e = RemoteExceptionHandler.checkIOException(e); + e = RemoteExceptionHandler.checkIOException(e); LOG.fatal("error restarting server", e); break; } @@ -577,7 +577,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } } now = System.currentTimeMillis(); - HMsg msg = this.outboundMsgs.poll((msgInterval - (now - lastMsg)), + HMsg msg = this.outboundMsgs.poll((msgInterval - (now - lastMsg)), TimeUnit.MILLISECONDS); // If we got something, add it to list of things to send. 
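The run() loop described above ties its sleep to the outbound-message queue: it polls outboundMsgs with a timeout of (msgInterval - time since last report), so the server wakes promptly when there is something to send but still reports to the master at least every msgInterval. A self-contained sketch of that pattern (the names here are hypothetical, not the HRegionServer fields):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    class HeartbeatLoop {
      private final BlockingQueue<String> outbound = new LinkedBlockingQueue<String>();
      private final long msgInterval = 3000;           // report at least this often (ms)
      private volatile boolean stopRequested = false;

      void run() throws InterruptedException {
        long lastMsg = System.currentTimeMillis();
        List<String> toSend = new ArrayList<String>();
        while (!stopRequested) {
          long now = System.currentTimeMillis();
          long wait = Math.max(0, msgInterval - (now - lastMsg));
          // Wake early if a message arrives; otherwise time out and heartbeat anyway.
          String msg = outbound.poll(wait, TimeUnit.MILLISECONDS);
          if (msg != null) toSend.add(msg);
          now = System.currentTimeMillis();
          if (now - lastMsg >= msgInterval) {
            reportToMaster(toSend);                    // heartbeat plus any queued messages
            toSend.clear();
            lastMsg = now;
          }
        }
      }

      void reportToMaster(List<String> msgs) { /* the RPC to the master would go here */ }
    }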
if (msg != null) outboundMessages.add(msg); @@ -787,16 +787,16 @@ public class HRegionServer implements HConstants, HRegionInterface, stores += r.stores.size(); for (Store store: r.stores.values()) { storefiles += store.getStorefilesCount(); - storefileSizeMB += + storefileSizeMB += (int)(store.getStorefilesSize()/1024/1024); - storefileIndexSizeMB += + storefileIndexSizeMB += (int)(store.getStorefilesIndexSize()/1024/1024); } } return new HServerLoad.RegionLoad(name, stores, storefiles, storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB); } - + /** * @param regionName * @return An instance of RegionLoad. @@ -871,12 +871,12 @@ public class HRegionServer implements HConstants, HRegionInterface, } return stop; } - - + + /** * Checks to see if the file system is still accessible. * If not, sets abortRequested and stopRequested - * + * * @return false if file system is not available */ protected boolean checkFileSystem() { @@ -901,7 +901,7 @@ public class HRegionServer implements HConstants, HRegionInterface, private final HRegionServer instance; private final Thread mainThread; private final AtomicBoolean shutdownHDFS; - + /** * @param instance * @param mainThread @@ -917,7 +917,7 @@ public class HRegionServer implements HConstants, HRegionInterface, @Override public void run() { LOG.info("Starting shutdown thread"); - + // tell the region server to stop this.instance.stop(); @@ -966,7 +966,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } /** - * Report the status of the server. A server is online once all the startup + * Report the status of the server. A server is online once all the startup * is completed (setting up filesystem, starting service threads, etc.). This * method is designed mostly to be useful in tests. * @return true if online, false if not. @@ -974,7 +974,7 @@ public class HRegionServer implements HConstants, HRegionInterface, public boolean isOnline() { return isOnline; } - + private HLog setupHLog() throws RegionServerRunningException, IOException { Path oldLogDir = new Path(rootDir, HREGION_OLDLOGDIR_NAME); @@ -991,17 +991,17 @@ public class HRegionServer implements HConstants, HRegionInterface, return newlog; } - // instantiate + // instantiate protected HLog instantiateHLog(Path logdir, Path oldLogDir) throws IOException { HLog newlog = new HLog(fs, logdir, oldLogDir, conf, hlogRoller); return newlog; } - + protected LogRoller getLogRoller() { return hlogRoller; - } - + } + /* * @param interval Interval since last time metrics were called. */ @@ -1030,7 +1030,7 @@ public class HRegionServer implements HConstants, HRegionInterface, synchronized (r.stores) { stores += r.stores.size(); for(Map.Entry ee: r.stores.entrySet()) { - Store store = ee.getValue(); + Store store = ee.getValue(); storefiles += store.getStorefilesCount(); storefileIndexSize += store.getStorefilesIndexSize(); } @@ -1091,7 +1091,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Threads.setDaemonThreadRunning(this.workerThread, n + ".worker", handler); Threads.setDaemonThreadRunning(this.majorCompactionChecker, n + ".majorCompactionChecker", handler); - + // Leases is not a Thread. Internally it runs a daemon thread. If it gets // an unhandled exception, it will just exit. this.leases.setName(n + ".leaseChecker"); @@ -1121,7 +1121,7 @@ public class HRegionServer implements HConstants, HRegionInterface, // update HRS server info this.serverInfo.setInfoPort(port); } - } + } } // Start Server. 
This service is like leases in that it internally runs @@ -1191,7 +1191,7 @@ public class HRegionServer implements HConstants, HRegionInterface, this.stopRequested.set(true); synchronized(this) { // Wakes run() if it is sleeping - notifyAll(); // FindBugs NN_NAKED_NOTIFY + notifyAll(); // FindBugs NN_NAKED_NOTIFY } } @@ -1208,7 +1208,7 @@ public class HRegionServer implements HConstants, HRegionInterface, stop(); } - /** + /** * Wait on all threads to finish. * Presumption is that all closes and stops have already been called. */ @@ -1276,7 +1276,7 @@ public class HRegionServer implements HConstants, HRegionInterface, if (LOG.isDebugEnabled()) LOG.debug("sending initial server load: " + hsl); lastMsg = System.currentTimeMillis(); - boolean startCodeOk = false; + boolean startCodeOk = false; while(!startCodeOk) { serverInfo.setStartCode(System.currentTimeMillis()); startCodeOk = zooKeeperWrapper.writeRSLocation(this.serverInfo); @@ -1308,13 +1308,13 @@ public class HRegionServer implements HConstants, HRegionInterface, private void reportClose(final HRegionInfo region, final byte[] message) { this.outboundMsgs.add(new HMsg(HMsg.Type.MSG_REPORT_CLOSE, region, message)); } - + /** * Add to the outbound message buffer - * - * When a region splits, we need to tell the master that there are two new + * + * When a region splits, we need to tell the master that there are two new * regions that need to be assigned. - * + * * We do not need to inform the master about the old region, because we've * updated the meta or root regions, and the master will pick that up on its * next rescan of the root or meta tables. @@ -1347,7 +1347,7 @@ public class HRegionServer implements HConstants, HRegionInterface, final BlockingQueue toDo = new LinkedBlockingQueue(); private Worker worker; private Thread workerThread; - + /** Thread that performs long running requests from the master */ class Worker implements Runnable { void stop() { @@ -1355,7 +1355,7 @@ public class HRegionServer implements HConstants, HRegionInterface, toDo.notifyAll(); } } - + public void run() { try { while(!stopRequested.get()) { @@ -1417,12 +1417,12 @@ public class HRegionServer implements HConstants, HRegionInterface, e.msg.isType(Type.MSG_REGION_MAJOR_COMPACT), e.msg.getType().name()); break; - + case MSG_REGION_FLUSH: region = getRegion(info.getRegionName()); region.flushcache(); break; - + case TESTING_MSG_BLOCK_RS: while (!stopRequested.get()) { Threads.sleep(1000); @@ -1500,9 +1500,9 @@ public class HRegionServer implements HConstants, HRegionInterface, this.lock.writeLock().unlock(); } } - reportOpen(regionInfo); + reportOpen(regionInfo); } - + protected HRegion instantiateRegion(final HRegionInfo regionInfo) throws IOException { HRegion r = new HRegion(HTableDescriptor.getTableDir(rootDir, regionInfo @@ -1513,9 +1513,9 @@ public class HRegionServer implements HConstants, HRegionInterface, addProcessingMessage(regionInfo); } }); - return r; + return r; } - + /** * Add a MSG_REPORT_PROCESS_OPEN to the outbound queue. * This method is called while region is in the queue of regions to process @@ -1569,7 +1569,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } return regionsToClose; } - + /* * Thread to run close of a region. */ @@ -1580,7 +1580,7 @@ public class HRegionServer implements HConstants, HRegionInterface, super(Thread.currentThread().getName() + ".regionCloser." 
+ r.toString()); this.r = r; } - + @Override public void run() { try { @@ -1652,7 +1652,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } - public Result getClosestRowBefore(final byte [] regionName, + public Result getClosestRowBefore(final byte [] regionName, final byte [] row, final byte [] family) throws IOException { checkOpen(); @@ -1660,8 +1660,8 @@ public class HRegionServer implements HConstants, HRegionInterface, try { // locate the region we're operating on HRegion region = getRegion(regionName); - // ask the region for all the data - + // ask the region for all the data + Result r = region.getClosestRowBefore(row, family); return r; } catch (Throwable t) { @@ -1697,7 +1697,7 @@ public class HRegionServer implements HConstants, HRegionInterface, throws IOException { if (put.getRow() == null) throw new IllegalArgumentException("update has null row"); - + checkOpen(); this.requestCount.incrementAndGet(); HRegion region = getRegion(regionName); @@ -1745,7 +1745,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } /** - * + * * @param regionName * @param row * @param family @@ -1756,12 +1756,12 @@ public class HRegionServer implements HConstants, HRegionInterface, * @return true if the new put was execute, false otherwise */ public boolean checkAndPut(final byte[] regionName, final byte [] row, - final byte [] family, final byte [] qualifier, final byte [] value, + final byte [] family, final byte [] qualifier, final byte [] value, final Put put) throws IOException{ //Getting actual value Get get = new Get(row); get.addColumn(family, qualifier); - + checkOpen(); this.requestCount.incrementAndGet(); HRegion region = getRegion(regionName); @@ -1776,7 +1776,7 @@ public class HRegionServer implements HConstants, HRegionInterface, throw convertThrowableToIOE(cleanup(t)); } } - + // // remote scanner interface // @@ -1801,7 +1801,7 @@ public class HRegionServer implements HConstants, HRegionInterface, throw convertThrowableToIOE(cleanup(t, "Failed openScanner")); } } - + protected long addScanner(InternalScanner s) throws LeaseStillHeldException { long scannerId = -1L; scannerId = rand.nextLong(); @@ -1830,7 +1830,7 @@ public class HRegionServer implements HConstants, HRegionInterface, try { checkOpen(); } catch (IOException e) { - // If checkOpen failed, server not running or filesystem gone, + // If checkOpen failed, server not running or filesystem gone, // cancel this lease; filesystem is gone or we're closing or something. this.leases.cancelLease(scannerName); throw e; @@ -1869,7 +1869,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } throw convertThrowableToIOE(cleanup(t)); } - } + } public void close(final long scannerId) throws IOException { try { @@ -1886,17 +1886,17 @@ public class HRegionServer implements HConstants, HRegionInterface, } } - /** + /** * Instantiated as a scanner lease. 
* If the lease times out, the scanner is closed */ private class ScannerListener implements LeaseListener { private final String scannerName; - + ScannerListener(final String n) { this.scannerName = n; } - + public void leaseExpired() { LOG.info("Scanner " + this.scannerName + " lease expired"); InternalScanner s = scanners.remove(this.scannerName); @@ -1909,7 +1909,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } } } - + // // Methods that do the actual work for the remote API // @@ -2076,7 +2076,7 @@ public class HRegionServer implements HConstants, HRegionInterface, public InfoServer getInfoServer() { return infoServer; } - + /** * @return true if a stop has been requested. */ @@ -2085,7 +2085,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } /** - * + * * @return the configuration */ public Configuration getConfiguration() { @@ -2107,7 +2107,7 @@ public class HRegionServer implements HConstants, HRegionInterface, public HRegion [] getOnlineRegionsAsArray() { return getOnlineRegions().toArray(new HRegion[0]); } - + /** * @return The HRegionInfos from online regions sorted */ @@ -2120,10 +2120,10 @@ public class HRegionServer implements HConstants, HRegionInterface, } return result; } - + /** - * This method removes HRegion corresponding to hri from the Map of onlineRegions. - * + * This method removes HRegion corresponding to hri from the Map of onlineRegions. + * * @param hri the HRegionInfo corresponding to the HRegion to-be-removed. * @return the removed HRegion, or null if the HRegion was not in onlineRegions. */ @@ -2158,7 +2158,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } return sortedRegions; } - + /** * @param regionName * @return HRegion for the passed regionName or null if named @@ -2177,8 +2177,8 @@ public class HRegionServer implements HConstants, HRegionInterface, public FlushRequester getFlushRequester() { return this.cacheFlusher; } - - /** + + /** * Protected utility method for safely obtaining an HRegion handle. * @param regionName Name of online {@link HRegion} to return * @return {@link HRegion} for regionName @@ -2221,10 +2221,10 @@ public class HRegionServer implements HConstants, HRegionInterface, } return regions.toArray(new HRegionInfo[regions.size()]); } - - /** + + /** * Called to verify that this server is up and running. - * + * * @throws IOException */ protected void checkOpen() throws IOException { @@ -2236,14 +2236,14 @@ public class HRegionServer implements HConstants, HRegionInterface, throw new IOException("File system not available"); } } - + /** * @return Returns list of non-closed regions hosted on this server. If no * regions to check, returns an empty list. */ protected Set getRegionsToCheck() { HashSet regionsToCheck = new HashSet(); - //TODO: is this locking necessary? + //TODO: is this locking necessary? 
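ScannerListener above ties every open scanner to a lease: if the client stops using the scanner long enough for the lease to expire, leaseExpired() closes it so the region server does not leak scanners. A simplified stand-alone sketch of that expiry pattern using a scheduler (the types here are hypothetical stand-ins, not the HBase Leases class):

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    class ScannerLeases {
      private final ScheduledExecutorService timer = new ScheduledThreadPoolExecutor(1);
      private final ConcurrentHashMap<String, ScheduledFuture<?>> leases =
          new ConcurrentHashMap<String, ScheduledFuture<?>>();

      /** Register a scanner; it is closed if not renewed within leaseMillis. */
      void createLease(final String scannerName, final Closeable scanner, long leaseMillis) {
        ScheduledFuture<?> expiry = timer.schedule(new Runnable() {
          public void run() {
            leases.remove(scannerName);
            try { scanner.close(); } catch (IOException ignored) { }
          }
        }, leaseMillis, TimeUnit.MILLISECONDS);
        leases.put(scannerName, expiry);
      }

      /** Called on each use of the scanner: push the expiry out again. */
      void renewLease(String scannerName, Closeable scanner, long leaseMillis) {
        ScheduledFuture<?> old = leases.remove(scannerName);
        if (old != null && old.cancel(false)) {
          createLease(scannerName, scanner, leaseMillis);
        }
      }
    }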
lock.readLock().lock(); try { regionsToCheck.addAll(this.onlineRegions.values()); @@ -2260,9 +2260,9 @@ public class HRegionServer implements HConstants, HRegionInterface, return regionsToCheck; } - public long getProtocolVersion(final String protocol, + public long getProtocolVersion(final String protocol, final long clientVersion) - throws IOException { + throws IOException { if (protocol.equals(HRegionInterface.class.getName())) { return HBaseRPCProtocolVersion.versionID; } @@ -2317,21 +2317,21 @@ public class HRegionServer implements HConstants, HRegionInterface, public HServerInfo getServerInfo() { return this.serverInfo; } /** {@inheritDoc} */ - public long incrementColumnValue(byte [] regionName, byte [] row, + public long incrementColumnValue(byte [] regionName, byte [] row, byte [] family, byte [] qualifier, long amount, boolean writeToWAL) throws IOException { checkOpen(); if (regionName == null) { - throw new IOException("Invalid arguments to incrementColumnValue " + + throw new IOException("Invalid arguments to incrementColumnValue " + "regionName is null"); } requestCount.incrementAndGet(); try { HRegion region = getRegion(regionName); - long retval = region.incrementColumnValue(row, family, qualifier, amount, + long retval = region.incrementColumnValue(row, family, qualifier, amount, writeToWAL); - + return retval; } catch (IOException e) { checkFileSystem(); @@ -2348,7 +2348,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } return regions; } - + /** {@inheritDoc} */ public HServerInfo getHServerInfo() throws IOException { return serverInfo; @@ -2384,7 +2384,7 @@ public class HRegionServer implements HConstants, HRegionInterface, // // Main program and support routines // - + /** * @param hrs * @return Thread the RegionServer is running in correctly named. @@ -2410,7 +2410,7 @@ public class HRegionServer implements HConstants, HRegionInterface, private static void printUsageAndExit() { printUsageAndExit(null); } - + private static void printUsageAndExit(final String message) { if (message != null) { System.err.println(message); @@ -2448,7 +2448,7 @@ public class HRegionServer implements HConstants, HRegionInterface, printUsageAndExit(); } Configuration conf = HBaseConfiguration.create(); - + // Process command-line args. TODO: Better cmd-line processing // (but hopefully something not as painful as cli options). for (String cmd: args) { @@ -2473,13 +2473,13 @@ public class HRegionServer implements HConstants, HRegionInterface, } break; } - + if (cmd.equals("stop")) { printUsageAndExit("To shutdown the regionserver run " + "bin/hbase-daemon.sh stop regionserver or send a kill signal to" + "the regionserver pid"); } - + // Print out usage if we get to here. printUsageAndExit(); } diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java index 4f9d412..0f5f36c 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java @@ -27,10 +27,10 @@ import java.util.List; /** * Internal scanners differ from client-side scanners in that they operate on - * HStoreKeys and byte[] instead of RowResults. This is because they are + * HStoreKeys and byte[] instead of RowResults. This is because they are * actually close to how the data is physically stored, and therefore it is more - * convenient to interact with them that way. 
It is also much easier to merge - * the results across SortedMaps than RowResults. + * convenient to interact with them that way. It is also much easier to merge + * the results across SortedMaps than RowResults. * *
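The incrementColumnValue RPC in the HRegionServer hunk a little above atomically adds an amount to a long-encoded cell and returns the new value, which is the building block for counters. A client-side sketch, assuming the HTable.incrementColumnValue signature of this era (the table and column names are made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CounterExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "counters");    // hypothetical table name
        // Atomically add 1 to the long stored in f:hits and get the new total back.
        long hits = table.incrementColumnValue(Bytes.toBytes("page-1"),
            Bytes.toBytes("f"), Bytes.toBytes("hits"), 1L, true /* write to WAL */);
        System.out.println("hits = " + hits);
      }
    }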

        Additionally, we need to be able to determine if the scanner is doing * wildcard column matches (when only a column family is specified or if a @@ -50,7 +50,7 @@ public interface InternalScanner extends Closeable { /** * Grab the next row's worth of values with a limit on the number of values - * to return. + * to return. * @param result return output array * @param limit limit on row count to get * @return true if more rows exist after this one, false if scanner is done diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java index e846a83..260a9b7 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java @@ -46,14 +46,14 @@ public class KeyValueHeap implements KeyValueScanner, InternalScanner { private KVScannerComparator comparator; /** - * Constructor. This KeyValueHeap will handle closing of passed in + * Constructor. This KeyValueHeap will handle closing of passed in * KeyValueScanners. * @param scanners * @param comparator */ public KeyValueHeap(KeyValueScanner [] scanners, KVComparator comparator) { this.comparator = new KVScannerComparator(comparator); - this.heap = new PriorityQueue(scanners.length, + this.heap = new PriorityQueue(scanners.length, this.comparator); for (KeyValueScanner scanner : scanners) { if (scanner.peek() != null) { @@ -64,14 +64,14 @@ public class KeyValueHeap implements KeyValueScanner, InternalScanner { } this.current = heap.poll(); } - + public KeyValue peek() { if(this.current == null) { return null; } return this.current.peek(); } - + public KeyValue next() { if(this.current == null) { return null; @@ -101,7 +101,7 @@ public class KeyValueHeap implements KeyValueScanner, InternalScanner { * InternalScanner as well as KeyValueScanner (a {@link StoreScanner}). * @param result * @param limit - * @return true if there are more keys, false if all scanners are done + * @return true if there are more keys, false if all scanners are done */ public boolean next(List result, int limit) throws IOException { InternalScanner currentAsInternal = (InternalScanner)this.current; @@ -124,7 +124,7 @@ public class KeyValueHeap implements KeyValueScanner, InternalScanner { * This can ONLY be called when you are using Scanners that implement * InternalScanner as well as KeyValueScanner (a {@link StoreScanner}). * @param result - * @return true if there are more keys, false if all scanners are done + * @return true if there are more keys, false if all scanners are done */ public boolean next(List result) throws IOException { return next(result, -1); @@ -168,9 +168,9 @@ public class KeyValueHeap implements KeyValueScanner, InternalScanner { scanner.close(); } } - + /** - * Seeks all scanners at or below the specified seek key. If we earlied-out + * Seeks all scanners at or below the specified seek key. If we earlied-out * of a row, we may end up skipping values that were never reached yet. * Rather than iterating down, we want to give the opportunity to re-seek. *
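KeyValueHeap above merges several sorted scanners (the memstore plus one per store file) by keeping each scanner's current head in a PriorityQueue, always serving the smallest head and re-inserting that scanner if it still has data. A generic stand-alone sketch of the same k-way merge over sorted iterators (not the HBase classes):

    import java.util.Comparator;
    import java.util.Iterator;
    import java.util.List;
    import java.util.PriorityQueue;

    class MergingIterator<T> implements Iterator<T> {
      /** One underlying sorted iterator plus its current head element. */
      private static final class Source<T> {
        T head;
        final Iterator<T> it;
        Source(Iterator<T> it) { this.it = it; this.head = it.hasNext() ? it.next() : null; }
      }

      private final PriorityQueue<Source<T>> heap;

      MergingIterator(List<Iterator<T>> sorted, final Comparator<T> cmp) {
        heap = new PriorityQueue<Source<T>>(Math.max(1, sorted.size()),
            new Comparator<Source<T>>() {
              public int compare(Source<T> a, Source<T> b) { return cmp.compare(a.head, b.head); }
            });
        for (Iterator<T> it : sorted) {
          Source<T> s = new Source<T>(it);
          if (s.head != null) heap.add(s);        // only sources that have data join the heap
        }
      }

      public boolean hasNext() { return !heap.isEmpty(); }

      public T next() {
        Source<T> top = heap.poll();              // smallest current head across all sources
        T result = top.head;
        top.head = top.it.hasNext() ? top.it.next() : null;
        if (top.head != null) heap.add(top);      // re-seed the heap with the advanced source
        return result;
      }

      public void remove() { throw new UnsupportedOperationException(); }
    }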

        diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java index d097250..657018f 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java @@ -30,20 +30,20 @@ public interface KeyValueScanner { * @return the next KeyValue */ public KeyValue peek(); - + /** - * Return the next KeyValue in this scanner, iterating the scanner + * Return the next KeyValue in this scanner, iterating the scanner * @return the next KeyValue */ public KeyValue next(); - + /** * Seek the scanner at or after the specified KeyValue. * @param key seek value * @return true if scanner has values left, false if end of scanner */ public boolean seek(KeyValue key); - + /** * Close the KeyValue scanner. */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java index 419bbfb..fbd309a 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java @@ -61,7 +61,7 @@ class KeyValueSkipListSet implements NavigableSet { */ static class MapEntryIterator implements Iterator { private final Iterator> iterator; - + MapEntryIterator(final Iterator> i) { this.iterator = i; } diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java index bc71c70..fed59eb 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java @@ -32,13 +32,13 @@ import java.util.concurrent.locks.ReentrantLock; /** * Runs periodically to determine if the HLog should be rolled. - * + * * NOTE: This class extends Thread rather than Chore because the sleep time * can be interrupted when there is something to do, rather than the Chore * sleep time which is invariant. */ class LogRoller extends Thread implements LogRollListener { - static final Log LOG = LogFactory.getLog(LogRoller.class); + static final Log LOG = LogFactory.getLog(LogRoller.class); private final ReentrantLock rollLock = new ReentrantLock(); private final AtomicBoolean rollLog = new AtomicBoolean(false); private final HRegionServer server; diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java index fb0031f..161ae18 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java @@ -36,8 +36,8 @@ import java.util.Set; * The LruHashMap is a memory-aware HashMap with a configurable maximum * memory footprint. *
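The LogRoller comment above explains why it extends Thread rather than Chore: it sleeps for the roll period but must be woken immediately when a roll is requested. A minimal sketch of that wake-on-demand pattern with an AtomicBoolean flag and wait/notify (the names are hypothetical, not the LogRoller fields):

    import java.util.concurrent.atomic.AtomicBoolean;

    class RollOnDemandThread extends Thread {
      private final AtomicBoolean rollRequested = new AtomicBoolean(false);
      private final long rollPeriodMillis = 3600 * 1000;   // fall back to an hourly roll
      private volatile boolean stopped = false;

      /** Called by the writer when the current log has grown too large. */
      void requestRoll() {
        synchronized (rollRequested) {
          rollRequested.set(true);
          rollRequested.notifyAll();              // cut the sleep short
        }
      }

      public void run() {
        while (!stopped) {
          synchronized (rollRequested) {
            if (!rollRequested.get()) {
              try {
                rollRequested.wait(rollPeriodMillis);   // wakes early on requestRoll()
              } catch (InterruptedException e) {
                continue;                               // re-check the flags
              }
            }
            rollRequested.set(false);
          }
          rollLog();    // roll because it was requested or because the period elapsed
        }
      }

      void rollLog() { /* the actual log roll would happen here */ }
    }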

        - * It maintains an ordered list of all entries in the map ordered by - * access time. When space needs to be freed becase the maximum has been + * It maintains an ordered list of all entries in the map ordered by + * access time. When space needs to be freed becase the maximum has been * reached, or the application has asked to free memory, entries will be * evicted according to an LRU (least-recently-used) algorithm. That is, * those entries which have not been accessed the longest will be evicted @@ -52,8 +52,8 @@ public class LruHashMap implements HeapSize, Map { static final Log LOG = LogFactory.getLog(LruHashMap.class); - - /** The default size (in bytes) of the LRU */ + + /** The default size (in bytes) of the LRU */ private static final long DEFAULT_MAX_MEM_USAGE = 50000; /** The default capacity of the hash table */ private static final int DEFAULT_INITIAL_CAPACITY = 16; @@ -61,12 +61,12 @@ implements HeapSize, Map { private static final int MAXIMUM_CAPACITY = 1 << 30; /** The default load factor to use */ private static final float DEFAULT_LOAD_FACTOR = 0.75f; - + /** Memory overhead of this Object (for HeapSize) */ - private static final int OVERHEAD = 5 * Bytes.SIZEOF_LONG + - 2 * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_FLOAT + 3 * ClassSize.REFERENCE + + private static final int OVERHEAD = 5 * Bytes.SIZEOF_LONG + + 2 * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_FLOAT + 3 * ClassSize.REFERENCE + 1 * ClassSize.ARRAY; - + /** Load factor allowed (usually 75%) */ private final float loadFactor; /** Number of key/vals in the map */ @@ -85,7 +85,7 @@ implements HeapSize, Map { private long memTotal = 0; /** Amount of available memory */ private long memFree = 0; - + /** Number of successful (found) get() calls */ private long hitCount = 0; /** Number of unsuccessful (not found) get() calls */ @@ -120,7 +120,7 @@ implements HeapSize, Map { throw new IllegalArgumentException("Max memory usage too small to " + "support base overhead"); } - + /** Find a power of 2 >= initialCapacity */ int capacity = calculateCapacity(initialCapacity); this.loadFactor = loadFactor; @@ -145,7 +145,7 @@ implements HeapSize, Map { public LruHashMap(int initialCapacity, float loadFactor) { this(initialCapacity, loadFactor, DEFAULT_MAX_MEM_USAGE); } - + /** * Constructs a new, empty map with the specified initial capacity and * with the default load factor and maximum memory usage. @@ -173,14 +173,14 @@ implements HeapSize, Map { } /** - * Constructs a new, empty map with the default initial capacity, + * Constructs a new, empty map with the default initial capacity, * load factor and maximum memory usage. */ public LruHashMap() { this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_MAX_MEM_USAGE); } - + //-------------------------------------------------------------------------- /** * Get the currently available memory for this LRU in bytes. @@ -191,7 +191,7 @@ implements HeapSize, Map { public long getMemFree() { return memFree; } - + /** * Get the maximum memory allowed for this LRU in bytes. * @@ -200,7 +200,7 @@ implements HeapSize, Map { public long getMemMax() { return memTotal; } - + /** * Get the currently used memory for this LRU in bytes. * @@ -209,7 +209,7 @@ implements HeapSize, Map { public long getMemUsed() { return (memTotal - memFree); // FindBugs IS2_INCONSISTENT_SYNC } - + /** * Get the number of hits to the map. This is the number of times * a call to get() returns a matched key. 
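LruHashMap above is a HashMap that also tracks the heap size of every entry and evicts least-recently-used entries once a byte budget is exceeded. The idea can be illustrated stand-alone with an access-ordered LinkedHashMap and a byte budget (a simplified illustration only, not the HBase class; value sizes come from a caller-supplied estimate):

    import java.util.LinkedHashMap;
    import java.util.Map;

    /** LRU map bounded by an approximate byte budget rather than an entry count. */
    class ByteBoundedLruMap<K, V> extends LinkedHashMap<K, V> {
      interface Sizer<V> { long sizeOf(V value); }

      private final long maxBytes;
      private final Sizer<V> sizer;
      private long usedBytes = 0;

      ByteBoundedLruMap(long maxBytes, Sizer<V> sizer) {
        super(16, 0.75f, true);                 // true = access order, i.e. LRU iteration order
        this.maxBytes = maxBytes;
        this.sizer = sizer;
      }

      @Override public V put(K key, V value) {
        usedBytes += sizer.sizeOf(value);
        V old = super.put(key, value);          // may trigger removeEldestEntry below
        if (old != null) usedBytes -= sizer.sizeOf(old);
        return old;
      }

      @Override protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        // Evicts at most one entry per insertion; the real class keeps freeing
        // until it is back under budget (see freeMemory above).
        if (usedBytes > maxBytes && size() > 1) {
          usedBytes -= sizer.sizeOf(eldest.getValue());
          return true;                          // drop the least-recently-used entry
        }
        return false;
      }
    }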
@@ -219,7 +219,7 @@ implements HeapSize, Map { public long getHitCount() { return hitCount; } - + /** * Get the number of misses to the map. This is the number of times * a call to get() returns null. @@ -229,7 +229,7 @@ implements HeapSize, Map { public long getMissCount() { return missCount; // FindBugs IS2_INCONSISTENT_SYNC } - + /** * Get the hit ratio. This is the number of hits divided by the * total number of requests. @@ -240,7 +240,7 @@ implements HeapSize, Map { return (double)((double)hitCount/ ((double)(hitCount+missCount))); } - + /** * Free the requested amount of memory from the LRU map. * @@ -261,7 +261,7 @@ implements HeapSize, Map { } return freedMemory; } - + /** * The total memory usage of this map * @@ -270,7 +270,7 @@ implements HeapSize, Map { public long heapSize() { return (memTotal - memFree); } - + //-------------------------------------------------------------------------- /** * Retrieves the value associated with the specified key. @@ -286,7 +286,7 @@ implements HeapSize, Map { checkKey((K)key); int hash = hash(key); int i = hashIndex(hash, entries.length); - Entry e = entries[i]; + Entry e = entries[i]; while (true) { if (e == null) { missCount++; @@ -313,7 +313,7 @@ implements HeapSize, Map { * @param key the key * @param value the value * @return the value that was previously mapped to this key, null if none - * @throws UnsupportedOperationException if either objects do not + * @throws UnsupportedOperationException if either objects do not * implement HeapSize * @throws NullPointerException if the key or value is null */ @@ -322,7 +322,7 @@ implements HeapSize, Map { checkValue(value); int hash = hash(key); int i = hashIndex(hash, entries.length); - + // For old values for (Entry e = entries[i]; e != null; e = e.next) { if (e.hash == hash && isEqual(key, e.key)) { @@ -338,7 +338,7 @@ implements HeapSize, Map { checkAndFreeMemory(memChange); return null; } - + /** * Deletes the mapping for the specified key if it exists. * @@ -381,7 +381,7 @@ implements HeapSize, Map { public synchronized void clear() { memFree += clearAll(); } - + //-------------------------------------------------------------------------- /** * Checks whether there is a value in the map for the specified key. @@ -396,9 +396,9 @@ implements HeapSize, Map { checkKey((K)key); int hash = hash(key); int i = hashIndex(hash, entries.length); - Entry e = entries[i]; + Entry e = entries[i]; while (e != null) { - if (e.hash == hash && isEqual(key, e.key)) + if (e.hash == hash && isEqual(key, e.key)) return true; e = e.next; } @@ -407,7 +407,7 @@ implements HeapSize, Map { /** * Checks whether this is a mapping which contains the specified value. - * + * * Does not affect the LRU. This is an inefficient operation. * * @param value the value to check @@ -443,7 +443,7 @@ implements HeapSize, Map { throw new NullPointerException("null keys are not allowed"); } } - + /** * Enforces value constraints. Null values are not permitted and value must * implement HeapSize. It should not be necessary to verify the second @@ -461,7 +461,7 @@ implements HeapSize, Map { throw new NullPointerException("null values are not allowed"); } } - + /** * Returns the minimum memory usage of the base map structure. 
* @@ -470,7 +470,7 @@ implements HeapSize, Map { private long getMinimumUsage() { return OVERHEAD + (entries.length * ClassSize.REFERENCE); } - + //-------------------------------------------------------------------------- /** * Evicts and frees based on LRU until at least as much memory as requested @@ -497,7 +497,7 @@ implements HeapSize, Map { removeEntry(headPtr); return freed; } - + /** * Moves the specified entry to the most recently used slot of the * LRU. This is called whenever an entry is fetched. @@ -543,10 +543,10 @@ implements HeapSize, Map { } else { prev.next = next; } - + Entry prevPtr = e.getPrevPtr(); Entry nextPtr = e.getNextPtr(); - + if(prevPtr != null && nextPtr != null) { prevPtr.setNextPtr(nextPtr); nextPtr.setPrevPtr(prevPtr); @@ -557,7 +557,7 @@ implements HeapSize, Map { headPtr = nextPtr; nextPtr.setPrevPtr(null); } - + return; } prev = e; @@ -587,7 +587,7 @@ implements HeapSize, Map { } else { prev.next = next; } - + // Updating LRU Entry prevPtr = e.getPrevPtr(); Entry nextPtr = e.getNextPtr(); @@ -601,7 +601,7 @@ implements HeapSize, Map { headPtr = nextPtr; nextPtr.setPrevPtr(null); } - + return e; } prev = e; @@ -668,7 +668,7 @@ implements HeapSize, Map { size = 0; return freedMemory; } - + //-------------------------------------------------------------------------- /** * Recreates the entire contents of the hashmap into a new array @@ -680,7 +680,7 @@ implements HeapSize, Map { private void growTable(int newCapacity) { Entry [] oldTable = entries; int oldCapacity = oldTable.length; - + // Do not allow growing the table beyond the max capacity if (oldCapacity == MAXIMUM_CAPACITY) { threshold = Integer.MAX_VALUE; @@ -689,12 +689,12 @@ implements HeapSize, Map { // Determine how much additional space will be required to grow the array long requiredSpace = (newCapacity - oldCapacity) * ClassSize.REFERENCE; - + // Verify/enforce we have sufficient memory to grow checkAndFreeMemory(requiredSpace); Entry [] newTable = new Entry[newCapacity]; - + // Transfer existing entries to new hash table for(int i=0; i < oldCapacity; i++) { Entry entry = oldTable[i]; @@ -731,7 +731,7 @@ implements HeapSize, Map { h ^= (h >>> 10); return h; } - + /** * Compares two objects for equality. Method uses equals method and * assumes neither value is null. @@ -743,7 +743,7 @@ implements HeapSize, Map { private boolean isEqual(Object x, Object y) { return (x == y || x.equals(y)); } - + /** * Determines the index into the current hash table for the specified * hashValue. @@ -778,7 +778,7 @@ implements HeapSize, Map { } return newCapacity; } - + /** * Calculates the threshold of the map given the capacity and load * factor. Once the number of entries in the map grows to the @@ -799,7 +799,7 @@ implements HeapSize, Map { memFree -= OVERHEAD; memFree -= (entries.length * ClassSize.REFERENCE); } - + //-------------------------------------------------------------------------- /** * Debugging function that returns a List sorted by access time. @@ -833,7 +833,7 @@ implements HeapSize, Map { } return entrySet; } - + /** * Get the head of the linked list (least recently used). * @@ -842,16 +842,16 @@ implements HeapSize, Map { public Entry getHeadPtr() { return headPtr; } - + /** * Get the tail of the linked list (most recently used). 
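calculateCapacity and hashIndex above rely on the table length always being a power of two, so the bucket index can be computed from the low bits of the hash (presumably a mask, as in java.util.HashMap) instead of a modulo. A small sketch of both helpers, written as hypothetical stand-alone methods that mirror the intent of the hunks above:

    class HashIndexing {
      /** Smallest power of two >= requested (capped at 1 << 30). */
      static int calculateCapacity(int requested) {
        int capacity = 1;
        while (capacity < requested && capacity < (1 << 30)) {
          capacity <<= 1;
        }
        return capacity;
      }

      /** Bucket index for a hash when the table length is a power of two. */
      static int hashIndex(int hash, int tableLength) {
        return hash & (tableLength - 1);   // keeps the low bits; valid only for power-of-two lengths
      }

      public static void main(String[] args) {
        int capacity = calculateCapacity(100);   // -> 128
        System.out.println(capacity + " " + hashIndex(12345, capacity));
      }
    }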
- * + * * @return tail of linked list */ public Entry getTailPtr() { return tailPtr; } - + //-------------------------------------------------------------------------- /** * To best optimize this class, some of the methods that are part of a @@ -860,7 +860,7 @@ implements HeapSize, Map { * significant overhead and code complexity to support and are * unnecessary for the requirements of this class. */ - + /** * Intentionally unimplemented. */ @@ -884,7 +884,7 @@ implements HeapSize, Map { throw new UnsupportedOperationException( "hashCode(Object) is intentionally unimplemented"); } - + /** * Intentionally unimplemented. */ @@ -892,7 +892,7 @@ implements HeapSize, Map { throw new UnsupportedOperationException( "keySet() is intentionally unimplemented"); } - + /** * Intentionally unimplemented. */ @@ -900,7 +900,7 @@ implements HeapSize, Map { throw new UnsupportedOperationException( "putAll() is intentionally unimplemented"); } - + /** * Intentionally unimplemented. */ @@ -922,9 +922,9 @@ implements HeapSize, Map { protected static class Entry implements Map.Entry, HeapSize { /** The baseline overhead memory usage of this class */ - static final int OVERHEAD = 1 * Bytes.SIZEOF_LONG + + static final int OVERHEAD = 1 * Bytes.SIZEOF_LONG + 5 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT; - + /** The key */ protected final K key; /** The value */ @@ -933,12 +933,12 @@ implements HeapSize, Map { protected final int hash; /** The next entry in the hash chain (for collisions) */ protected Entry next; - + /** The previous entry in the LRU list (towards LRU) */ protected Entry prevPtr; /** The next entry in the LRU list (towards MRU) */ protected Entry nextPtr; - + /** The precomputed heap size of this entry */ protected long heapSize; @@ -979,7 +979,7 @@ implements HeapSize, Map { public V getValue() { return value; } - + /** * Set the value of this entry. * @@ -995,7 +995,7 @@ implements HeapSize, Map { value = newValue; return oldValue; } - + /** * Replace the value of this entry. * @@ -1011,7 +1011,7 @@ implements HeapSize, Map { heapSize += sizeDiff; return sizeDiff; } - + /** * Returns true is the specified entry has the same key and the * same value as this entry. @@ -1028,13 +1028,13 @@ implements HeapSize, Map { if (k1 == k2 || (k1 != null && k1.equals(k2))) { Object v1 = getValue(); Object v2 = e.getValue(); - if (v1 == v2 || (v1 != null && v1.equals(v2))) + if (v1 == v2 || (v1 != null && v1.equals(v2))) return true; } return false; } - - /** + + /** * Returns the hash code of the entry by xor'ing the hash values * of the key and value of this entry. * @@ -1043,7 +1043,7 @@ implements HeapSize, Map { public int hashCode() { return (key.hashCode() ^ value.hashCode()); } - + /** * Returns String representation of the entry in form "key=value" * @@ -1061,15 +1061,15 @@ implements HeapSize, Map { protected void setPrevPtr(Entry prevPtr){ this.prevPtr = prevPtr; } - + /** * Returns the previous pointer for the entry in the LRU. * @return previous entry */ protected Entry getPrevPtr(){ return prevPtr; - } - + } + /** * Sets the next pointer for the entry in the LRU. * @param nextPtr next entry @@ -1077,7 +1077,7 @@ implements HeapSize, Map { protected void setNextPtr(Entry nextPtr){ this.nextPtr = nextPtr; } - + /** * Returns the next pointer for the entry in teh LRU. 
* @return next entry @@ -1085,7 +1085,7 @@ implements HeapSize, Map { protected Entry getNextPtr(){ return nextPtr; } - + /** * Returns the pre-computed and "deep" size of the Entry * @return size of the entry in bytes diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index 5ef7a1b..87cdb47 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -204,7 +204,7 @@ public class MemStore implements HeapSize { return s; } - /** + /** * Write a delete * @param delete * @return approximate size of the passed key and value. @@ -221,7 +221,7 @@ public class MemStore implements HeapSize { //TODO Would be nice with if we had an iterator for this, so we could remove //things that needs to be removed while iterating and don't have to go //back and do it afterwards - + try { boolean notpresent = false; List deletes = new ArrayList(); @@ -230,34 +230,34 @@ public class MemStore implements HeapSize { //Parse the delete, so that it is only done once byte [] deleteBuffer = delete.getBuffer(); int deleteOffset = delete.getOffset(); - + int deleteKeyLen = Bytes.toInt(deleteBuffer, deleteOffset); deleteOffset += Bytes.SIZEOF_INT + Bytes.SIZEOF_INT; - + short deleteRowLen = Bytes.toShort(deleteBuffer, deleteOffset); deleteOffset += Bytes.SIZEOF_SHORT; int deleteRowOffset = deleteOffset; - + deleteOffset += deleteRowLen; - + byte deleteFamLen = deleteBuffer[deleteOffset]; deleteOffset += Bytes.SIZEOF_BYTE + deleteFamLen; - + int deleteQualifierOffset = deleteOffset; int deleteQualifierLen = deleteKeyLen - deleteRowLen - deleteFamLen - - Bytes.SIZEOF_SHORT - Bytes.SIZEOF_BYTE - Bytes.SIZEOF_LONG - + Bytes.SIZEOF_SHORT - Bytes.SIZEOF_BYTE - Bytes.SIZEOF_LONG - Bytes.SIZEOF_BYTE; - + deleteOffset += deleteQualifierLen; - + int deleteTimestampOffset = deleteOffset; deleteOffset += Bytes.SIZEOF_LONG; byte deleteType = deleteBuffer[deleteOffset]; - + //Comparing with tail from memstore for (KeyValue kv : tail) { - DeleteCode res = DeleteCompare.deleteCompare(kv, deleteBuffer, - deleteRowOffset, deleteRowLen, deleteQualifierOffset, + DeleteCode res = DeleteCompare.deleteCompare(kv, deleteBuffer, + deleteRowOffset, deleteRowLen, deleteQualifierOffset, deleteQualifierLen, deleteTimestampOffset, deleteType, comparator.getRawComparator()); if (res == DeleteCode.DONE) { @@ -272,7 +272,7 @@ public class MemStore implements HeapSize { notpresent = this.kvset.remove(kv); s -= heapSizeChange(kv, notpresent); } - + // Adding the delete to memstore. Add any value, as long as // same instance each time. s += heapSizeChange(delete, this.kvset.add(delete)); @@ -282,7 +282,7 @@ public class MemStore implements HeapSize { this.size.addAndGet(s); return s; } - + /** * @param kv Find the row that comes after this one. If null, we return the * first. @@ -533,7 +533,7 @@ public class MemStore implements HeapSize { void readLockUnlock() { this.lock.readLock().unlock(); } - + /** * * @param set memstore or snapshot @@ -566,7 +566,7 @@ public class MemStore implements HeapSize { } return false; } - + /* * MemStoreScanner implements the KeyValueScanner. 
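MemStore.delete() above parses the delete KeyValue straight out of its backing byte[]: after the two length ints come a short row length, the row, a one-byte family length, the family, the qualifier, an 8-byte timestamp and a one-byte type. A commented sketch that walks the same layout, using literal sizes in place of the Bytes.SIZEOF_* constants (it mirrors the offset arithmetic in the hunk above):

    import java.nio.ByteBuffer;

    class KeyValueLayout {
      /** Walk a serialized KeyValue and print the offsets the delete-compare code uses. */
      static void describe(byte[] buf, int offset) {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        int keyLen = bb.getInt(offset);            // length of the key portion
        offset += 4 + 4;                           // skip the key-length and value-length ints

        short rowLen = bb.getShort(offset);
        offset += 2;
        int rowOffset = offset;
        offset += rowLen;

        byte famLen = buf[offset];
        offset += 1 + famLen;

        int qualifierOffset = offset;
        int qualifierLen = keyLen - rowLen - famLen
            - 2 /* row length */ - 1 /* family length */
            - 8 /* timestamp */ - 1 /* type */;
        offset += qualifierLen;

        int timestampOffset = offset;
        offset += 8;
        byte type = buf[offset];

        System.out.println("row@" + rowOffset + " qualifier@" + qualifierOffset
            + " timestamp@" + timestampOffset + " type=" + type);
      }
    }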
@@ -730,7 +730,7 @@ public class MemStore implements HeapSize { public final static long FIXED_OVERHEAD = ClassSize.align( ClassSize.OBJECT + (8 * ClassSize.REFERENCE)); - + public final static long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD + ClassSize.REENTRANT_LOCK + ClassSize.ATOMIC_LONG + ClassSize.COPYONWRITE_ARRAYSET + ClassSize.COPYONWRITE_ARRAYLIST + @@ -744,11 +744,11 @@ public class MemStore implements HeapSize { * @return Size */ long heapSizeChange(final KeyValue kv, final boolean notpresent) { - return notpresent ? + return notpresent ? ClassSize.align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + kv.heapSize()): 0; } - + /** * Get the entire heap usage for this MemStore not including keys in the * snapshot. @@ -757,7 +757,7 @@ public class MemStore implements HeapSize { public long heapSize() { return size.get(); } - + /** * Get the heap usage of KVs in this MemStore. */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index 7f63a61..263384e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -45,14 +45,14 @@ import java.util.concurrent.locks.ReentrantLock; * NOTE: This class extends Thread rather than Chore because the sleep time * can be interrupted when there is something to do, rather than the Chore * sleep time which is invariant. - * + * * @see FlushRequester */ class MemStoreFlusher extends Thread implements FlushRequester { static final Log LOG = LogFactory.getLog(MemStoreFlusher.class); private final BlockingQueue flushQueue = new LinkedBlockingQueue(); - + private final HashSet regionsInQueue = new HashSet(); private final long threadWakeFrequency; @@ -61,7 +61,7 @@ class MemStoreFlusher extends Thread implements FlushRequester { protected final long globalMemStoreLimit; protected final long globalMemStoreLimitLowMark; - + private static final float DEFAULT_UPPER = 0.4f; private static final float DEFAULT_LOWER = 0.25f; private static final String UPPER_KEY = @@ -91,7 +91,7 @@ class MemStoreFlusher extends Thread implements FlushRequester { "because supplied " + LOWER_KEY + " was > " + UPPER_KEY); } this.globalMemStoreLimitLowMark = lower; - this.blockingStoreFilesNumber = + this.blockingStoreFilesNumber = conf.getInt("hbase.hstore.blockingStoreFiles", -1); if (this.blockingStoreFilesNumber == -1) { this.blockingStoreFilesNumber = 1 + @@ -120,7 +120,7 @@ class MemStoreFlusher extends Thread implements FlushRequester { float limit = c.getFloat(key, defaultLimit); return getMemStoreLimit(max, limit, defaultLimit); } - + static long getMemStoreLimit(final long max, final float limit, final float defaultLimit) { if (limit >= 0.9f || limit < 0.1f) { @@ -129,7 +129,7 @@ class MemStoreFlusher extends Thread implements FlushRequester { } return (long)(max * limit); } - + @Override public void run() { while (!this.server.isStopRequested()) { @@ -159,7 +159,7 @@ class MemStoreFlusher extends Thread implements FlushRequester { this.flushQueue.clear(); LOG.info(getName() + " exiting"); } - + public void request(HRegion r) { synchronized (regionsInQueue) { if (!regionsInQueue.contains(r)) { @@ -168,10 +168,10 @@ class MemStoreFlusher extends Thread implements FlushRequester { } } } - + /** * Only interrupt once it's done with a run through the work loop. 
- */ + */ void interruptIfNecessary() { lock.lock(); try { @@ -180,10 +180,10 @@ class MemStoreFlusher extends Thread implements FlushRequester { lock.unlock(); } } - + /* * Flush a region. - * + * * @param region the region to be flushed * @param removeFromQueue True if the region needs to be removed from the * flush queue. False if called from the main flusher run loop and true if @@ -196,21 +196,21 @@ class MemStoreFlusher extends Thread implements FlushRequester { * That compactions do not run when called out of flushSomeRegions means that * compactions can be reported by the historian without danger of deadlock * (HBASE-670). - * + * *

        In the main run loop, regions have already been removed from the flush * queue, and if this method is called for the relief of memory pressure, - * this may not necessarily be true. We want to avoid trying to remove + * the region from the queue because if it has already been removed, it requires a * sequential scan of the queue to determine that it is not in the queue. - * + * *

        If called from flushSomeRegions, the region may be in the queue but - * it may have been determined that the region had a significant amount of + * it may have been determined that the region had a significant amount of * memory in use and needed to be flushed to relieve memory pressure. In this * case, its flush may preempt the pending request in the queue, and if so, * it needs to be removed from the queue to avoid flushing the region * multiple times. - * - * @return true if the region was successfully flushed, false otherwise. If + * + * @return true if the region was successfully flushed, false otherwise. If * false, there will be accompanying log messages explaining why the log was * not flushed. */ @@ -334,7 +334,7 @@ class MemStoreFlusher extends Thread implements FlushRequester { } /** - * Check if the regionserver's memstore memory usage is greater than the + * Check if the regionserver's memstore memory usage is greater than the * limit. If so, flush regions with the biggest memstores until we're down * to the lower limit. This method blocks callers until we're down to a safe * amount of memstore consumption. diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java index 259fd5a..81f4d21 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java @@ -33,7 +33,7 @@ import java.util.NavigableSet; * This is the primary class used to process KeyValues during a Get or Scan * operation. *

        - * It encapsulates the handling of the column and version input parameters to + * It encapsulates the handling of the column and version input parameters to * the query through a {@link ColumnTracker}. *

        * Deletes are handled using the {@link DeleteTracker}. @@ -41,10 +41,10 @@ import java.util.NavigableSet; * All other query parameters are accessed from the client-specified Get. *

        * The primary method used is {@link #match} with the current KeyValue. It will - * return a {@link QueryMatcher.MatchCode} - * + * return a {@link QueryMatcher.MatchCode} + * * , deletes, - * versions, + * versions, */ public class QueryMatcher { /** @@ -59,17 +59,17 @@ public class QueryMatcher { * Include KeyValue in the returned result */ INCLUDE, - + /** * Do not include KeyValue in the returned result */ SKIP, - + /** * Do not include, jump to next StoreFile or memstore (in time order) */ NEXT, - + /** * Do not include, return current result */ @@ -93,25 +93,25 @@ public class QueryMatcher { */ DONE_SCAN, } - + /** Keeps track of deletes */ protected DeleteTracker deletes; - + /** Keeps track of columns and versions */ protected ColumnTracker columns; - + /** Key to seek to in memstore and StoreFiles */ protected KeyValue startKey; - + /** Row comparator for the region this query is for */ KeyComparator rowComparator; - + /** Row the query is on */ protected byte [] row; - + /** TimeRange the query is for */ protected TimeRange tr; - + /** Oldest allowed version stamp for TTL enforcement */ protected long oldestStamp; @@ -125,7 +125,7 @@ public class QueryMatcher { * @param ttl * @param rowComparator */ - public QueryMatcher(Get get, byte [] family, + public QueryMatcher(Get get, byte [] family, NavigableSet columns, long ttl, KeyComparator rowComparator, int maxVersions) { this.row = get.getRow(); @@ -164,7 +164,7 @@ public class QueryMatcher { this.startKey = matcher.getStartKey(); reset(); } - + /** * Main method for ColumnMatcher. *

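The MatchCode values listed above are only half the story; the caller has to react to them, and StoreScanner does so with a similar switch further down in this patch. Below is a minimal sketch of such a caller, assuming it sits in org.apache.hadoop.hbase.regionserver so the enum is visible; the class, method and variable names (QueryMatcherUsageSketch, collect, section, results) are illustrative only and not part of this patch.

    package org.apache.hadoop.hbase.regionserver;

    import java.util.Iterator;
    import java.util.List;

    import org.apache.hadoop.hbase.KeyValue;

    class QueryMatcherUsageSketch {
      // Drives a QueryMatcher over the KeyValues of one section (memstore,
      // snapshot or a single storefile) and keeps what the matcher includes.
      static void collect(QueryMatcher matcher, Iterator<KeyValue> section,
          List<KeyValue> results) {
        boolean more = true;
        while (more && section.hasNext()) {
          KeyValue kv = section.next();
          switch (matcher.match(kv)) {
          case INCLUDE:
            results.add(kv);   // keep this KeyValue in the result
            break;
          case SKIP:
            break;             // ignore it and keep walking this section
          default:
            more = false;      // NEXT or the DONE codes: stop reading here
            break;
          }
        }
        matcher.update();      // end of section, as the javadoc above describes
      }
    }

INCLUDE and SKIP keep the loop on the current section, while NEXT and the DONE codes end it early; update() then readies the matcher for the next section.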
        @@ -195,10 +195,10 @@ public class QueryMatcher { // Directly act on KV buffer byte [] bytes = kv.getBuffer(); int offset = kv.getOffset(); - + int keyLength = Bytes.toInt(bytes, offset); offset += KeyValue.ROW_OFFSET; - + short rowLength = Bytes.toShort(bytes, offset); offset += Bytes.SIZEOF_SHORT; @@ -207,7 +207,7 @@ public class QueryMatcher { /* Check ROW * If past query's row, go to next StoreFile * If not reached query's row, go to next KeyValue - */ + */ int ret = this.rowComparator.compareRows(row, 0, row.length, bytes, offset, rowLength); if (ret <= -1) { @@ -220,7 +220,7 @@ public class QueryMatcher { offset += rowLength; byte familyLength = bytes[offset]; offset += Bytes.SIZEOF_BYTE + familyLength; - + int columnLength = keyLength + KeyValue.ROW_OFFSET - (offset - kv.getOffset()) - KeyValue.TIMESTAMP_TYPE_SIZE; int columnOffset = offset; @@ -244,14 +244,14 @@ public class QueryMatcher { */ byte type = bytes[offset]; // if delete type == delete family, return done_row - + if (isDelete(type)) { if (tr.withinOrAfterTimeRange(timestamp)) { this.deletes.add(bytes, columnOffset, columnLength, timestamp, type); } return MatchCode.SKIP; // skip the delete cell. } - + /* Check TimeRange * If outside of range, move to next KeyValue */ @@ -274,8 +274,8 @@ public class QueryMatcher { * Returns a MatchCode directly, identical language * If matched column without enough versions, include * If enough versions of this column or does not match, skip - * If have moved past - * If enough versions of everything, + * If have moved past + * If enough versions of everything, * TODO: No mapping from Filter.ReturnCode to MatchCode. */ MatchCode mc = columns.checkColumn(bytes, columnOffset, columnLength); @@ -293,7 +293,7 @@ public class QueryMatcher { protected boolean isDelete(byte type) { return (type != KeyValue.Type.Put.getCode()); } - + protected boolean isExpired(long timestamp) { return (timestamp < oldestStamp); } @@ -309,18 +309,18 @@ public class QueryMatcher { public ColumnCount getSeekColumn() { return this.columns.getColumnHint(); } - + /** * Called after reading each section (memstore, snapshot, storefiles). *

        * This method will update the internal structures to be accurate for - * the next section. + * the next section. */ public void update() { this.deletes.update(); this.columns.update(); } - + /** * Resets the current columns and deletes */ @@ -336,52 +336,52 @@ public class QueryMatcher { public void setRow(byte [] row) { this.row = row; } - + /** - * + * * @return the start key */ public KeyValue getStartKey() { return this.startKey; } - + /** * @return the TimeRange */ public TimeRange getTimeRange() { return this.tr; } - + /** * @return the oldest stamp */ public long getOldestStamp() { return this.oldestStamp; } - + /** * @return current KeyComparator */ public KeyComparator getRowComparator() { return this.rowComparator; } - + /** * @return ColumnTracker */ public ColumnTracker getColumnTracker() { return this.columns; } - + /** * @return DeleteTracker */ public DeleteTracker getDeleteTracker() { return this.deletes; } - + /** - * + * * @return true when done. */ public boolean isDone() { diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java index 17874d9..ed36ed7 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java @@ -27,7 +27,7 @@ import java.io.IOException; */ public class RegionServerRunningException extends IOException { private static final long serialVersionUID = 1L << 31 - 1L; - + /** Default Constructor */ public RegionServerRunningException() { super(); diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java index e29463b..d9e285b 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.util.Bytes; *

      • {@link #isDeleted} when checking if a Put KeyValue has been deleted *
      • {@link #update} when reaching the end of a StoreFile or row for scans *

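A minimal sketch of that calling pattern follows, assuming KeyValues arrive in comparator order (delete markers ahead of the Puts they can mask) and that the KeyValue accessors used here (getBuffer, getQualifierOffset, getQualifierLength, getTimestamp, getType) exist as named; only the tracker calls themselves (add, isDeleted, update) are taken from this file and from the matcher hunks above.

    package org.apache.hadoop.hbase.regionserver;

    import java.util.List;

    import org.apache.hadoop.hbase.KeyValue;

    class DeleteTrackerUsageSketch {
      // Per-row loop: remember delete markers, drop the Puts they mask, then
      // let the tracker tidy up at the end of the StoreFile or row.
      static void filterRow(Iterable<KeyValue> row, List<KeyValue> results) {
        ScanDeleteTracker deletes = new ScanDeleteTracker();
        for (KeyValue kv : row) {
          byte [] buf = kv.getBuffer();
          int qOff = kv.getQualifierOffset();
          int qLen = kv.getQualifierLength();
          if (kv.getType() != KeyValue.Type.Put.getCode()) {
            deletes.add(buf, qOff, qLen, kv.getTimestamp(), kv.getType());
          } else if (!deletes.isDeleted(buf, qOff, qLen, kv.getTimestamp())) {
            results.add(kv);   // not masked by any delete seen so far
          }
        }
        deletes.update();      // second bullet above
      }
    }

Family-wide deletes need no special casing in such a caller; the familyStamp comparison inside isDeleted(), visible in the hunk below, already covers them.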
        - * This class is NOT thread-safe as queries are never multi-threaded + * This class is NOT thread-safe as queries are never multi-threaded */ public class ScanDeleteTracker implements DeleteTracker { @@ -53,7 +53,7 @@ public class ScanDeleteTracker implements DeleteTracker { public ScanDeleteTracker() { super(); } - + /** * Add the specified KeyValue to the list of deletes to check against for * this row operation. @@ -91,7 +91,7 @@ public class ScanDeleteTracker implements DeleteTracker { // missing else is never called. } - /** + /** * Check if the specified KeyValue buffer has been deleted by a previously * seen delete. * @@ -107,7 +107,7 @@ public class ScanDeleteTracker implements DeleteTracker { if (timestamp < familyStamp) { return true; } - + if (deleteBuffer != null) { int ret = Bytes.compareTo(deleteBuffer, deleteOffset, deleteLength, buffer, qualifierOffset, qualifierLength); @@ -150,7 +150,7 @@ public class ScanDeleteTracker implements DeleteTracker { } @Override - // should not be called at all even (!) + // should not be called at all even (!) public void update() { this.reset(); } diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java index 6a8cf30..e0cf35f 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java @@ -44,7 +44,7 @@ public class ScanQueryMatcher extends QueryMatcher { * @param rowComparator */ public ScanQueryMatcher(Scan scan, byte [] family, - NavigableSet columns, long ttl, + NavigableSet columns, long ttl, KeyValue.KeyComparator rowComparator, int maxVersions) { this.tr = scan.getTimeRange(); this.oldestStamp = System.currentTimeMillis() - ttl; @@ -52,7 +52,7 @@ public class ScanQueryMatcher extends QueryMatcher { this.deletes = new ScanDeleteTracker(); this.startKey = KeyValue.createFirstOnRow(scan.getStartRow()); this.filter = scan.getFilter(); - + // Single branch to deal with two types of reads (columns vs all in family) if (columns == null || columns.size() == 0) { // use a specialized scan for wildcard column tracker. @@ -71,7 +71,7 @@ public class ScanQueryMatcher extends QueryMatcher { * - include the current KeyValue (MatchCode.INCLUDE) * - ignore the current KeyValue (MatchCode.SKIP) * - got to the next row (MatchCode.DONE) - * + * * @param kv KeyValue to check * @return The match code instance. */ @@ -82,14 +82,14 @@ public class ScanQueryMatcher extends QueryMatcher { byte [] bytes = kv.getBuffer(); int offset = kv.getOffset(); - int initialOffset = offset; + int initialOffset = offset; int keyLength = Bytes.toInt(bytes, offset, Bytes.SIZEOF_INT); offset += KeyValue.ROW_OFFSET; - + short rowLength = Bytes.toShort(bytes, offset, Bytes.SIZEOF_SHORT); offset += Bytes.SIZEOF_SHORT; - + int ret = this.rowComparator.compareRows(row, 0, row.length, bytes, offset, rowLength); if (ret <= -1) { @@ -109,17 +109,17 @@ public class ScanQueryMatcher extends QueryMatcher { stickyNextRow = true; return MatchCode.SEEK_NEXT_ROW; } - + //Passing rowLength offset += rowLength; //Skipping family byte familyLength = bytes [offset]; offset += familyLength + 1; - + int qualLength = keyLength + KeyValue.ROW_OFFSET - (offset - initialOffset) - KeyValue.TIMESTAMP_TYPE_SIZE; - + long timestamp = kv.getTimestamp(); if (isExpired(timestamp)) { // done, the rest of this column will also be expired as well. 
@@ -132,7 +132,7 @@ public class ScanQueryMatcher extends QueryMatcher { this.deletes.add(bytes, offset, qualLength, timestamp, type); // Can't early out now, because DelFam come before any other keys } - // May be able to optimize the SKIP here, if we matched + // May be able to optimize the SKIP here, if we matched // due to a DelFam, we can skip to next row // due to a DelCol, we can skip to next col // But it requires more info out of isDelete(). diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java index dab1879..a32d6e3 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.util.Bytes; * Keeps track of the columns for a scan if they are not explicitly specified */ public class ScanWildcardColumnTracker implements ColumnTracker { - private static final Log LOG = + private static final Log LOG = LogFactory.getLog(ScanWildcardColumnTracker.class); private byte [] columnBuffer = null; private int columnOffset = 0; @@ -103,7 +103,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { currentCount = 0; if (++currentCount > maxVersions) return MatchCode.SKIP; - return MatchCode.INCLUDE; + return MatchCode.INCLUDE; } @Override @@ -122,7 +122,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { * Used by matcher and scan/get to get a hint of the next column * to seek to after checkColumn() returns SKIP. Returns the next interesting * column we want, or NULL there is none (wildcard scanner). - * + * * @return The column count. */ public ColumnCount getColumnHint() { @@ -131,7 +131,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { /** - * We can never know a-priori if we are done, so always return false. + * We can never know a-priori if we are done, so always return false. * @return false */ @Override diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 4328d03..b460ad6 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -67,7 +67,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; * A Store holds a column family in a Region. Its a memstore and a set of zero * or more StoreFiles, which stretch backwards over time. * - *

        There's no reason to consider append-logging at this level; all logging + *

        There's no reason to consider append-logging at this level; all logging * and locking is handled at the HRegion level. Store just provides * services to manage sets of StoreFiles. One of the most important of those * services is compaction services where files are aggregated once they pass @@ -140,7 +140,7 @@ public class Store implements HConstants, HeapSize { private final int blocksize; private final boolean blockcache; private final Compression.Algorithm compression; - + // Comparing KeyValues final KeyValue.KVComparator comparator; final KeyValue.KVComparator comparatorIgnoringType; @@ -191,7 +191,7 @@ public class Store implements HConstants, HeapSize { this.ttl *= 1000; } this.memstore = new MemStore(this.comparator); - this.regionCompactionDir = new Path(HRegion.getCompactionDir(basedir), + this.regionCompactionDir = new Path(HRegion.getCompactionDir(basedir), Integer.toString(info.getEncodedName())); this.storeName = this.family.getName(); this.storeNameStr = Bytes.toString(this.storeName); @@ -200,10 +200,10 @@ public class Store implements HConstants, HeapSize { // MIN_COMMITS_FOR_COMPACTION map files this.compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3); - + // Check if this is in-memory store this.inMemory = family.isInMemory(); - + // By default we split region if a file > DEFAULT_MAX_FILE_SIZE. long maxFileSize = info.getTableDesc().getMaxFileSize(); if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) { @@ -233,7 +233,7 @@ public class Store implements HConstants, HeapSize { this.maxSeqId = newId; // start with the log id we just recovered. } } - + HColumnDescriptor getFamily() { return this.family; } @@ -241,7 +241,7 @@ public class Store implements HConstants, HeapSize { long getMaxSequenceId() { return this.maxSeqId; } - + long getMaxSeqIdBeforeLogRecovery() { return maxSeqIdBeforeLogRecovery; } @@ -291,10 +291,10 @@ public class Store implements HConstants, HeapSize { } /* - * Read the reconstructionLog and put into memstore. + * Read the reconstructionLog and put into memstore. * - * We can ignore any log message that has a sequence ID that's equal to or - * lower than maxSeqID. (Because we know such log messages are already + * We can ignore any log message that has a sequence ID that's equal to or + * lower than maxSeqID. (Because we know such log messages are already * reflected in the HFiles.) * * @return the new max sequence id as per the log, or -1 if no log recovered @@ -377,7 +377,7 @@ public class Store implements HConstants, HeapSize { } finally { logReader.close(); } - + if (maxSeqIdInLog > -1) { // We read some edits, so we should flush the memstore this.snapshot(); @@ -437,7 +437,7 @@ public class Store implements HConstants, HeapSize { /** * Adds a value to the memstore - * + * * @param kv * @return memstore size delta */ @@ -452,7 +452,7 @@ public class Store implements HConstants, HeapSize { /** * Adds a value to the memstore - * + * * @param kv * @return memstore size delta */ @@ -474,10 +474,10 @@ public class Store implements HConstants, HeapSize { /** * Close all the readers - * + * * We don't need to worry about subsequent requests because the HRegion holds * a write lock that will prevent any more reads or writes. 
- * + * * @throws IOException */ List close() throws IOException { @@ -567,7 +567,7 @@ public class Store implements HConstants, HeapSize { writer.close(); } } - StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache, + StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache, this.conf, this.inMemory); Reader r = sf.getReader(); this.storeSize += r.length(); @@ -657,21 +657,21 @@ public class Store implements HConstants, HeapSize { ////////////////////////////////////////////////////////////////////////////// /** - * Compact the StoreFiles. This method may take some time, so the calling + * Compact the StoreFiles. This method may take some time, so the calling * thread must be able to block for long periods. - * + * *

        During this time, the Store can work as usual, getting values from * StoreFiles and writing new StoreFiles from the memstore. - * - * Existing StoreFiles are not destroyed until the new compacted StoreFile is + * + * Existing StoreFiles are not destroyed until the new compacted StoreFile is * completely written-out to disk. * *

        The compactLock prevents multiple simultaneous compactions. * The structureLock prevents us from interfering with other write operations. - * - *

        We don't want to hold the structureLock for the whole time, as a compact() + * + *

        We don't want to hold the structureLock for the whole time, as a compact() * can be lengthy and we want to allow cache-flushes during this period. - * + * * @param mc True to force a major compaction regardless of thresholds * @return row to split around if a split is needed, null otherwise * @throws IOException @@ -699,7 +699,7 @@ public class Store implements HConstants, HeapSize { } boolean references = hasReferences(filesToCompact); - if (!majorcompaction && !references && + if (!majorcompaction && !references && (forceSplit || (filesToCompact.size() < compactionThreshold))) { return checkSplit(forceSplit); } @@ -733,14 +733,14 @@ public class Store implements HConstants, HeapSize { fileSizes[i] = len; totalSize += len; } - + if (!majorcompaction && !references) { - // Here we select files for incremental compaction. - // The rule is: if the largest(oldest) one is more than twice the + // Here we select files for incremental compaction. + // The rule is: if the largest(oldest) one is more than twice the // size of the second, skip the largest, and continue to next..., // until we meet the compactionThreshold limit. for (point = 0; point < countOfFiles - 1; point++) { - if ((fileSizes[point] < fileSizes[point + 1] * 2) && + if ((fileSizes[point] < fileSizes[point + 1] * 2) && (countOfFiles - point) <= maxFilesToCompact) { break; } @@ -763,7 +763,7 @@ public class Store implements HConstants, HeapSize { " file(s), size: " + skipped); } } - + // Ready to go. Have list of files to compact. LOG.debug("Started compaction of " + filesToCompact.size() + " file(s)" + (references? ", hasReferences=true,": " ") + " into " + @@ -798,7 +798,7 @@ public class Store implements HConstants, HeapSize { /* * Gets lowest timestamp from files in a dir - * + * * @param fs * @param dir * @throws IOException @@ -866,7 +866,7 @@ public class Store implements HConstants, HeapSize { /** * Do a minor/major compaction. Uses the scan infrastructure to make it easy. - * + * * @param filesToCompact which files to compact * @param majorCompaction true to major compact (prune all deletes, max versions, etc) * @param maxId Readers maximum sequence id. @@ -942,14 +942,14 @@ public class Store implements HConstants, HeapSize { } /* - * It's assumed that the compactLock will be acquired prior to calling this + * It's assumed that the compactLock will be acquired prior to calling this * method! Otherwise, it is not thread-safe! * *
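
The selection rule in the comment above ("if the largest(oldest) one is more than twice the size of the second, skip the largest, and continue to next..., until we meet the compactionThreshold limit") is easy to misread, so here is the same arithmetic restated as a standalone sketch; CompactSelectionSketch and selectStart are illustrative names and not part of Store.

    final class CompactSelectionSketch {
      // fileSizes is ordered oldest (and typically largest) first; returns the
      // index of the first file to include in the compaction, so everything
      // before that index is left alone.
      static int selectStart(long [] fileSizes, int maxFilesToCompact) {
        int countOfFiles = fileSizes.length;
        int point;
        for (point = 0; point < countOfFiles - 1; point++) {
          // Keep skipping while the current file is at least twice the size of
          // the next one, or while taking the rest would exceed the cap.
          if ((fileSizes[point] < fileSizes[point + 1] * 2)
              && (countOfFiles - point) <= maxFilesToCompact) {
            break;
          }
        }
        return point;
      }
    }

For example, with sizes {100, 40, 30, 20} and a cap of 10, the 100-unit file is skipped (it is more than twice 40) and the remaining three files are compacted together, subject to the compactionThreshold check the surrounding code performs.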

        It works by processing a compaction that's been written to disk. - * + * *

        It is usually invoked at the end of a compaction, but might also be * invoked at HStore startup, if the prior execution died midway through. - * + * *

        Moving the compacted TreeMap into place means: *

            * 1) Moving the new compacted StoreFile into place
        @@ -957,7 +957,7 @@ public class Store implements HConstants, HeapSize {
            * 3) Loading the new TreeMap.
            * 4) Compute new store size
            * 
        - * + * * @param compactedFiles list of files that were compacted * @param compactedFile StoreFile that is the result of the compaction * @return StoreFile created. May be null. @@ -1038,7 +1038,7 @@ public class Store implements HConstants, HeapSize { public int getNumberOfstorefiles() { return this.storefiles.size(); } - + /* * @param wantedVersions How many versions were asked for. * @return wantedVersions or this families' VERSIONS. @@ -1065,8 +1065,8 @@ public class Store implements HConstants, HeapSize { /** * Find the key that matches row exactly, or the one that immediately - * preceeds it. WARNING: Only use this method on a table where writes occur - * with strictly increasing timestamps. This method assumes this pattern of + * preceeds it. WARNING: Only use this method on a table where writes occur + * with strictly increasing timestamps. This method assumes this pattern of * writes in order to make it reasonably performant. Also our search is * dependent on the axiom that deletes are for cells that are in the container * that follows whether a memstore snapshot or a storefile, not for the @@ -1260,8 +1260,8 @@ public class Store implements HConstants, HeapSize { byte [] lk = r.getLastKey(); KeyValue lastKey = KeyValue.createKeyValueFromKey(lk, 0, lk.length); // if the midkey is the same as the first and last keys, then we cannot - // (ever) split this region. - if (this.comparator.compareRows(mk, firstKey) == 0 && + // (ever) split this region. + if (this.comparator.compareRows(mk, firstKey) == 0 && this.comparator.compareRows(mk, lastKey) == 0) { if (LOG.isDebugEnabled()) { LOG.debug("cannot split because midkey is the same as first or " + @@ -1278,12 +1278,12 @@ public class Store implements HConstants, HeapSize { } return null; } - + /** @return aggregate size of HStore */ public long getSize() { return storeSize; } - + ////////////////////////////////////////////////////////////////////////////// // File administration ////////////////////////////////////////////////////////////////////////////// @@ -1383,7 +1383,7 @@ public class Store implements HConstants, HeapSize { * @param kv Key to find. * @return True if we were able to seek the scanner to b or to * the key just after. - * @throws IOException + * @throws IOException */ static boolean getClosest(final HFileScanner s, final KeyValue kv) throws IOException { @@ -1404,15 +1404,15 @@ public class Store implements HConstants, HeapSize { } return true; } - + /** * Retrieve results from this store given the specified Get parameters. 
* @param get Get operation * @param columns List of columns to match, can be empty (not null) - * @param result List to add results to + * @param result List to add results to * @throws IOException */ - public void get(Get get, NavigableSet columns, List result) + public void get(Get get, NavigableSet columns, List result) throws IOException { KeyComparator keyComparator = this.comparator.getRawComparator(); @@ -1426,12 +1426,12 @@ public class Store implements HConstants, HeapSize { // Received early-out from memstore return; } - + // Check if we even have storefiles if (this.storefiles.isEmpty()) { return; } - + // Get storefiles for this store List storefileScanners = new ArrayList(); for (StoreFile sf : this.storefiles.descendingMap().values()) { @@ -1443,11 +1443,11 @@ public class Store implements HConstants, HeapSize { // Get a scanner that caches the block and uses pread storefileScanners.add(r.getScanner(true, true)); } - + // StoreFileGetScan will handle reading this store's storefiles StoreFileGetScan scanner = new StoreFileGetScan(storefileScanners, matcher); - - // Run a GET scan and put results into the specified list + + // Run a GET scan and put results into the specified list scanner.get(result); } finally { this.lock.readLock().unlock(); @@ -1521,17 +1521,17 @@ public class Store implements HConstants, HeapSize { public boolean hasTooManyStoreFiles() { return this.storefiles.size() > this.compactionThreshold; } - + public static final long FIXED_OVERHEAD = ClassSize.align( ClassSize.OBJECT + (17 * ClassSize.REFERENCE) + (6 * Bytes.SIZEOF_LONG) + (3 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN + ClassSize.align(ClassSize.ARRAY)); - + public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD + - ClassSize.OBJECT + ClassSize.REENTRANT_LOCK + - ClassSize.CONCURRENT_SKIPLISTMAP + + ClassSize.OBJECT + ClassSize.REENTRANT_LOCK + + ClassSize.CONCURRENT_SKIPLISTMAP + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + ClassSize.OBJECT); - + @Override public long heapSize() { return DEEP_OVERHEAD + this.memstore.heapSize(); diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 324d26f..038c09e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -78,7 +78,7 @@ public class StoreFile implements HConstants { private boolean blockcache; // Is this from an in-memory store private boolean inMemory; - + // Keys for metadata stored in backing HFile. private static final byte [] MAX_SEQ_ID_KEY = Bytes.toBytes("MAX_SEQ_ID_KEY"); // Set when we obtain a Reader. @@ -89,7 +89,7 @@ public class StoreFile implements HConstants { // If true, this file was product of a major compaction. Its then set // whenever you get a Reader. private AtomicBoolean majorCompaction = null; - + /* * Regex that will work for straight filenames and for reference names. * If reference, then the regex has more than just one group. Group 1 is @@ -105,17 +105,17 @@ public class StoreFile implements HConstants { private final Configuration conf; /** - * Constructor, loads a reader and it's indices, etc. May allocate a + * Constructor, loads a reader and it's indices, etc. May allocate a * substantial amount of ram depending on the underlying files (10-20MB?). - * + * * @param fs The current file system to use. * @param p The path of the file. * @param blockcache true if the block cache is enabled. 
* @param conf The current configuration. * @throws IOException When opening the reader fails. */ - StoreFile(final FileSystem fs, final Path p, final boolean blockcache, - final Configuration conf, final boolean inMemory) + StoreFile(final FileSystem fs, final Path p, final boolean blockcache, + final Configuration conf, final boolean inMemory) throws IOException { this.conf = conf; this.fs = fs; @@ -220,7 +220,7 @@ public class StoreFile implements HConstants { /** * Returns the block cache or null in case none should be used. - * + * * @param conf The current configuration. * @return The block cache or null. */ @@ -263,7 +263,7 @@ public class StoreFile implements HConstants { throw new IllegalAccessError("Already open"); } if (isReference()) { - this.reader = new HalfHFileReader(this.fs, this.referencePath, + this.reader = new HalfHFileReader(this.fs, this.referencePath, getBlockCache(), this.reference); } else { this.reader = new Reader(this.fs, this.path, getBlockCache(), @@ -285,7 +285,7 @@ public class StoreFile implements HConstants { this.sequenceid += 1; } } - + } b = map.get(MAJOR_COMPACTION_KEY); if (b != null) { @@ -327,7 +327,7 @@ public class StoreFile implements HConstants { /** * Delete this file - * @throws IOException + * @throws IOException */ public void delete() throws IOException { close(); @@ -446,7 +446,7 @@ public class StoreFile implements HConstants { * Write file metadata. * Call before you call close on the passed w since its written * as metadata to that file. - * + * * @param w hfile writer * @param maxSequenceId Maximum sequence id. * @throws IOException @@ -487,7 +487,7 @@ public class StoreFile implements HConstants { throws IOException { // A reference to the bottom half of the hsf store file. Reference r = new Reference(splitRow, range); - // Add the referred-to regions name as a dot separated suffix. + // Add the referred-to regions name as a dot separated suffix. // See REF_NAME_PARSER regex above. The referred-to regions name is // up in the path of the passed in f -- parentdir is family, // then the directory above is the region name. diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java index 73b2e04..4d2e742 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java @@ -35,7 +35,7 @@ public class StoreFileGetScan { private QueryMatcher matcher; private KeyValue startKey; - + /** * Constructor * @param scanners @@ -55,7 +55,7 @@ public class StoreFileGetScan { * proceeding to the next StoreFile. *

        * This strategy allows for optimal, stateless (no persisted Scanners) - * early-out scenarios. + * early-out scenarios. * @param result List to add results to * @throws IOException */ @@ -67,15 +67,15 @@ public class StoreFileGetScan { } } } - + /** * Performs a GET operation on a single StoreFile. * @param scanner * @param result * @return true if done with this store, false if must continue to next - * @throws IOException + * @throws IOException */ - public boolean getStoreFile(HFileScanner scanner, List result) + public boolean getStoreFile(HFileScanner scanner, List result) throws IOException { if (scanner.seekTo(startKey.getBuffer(), startKey.getKeyOffset(), startKey.getKeyLength()) == -1) { @@ -108,5 +108,5 @@ public class StoreFileGetScan { } while(scanner.next()); return false; } - + } diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index 7d128ad..676b103 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -29,10 +29,10 @@ import java.io.IOException; * A KeyValue scanner that iterates over a single HFile */ class StoreFileScanner implements KeyValueScanner { - + private HFileScanner hfs; private KeyValue cur = null; - + /** * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner} * @param hfs HFile scanner @@ -44,11 +44,11 @@ class StoreFileScanner implements KeyValueScanner { public String toString() { return "StoreFileScanner[" + hfs.toString() + ", cur=" + cur + "]"; } - + public KeyValue peek() { return cur; } - + public KeyValue next() { KeyValue retKey = cur; cur = hfs.getKeyValue(); @@ -62,7 +62,7 @@ class StoreFileScanner implements KeyValueScanner { } return retKey; } - + public boolean seek(KeyValue key) { try { if(!seekAtOrAfter(hfs, key)) { @@ -77,14 +77,14 @@ class StoreFileScanner implements KeyValueScanner { return false; } } - + public void close() { // Nothing to close on HFileScanner? cur = null; } - + /** - * + * * @param s * @param k * @return diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index c00663a..64689ca 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -74,7 +74,7 @@ class StoreScanner implements KeyValueScanner, InternalScanner, ChangedReadersOb /** * Used for major compactions.

        - * + * * Opens a scanner across specified StoreFiles. */ StoreScanner(Store store, Scan scan, KeyValueScanner [] scanners) { @@ -100,7 +100,7 @@ class StoreScanner implements KeyValueScanner, InternalScanner, ChangedReadersOb final KeyValueScanner [] scanners) { this.store = null; this.cacheBlocks = scan.getCacheBlocks(); - this.matcher = new ScanQueryMatcher(scan, colFamily, columns, ttl, + this.matcher = new ScanQueryMatcher(scan, colFamily, columns, ttl, comparator.getRawComparator(), scan.getMaxVersions()); // Seek all scanners to the initial key @@ -197,12 +197,12 @@ class StoreScanner implements KeyValueScanner, InternalScanner, ChangedReadersOb case SKIP: this.heap.next(); break; - + default: throw new RuntimeException("UNEXPECTED"); } } - + if (!results.isEmpty()) { // copy jazz outResult.addAll(results); diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java index 1a9413b..0e673c4 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java @@ -26,7 +26,7 @@ import java.util.ArrayList; import java.util.List; /** - * This class is used for the tracking and enforcement of columns and numbers + * This class is used for the tracking and enforcement of columns and numbers * of versions during the course of a Get or Scan operation, when all available * column qualifiers have been asked for in the query. *

        @@ -36,20 +36,20 @@ import java.util.List; * what action should be taken. *

      • {@link #update} is called at the end of every StoreFile or memstore. *

        - * This class is NOT thread-safe as queries are never multi-threaded + * This class is NOT thread-safe as queries are never multi-threaded */ public class WildcardColumnTracker implements ColumnTracker { - + private int maxVersions; - + protected List columns; private int index; private ColumnCount column; - - private List newColumns; + + private List newColumns; private int newIndex; private ColumnCount newColumn; - + /** * Default constructor. * @param maxVersions maximum versions to return per columns @@ -58,7 +58,7 @@ public class WildcardColumnTracker implements ColumnTracker { this.maxVersions = maxVersions; reset(); } - + public void reset() { this.index = 0; this.column = null; @@ -67,7 +67,7 @@ public class WildcardColumnTracker implements ColumnTracker { this.newIndex = 0; this.newColumn = null; } - + /** * Can never early-out from reading more storefiles in Wildcard case. */ @@ -241,7 +241,7 @@ public class WildcardColumnTracker implements ColumnTracker { } } while(true); } - + /** * Called at the end of every StoreFile or memstore. */ @@ -253,14 +253,14 @@ public class WildcardColumnTracker implements ColumnTracker { } return; } - + // If no new columns, retain previous columns and return if(this.newColumns.size() == 0) { this.index = 0; this.column = this.columns.get(index); return; } - + // Merge previous columns with new columns // There will be no overlapping List mergeColumns = new ArrayList( @@ -271,14 +271,14 @@ public class WildcardColumnTracker implements ColumnTracker { newColumn = newColumns.get(0); while(true) { int ret = Bytes.compareTo( - column.getBuffer(), column.getOffset(),column.getLength(), + column.getBuffer(), column.getOffset(),column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), newColumn.getLength()); - + // Existing is smaller than new, add existing and iterate it if(ret <= -1) { mergeColumns.add(column); if(++index == columns.size()) { - // No more existing left, merge down rest of new and return + // No more existing left, merge down rest of new and return mergeDown(mergeColumns, newColumns, newIndex); finish(mergeColumns); return; @@ -286,7 +286,7 @@ public class WildcardColumnTracker implements ColumnTracker { column = columns.get(index); continue; } - + // New is smaller than existing, add new and iterate it mergeColumns.add(newColumn); if(++newIndex == newColumns.size()) { @@ -299,23 +299,23 @@ public class WildcardColumnTracker implements ColumnTracker { continue; } } - - private void mergeDown(List mergeColumns, + + private void mergeDown(List mergeColumns, List srcColumns, int srcIndex) { int index = srcIndex; while(index < srcColumns.size()) { mergeColumns.add(srcColumns.get(index++)); } } - + private void finish(List mergeColumns) { this.columns = mergeColumns; this.index = 0; this.column = this.columns.size() > 0? 
columns.get(index) : null; - + this.newColumns = new ArrayList(); this.newIndex = 0; this.newColumn = null; } - + } diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java index 1bd8049..d6e5729 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java @@ -38,7 +38,7 @@ import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; import java.lang.management.ManagementFactory; import java.lang.management.MemoryUsage; -/** +/** * This class is for maintaining the various regionserver statistics * and publishing them through the metrics interfaces. *

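As a usage note for two of the members declared in the hunks below (fsReadLatency and incrementRequests), here is a hedged fragment; metrics stands for the regionserver's RegionServerMetrics instance and the timing code around it is illustrative.

    // Record one timed filesystem read and count the request.
    long start = System.currentTimeMillis();
    // ... perform the HFile read ...
    metrics.fsReadLatency.inc(1, System.currentTimeMillis() - start); // numOps, elapsed ms
    metrics.incrementRequests(1);

The pushMetric() calls visible further down then publish whatever has accumulated on the next metrics period.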
        @@ -53,10 +53,10 @@ public class RegionServerMetrics implements Updater { private static final int MB = 1024*1024; private MetricsRegistry registry = new MetricsRegistry(); private final RegionServerStatistics statistics; - + public final MetricsTimeVaryingRate atomicIncrementTime = new MetricsTimeVaryingRate("atomicIncrementTime", registry); - + /** * Count of regions carried by this regionserver */ @@ -113,25 +113,25 @@ public class RegionServerMetrics implements Updater { /** * Size of the compaction queue. */ - public final MetricsIntValue compactionQueueSize = + public final MetricsIntValue compactionQueueSize = new MetricsIntValue("compactionQueueSize", registry); - + /** * filesystem read latency */ - public final MetricsTimeVaryingRate fsReadLatency = + public final MetricsTimeVaryingRate fsReadLatency = new MetricsTimeVaryingRate("fsReadLatency", registry); /** * filesystem write latency */ - public final MetricsTimeVaryingRate fsWriteLatency = + public final MetricsTimeVaryingRate fsWriteLatency = new MetricsTimeVaryingRate("fsWriteLatency", registry); /** * filesystem sync latency */ - public final MetricsTimeVaryingRate fsSyncLatency = + public final MetricsTimeVaryingRate fsSyncLatency = new MetricsTimeVaryingRate("fsSyncLatency", registry); public RegionServerMetrics() { @@ -172,7 +172,7 @@ public class RegionServerMetrics implements Updater { this.blockCacheFree.pushMetric(this.metricsRecord); this.blockCacheCount.pushMetric(this.metricsRecord); this.blockCacheHitRatio.pushMetric(this.metricsRecord); - + // Mix in HFile and HLog metrics // Be careful. Here is code for MTVR from up in hadoop: // public synchronized void inc(final int numOps, final long time) { @@ -213,14 +213,14 @@ public class RegionServerMetrics implements Updater { public float getRequests() { return this.requests.getPreviousIntervalValue(); } - + /** * @param inc How much to add to requests. 
*/ public void incrementRequests(final int inc) { this.requests.inc(inc); } - + @Override public String toString() { StringBuilder sb = new StringBuilder(); diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java index 53c798a..04fe7b1 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java @@ -35,8 +35,8 @@ public class RegionServerStatistics extends MetricsMBeanBase { public RegionServerStatistics(MetricsRegistry registry, String rsName) { super(registry, "RegionServerStatistics"); - mbeanName = MBeanUtil.registerMBean("RegionServer", - "RegionServerStatistics", this); + mbeanName = MBeanUtil.registerMBean("RegionServer", + "RegionServerStatistics", this); } public void shutdown() { diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java index c001d40..393b1d2 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java @@ -29,7 +29,7 @@ public class FailedLogCloseException extends IOException { private static final long serialVersionUID = 1759152841462990925L; /** - * + * */ public FailedLogCloseException() { super(); diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java index 95edd9c..a013211 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java @@ -100,7 +100,7 @@ import java.util.concurrent.locks.ReentrantLock; * start of a cache flush and the completion point, appends are allowed but log * rolling is not. To prevent log rolling taking place during this period, a * separate reentrant lock is used. - * + * *

        To read an HLog, call {@link #getReader(org.apache.hadoop.fs.FileSystem, * org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration)}. * @@ -141,7 +141,7 @@ public class HLog implements HConstants, Syncable { void close() throws IOException; void sync() throws IOException; void append(Entry entry) throws IOException; - long getLength() throws IOException; + long getLength() throws IOException; } // used to indirectly tell syncFs to force the sync @@ -153,7 +153,7 @@ public class HLog implements HConstants, Syncable { Writer writer; /* - * Map of all log files but the current one. + * Map of all log files but the current one. */ final SortedMap outputfiles = Collections.synchronizedSortedMap(new TreeMap()); @@ -169,7 +169,7 @@ public class HLog implements HConstants, Syncable { private final AtomicLong logSeqNum = new AtomicLong(0); private volatile long filenum = -1; - + private final AtomicInteger numEntries = new AtomicInteger(0); // If > than this size, roll the log. @@ -311,7 +311,7 @@ public class HLog implements HConstants, Syncable { LOG.debug("Change sequence number from " + logSeqNum + " to " + newvalue); } } - + /** * @return log sequence number */ @@ -432,7 +432,7 @@ public class HLog implements HConstants, Syncable { throw ie; } } - + /* * Clean up old commit logs. * @return If lots of logs, flush the returned region so next time through @@ -541,7 +541,7 @@ public class HLog implements HConstants, Syncable { FailedLogCloseException flce = new FailedLogCloseException("#" + currentfilenum); flce.initCause(e); - throw e; + throw e; } if (currentfilenum >= 0) { oldFile = computeFilename(currentfilenum); @@ -616,7 +616,7 @@ public class HLog implements HConstants, Syncable { } /** Append an entry to the log. - * + * * @param regionInfo * @param logEdit * @param now Time of this edit write. @@ -640,11 +640,11 @@ public class HLog implements HConstants, Syncable { protected HLogKey makeKey(byte[] regionName, byte[] tableName, long seqnum, long now) { return new HLogKey(regionName, tableName, seqnum, now); } - - - + + + /** Append an entry to the log. 
- * + * * @param regionInfo * @param logEdit * @param logKey @@ -876,7 +876,7 @@ public class HLog implements HConstants, Syncable { this.listener.logRollRequested(); } } - + protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException { if (!this.enabled) { @@ -1013,7 +1013,7 @@ public class HLog implements HConstants, Syncable { public static boolean isMetaFamily(byte [] family) { return Bytes.equals(METAFAMILY, family); } - + /** * Split up a bunch of regionserver commit log files that are no longer * being written to, into new files, one per region for region to replay on @@ -1029,7 +1029,7 @@ public class HLog implements HConstants, Syncable { */ public static List splitLog(final Path rootDir, final Path srcDir, Path oldLogDir, final FileSystem fs, final Configuration conf) throws IOException { - + long millis = System.currentTimeMillis(); List splits = null; if (!fs.exists(srcDir)) { @@ -1076,13 +1076,13 @@ public class HLog implements HConstants, Syncable { this.w = w; } } - + @SuppressWarnings("unchecked") public static Class getKeyClass(Configuration conf) { - return (Class) + return (Class) conf.getClass("hbase.regionserver.hlog.keyclass", HLogKey.class); } - + public static HLogKey newKey(Configuration conf) throws IOException { Class keyClass = getKeyClass(conf); try { @@ -1109,28 +1109,28 @@ public class HLog implements HConstants, Syncable { Collections.synchronizedMap( new TreeMap(Bytes.BYTES_COMPARATOR)); List splits = null; - + // Number of threads to use when log splitting to rewrite the logs. // More means faster but bigger mem consumption. int logWriterThreads = conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3); - + // Number of logs to read concurrently when log splitting. // More means faster but bigger mem consumption */ int concurrentLogReads = conf.getInt("hbase.regionserver.hlog.splitlog.reader.threads", 3); // Is append supported? try { - int maxSteps = Double.valueOf(Math.ceil((logfiles.length * 1.0) / + int maxSteps = Double.valueOf(Math.ceil((logfiles.length * 1.0) / concurrentLogReads)).intValue(); for (int step = 0; step < maxSteps; step++) { - final Map> logEntries = + final Map> logEntries = new TreeMap>(Bytes.BYTES_COMPARATOR); // Stop at logfiles.length when it's the last step - int endIndex = step == maxSteps - 1? logfiles.length: + int endIndex = step == maxSteps - 1? logfiles.length: step * concurrentLogReads + concurrentLogReads; for (int i = (step * concurrentLogReads); i < endIndex; i++) { - // Check for possibly empty file. With appends, currently Hadoop + // Check for possibly empty file. With appends, currently Hadoop // reports a zero length even if the file has been sync'd. Revisit if // HADOOP-4751 is committed. 
long length = logfiles[i].getLen(); @@ -1347,7 +1347,7 @@ public class HLog implements HConstants, Syncable { /** * Construct the HLog directory name - * + * * @param info HServerInfo for server * @return the HLog directory name */ @@ -1357,7 +1357,7 @@ public class HLog implements HConstants, Syncable { /** * Construct the HLog directory name - * + * * @param serverAddress * @param startCode * @return the HLog directory name @@ -1370,10 +1370,10 @@ public class HLog implements HConstants, Syncable { return getHLogDirectoryName( HServerInfo.getServerName(serverAddress, startCode)); } - + /** * Construct the HLog directory name - * + * * @param serverName * @return the HLog directory name */ diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java index 4e97470..7c1184c 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java @@ -29,11 +29,11 @@ import java.io.*; /** * A Key for an entry in the change log. - * - * The log intermingles edits to many tables and rows, so each log entry - * identifies the appropriate table and row. Within a table and row, they're + * + * The log intermingles edits to many tables and rows, so each log entry + * identifies the appropriate table and row. Within a table and row, they're * also sorted. - * + * *

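To ground the description above, here is a tiny hedged fragment showing how such a key is built; the table and region values and the sequence number are invented for illustration, and the constructor is the same one HLog.makeKey(regionName, tableName, seqnum, now) invokes in the hunks further up.

    // Illustrative values only.
    byte [] regionName = Bytes.toBytes("t1,,1273078800000");
    byte [] tableName = Bytes.toBytes("t1");
    HLogKey key = new HLogKey(regionName, tableName, 42L, System.currentTimeMillis());

    key.getRegionName();  // region whose log the edit belongs to
    key.getTablename();   // kept mainly for debugging, per the constructor javadoc below
    key.getLogSeqNum();   // 42, the sequence number that orders edits during replay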
        Some Transactional edits (START, COMMIT, ABORT) will not have an * associated row. */ @@ -53,7 +53,7 @@ public class HLogKey implements WritableComparable { public HLogKey() { this(null, null, 0L, HConstants.LATEST_TIMESTAMP); } - + /** * Create the log key! * We maintain the tablename mainly for debugging purposes. @@ -82,7 +82,7 @@ public class HLogKey implements WritableComparable { public byte [] getRegionName() { return regionName; } - + /** @return table name */ public byte [] getTablename() { return tablename; @@ -92,7 +92,7 @@ public class HLogKey implements WritableComparable { public long getLogSeqNum() { return logSeqNum; } - + void setLogSeqNum(long logSeqNum) { this.logSeqNum = logSeqNum; } @@ -141,7 +141,7 @@ public class HLogKey implements WritableComparable { return Bytes.toString(tablename) + "/" + Bytes.toString(regionName) + "/" + logSeqNum; } - + @Override public boolean equals(Object obj) { if (this == obj) { @@ -152,7 +152,7 @@ public class HLogKey implements WritableComparable { } return compareTo((HLogKey)obj) == 0; } - + @Override public int hashCode() { int result = Bytes.hashCode(this.regionName); @@ -190,7 +190,7 @@ public class HLogKey implements WritableComparable { out.writeByte(this.clusterId); out.writeInt(this.scope); } - + public void readFields(DataInput in) throws IOException { this.regionName = Bytes.readByteArray(in); this.tablename = Bytes.readByteArray(in); diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java index 00da2c9..87482fe 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.io.SequenceFile; public class SequenceFileLogReader implements HLog.Reader { - + /** * Hack just to set the correct file length up in SequenceFile.Reader. * See HADOOP-6307. The below is all about setting the right length on the @@ -40,7 +40,7 @@ public class SequenceFileLogReader implements HLog.Reader { * the available on the stream. The below is ugly. It makes getPos, the * first time its called, return length of the file -- i.e. 
tell a lie -- just * so this line up in SF.Reader's constructor ends up with right answer: - * + * * this.end = in.getPos() + length; * */ @@ -49,14 +49,14 @@ public class SequenceFileLogReader implements HLog.Reader { WALReader(final FileSystem fs, final Path p, final Configuration c) throws IOException { super(fs, p, c); - + } @Override protected FSDataInputStream openFile(FileSystem fs, Path file, int bufferSize, long length) throws IOException { - return new WALReaderFSDataInputStream(super.openFile(fs, file, + return new WALReaderFSDataInputStream(super.openFile(fs, file, bufferSize, length), length); } @@ -93,7 +93,7 @@ public class SequenceFileLogReader implements HLog.Reader { Configuration conf; WALReader reader; - + public SequenceFileLogReader() { } @Override diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java index 9aa7d5e..d77875a 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java @@ -43,8 +43,8 @@ public class SequenceFileLogWriter implements HLog.Writer { @Override public void init(FileSystem fs, Path path, Configuration conf) throws IOException { - writer = SequenceFile.createWriter(fs, conf, path, - HLog.getKeyClass(conf), WALEdit.class, + writer = SequenceFile.createWriter(fs, conf, path, + HLog.getKeyClass(conf), WALEdit.class, fs.getConf().getInt("io.file.buffer.size", 4096), (short) conf.getInt("hbase.regionserver.hlog.replication", fs.getDefaultReplication()), diff --git a/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java index 150f8a2..52c2446 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java +++ b/core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java @@ -34,48 +34,48 @@ import org.apache.hadoop.io.Writable; /** * WALEdit: Used in HBase's transaction log (WAL) to represent - * the collection of edits (KeyValue objects) corresponding to a + * the collection of edits (KeyValue objects) corresponding to a * single transaction. The class implements "Writable" interface * for serializing/deserializing a set of KeyValue items. - * + * * Previously, if a transaction contains 3 edits to c1, c2, c3 for a row R, * the HLog would have three log entries as follows: - * + * * : * : * : - * + * * This presents problems because row level atomicity of transactions * was not guaranteed. If we crash after few of the above appends make * it, then recovery will restore a partial transaction. * * In the new world, all the edits for a given transaction are written * out as a single record, for example: - * + * * : - * + * * where, the WALEdit is serialized as: * <-1, # of edits, , , ... > * For example: * <-1, 3, , , > - * - * The -1 marker is just a special way of being backward compatible with + * + * The -1 marker is just a special way of being backward compatible with * an old HLog which would have contained a single . - * + * * The deserializer for WALEdit backward compatibly detects if the record * is an old style KeyValue or the new style WALEdit. 
* */ public class WALEdit implements Writable { - + private final int VERSION_2 = -1; - + private final ArrayList kvs = new ArrayList(); private NavigableMap scopes; - + public WALEdit() { } - + public void add(KeyValue kv) { this.kvs.add(kv); } @@ -87,7 +87,7 @@ public class WALEdit implements Writable { public int size() { return kvs.size(); } - + public List getKeyValues() { return kvs; } @@ -171,5 +171,5 @@ public class WALEdit implements Writable { sb.append(">]"); return sb.toString(); } - + } diff --git a/core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java b/core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java index 45d7910..441917b 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java +++ b/core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java @@ -89,7 +89,7 @@ import java.util.TreeMap; * Hbase API specified in the Hbase.thrift IDL file. */ public class ThriftServer { - + /** * The HBaseHandler is a glue object that connects Thrift RPC calls to the * HBase client API primarily defined in the HBaseAdmin and HTable objects. @@ -102,7 +102,7 @@ public class ThriftServer { // nextScannerId and scannerMap are used to manage scanner state protected int nextScannerId = 0; protected HashMap scannerMap = null; - + private static ThreadLocal> threadLocalTables = new ThreadLocal>() { @Override protected Map initialValue() { @@ -110,10 +110,10 @@ public class ThriftServer { } }; - + /** * Returns a list of all the column families for a given htable. - * + * * @param table * @return * @throws IOException @@ -122,15 +122,15 @@ public class ThriftServer { HColumnDescriptor[] cds = table.getTableDescriptor().getColumnFamilies(); byte[][] columns = new byte[cds.length][]; for (int i = 0; i < cds.length; i++) { - columns[i] = Bytes.add(cds[i].getName(), + columns[i] = Bytes.add(cds[i].getName(), KeyValue.COLUMN_FAMILY_DELIM_ARRAY); } return columns; } - + /** * Creates and returns an HTable instance from a given table name. - * + * * @param tableName * name of table * @return HTable object @@ -146,11 +146,11 @@ public class ThriftServer { } return tables.get(table); } - + /** * Assigns a unique ID to the scanner and adds the mapping to an internal * hash-map. - * + * * @param scanner * @return integer scanner id */ @@ -159,31 +159,31 @@ public class ThriftServer { scannerMap.put(id, scanner); return id; } - + /** * Returns the scanner associated with the specified ID. - * + * * @param id * @return a Scanner, or null if ID was invalid. */ protected synchronized ResultScanner getScanner(int id) { return scannerMap.get(id); } - + /** * Removes the scanner associated with the specified ID from the internal * id->scanner hash-map. - * + * * @param id * @return a Scanner, or null if ID was invalid. */ protected synchronized ResultScanner removeScanner(int id) { return scannerMap.remove(id); } - + /** * Constructs an HBaseHandler object. 
- * + * * @throws MasterNotRunningException */ HBaseHandler() throws MasterNotRunningException { @@ -191,7 +191,7 @@ public class ThriftServer { admin = new HBaseAdmin(conf); scannerMap = new HashMap(); } - + public void enableTable(final byte[] tableName) throws IOError { try{ admin.enableTable(tableName); @@ -199,7 +199,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public void disableTable(final byte[] tableName) throws IOError{ try{ admin.disableTable(tableName); @@ -207,7 +207,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public boolean isTableEnabled(final byte[] tableName) throws IOError { try { return HTable.isTableEnabled(tableName); @@ -215,7 +215,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public void compact(byte[] tableNameOrRegionName) throws IOError { try{ admin.compact(tableNameOrRegionName); @@ -229,9 +229,9 @@ public class ThriftServer { admin.majorCompact(tableNameOrRegionName); } catch (IOException e) { throw new IOError(e.getMessage()); - } + } } - + public List getTableNames() throws IOError { try { HTableDescriptor[] tables = this.admin.listTables(); @@ -244,7 +244,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public List getTableRegions(byte[] tableName) throws IOError { try{ @@ -266,7 +266,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + @Deprecated public List get(byte[] tableName, byte[] row, byte[] column) throws IOError { @@ -293,7 +293,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + @Deprecated public List getVer(byte[] tableName, byte[] row, byte[] column, int numVersions) throws IOError { @@ -304,7 +304,7 @@ public class ThriftServer { return getVer(tableName, row, famAndQf[0], famAndQf[1], numVersions); } - public List getVer(byte [] tableName, byte [] row, byte [] family, + public List getVer(byte [] tableName, byte [] row, byte [] family, byte [] qualifier, int numVersions) throws IOError { try { HTable table = getTable(tableName); @@ -317,7 +317,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + @Deprecated public List getVerTs(byte[] tableName, byte[] row, byte[] column, long timestamp, int numVersions) throws IOError { @@ -326,7 +326,7 @@ public class ThriftServer { return getVerTs(tableName, row, famAndQf[0], new byte[0], timestamp, numVersions); } - return getVerTs(tableName, row, famAndQf[0], famAndQf[1], timestamp, + return getVerTs(tableName, row, famAndQf[0], famAndQf[1], timestamp, numVersions); } @@ -344,25 +344,25 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public List getRow(byte[] tableName, byte[] row) throws IOError { return getRowWithColumnsTs(tableName, row, null, HConstants.LATEST_TIMESTAMP); } - + public List getRowWithColumns(byte[] tableName, byte[] row, List columns) throws IOError { return getRowWithColumnsTs(tableName, row, columns, HConstants.LATEST_TIMESTAMP); } - + public List getRowTs(byte[] tableName, byte[] row, long timestamp) throws IOError { return getRowWithColumnsTs(tableName, row, null, timestamp); } - + public List getRowWithColumnsTs(byte[] tableName, byte[] row, List columns, long timestamp) throws IOError { try { @@ -390,12 +390,12 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public void deleteAll(byte[] tableName, byte[] row, byte[] column) throws IOError { deleteAllTs(tableName, row, column, HConstants.LATEST_TIMESTAMP); } - + public void deleteAllTs(byte[] 
tableName, byte[] row, byte[] column, long timestamp) throws IOError { try { @@ -408,16 +408,16 @@ public class ThriftServer { delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp); } table.delete(delete); - + } catch (IOException e) { throw new IOError(e.getMessage()); } } - + public void deleteAllRow(byte[] tableName, byte[] row) throws IOError { deleteAllRowTs(tableName, row, HConstants.LATEST_TIMESTAMP); } - + public void deleteAllRowTs(byte[] tableName, byte[] row, long timestamp) throws IOError { try { @@ -428,7 +428,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public void createTable(byte[] tableName, List columnFamilies) throws IOError, IllegalArgument, AlreadyExists { @@ -448,7 +448,7 @@ public class ThriftServer { throw new IllegalArgument(e.getMessage()); } } - + public void deleteTable(byte[] tableName) throws IOError { if (LOG.isDebugEnabled()) { LOG.debug("deleteTable: table=" + new String(tableName)); @@ -462,13 +462,13 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public void mutateRow(byte[] tableName, byte[] row, List mutations) throws IOError, IllegalArgument { mutateRowTs(tableName, row, mutations, HConstants.LATEST_TIMESTAMP); } - - public void mutateRowTs(byte[] tableName, byte[] row, + + public void mutateRowTs(byte[] tableName, byte[] row, List mutations, long timestamp) throws IOError, IllegalArgument { HTable table = null; try { @@ -504,8 +504,8 @@ public class ThriftServer { throw new IllegalArgument(e.getMessage()); } } - - public void mutateRows(byte[] tableName, List rowBatches) + + public void mutateRows(byte[] tableName, List rowBatches) throws IOError, IllegalArgument, TException { mutateRowsTs(tableName, rowBatches, HConstants.LATEST_TIMESTAMP); } @@ -559,18 +559,18 @@ public class ThriftServer { } @Deprecated - public long atomicIncrement(byte[] tableName, byte[] row, byte[] column, + public long atomicIncrement(byte[] tableName, byte[] row, byte[] column, long amount) throws IOError, IllegalArgument, TException { byte [][] famAndQf = KeyValue.parseColumn(column); if(famAndQf.length == 1) { - return atomicIncrement(tableName, row, famAndQf[0], new byte[0], + return atomicIncrement(tableName, row, famAndQf[0], new byte[0], amount); } return atomicIncrement(tableName, row, famAndQf[0], famAndQf[1], amount); } public long atomicIncrement(byte [] tableName, byte [] row, byte [] family, - byte [] qualifier, long amount) + byte [] qualifier, long amount) throws IOError, IllegalArgument, TException { HTable table; try { @@ -580,7 +580,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public void scannerClose(int id) throws IOError, IllegalArgument { LOG.debug("scannerClose: id=" + id); ResultScanner scanner = getScanner(id); @@ -590,7 +590,7 @@ public class ThriftServer { scanner.close(); removeScanner(id); } - + public List scannerGetList(int id,int nbRows) throws IllegalArgument, IOError { LOG.debug("scannerGetList: id=" + id); ResultScanner scanner = getScanner(id); @@ -632,7 +632,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public int scannerOpenWithStop(byte[] tableName, byte[] startRow, byte[] stopRow, List columns) throws IOError, TException { try { @@ -699,7 +699,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public int scannerOpenWithStopTs(byte[] tableName, byte[] startRow, byte[] stopRow, List columns, long timestamp) throws IOError, TException { @@ -723,16 +723,16 @@ public class ThriftServer { throw new 
IOError(e.getMessage()); } } - + public Map getColumnDescriptors( byte[] tableName) throws IOError, TException { try { TreeMap columns = new TreeMap(Bytes.BYTES_COMPARATOR); - + HTable table = getTable(tableName); HTableDescriptor desc = table.getTableDescriptor(); - + for (HColumnDescriptor e : desc.getFamilies()) { ColumnDescriptor col = ThriftUtilities.colDescFromHbase(e); columns.put(col.name, col); @@ -741,9 +741,9 @@ public class ThriftServer { } catch (IOException e) { throw new IOError(e.getMessage()); } - } + } } - + // // Main program and support routines // @@ -868,7 +868,7 @@ public class ThriftServer { /** * @param args - * @throws Exception + * @throws Exception */ public static void main(String [] args) throws Exception { doMain(args); diff --git a/core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java b/core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java index 176996a..9be58d7 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java +++ b/core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java @@ -33,11 +33,11 @@ import org.apache.hadoop.hbase.thrift.generated.TRowResult; import org.apache.hadoop.hbase.util.Bytes; public class ThriftUtilities { - + /** * This utility method creates a new Hbase HColumnDescriptor object based on a * Thrift ColumnDescriptor "struct". - * + * * @param in * Thrift ColumnDescriptor object * @return HColumnDescriptor @@ -51,7 +51,7 @@ public class ThriftUtilities { if (in.bloomFilterType.compareTo("NONE") != 0) { bloom = true; } - + if (in.name == null || in.name.length <= 0) { throw new IllegalArgument("column name is empty"); } @@ -61,11 +61,11 @@ public class ThriftUtilities { in.timeToLive, bloom); return col; } - + /** * This utility method creates a new Thrift ColumnDescriptor "struct" based on * an Hbase HColumnDescriptor object. - * + * * @param in * Hbase HColumnDescriptor object * @return Thrift ColumnDescriptor @@ -84,7 +84,7 @@ public class ThriftUtilities { /** * This utility method creates a list of Thrift TCell "struct" based on * an Hbase Cell object. The empty list is returned if the input is null. - * + * * @param in * Hbase Cell object * @return Thrift TCell array @@ -120,7 +120,7 @@ public class ThriftUtilities { * This utility method creates a list of Thrift TRowResult "struct" based on * an Hbase RowResult object. The empty list is returned if the input is * null. 
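
ThriftUtilities is essentially a set of one-way converters between the generated Thrift structs and the native client types. As a rough, self-contained illustration of the row conversion described above, the sketch below groups flat cells into per-row maps keyed by the "family:qualifier" column name, which is the shape handed back to Thrift clients; CellStub and the plain sorted map are simplified stand-ins, not the HBase KeyValue/Result or generated TCell/TRowResult classes.

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

/** Simplified stand-in for a cell, for illustration only. */
class CellStub {
  final String row, family, qualifier, value;
  final long timestamp;
  CellStub(String row, String family, String qualifier, String value, long timestamp) {
    this.row = row; this.family = family; this.qualifier = qualifier;
    this.value = value; this.timestamp = timestamp;
  }
}

public class RowResultSketch {
  /** "family:qualifier", the column-name convention the Thrift layer exposes. */
  static String makeColumn(String family, String qualifier) {
    return family + ":" + qualifier;
  }

  /** Groups cells by row into sorted column-name -> cell maps, analogous to building TRowResults. */
  static Map<String, SortedMap<String, CellStub>> toRowResults(List<CellStub> cells) {
    Map<String, SortedMap<String, CellStub>> rows = new LinkedHashMap<String, SortedMap<String, CellStub>>();
    for (CellStub c : cells) {
      SortedMap<String, CellStub> columns = rows.get(c.row);
      if (columns == null) {
        columns = new TreeMap<String, CellStub>();
        rows.put(c.row, columns);
      }
      columns.put(makeColumn(c.family, c.qualifier), c);
    }
    return rows;
  }

  public static void main(String[] args) {
    List<CellStub> cells = Arrays.asList(
        new CellStub("r1", "info", "name", "alice", 1L),
        new CellStub("r1", "info", "age", "7", 1L));
    System.out.println(toRowResults(cells).get("r1").keySet());   // [info:age, info:name]
  }
}

The real code keys the column map with Bytes.BYTES_COMPARATOR over byte[] names; a String TreeMap gives the same sorted-column behaviour for the purpose of this sketch.
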
- * + * * @param in * Hbase RowResult object * @return Thrift TRowResult array @@ -135,14 +135,14 @@ public class ThriftUtilities { result.row = result_.getRow(); result.columns = new TreeMap(Bytes.BYTES_COMPARATOR); for(KeyValue kv : result_.sorted()) { - result.columns.put(KeyValue.makeColumn(kv.getFamily(), + result.columns.put(KeyValue.makeColumn(kv.getFamily(), kv.getQualifier()), new TCell(kv.getValue(), kv.getTimestamp())); } results.add(result); } return results; } - + static public List rowResultFromHBase(Result in) { Result [] result = { in }; return rowResultFromHBase(result); diff --git a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java index fb578eb..2bd4f77 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java +++ b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java @@ -94,7 +94,7 @@ public class AlreadyExists extends Exception implements TBase metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT, + put(_Fields.MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -259,7 +259,7 @@ public class AlreadyExists extends Exception implements TBase, java.io.Seri // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.MUTATIONS, new FieldMetaData("mutations", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.MUTATIONS, new FieldMetaData("mutations", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, Mutation.class)))); }}); @@ -358,7 +358,7 @@ public class BatchMutation implements TBase, java.io.Seri while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -369,7 +369,7 @@ public class BatchMutation implements TBase, java.io.Seri case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -387,7 +387,7 @@ public class BatchMutation implements TBase, java.io.Seri } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java index a38d9f0..a883a59 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java +++ b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java @@ -137,23 +137,23 @@ public class ColumnDescriptor implements TBase, java.i private BitSet __isset_bit_vector = new BitSet(6); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, + put(_Fields.NAME, new FieldMetaData("name", 
TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.MAX_VERSIONS, new FieldMetaData("maxVersions", TFieldRequirementType.DEFAULT, + put(_Fields.MAX_VERSIONS, new FieldMetaData("maxVersions", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(_Fields.COMPRESSION, new FieldMetaData("compression", TFieldRequirementType.DEFAULT, + put(_Fields.COMPRESSION, new FieldMetaData("compression", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.IN_MEMORY, new FieldMetaData("inMemory", TFieldRequirementType.DEFAULT, + put(_Fields.IN_MEMORY, new FieldMetaData("inMemory", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.BOOL))); - put(_Fields.BLOOM_FILTER_TYPE, new FieldMetaData("bloomFilterType", TFieldRequirementType.DEFAULT, + put(_Fields.BLOOM_FILTER_TYPE, new FieldMetaData("bloomFilterType", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.BLOOM_FILTER_VECTOR_SIZE, new FieldMetaData("bloomFilterVectorSize", TFieldRequirementType.DEFAULT, + put(_Fields.BLOOM_FILTER_VECTOR_SIZE, new FieldMetaData("bloomFilterVectorSize", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(_Fields.BLOOM_FILTER_NB_HASHES, new FieldMetaData("bloomFilterNbHashes", TFieldRequirementType.DEFAULT, + put(_Fields.BLOOM_FILTER_NB_HASHES, new FieldMetaData("bloomFilterNbHashes", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(_Fields.BLOCK_CACHE_ENABLED, new FieldMetaData("blockCacheEnabled", TFieldRequirementType.DEFAULT, + put(_Fields.BLOCK_CACHE_ENABLED, new FieldMetaData("blockCacheEnabled", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.BOOL))); - put(_Fields.TIME_TO_LIVE, new FieldMetaData("timeToLive", TFieldRequirementType.DEFAULT, + put(_Fields.TIME_TO_LIVE, new FieldMetaData("timeToLive", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); }}); @@ -836,7 +836,7 @@ public class ColumnDescriptor implements TBase, java.i while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -847,7 +847,7 @@ public class ColumnDescriptor implements TBase, java.i case NAME: if (field.type == TType.STRING) { this.name = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -855,14 +855,14 @@ public class ColumnDescriptor implements TBase, java.i if (field.type == TType.I32) { this.maxVersions = iprot.readI32(); setMaxVersionsIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COMPRESSION: if (field.type == TType.STRING) { this.compression = iprot.readString(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -870,14 +870,14 @@ public class ColumnDescriptor implements TBase, java.i if (field.type == TType.BOOL) { this.inMemory = iprot.readBool(); setInMemoryIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case BLOOM_FILTER_TYPE: if (field.type == TType.STRING) { this.bloomFilterType = iprot.readString(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -885,7 +885,7 @@ public class ColumnDescriptor implements TBase, java.i if (field.type == TType.I32) { this.bloomFilterVectorSize = iprot.readI32(); setBloomFilterVectorSizeIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -893,7 +893,7 @@ public class ColumnDescriptor 
implements TBase, java.i if (field.type == TType.I32) { this.bloomFilterNbHashes = iprot.readI32(); setBloomFilterNbHashesIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -901,7 +901,7 @@ public class ColumnDescriptor implements TBase, java.i if (field.type == TType.BOOL) { this.blockCacheEnabled = iprot.readBool(); setBlockCacheEnabledIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -909,7 +909,7 @@ public class ColumnDescriptor implements TBase, java.i if (field.type == TType.I32) { this.timeToLive = iprot.readI32(); setTimeToLiveIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java index 87ad479..64cbfde 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java +++ b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java @@ -42,7 +42,7 @@ public class Hbase { /** * Brings a table on-line (enables it) - * + * * @param tableName name of the table */ public void enableTable(byte[] tableName) throws IOError, TException; @@ -50,14 +50,14 @@ public class Hbase { /** * Disables a table (takes it off-line) If it is being served, the master * will tell the servers to stop serving it. - * + * * @param tableName name of the table */ public void disableTable(byte[] tableName) throws IOError, TException; /** * @return true if table is on-line - * + * * @param tableName name of the table to check */ public boolean isTableEnabled(byte[] tableName) throws IOError, TException; @@ -75,7 +75,7 @@ public class Hbase { /** * List all the column families assoicated with a table. * @return list of column family descriptors - * + * * @param tableName table name */ public Map getColumnDescriptors(byte[] tableName) throws IOError, TException; @@ -83,7 +83,7 @@ public class Hbase { /** * List the regions associated with a table. * @return list of region descriptors - * + * * @param tableName table name */ public List getTableRegions(byte[] tableName) throws IOError, TException; @@ -93,22 +93,22 @@ public class Hbase { * field for each ColumnDescriptor must be set and must end in a * colon (:). All other fields are optional and will get default * values if not explicitly specified. - * + * * @throws IllegalArgument if an input parameter is invalid * @throws AlreadyExists if the table name already exists - * + * * @param tableName name of table to create - * + * * @param columnFamilies list of column family descriptors */ public void createTable(byte[] tableName, List columnFamilies) throws IOError, IllegalArgument, AlreadyExists, TException; /** * Deletes a table - * + * * @throws IOError if table doesn't exist on server or there was some other * problem - * + * * @param tableName name of table to delete */ public void deleteTable(byte[] tableName) throws IOError, TException; @@ -116,13 +116,13 @@ public class Hbase { /** * Get a single TCell for the specified table, row, and column at the * latest timestamp. Returns an empty list if no such value exists. 
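
Since the service javadoc above spells out the admin calls and the requirement that each ColumnDescriptor name end in a colon, a small client-side sketch may help. Everything below is illustrative: it assumes the Thrift gateway is listening on localhost:9090 (the conventional default), that the generated ColumnDescriptor exposes a public name field and a no-arg constructor as the 0.20-era generated code does, and that the table and family names are made up.

import java.util.Arrays;
import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class ThriftAdminSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the Thrift gateway (assumed host/port).
    TTransport transport = new TSocket("localhost", 9090);
    transport.open();
    Hbase.Client client = new Hbase.Client(new TBinaryProtocol(transport));

    // Column family names must end in a colon, per the createTable javadoc.
    ColumnDescriptor info = new ColumnDescriptor();
    info.name = "info:".getBytes();

    byte[] table = "demo_table".getBytes();
    client.createTable(table, Arrays.asList(info));   // throws AlreadyExists if the name is taken
    System.out.println("enabled: " + client.isTableEnabled(table));

    client.disableTable(table);                       // tables generally need disabling before deletion
    client.deleteTable(table);
    transport.close();
  }
}

The connection boilerplate above is reused implicitly by the later sketches, which simply take the client handle as a parameter.
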
- * + * * @return value for specified row/column - * + * * @param tableName name of table - * + * * @param row row key - * + * * @param column column name */ public List get(byte[] tableName, byte[] row, byte[] column) throws IOError, TException; @@ -130,15 +130,15 @@ public class Hbase { /** * Get the specified number of versions for the specified table, * row, and column. - * + * * @return list of cells for specified row/column - * + * * @param tableName name of table - * + * * @param row row key - * + * * @param column column name - * + * * @param numVersions number of versions to retrieve */ public List getVer(byte[] tableName, byte[] row, byte[] column, int numVersions) throws IOError, TException; @@ -147,17 +147,17 @@ public class Hbase { * Get the specified number of versions for the specified table, * row, and column. Only versions less than or equal to the specified * timestamp will be returned. - * + * * @return list of cells for specified row/column - * + * * @param tableName name of table - * + * * @param row row key - * + * * @param column column name - * + * * @param timestamp timestamp - * + * * @param numVersions number of versions to retrieve */ public List getVerTs(byte[] tableName, byte[] row, byte[] column, long timestamp, int numVersions) throws IOError, TException; @@ -165,11 +165,11 @@ public class Hbase { /** * Get all the data for the specified table and row at the latest * timestamp. Returns an empty list if the row does not exist. - * + * * @return TRowResult containing the row and map of columns to TCells - * + * * @param tableName name of table - * + * * @param row row key */ public List getRow(byte[] tableName, byte[] row) throws IOError, TException; @@ -177,13 +177,13 @@ public class Hbase { /** * Get the specified columns for the specified table and row at the latest * timestamp. Returns an empty list if the row does not exist. - * + * * @return TRowResult containing the row and map of columns to TCells - * + * * @param tableName name of table - * + * * @param row row key - * + * * @param columns List of columns to return, null for all columns */ public List getRowWithColumns(byte[] tableName, byte[] row, List columns) throws IOError, TException; @@ -191,13 +191,13 @@ public class Hbase { /** * Get all the data for the specified table and row at the specified * timestamp. Returns an empty list if the row does not exist. - * + * * @return TRowResult containing the row and map of columns to TCells - * + * * @param tableName name of the table - * + * * @param row row key - * + * * @param timestamp timestamp */ public List getRowTs(byte[] tableName, byte[] row, long timestamp) throws IOError, TException; @@ -205,15 +205,15 @@ public class Hbase { /** * Get the specified columns for the specified table and row at the specified * timestamp. Returns an empty list if the row does not exist. - * + * * @return TRowResult containing the row and map of columns to TCells - * + * * @param tableName name of table - * + * * @param row row key - * + * * @param columns List of columns to return, null for all columns - * + * * @param timestamp */ public List getRowWithColumnsTs(byte[] tableName, byte[] row, List columns, long timestamp) throws IOError, TException; @@ -223,11 +223,11 @@ public class Hbase { * single transaction. If an exception is thrown, then the * transaction is aborted. Default current timestamp is used, and * all entries will have an identical timestamp. 
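
As a usage illustration of the row-mutation calls documented above: a single mutateRow carries the whole batch for one row and is applied with one timestamp. The sketch assumes a connected Hbase.Client like the one opened in the previous sketch, and that the generated Mutation struct has public isDelete/column/value fields as in the 0.20-era generated code; the table, row, and column names are made up.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.hadoop.hbase.thrift.generated.Mutation;

public class ThriftMutateSketch {
  /** Writes one cell and deletes another, all as a single atomic mutation of one row. */
  static void updateRow(Hbase.Client client) throws Exception {
    List<Mutation> mutations = new ArrayList<Mutation>();

    Mutation put = new Mutation();
    put.column = "info:name".getBytes();    // family:qualifier column name
    put.value = "alice".getBytes();
    mutations.add(put);

    Mutation del = new Mutation();
    del.isDelete = true;                    // delete instead of put
    del.column = "info:stale".getBytes();
    mutations.add(del);

    // All entries get the default current timestamp; use mutateRowTs to pin a specific one.
    client.mutateRow("demo_table".getBytes(), "row1".getBytes(), mutations);
  }
}
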
- * + * * @param tableName name of table - * + * * @param row row key - * + * * @param mutations list of mutation commands */ public void mutateRow(byte[] tableName, byte[] row, List mutations) throws IOError, IllegalArgument, TException; @@ -237,13 +237,13 @@ public class Hbase { * single transaction. If an exception is thrown, then the * transaction is aborted. The specified timestamp is used, and * all entries will have an identical timestamp. - * + * * @param tableName name of table - * + * * @param row row key - * + * * @param mutations list of mutation commands - * + * * @param timestamp timestamp */ public void mutateRowTs(byte[] tableName, byte[] row, List mutations, long timestamp) throws IOError, IllegalArgument, TException; @@ -253,9 +253,9 @@ public class Hbase { * in a single transaction. If an exception is thrown, then the * transaction is aborted. Default current timestamp is used, and * all entries will have an identical timestamp. - * + * * @param tableName name of table - * + * * @param rowBatches list of row batches */ public void mutateRows(byte[] tableName, List rowBatches) throws IOError, IllegalArgument, TException; @@ -265,35 +265,35 @@ public class Hbase { * in a single transaction. If an exception is thrown, then the * transaction is aborted. The specified timestamp is used, and * all entries will have an identical timestamp. - * + * * @param tableName name of table - * + * * @param rowBatches list of row batches - * + * * @param timestamp timestamp */ public void mutateRowsTs(byte[] tableName, List rowBatches, long timestamp) throws IOError, IllegalArgument, TException; /** * Atomically increment the column value specified. Returns the next value post increment. - * + * * @param tableName name of table - * + * * @param row row to increment - * + * * @param column name of column - * + * * @param value amount to increment by */ public long atomicIncrement(byte[] tableName, byte[] row, byte[] column, long value) throws IOError, IllegalArgument, TException; /** * Delete all cells that match the passed row and column. - * + * * @param tableName name of table - * + * * @param row Row to update - * + * * @param column name of column whose value is to be deleted */ public void deleteAll(byte[] tableName, byte[] row, byte[] column) throws IOError, TException; @@ -301,22 +301,22 @@ public class Hbase { /** * Delete all cells that match the passed row and column and whose * timestamp is equal-to or older than the passed timestamp. - * + * * @param tableName name of table - * + * * @param row Row to update - * + * * @param column name of column whose value is to be deleted - * + * * @param timestamp timestamp */ public void deleteAllTs(byte[] tableName, byte[] row, byte[] column, long timestamp) throws IOError, TException; /** * Completely delete the row's cells. - * + * * @param tableName name of table - * + * * @param row key of the row to be completely deleted. */ public void deleteAllRow(byte[] tableName, byte[] row) throws IOError, TException; @@ -324,11 +324,11 @@ public class Hbase { /** * Completely delete the row's cells marked with a timestamp * equal-to or older than the passed timestamp. - * + * * @param tableName name of table - * + * * @param row key of the row to be completely deleted. 
- * + * * @param timestamp timestamp */ public void deleteAllRowTs(byte[] tableName, byte[] row, long timestamp) throws IOError, TException; @@ -336,14 +336,14 @@ public class Hbase { /** * Get a scanner on the current table starting at the specified row and * ending at the last row in the table. Return the specified columns. - * + * * @return scanner id to be used with other scanner procedures - * + * * @param tableName name of table - * + * * @param startRow Starting row in table to scan. * Send "" (empty string) to start at the first row. - * + * * @param columns columns to scan. If column name is a column family, all * columns of the specified column family are returned. It's also possible * to pass a regex in the column qualifier. @@ -354,17 +354,17 @@ public class Hbase { * Get a scanner on the current table starting and stopping at the * specified rows. ending at the last row in the table. Return the * specified columns. - * + * * @return scanner id to be used with other scanner procedures - * + * * @param tableName name of table - * + * * @param startRow Starting row in table to scan. * Send "" (empty string) to start at the first row. - * + * * @param stopRow row to stop scanning on. This row is *not* included in the * scanner's results - * + * * @param columns columns to scan. If column name is a column family, all * columns of the specified column family are returned. It's also possible * to pass a regex in the column qualifier. @@ -374,13 +374,13 @@ public class Hbase { /** * Open a scanner for a given prefix. That is all rows will have the specified * prefix. No other rows will be returned. - * + * * @return scanner id to use with other scanner calls - * + * * @param tableName name of table - * + * * @param startAndPrefix the prefix (and thus start row) of the keys you want - * + * * @param columns the columns you want returned */ public int scannerOpenWithPrefix(byte[] tableName, byte[] startAndPrefix, List columns) throws IOError, TException; @@ -389,18 +389,18 @@ public class Hbase { * Get a scanner on the current table starting at the specified row and * ending at the last row in the table. Return the specified columns. * Only values with the specified timestamp are returned. - * + * * @return scanner id to be used with other scanner procedures - * + * * @param tableName name of table - * + * * @param startRow Starting row in table to scan. * Send "" (empty string) to start at the first row. - * + * * @param columns columns to scan. If column name is a column family, all * columns of the specified column family are returned. It's also possible * to pass a regex in the column qualifier. - * + * * @param timestamp timestamp */ public int scannerOpenTs(byte[] tableName, byte[] startRow, List columns, long timestamp) throws IOError, TException; @@ -410,21 +410,21 @@ public class Hbase { * specified rows. ending at the last row in the table. Return the * specified columns. Only values with the specified timestamp are * returned. - * + * * @return scanner id to be used with other scanner procedures - * + * * @param tableName name of table - * + * * @param startRow Starting row in table to scan. * Send "" (empty string) to start at the first row. - * + * * @param stopRow row to stop scanning on. This row is *not* included in the * scanner's results - * + * * @param columns columns to scan. If column name is a column family, all * columns of the specified column family are returned. It's also possible * to pass a regex in the column qualifier. 
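
The scanner calls above form a three-step protocol: open a scanner and get back an integer id, pull rows with scannerGet or scannerGetList until an empty list comes back, then close the id to release server-side state. A hedged sketch, again assuming a connected Hbase.Client and made-up table and column names:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.hadoop.hbase.thrift.generated.TRowResult;

public class ThriftScanSketch {
  /** Scans [startRow, stopRow) over one column family and prints the row keys. */
  static void scan(Hbase.Client client) throws Exception {
    // Passing just a family name returns all of its columns, per the javadoc above.
    List<byte[]> columns = Arrays.asList("info:".getBytes());
    int id = client.scannerOpenWithStop("demo_table".getBytes(),
        "row0".getBytes(), "row9".getBytes(), columns);
    try {
      while (true) {
        List<TRowResult> batch = client.scannerGetList(id, 100);  // up to 100 rows per call
        if (batch.isEmpty()) {                                    // empty list means end of scan
          break;
        }
        for (TRowResult r : batch) {
          System.out.println(new String(r.row));
        }
      }
    } finally {
      client.scannerClose(id);   // always release the server-side scanner
    }
  }
}
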
- * + * * @param timestamp timestamp */ public int scannerOpenWithStopTs(byte[] tableName, byte[] startRow, byte[] stopRow, List columns, long timestamp) throws IOError, TException; @@ -434,11 +434,11 @@ public class Hbase { * row in the table. When there are no more rows in the table, or a key * greater-than-or-equal-to the scanner's specified stopRow is reached, * an empty list is returned. - * + * * @return a TRowResult containing the current row and a map of the columns to TCells. * @throws IllegalArgument if ScannerID is invalid * @throws NotFound when the scanner reaches the end - * + * * @param id id of a scanner returned by scannerOpen */ public List scannerGet(int id) throws IOError, IllegalArgument, TException; @@ -448,22 +448,22 @@ public class Hbase { * rows and advances to the next row in the table. When there are no more * rows in the table, or a key greater-than-or-equal-to the scanner's * specified stopRow is reached, an empty list is returned. - * + * * @return a TRowResult containing the current row and a map of the columns to TCells. * @throws IllegalArgument if ScannerID is invalid * @throws NotFound when the scanner reaches the end - * + * * @param id id of a scanner returned by scannerOpen - * + * * @param nbRows number of results to return */ public List scannerGetList(int id, int nbRows) throws IOError, IllegalArgument, TException; /** * Closes the server-state associated with an open scanner. - * + * * @throws IllegalArgument if ScannerID is invalid - * + * * @param id id of a scanner returned by scannerOpen */ public void scannerClose(int id) throws IOError, IllegalArgument, TException; @@ -2876,7 +2876,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -3047,7 +3047,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -3058,7 +3058,7 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -3172,7 +3172,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -3337,7 +3337,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -3349,7 +3349,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -3468,7 +3468,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new 
FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -3639,7 +3639,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -3650,7 +3650,7 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -3764,7 +3764,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -3929,7 +3929,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -3941,7 +3941,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -4060,7 +4060,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -4231,7 +4231,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -4242,7 +4242,7 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -4361,9 +4361,9 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.BOOL))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -4592,7 +4592,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -4604,7 +4604,7 @@ public class Hbase { if (field.type == TType.BOOL) { this.success = iprot.readBool(); setSuccessIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -4612,7 +4612,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -4733,7 +4733,7 @@ public 
class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME_OR_REGION_NAME, new FieldMetaData("tableNameOrRegionName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME_OR_REGION_NAME, new FieldMetaData("tableNameOrRegionName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -4898,7 +4898,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -4909,7 +4909,7 @@ public class Hbase { case TABLE_NAME_OR_REGION_NAME: if (field.type == TType.STRING) { this.tableNameOrRegionName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -5023,7 +5023,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -5188,7 +5188,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -5200,7 +5200,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -5313,7 +5313,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME_OR_REGION_NAME, new FieldMetaData("tableNameOrRegionName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME_OR_REGION_NAME, new FieldMetaData("tableNameOrRegionName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -5478,7 +5478,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -5489,7 +5489,7 @@ public class Hbase { case TABLE_NAME_OR_REGION_NAME: if (field.type == TType.STRING) { this.tableNameOrRegionName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -5603,7 +5603,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -5768,7 +5768,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -5780,7 +5780,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -5982,7 +5982,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); 
- if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -6091,10 +6091,10 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -6342,7 +6342,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -6363,7 +6363,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -6371,7 +6371,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -6509,7 +6509,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -6680,7 +6680,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -6691,7 +6691,7 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -6808,11 +6808,11 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new MapMetaData(TType.MAP, - new FieldValueMetaData(TType.STRING), + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new MapMetaData(TType.MAP, + new FieldValueMetaData(TType.STRING), new StructMetaData(TType.STRUCT, ColumnDescriptor.class)))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -7037,7 +7037,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -7061,7 +7061,7 @@ public class Hbase { } iprot.readMapEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -7069,7 +7069,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -7208,7 +7208,7 @@ public 
class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -7379,7 +7379,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -7390,7 +7390,7 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -7507,10 +7507,10 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRegionInfo.class)))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -7758,7 +7758,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -7780,7 +7780,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -7788,7 +7788,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -7935,10 +7935,10 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMN_FAMILIES, new FieldMetaData("columnFamilies", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.COLUMN_FAMILIES, new FieldMetaData("columnFamilies", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, ColumnDescriptor.class)))); }}); @@ -8198,7 +8198,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -8209,7 +8209,7 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -8227,7 +8227,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -8367,11 +8367,11 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, 
FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(_Fields.EXIST, new FieldMetaData("exist", TFieldRequirementType.DEFAULT, + put(_Fields.EXIST, new FieldMetaData("exist", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -8664,7 +8664,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -8676,7 +8676,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -8684,7 +8684,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -8692,7 +8692,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.exist = new AlreadyExists(); this.exist.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -8835,7 +8835,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -9006,7 +9006,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -9017,7 +9017,7 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -9131,7 +9131,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -9296,7 +9296,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -9308,7 +9308,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -9445,11 +9445,11 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new 
FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -9760,7 +9760,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -9771,21 +9771,21 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -9928,10 +9928,10 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TCell.class)))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -10179,7 +10179,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -10201,7 +10201,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -10209,7 +10209,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -10376,13 +10376,13 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.NUM_VERSIONS, new FieldMetaData("numVersions", TFieldRequirementType.DEFAULT, + put(_Fields.NUM_VERSIONS, new FieldMetaData("numVersions", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); }}); @@ -10763,7 +10763,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == 
TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -10774,21 +10774,21 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -10796,7 +10796,7 @@ public class Hbase { if (field.type == TType.I32) { this.numVersions = iprot.readI32(); setNumVersionsIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -10946,10 +10946,10 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TCell.class)))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -11197,7 +11197,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -11219,7 +11219,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -11227,7 +11227,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -11404,15 +11404,15 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(2); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); - put(_Fields.NUM_VERSIONS, new FieldMetaData("numVersions", TFieldRequirementType.DEFAULT, + put(_Fields.NUM_VERSIONS, new FieldMetaData("numVersions", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); }}); @@ -11861,7 +11861,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -11872,21 
+11872,21 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -11894,7 +11894,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); setTimestampIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -11902,7 +11902,7 @@ public class Hbase { if (field.type == TType.I32) { this.numVersions = iprot.readI32(); setNumVersionsIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -12059,10 +12059,10 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TCell.class)))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -12310,7 +12310,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -12332,7 +12332,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -12340,7 +12340,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -12487,9 +12487,9 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -12730,7 +12730,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -12741,14 +12741,14 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -12878,10 +12878,10 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - 
put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRowResult.class)))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -13102,7 +13102,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -13124,7 +13124,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -13132,7 +13132,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -13288,12 +13288,12 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); }}); @@ -13623,7 +13623,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -13634,14 +13634,14 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -13658,7 +13658,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -13808,10 +13808,10 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRowResult.class)))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -14032,7 +14032,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ 
-14054,7 +14054,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -14062,7 +14062,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -14220,11 +14220,11 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -14535,7 +14535,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -14546,14 +14546,14 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -14561,7 +14561,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); setTimestampIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -14698,10 +14698,10 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRowResult.class)))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -14922,7 +14922,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -14944,7 +14944,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -14952,7 +14952,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -15113,14 +15113,14 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + 
put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); - put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -15514,7 +15514,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -15525,14 +15525,14 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -15549,7 +15549,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -15557,7 +15557,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); setTimestampIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -15714,10 +15714,10 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRowResult.class)))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -15938,7 +15938,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -15960,7 +15960,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -15968,7 +15968,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -16124,12 +16124,12 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new 
FieldValueMetaData(TType.STRING))); - put(_Fields.MUTATIONS, new FieldMetaData("mutations", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.MUTATIONS, new FieldMetaData("mutations", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, Mutation.class)))); }}); @@ -16459,7 +16459,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -16470,14 +16470,14 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -16495,7 +16495,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -16645,9 +16645,9 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -16876,7 +16876,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -16888,7 +16888,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -16896,7 +16896,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -17056,14 +17056,14 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.MUTATIONS, new FieldMetaData("mutations", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.MUTATIONS, new FieldMetaData("mutations", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, Mutation.class)))); - put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -17463,7 +17463,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == 
TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -17474,14 +17474,14 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -17499,7 +17499,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -17507,7 +17507,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); setTimestampIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -17664,9 +17664,9 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -17895,7 +17895,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -17907,7 +17907,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -17915,7 +17915,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -18055,10 +18055,10 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW_BATCHES, new FieldMetaData("rowBatches", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.ROW_BATCHES, new FieldMetaData("rowBatches", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, BatchMutation.class)))); }}); @@ -18318,7 +18318,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -18329,7 +18329,7 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -18347,7 +18347,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -18484,9 +18484,9 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", 
TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -18715,7 +18715,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -18727,7 +18727,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -18735,7 +18735,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -18886,12 +18886,12 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW_BATCHES, new FieldMetaData("rowBatches", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.ROW_BATCHES, new FieldMetaData("rowBatches", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, BatchMutation.class)))); - put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -19221,7 +19221,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -19232,7 +19232,7 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -19250,7 +19250,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -19258,7 +19258,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); setTimestampIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -19402,9 +19402,9 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -19633,7 +19633,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -19645,7 +19645,7 @@ public class Hbase { 
if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -19653,7 +19653,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -19813,13 +19813,13 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT, + put(_Fields.VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -20200,7 +20200,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -20211,21 +20211,21 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -20233,7 +20233,7 @@ public class Hbase { if (field.type == TType.I64) { this.value = iprot.readI64(); setValueIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -20388,11 +20388,11 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -20685,7 +20685,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -20697,7 +20697,7 @@ public class Hbase { if (field.type == TType.I64) { this.success = iprot.readI64(); setSuccessIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, 
field.type); } break; @@ -20705,7 +20705,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -20713,7 +20713,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -20870,11 +20870,11 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -21185,7 +21185,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -21196,21 +21196,21 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -21350,7 +21350,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -21515,7 +21515,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -21527,7 +21527,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -21675,13 +21675,13 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(_Fields.COLUMN, new 
FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -22062,7 +22062,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -22073,21 +22073,21 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -22095,7 +22095,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); setTimestampIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -22242,7 +22242,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -22407,7 +22407,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -22419,7 +22419,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -22547,9 +22547,9 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -22790,7 +22790,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -22801,14 +22801,14 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -22935,7 +22935,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + 
put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -23100,7 +23100,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -23112,7 +23112,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -23251,11 +23251,11 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -23566,7 +23566,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -23577,14 +23577,14 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -23592,7 +23592,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); setTimestampIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -23726,7 +23726,7 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -23891,7 +23891,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -23903,7 +23903,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -24046,12 +24046,12 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.START_ROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, + put(_Fields.START_ROW, new 
FieldMetaData("startRow", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); }}); @@ -24387,7 +24387,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -24398,14 +24398,14 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case START_ROW: if (field.type == TType.STRING) { this.startRow = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -24422,7 +24422,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -24574,9 +24574,9 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -24805,7 +24805,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -24817,7 +24817,7 @@ public class Hbase { if (field.type == TType.I32) { this.success = iprot.readI32(); setSuccessIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -24825,7 +24825,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -24987,14 +24987,14 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.START_ROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, + put(_Fields.START_ROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.STOP_ROW, new FieldMetaData("stopRow", TFieldRequirementType.DEFAULT, + put(_Fields.STOP_ROW, new FieldMetaData("stopRow", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); }}); @@ -25402,7 +25402,7 @@ public class Hbase { while (true) { field = 
iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -25413,21 +25413,21 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case START_ROW: if (field.type == TType.STRING) { this.startRow = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case STOP_ROW: if (field.type == TType.STRING) { this.stopRow = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -25444,7 +25444,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -25609,9 +25609,9 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -25840,7 +25840,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -25852,7 +25852,7 @@ public class Hbase { if (field.type == TType.I32) { this.success = iprot.readI32(); setSuccessIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -25860,7 +25860,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -26005,12 +26005,12 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.START_AND_PREFIX, new FieldMetaData("startAndPrefix", TFieldRequirementType.DEFAULT, + put(_Fields.START_AND_PREFIX, new FieldMetaData("startAndPrefix", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); }}); @@ -26340,7 +26340,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -26351,14 +26351,14 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case START_AND_PREFIX: if (field.type == TType.STRING) { this.startAndPrefix = iprot.readBinary(); - } else { + } else { 
TProtocolUtil.skip(iprot, field.type); } break; @@ -26375,7 +26375,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -26527,9 +26527,9 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -26758,7 +26758,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -26770,7 +26770,7 @@ public class Hbase { if (field.type == TType.I32) { this.success = iprot.readI32(); setSuccessIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -26778,7 +26778,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -26940,14 +26940,14 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.START_ROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, + put(_Fields.START_ROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); - put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -27353,7 +27353,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -27364,14 +27364,14 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case START_ROW: if (field.type == TType.STRING) { this.startRow = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -27388,7 +27388,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -27396,7 +27396,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); setTimestampIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -27555,9 +27555,9 @@ public 
class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -27786,7 +27786,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -27798,7 +27798,7 @@ public class Hbase { if (field.type == TType.I32) { this.success = iprot.readI32(); setSuccessIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -27806,7 +27806,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -27979,16 +27979,16 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(_Fields.TABLE_NAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.START_ROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, + put(_Fields.START_ROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.STOP_ROW, new FieldMetaData("stopRow", TFieldRequirementType.DEFAULT, + put(_Fields.STOP_ROW, new FieldMetaData("stopRow", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); - put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -28466,7 +28466,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -28477,21 +28477,21 @@ public class Hbase { case TABLE_NAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case START_ROW: if (field.type == TType.STRING) { this.startRow = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case STOP_ROW: if (field.type == TType.STRING) { this.stopRow = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -28508,7 +28508,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -28516,7 +28516,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = 
iprot.readI64(); setTimestampIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -28688,9 +28688,9 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -28919,7 +28919,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -28931,7 +28931,7 @@ public class Hbase { if (field.type == TType.I32) { this.success = iprot.readI32(); setSuccessIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -28939,7 +28939,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -29068,7 +29068,7 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, + put(_Fields.ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); }}); @@ -29239,7 +29239,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -29251,7 +29251,7 @@ public class Hbase { if (field.type == TType.I32) { this.id = iprot.readI32(); setIdIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -29365,12 +29365,12 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRowResult.class)))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -29647,7 +29647,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -29669,7 +29669,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -29677,7 +29677,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); 
this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -29685,7 +29685,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -29847,9 +29847,9 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(2); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, + put(_Fields.ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(_Fields.NB_ROWS, new FieldMetaData("nbRows", TFieldRequirementType.DEFAULT, + put(_Fields.NB_ROWS, new FieldMetaData("nbRows", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); }}); @@ -30088,7 +30088,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -30100,7 +30100,7 @@ public class Hbase { if (field.type == TType.I32) { this.id = iprot.readI32(); setIdIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -30108,7 +30108,7 @@ public class Hbase { if (field.type == TType.I32) { this.nbRows = iprot.readI32(); setNbRowsIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -30229,12 +30229,12 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(_Fields.SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRowResult.class)))); - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -30511,7 +30511,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -30533,7 +30533,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -30541,7 +30541,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -30549,7 +30549,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -30701,7 +30701,7 @@ public class Hbase { private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, + put(_Fields.ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, 
new FieldValueMetaData(TType.I32))); }}); @@ -30872,7 +30872,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -30884,7 +30884,7 @@ public class Hbase { if (field.type == TType.I32) { this.id = iprot.readI32(); setIdIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -30995,9 +30995,9 @@ public class Hbase { // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(_Fields.IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(_Fields.IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -31226,7 +31226,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -31238,7 +31238,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -31246,7 +31246,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java index 191d301..d2bfa10 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java +++ b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java @@ -106,7 +106,7 @@ public class IOError extends Exception implements TBase, java.i // isset id assignments public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT, + put(_Fields.MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -271,7 +271,7 @@ public class IOError extends Exception implements TBase, java.i while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -282,7 +282,7 @@ public class IOError extends Exception implements TBase, java.i case MESSAGE: if (field.type == TType.STRING) { this.message = iprot.readString(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java index 63e15bd..6eb2700 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java +++ b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java @@ -105,7 +105,7 @@ public class IllegalArgument extends Exception implements TBase metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - 
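All of the generated read() methods touched in the hunks above follow the same defensive deserialization pattern: read a field header, stop on TType.STOP, dispatch on the field id, and call TProtocolUtil.skip() for any unknown id or unexpected wire type. A minimal hand-written reader in the same style is sketched below; it assumes the org.apache.thrift protocol classes used by the generated code, and the ExampleResult struct with its single i32 field is hypothetical, not part of this patch.

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TField;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.thrift.protocol.TProtocolUtil;
    import org.apache.thrift.protocol.TType;

    public class ExampleResult {
      private int success;            // hypothetical field with thrift id 0
      private boolean successIsSet;

      public void read(TProtocol iprot) throws TException {
        iprot.readStructBegin();
        while (true) {
          TField field = iprot.readFieldBegin();
          if (field.type == TType.STOP) {              // end of struct on the wire
            break;
          }
          switch (field.id) {
            case 0:
              if (field.type == TType.I32) {
                this.success = iprot.readI32();
                this.successIsSet = true;
              } else {
                TProtocolUtil.skip(iprot, field.type); // wrong wire type: skip it
              }
              break;
            default:
              TProtocolUtil.skip(iprot, field.type);   // unknown field id: skip it
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
      }
    }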
put(_Fields.MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT, + put(_Fields.MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -270,7 +270,7 @@ public class IllegalArgument extends Exception implements TBase, java.io.Serializable, private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.IS_DELETE, new FieldMetaData("isDelete", TFieldRequirementType.DEFAULT, + put(_Fields.IS_DELETE, new FieldMetaData("isDelete", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.BOOL))); - put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(_Fields.COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT, + put(_Fields.VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -411,7 +411,7 @@ public class Mutation implements TBase, java.io.Serializable, while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -423,21 +423,21 @@ public class Mutation implements TBase, java.io.Serializable, if (field.type == TType.BOOL) { this.isDelete = iprot.readBool(); setIsDeleteIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case VALUE: if (field.type == TType.STRING) { this.value = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java index 1bc3c55..ed021d3 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java +++ b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java @@ -112,9 +112,9 @@ public class TCell implements TBase, java.io.Serializable, Clonea private BitSet __isset_bit_vector = new BitSet(1); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT, + put(_Fields.VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(_Fields.TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -343,7 +343,7 @@ public class TCell implements TBase, java.io.Serializable, Clonea while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -354,7 +354,7 @@ public class TCell implements TBase, java.io.Serializable, Clonea case VALUE: if (field.type == TType.STRING) { this.value = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -362,7 +362,7 @@ public class TCell implements TBase, java.io.Serializable, Clonea if 
(field.type == TType.I64) { this.timestamp = iprot.readI64(); setTimestampIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java index 649fb1b..a397431 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java +++ b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java @@ -119,15 +119,15 @@ public class TRegionInfo implements TBase, java.io.Serializ private BitSet __isset_bit_vector = new BitSet(2); public static final Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.START_KEY, new FieldMetaData("startKey", TFieldRequirementType.DEFAULT, + put(_Fields.START_KEY, new FieldMetaData("startKey", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.END_KEY, new FieldMetaData("endKey", TFieldRequirementType.DEFAULT, + put(_Fields.END_KEY, new FieldMetaData("endKey", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, + put(_Fields.ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); - put(_Fields.NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, + put(_Fields.NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.VERSION, new FieldMetaData("version", TFieldRequirementType.DEFAULT, + put(_Fields.VERSION, new FieldMetaData("version", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.BYTE))); }}); @@ -546,7 +546,7 @@ public class TRegionInfo implements TBase, java.io.Serializ while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -557,14 +557,14 @@ public class TRegionInfo implements TBase, java.io.Serializ case START_KEY: if (field.type == TType.STRING) { this.startKey = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case END_KEY: if (field.type == TType.STRING) { this.endKey = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -572,14 +572,14 @@ public class TRegionInfo implements TBase, java.io.Serializ if (field.type == TType.I64) { this.id = iprot.readI64(); setIdIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case NAME: if (field.type == TType.STRING) { this.name = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -587,7 +587,7 @@ public class TRegionInfo implements TBase, java.io.Serializ if (field.type == TType.BYTE) { this.version = iprot.readByte(); setVersionIsSet(true); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java index ce5c2bf..39d0f9b 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java +++ b/core/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java @@ -107,11 +107,11 @@ public class TRowResult implements TBase, java.io.Serializab // isset id assignments public static final 
Map<_Fields, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new EnumMap<_Fields, FieldMetaData>(_Fields.class) {{ - put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(_Fields.ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new MapMetaData(TType.MAP, - new FieldValueMetaData(TType.STRING), + put(_Fields.COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new MapMetaData(TType.MAP, + new FieldValueMetaData(TType.STRING), new StructMetaData(TType.STRUCT, TCell.class)))); }}); @@ -336,7 +336,7 @@ public class TRowResult implements TBase, java.io.Serializab while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } _Fields fieldId = _Fields.findByThriftId(field.id); @@ -347,7 +347,7 @@ public class TRowResult implements TBase, java.io.Serializab case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -367,7 +367,7 @@ public class TRowResult implements TBase, java.io.Serializab } iprot.readMapEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/Base64.java b/core/src/main/java/org/apache/hadoop/hbase/util/Base64.java index 4b934b3..892f808 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/Base64.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/Base64.java @@ -137,7 +137,7 @@ public class Base64 { /** * Encode using Base64-like encoding that is URL and Filename safe as - * described in Section 4 of RFC3548: + * described in Section 4 of RFC3548: * * http://www.faqs.org/rfcs/rfc3548.html. * It is important to note that data encoded this way is not @@ -155,7 +155,7 @@ public class Base64 { public final static int ORDERED = 32; /* ******** P R I V A T E F I E L D S ******** */ - + private static final Log LOG = LogFactory.getLog(Base64.class); /** Maximum line length (76) of Base64 output. */ @@ -286,8 +286,8 @@ public class Base64 { * exactly the same as the input value. It is described in the RFC change * request: * http://www.faqs.org/qa/rfcc-1940.html. - * - * It replaces "plus" and "slash" with "hyphen" and "underscore" and + * + * It replaces "plus" and "slash" with "hyphen" and "underscore" and * rearranges the alphabet so that the characters are in their natural sort * order. */ @@ -353,7 +353,7 @@ public class Base64 { } else if ((options & ORDERED) == ORDERED) { return _ORDERED_ALPHABET; - + } else { return _STANDARD_ALPHABET; } @@ -370,10 +370,10 @@ public class Base64 { protected static byte[] getDecodabet(int options) { if ((options & URL_SAFE) == URL_SAFE) { return _URL_SAFE_DECODABET; - + } else if ((options & ORDERED) == ORDERED) { return _ORDERED_DECODABET; - + } else { return _STANDARD_DECODABET; } @@ -384,9 +384,9 @@ public class Base64 { /** * Main program. Used for testing. - * + * * Encodes or decodes two files from the command line - * + * * @param args command arguments */ public static void main(String[] args) { @@ -411,7 +411,7 @@ public class Base64 { /** * Prints command line usage. - * + * * @param msg A message to include with usage info. */ private static void usage(String msg) { @@ -427,7 +427,7 @@ public class Base64 { * significant bytes in your array is given by numSigBytes. 
The * array threeBytes needs only be as big as numSigBytes. * Code can reuse a byte array by passing a four-byte array as b4. - * + * * @param b4 A reusable byte array to reduce array instantiation * @param threeBytes the array to convert * @param numSigBytes the number of significant bytes in your array @@ -454,7 +454,7 @@ public class Base64 { *

        * This is the lowest level of the encoding methods with all possible * parameters. - * + * * @param source the array to convert * @param srcOffset the index where conversion begins * @param numSigBytes the number of significant bytes in your array @@ -468,7 +468,7 @@ public class Base64 { int numSigBytes, byte[] destination, int destOffset, int options) { byte[] ALPHABET = getAlphabet(options); - // 1 2 3 + // 1 2 3 // 01234567890123456789012345678901 Bit position // --------000000001111111122222222 Array position from threeBytes // --------| || || || | Six bit groups to index ALPHABET @@ -516,7 +516,7 @@ public class Base64 { * serialized object. If the object cannot be serialized or there is another * error, the method will return null. The object is not * GZip-compressed before being encoded. - * + * * @param serializableObject The object to encode * @return The Base64-encoded object * @since 1.4 @@ -541,7 +541,7 @@ public class Base64 { *

        * Example: * encodeObject( myObj, Base64.GZIP | Base64.DONT_BREAK_LINES ) - * + * * @param serializableObject The object to encode * @param options Specified options * @see Base64#GZIP @@ -569,11 +569,11 @@ public class Base64 { } catch (UnsupportedEncodingException uue) { return new String(baos.toByteArray()); - + } catch (IOException e) { LOG.error("error encoding object", e); return null; - + } finally { if (oos != null) { try { @@ -599,7 +599,7 @@ public class Base64 { /** * Encodes a byte array into Base64 notation. Does not GZip-compress data. - * + * * @param source The data to convert * @return encoded byte array * @since 1.4 @@ -617,13 +617,13 @@ public class Base64 { *

      *   DONT_BREAK_LINES: don't break lines at 76 characters. Note: * Technically, this makes your encoding non-compliant.
      *
      - * + * *

      * Example: encodeBytes( myData, Base64.GZIP ) or *

      * Example: * encodeBytes( myData, Base64.GZIP | Base64.DONT_BREAK_LINES ) - * + * * @param source The data to convert * @param options Specified options * @see Base64#GZIP @@ -639,7 +639,7 @@ public class Base64 { /** * Encodes a byte array into Base64 notation. Does not GZip-compress data. - * + * * @param source The data to convert * @param off Offset in array where conversion should begin * @param len Length of data to convert @@ -659,13 +659,13 @@ public class Base64 { *

    *   DONT_BREAK_LINES: don't break lines at 76 characters. Note: * Technically, this makes your encoding non-compliant.
    *
    - * + * *

    * Example: encodeBytes( myData, Base64.GZIP ) or *

    * Example: * encodeBytes( myData, Base64.GZIP | Base64.DONT_BREAK_LINES ) - * + * * @param source The data to convert * @param off Offset in array where conversion should begin * @param len Length of data to convert @@ -684,7 +684,7 @@ public class Base64 { GZIPOutputStream gzos = null; try { - gzos = + gzos = new GZIPOutputStream(new Base64OutputStream(baos, ENCODE | options)); gzos.write(source, off, len); @@ -698,7 +698,7 @@ public class Base64 { } catch (IOException e) { LOG.error("error encoding byte array", e); return null; - + } finally { if (gzos != null) { try { @@ -715,7 +715,7 @@ public class Base64 { } // end finally } // end Compress - + // Don't compress. Better not to use streams at all then. boolean breakLines = ((options & DONT_BREAK_LINES) == 0); @@ -770,7 +770,7 @@ public class Base64 { * This is the lowest level of the decoding methods with all possible * parameters. *

    - * + * * @param source the array to convert * @param srcOffset the index where conversion begins * @param destination the array to hold the conversion @@ -796,7 +796,7 @@ public class Base64 { destination[destOffset] = (byte) (outBuff >>> 16); return 1; - + } else if (source[srcOffset + 3] == EQUALS_SIGN) { // Example: DkL= // Two ways to do the same thing. Don't know which way I like best. // int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 ) @@ -829,9 +829,9 @@ public class Base64 { destination[destOffset + 2] = (byte) (outBuff); return 3; - + } catch (Exception e) { - LOG.error("error decoding bytes at " + source[srcOffset] + ": " + + LOG.error("error decoding bytes at " + source[srcOffset] + ": " + (DECODABET[source[srcOffset]]) + ", " + source[srcOffset + 1] + ": " + (DECODABET[source[srcOffset + 1]]) + ", " + source[srcOffset + 2] + ": " + (DECODABET[source[srcOffset + 2]]) + @@ -846,7 +846,7 @@ public class Base64 { * Very low-level access to decoding ASCII characters in the form of a byte * array. Does not support automatically gunzipping or any other "fancy" * features. - * + * * @param source The Base64 encoded data * @param off The offset of where to begin decoding * @param len The length of characters to decode @@ -899,7 +899,7 @@ public class Base64 { /** * Decodes data from Base64 notation, automatically detecting gzip-compressed * data and decompressing it. - * + * * @param s the string to decode * @return the decoded data * @since 1.4 @@ -911,7 +911,7 @@ public class Base64 { /** * Decodes data from Base64 notation, automatically detecting gzip-compressed * data and decompressing it. - * + * * @param s the string to decode * @param options options for decode * @see Base64#URL_SAFE @@ -977,7 +977,7 @@ public class Base64 { /** * Attempts to decode Base64 data and deserialize a Java Object within. * Returns null if there was an error. - * + * * @param encodedObject The Base64 data to decode * @return The decoded and deserialized object * @since 1.5 @@ -1013,11 +1013,11 @@ public class Base64 { /** * Convenience method for encoding data to a file. - * + * * @param dataToEncode byte array of data to encode in base64 form * @param filename Filename for saving encoded data * @return true if successful, false otherwise - * + * * @since 2.1 */ public static boolean encodeToFile(byte[] dataToEncode, String filename) { @@ -1031,7 +1031,7 @@ public class Base64 { } catch (IOException e) { LOG.error("error encoding file: " + filename, e); success = false; - + } finally { if (bos != null) { try { @@ -1047,11 +1047,11 @@ public class Base64 { /** * Convenience method for decoding data to a file. - * + * * @param dataToDecode Base64-encoded data as a string * @param filename Filename for saving decoded data * @return true if successful, false otherwise - * + * * @since 2.1 */ public static boolean decodeToFile(String dataToDecode, String filename) { @@ -1061,7 +1061,7 @@ public class Base64 { bos = new Base64OutputStream(new FileOutputStream(filename), DECODE); bos.write(dataToDecode.getBytes(PREFERRED_ENCODING)); success = true; - + } catch (IOException e) { LOG.error("error decoding to file: " + filename, e); success = false; @@ -1081,10 +1081,10 @@ public class Base64 { /** * Convenience method for reading a base64-encoded file and decoding it. 
- * + * * @param filename Filename for reading encoded data * @return decoded byte array or null if unsuccessful - * + * * @since 2.1 */ public static byte[] decodeFromFile(String filename) { @@ -1096,33 +1096,33 @@ public class Base64 { // Check the size of file if (file.length() > Integer.MAX_VALUE) { - LOG.fatal("File is too big for this convenience method (" + + LOG.fatal("File is too big for this convenience method (" + file.length() + " bytes)."); return null; } // end if: file too big for int index - + buffer = new byte[(int) file.length()]; // Open a stream - + bis = new Base64InputStream(new BufferedInputStream( new FileInputStream(file)), DECODE); // Read until done - + int length = 0; for (int numBytes; (numBytes = bis.read(buffer, length, 4096)) >= 0; ) { length += numBytes; } - + // Save in a variable to return - + decodedData = new byte[length]; System.arraycopy(buffer, 0, decodedData, 0, length); } catch (IOException e) { LOG.error("Error decoding from file " + filename, e); - + } finally { if (bis != null) { try { @@ -1138,10 +1138,10 @@ public class Base64 { /** * Convenience method for reading a binary file and base64-encoding it. - * + * * @param filename Filename for reading binary data * @return base64-encoded string or null if unsuccessful - * + * * @since 2.1 */ public static String encodeFromFile(String filename) { @@ -1149,9 +1149,9 @@ public class Base64 { Base64InputStream bis = null; try { File file = new File(filename); - + // Need max() for math on small files (v2.2.1) - + byte[] buffer = new byte[Math.max((int) (file.length() * 1.4), 40)]; // Open a stream @@ -1166,12 +1166,12 @@ public class Base64 { } // Save in a variable to return - + encodedData = new String(buffer, 0, length, PREFERRED_ENCODING); } catch (IOException e) { LOG.error("Error encoding from file " + filename, e); - + } finally { if (bis != null) { try { @@ -1187,7 +1187,7 @@ public class Base64 { /** * Reads infile and encodes it to outfile. - * + * * @param infile Input file * @param outfile Output file * @since 2.2 @@ -1198,7 +1198,7 @@ public class Base64 { try { out = new BufferedOutputStream(new FileOutputStream(outfile)); out.write(encoded.getBytes("US-ASCII")); // Strict, 7-bit output. - + } catch (IOException e) { LOG.error("error encoding from file " + infile + " to " + outfile, e); @@ -1215,7 +1215,7 @@ public class Base64 { /** * Reads infile and decodes it to outfile. - * + * * @param infile Input file * @param outfile Output file * @since 2.2 @@ -1226,7 +1226,7 @@ public class Base64 { try { out = new BufferedOutputStream(new FileOutputStream(outfile)); out.write(decoded); - + } catch (IOException e) { LOG.error("error decoding from file " + infile + " to " + outfile, e); @@ -1247,7 +1247,7 @@ public class Base64 { * A {@link Base64.Base64InputStream} will read data from another * InputStream, given in the constructor, and * encode/decode to/from Base64 notation on the fly. - * + * * @see Base64 * @since 1.3 */ @@ -1264,7 +1264,7 @@ public class Base64 { /** * Constructs a {@link Base64InputStream} in DECODE mode. - * + * * @param in the InputStream from which to read data. * @since 1.3 */ @@ -1276,18 +1276,18 @@ public class Base64 { * Constructs a {@link Base64.Base64InputStream} in either ENCODE or DECODE mode. *

    * Valid options: - * + * *

          *   ENCODE or DECODE: Encode or Decode as data is read.
          *   DONT_BREAK_LINES: don't break lines at 76 characters
          *     (only meaningful when encoding)
          *     <i>Note: Technically, this makes your encoding non-compliant.</i>
          * 
    - * + * *

    * Example: new Base64.Base64InputStream( in, Base64.DECODE ) - * - * + * + * * @param in the InputStream from which to read data. * @param options Specified options * @see Base64#ENCODE @@ -1311,7 +1311,7 @@ public class Base64 { /** * Reads enough of the input stream to convert to/from Base64 and returns * the next byte. - * + * * @return next byte * @since 1.3 */ @@ -1369,10 +1369,10 @@ public class Base64 { if (i == 4) { numSigBytes = decode4to3(b4, 0, buffer, 0, options); position = 0; - + } else if (i == 0) { return -1; - + } else { // Must have broken out from above. throw new IOException("Improperly padded Base64 input."); @@ -1415,7 +1415,7 @@ public class Base64 { * Calls {@link #read()} repeatedly until the end of stream is reached or * len bytes are read. Returns number of bytes read into array * or -1 if end of stream is encountered. - * + * * @param dest array to hold values * @param off offset for array * @param len max number of bytes to read into array @@ -1447,7 +1447,7 @@ public class Base64 { * A {@link Base64.Base64OutputStream} will write data to another * OutputStream, given in the constructor, and * encode/decode to/from Base64 notation on the fly. - * + * * @see Base64 * @since 1.3 */ @@ -1465,7 +1465,7 @@ public class Base64 { /** * Constructs a {@link Base64OutputStream} in ENCODE mode. - * + * * @param out the OutputStream to which data will be written. * @since 1.3 */ @@ -1477,17 +1477,17 @@ public class Base64 { * Constructs a {@link Base64OutputStream} in either ENCODE or DECODE mode. *

    * Valid options: - * + * *

      *
    *   ENCODE or DECODE: Encode or Decode as data is read.
    *
    *   DONT_BREAK_LINES: don't break lines at 76 characters (only * meaningful when encoding) Note: Technically, this makes your * encoding non-compliant.
    *
    - * + * *

    * Example: new Base64.Base64OutputStream( out, Base64.ENCODE ) - * + * * @param out the OutputStream to which data will be written. * @param options Specified options. * @see Base64#ENCODE @@ -1514,7 +1514,7 @@ public class Base64 { * notation. When encoding, bytes are buffered three at a time before the * output stream actually gets a write() call. When decoding, bytes are * buffered four at a time. - * + * * @param theByte the byte to write * @since 1.3 */ @@ -1539,7 +1539,7 @@ public class Base64 { position = 0; } // end if: enough to output - + } else { // Meaningful Base64 character? if (decodabet[theByte & 0x7f] > WHITE_SPACE_ENC) { @@ -1549,7 +1549,7 @@ public class Base64 { out.write(b4, 0, len); position = 0; } // end if: enough to output - + } else if (decodabet[theByte & 0x7f] != WHITE_SPACE_ENC) { throw new IOException("Invalid character in Base64 data."); } // end else: not white space either @@ -1559,7 +1559,7 @@ public class Base64 { /** * Calls {@link #write(int)} repeatedly until len bytes are * written. - * + * * @param theBytes array from which to read bytes * @param off offset for array * @param len max number of bytes to read into array @@ -1582,7 +1582,7 @@ public class Base64 { /** * Method added by PHIL. [Thanks, PHIL. -Rob] This pads the buffer without * closing the stream. - * + * * @throws IOException e */ public void flushBase64() throws IOException { @@ -1600,7 +1600,7 @@ public class Base64 { /** * Flushes and closes (I think, in the superclass) the stream. - * + * * @since 1.3 */ @Override @@ -1631,7 +1631,7 @@ public class Base64 { /** * Resumes encoding of the stream. May be helpful if you need to embed a * piece of base640-encoded data in a stream. - * + * * @since 1.5.1 */ public void resumeEncoding() { diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java b/core/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java index baecf3c..e11a844 100755 --- a/core/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java @@ -30,12 +30,12 @@ import java.util.Properties; /** * Class for determining the "size" of a class, an attempt to calculate the * actual bytes that an object of this class will occupy in memory - * + * * The core of this class is taken from the Derby project */ public class ClassSize { static final Log LOG = LogFactory.getLog(ClassSize.class); - + private static int nrOfRefsPerObj = 2; /** Array overhead */ @@ -43,61 +43,61 @@ public class ClassSize { /** Overhead for ArrayList(0) */ public static int ARRAYLIST = 0; - + /** Overhead for ByteBuffer */ public static int BYTE_BUFFER = 0; /** Overhead for an Integer */ public static int INTEGER = 0; - + /** Overhead for entry in map */ public static int MAP_ENTRY = 0; - + /** Object overhead is minimum 2 * reference size (8 bytes on 64-bit) */ public static int OBJECT = 0; - + /** Reference size is 8 bytes on 64-bit, 4 bytes on 32-bit */ public static int REFERENCE = 0; - + /** String overhead */ public static int STRING = 0; /** Overhead for TreeMap */ public static int TREEMAP = 0; - + /** Overhead for ConcurrentHashMap */ public static int CONCURRENT_HASHMAP = 0; - + /** Overhead for ConcurrentHashMap.Entry */ public static int CONCURRENT_HASHMAP_ENTRY = 0; - + /** Overhead for ConcurrentHashMap.Segment */ public static int CONCURRENT_HASHMAP_SEGMENT = 0; - + /** Overhead for ConcurrentSkipListMap */ public static int CONCURRENT_SKIPLISTMAP = 0; - + /** Overhead for ConcurrentSkipListMap Entry */ 
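As a reading aid for the Base64 hunks above: the class exposes plain, URL_SAFE and ORDERED alphabets plus GZIP and DONT_BREAK_LINES options. A minimal round-trip sketch follows; it assumes the encodeBytes(byte[]), encodeBytes(byte[], int) and decode(String) signatures documented in the javadoc above and is illustrative only, not part of this patch.

    import org.apache.hadoop.hbase.util.Base64;

    public class Base64Example {
      public static void main(String[] args) throws Exception {
        byte[] data = "hello hbase".getBytes("UTF-8");

        // Default alphabet, no compression, lines broken at 76 characters.
        String encoded = Base64.encodeBytes(data);

        // URL- and filename-safe alphabet per RFC 3548 section 4.
        String urlSafe = Base64.encodeBytes(data, Base64.URL_SAFE);

        // decode() detects and gunzips gzip-compressed payloads automatically.
        byte[] roundTripped = Base64.decode(encoded);

        System.out.println(encoded + " / " + urlSafe + " -> " + new String(roundTripped, "UTF-8"));
      }
    }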
public static int CONCURRENT_SKIPLISTMAP_ENTRY = 0; - + /** Overhead for ReentrantReadWriteLock */ public static int REENTRANT_LOCK = 0; - + /** Overhead for AtomicLong */ public static int ATOMIC_LONG = 0; - + /** Overhead for AtomicInteger */ public static int ATOMIC_INTEGER = 0; - + /** Overhead for AtomicBoolean */ public static int ATOMIC_BOOLEAN = 0; - + /** Overhead for CopyOnWriteArraySet */ public static int COPYONWRITE_ARRAYSET = 0; - + /** Overhead for CopyOnWriteArrayList */ public static int COPYONWRITE_ARRAYLIST = 0; - + private static final String THIRTY_TWO = "32"; /** @@ -108,7 +108,7 @@ public class ClassSize { // Figure out whether this is a 32 or 64 bit machine. Properties sysProps = System.getProperties(); String arcModel = sysProps.getProperty("sun.arch.data.model"); - + //Default value is set to 8, covering the case when arcModel is unknown REFERENCE = 8; if (arcModel.equals(THIRTY_TWO)) { @@ -116,7 +116,7 @@ public class ClassSize { } OBJECT = 2 * REFERENCE; - + ARRAY = 3 * REFERENCE; ARRAYLIST = align(OBJECT + align(REFERENCE) + align(ARRAY) + @@ -124,48 +124,48 @@ public class ClassSize { //noinspection PointlessArithmeticExpression BYTE_BUFFER = align(OBJECT + align(REFERENCE) + align(ARRAY) + - (5 * Bytes.SIZEOF_INT) + - (3 * Bytes.SIZEOF_BOOLEAN) + Bytes.SIZEOF_LONG); - + (5 * Bytes.SIZEOF_INT) + + (3 * Bytes.SIZEOF_BOOLEAN) + Bytes.SIZEOF_LONG); + INTEGER = align(OBJECT + Bytes.SIZEOF_INT); - + MAP_ENTRY = align(OBJECT + 5 * REFERENCE + Bytes.SIZEOF_BOOLEAN); - + TREEMAP = align(OBJECT + (2 * Bytes.SIZEOF_INT) + align(7 * REFERENCE)); - + STRING = align(OBJECT + ARRAY + REFERENCE + 3 * Bytes.SIZEOF_INT); - - CONCURRENT_HASHMAP = align((2 * Bytes.SIZEOF_INT) + ARRAY + + + CONCURRENT_HASHMAP = align((2 * Bytes.SIZEOF_INT) + ARRAY + (6 * REFERENCE) + OBJECT); - + CONCURRENT_HASHMAP_ENTRY = align(REFERENCE + OBJECT + (3 * REFERENCE) + (2 * Bytes.SIZEOF_INT)); - - CONCURRENT_HASHMAP_SEGMENT = align(REFERENCE + OBJECT + + + CONCURRENT_HASHMAP_SEGMENT = align(REFERENCE + OBJECT + (3 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_FLOAT + ARRAY); - + CONCURRENT_SKIPLISTMAP = align(Bytes.SIZEOF_INT + OBJECT + (8 * REFERENCE)); - + CONCURRENT_SKIPLISTMAP_ENTRY = align( - align(OBJECT + (3 * REFERENCE)) + /* one node per entry */ + align(OBJECT + (3 * REFERENCE)) + /* one node per entry */ align((OBJECT + (3 * REFERENCE))/2)); /* one index per two entries */ - + REENTRANT_LOCK = align(OBJECT + (3 * REFERENCE)); - + ATOMIC_LONG = align(OBJECT + Bytes.SIZEOF_LONG); - + ATOMIC_INTEGER = align(OBJECT + Bytes.SIZEOF_INT); - + ATOMIC_BOOLEAN = align(OBJECT + Bytes.SIZEOF_BOOLEAN); - + COPYONWRITE_ARRAYSET = align(OBJECT + REFERENCE); - + COPYONWRITE_ARRAYLIST = align(OBJECT + (2 * REFERENCE) + ARRAY); } - + /** - * The estimate of the size of a class instance depends on whether the JVM - * uses 32 or 64 bit addresses, that is it depends on the size of an object + * The estimate of the size of a class instance depends on whether the JVM + * uses 32 or 64 bit addresses, that is it depends on the size of an object * reference. It is a linear function of the size of a reference, e.g. * 24 + 5*r where r is the size of a reference (usually 4 or 8 bytes). 
* @@ -183,7 +183,7 @@ public class ClassSize { int primitives = 0; int arrays = 0; //The number of references that a new object takes - int references = nrOfRefsPerObj; + int references = nrOfRefsPerObj; for ( ; null != cl; cl = cl.getSuperclass()) { Field[] field = cl.getDeclaredFields(); @@ -230,7 +230,7 @@ public class ClassSize { } /** - * Estimate the static space taken up by a class instance given the + * Estimate the static space taken up by a class instance given the * coefficients returned by getSizeCoefficients. * * @param coeff the coefficients @@ -247,19 +247,19 @@ public class ClassSize { if (LOG.isDebugEnabled()) { // Write out region name as string and its encoded name. LOG.debug("Primitives " + coeff[0] + ", arrays " + coeff[1] + - ", references(includes " + nrOfRefsPerObj + - " for object overhead) " + coeff[2] + ", refSize " + REFERENCE + + ", references(includes " + nrOfRefsPerObj + + " for object overhead) " + coeff[2] + ", refSize " + REFERENCE + ", size " + size); } } return size; - } + } /** - * Estimate the static space taken up by the fields of a class. This includes - * the space taken up by by references (the pointer) but not by the referenced - * object. So the estimated size of an array field does not depend on the size - * of the array. Similarly the size of an object (reference) field does not + * Estimate the static space taken up by the fields of a class. This includes + * the space taken up by by references (the pointer) but not by the referenced + * object. So the estimated size of an array field does not depend on the size + * of the array. Similarly the size of an object (reference) field does not * depend on the object. * * @param cl class @@ -269,7 +269,7 @@ public class ClassSize { @SuppressWarnings("unchecked") public static long estimateBase(Class cl, boolean debug) { return estimateBaseFromCoefficients( getSizeCoefficients(cl, debug), debug); - } + } /** * Aligns a number to 8. @@ -279,7 +279,7 @@ public class ClassSize { public static int align(int num) { return (int)(align((long)num)); } - + /** * Aligns a number to 8. * @param num number to align to 8 @@ -290,6 +290,6 @@ public class ClassSize { //stored and sent together return ((num + 7) >> 3) << 3; } - + } diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/core/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 2454cb1..fb89afd 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -102,11 +102,11 @@ public class FSUtils { /** * Checks to see if the specified file system is available - * + * * @param fs filesystem * @throws IOException e */ - public static void checkFileSystemAvailable(final FileSystem fs) + public static void checkFileSystemAvailable(final FileSystem fs) throws IOException { if (!(fs instanceof DistributedFileSystem)) { return; @@ -129,10 +129,10 @@ public class FSUtils { io.initCause(exception); throw io; } - + /** * Verifies current version of file system - * + * * @param fs filesystem object * @param rootdir root hbase directory * @return null if no version file exists, version string otherwise. 
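The ClassSize hunks above are whitespace-only, but the two public entry points they pass through, estimateBase(Class, boolean) and align(int), are easy to misread, so here is a short usage sketch. The Sample class is hypothetical; the method signatures are the ones visible in the hunks.

    import org.apache.hadoop.hbase.util.ClassSize;

    public class ClassSizeExample {
      // Toy class whose shallow (per-instance) footprint we want to estimate.
      static class Sample {
        int count;        // primitive
        byte[] payload;   // counted as an array reference, not the array body
        Object next;      // plain reference
      }

      public static void main(String[] args) {
        // Sums primitives, references and object/array overhead, aligned to 8 bytes.
        long shallowSize = ClassSize.estimateBase(Sample.class, true /* debug logging */);

        // align() rounds a size up to the JVM's 8-byte allocation granularity.
        int aligned = ClassSize.align(13);   // -> 16

        System.out.println("estimate=" + shallowSize + " aligned=" + aligned);
      }
    }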
@@ -153,20 +153,20 @@ public class FSUtils { } return version; } - + /** * Verifies current version of file system - * + * * @param fs file system * @param rootdir root directory of HBase installation - * @param message if true, issues a message on System.out - * + * @param message if true, issues a message on System.out + * * @throws IOException e */ - public static void checkVersion(FileSystem fs, Path rootdir, + public static void checkVersion(FileSystem fs, Path rootdir, boolean message) throws IOException { String version = getVersion(fs, rootdir); - + if (version == null) { if (!rootRegionExists(fs, rootdir)) { // rootDir is empty (no version file and no root region) @@ -176,7 +176,7 @@ public class FSUtils { } } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return; - + // version is deprecated require migration // Output on stdout so user sees it in terminal. String msg = "File system needs to be upgraded." @@ -188,28 +188,28 @@ public class FSUtils { } throw new FileSystemVersionException(msg); } - + /** * Sets version of file system - * + * * @param fs filesystem object * @param rootdir hbase root * @throws IOException e */ - public static void setVersion(FileSystem fs, Path rootdir) + public static void setVersion(FileSystem fs, Path rootdir) throws IOException { setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION); } /** * Sets version of file system - * + * * @param fs filesystem object * @param rootdir hbase root directory * @param version version to set * @throws IOException e */ - public static void setVersion(FileSystem fs, Path rootdir, String version) + public static void setVersion(FileSystem fs, Path rootdir, String version) throws IOException { FSDataOutputStream s = fs.create(new Path(rootdir, HConstants.VERSION_FILE_NAME)); @@ -220,7 +220,7 @@ public class FSUtils { /** * Verifies root directory path is a valid URI with a scheme - * + * * @param root root directory path * @return Passed root argument. * @throws IOException if not a valid URI with a scheme @@ -288,7 +288,7 @@ public class FSUtils { * This method is useful if you want to print out a Path without qualifying * Filesystem instance. * @param p Filesystem Path whose 'path' component we are to return. - * @return Path portion of the Filesystem + * @return Path portion of the Filesystem */ public static String getPath(Path p) { return p.toUri().getPath(); @@ -306,7 +306,7 @@ public class FSUtils { /** * Checks if root region exists - * + * * @param fs file system * @param rootdir root directory of HBase installation * @return true if exists @@ -366,42 +366,42 @@ public class FSUtils { } /** - * Returns the total overall fragmentation percentage. Includes .META. and + * Returns the total overall fragmentation percentage. Includes .META. and * -ROOT- as well. - * + * * @param master The master defining the HBase root and file system. * @return A map for each table and its percentage. * @throws IOException When scanning the directory fails. */ - public static int getTotalTableFragmentation(final HMaster master) + public static int getTotalTableFragmentation(final HMaster master) throws IOException { Map map = getTableFragmentation(master); return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1; } - + /** * Runs through the HBase rootdir and checks how many stores for each table - * have more than one file in them. Checks -ROOT- and .META. too. The total - * percentage across all tables is stored under the special key "-TOTAL-". - * + * have more than one file in them. 
Checks -ROOT- and .META. too. The total + * percentage across all tables is stored under the special key "-TOTAL-". + * * @param master The master defining the HBase root and file system. * @return A map for each table and its percentage. * @throws IOException When scanning the directory fails. */ public static Map getTableFragmentation( - final HMaster master) + final HMaster master) throws IOException { Path path = master.getRootDir(); // since HMaster.getFileSystem() is package private FileSystem fs = path.getFileSystem(master.getConfiguration()); return getTableFragmentation(fs, path); } - + /** * Runs through the HBase rootdir and checks how many stores for each table - * have more than one file in them. Checks -ROOT- and .META. too. The total - * percentage across all tables is stored under the special key "-TOTAL-". - * + * have more than one file in them. Checks -ROOT- and .META. too. The total + * percentage across all tables is stored under the special key "-TOTAL-". + * * @param fs The file system to use. * @param hbaseRootDir The root directory to scan. * @return A map for each table and its percentage. diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/Hash.java b/core/src/main/java/org/apache/hadoop/hbase/util/Hash.java index 996295f..9e1d9e2 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/Hash.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/Hash.java @@ -32,7 +32,7 @@ public abstract class Hash { public static final int JENKINS_HASH = 0; /** Constant to denote {@link MurmurHash}. */ public static final int MURMUR_HASH = 1; - + /** * This utility method converts String representation of hash function name * to a symbolic constant. Currently two function types are supported, @@ -49,7 +49,7 @@ public abstract class Hash { return INVALID_HASH; } } - + /** * This utility method converts the name of the configured * hash type to a symbolic constant. @@ -60,7 +60,7 @@ public abstract class Hash { String name = conf.get("hbase.hash.type", "murmur"); return parseHashType(name); } - + /** * Get a singleton instance of hash function of a given type. * @param type predefined hash type @@ -76,7 +76,7 @@ public abstract class Hash { return null; } } - + /** * Get a singleton instance of hash function of a type * defined in the configuration. @@ -87,7 +87,7 @@ public abstract class Hash { int type = getHashType(conf); return getInstance(type); } - + /** * Calculate a hash using all bytes from the input argument, and * a seed of -1. @@ -97,7 +97,7 @@ public abstract class Hash { public int hash(byte[] bytes) { return hash(bytes, bytes.length, -1); } - + /** * Calculate a hash using all bytes from the input argument, * and a provided seed value. 
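The FSUtils hunks above cover the version-file bookkeeping (getVersion, setVersion, checkVersion) and the checkFileSystemAvailable probe. A minimal sketch of how they fit together follows; the rootdir path is hypothetical and the signatures are the ones shown in the hunks.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class FsVersionCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();            // assumed to point at the cluster
        Path rootdir = new Path("hdfs://namenode/hbase");     // hypothetical HBase root
        FileSystem fs = rootdir.getFileSystem(conf);

        // No-op unless fs is a DistributedFileSystem; otherwise fails fast if DFS is down.
        FSUtils.checkFileSystemAvailable(fs);

        // Reads the version file under rootdir; null means no version file yet.
        String version = FSUtils.getVersion(fs, rootdir);
        if (version == null) {
          FSUtils.setVersion(fs, rootdir);                   // stamp the current layout version
        } else {
          FSUtils.checkVersion(fs, rootdir, true);           // prints a message and throws if stale
        }
      }
    }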
@@ -108,7 +108,7 @@ public abstract class Hash { public int hash(byte[] bytes, int initval) { return hash(bytes, bytes.length, initval); } - + /** * Calculate a hash using bytes from 0 to length, and * the provided seed value diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/InfoServer.java b/core/src/main/java/org/apache/hadoop/hbase/util/InfoServer.java index 068c909..98bb55b 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/InfoServer.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/InfoServer.java @@ -37,14 +37,14 @@ import java.util.Map; * "/static/" -> points to common static files (src/webapps/static) * "/" -> the jsp server code from (src/webapps/) */ -public class InfoServer extends HttpServer { +public class InfoServer extends HttpServer { /** * Create a status server on the given port. * The jsp scripts are taken from src/webapps/name. * @param name The name of the server * @param bindAddress address to bind to * @param port The port to use on the server - * @param findPort whether the server should start at the given port and + * @param findPort whether the server should start at the given port and * increment by 1 until it finds a free port. * @throws IOException e */ @@ -71,7 +71,7 @@ public class InfoServer extends HttpServer { this.defaultContexts.put(oldLogsContext, Boolean.FALSE); } // Now do my logs. - // set up the context for "/logs/" if "hadoop.log.dir" property is defined. + // set up the context for "/logs/" if "hadoop.log.dir" property is defined. String logDir = System.getProperty("hbase.log.dir"); if (logDir != null) { Context logContext = new Context(parent, "/logs"); @@ -102,8 +102,8 @@ public class InfoServer extends HttpServer { private static String getWebAppsPath(final String path) throws IOException { URL url = InfoServer.class.getClassLoader().getResource(path); - if (url == null) - throw new IOException("webapps not found in CLASSPATH: " + path); + if (url == null) + throw new IOException("webapps not found in CLASSPATH: " + path); return url.toString(); } diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/core/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java index 20adba7..baabc76 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java @@ -1,6 +1,6 @@ /** * Copyright 2010 The Apache Software Foundation - * + * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class JVMClusterUtil { */ public static class RegionServerThread extends Thread { private final HRegionServer regionServer; - + public RegionServerThread(final HRegionServer r, final int index) { super(r, "RegionServer:" + index); this.regionServer = r; @@ -49,7 +49,7 @@ public class JVMClusterUtil { public HRegionServer getRegionServer() { return this.regionServer; } - + /** * Block until the region server has come online, indicating it is ready * to be used. 
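The Hash hunks above describe a small factory: symbolic constants (JENKINS_HASH, MURMUR_HASH), parseHashType/getHashType for the hbase.hash.type setting, and getInstance overloads. A short usage sketch, using only the signatures visible above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.util.Hash;

    public class HashExample {
      public static void main(String[] args) {
        byte[] key = "row-0001".getBytes();

        // Pick the algorithm explicitly via the symbolic constant.
        Hash murmur = Hash.getInstance(Hash.MURMUR_HASH);
        int h1 = murmur.hash(key);               // seed defaults to -1

        // Or let "hbase.hash.type" (default "murmur") choose the implementation.
        Configuration conf = new Configuration();
        Hash configured = Hash.getInstance(conf);
        int h2 = configured.hash(key, 42 /* initval */);

        System.out.println(h1 + " " + h2);
      }
    }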
@@ -77,7 +77,7 @@ public class JVMClusterUtil { public static JVMClusterUtil.RegionServerThread createRegionServerThread(final Configuration c, final Class hrsc, final int index) throws IOException { - HRegionServer server; + HRegionServer server; try { server = hrsc.getConstructor(Configuration.class).newInstance(c); } catch (Exception e) { diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java b/core/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java index cdb91ee..0c6c607 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java @@ -25,13 +25,13 @@ import java.io.IOException; /** * Produces 32-bit hash for hash table lookup. - * + * *

    lookup3.c, by Bob Jenkins, May 2006, Public Domain.
      *
      * You can use this free for any purpose.  It's in the public domain.
      * It has no warranty.
      * 
    - * + * * @see lookup3.c * @see Hash Functions (and how this * function compares to others such as CRC, MD?, etc @@ -41,9 +41,9 @@ import java.io.IOException; public class JenkinsHash extends Hash { private static long INT_MASK = 0x00000000ffffffffL; private static long BYTE_MASK = 0x00000000000000ffL; - + private static JenkinsHash _instance = new JenkinsHash(); - + public static Hash getInstance() { return _instance; } @@ -55,26 +55,26 @@ public class JenkinsHash extends Hash { /** * taken from hashlittle() -- hash a variable-length key into a 32-bit value - * + * * @param key the key (the unaligned variable-length array of bytes) * @param nbytes number of bytes to include in hash * @param initval can be any integer value * @return a 32-bit value. Every bit of the key affects every bit of the * return value. Two keys differing by one or two bits will have totally * different hash values. - * + * *

    The best hash table sizes are powers of 2. There is no need to do mod * a prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask. * For example, if you need only 10 bits, do * h = (h & hashmask(10)); * In which case, the hash table should have hashsize(10) elements. - * + * *

    If you are hashing n strings byte[][] k, do it like this: * for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h); - * + * *

    By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this * code any way you wish, private, educational, or commercial. It's free. - * + * *

    Use for hash table lookup, or anything where one collision in 2^^32 is * acceptable. Do NOT use for cryptographic purposes. */ @@ -99,16 +99,16 @@ public class JenkinsHash extends Hash { c = (c + (((key[offset + 9] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; c = (c + (((key[offset + 10] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; c = (c + (((key[offset + 11] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; - + /* * mix -- mix 3 32-bit values reversibly. * This is reversible, so any information in (a,b,c) before mix() is * still in (a,b,c) after mix(). - * + * * If four pairs of (a,b,c) inputs are run through mix(), or through * mix() in reverse, there are at least 32 bits of the output that * are sometimes the same for one pair and different for another pair. - * + * * This was tested for: * - pairs that differed by one bit, by two bits, in any combination * of top bits of (a,b,c), or in any combination of bottom bits of @@ -119,22 +119,22 @@ public class JenkinsHash extends Hash { * difference. * - the base values were pseudorandom, all zero but one bit set, or * all zero plus a counter that starts at zero. - * + * * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that * satisfy this are * 4 6 8 16 19 4 * 9 15 3 18 27 15 * 14 9 3 7 17 3 - * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for + * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for * "differ" defined as + with a one-bit base and a two-bit delta. I * used http://burtleburtle.net/bob/hash/avalanche.html to choose * the operations, constants, and arrangements of the variables. - * + * * This does not achieve avalanche. There are input bits of (a,b,c) * that fail to affect some output bits of (a,b,c), especially of a. * The most thoroughly mixed value is c, but it doesn't really even * achieve avalanche in c. - * + * * This allows some parallelism. Read-after-writes are good at doubling * the number of bits affected, so the goal of mixing pulls in the * opposite direction as the goal of parallelism. I did what I could. @@ -151,7 +151,7 @@ public class JenkinsHash extends Hash { * b -= a; b ^= rot(a,19); a += c; \ * c -= b; c ^= rot(b, 4); b += a; \ * } - * + * * mix(a,b,c); */ a = (a - c) & INT_MASK; a ^= rot(c, 4); c = (c + b) & INT_MASK; @@ -195,21 +195,21 @@ public class JenkinsHash extends Hash { } /* * final -- final mixing of 3 32-bit values (a,b,c) into c - * + * * Pairs of (a,b,c) values differing in only a few bits will usually * produce values of c that look totally different. This was tested for * - pairs that differed by one bit, by two bits, in any combination * of top bits of (a,b,c), or in any combination of bottom bits of * (a,b,c). - * + * * - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as * is commonly produced by subtraction) look like a single 1-bit * difference. - * + * * - the base values were pseudorandom, all zero but one bit set, or * all zero plus a counter that starts at zero. 
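The lookup3.c notes above also spell out how to combine hashes over several byte arrays: seed each call with the previous result. A small sketch using only getInstance() and the inherited hash(byte[], int) shown in these hunks:

    import org.apache.hadoop.hbase.util.Hash;
    import org.apache.hadoop.hbase.util.JenkinsHash;

    public class JenkinsHashExample {
      public static void main(String[] args) {
        Hash jenkins = JenkinsHash.getInstance();

        // h = hash(k[i], h): feed the previous hash back in as the seed.
        byte[][] keys = { "alpha".getBytes(), "beta".getBytes(), "gamma".getBytes() };
        int h = 0;
        for (byte[] k : keys) {
          h = jenkins.hash(k, h);
        }
        System.out.println("combined hash = " + h);
      }
    }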
- * + * * These constants passed: * 14 11 25 16 4 14 24 * 12 14 25 16 4 14 24 @@ -217,9 +217,9 @@ public class JenkinsHash extends Hash { * 4 8 15 26 3 22 24 * 10 8 15 26 3 22 24 * 11 8 15 26 3 22 24 - * + * * #define final(a,b,c) \ - * { + * { * c ^= b; c -= rot(b,14); \ * a ^= c; a -= rot(c,11); \ * b ^= a; b -= rot(a,25); \ @@ -228,7 +228,7 @@ public class JenkinsHash extends Hash { * b ^= a; b -= rot(a,14); \ * c ^= b; c -= rot(b,24); \ * } - * + * */ c ^= b; c = (c - rot(b,14)) & INT_MASK; a ^= c; a = (a - rot(c,11)) & INT_MASK; @@ -240,7 +240,7 @@ public class JenkinsHash extends Hash { return (int)(c & INT_MASK); } - + /** * Compute the hash of the specified file * @param args name of file to compute hash of. diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/Keying.java b/core/src/main/java/org/apache/hadoop/hbase/util/Keying.java index e0564f7..d3b83f4 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/Keying.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/Keying.java @@ -37,7 +37,7 @@ public class Keying { /** * Makes a key out of passed URI for use as row name or column qualifier. - * + * * This method runs transforms on the passed URI so it sits better * as a key (or portion-of-a-key) in hbase. The host portion of * the URI authority is reversed so subdomains sort under their parent @@ -49,10 +49,10 @@ public class Keying { * r:http://org.apache.lucene/index.html?query=something#middle * The transforms are reversible. No transform is done if passed URI is * not hierarchical. - * + * *

    If authority userinfo is present, will mess up the sort * (until we do more work).

    - * + * * @param u URL to transform. * @return An opaque URI of artificial 'r' scheme with host portion of URI * authority reversed (if present). @@ -70,10 +70,10 @@ public class Keying { } return SCHEME + m.group(1) + reverseHostname(m.group(2)) + m.group(3); } - + /** * Reverse the {@link #createKey(String)} transform. - * + * * @param s URI made by {@link #createKey(String)}. * @return 'Restored' URI made by reversing the {@link #createKey(String)} * transform. @@ -89,14 +89,14 @@ public class Keying { } return m.group(1) + reverseHostname(m.group(2)) + m.group(3); } - + private static Matcher getMatcher(final String u) { if (u == null || u.length() <= 0) { return null; } return URI_RE_PARSER.matcher(u); } - + private static String reverseHostname(final String hostname) { if (hostname == null) { return ""; diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/core/src/main/java/org/apache/hadoop/hbase/util/Merge.java index dc370ee..c78110e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/Merge.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/Merge.java @@ -85,7 +85,7 @@ public class Merge extends Configured implements Tool { LOG.fatal("File system is not available", e); return -1; } - + // Verify HBase is down LOG.info("Verifying that HBase is not running..."); try { @@ -95,9 +95,9 @@ public class Merge extends Configured implements Tool { } catch (MasterNotRunningException e) { // Expected. Ignore. } - + // Initialize MetaUtils and and get the root of the HBase installation - + this.utils = new MetaUtils(getConf()); this.rootdir = FSUtils.getRootDir(getConf()); try { @@ -119,14 +119,14 @@ public class Merge extends Configured implements Tool { ); return -1; - + } finally { if (this.utils != null) { this.utils.shutdown(); } } } - + /** @return HRegionInfo for merge result */ HRegionInfo getMergedHRegionInfo() { return this.mergeInfo; @@ -150,25 +150,25 @@ public class Merge extends Configured implements Tool { get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); List cells2 = rootRegion.get(get, null).list(); HRegionInfo info2 = Writables.getHRegionInfo((cells2 == null)? null: cells2.get(0).getValue()); - HRegion merged = merge(info1, rootRegion, info2, rootRegion); + HRegion merged = merge(info1, rootRegion, info2, rootRegion); LOG.info("Adding " + merged.getRegionInfo() + " to " + rootRegion.getRegionInfo()); HRegion.addRegionToMETA(rootRegion, merged); merged.close(); } - + private static class MetaScannerListener implements MetaUtils.ScannerListener { private final byte [] region1; private final byte [] region2; private HRegionInfo meta1 = null; private HRegionInfo meta2 = null; - + MetaScannerListener(final byte [] region1, final byte [] region2) { this.region1 = region1; this.region2 = region2; } - + public boolean processRow(HRegionInfo info) { if (meta1 == null && HRegion.rowIsInRange(info, region1)) { meta1 = info; @@ -179,16 +179,16 @@ public class Merge extends Configured implements Tool { } return meta1 == null || (region2 != null && meta2 == null); } - + HRegionInfo getMeta1() { return meta1; } - + HRegionInfo getMeta2() { return meta2; } } - + /* * Merges two regions from a user table. */ @@ -257,7 +257,7 @@ public class Merge extends Configured implements Tool { HRegion.addRegionToMETA(mergeMeta, merged); merged.close(); } - + /* * Actually merge two regions and update their info in the meta region(s) * If the meta is split, meta1 may be different from meta2. 
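Keying.createKey, whose hunks appear above, reverses the host portion of a hierarchical URI and prefixes the artificial 'r' scheme so that subdomains sort under their parent domain. A one-method sketch follows; the input URL is a hypothetical example chosen to match the r: form quoted in the javadoc above.

    import org.apache.hadoop.hbase.util.Keying;

    public class KeyingExample {
      public static void main(String[] args) {
        String key = Keying.createKey("http://lucene.apache.org/index.html?query=something#middle");
        // Expected, per the javadoc above: r:http://org.apache.lucene/index.html?query=something#middle
        System.out.println(key);
      }
    }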
(and we may have @@ -292,21 +292,21 @@ public class Merge extends Configured implements Tool { r1.close(); } } - + // Remove the old regions from meta. // HRegion.merge has already deleted their files - + removeRegionFromMeta(meta1, info1); removeRegionFromMeta(meta2, info2); this.mergeInfo = merged.getRegionInfo(); return merged; } - + /* * Removes a region's meta information from the passed meta * region. - * + * * @param meta META HRegion to be updated * @param regioninfo HRegionInfo of region to remove from meta * @@ -317,8 +317,8 @@ public class Merge extends Configured implements Tool { if (LOG.isDebugEnabled()) { LOG.debug("Removing region: " + regioninfo + " from " + meta); } - - Delete delete = new Delete(regioninfo.getRegionName(), + + Delete delete = new Delete(regioninfo.getRegionName(), System.currentTimeMillis(), null); meta.delete(delete, null, true); } @@ -326,7 +326,7 @@ public class Merge extends Configured implements Tool { /* * Adds a region's meta information from the passed meta * region. - * + * * @param metainfo META HRegionInfo to be updated * @param region HRegion to add to meta * @@ -335,7 +335,7 @@ public class Merge extends Configured implements Tool { private int parseArgs(String[] args) throws IOException { GenericOptionsParser parser = new GenericOptionsParser(getConf(), args); - + String[] remainingArgs = parser.getRemainingArgs(); if (remainingArgs.length != 3) { usage(); @@ -343,7 +343,7 @@ public class Merge extends Configured implements Tool { } tableName = Bytes.toBytes(remainingArgs[0]); isMetaTable = Bytes.compareTo(tableName, HConstants.META_TABLE_NAME) == 0; - + region1 = Bytes.toBytesBinary(remainingArgs[1]); region2 = Bytes.toBytesBinary(remainingArgs[2]); int status = 0; @@ -355,7 +355,7 @@ public class Merge extends Configured implements Tool { } return status; } - + private boolean notInTable(final byte [] tn, final byte [] rn) { if (WritableComparator.compareBytes(tn, 0, tn.length, rn, 0, tn.length) != 0) { LOG.error("Region " + Bytes.toString(rn) + " does not belong to table " + @@ -364,12 +364,12 @@ public class Merge extends Configured implements Tool { } return false; } - + private void usage() { System.err.println( "Usage: bin/hbase merge \n"); } - + public static void main(String[] args) { int status; try { diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java b/core/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java index 1cdcdf5..4481b12 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java @@ -64,14 +64,14 @@ public class MetaUtils { private HRegion rootRegion; private Map metaRegions = Collections.synchronizedSortedMap( new TreeMap(Bytes.BYTES_COMPARATOR)); - - /** Default constructor + + /** Default constructor * @throws IOException e */ public MetaUtils() throws IOException { this(HBaseConfiguration.create()); } - + /** * @param conf Configuration * @throws IOException e @@ -107,7 +107,7 @@ public class MetaUtils { } return this.log; } - + /** * @return HRegion for root region * @throws IOException e @@ -118,10 +118,10 @@ public class MetaUtils { } return this.rootRegion; } - + /** * Open or return cached opened meta region - * + * * @param metaInfo HRegionInfo for meta region * @return meta HRegion * @throws IOException e @@ -135,7 +135,7 @@ public class MetaUtils { } return meta; } - + /** * Closes catalog regions if open. Also closes and deletes the HLog. 
You * must call this method if you want to persist changes made during a @@ -180,18 +180,18 @@ public class MetaUtils { public interface ScannerListener { /** * Callback so client of scanner can process row contents - * + * * @param info HRegionInfo for row * @return false to terminate the scan * @throws IOException e */ public boolean processRow(HRegionInfo info) throws IOException; } - + /** * Scans the root region. For every meta region found, calls the listener with * the HRegionInfo of the meta region. - * + * * @param listener method to be called for each meta region found * @throws IOException e */ @@ -249,7 +249,7 @@ public class MetaUtils { *

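To illustrate the callback contract described above: scanRootRegion calls processRow once per meta region it finds and stops as soon as the listener returns false. A hedged sketch of a listener that simply logs each region, assuming the MetaUtils constructor, scanRootRegion, ScannerListener and shutdown members shown in this hunk:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.MetaUtils;

    // Illustrative listener: print every meta region and keep scanning.
    public class ListMetaRegionsSketch {
      public static void main(String[] args) throws IOException {
        MetaUtils utils = new MetaUtils();  // default constructor shown earlier in this file
        try {
          utils.scanRootRegion(new MetaUtils.ScannerListener() {
            public boolean processRow(HRegionInfo info) throws IOException {
              System.out.println("meta region: " + info);
              return true;  // returning false would terminate the scan
            }
          });
        } finally {
          utils.shutdown();  // closes catalog regions and the HLog, as the javadoc above requires
        }
      }
    }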
    Use for reading meta only. Does not close region when done. * Use {@link #getMetaRegion(HRegionInfo)} instead if writing. Adds * meta region to list that will get a close on {@link #shutdown()}. - * + * * @param metaRegionInfo HRegionInfo for meta region * @param listener method to be called for each meta region found * @throws IOException e @@ -278,7 +278,7 @@ public class MetaUtils { meta.compactStores(); return meta; } - + /** * Set a single region on/offline. * This is a tool to repair tables that have offlined tables in their midst. @@ -310,18 +310,18 @@ public class MetaUtils { HRegionInfo info = Writables.getHRegionInfo(value); Put put = new Put(row); info.setOffline(onlineOffline); - put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, + put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(info)); t.put(put); - + Delete delete = new Delete(row); delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER); - + t.delete(delete); } - + /** * Offline version of the online TableOperation, * org.apache.hadoop.hbase.master.AddColumn. @@ -337,7 +337,7 @@ public class MetaUtils { final HRegion m = getMetaRegion(hri); scanMetaRegion(m, new ScannerListener() { private boolean inTable = true; - + @SuppressWarnings("synthetic-access") public boolean processRow(HRegionInfo info) throws IOException { LOG.debug("Testing " + Bytes.toString(tableName) + " against " + @@ -355,7 +355,7 @@ public class MetaUtils { }}); } } - + /** * Offline version of the online TableOperation, * org.apache.hadoop.hbase.master.DeleteColumn. @@ -370,7 +370,7 @@ public class MetaUtils { final HRegion m = getMetaRegion(hri); scanMetaRegion(m, new ScannerListener() { private boolean inTable = true; - + @SuppressWarnings("synthetic-access") public boolean processRow(HRegionInfo info) throws IOException { if (Bytes.equals(info.getTableDesc().getName(), tableName)) { @@ -393,15 +393,15 @@ public class MetaUtils { }}); } } - + /** * Update COL_REGIONINFO in meta region r with HRegionInfo hri - * + * * @param r region * @param hri region info * @throws IOException e */ - public void updateMETARegionInfo(HRegion r, final HRegionInfo hri) + public void updateMETARegionInfo(HRegion r, final HRegionInfo hri) throws IOException { if (LOG.isDebugEnabled()) { Get get = new Get(hri.getRegionName()); @@ -416,14 +416,14 @@ public class MetaUtils { return; } HRegionInfo h = Writables.getHRegionInfoOrNull(value); - - LOG.debug("Old " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" + + + LOG.debug("Old " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " for " + hri.toString() + " in " + r.toString() + " is: " + h.toString()); } - + Put put = new Put(hri.getRegionName()); - put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, + put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(hri)); r.put(put); @@ -440,8 +440,8 @@ public class MetaUtils { return; } HRegionInfo h = Writables.getHRegionInfoOrNull(value); - LOG.debug("New " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" + - Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " for " + + LOG.debug("New " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" + + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " for " + hri.toString() + " in " + r.toString() + " is: " + h.toString()); } } @@ -464,7 +464,7 @@ public class MetaUtils { // Return 
all meta regions that contain the passed tablename. scanRootRegion(new ScannerListener() { private final Log SL_LOG = LogFactory.getLog(this.getClass()); - + public boolean processRow(HRegionInfo info) throws IOException { SL_LOG.debug("Testing " + info); if (Bytes.equals(info.getTableDesc().getName(), @@ -476,7 +476,7 @@ public class MetaUtils { }}); return result; } - + /** * @param n Table name. * @return True if a catalog table, -ROOT- or .META. diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java b/core/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java index 7e0d82c..fcf543e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java @@ -23,17 +23,17 @@ package org.apache.hadoop.hbase.util; /** * This is a very fast, non-cryptographic hash suitable for general hash-based * lookup. See http://murmurhash.googlepages.com/ for more details. - * + * *

    The C version of MurmurHash 2.0 found at that site was ported * to Java by Andrzej Bialecki (ab at getopt org).

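As a usage note for the class below, hash takes the byte array, the number of bytes to consider, and a seed; the same inputs always produce the same 32-bit value. A small sketch assuming the getInstance() and hash(byte[], int, int) signatures visible in this hunk:

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.MurmurHash;

    public class MurmurHashSketch {
      public static void main(String[] args) {
        byte[] data = Bytes.toBytes("row-key-1234");
        // Hash the whole array with an arbitrary seed.
        int h = MurmurHash.getInstance().hash(data, data.length, -1);
        System.out.println("murmur hash = " + h);
      }
    }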
    */ public class MurmurHash extends Hash { private static MurmurHash _instance = new MurmurHash(); - + public static Hash getInstance() { return _instance; } - + @Override public int hash(byte[] data, int length, int seed) { int m = 0x5bd1e995; diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/Pair.java b/core/src/main/java/org/apache/hadoop/hbase/util/Pair.java index 9d22777..ff296b6 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/Pair.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/Pair.java @@ -24,8 +24,8 @@ import java.io.Serializable; /** * A generic class for pairs. - * @param - * @param + * @param + * @param */ public class Pair implements Serializable { diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java b/core/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java index b855538..e5b4a5f 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java @@ -27,7 +27,7 @@ import java.util.concurrent.atomic.AtomicBoolean; /** * Sleeper for current thread. * Sleeps for passed period. Also checks passed boolean and if interrupted, - * will return if the flag is set (rather than go back to sleep until its + * will return if the flag is set (rather than go back to sleep until its * sleep time is up). */ public class Sleeper { @@ -35,7 +35,7 @@ public class Sleeper { private final int period; private final AtomicBoolean stop; private static final long MINIMAL_DELTA_FOR_LOGGING = 10000; - + private final Object sleepLock = new Object(); private boolean triggerWake = false; @@ -47,7 +47,7 @@ public class Sleeper { this.period = sleep; this.stop = stop; } - + /** * Sleep for period. */ @@ -65,7 +65,7 @@ public class Sleeper { sleepLock.notify(); } } - + /** * Sleep for period adjusted by passed startTime * @param startTime Time some task started previous to now. Time to sleep diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueMap.java b/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueMap.java index 73e5722..6294a52 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueMap.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueMap.java @@ -29,7 +29,7 @@ import java.util.Set; /** * A Map that uses Soft Reference values internally. Use as a simple cache. - * + * * @param key class * @param value class */ @@ -37,11 +37,11 @@ public class SoftValueMap implements Map { private final Map> internalMap = new HashMap>(); private final ReferenceQueue rq; - + public SoftValueMap() { this(new ReferenceQueue()); } - + public SoftValueMap(final ReferenceQueue rq) { this.rq = rq; } @@ -67,12 +67,12 @@ public class SoftValueMap implements Map { new SoftValue(key, value, this.rq)); return oldValue == null ? 
null : oldValue.get(); } - + @SuppressWarnings("unchecked") public void putAll(Map map) { throw new RuntimeException("Not implemented"); } - + @SuppressWarnings({"SuspiciousMethodCalls"}) public V get(Object key) { checkReferences(); @@ -94,16 +94,16 @@ public class SoftValueMap implements Map { } public boolean containsKey(Object key) { - checkReferences(); + checkReferences(); return this.internalMap.containsKey(key); } - + public boolean containsValue(Object value) { /* checkReferences(); return internalMap.containsValue(value);*/ throw new UnsupportedOperationException("Don't support containsValue!"); } - + public boolean isEmpty() { checkReferences(); return this.internalMap.isEmpty(); diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java b/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java index 6718505..709892c 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java @@ -33,19 +33,19 @@ import java.util.TreeSet; * A SortedMap implementation that uses Soft Reference values * internally to make it play well with the GC when in a low-memory * situation. Use as a cache where you also need SortedMap functionality. - * + * * @param key class * @param value class */ public class SoftValueSortedMap implements SortedMap { private final SortedMap> internalMap; private final ReferenceQueue rq = new ReferenceQueue(); - + /** Constructor */ public SoftValueSortedMap() { this(new TreeMap>()); } - + /** * Constructor * @param c comparator @@ -53,7 +53,7 @@ public class SoftValueSortedMap implements SortedMap { public SoftValueSortedMap(final Comparator c) { this(new TreeMap>(c)); } - + /** For headMap and tailMap support * @param original object to wrap */ @@ -83,12 +83,12 @@ public class SoftValueSortedMap implements SortedMap { new SoftValue(key, value, this.rq)); return oldValue == null ? 
null : oldValue.get(); } - + @SuppressWarnings("unchecked") public synchronized void putAll(Map map) { throw new RuntimeException("Not implemented"); } - + @SuppressWarnings({"SuspiciousMethodCalls"}) public synchronized V get(Object key) { checkReferences(); @@ -110,10 +110,10 @@ public class SoftValueSortedMap implements SortedMap { } public synchronized boolean containsKey(Object key) { - checkReferences(); + checkReferences(); return this.internalMap.containsKey(key); } - + public synchronized boolean containsValue(Object value) { /* checkReferences(); return internalMap.containsValue(value);*/ @@ -129,22 +129,22 @@ public class SoftValueSortedMap implements SortedMap { checkReferences(); return internalMap.lastKey(); } - + public synchronized SoftValueSortedMap headMap(K key) { checkReferences(); return new SoftValueSortedMap(this.internalMap.headMap(key)); } - + public synchronized SoftValueSortedMap tailMap(K key) { checkReferences(); return new SoftValueSortedMap(this.internalMap.tailMap(key)); } - + public synchronized SoftValueSortedMap subMap(K fromKey, K toKey) { checkReferences(); return new SoftValueSortedMap(this.internalMap.subMap(fromKey, toKey)); } - + public synchronized boolean isEmpty() { checkReferences(); return this.internalMap.isEmpty(); diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/Strings.java b/core/src/main/java/org/apache/hadoop/hbase/util/Strings.java index 7fef3e3..c2cad2e 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/Strings.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/Strings.java @@ -25,7 +25,7 @@ package org.apache.hadoop.hbase.util; public class Strings { public final static String DEFAULT_SEPARATOR = "="; public final static String DEFAULT_KEYVALUE_SEPARATOR = ", "; - + /** * Append to a StringBuilder a key/value. * Uses default separators. diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/Threads.java b/core/src/main/java/org/apache/hadoop/hbase/util/Threads.java index 61141a4..26f470d 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/Threads.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/Threads.java @@ -31,7 +31,7 @@ import java.lang.Thread.UncaughtExceptionHandler; */ public class Threads { protected static final Log LOG = LogFactory.getLog(Threads.class); - + /** * Utility method that sets name, daemon status and starts passed thread. * @param t thread to frob @@ -42,7 +42,7 @@ public class Threads { final String name) { return setDaemonThreadRunning(t, name, null); } - + /** * Utility method that sets name, daemon status and starts passed thread. * @param t thread to frob diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java b/core/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java index 4574212..24e98df 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.VersionAnnotation; public class VersionInfo { private static Package myPackage; private static VersionAnnotation version; - + static { myPackage = VersionAnnotation.class.getPackage(); version = myPackage.getAnnotation(VersionAnnotation.class); @@ -42,7 +42,7 @@ public class VersionInfo { static Package getPackage() { return myPackage; } - + /** * Get the hbase version. * @return the hbase version string, eg. "0.6.3-dev" @@ -50,7 +50,7 @@ public class VersionInfo { public static String getVersion() { return version != null ? 
version.version() : "Unknown"; } - + /** * Get the subversion revision number for the root directory * @return the revision number, eg. "451451" @@ -58,7 +58,7 @@ public class VersionInfo { public static String getRevision() { return version != null ? version.revision() : "Unknown"; } - + /** * The date that hbase was compiled. * @return the compilation date in unix date format @@ -66,7 +66,7 @@ public class VersionInfo { public static String getDate() { return version != null ? version.date() : "Unknown"; } - + /** * The user that compiled hbase. * @return the username of the user @@ -74,7 +74,7 @@ public class VersionInfo { public static String getUser() { return version != null ? version.user() : "Unknown"; } - + /** * Get the subversion URL for the root hbase directory. * @return the url @@ -82,7 +82,7 @@ public class VersionInfo { public static String getUrl() { return version != null ? version.url() : "Unknown"; } - + public static void main(String[] args) { System.out.println("HBase " + getVersion()); System.out.println("Subversion " + getUrl() + " -r " + getRevision()); diff --git a/core/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/core/src/main/java/org/apache/hadoop/hbase/util/Writables.java index 26623a7..4bff615 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/util/Writables.java +++ b/core/src/main/java/org/apache/hadoop/hbase/util/Writables.java @@ -35,7 +35,7 @@ import java.io.IOException; public class Writables { /** * @param w writable - * @return The bytes of w gotten by running its + * @return The bytes of w gotten by running its * {@link Writable#write(java.io.DataOutput)} method. * @throws IOException e * @see #getWritable(byte[], Writable) @@ -118,7 +118,7 @@ public class Writables { throws IOException { return (HRegionInfo)getWritable(bytes, new HRegionInfo()); } - + /** * @param bytes serialized bytes * @return A HRegionInfo instance built out of passed bytes diff --git a/core/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java b/core/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java index 8ba5d64..106fcc0 100644 --- a/core/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java +++ b/core/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java @@ -103,7 +103,7 @@ public class ZooKeeperWrapper implements HConstants { "master"); String stateZNodeName = conf.get("zookeeper.znode.state", "shutdown"); - + rootRegionZNode = getZNode(parentZNode, rootServerZNodeName); rsZNode = getZNode(parentZNode, rsZNodeName); masterElectionZNode = getZNode(parentZNode, masterAddressZNodeName); @@ -193,31 +193,31 @@ public class ZooKeeperWrapper implements HConstants { } return sb.toString(); } - + /** * Gets the statistics from the given server. Uses a 1 minute timeout. - * + * * @param server The server to get the statistics from. * @return The array of response strings. * @throws IOException When the socket communication fails. */ - public String[] getServerStats(String server) + public String[] getServerStats(String server) throws IOException { return getServerStats(server, 60 * 1000); } - + /** * Gets the statistics from the given server. - * + * * @param server The server to get the statistics from. * @param timeout The socket timeout to use. * @return The array of response strings. * @throws IOException When the socket communication fails. 
*/ - public String[] getServerStats(String server, int timeout) + public String[] getServerStats(String server, int timeout) throws IOException { String[] sp = server.split(":"); - Socket socket = new Socket(sp[0], + Socket socket = new Socket(sp[0], sp.length > 1 ? Integer.parseInt(sp[1]) : 2181); socket.setSoTimeout(timeout); PrintWriter out = new PrintWriter(socket.getOutputStream(), true); @@ -305,7 +305,7 @@ public class ZooKeeperWrapper implements HConstants { public HServerAddress readMasterAddress(Watcher watcher) { return readAddress(masterElectionZNode, watcher); } - + /** * Watch the state of the cluster, up or down * @param watcher Watcher to set on cluster state node @@ -319,7 +319,7 @@ public class ZooKeeperWrapper implements HConstants { LOG.warn("Failed to check on ZNode " + clusterStateZNode, e); } } - + /** * Set the cluster state, up or down * @param up True to write the node, false to delete it @@ -332,7 +332,7 @@ public class ZooKeeperWrapper implements HConstants { try { if(up) { byte[] data = Bytes.toBytes("up"); - zooKeeper.create(clusterStateZNode, data, + zooKeeper.create(clusterStateZNode, data, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); LOG.debug("State node wrote in ZooKeeper"); } else { @@ -579,7 +579,7 @@ public class ZooKeeperWrapper implements HConstants { return false; } - + /** * Scans the regions servers directory * @return A list of server addresses @@ -587,7 +587,7 @@ public class ZooKeeperWrapper implements HConstants { public List scanRSDirectory() { return scanAddressDirectory(rsZNode, null); } - + /** * Method used to make sure the region server directory is empty. * @@ -605,7 +605,7 @@ public class ZooKeeperWrapper implements HConstants { LOG.warn("Failed to delete " + rsZNode + " znodes in ZooKeeper: " + e); } } - + private boolean checkExistenceOf(String path) { Stat stat = null; try { @@ -630,7 +630,7 @@ public class ZooKeeperWrapper implements HConstants { LOG.warn("Failed to close connection with ZooKeeper"); } } - + public String getZNode(String parentZNode, String znodeName) { return znodeName.charAt(0) == ZNODE_PATH_SEPARATOR ? znodeName : joinPath(parentZNode, znodeName); @@ -731,6 +731,6 @@ public class ZooKeeperWrapper implements HConstants { return conf.get(ZOOKEEPER_QUORUM)+":"+ conf.get(ZOOKEEPER_ZNODE_PARENT); } - - + + } diff --git a/core/src/main/resources/hbase-default.xml b/core/src/main/resources/hbase-default.xml index 5e08625..f93782a 100644 --- a/core/src/main/resources/hbase-default.xml +++ b/core/src/main/resources/hbase-default.xml @@ -66,7 +66,7 @@ 2097152 Size of the write buffer in bytes. A bigger buffer takes more memory -- on both the client and server side since server instantiates - the passed write buffer to process it -- but reduces the number of RPC. + the passed write buffer to process it -- but reduces the number of RPC. For an estimate of server-side memory-used, evaluate hbase.client.write.buffer * hbase.regionserver.handler.count @@ -143,7 +143,7 @@ instance. This is to set an upper boundary for a single entry saved in a storage file. Since they cannot be split it helps avoiding that a region cannot be split any further because the data is too large. It seems wise - to set this to a fraction of the maximum region size. Setting it to zero + to set this to a fraction of the maximum region size. Setting it to zero or less disables the check. 
@@ -231,7 +231,7 @@ hbase.regionserver.dns.interface default - The name of the Network Interface from which a region server + The name of the Network Interface from which a region server should report its IP address. @@ -246,7 +246,7 @@ hbase.master.dns.interface default - The name of the Network Interface from which a master + The name of the Network Interface from which a master should report its IP address. @@ -254,14 +254,14 @@ hbase.master.dns.nameserver default The host name or IP address of the name server (DNS) - which a master should use to determine the host name used + which a master should use to determine the host name used for communication and display purposes. hbase.regionserver.global.memstore.upperLimit 0.4 - Maximum size of all memstores in a region server before new + Maximum size of all memstores in a region server before new updates are blocked and flushes are forced. Defaults to 40% of heap @@ -269,12 +269,12 @@ hbase.regionserver.global.memstore.lowerLimit 0.35 When memstores are being forced to flush to make room in - memory, keep flushing until we hit this mark. Defaults to 30% of heap. + memory, keep flushing until we hit this mark. Defaults to 30% of heap. This value equal to hbase.regionserver.global.memstore.upperLimit causes - the minimum possible flushing to occur when updates are blocked due to + the minimum possible flushing to occur when updates are blocked due to memstore limiting. - + hbase.hbasemaster.maxregionopen 120000 @@ -359,7 +359,7 @@ During a compaction, updates cannot be flushed to disk. Long compactions require memory sufficient to carry the logging of all updates across the duration of the compaction. - + If too large, clients timeout during compaction. diff --git a/core/src/test/java/org/apache/hadoop/hbase/AbstractMergeTestBase.java b/core/src/test/java/org/apache/hadoop/hbase/AbstractMergeTestBase.java index 8f677ca..7be9762 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/AbstractMergeTestBase.java +++ b/core/src/test/java/org/apache/hadoop/hbase/AbstractMergeTestBase.java @@ -39,27 +39,27 @@ public abstract class AbstractMergeTestBase extends HBaseClusterTestCase { protected HTableDescriptor desc; protected ImmutableBytesWritable value; protected boolean startMiniHBase; - + public AbstractMergeTestBase() { this(true); } - - /** constructor + + /** constructor * @param startMiniHBase */ public AbstractMergeTestBase(boolean startMiniHBase) { super(); - + this.startMiniHBase = startMiniHBase; - + // We will use the same value for the rows as that is not really important here - + String partialValue = String.valueOf(System.currentTimeMillis()); StringBuilder val = new StringBuilder(); while(val.length() < 1024) { val.append(partialValue); } - + try { value = new ImmutableBytesWritable( val.toString().getBytes(HConstants.UTF8_ENCODING)); @@ -81,10 +81,10 @@ public abstract class AbstractMergeTestBase extends HBaseClusterTestCase { public void preHBaseClusterSetup() throws Exception { conf.setLong("hbase.hregion.max.filesize", 64L * 1024L * 1024L); - // We create three data regions: The first is too large to merge since it - // will be > 64 MB in size. The second two will be smaller and will be + // We create three data regions: The first is too large to merge since it + // will be > 64 MB in size. The second two will be smaller and will be // selected for merging. - + // To ensure that the first region is larger than 64MB we need to write at // least 65536 rows. 
We will make certain by writing 70000 @@ -103,12 +103,12 @@ public abstract class AbstractMergeTestBase extends HBaseClusterTestCase { createAregion(row_70001, row_80001, 70001, 10000), createAregion(row_80001, null, 80001, 11000) }; - + // Now create the root and meta regions and insert the data regions // created above into the meta createRootAndMetaRegions(); - + for(int i = 0; i < regions.length; i++) { HRegion.addRegionToMETA(meta, regions[i]); } @@ -118,9 +118,9 @@ public abstract class AbstractMergeTestBase extends HBaseClusterTestCase { private HRegion createAregion(byte [] startKey, byte [] endKey, int firstRow, int nrows) throws IOException { - + HRegion region = createNewHRegion(desc, startKey, endKey); - + System.out.println("created region " + Bytes.toString(region.getRegionName())); diff --git a/core/src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java b/core/src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java index 837e956..500ab70 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java +++ b/core/src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java @@ -51,22 +51,22 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase { public HBaseClusterTestCase() { this(1); } - + /** * Start a MiniHBaseCluster with regionServers region servers in-process to * start with. Also, start a MiniDfsCluster before starting the hbase cluster. * The configuration used will be edited so that this works correctly. * @param regionServers number of region servers to start. - */ + */ public HBaseClusterTestCase(int regionServers) { this(regionServers, true); } - + /** in-process to * start with. Optionally, startDfs indicates if a MiniDFSCluster should be * started. If startDfs is false, the assumption is that an external DFS is * configured in hbase-site.xml and is already started, or you have started a - * MiniDFSCluster on your own and edited the configuration in memory. (You + * MiniDFSCluster on your own and edited the configuration in memory. (You * can modify the config used by overriding the preHBaseClusterSetup method.) * @param regionServers number of region servers to start. * @param startDfs set to true if MiniDFS should be started @@ -83,12 +83,12 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase { /** * Subclass hook. - * + * * Run after dfs is ready but before hbase cluster is started up. */ protected void preHBaseClusterSetup() throws Exception { // continue - } + } /** * Actually start the MiniHBase instance. @@ -110,13 +110,13 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase { new HTable(conf, HConstants.META_TABLE_NAME); } } - + /** * Run after hbase cluster is started up. */ protected void postHBaseClusterSetup() throws Exception { // continue - } + } @Override protected void setUp() throws Exception { @@ -139,9 +139,9 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase { // do the super setup now. if we had done it first, then we would have // gotten our conf all mangled and a local fs started up. super.setUp(); - + // run the pre-cluster setup - preHBaseClusterSetup(); + preHBaseClusterSetup(); // start the instance hBaseClusterSetup(); @@ -194,7 +194,7 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase { // "Temporary end-of-test thread dump debugging HADOOP-2040: " + getName()); } - + /** * Use this utility method debugging why cluster won't go down. On a * period it throws a thread dump. 
Method ends when all cluster diff --git a/core/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/core/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java index bee9b6c..4258737 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java +++ b/core/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java @@ -72,11 +72,11 @@ public abstract class HBaseTestCase extends TestCase { protected static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR}; protected String START_KEY; protected static final int MAXVERSIONS = 3; - + static { initialize(); } - + public volatile HBaseConfiguration conf; /** constructor */ @@ -84,7 +84,7 @@ public abstract class HBaseTestCase extends TestCase { super(); init(); } - + /** * @param name */ @@ -92,7 +92,7 @@ public abstract class HBaseTestCase extends TestCase { super(name); init(); } - + private void init() { conf = new HBaseConfiguration(); try { @@ -102,7 +102,7 @@ public abstract class HBaseTestCase extends TestCase { fail(); } } - + /** * Note that this method must be called after the mini hdfs cluster has * started or we end up with a local file system. @@ -131,7 +131,7 @@ public abstract class HBaseTestCase extends TestCase { throw e; } } - + @Override protected void tearDown() throws Exception { try { @@ -158,11 +158,11 @@ public abstract class HBaseTestCase extends TestCase { Path rootdir = filesystem.makeQualified( new Path(conf.get(HConstants.HBASE_DIR))); filesystem.mkdirs(rootdir); - + return HRegion.createHRegion(new HRegionInfo(desc, startKey, endKey), rootdir, conf); } - + protected HRegion openClosedRegion(final HRegion closedRegion) throws IOException { HRegion r = new HRegion(closedRegion.getBaseDir(), closedRegion.getLog(), @@ -171,7 +171,7 @@ public abstract class HBaseTestCase extends TestCase { r.initialize(null, null); return r; } - + /** * Create a table of name name with {@link COLUMNS} for * families. @@ -181,7 +181,7 @@ public abstract class HBaseTestCase extends TestCase { protected HTableDescriptor createTableDescriptor(final String name) { return createTableDescriptor(name, MAXVERSIONS); } - + /** * Create a table of name name with {@link COLUMNS} for * families. @@ -204,7 +204,7 @@ public abstract class HBaseTestCase extends TestCase { false, HConstants.REPLICATION_SCOPE_LOCAL)); return htd; } - + /** * Add content to region r on the passed column * column. @@ -261,13 +261,13 @@ public abstract class HBaseTestCase extends TestCase { throws IOException { return addContent(updater, columnFamily, null, startKeyBytes, endKey, -1); } - + protected static long addContent(final Incommon updater, final String family, final String column, final byte [] startKeyBytes, final byte [] endKey) throws IOException { return addContent(updater, family, column, startKeyBytes, endKey, -1); } - + /** * Add content to region r on the passed column * column. @@ -337,7 +337,7 @@ public abstract class HBaseTestCase extends TestCase { } catch (IOException ex) { ex.printStackTrace(); throw ex; - } + } } catch (RuntimeException ex) { ex.printStackTrace(); throw ex; @@ -353,7 +353,7 @@ public abstract class HBaseTestCase extends TestCase { } return count; } - + /** * Implementors can flushcache. */ @@ -363,16 +363,16 @@ public abstract class HBaseTestCase extends TestCase { */ public void flushcache() throws IOException; } - + /** * Interface used by tests so can do common operations against an HTable * or an HRegion. - * + * * TOOD: Come up w/ a better name for this interface. 
*/ public static interface Incommon { /** - * + * * @param delete * @param lockid * @param writeToWAL @@ -388,7 +388,7 @@ public abstract class HBaseTestCase extends TestCase { public void put(Put put) throws IOException; public Result get(Get get) throws IOException; - + /** * @param family * @param qualifiers @@ -401,35 +401,35 @@ public abstract class HBaseTestCase extends TestCase { byte [] firstRow, long ts) throws IOException; } - + /** * A class that makes a {@link Incommon} out of a {@link HRegion} */ public static class HRegionIncommon implements Incommon, FlushCache { final HRegion region; - + /** * @param HRegion */ public HRegionIncommon(final HRegion HRegion) { this.region = HRegion; } - + public void put(Put put) throws IOException { region.put(put); } - + public void delete(Delete delete, Integer lockid, boolean writeToWAL) throws IOException { this.region.delete(delete, lockid, writeToWAL); } - + public Result get(Get get) throws IOException { return region.get(get, null); } - + public ScannerIncommon getScanner(byte [] family, byte [][] qualifiers, - byte [] firstRow, long ts) + byte [] firstRow, long ts) throws IOException { Scan scan = new Scan(firstRow); if(qualifiers == null || qualifiers.length == 0) { @@ -440,14 +440,14 @@ public abstract class HBaseTestCase extends TestCase { } } scan.setTimeRange(0, ts); - return new + return new InternalScannerIncommon(region.getScanner(scan)); } - + public Result get(Get get, Integer lockid) throws IOException{ return this.region.get(get, lockid); } - + public void flushcache() throws IOException { this.region.flushcache(); @@ -467,23 +467,23 @@ public abstract class HBaseTestCase extends TestCase { super(); this.table = table; } - + public void put(Put put) throws IOException { table.put(put); } - - + + public void delete(Delete delete, Integer lockid, boolean writeToWAL) throws IOException { this.table.delete(delete); } - + public Result get(Get get) throws IOException { return table.get(get); } public ScannerIncommon getScanner(byte [] family, byte [][] qualifiers, - byte [] firstRow, long ts) + byte [] firstRow, long ts) throws IOException { Scan scan = new Scan(firstRow); if(qualifiers == null || qualifiers.length == 0) { @@ -494,25 +494,25 @@ public abstract class HBaseTestCase extends TestCase { } } scan.setTimeRange(0, ts); - return new + return new ClientScannerIncommon(table.getScanner(scan)); } } - - public interface ScannerIncommon + + public interface ScannerIncommon extends Iterable { public boolean next(List values) throws IOException; - + public void close() throws IOException; } - + public static class ClientScannerIncommon implements ScannerIncommon { ResultScanner scanner; public ClientScannerIncommon(ResultScanner scanner) { this.scanner = scanner; } - + public boolean next(List values) throws IOException { Result results = scanner.next(); @@ -523,38 +523,38 @@ public abstract class HBaseTestCase extends TestCase { values.addAll(results.list()); return true; } - + public void close() throws IOException { scanner.close(); } - + @SuppressWarnings("unchecked") public Iterator iterator() { return scanner.iterator(); } } - + public static class InternalScannerIncommon implements ScannerIncommon { InternalScanner scanner; - + public InternalScannerIncommon(InternalScanner scanner) { this.scanner = scanner; } - + public boolean next(List results) throws IOException { return scanner.next(results); } - + public void close() throws IOException { scanner.close(); } - + public Iterator iterator() { throw new 
UnsupportedOperationException(); } } - + // protected void assertCellEquals(final HRegion region, final byte [] row, // final byte [] column, final long timestamp, final String value) // throws IOException { @@ -565,11 +565,11 @@ public abstract class HBaseTestCase extends TestCase { // cell_value); // } else { // if (cell_value == null) { -// fail(Bytes.toString(column) + " at timestamp " + timestamp + +// fail(Bytes.toString(column) + " at timestamp " + timestamp + // "\" was expected to be \"" + value + " but was null"); // } // if (cell_value != null) { -// assertEquals(Bytes.toString(column) + " at timestamp " +// assertEquals(Bytes.toString(column) + " at timestamp " // + timestamp, value, new String(cell_value.getValue())); // } // } @@ -582,30 +582,30 @@ public abstract class HBaseTestCase extends TestCase { Get get = new Get(row); get.setTimeStamp(timestamp); Result res = region.get(get, null); - NavigableMap>> map = + NavigableMap>> map = res.getMap(); byte [] res_value = map.get(family).get(qualifier).get(timestamp); - + if (value == null) { assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) + " at timestamp " + timestamp, null, res_value); } else { if (res_value == null) { - fail(Bytes.toString(family) + " " + Bytes.toString(qualifier) + - " at timestamp " + timestamp + "\" was expected to be \"" + + fail(Bytes.toString(family) + " " + Bytes.toString(qualifier) + + " at timestamp " + timestamp + "\" was expected to be \"" + Bytes.toStringBinary(value) + " but was null"); } if (res_value != null) { assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) + - " at timestamp " + + " at timestamp " + timestamp, value, new String(res_value)); } } } - + /** * Initializes parameters used in the test environment: - * + * * Sets the configuration parameter TEST_DIRECTORY_KEY if not already set. * Sets the boolean debugging if "DEBUGGING" is set in the environment. * If debugging is enabled, reconfigures logging so that the root log level is @@ -620,7 +620,7 @@ public abstract class HBaseTestCase extends TestCase { /** * Common method to close down a MiniDFSCluster and the associated file system - * + * * @param cluster */ public static void shutdownDfs(MiniDFSCluster cluster) { @@ -645,14 +645,14 @@ public abstract class HBaseTestCase extends TestCase { } } } - + protected void createRootAndMetaRegions() throws IOException { root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir, conf); - meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir, + meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir, conf); HRegion.addRegionToMETA(root, meta); } - + protected void closeRootAndMeta() throws IOException { if (meta != null) { meta.close(); diff --git a/core/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/core/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 52467f5..be97d3c 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/core/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -138,7 +138,7 @@ public class HBaseTestingUtility { /** * Start up a minicluster of hbase, dfs, and zookeeper. - * @throws Exception + * @throws Exception */ public void startMiniCluster() throws Exception { startMiniCluster(1); @@ -148,7 +148,7 @@ public class HBaseTestingUtility { * Call this if you only want a zk cluster. * @see #startMiniZKCluster() if you want zk + dfs + hbase mini cluster. 
* @throws Exception - * @see #shutdownMiniZKCluster() + * @see #shutdownMiniZKCluster() */ public void startMiniZKCluster() throws Exception { isRunningCluster(); @@ -169,7 +169,7 @@ public class HBaseTestingUtility { * @see #startMiniZKCluster() */ public void shutdownMiniZKCluster() throws IOException { - if (this.zkCluster != null) this.zkCluster.shutdown(); + if (this.zkCluster != null) this.zkCluster.shutdown(); } /** @@ -203,7 +203,7 @@ public class HBaseTestingUtility { // the TEST_DIRECTORY_KEY to make bad blocks, a feature we are not using, // but otherwise, just in constructor. System.setProperty(TEST_DIRECTORY_KEY, oldBuildTestDir); - + // Mangle conf so fs parameter points to minidfs we just started up FileSystem fs = this.dfsCluster.getFileSystem(); this.conf.set("fs.defaultFS", fs.getUri().toString()); @@ -279,7 +279,7 @@ public class HBaseTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(byte[] tableName, byte[] family) + public HTable createTable(byte[] tableName, byte[] family) throws IOException{ return createTable(tableName, new byte[][]{family}); } @@ -291,7 +291,7 @@ public class HBaseTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(byte[] tableName, byte[][] families) + public HTable createTable(byte[] tableName, byte[][] families) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for(byte[] family : families) { @@ -408,16 +408,16 @@ public class HBaseTestingUtility { } return rowCount; } - + /** * Creates many regions names "aaa" to "zzz". - * + * * @param table The table to use for the data. * @param columnFamily The family to insert the data into. * @return count of regions created. * @throws IOException When creating the regions fails. */ - public int createMultiRegions(HTable table, byte[] columnFamily) + public int createMultiRegions(HTable table, byte[] columnFamily) throws IOException { return createMultiRegions(getConfiguration(), table, columnFamily); } @@ -431,11 +431,11 @@ public class HBaseTestingUtility { * @throws IOException When creating the regions fails. */ public int createMultiRegions(final Configuration c, final HTable table, - final byte[] columnFamily) + final byte[] columnFamily) throws IOException { byte[][] KEYS = { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"), - Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), + Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"), @@ -452,18 +452,18 @@ public class HBaseTestingUtility { htd.addFamily(hcd); } // remove empty region - this is tricky as the mini cluster during the test - // setup already has the ",,123456789" row with an empty start - // and end key. Adding the custom regions below adds those blindly, - // including the new start region from empty to "bbb". lg + // setup already has the ",,123456789" row with an empty start + // and end key. Adding the custom regions below adds those blindly, + // including the new start region from empty to "bbb". 
lg List rows = getMetaTableRows(); // add custom ones int count = 0; for (int i = 0; i < KEYS.length; i++) { int j = (i + 1) % KEYS.length; - HRegionInfo hri = new HRegionInfo(table.getTableDescriptor(), + HRegionInfo hri = new HRegionInfo(table.getTableDescriptor(), KEYS[i], KEYS[j]); Put put = new Put(hri.getRegionName()); - put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, + put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(hri)); meta.put(put); LOG.info("createMultiRegions: inserted " + hri.toString()); @@ -471,7 +471,7 @@ public class HBaseTestingUtility { } // see comment above, remove "old" (or previous) single region for (byte[] row : rows) { - LOG.info("createMultiRegions: deleting meta row -> " + + LOG.info("createMultiRegions: deleting meta row -> " + Bytes.toStringBinary(row)); meta.delete(new Delete(row)); } @@ -491,7 +491,7 @@ public class HBaseTestingUtility { List rows = new ArrayList(); ResultScanner s = t.getScanner(new Scan()); for (Result result : s) { - LOG.info("getMetaTableRows: row -> " + + LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow())); rows.add(result.getRow()); } @@ -509,7 +509,7 @@ public class HBaseTestingUtility { ArrayList deletes = new ArrayList(); ResultScanner s = t.getScanner(new Scan()); for (Result result : s) { - LOG.info("emptyMetaTable: remove row -> " + + LOG.info("emptyMetaTable: remove row -> " + Bytes.toStringBinary(result.getRow())); Delete del = new Delete(result.getRow()); deletes.add(del); @@ -517,9 +517,9 @@ public class HBaseTestingUtility { s.close(); t.delete(deletes); } - + /** - * Starts a MiniMRCluster with a default number of + * Starts a MiniMRCluster with a default number of * TaskTracker's. * * @throws IOException When starting the cluster fails. @@ -527,7 +527,7 @@ public class HBaseTestingUtility { public void startMiniMapReduceCluster() throws IOException { startMiniMapReduceCluster(2); } - + /** * Starts a MiniMRCluster. * @@ -540,13 +540,13 @@ public class HBaseTestingUtility { Configuration c = getConfiguration(); System.setProperty("hadoop.log.dir", c.get("hadoop.log.dir")); c.set("mapred.output.dir", c.get("hadoop.tmp.dir")); - mrCluster = new MiniMRCluster(servers, + mrCluster = new MiniMRCluster(servers, FileSystem.get(c).getUri().toString(), 1); LOG.info("Mini mapreduce cluster started"); } - + /** - * Stops the previously started MiniMRCluster. + * Stops the previously started MiniMRCluster. */ public void shutdownMiniMapReduceCluster() { LOG.info("Stopping mini mapreduce cluster..."); @@ -608,13 +608,13 @@ public class HBaseTestingUtility { /** * Get the HBase cluster. - * + * * @return hbase cluster */ public MiniHBaseCluster getHBaseCluster() { return hbaseCluster; } - + /** * Returns a HBaseAdmin instance. * @@ -627,9 +627,9 @@ public class HBaseTestingUtility { } return hbaseAdmin; } - + /** - * Closes the named region. + * Closes the named region. * * @param regionName The region to close. * @throws IOException @@ -637,9 +637,9 @@ public class HBaseTestingUtility { public void closeRegion(String regionName) throws IOException { closeRegion(Bytes.toBytes(regionName)); } - + /** - * Closes the named region. + * Closes the named region. * * @param regionName The region to close. * @throws IOException @@ -648,9 +648,9 @@ public class HBaseTestingUtility { HBaseAdmin admin = getHBaseAdmin(); admin.closeRegion(regionName, (Object[]) null); } - + /** - * Closes the region containing the given row. 
+ * Closes the region containing the given row. * * @param row The row to find the containing region. * @param table The table to find the region. @@ -661,7 +661,7 @@ public class HBaseTestingUtility { } /** - * Closes the region containing the given row. + * Closes the region containing the given row. * * @param row The row to find the containing region. * @param table The table to find the region. diff --git a/core/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/core/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index 659a4af..c8de05c 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/core/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -42,14 +42,14 @@ import org.apache.hadoop.hbase.util.Bytes; *

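The benchmarks in this class time HFile.Writer appends and HFile.Reader reads. Below is a minimal write-then-read sketch built only from the 0.20-era calls visible in this patch; it is illustrative and is not the benchmark code itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hfile.Compression;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.util.Bytes;

    public class HFileRoundTripSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        Path path = new Path("/tmp/hfile.perf.sketch");

        // Write a handful of zero-padded, sorted keys, mirroring SequentialWriteBenchmark.
        HFile.Writer writer = new HFile.Writer(fs, path, 8 * 1024,
            (Compression.Algorithm) null, null);
        for (int i = 0; i < 10; i++) {
          writer.append(Bytes.toBytes(String.format("%010d", i)),
              Bytes.toBytes("value-" + i));
        }
        writer.close();

        // Re-open the file and load its trailer/file info, mirroring ReadBenchmark.setUp().
        HFile.Reader reader = new HFile.Reader(fs, path, null, false);
        reader.loadFileInfo();
        reader.close();
      }
    }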
    */ public class HFilePerformanceEvaluation { - + private static final int ROW_LENGTH = 10; private static final int ROW_COUNT = 1000000; private static final int RFILE_BLOCKSIZE = 8 * 1024; - + static final Log LOG = LogFactory.getLog(HFilePerformanceEvaluation.class.getName()); - + static byte [] format(final int i) { String v = Integer.toString(i); return Bytes.toBytes("0000000000".substring(v.length()) + v); @@ -110,9 +110,9 @@ public class HFilePerformanceEvaluation { } } }); - + } - + protected void runBenchmark(RowOrientedBenchmark benchmark, int rowCount) throws Exception { LOG.info("Running " + benchmark.getClass().getSimpleName() + " for " + @@ -121,14 +121,14 @@ public class HFilePerformanceEvaluation { LOG.info("Running " + benchmark.getClass().getSimpleName() + " for " + rowCount + " rows took " + elapsedTime + "ms."); } - + static abstract class RowOrientedBenchmark { - + protected final Configuration conf; protected final FileSystem fs; protected final Path mf; protected final int totalRows; - + public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { this.conf = conf; @@ -136,21 +136,21 @@ public class HFilePerformanceEvaluation { this.mf = mf; this.totalRows = totalRows; } - + void setUp() throws Exception { // do nothing } - + abstract void doRow(int i) throws Exception; - + protected int getReportingPeriod() { return this.totalRows / 10; } - + void tearDown() throws Exception { // do nothing } - + /** * Run benchmark * @return elapsed time. @@ -173,76 +173,76 @@ public class HFilePerformanceEvaluation { } return elapsedTime; } - + } - + static class SequentialWriteBenchmark extends RowOrientedBenchmark { protected HFile.Writer writer; private Random random = new Random(); private byte[] bytes = new byte[ROW_LENGTH]; - + public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } - + @Override void setUp() throws Exception { writer = new HFile.Writer(this.fs, this.mf, RFILE_BLOCKSIZE, (Compression.Algorithm) null, null); } - + @Override void doRow(int i) throws Exception { - writer.append(format(i), generateValue()); + writer.append(format(i), generateValue()); } - + private byte[] generateValue() { random.nextBytes(bytes); return bytes; } - + @Override protected int getReportingPeriod() { return this.totalRows; // don't report progress } - + @Override void tearDown() throws Exception { writer.close(); } - + } - + static abstract class ReadBenchmark extends RowOrientedBenchmark { - + protected HFile.Reader reader; - + public ReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } - + @Override void setUp() throws Exception { reader = new HFile.Reader(this.fs, this.mf, null, false); this.reader.loadFileInfo(); } - + @Override void tearDown() throws Exception { reader.close(); } - + } static class SequentialReadBenchmark extends ReadBenchmark { private HFileScanner scanner; - + public SequentialReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } - + @Override void setUp() throws Exception { super.setUp(); @@ -259,16 +259,16 @@ public class HFilePerformanceEvaluation { PerformanceEvaluationCommons.assertValueSize(v.limit(), ROW_LENGTH); } } - + @Override protected int getReportingPeriod() { return this.totalRows; // don't report progress } - + } - + static class UniformRandomReadBenchmark extends ReadBenchmark { - + private Random random = new Random(); public 
UniformRandomReadBenchmark(Configuration conf, FileSystem fs, @@ -286,12 +286,12 @@ public class HFilePerformanceEvaluation { ByteBuffer v = scanner.getValue(); PerformanceEvaluationCommons.assertValueSize(v.limit(), ROW_LENGTH); } - + private byte [] getRandomRow() { return format(random.nextInt(totalRows)); } } - + static class UniformRandomSmallScan extends ReadBenchmark { private Random random = new Random(); @@ -319,14 +319,14 @@ public class HFilePerformanceEvaluation { PerformanceEvaluationCommons.assertValueSize(v.limit(), ROW_LENGTH); } } - + private byte [] getRandomRow() { return format(random.nextInt(totalRows)); } } - + static class GaussianRandomReadBenchmark extends ReadBenchmark { - + private RandomData randomData = new RandomDataImpl(); public GaussianRandomReadBenchmark(Configuration conf, FileSystem fs, @@ -353,11 +353,11 @@ public class HFilePerformanceEvaluation { return format(r); } } - + /** * @param args - * @throws Exception - * @throws IOException + * @throws Exception + * @throws IOException */ public static void main(String[] args) throws Exception { new HFilePerformanceEvaluation().runBenchmarks(); diff --git a/core/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java b/core/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java index 009e631..3740677 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java +++ b/core/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java @@ -43,7 +43,7 @@ public class MapFilePerformanceEvaluation { protected final HBaseConfiguration conf; private static final int ROW_LENGTH = 10; private static final int ROW_COUNT = 100000; - + static final Log LOG = LogFactory.getLog(MapFilePerformanceEvaluation.class.getName()); @@ -54,7 +54,7 @@ public class MapFilePerformanceEvaluation { super(); this.conf = c; } - + static ImmutableBytesWritable format(final int i, ImmutableBytesWritable w) { String v = Integer.toString(i); w.set(Bytes.toBytes("0000000000".substring(v.length()) + v)); @@ -69,7 +69,7 @@ public class MapFilePerformanceEvaluation { } runBenchmark(new SequentialWriteBenchmark(conf, fs, mf, ROW_COUNT), ROW_COUNT); - + PerformanceEvaluationCommons.concurrentReads(new Runnable() { public void run() { try { @@ -111,7 +111,7 @@ public class MapFilePerformanceEvaluation { } }); } - + protected void runBenchmark(RowOrientedBenchmark benchmark, int rowCount) throws Exception { LOG.info("Running " + benchmark.getClass().getSimpleName() + " for " + @@ -120,14 +120,14 @@ public class MapFilePerformanceEvaluation { LOG.info("Running " + benchmark.getClass().getSimpleName() + " for " + rowCount + " rows took " + elapsedTime + "ms."); } - + static abstract class RowOrientedBenchmark { - + protected final Configuration conf; protected final FileSystem fs; protected final Path mf; protected final int totalRows; - + public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { this.conf = conf; @@ -135,21 +135,21 @@ public class MapFilePerformanceEvaluation { this.mf = mf; this.totalRows = totalRows; } - + void setUp() throws Exception { // do nothing } - + abstract void doRow(int i) throws Exception; - + protected int getReportingPeriod() { return this.totalRows / 10; } - + void tearDown() throws Exception { // do nothing } - + /** * Run benchmark * @return elapsed time. 
@@ -172,77 +172,77 @@ public class MapFilePerformanceEvaluation { } return elapsedTime; } - + } - + static class SequentialWriteBenchmark extends RowOrientedBenchmark { - + protected MapFile.Writer writer; private Random random = new Random(); private byte[] bytes = new byte[ROW_LENGTH]; private ImmutableBytesWritable key = new ImmutableBytesWritable(); private ImmutableBytesWritable value = new ImmutableBytesWritable(); - + public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } - + @Override void setUp() throws Exception { writer = new MapFile.Writer(conf, fs, mf.toString(), ImmutableBytesWritable.class, ImmutableBytesWritable.class); } - + @Override void doRow(int i) throws Exception { value.set(generateValue()); - writer.append(format(i, key), value); + writer.append(format(i, key), value); } - + private byte[] generateValue() { random.nextBytes(bytes); return bytes; } - + @Override protected int getReportingPeriod() { return this.totalRows; // don't report progress } - + @Override void tearDown() throws Exception { writer.close(); } - + } - + static abstract class ReadBenchmark extends RowOrientedBenchmark { ImmutableBytesWritable key = new ImmutableBytesWritable(); ImmutableBytesWritable value = new ImmutableBytesWritable(); - + protected MapFile.Reader reader; - + public ReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } - + @Override void setUp() throws Exception { reader = new MapFile.Reader(fs, mf.toString(), conf); } - + @Override void tearDown() throws Exception { reader.close(); } - + } static class SequentialReadBenchmark extends ReadBenchmark { ImmutableBytesWritable verify = new ImmutableBytesWritable(); - + public SequentialReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); @@ -255,16 +255,16 @@ public class MapFilePerformanceEvaluation { format(i, this.verify).get()); PerformanceEvaluationCommons.assertValueSize(ROW_LENGTH, value.getSize()); } - + @Override protected int getReportingPeriod() { return this.totalRows; // don't report progress } - + } - + static class UniformRandomReadBenchmark extends ReadBenchmark { - + private Random random = new Random(); public UniformRandomReadBenchmark(Configuration conf, FileSystem fs, @@ -278,13 +278,13 @@ public class MapFilePerformanceEvaluation { ImmutableBytesWritable r = (ImmutableBytesWritable)reader.get(k, value); PerformanceEvaluationCommons.assertValueSize(r.getSize(), ROW_LENGTH); } - + private ImmutableBytesWritable getRandomRow() { return format(random.nextInt(totalRows), key); } - + } - + static class UniformRandomSmallScan extends ReadBenchmark { private Random random = new Random(); @@ -308,7 +308,7 @@ public class MapFilePerformanceEvaluation { PerformanceEvaluationCommons.assertValueSize(this.value.getSize(), ROW_LENGTH); } } - + private ImmutableBytesWritable getRandomRow() { return format(random.nextInt(totalRows), key); } @@ -328,19 +328,19 @@ public class MapFilePerformanceEvaluation { ImmutableBytesWritable r = (ImmutableBytesWritable)reader.get(k, value); PerformanceEvaluationCommons.assertValueSize(r.getSize(), ROW_LENGTH); } - + private ImmutableBytesWritable getGaussianRandomRow() { int r = (int) randomData.nextGaussian((double)totalRows / 2.0, (double)totalRows / 10.0); return format(r, key); } - + } /** * @param args - * @throws Exception - * @throws IOException + * @throws Exception + * @throws IOException */ public static 
void main(String[] args) throws Exception { new MapFilePerformanceEvaluation(new HBaseConfiguration()). diff --git a/core/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/core/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index b80c8ec..60a4f38 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/core/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -49,12 +49,12 @@ public class MiniHBaseCluster implements HConstants { public LocalHBaseCluster hbaseCluster; /** - * Start a MiniHBaseCluster. + * Start a MiniHBaseCluster. * @param conf Configuration to be used for cluster * @param numRegionServers initial number of region servers to start. * @throws IOException */ - public MiniHBaseCluster(Configuration conf, int numRegionServers) + public MiniHBaseCluster(Configuration conf, int numRegionServers) throws IOException { this.conf = conf; init(numRegionServers); @@ -239,7 +239,7 @@ public class MiniHBaseCluster implements HConstants { /** * Shut down the mini HBase cluster - * @throws IOException + * @throws IOException */ public void shutdown() throws IOException { if (this.hbaseCluster != null) { diff --git a/core/src/test/java/org/apache/hadoop/hbase/MultiRegionTable.java b/core/src/test/java/org/apache/hadoop/hbase/MultiRegionTable.java index 65e2ea8..b8ad4c7 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/MultiRegionTable.java +++ b/core/src/test/java/org/apache/hadoop/hbase/MultiRegionTable.java @@ -55,7 +55,7 @@ public class MultiRegionTable extends HBaseClusterTestCase { Bytes.toBytes("xxx"), Bytes.toBytes("yyy") }; - + protected final byte [] columnFamily; protected HTableDescriptor desc; @@ -68,13 +68,13 @@ public class MultiRegionTable extends HBaseClusterTestCase { public MultiRegionTable(int nServers, final String familyName) { super(nServers); - + this.columnFamily = Bytes.toBytes(familyName); // These are needed for the new and improved Map/Reduce framework System.setProperty("hadoop.log.dir", conf.get("hadoop.log.dir")); conf.set("mapred.output.dir", conf.get("hadoop.tmp.dir")); } - + /** * Run after dfs is ready but before hbase cluster is started up. */ @@ -96,13 +96,13 @@ public class MultiRegionTable extends HBaseClusterTestCase { for(int i = 0; i < regions.length; i++) { HRegion.addRegionToMETA(meta, regions[i]); } - + closeRootAndMeta(); } catch (Exception e) { shutdownDfs(dfsCluster); throw e; } - } + } private HRegion createARegion(byte [] startKey, byte [] endKey) throws IOException { HRegion region = createNewHRegion(desc, startKey, endKey); @@ -110,7 +110,7 @@ public class MultiRegionTable extends HBaseClusterTestCase { closeRegionAndDeleteLog(region); return region; } - + private void closeRegionAndDeleteLog(HRegion region) throws IOException { region.close(); region.getLog().closeAndDelete(); diff --git a/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index ceb1fa2..d76c75e 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -84,22 +84,22 @@ import org.apache.hadoop.util.LineReader; * command-line which test to run and how many clients are participating in * this experiment. Run java PerformanceEvaluation --help to * obtain usage. - * + * *
    This class sets up and runs the evaluation programs described in * Section 7, Performance Evaluation, of the Bigtable * paper, pages 8-10. - * + * *
    If number of clients > 1, we start up a MapReduce job. Each map task * runs an individual client. Each client does about 1GB of data. */ public class PerformanceEvaluation implements HConstants { protected static final Log LOG = LogFactory.getLog(PerformanceEvaluation.class.getName()); - + private static final int ROW_LENGTH = 1000; private static final int ONE_GB = 1024 * 1024 * 1000; private static final int ROWS_PER_GB = ONE_GB / ROW_LENGTH; - + public static final byte [] TABLE_NAME = Bytes.toBytes("TestTable"); public static final byte [] FAMILY_NAME = Bytes.toBytes("info"); public static final byte [] QUALIFIER_NAME = Bytes.toBytes("data"); @@ -111,7 +111,7 @@ public class PerformanceEvaluation implements HConstants { } protected Map commands = new TreeMap(); - + volatile Configuration conf; private boolean miniCluster = false; private boolean nomapred = false; @@ -127,9 +127,9 @@ public class PerformanceEvaluation implements HConstants { public static final Pattern LINE_PATTERN = Pattern.compile("startRow=(\\d+),\\s+" + "perClientRunRows=(\\d+),\\s+" + - "totalRows=(\\d+),\\s+" + - "clients=(\\d+),\\s+" + - "flushCommits=(\\w+),\\s+" + + "totalRows=(\\d+),\\s+" + + "clients=(\\d+),\\s+" + + "flushCommits=(\\w+),\\s+" + "writeToWAL=(\\w+)"); /** @@ -141,8 +141,8 @@ public class PerformanceEvaluation implements HConstants { ELAPSED_TIME, /** number of rows */ ROWS} - - + + /** * Constructor * @param c Configuration object @@ -174,13 +174,13 @@ public class PerformanceEvaluation implements HConstants { "Run scan test using a filter to find a specific row based on it's value (make sure to use --rows=20)"); } - protected void addCommandDescriptor(Class cmdClass, + protected void addCommandDescriptor(Class cmdClass, String name, String description) { - CmdDescriptor cmdDescriptor = + CmdDescriptor cmdDescriptor = new CmdDescriptor(cmdClass, name, description); commands.put(name, cmdDescriptor); } - + /** * Implementations can have their status set. */ @@ -192,11 +192,11 @@ public class PerformanceEvaluation implements HConstants { */ void setStatus(final String msg) throws IOException; } - + /** * This class works as the InputSplit of Performance Evaluation - * MapReduce InputFormat, and the Record Value of RecordReader. - * Each map task will only read one record from a PeInputSplit, + * MapReduce InputFormat, and the Record Value of RecordReader. + * Each map task will only read one record from a PeInputSplit, * the record value is the PeInputSplit itself. 
*/ public static class PeInputSplit extends InputSplit implements Writable { @@ -206,7 +206,7 @@ public class PerformanceEvaluation implements HConstants { private int clients = 0; private boolean flushCommits = false; private boolean writeToWAL = true; - + public PeInputSplit() { this.startRow = 0; this.rows = 0; @@ -215,7 +215,7 @@ public class PerformanceEvaluation implements HConstants { this.flushCommits = false; this.writeToWAL = true; } - + public PeInputSplit(int startRow, int rows, int totalRows, int clients, boolean flushCommits, boolean writeToWAL) { this.startRow = startRow; @@ -225,13 +225,13 @@ public class PerformanceEvaluation implements HConstants { this.flushCommits = flushCommits; this.writeToWAL = writeToWAL; } - + @Override public void readFields(DataInput in) throws IOException { this.startRow = in.readInt(); this.rows = in.readInt(); this.totalRows = in.readInt(); - this.clients = in.readInt(); + this.clients = in.readInt(); this.flushCommits = in.readBoolean(); this.writeToWAL = in.readBoolean(); } @@ -245,29 +245,29 @@ public class PerformanceEvaluation implements HConstants { out.writeBoolean(flushCommits); out.writeBoolean(writeToWAL); } - + @Override public long getLength() throws IOException, InterruptedException { return 0; } - + @Override public String[] getLocations() throws IOException, InterruptedException { return new String[0]; } - + public int getStartRow() { return startRow; } - + public int getRows() { return rows; } - + public int getTotalRows() { return totalRows; } - + public int getClients() { return clients; } @@ -280,10 +280,10 @@ public class PerformanceEvaluation implements HConstants { return writeToWAL; } } - + /** * InputFormat of Performance Evaluation MapReduce job. - * It extends from FileInputFormat, want to use it's methods such as setInputPaths(). + * It extends from FileInputFormat, want to use it's methods such as setInputPaths(). 
*/ public static class PeInputFormat extends FileInputFormat { @@ -291,7 +291,7 @@ public class PerformanceEvaluation implements HConstants { public List getSplits(JobContext job) throws IOException { // generate splits List splitList = new ArrayList(); - + for (FileStatus file: listStatus(job)) { Path path = file.getPath(); FileSystem fs = path.getFileSystem(job.getConfiguration()); @@ -313,7 +313,7 @@ public class PerformanceEvaluation implements HConstants { boolean flushCommits = Boolean.parseBoolean(m.group(5)); boolean writeToWAL = Boolean.parseBoolean(m.group(6)); - LOG.debug("split["+ splitList.size() + "] " + + LOG.debug("split["+ splitList.size() + "] " + " startRow=" + startRow + " rows=" + rows + " totalRows=" + totalRows + @@ -322,60 +322,60 @@ public class PerformanceEvaluation implements HConstants { " writeToWAL=" + writeToWAL); PeInputSplit newSplit = - new PeInputSplit(startRow, rows, totalRows, clients, + new PeInputSplit(startRow, rows, totalRows, clients, flushCommits, writeToWAL); splitList.add(newSplit); } } in.close(); } - + LOG.info("Total # of splits: " + splitList.size()); return splitList; } - + @Override public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) { return new PeRecordReader(); } - + public static class PeRecordReader extends RecordReader { private boolean readOver = false; private PeInputSplit split = null; private NullWritable key = null; private PeInputSplit value = null; - + @Override - public void initialize(InputSplit split, TaskAttemptContext context) + public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException { this.readOver = false; this.split = (PeInputSplit)split; } - + @Override public boolean nextKeyValue() throws IOException, InterruptedException { if(readOver) { return false; } - + key = NullWritable.get(); value = (PeInputSplit)split; - + readOver = true; return true; } - + @Override public NullWritable getCurrentKey() throws IOException, InterruptedException { return key; } - + @Override public PeInputSplit getCurrentValue() throws IOException, InterruptedException { return value; } - + @Override public float getProgress() throws IOException, InterruptedException { if(readOver) { @@ -384,18 +384,18 @@ public class PerformanceEvaluation implements HConstants { return 0.0f; } } - + @Override public void close() throws IOException { // do nothing } } } - + /** * MapReduce job that runs a performance evaluation client in each map task. */ - public static class EvaluationMapTask + public static class EvaluationMapTask extends Mapper { /** configuration parameter name that contains the command */ @@ -432,18 +432,18 @@ public class PerformanceEvaluation implements HConstants { return clazz; } - protected void map(NullWritable key, PeInputSplit value, final Context context) + protected void map(NullWritable key, PeInputSplit value, final Context context) throws IOException, InterruptedException { - + Status status = new Status() { public void setStatus(String msg) { - context.setStatus(msg); + context.setStatus(msg); } }; - + // Evaluation task long elapsedTime = this.pe.runOneClient(this.cmd, value.getStartRow(), - value.getRows(), value.getTotalRows(), + value.getRows(), value.getTotalRows(), value.isFlushCommits(), value.isWriteToWAL(), status); // Collect how much time the thing took. 
Report as map output and @@ -454,7 +454,7 @@ public class PerformanceEvaluation implements HConstants { context.progress(); } } - + /* * If table does not already exist, create. * @param c Client to use checking. @@ -490,7 +490,7 @@ public class PerformanceEvaluation implements HConstants { doMapReduce(cmd); } } - + /* * Run all clients in this vm each to its own thread. * @param cmd Command to run. @@ -536,7 +536,7 @@ public class PerformanceEvaluation implements HConstants { } } } - + /* * Run a mapreduce job. Run as many maps as asked-for clients. * Before we start up the job, write out an input file with instruction @@ -552,24 +552,24 @@ public class PerformanceEvaluation implements HConstants { Job job = new Job(this.conf); job.setJarByClass(PerformanceEvaluation.class); job.setJobName("HBase Performance Evaluation"); - + job.setInputFormatClass(PeInputFormat.class); PeInputFormat.setInputPaths(job, inputDir); - + job.setOutputKeyClass(LongWritable.class); job.setOutputValueClass(LongWritable.class); - + job.setMapperClass(EvaluationMapTask.class); job.setReducerClass(LongSumReducer.class); - + job.setNumReduceTasks(1); - + job.setOutputFormatClass(TextOutputFormat.class); TextOutputFormat.setOutputPath(job, new Path(inputDir,"outputs")); - + job.waitForCompletion(true); } - + /* * Write input file of offsets-per-client for the mapreduce job. * @param c Configuration @@ -694,7 +694,7 @@ public class PerformanceEvaluation implements HConstants { */ static abstract class Test { // Below is make it so when Tests are all running in the one - // jvm, that they each have a differently seeded Random. + // jvm, that they each have a differently seeded Random. private static final Random randomSeed = new Random(System.currentTimeMillis()); private static long nextRandomSeed() { @@ -729,16 +729,16 @@ public class PerformanceEvaluation implements HConstants { this.flushCommits = options.isFlushCommits(); this.writeToWAL = options.isWriteToWAL(); } - + private String generateStatus(final int sr, final int i, final int lr) { return sr + "/" + i + "/" + lr; } - + protected int getReportingPeriod() { int period = this.perClientRunRows / 10; return period == 0? this.perClientRunRows: period; } - + void testSetup() throws IOException { this.admin = new HBaseAdmin(conf); this.table = new HTable(conf, tableName); @@ -752,7 +752,7 @@ public class PerformanceEvaluation implements HConstants { this.table.flushCommits(); } } - + /* * Run test * @return Elapsed time. 
@@ -811,7 +811,7 @@ public class PerformanceEvaluation implements HConstants { } s.close(); } - + @Override protected int getReportingPeriod() { int period = this.perClientRunRows / 100; @@ -924,12 +924,12 @@ public class PerformanceEvaluation implements HConstants { } } - + static class RandomWriteTest extends Test { RandomWriteTest(Configuration conf, TestOptions options, Status status) { super(conf, options, status); } - + @Override void testRow(final int i) throws IOException { byte [] row = getRandomRow(this.rand, this.totalRows); @@ -940,19 +940,19 @@ public class PerformanceEvaluation implements HConstants { table.put(put); } } - + static class ScanTest extends Test { private ResultScanner testScanner; ScanTest(Configuration conf, TestOptions options, Status status) { super(conf, options, status); } - + @Override void testSetup() throws IOException { super.testSetup(); } - + @Override void testTakedown() throws IOException { if (this.testScanner != null) { @@ -960,8 +960,8 @@ public class PerformanceEvaluation implements HConstants { } super.testTakedown(); } - - + + @Override void testRow(final int i) throws IOException { if (this.testScanner == null) { @@ -973,12 +973,12 @@ public class PerformanceEvaluation implements HConstants { } } - + static class SequentialReadTest extends Test { SequentialReadTest(Configuration conf, TestOptions options, Status status) { super(conf, options, status); } - + @Override void testRow(final int i) throws IOException { Get get = new Get(format(i)); @@ -987,12 +987,12 @@ public class PerformanceEvaluation implements HConstants { } } - + static class SequentialWriteTest extends Test { SequentialWriteTest(Configuration conf, TestOptions options, Status status) { super(conf, options, status); } - + @Override void testRow(final int i) throws IOException { Put put = new Put(format(i)); @@ -1036,7 +1036,7 @@ public class PerformanceEvaluation implements HConstants { return scan; } } - + /* * Format passed integer. * @param number @@ -1052,7 +1052,7 @@ public class PerformanceEvaluation implements HConstants { } return b; } - + /* * This method takes some time and is done inline uploading data. 
For * example, doing the mapfile test, generation of the key and value @@ -1064,14 +1064,14 @@ public class PerformanceEvaluation implements HConstants { r.nextBytes(b); return b; } - + static byte [] getRandomRow(final Random random, final int totalRows) { return format(random.nextInt(Integer.MAX_VALUE) % totalRows); } - + long runOneClient(final Class cmd, final int startRow, - final int perClientRunRows, final int totalRows, - boolean flushCommits, boolean writeToWAL, + final int perClientRunRows, final int totalRows, + boolean flushCommits, boolean writeToWAL, final Status status) throws IOException { status.setStatus("Start " + cmd + " at offset " + startRow + " for " + @@ -1099,7 +1099,7 @@ public class PerformanceEvaluation implements HConstants { "ms at offset " + startRow + " for " + perClientRunRows + " rows"); return totalElapsedTime; } - + private void runNIsOne(final Class cmd) { Status status = new Status() { public void setStatus(String msg) throws IOException { @@ -1115,7 +1115,7 @@ public class PerformanceEvaluation implements HConstants { status); } catch (Exception e) { LOG.error("Failed", e); - } + } } private void runTest(final Class cmd) throws IOException, @@ -1127,7 +1127,7 @@ public class PerformanceEvaluation implements HConstants { dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null); zooKeeperCluster = new MiniZooKeeperCluster(); int zooKeeperPort = zooKeeperCluster.startup(new File(System.getProperty("java.io.tmpdir"))); - + // mangle the conf so that the fs parameter points to the minidfs we // just started up FileSystem fs = dfsCluster.getFileSystem(); @@ -1139,14 +1139,14 @@ public class PerformanceEvaluation implements HConstants { FSUtils.setVersion(fs, parentdir); hbaseMiniCluster = new MiniHBaseCluster(this.conf, N); } - + try { if (N == 1) { // If there is only one client and one HRegionServer, we assume nothing // has been set up at all. runNIsOne(cmd); } else { - // Else, run + // Else, run runNIsMoreThanOne(cmd); } } finally { @@ -1157,11 +1157,11 @@ public class PerformanceEvaluation implements HConstants { } } } - + protected void printUsage() { printUsage(null); } - + protected void printUsage(final String message) { if (message != null && message.length() > 0) { System.err.println(message); @@ -1203,16 +1203,16 @@ public class PerformanceEvaluation implements HConstants { // Set total number of rows to write. this.R = this.R * N; } - + public int doCommandLine(final String[] args) { // Process command-line args. TODO: Better cmd-line processing - // (but hopefully something not as painful as cli options). + // (but hopefully something not as painful as cli options). 
int errCode = -1; if (args.length < 1) { printUsage(); return errCode; } - + try { for (int i = 0; i < args.length; i++) { String cmd = args[i]; @@ -1221,19 +1221,19 @@ public class PerformanceEvaluation implements HConstants { errCode = 0; break; } - + final String miniClusterArgKey = "--miniCluster"; if (cmd.startsWith(miniClusterArgKey)) { this.miniCluster = true; continue; } - + final String nmr = "--nomapred"; if (cmd.startsWith(nmr)) { this.nomapred = true; continue; } - + final String rows = "--rows="; if (cmd.startsWith(rows)) { this.R = Integer.parseInt(cmd.substring(rows.length())); @@ -1259,14 +1259,14 @@ public class PerformanceEvaluation implements HConstants { errCode = 0; break; } - + printUsage(); break; } } catch (Exception e) { e.printStackTrace(); } - + return errCode; } diff --git a/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java b/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java index 78d984c..eac7207 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java +++ b/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java @@ -33,13 +33,13 @@ import org.apache.commons.logging.LogFactory; public class PerformanceEvaluationCommons { static final Log LOG = LogFactory.getLog(PerformanceEvaluationCommons.class.getName()); - + public static void assertValueSize(final int expectedSize, final int got) { if (got != expectedSize) { throw new AssertionError("Expected " + expectedSize + " but got " + got); } } - + public static void assertKey(final byte [] expected, final ByteBuffer got) { byte [] b = new byte[got.limit()]; got.get(b, 0, got.limit()); @@ -53,7 +53,7 @@ public class PerformanceEvaluationCommons { " but got " + org.apache.hadoop.hbase.util.Bytes.toString(got)); } } - + public static void concurrentReads(final Runnable r) { final int count = 1; long now = System.currentTimeMillis(); diff --git a/core/src/test/java/org/apache/hadoop/hbase/TestCompare.java b/core/src/test/java/org/apache/hadoop/hbase/TestCompare.java index a807b12..bbac815 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/TestCompare.java +++ b/core/src/test/java/org/apache/hadoop/hbase/TestCompare.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.util.Bytes; * Test comparing HBase objects. */ public class TestCompare extends TestCase { - + /** * Sort of HRegionInfo. */ diff --git a/core/src/test/java/org/apache/hadoop/hbase/TestHMsg.java b/core/src/test/java/org/apache/hadoop/hbase/TestHMsg.java index ee3be20..2b75f06 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/TestHMsg.java +++ b/core/src/test/java/org/apache/hadoop/hbase/TestHMsg.java @@ -54,7 +54,7 @@ public class TestHMsg extends TestCase { new HRegionInfo(new HTableDescriptor(Bytes.toBytes("test")), b, b)); assertNotSame(-1, msgs.indexOf(hmsg)); } - + public void testSerialization() throws IOException { // Check out new HMsg that carries two daughter split regions. 
byte [] abytes = Bytes.toBytes("a"); diff --git a/core/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java b/core/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java index 1810582..2d969f2 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java +++ b/core/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.client.HTable; */ public class TestInfoServers extends HBaseClusterTestCase { static final Log LOG = LogFactory.getLog(TestInfoServers.class); - + @Override protected void preHBaseClusterSetup() { // Bring up info servers on 'odd' port numbers in case the test is not @@ -41,7 +41,7 @@ public class TestInfoServers extends HBaseClusterTestCase { conf.setInt("hbase.master.info.port", 60011); conf.setInt("hbase.regionserver.info.port", 60031); } - + /** * @throws Exception */ @@ -56,7 +56,7 @@ public class TestInfoServers extends HBaseClusterTestCase { assertHasExpectedContent(new URL("http://localhost:" + port + "/index.html"), "regionserver"); } - + private void assertHasExpectedContent(final URL u, final String expected) throws IOException { LOG.info("Testing " + u.toString() + " has " + expected); diff --git a/core/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/core/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java index 49681b5..030e979 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java +++ b/core/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java @@ -40,7 +40,7 @@ public class TestKeyValue extends TestCase { byte [] qualifier1 = Bytes.toBytes("def"); byte [] family2 = Bytes.toBytes("abcd"); byte [] qualifier2 = Bytes.toBytes("ef"); - + KeyValue aaa = new KeyValue(a, family1, qualifier1, 0L, Type.Put, a); assertFalse(aaa.matchingColumn(family2, qualifier2)); assertTrue(aaa.matchingColumn(family1, qualifier1)); @@ -62,7 +62,7 @@ public class TestKeyValue extends TestCase { check(Bytes.toBytes(getName()), Bytes.toBytes(getName()), null, 1, null); check(HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes(getName()), null, 1, null); } - + private void check(final byte [] row, final byte [] family, byte [] qualifier, final long timestamp, final byte [] value) { KeyValue kv = new KeyValue(row, family, qualifier, timestamp, value); @@ -71,7 +71,7 @@ public class TestKeyValue extends TestCase { // Call toString to make sure it works. 
LOG.info(kv.toString()); } - + public void testPlainCompare() throws Exception { final byte [] a = Bytes.toBytes("aaa"); final byte [] b = Bytes.toBytes("bbb"); @@ -118,11 +118,11 @@ public class TestKeyValue extends TestCase { KVComparator c = new KeyValue.RootComparator(); assertTrue(c.compare(b, a) < 0); KeyValue aa = new KeyValue(Bytes.toBytes(".META.,,1"), now); - KeyValue bb = new KeyValue(Bytes.toBytes(".META.,,1"), + KeyValue bb = new KeyValue(Bytes.toBytes(".META.,,1"), Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1235943454602L, (byte[])null); assertTrue(c.compare(aa, bb) < 0); - + // Meta compares KeyValue aaa = new KeyValue( Bytes.toBytes("TestScanMultipleVersions,row_0500,1236020145502"), now); @@ -130,12 +130,12 @@ public class TestKeyValue extends TestCase { Bytes.toBytes("TestScanMultipleVersions,,99999999999999"), now); c = new KeyValue.MetaComparator(); assertTrue(c.compare(bbb, aaa) < 0); - + KeyValue aaaa = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,,1236023996656"), Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1236024396271L, (byte[])null); assertTrue(c.compare(aaaa, bbb) < 0); - + KeyValue x = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,row_0500,1236034574162"), Bytes.toBytes("info"), Bytes.toBytes(""), 9223372036854775807L, (byte[])null); @@ -152,7 +152,7 @@ public class TestKeyValue extends TestCase { /** * Tests cases where rows keys have characters below the ','. * See HBASE-832 - * @throws IOException + * @throws IOException */ public void testKeyValueBorderCases() throws IOException { // % sorts before , so if we don't do special comparator, rowB would @@ -163,15 +163,15 @@ public class TestKeyValue extends TestCase { Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0); - rowA = new KeyValue(Bytes.toBytes("testtable,,1234"), Bytes.toBytes("fam"), + rowA = new KeyValue(Bytes.toBytes("testtable,,1234"), Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); - rowB = new KeyValue(Bytes.toBytes("testtable,$www.hbase.org/,99999"), + rowB = new KeyValue(Bytes.toBytes("testtable,$www.hbase.org/,99999"), Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0); - rowA = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/,1234,4321"), + rowA = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/,1234,4321"), Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); - rowB = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/%20,99999,99999"), + rowB = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/%20,99999,99999"), Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); assertTrue(KeyValue.ROOT_COMPARATOR.compare(rowA, rowB) < 0); } @@ -275,6 +275,6 @@ public class TestKeyValue extends TestCase { // Test multiple KeyValues in a single blob. // TODO actually write this test! 
- + } } diff --git a/core/src/test/java/org/apache/hadoop/hbase/TestMergeMeta.java b/core/src/test/java/org/apache/hadoop/hbase/TestMergeMeta.java index 9ee211a..611589b 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/TestMergeMeta.java +++ b/core/src/test/java/org/apache/hadoop/hbase/TestMergeMeta.java @@ -24,7 +24,7 @@ import java.io.IOException; /** Tests region merging */ public class TestMergeMeta extends AbstractMergeTestBase { - /** constructor + /** constructor * @throws Exception */ public TestMergeMeta() throws Exception { @@ -32,7 +32,7 @@ public class TestMergeMeta extends AbstractMergeTestBase { conf.setLong("hbase.client.pause", 1 * 1000); conf.setInt("hbase.client.retries.number", 2); } - + /** * test case * @throws IOException diff --git a/core/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java b/core/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java index 7b2f8fc..23ed1f6 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java +++ b/core/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java @@ -42,11 +42,11 @@ public class TestRegionRebalancing extends HBaseClusterTestCase { HTable table; HTableDescriptor desc; - + final byte[] FIVE_HUNDRED_KBYTES; - + final byte [] FAMILY_NAME = Bytes.toBytes("col"); - + /** constructor */ public TestRegionRebalancing() { super(1); @@ -54,11 +54,11 @@ public class TestRegionRebalancing extends HBaseClusterTestCase { for (int i = 0; i < 500 * 1024; i++) { FIVE_HUNDRED_KBYTES[i] = 'x'; } - + desc = new HTableDescriptor("test"); desc.addFamily(new HColumnDescriptor(FAMILY_NAME)); } - + /** * Before the hbase cluster starts up, create some dummy regions. */ @@ -72,32 +72,32 @@ public class TestRegionRebalancing extends HBaseClusterTestCase { } startKeys.add(null); LOG.info(startKeys.size() + " start keys generated"); - + List regions = new ArrayList(); for (int i = 0; i < 20; i++) { regions.add(createAregion(startKeys.get(i), startKeys.get(i+1))); } - + // Now create the root and meta regions and insert the data regions // created above into the meta - + createRootAndMetaRegions(); for (HRegion region : regions) { HRegion.addRegionToMETA(meta, region); } closeRootAndMeta(); } - + /** * For HBASE-71. Try a few different configurations of starting and stopping * region servers to see if the assignment or regions is pretty balanced. 
- * @throws IOException + * @throws IOException */ public void testRebalancing() throws IOException { table = new HTable(conf, "test"); - assertEquals("Test table should have 20 regions", + assertEquals("Test table should have 20 regions", 20, table.getStartKeys().length); - + // verify that the region assignments are balanced to start out assertRegionsAreBalanced(); @@ -105,32 +105,32 @@ public class TestRegionRebalancing extends HBaseClusterTestCase { // add a region server - total of 2 cluster.startRegionServer(); assertRegionsAreBalanced(); - + // add a region server - total of 3 - LOG.debug("Adding 3rd region server."); + LOG.debug("Adding 3rd region server."); cluster.startRegionServer(); assertRegionsAreBalanced(); - + // kill a region server - total of 2 LOG.debug("Killing the 3rd region server."); cluster.stopRegionServer(2, false); cluster.waitOnRegionServer(2); assertRegionsAreBalanced(); - + // start two more region servers - total of 4 LOG.debug("Adding 3rd region server"); cluster.startRegionServer(); - LOG.debug("Adding 4th region server"); - cluster.startRegionServer(); + LOG.debug("Adding 4th region server"); + cluster.startRegionServer(); assertRegionsAreBalanced(); for (int i = 0; i < 6; i++){ - LOG.debug("Adding " + (i + 5) + "th region server"); + LOG.debug("Adding " + (i + 5) + "th region server"); cluster.startRegionServer(); } assertRegionsAreBalanced(); } - + /** figure out how many regions are currently being served. */ private int getRegionCount() { int total = 0; @@ -139,7 +139,7 @@ public class TestRegionRebalancing extends HBaseClusterTestCase { } return total; } - + /** * Determine if regions are balanced. Figure out the total, divide by the * number of online servers, then test if each server is +/- 1 of average @@ -160,39 +160,39 @@ public class TestRegionRebalancing extends HBaseClusterTestCase { double avg = cluster.getMaster().getAverageLoad(); int avgLoadPlusSlop = (int)Math.ceil(avg * (1 + slop)); int avgLoadMinusSlop = (int)Math.floor(avg * (1 - slop)) - 1; - LOG.debug("There are " + servers.size() + " servers and " + regionCount + LOG.debug("There are " + servers.size() + " servers and " + regionCount + " regions. Load Average: " + avg + " low border: " + avgLoadMinusSlop + ", up border: " + avgLoadPlusSlop + "; attempt: " + i); for (HRegionServer server : servers) { int serverLoad = server.getOnlineRegions().size(); LOG.debug(server.hashCode() + " Avg: " + avg + " actual: " + serverLoad); - if (!(avg > 2.0 && serverLoad <= avgLoadPlusSlop + if (!(avg > 2.0 && serverLoad <= avgLoadPlusSlop && serverLoad >= avgLoadMinusSlop)) { LOG.debug(server.hashCode() + " Isn't balanced!!! Avg: " + avg + " actual: " + serverLoad + " slop: " + slop); success = false; } } - + if (!success) { - // one or more servers are not balanced. sleep a little to give it a + // one or more servers are not balanced. sleep a little to give it a // chance to catch up. then, go back to the retry loop. try { Thread.sleep(10000); } catch (InterruptedException e) {} - + continue; } - + // if we get here, all servers were balanced, so we should just return. return; } - // if we get here, we tried 5 times and never got to short circuit out of + // if we get here, we tried 5 times and never got to short circuit out of // the retry loop, so this is a failure. 
fail("After 5 attempts, region assignments were not balanced."); } - + private List getOnlineRegionServers() { List list = new ArrayList(); for (JVMClusterUtil.RegionServerThread rst : cluster.getRegionServerThreads()) { @@ -204,7 +204,7 @@ public class TestRegionRebalancing extends HBaseClusterTestCase { } /** - * Wait until all the regions are assigned. + * Wait until all the regions are assigned. */ private void waitForAllRegionsAssigned() { while (getRegionCount() < 22) { @@ -218,9 +218,9 @@ public class TestRegionRebalancing extends HBaseClusterTestCase { /** * create a region with the specified start and end key and exactly one row - * inside. + * inside. */ - private HRegion createAregion(byte [] startKey, byte [] endKey) + private HRegion createAregion(byte [] startKey, byte [] endKey) throws IOException { HRegion region = createNewHRegion(desc, startKey, endKey); byte [] keyToWrite = startKey == null ? Bytes.toBytes("row_000") : startKey; diff --git a/core/src/test/java/org/apache/hadoop/hbase/TestScanMultipleVersions.java b/core/src/test/java/org/apache/hadoop/hbase/TestScanMultipleVersions.java index 2976477..1f51703 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/TestScanMultipleVersions.java +++ b/core/src/test/java/org/apache/hadoop/hbase/TestScanMultipleVersions.java @@ -50,9 +50,9 @@ public class TestScanMultipleVersions extends HBaseClusterTestCase { @Override protected void preHBaseClusterSetup() throws Exception { testDir = new Path(conf.get(HConstants.HBASE_DIR)); - + // Create table description - + this.desc = new HTableDescriptor(TABLE_NAME); this.desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); @@ -72,7 +72,7 @@ public class TestScanMultipleVersions extends HBaseClusterTestCase { // Insert data for (int j = 0; j < TIMESTAMPS.length; j++) { Put put = new Put(ROWS[i], TIMESTAMPS[j], null); - put.add(HConstants.CATALOG_FAMILY, null, TIMESTAMPS[j], + put.add(HConstants.CATALOG_FAMILY, null, TIMESTAMPS[j], Bytes.toBytes(TIMESTAMPS[j])); REGIONS[i].put(put); } @@ -85,7 +85,7 @@ public class TestScanMultipleVersions extends HBaseClusterTestCase { // Close root and meta regions closeRootAndMeta(); } - + /** * @throws Exception */ @@ -106,7 +106,7 @@ public class TestScanMultipleVersions extends HBaseClusterTestCase { assertTrue(cellCount == 1); } } - + // Case 1: scan with LATEST_TIMESTAMP. Should get two rows int count = 0; Scan scan = new Scan(); @@ -124,7 +124,7 @@ public class TestScanMultipleVersions extends HBaseClusterTestCase { // Case 2: Scan with a timestamp greater than most recent timestamp // (in this case > 1000 and < LATEST_TIMESTAMP. Should get 2 rows. - + count = 0; scan = new Scan(); scan.setTimeRange(1000L, Long.MAX_VALUE); @@ -139,10 +139,10 @@ public class TestScanMultipleVersions extends HBaseClusterTestCase { } finally { s.close(); } - + // Case 3: scan with timestamp equal to most recent timestamp // (in this case == 1000. Should get 2 rows. - + count = 0; scan = new Scan(); scan.setTimeStamp(1000L); @@ -157,10 +157,10 @@ public class TestScanMultipleVersions extends HBaseClusterTestCase { } finally { s.close(); } - + // Case 4: scan with timestamp greater than first timestamp but less than // second timestamp (100 < timestamp < 1000). Should get 2 rows. - + count = 0; scan = new Scan(); scan.setTimeRange(100L, 1000L); @@ -175,10 +175,10 @@ public class TestScanMultipleVersions extends HBaseClusterTestCase { } finally { s.close(); } - + // Case 5: scan with timestamp equal to first timestamp (100) // Should get 2 rows. 
- + count = 0; scan = new Scan(); scan.setTimeStamp(100L); diff --git a/core/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/core/src/test/java/org/apache/hadoop/hbase/TestSerialization.java index 1c9f30e..9810385 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/TestSerialization.java +++ b/core/src/test/java/org/apache/hadoop/hbase/TestSerialization.java @@ -89,7 +89,7 @@ public class TestSerialization extends HBaseTestCase { assertTrue(hmw.size() == 1); assertTrue(Bytes.equals("value".getBytes(), hmw.get("key".getBytes()))); } - + public void testHMsg() throws Exception { HMsg m = new HMsg(HMsg.Type.MSG_REGIONSERVER_QUIESCE); byte [] mb = Writables.getBytes(m); @@ -103,7 +103,7 @@ public class TestSerialization extends HBaseTestCase { deserializedHMsg = (HMsg)Writables.getWritable(mb, new HMsg()); assertTrue(m.equals(deserializedHMsg)); } - + public void testTableDescriptor() throws Exception { HTableDescriptor htd = createTableDescriptor(getName()); byte [] mb = Writables.getBytes(htd); @@ -131,7 +131,7 @@ public class TestSerialization extends HBaseTestCase { assertEquals(hri.getTableDesc().getFamilies().size(), deserializedHri.getTableDesc().getFamilies().size()); } - + /** * Test ServerInfo serialization * @throws Exception @@ -144,7 +144,7 @@ public class TestSerialization extends HBaseTestCase { (HServerInfo)Writables.getWritable(b, new HServerInfo()); assertTrue(hsi.equals(deserializedHsi)); } - + public void testPut() throws Exception{ byte[] row = "row".getBytes(); byte[] fam = "fam".getBytes(); @@ -156,10 +156,10 @@ public class TestSerialization extends HBaseTestCase { byte[] qf6 = "qf6".getBytes(); byte[] qf7 = "qf7".getBytes(); byte[] qf8 = "qf8".getBytes(); - + long ts = System.currentTimeMillis(); byte[] val = "val".getBytes(); - + Put put = new Put(row); put.add(fam, qf1, ts, val); put.add(fam, qf2, ts, val); @@ -169,7 +169,7 @@ public class TestSerialization extends HBaseTestCase { put.add(fam, qf6, ts, val); put.add(fam, qf7, ts, val); put.add(fam, qf8, ts, val); - + byte[] sb = Writables.getBytes(put); Put desPut = (Put)Writables.getWritable(sb, new Put()); @@ -178,7 +178,7 @@ public class TestSerialization extends HBaseTestCase { // desPut = (Put)Writables.getWritable(sb, new Put()); // long stop = System.nanoTime(); // System.out.println("timer " +(stop-start)); - + assertTrue(Bytes.equals(put.getRow(), desPut.getRow())); List list = null; List desList = null; @@ -192,18 +192,18 @@ public class TestSerialization extends HBaseTestCase { } } - + public void testPut2() throws Exception{ byte[] row = "testAbort,,1243116656250".getBytes(); byte[] fam = "historian".getBytes(); byte[] qf1 = "creation".getBytes(); - + long ts = 9223372036854775807L; byte[] val = "dont-care".getBytes(); - + Put put = new Put(row); put.add(fam, qf1, ts, val); - + byte[] sb = Writables.getBytes(put); Put desPut = (Put)Writables.getWritable(sb, new Put()); @@ -219,18 +219,18 @@ public class TestSerialization extends HBaseTestCase { } } } - - + + public void testDelete() throws Exception{ byte[] row = "row".getBytes(); byte[] fam = "fam".getBytes(); byte[] qf1 = "qf1".getBytes(); - + long ts = System.currentTimeMillis(); - + Delete delete = new Delete(row); delete.deleteColumn(fam, qf1, ts); - + byte[] sb = Writables.getBytes(delete); Delete desDelete = (Delete)Writables.getWritable(sb, new Delete()); @@ -247,29 +247,29 @@ public class TestSerialization extends HBaseTestCase { } } } - + public void testGet() throws Exception{ byte[] row = "row".getBytes(); byte[] fam = 
"fam".getBytes(); byte[] qf1 = "qf1".getBytes(); - + long ts = System.currentTimeMillis(); int maxVersions = 2; long lockid = 5; RowLock rowLock = new RowLock(lockid); - + Get get = new Get(row, rowLock); get.addColumn(fam, qf1); get.setTimeRange(ts, ts+1); get.setMaxVersions(maxVersions); - + byte[] sb = Writables.getBytes(get); Get desGet = (Get)Writables.getWritable(sb, new Get()); assertTrue(Bytes.equals(get.getRow(), desGet.getRow())); Set set = null; Set desSet = null; - + for(Map.Entry> entry : get.getFamilyMap().entrySet()){ assertTrue(desGet.getFamilyMap().containsKey(entry.getKey())); @@ -279,7 +279,7 @@ public class TestSerialization extends HBaseTestCase { assertTrue(desSet.contains(qualifier)); } } - + assertEquals(get.getLockId(), desGet.getLockId()); assertEquals(get.getMaxVersions(), desGet.getMaxVersions()); TimeRange tr = get.getTimeRange(); @@ -287,22 +287,22 @@ public class TestSerialization extends HBaseTestCase { assertEquals(tr.getMax(), desTr.getMax()); assertEquals(tr.getMin(), desTr.getMin()); } - + public void testScan() throws Exception{ byte[] startRow = "startRow".getBytes(); byte[] stopRow = "stopRow".getBytes(); byte[] fam = "fam".getBytes(); byte[] qf1 = "qf1".getBytes(); - + long ts = System.currentTimeMillis(); int maxVersions = 2; - + Scan scan = new Scan(startRow, stopRow); scan.addColumn(fam, qf1); scan.setTimeRange(ts, ts+1); scan.setMaxVersions(maxVersions); - + byte[] sb = Writables.getBytes(scan); Scan desScan = (Scan)Writables.getWritable(sb, new Scan()); @@ -311,7 +311,7 @@ public class TestSerialization extends HBaseTestCase { assertEquals(scan.getCacheBlocks(), desScan.getCacheBlocks()); Set set = null; Set desSet = null; - + for(Map.Entry> entry : scan.getFamilyMap().entrySet()){ assertTrue(desScan.getFamilyMap().containsKey(entry.getKey())); @@ -320,7 +320,7 @@ public class TestSerialization extends HBaseTestCase { for(byte[] column : set){ assertTrue(desSet.contains(column)); } - + // Test filters are serialized properly. 
scan = new Scan(startRow); byte [] prefix = Bytes.toBytes(getName()); @@ -330,14 +330,14 @@ public class TestSerialization extends HBaseTestCase { Filter f = desScan.getFilter(); assertTrue(f instanceof PrefixFilter); } - + assertEquals(scan.getMaxVersions(), desScan.getMaxVersions()); TimeRange tr = scan.getTimeRange(); TimeRange desTr = desScan.getTimeRange(); assertEquals(tr.getMax(), desTr.getMax()); assertEquals(tr.getMin(), desTr.getMin()); } - + public void testResultEmpty() throws Exception { List keys = new ArrayList(); Result r = new Result(keys); @@ -346,33 +346,33 @@ public class TestSerialization extends HBaseTestCase { Result deserializedR = (Result)Writables.getWritable(rb, new Result()); assertTrue(deserializedR.isEmpty()); } - - + + public void testResult() throws Exception { byte [] rowA = Bytes.toBytes("rowA"); byte [] famA = Bytes.toBytes("famA"); byte [] qfA = Bytes.toBytes("qfA"); byte [] valueA = Bytes.toBytes("valueA"); - + byte [] rowB = Bytes.toBytes("rowB"); byte [] famB = Bytes.toBytes("famB"); byte [] qfB = Bytes.toBytes("qfB"); byte [] valueB = Bytes.toBytes("valueB"); - + KeyValue kvA = new KeyValue(rowA, famA, qfA, valueA); KeyValue kvB = new KeyValue(rowB, famB, qfB, valueB); - + Result result = new Result(new KeyValue[]{kvA, kvB}); - + byte [] rb = Writables.getBytes(result); Result deResult = (Result)Writables.getWritable(rb, new Result()); - + assertTrue("results are not equivalent, first key mismatch", result.sorted()[0].equals(deResult.sorted()[0])); - + assertTrue("results are not equivalent, second key mismatch", result.sorted()[1].equals(deResult.sorted()[1])); - + // Test empty Result Result r = new Result(); byte [] b = Writables.getBytes(r); @@ -385,25 +385,25 @@ public class TestSerialization extends HBaseTestCase { byte [] famA = Bytes.toBytes("famA"); byte [] qfA = Bytes.toBytes("qfA"); byte [] valueA = Bytes.toBytes("valueA"); - + byte [] rowB = Bytes.toBytes("rowB"); byte [] famB = Bytes.toBytes("famB"); byte [] qfB = Bytes.toBytes("qfB"); byte [] valueB = Bytes.toBytes("valueB"); - + KeyValue kvA = new KeyValue(rowA, famA, qfA, valueA); KeyValue kvB = new KeyValue(rowB, famB, qfB, valueB); - + Result result = new Result(new KeyValue[]{kvA, kvB}); - + byte [] rb = Writables.getBytes(result); - - + + // Call getRow() first Result deResult = (Result)Writables.getWritable(rb, new Result()); byte [] row = deResult.getRow(); assertTrue(Bytes.equals(row, rowA)); - + // Call sorted() first deResult = (Result)Writables.getWritable(rb, new Result()); assertTrue("results are not equivalent, first key mismatch", @@ -417,44 +417,44 @@ public class TestSerialization extends HBaseTestCase { result.raw()[0].equals(deResult.raw()[0])); assertTrue("results are not equivalent, second key mismatch", result.raw()[1].equals(deResult.raw()[1])); - - + + } - + public void testResultArray() throws Exception { byte [] rowA = Bytes.toBytes("rowA"); byte [] famA = Bytes.toBytes("famA"); byte [] qfA = Bytes.toBytes("qfA"); byte [] valueA = Bytes.toBytes("valueA"); - + byte [] rowB = Bytes.toBytes("rowB"); byte [] famB = Bytes.toBytes("famB"); byte [] qfB = Bytes.toBytes("qfB"); byte [] valueB = Bytes.toBytes("valueB"); - + KeyValue kvA = new KeyValue(rowA, famA, qfA, valueA); KeyValue kvB = new KeyValue(rowB, famB, qfB, valueB); - + Result result1 = new Result(new KeyValue[]{kvA, kvB}); Result result2 = new Result(new KeyValue[]{kvB}); Result result3 = new Result(new KeyValue[]{kvB}); - + Result [] results = new Result [] {result1, result2, result3}; - + 
ByteArrayOutputStream byteStream = new ByteArrayOutputStream(); DataOutputStream out = new DataOutputStream(byteStream); Result.writeArray(out, results); - + byte [] rb = byteStream.toByteArray(); - + DataInputBuffer in = new DataInputBuffer(); in.reset(rb, 0, rb.length); - + Result [] deResults = Result.readArray(in); - + assertTrue(results.length == deResults.length); - + for(int i=0;i keys = new ArrayList(); Result r = new Result(keys); @@ -477,61 +477,61 @@ public class TestSerialization extends HBaseTestCase { ByteArrayOutputStream byteStream = new ByteArrayOutputStream(); DataOutputStream out = new DataOutputStream(byteStream); - + Result.writeArray(out, results); - + results = null; - + byteStream = new ByteArrayOutputStream(); out = new DataOutputStream(byteStream); Result.writeArray(out, results); - + byte [] rb = byteStream.toByteArray(); - + DataInputBuffer in = new DataInputBuffer(); in.reset(rb, 0, rb.length); - + Result [] deResults = Result.readArray(in); - + assertTrue(deResults.length == 0); - + results = new Result[0]; byteStream = new ByteArrayOutputStream(); out = new DataOutputStream(byteStream); Result.writeArray(out, results); - + rb = byteStream.toByteArray(); - + in = new DataInputBuffer(); in.reset(rb, 0, rb.length); - + deResults = Result.readArray(in); - + assertTrue(deResults.length == 0); - + } - + public void testTimeRange(String[] args) throws Exception{ TimeRange tr = new TimeRange(0,5); byte [] mb = Writables.getBytes(tr); TimeRange deserializedTr = (TimeRange)Writables.getWritable(mb, new TimeRange()); - + assertEquals(tr.getMax(), deserializedTr.getMax()); assertEquals(tr.getMin(), deserializedTr.getMin()); - + } - + public void testKeyValue2() throws Exception { byte[] row = getName().getBytes(); byte[] fam = "fam".getBytes(); byte[] qf = "qf".getBytes(); long ts = System.currentTimeMillis(); byte[] val = "val".getBytes(); - + KeyValue kv = new KeyValue(row, fam, qf, ts, val); - + byte [] mb = Writables.getBytes(kv); KeyValue deserializedKv = (KeyValue)Writables.getWritable(mb, new KeyValue()); diff --git a/core/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java b/core/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java index dfc1c4e..1105509 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java +++ b/core/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java @@ -35,10 +35,10 @@ public class TimestampTestBase extends HBaseTestCase { private static final long T0 = 10L; private static final long T1 = 100L; private static final long T2 = 200L; - + private static final byte [] FAMILY_NAME = Bytes.toBytes("colfamily1"); private static final byte [] QUALIFIER_NAME = Bytes.toBytes("contents"); - + private static final byte [] ROW = Bytes.toBytes("row"); /* @@ -64,11 +64,11 @@ public class TimestampTestBase extends HBaseTestCase { // Verify that I get back T2 through T1 -- that the latest version has // been deleted. assertVersions(incommon, new long [] {T2, T1, T0}); - + // Flush everything out to disk and then retry flusher.flushcache(); assertVersions(incommon, new long [] {T2, T1, T0}); - + // Now add, back a latest so I can test remove other than the latest. put(incommon); assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1}); @@ -88,15 +88,15 @@ public class TimestampTestBase extends HBaseTestCase { Delete delete = new Delete(ROW); delete.deleteColumns(FAMILY_NAME, QUALIFIER_NAME, T2); incommon.delete(delete, null, true); - + // Should only be current value in set. 
Assert this is so assertOnlyLatest(incommon, HConstants.LATEST_TIMESTAMP); - + // Flush everything out to disk and then redo above tests flusher.flushcache(); assertOnlyLatest(incommon, HConstants.LATEST_TIMESTAMP); } - + private static void assertOnlyLatest(final Incommon incommon, final long currentTime) throws IOException { @@ -109,7 +109,7 @@ public class TimestampTestBase extends HBaseTestCase { long time = Bytes.toLong(result.sorted()[0].getValue()); assertEquals(time, currentTime); } - + /* * Assert that returned versions match passed in timestamps and that results * are returned in the right order. Assert that values when converted to @@ -141,10 +141,10 @@ public class TimestampTestBase extends HBaseTestCase { t = Bytes.toLong(kvs[i].getValue()); assertEquals(tss[i], t); } - + // Determine highest stamp to set as next max stamp long maxStamp = kvs[0].getTimestamp(); - + // Specify a timestamp get multiple versions. get = new Get(ROW); get.addColumn(FAMILY_NAME, QUALIFIER_NAME); @@ -157,11 +157,11 @@ public class TimestampTestBase extends HBaseTestCase { t = Bytes.toLong(kvs[i-1].getValue()); assertEquals(tss[i], t); } - + // Test scanner returns expected version assertScanContentTimestamp(incommon, tss[0]); } - + /* * Run test scanning different timestamps. * @param incommon @@ -186,9 +186,9 @@ public class TimestampTestBase extends HBaseTestCase { assertEquals(count, assertScanContentTimestamp(incommon, T0)); assertEquals(count, assertScanContentTimestamp(incommon, T1)); } - + /* - * Assert that the scan returns only values < timestamp. + * Assert that the scan returns only values < timestamp. * @param r * @param ts * @return Count of items scanned. @@ -215,22 +215,22 @@ public class TimestampTestBase extends HBaseTestCase { // value.clear(); // } } finally { - scanner.close(); + scanner.close(); } return count; } - + public static void put(final Incommon loader, final long ts) throws IOException { put(loader, Bytes.toBytes(ts), ts); } - + public static void put(final Incommon loader) throws IOException { long ts = HConstants.LATEST_TIMESTAMP; put(loader, Bytes.toBytes(ts), ts); } - + /* * Put values. * @param loader @@ -245,7 +245,7 @@ public class TimestampTestBase extends HBaseTestCase { put.add(FAMILY_NAME, QUALIFIER_NAME, bytes); loader.put(put); } - + public static void delete(final Incommon loader) throws IOException { delete(loader, null); } diff --git a/core/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/core/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index 905bd29..7a3f34c 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/core/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -428,8 +428,8 @@ public class TestAdmin { this.admin.createTable(htd); HTable table = new HTable("myTestTable"); HTableDescriptor confirmedHtd = table.getTableDescriptor(); - - assertEquals(htd.compareTo(confirmedHtd), 0); + + assertEquals(htd.compareTo(confirmedHtd), 0); } } diff --git a/core/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/core/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index f9bae50..d92e7b4 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/core/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -123,7 +123,7 @@ public class TestFromClientSide { /** * Test from client side of an involved filter against a multi family that * involves deletes. 
- * + * * @throws Exception */ @Test @@ -294,7 +294,7 @@ public class TestFromClientSide { assertEquals(endKeyCount - 1, minusOneCount); // For above test... study logs. Make sure we do "Finished with scanning.." // in first region and that we do not fall into the next region. - + key = new byte [] {'a', 'a', 'a'}; int countBBB = countRows(t, createScanWithRowFilter(key, null, CompareFilter.CompareOp.EQUAL)); @@ -443,10 +443,10 @@ public class TestFromClientSide { HTable ht = TEST_UTIL.createTable(TABLE, FAMILY); byte [][] ROWS = makeN(ROW, 10); byte [][] QUALIFIERS = { - Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), - Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), - Bytes.toBytes("col4--"), Bytes.toBytes("col5--"), - Bytes.toBytes("col6--"), Bytes.toBytes("col7--"), + Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), + Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), + Bytes.toBytes("col4--"), Bytes.toBytes("col5--"), + Bytes.toBytes("col6--"), Bytes.toBytes("col7--"), Bytes.toBytes("col8--"), Bytes.toBytes("col9--") }; for(int i=0;i<10;i++) { @@ -464,7 +464,7 @@ public class TestFromClientSide { for(Result result : ht.getScanner(scan)) { assertEquals(result.size(), 1); assertTrue(Bytes.equals(result.raw()[0].getRow(), ROWS[expectedIndex])); - assertTrue(Bytes.equals(result.raw()[0].getQualifier(), + assertTrue(Bytes.equals(result.raw()[0].getQualifier(), QUALIFIERS[expectedIndex])); expectedIndex++; } @@ -480,108 +480,108 @@ public class TestFromClientSide { byte [] TABLE = Bytes.toBytes("testSimpleMissing"); HTable ht = TEST_UTIL.createTable(TABLE, FAMILY); byte [][] ROWS = makeN(ROW, 4); - + // Try to get a row on an empty table Get get = new Get(ROWS[0]); Result result = ht.get(get); assertEmptyResult(result); - + get = new Get(ROWS[0]); get.addFamily(FAMILY); result = ht.get(get); assertEmptyResult(result); - + get = new Get(ROWS[0]); get.addColumn(FAMILY, QUALIFIER); result = ht.get(get); assertEmptyResult(result); - + Scan scan = new Scan(); result = getSingleScanResult(ht, scan); assertNullResult(result); - - + + scan = new Scan(ROWS[0]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + scan = new Scan(ROWS[0],ROWS[1]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + scan = new Scan(); scan.addFamily(FAMILY); result = getSingleScanResult(ht, scan); assertNullResult(result); - + scan = new Scan(); scan.addColumn(FAMILY, QUALIFIER); result = getSingleScanResult(ht, scan); assertNullResult(result); - + // Insert a row - + Put put = new Put(ROWS[2]); put.add(FAMILY, QUALIFIER, VALUE); ht.put(put); - + // Try to get empty rows around it - + get = new Get(ROWS[1]); result = ht.get(get); assertEmptyResult(result); - + get = new Get(ROWS[0]); get.addFamily(FAMILY); result = ht.get(get); assertEmptyResult(result); - + get = new Get(ROWS[3]); get.addColumn(FAMILY, QUALIFIER); result = ht.get(get); assertEmptyResult(result); - + // Try to scan empty rows around it - + scan = new Scan(ROWS[3]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + scan = new Scan(ROWS[0],ROWS[2]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + // Make sure we can actually get the row - + get = new Get(ROWS[2]); result = ht.get(get); assertSingleResult(result, ROWS[2], FAMILY, QUALIFIER, VALUE); - + get = new Get(ROWS[2]); get.addFamily(FAMILY); result = ht.get(get); assertSingleResult(result, ROWS[2], FAMILY, QUALIFIER, VALUE); - + get = new Get(ROWS[2]); get.addColumn(FAMILY, QUALIFIER); result = ht.get(get); 
assertSingleResult(result, ROWS[2], FAMILY, QUALIFIER, VALUE); - + // Make sure we can scan the row - + scan = new Scan(); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[2], FAMILY, QUALIFIER, VALUE); - + scan = new Scan(ROWS[0],ROWS[3]); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[2], FAMILY, QUALIFIER, VALUE); - + scan = new Scan(ROWS[2],ROWS[3]); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[2], FAMILY, QUALIFIER, VALUE); } - + /** * Test basic puts, gets, scans, and deletes for a single row * in a multiple family table. @@ -593,51 +593,51 @@ public class TestFromClientSide { byte [][] FAMILIES = makeNAscii(FAMILY, 10); byte [][] QUALIFIERS = makeN(QUALIFIER, 10); byte [][] VALUES = makeN(VALUE, 10); - + HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES); - + Get get; Scan scan; Delete delete; Put put; Result result; - + //////////////////////////////////////////////////////////////////////////// // Insert one column to one family //////////////////////////////////////////////////////////////////////////// - + put = new Put(ROWS[0]); put.add(FAMILIES[4], QUALIFIERS[0], VALUES[0]); ht.put(put); - + // Get the single column getVerifySingleColumn(ht, ROWS, 0, FAMILIES, 4, QUALIFIERS, 0, VALUES, 0); - + // Scan the single column scanVerifySingleColumn(ht, ROWS, 0, FAMILIES, 4, QUALIFIERS, 0, VALUES, 0); - + // Get empty results around inserted column getVerifySingleEmpty(ht, ROWS, 0, FAMILIES, 4, QUALIFIERS, 0); - + // Scan empty results around inserted column scanVerifySingleEmpty(ht, ROWS, 0, FAMILIES, 4, QUALIFIERS, 0); - + //////////////////////////////////////////////////////////////////////////// // Flush memstore and run same tests from storefiles //////////////////////////////////////////////////////////////////////////// - + TEST_UTIL.flush(); - + // Redo get and scan tests from storefile getVerifySingleColumn(ht, ROWS, 0, FAMILIES, 4, QUALIFIERS, 0, VALUES, 0); scanVerifySingleColumn(ht, ROWS, 0, FAMILIES, 4, QUALIFIERS, 0, VALUES, 0); getVerifySingleEmpty(ht, ROWS, 0, FAMILIES, 4, QUALIFIERS, 0); scanVerifySingleEmpty(ht, ROWS, 0, FAMILIES, 4, QUALIFIERS, 0); - + //////////////////////////////////////////////////////////////////////////// // Now, Test reading from memstore and storefiles at once //////////////////////////////////////////////////////////////////////////// - + // Insert multiple columns to two other families put = new Put(ROWS[0]); put.add(FAMILIES[2], QUALIFIERS[2], VALUES[2]); @@ -648,23 +648,23 @@ public class TestFromClientSide { put.add(FAMILIES[7], QUALIFIERS[7], VALUES[7]); put.add(FAMILIES[9], QUALIFIERS[0], VALUES[0]); ht.put(put); - + // Get multiple columns across multiple families and get empties around it singleRowGetTest(ht, ROWS, FAMILIES, QUALIFIERS, VALUES); - + // Scan multiple columns across multiple families and scan empties around it singleRowScanTest(ht, ROWS, FAMILIES, QUALIFIERS, VALUES); //////////////////////////////////////////////////////////////////////////// // Flush the table again //////////////////////////////////////////////////////////////////////////// - + TEST_UTIL.flush(); - + // Redo tests again singleRowGetTest(ht, ROWS, FAMILIES, QUALIFIERS, VALUES); singleRowScanTest(ht, ROWS, FAMILIES, QUALIFIERS, VALUES); - + // Insert more data to memstore put = new Put(ROWS[0]); put.add(FAMILIES[6], QUALIFIERS[5], VALUES[5]); @@ -672,225 +672,225 @@ public class TestFromClientSide { put.add(FAMILIES[6], QUALIFIERS[9], VALUES[9]); put.add(FAMILIES[4], 
QUALIFIERS[3], VALUES[3]); ht.put(put); - + //////////////////////////////////////////////////////////////////////////// // Delete a storefile column //////////////////////////////////////////////////////////////////////////// delete = new Delete(ROWS[0]); delete.deleteColumns(FAMILIES[6], QUALIFIERS[7]); ht.delete(delete); - + // Try to get deleted column get = new Get(ROWS[0]); get.addColumn(FAMILIES[6], QUALIFIERS[7]); result = ht.get(get); assertEmptyResult(result); - + // Try to scan deleted column scan = new Scan(); scan.addColumn(FAMILIES[6], QUALIFIERS[7]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + // Make sure we can still get a column before it and after it get = new Get(ROWS[0]); get.addColumn(FAMILIES[6], QUALIFIERS[6]); result = ht.get(get); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[6], VALUES[6]); - + get = new Get(ROWS[0]); get.addColumn(FAMILIES[6], QUALIFIERS[8]); result = ht.get(get); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[8], VALUES[8]); - + // Make sure we can still scan a column before it and after it scan = new Scan(); scan.addColumn(FAMILIES[6], QUALIFIERS[6]); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[6], VALUES[6]); - + scan = new Scan(); scan.addColumn(FAMILIES[6], QUALIFIERS[8]); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[8], VALUES[8]); - + //////////////////////////////////////////////////////////////////////////// // Delete a memstore column //////////////////////////////////////////////////////////////////////////// delete = new Delete(ROWS[0]); delete.deleteColumns(FAMILIES[6], QUALIFIERS[8]); ht.delete(delete); - + // Try to get deleted column get = new Get(ROWS[0]); get.addColumn(FAMILIES[6], QUALIFIERS[8]); result = ht.get(get); assertEmptyResult(result); - + // Try to scan deleted column scan = new Scan(); scan.addColumn(FAMILIES[6], QUALIFIERS[8]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + // Make sure we can still get a column before it and after it get = new Get(ROWS[0]); get.addColumn(FAMILIES[6], QUALIFIERS[6]); result = ht.get(get); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[6], VALUES[6]); - + get = new Get(ROWS[0]); get.addColumn(FAMILIES[6], QUALIFIERS[9]); result = ht.get(get); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[9], VALUES[9]); - + // Make sure we can still scan a column before it and after it scan = new Scan(); scan.addColumn(FAMILIES[6], QUALIFIERS[6]); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[6], VALUES[6]); - + scan = new Scan(); scan.addColumn(FAMILIES[6], QUALIFIERS[9]); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[9], VALUES[9]); - + //////////////////////////////////////////////////////////////////////////// // Delete joint storefile/memstore family //////////////////////////////////////////////////////////////////////////// - + delete = new Delete(ROWS[0]); delete.deleteFamily(FAMILIES[4]); ht.delete(delete); - + // Try to get storefile column in deleted family get = new Get(ROWS[0]); get.addColumn(FAMILIES[4], QUALIFIERS[4]); result = ht.get(get); assertEmptyResult(result); - + // Try to get memstore column in deleted family get = new Get(ROWS[0]); get.addColumn(FAMILIES[4], QUALIFIERS[3]); result = ht.get(get); assertEmptyResult(result); - + // Try to get deleted family get 
= new Get(ROWS[0]); get.addFamily(FAMILIES[4]); result = ht.get(get); assertEmptyResult(result); - + // Try to scan storefile column in deleted family scan = new Scan(); scan.addColumn(FAMILIES[4], QUALIFIERS[4]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + // Try to scan memstore column in deleted family scan = new Scan(); scan.addColumn(FAMILIES[4], QUALIFIERS[3]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + // Try to scan deleted family scan = new Scan(); scan.addFamily(FAMILIES[4]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + // Make sure we can still get another family get = new Get(ROWS[0]); get.addColumn(FAMILIES[2], QUALIFIERS[2]); result = ht.get(get); assertSingleResult(result, ROWS[0], FAMILIES[2], QUALIFIERS[2], VALUES[2]); - + get = new Get(ROWS[0]); get.addColumn(FAMILIES[6], QUALIFIERS[9]); result = ht.get(get); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[9], VALUES[9]); - + // Make sure we can still scan another family scan = new Scan(); scan.addColumn(FAMILIES[6], QUALIFIERS[6]); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[6], VALUES[6]); - + scan = new Scan(); scan.addColumn(FAMILIES[6], QUALIFIERS[9]); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[9], VALUES[9]); - + //////////////////////////////////////////////////////////////////////////// // Flush everything and rerun delete tests //////////////////////////////////////////////////////////////////////////// - + TEST_UTIL.flush(); - + // Try to get storefile column in deleted family get = new Get(ROWS[0]); get.addColumn(FAMILIES[4], QUALIFIERS[4]); result = ht.get(get); assertEmptyResult(result); - + // Try to get memstore column in deleted family get = new Get(ROWS[0]); get.addColumn(FAMILIES[4], QUALIFIERS[3]); result = ht.get(get); assertEmptyResult(result); - + // Try to get deleted family get = new Get(ROWS[0]); get.addFamily(FAMILIES[4]); result = ht.get(get); assertEmptyResult(result); - + // Try to scan storefile column in deleted family scan = new Scan(); scan.addColumn(FAMILIES[4], QUALIFIERS[4]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + // Try to scan memstore column in deleted family scan = new Scan(); scan.addColumn(FAMILIES[4], QUALIFIERS[3]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + // Try to scan deleted family scan = new Scan(); scan.addFamily(FAMILIES[4]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + // Make sure we can still get another family get = new Get(ROWS[0]); get.addColumn(FAMILIES[2], QUALIFIERS[2]); result = ht.get(get); assertSingleResult(result, ROWS[0], FAMILIES[2], QUALIFIERS[2], VALUES[2]); - + get = new Get(ROWS[0]); get.addColumn(FAMILIES[6], QUALIFIERS[9]); result = ht.get(get); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[9], VALUES[9]); - + // Make sure we can still scan another family scan = new Scan(); scan.addColumn(FAMILIES[6], QUALIFIERS[6]); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[6], VALUES[6]); - + scan = new Scan(); scan.addColumn(FAMILIES[6], QUALIFIERS[9]); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[0], FAMILIES[6], QUALIFIERS[9], VALUES[9]); - + } @Test public void testNull() throws Exception { byte [] TABLE = Bytes.toBytes("testNull"); - + // Null table name (should NOT work) try { 
TEST_UTIL.createTable(null, FAMILY); @@ -902,9 +902,9 @@ public class TestFromClientSide { TEST_UTIL.createTable(TABLE, (byte[])null); fail("Creating a table with a null family passed, should fail"); } catch(Exception e) {} - + HTable ht = TEST_UTIL.createTable(TABLE, FAMILY); - + // Null row (should NOT work) try { Put put = new Put((byte[])null); @@ -912,7 +912,7 @@ public class TestFromClientSide { ht.put(put); fail("Inserting a null row worked, should throw exception"); } catch(Exception e) {} - + // Null qualifier (should work) { Put put = new Put(ROW); @@ -941,55 +941,55 @@ public class TestFromClientSide { Put put = new Put(ROW); put.add(FAMILY, HConstants.EMPTY_BYTE_ARRAY, VALUE); ht.put(put); - + getTestNull(ht, ROW, FAMILY, VALUE); - + scanTestNull(ht, ROW, FAMILY, VALUE); - + // Flush and try again - + TEST_UTIL.flush(); - + getTestNull(ht, ROW, FAMILY, VALUE); - + scanTestNull(ht, ROW, FAMILY, VALUE); - + Delete delete = new Delete(ROW); delete.deleteColumns(FAMILY, HConstants.EMPTY_BYTE_ARRAY); ht.delete(delete); - + Get get = new Get(ROW); Result result = ht.get(get); assertEmptyResult(result); - + } catch(Exception e) { throw new IOException("Using a row with null qualifier threw exception, should "); } - + // Null value try { Put put = new Put(ROW); put.add(FAMILY, QUALIFIER, null); ht.put(put); - + Get get = new Get(ROW); get.addColumn(FAMILY, QUALIFIER); Result result = ht.get(get); assertSingleResult(result, ROW, FAMILY, QUALIFIER, null); - + Scan scan = new Scan(); scan.addColumn(FAMILY, QUALIFIER); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROW, FAMILY, QUALIFIER, null); - + Delete delete = new Delete(ROW); delete.deleteColumns(FAMILY, QUALIFIER); ht.delete(delete); - + get = new Get(ROW); result = ht.get(get); assertEmptyResult(result); - + } catch(Exception e) { throw new IOException("Null values should be allowed, but threw exception"); } @@ -998,12 +998,12 @@ public class TestFromClientSide { @Test public void testVersions() throws Exception { byte [] TABLE = Bytes.toBytes("testVersions"); - + long [] STAMPS = makeStamps(20); byte [][] VALUES = makeNAscii(VALUE, 20); - + HTable ht = TEST_UTIL.createTable(TABLE, FAMILY, 10); - + // Insert 4 versions of same column Put put = new Put(ROW); put.add(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); @@ -1011,7 +1011,7 @@ public class TestFromClientSide { put.add(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); put.add(FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); ht.put(put); - + // Verify we can get each one properly getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); @@ -1021,7 +1021,7 @@ public class TestFromClientSide { scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); - + // Verify we don't accidentally get others getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]); @@ -1029,30 +1029,30 @@ public class TestFromClientSide { scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]); scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[6]); - + // Ensure maxVersions in query is respected Get get = new Get(ROW); get.addColumn(FAMILY, QUALIFIER); get.setMaxVersions(2); Result result = ht.get(get); - 
assertNResult(result, ROW, FAMILY, QUALIFIER, + assertNResult(result, ROW, FAMILY, QUALIFIER, new long [] {STAMPS[4], STAMPS[5]}, new byte[][] {VALUES[4], VALUES[5]}, 0, 1); - + Scan scan = new Scan(ROW); scan.addColumn(FAMILY, QUALIFIER); scan.setMaxVersions(2); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILY, QUALIFIER, + assertNResult(result, ROW, FAMILY, QUALIFIER, new long [] {STAMPS[4], STAMPS[5]}, new byte[][] {VALUES[4], VALUES[5]}, 0, 1); - + // Flush and redo TEST_UTIL.flush(); - + // Verify we can get each one properly getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); @@ -1062,7 +1062,7 @@ public class TestFromClientSide { scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); - + // Verify we don't accidentally get others getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]); @@ -1070,27 +1070,27 @@ public class TestFromClientSide { scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]); scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[6]); - + // Ensure maxVersions in query is respected get = new Get(ROW); get.addColumn(FAMILY, QUALIFIER); get.setMaxVersions(2); result = ht.get(get); - assertNResult(result, ROW, FAMILY, QUALIFIER, + assertNResult(result, ROW, FAMILY, QUALIFIER, new long [] {STAMPS[4], STAMPS[5]}, new byte[][] {VALUES[4], VALUES[5]}, 0, 1); - + scan = new Scan(ROW); scan.addColumn(FAMILY, QUALIFIER); scan.setMaxVersions(2); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILY, QUALIFIER, + assertNResult(result, ROW, FAMILY, QUALIFIER, new long [] {STAMPS[4], STAMPS[5]}, new byte[][] {VALUES[4], VALUES[5]}, 0, 1); - - + + // Add some memstore and retest // Insert 4 more versions of same column and a dupe @@ -1100,42 +1100,42 @@ public class TestFromClientSide { put.add(FAMILY, QUALIFIER, STAMPS[7], VALUES[7]); put.add(FAMILY, QUALIFIER, STAMPS[8], VALUES[8]); ht.put(put); - + // Ensure maxVersions in query is respected get = new Get(ROW); get.addColumn(FAMILY, QUALIFIER); get.setMaxVersions(); result = ht.get(get); - assertNResult(result, ROW, FAMILY, QUALIFIER, + assertNResult(result, ROW, FAMILY, QUALIFIER, new long [] {STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8]}, new byte[][] {VALUES[1], VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[7], VALUES[8]}, 0, 7); - + scan = new Scan(ROW); scan.addColumn(FAMILY, QUALIFIER); scan.setMaxVersions(); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILY, QUALIFIER, + assertNResult(result, ROW, FAMILY, QUALIFIER, new long [] {STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8]}, new byte[][] {VALUES[1], VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[7], VALUES[8]}, 0, 7); - + get = new Get(ROW); get.setMaxVersions(); result = ht.get(get); - assertNResult(result, ROW, FAMILY, QUALIFIER, + assertNResult(result, ROW, FAMILY, QUALIFIER, new long [] {STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8]}, new byte[][] {VALUES[1], VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[7], 
VALUES[8]}, 0, 7); - + scan = new Scan(ROW); scan.setMaxVersions(); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILY, QUALIFIER, + assertNResult(result, ROW, FAMILY, QUALIFIER, new long [] {STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8]}, new byte[][] {VALUES[1], VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[7], VALUES[8]}, 0, 7); - + // Verify we can get each one properly getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); @@ -1145,13 +1145,13 @@ public class TestFromClientSide { scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[7], VALUES[7]); - + // Verify we don't accidentally get others getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[9]); scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[9]); - + // Ensure maxVersions of table is respected TEST_UTIL.flush(); @@ -1163,50 +1163,50 @@ public class TestFromClientSide { put.add(FAMILY, QUALIFIER, STAMPS[13], VALUES[13]); put.add(FAMILY, QUALIFIER, STAMPS[15], VALUES[15]); ht.put(put); - + get = new Get(ROW); get.addColumn(FAMILY, QUALIFIER); get.setMaxVersions(Integer.MAX_VALUE); result = ht.get(get); - assertNResult(result, ROW, FAMILY, QUALIFIER, + assertNResult(result, ROW, FAMILY, QUALIFIER, new long [] {STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8], STAMPS[9], STAMPS[11], STAMPS[13], STAMPS[15]}, new byte[][] {VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[7], VALUES[8], VALUES[9], VALUES[11], VALUES[13], VALUES[15]}, 0, 9); - + scan = new Scan(ROW); scan.addColumn(FAMILY, QUALIFIER); scan.setMaxVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILY, QUALIFIER, + assertNResult(result, ROW, FAMILY, QUALIFIER, new long [] {STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8], STAMPS[9], STAMPS[11], STAMPS[13], STAMPS[15]}, new byte[][] {VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[7], VALUES[8], VALUES[9], VALUES[11], VALUES[13], VALUES[15]}, 0, 9); - + // Delete a version in the memstore and a version in a storefile Delete delete = new Delete(ROW); delete.deleteColumn(FAMILY, QUALIFIER, STAMPS[11]); delete.deleteColumn(FAMILY, QUALIFIER, STAMPS[7]); ht.delete(delete); - + // Test that it's gone get = new Get(ROW); get.addColumn(FAMILY, QUALIFIER); get.setMaxVersions(Integer.MAX_VALUE); result = ht.get(get); - assertNResult(result, ROW, FAMILY, QUALIFIER, + assertNResult(result, ROW, FAMILY, QUALIFIER, new long [] {STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[8], STAMPS[9], STAMPS[13], STAMPS[15]}, new byte[][] {VALUES[1], VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[8], VALUES[9], VALUES[13], VALUES[15]}, 0, 9); - + scan = new Scan(ROW); scan.addColumn(FAMILY, QUALIFIER); scan.setMaxVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILY, QUALIFIER, + assertNResult(result, ROW, FAMILY, QUALIFIER, new long [] {STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[8], STAMPS[9], STAMPS[13], STAMPS[15]}, new byte[][] {VALUES[1], VALUES[2], VALUES[3], VALUES[4], 
VALUES[5], VALUES[6], VALUES[8], VALUES[9], VALUES[13], VALUES[15]}, 0, 9); - + } @Test @@ -1216,8 +1216,8 @@ public class TestFromClientSide { int [] LIMITS = {1,3,5}; long [] STAMPS = makeStamps(10); byte [][] VALUES = makeNAscii(VALUE, 10); - HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES, LIMITS); - + HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES, LIMITS); + // Insert limit + 1 on each family Put put = new Put(ROW); put.add(FAMILIES[0], QUALIFIER, STAMPS[0], VALUES[0]); @@ -1234,123 +1234,123 @@ public class TestFromClientSide { put.add(FAMILIES[2], QUALIFIER, STAMPS[5], VALUES[5]); put.add(FAMILIES[2], QUALIFIER, STAMPS[6], VALUES[6]); ht.put(put); - + // Verify we only get the right number out of each // Family0 - + Get get = new Get(ROW); get.addColumn(FAMILIES[0], QUALIFIER); get.setMaxVersions(Integer.MAX_VALUE); Result result = ht.get(get); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long [] {STAMPS[1]}, new byte[][] {VALUES[1]}, 0, 0); - + get = new Get(ROW); get.addFamily(FAMILIES[0]); get.setMaxVersions(Integer.MAX_VALUE); result = ht.get(get); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long [] {STAMPS[1]}, new byte[][] {VALUES[1]}, 0, 0); - + Scan scan = new Scan(ROW); scan.addColumn(FAMILIES[0], QUALIFIER); scan.setMaxVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long [] {STAMPS[1]}, new byte[][] {VALUES[1]}, 0, 0); - + scan = new Scan(ROW); scan.addFamily(FAMILIES[0]); scan.setMaxVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long [] {STAMPS[1]}, new byte[][] {VALUES[1]}, 0, 0); - + // Family1 - + get = new Get(ROW); get.addColumn(FAMILIES[1], QUALIFIER); get.setMaxVersions(Integer.MAX_VALUE); result = ht.get(get); - assertNResult(result, ROW, FAMILIES[1], QUALIFIER, + assertNResult(result, ROW, FAMILIES[1], QUALIFIER, new long [] {STAMPS[1], STAMPS[2], STAMPS[3]}, new byte[][] {VALUES[1], VALUES[2], VALUES[3]}, 0, 2); - + get = new Get(ROW); get.addFamily(FAMILIES[1]); get.setMaxVersions(Integer.MAX_VALUE); result = ht.get(get); - assertNResult(result, ROW, FAMILIES[1], QUALIFIER, + assertNResult(result, ROW, FAMILIES[1], QUALIFIER, new long [] {STAMPS[1], STAMPS[2], STAMPS[3]}, new byte[][] {VALUES[1], VALUES[2], VALUES[3]}, 0, 2); - + scan = new Scan(ROW); scan.addColumn(FAMILIES[1], QUALIFIER); scan.setMaxVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[1], QUALIFIER, + assertNResult(result, ROW, FAMILIES[1], QUALIFIER, new long [] {STAMPS[1], STAMPS[2], STAMPS[3]}, new byte[][] {VALUES[1], VALUES[2], VALUES[3]}, 0, 2); - + scan = new Scan(ROW); scan.addFamily(FAMILIES[1]); scan.setMaxVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[1], QUALIFIER, + assertNResult(result, ROW, FAMILIES[1], QUALIFIER, new long [] {STAMPS[1], STAMPS[2], STAMPS[3]}, new byte[][] {VALUES[1], VALUES[2], VALUES[3]}, 0, 2); - + // Family2 - + get = new Get(ROW); get.addColumn(FAMILIES[2], QUALIFIER); get.setMaxVersions(Integer.MAX_VALUE); result = ht.get(get); - assertNResult(result, ROW, FAMILIES[2], QUALIFIER, + assertNResult(result, ROW, FAMILIES[2], QUALIFIER, new long [] 
{STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6]}, new byte[][] {VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6]}, 0, 4); - + get = new Get(ROW); get.addFamily(FAMILIES[2]); get.setMaxVersions(Integer.MAX_VALUE); result = ht.get(get); - assertNResult(result, ROW, FAMILIES[2], QUALIFIER, + assertNResult(result, ROW, FAMILIES[2], QUALIFIER, new long [] {STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6]}, new byte[][] {VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6]}, 0, 4); - + scan = new Scan(ROW); scan.addColumn(FAMILIES[2], QUALIFIER); scan.setMaxVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[2], QUALIFIER, + assertNResult(result, ROW, FAMILIES[2], QUALIFIER, new long [] {STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6]}, new byte[][] {VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6]}, 0, 4); - + scan = new Scan(ROW); scan.addFamily(FAMILIES[2]); scan.setMaxVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[2], QUALIFIER, + assertNResult(result, ROW, FAMILIES[2], QUALIFIER, new long [] {STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6]}, new byte[][] {VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6]}, 0, 4); - + // Try all families get = new Get(ROW); @@ -1358,7 +1358,7 @@ public class TestFromClientSide { result = ht.get(get); assertTrue("Expected 9 keys but received " + result.size(), result.size() == 9); - + get = new Get(ROW); get.addFamily(FAMILIES[0]); get.addFamily(FAMILIES[1]); @@ -1367,7 +1367,7 @@ public class TestFromClientSide { result = ht.get(get); assertTrue("Expected 9 keys but received " + result.size(), result.size() == 9); - + get = new Get(ROW); get.addColumn(FAMILIES[0], QUALIFIER); get.addColumn(FAMILIES[1], QUALIFIER); @@ -1376,13 +1376,13 @@ public class TestFromClientSide { result = ht.get(get); assertTrue("Expected 9 keys but received " + result.size(), result.size() == 9); - + scan = new Scan(ROW); scan.setMaxVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); assertTrue("Expected 9 keys but received " + result.size(), result.size() == 9); - + scan = new Scan(ROW); scan.setMaxVersions(Integer.MAX_VALUE); scan.addFamily(FAMILIES[0]); @@ -1391,7 +1391,7 @@ public class TestFromClientSide { result = getSingleScanResult(ht, scan); assertTrue("Expected 9 keys but received " + result.size(), result.size() == 9); - + scan = new Scan(ROW); scan.setMaxVersions(Integer.MAX_VALUE); scan.addColumn(FAMILIES[0], QUALIFIER); @@ -1400,47 +1400,47 @@ public class TestFromClientSide { result = getSingleScanResult(ht, scan); assertTrue("Expected 9 keys but received " + result.size(), result.size() == 9); - + } @Test public void testDeletes() throws Exception { byte [] TABLE = Bytes.toBytes("testDeletes"); - + byte [][] ROWS = makeNAscii(ROW, 6); byte [][] FAMILIES = makeNAscii(FAMILY, 3); byte [][] VALUES = makeN(VALUE, 5); long [] ts = {1000, 2000, 3000, 4000, 5000}; - + HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES); - + Put put = new Put(ROW); put.add(FAMILIES[0], QUALIFIER, ts[0], VALUES[0]); put.add(FAMILIES[0], QUALIFIER, ts[1], VALUES[1]); ht.put(put); - + Delete delete = new Delete(ROW); delete.deleteFamily(FAMILIES[0], ts[0]); ht.delete(delete); - + Get get = new Get(ROW); get.addFamily(FAMILIES[0]); get.setMaxVersions(Integer.MAX_VALUE); Result result = ht.get(get); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long [] {ts[1]}, new 
byte[][] {VALUES[1]}, 0, 0); - + Scan scan = new Scan(ROW); scan.addFamily(FAMILIES[0]); scan.setMaxVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long [] {ts[1]}, new byte[][] {VALUES[1]}, 0, 0); - + // Test delete latest version put = new Put(ROW); put.add(FAMILIES[0], QUALIFIER, ts[4], VALUES[4]); @@ -1450,107 +1450,107 @@ public class TestFromClientSide { put.add(FAMILIES[0], null, ts[2], VALUES[2]); put.add(FAMILIES[0], null, ts[3], VALUES[3]); ht.put(put); - + delete = new Delete(ROW); delete.deleteColumn(FAMILIES[0], QUALIFIER); ht.delete(delete); - + get = new Get(ROW); get.addColumn(FAMILIES[0], QUALIFIER); get.setMaxVersions(Integer.MAX_VALUE); result = ht.get(get); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long [] {ts[1], ts[2], ts[3]}, new byte[][] {VALUES[1], VALUES[2], VALUES[3]}, 0, 2); - + scan = new Scan(ROW); scan.addColumn(FAMILIES[0], QUALIFIER); scan.setMaxVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long [] {ts[1], ts[2], ts[3]}, new byte[][] {VALUES[1], VALUES[2], VALUES[3]}, 0, 2); - + // Test for HBASE-1847 delete = new Delete(ROW); delete.deleteColumn(FAMILIES[0], null); ht.delete(delete); - + // Cleanup null qualifier delete = new Delete(ROW); delete.deleteColumns(FAMILIES[0], null); ht.delete(delete); - + // Expected client behavior might be that you can re-put deleted values // But alas, this is not to be. We can't put them back in either case. - + put = new Put(ROW); put.add(FAMILIES[0], QUALIFIER, ts[0], VALUES[0]); put.add(FAMILIES[0], QUALIFIER, ts[4], VALUES[4]); ht.put(put); - + // The Get returns the latest value but then does not return the - // oldest, which was never deleted, ts[1]. - + // oldest, which was never deleted, ts[1]. 
+ get = new Get(ROW); get.addFamily(FAMILIES[0]); get.setMaxVersions(Integer.MAX_VALUE); result = ht.get(get); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long [] {ts[2], ts[3], ts[4]}, new byte[][] {VALUES[2], VALUES[3], VALUES[4]}, 0, 2); - + // The Scanner returns the previous values, the expected-unexpected behavior - + scan = new Scan(ROW); scan.addFamily(FAMILIES[0]); scan.setMaxVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long [] {ts[1], ts[2], ts[3]}, new byte[][] {VALUES[1], VALUES[2], VALUES[3]}, 0, 2); - + // Test deleting an entire family from one row but not the other various ways - + put = new Put(ROWS[0]); put.add(FAMILIES[1], QUALIFIER, ts[0], VALUES[0]); put.add(FAMILIES[1], QUALIFIER, ts[1], VALUES[1]); put.add(FAMILIES[2], QUALIFIER, ts[2], VALUES[2]); put.add(FAMILIES[2], QUALIFIER, ts[3], VALUES[3]); ht.put(put); - + put = new Put(ROWS[1]); put.add(FAMILIES[1], QUALIFIER, ts[0], VALUES[0]); put.add(FAMILIES[1], QUALIFIER, ts[1], VALUES[1]); put.add(FAMILIES[2], QUALIFIER, ts[2], VALUES[2]); put.add(FAMILIES[2], QUALIFIER, ts[3], VALUES[3]); ht.put(put); - + put = new Put(ROWS[2]); put.add(FAMILIES[1], QUALIFIER, ts[0], VALUES[0]); put.add(FAMILIES[1], QUALIFIER, ts[1], VALUES[1]); put.add(FAMILIES[2], QUALIFIER, ts[2], VALUES[2]); put.add(FAMILIES[2], QUALIFIER, ts[3], VALUES[3]); ht.put(put); - + delete = new Delete(ROWS[0]); delete.deleteFamily(FAMILIES[2]); ht.delete(delete); - + delete = new Delete(ROWS[1]); delete.deleteColumns(FAMILIES[1], QUALIFIER); ht.delete(delete); - + delete = new Delete(ROWS[2]); delete.deleteColumn(FAMILIES[1], QUALIFIER); delete.deleteColumn(FAMILIES[1], QUALIFIER); delete.deleteColumn(FAMILIES[2], QUALIFIER); ht.delete(delete); - + get = new Get(ROWS[0]); get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); @@ -1558,11 +1558,11 @@ public class TestFromClientSide { result = ht.get(get); assertTrue("Expected 2 keys but received " + result.size(), result.size() == 2); - assertNResult(result, ROWS[0], FAMILIES[1], QUALIFIER, + assertNResult(result, ROWS[0], FAMILIES[1], QUALIFIER, new long [] {ts[0], ts[1]}, new byte[][] {VALUES[0], VALUES[1]}, 0, 1); - + scan = new Scan(ROWS[0]); scan.addFamily(FAMILIES[1]); scan.addFamily(FAMILIES[2]); @@ -1570,11 +1570,11 @@ public class TestFromClientSide { result = getSingleScanResult(ht, scan); assertTrue("Expected 2 keys but received " + result.size(), result.size() == 2); - assertNResult(result, ROWS[0], FAMILIES[1], QUALIFIER, + assertNResult(result, ROWS[0], FAMILIES[1], QUALIFIER, new long [] {ts[0], ts[1]}, new byte[][] {VALUES[0], VALUES[1]}, 0, 1); - + get = new Get(ROWS[1]); get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); @@ -1590,7 +1590,7 @@ public class TestFromClientSide { result = getSingleScanResult(ht, scan); assertTrue("Expected 2 keys but received " + result.size(), result.size() == 2); - + get = new Get(ROWS[2]); get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); @@ -1598,7 +1598,7 @@ public class TestFromClientSide { result = ht.get(get); assertTrue("Expected 1 key but received " + result.size(), result.size() == 1); - assertNResult(result, ROWS[2], FAMILIES[2], QUALIFIER, + assertNResult(result, ROWS[2], FAMILIES[2], QUALIFIER, new long [] {ts[2]}, new byte[][] {VALUES[2]}, 0, 0); @@ -1610,26 +1610,26 @@ public class TestFromClientSide { result = 
getSingleScanResult(ht, scan); assertTrue("Expected 1 key but received " + result.size(), result.size() == 1); - assertNResult(result, ROWS[2], FAMILIES[2], QUALIFIER, + assertNResult(result, ROWS[2], FAMILIES[2], QUALIFIER, new long [] {ts[2]}, new byte[][] {VALUES[2]}, 0, 0); - + // Test if we delete the family first in one row (HBASE-1541) - + delete = new Delete(ROWS[3]); delete.deleteFamily(FAMILIES[1]); ht.delete(delete); - + put = new Put(ROWS[3]); put.add(FAMILIES[2], QUALIFIER, VALUES[0]); ht.put(put); - + put = new Put(ROWS[4]); put.add(FAMILIES[1], QUALIFIER, VALUES[1]); put.add(FAMILIES[2], QUALIFIER, VALUES[2]); ht.put(put); - + get = new Get(ROWS[3]); get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); @@ -1637,7 +1637,7 @@ public class TestFromClientSide { result = ht.get(get); assertTrue("Expected 1 key but received " + result.size(), result.size() == 1); - + get = new Get(ROWS[4]); get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); @@ -1664,7 +1664,7 @@ public class TestFromClientSide { assertTrue(Bytes.equals(result.sorted()[0].getValue(), VALUES[1])); assertTrue(Bytes.equals(result.sorted()[1].getValue(), VALUES[2])); scanner.close(); - + // Add test of bulk deleting. for (int i = 0; i < 10; i++) { byte [] bytes = Bytes.toBytes(i); @@ -1698,65 +1698,65 @@ public class TestFromClientSide { /** * Baseline "scalability" test. - * + * * Tests one hundred families, one million columns, one million versions */ @Ignore @Test public void testMillions() throws Exception { - + // 100 families - + // millions of columns - + // millions of versions - + } @Ignore @Test public void testMultipleRegionsAndBatchPuts() throws Exception { // Two family table - + // Insert lots of rows - + // Insert to the same row with batched puts - + // Insert to multiple rows with batched puts - + // Split the table - + // Get row from first region - + // Get row from second region - + // Scan all rows - + // Insert to multiple regions with batched puts - + // Get row from first region - + // Get row from second region - + // Scan all rows - - + + } @Ignore @Test public void testMultipleRowMultipleFamily() throws Exception { - + } // // JIRA Testers // - + /** * HBASE-867 * If millions of columns in a column family, hbase scanner won't come up - * - * Test will create numRows rows, each with numColsPerRow columns + * + * Test will create numRows rows, each with numColsPerRow columns * (1 version each), and attempt to scan them all. 
- * + * * To test at scale, up numColsPerRow to the millions * (have not gotten that to work running as junit though) */ @@ -1764,16 +1764,16 @@ public class TestFromClientSide { public void testJiraTest867() throws Exception { int numRows = 10; int numColsPerRow = 2000; - + byte [] TABLE = Bytes.toBytes("testJiraTest867"); - + byte [][] ROWS = makeN(ROW, numRows); byte [][] QUALIFIERS = makeN(QUALIFIER, numColsPerRow); - + HTable ht = TEST_UTIL.createTable(TABLE, FAMILY); - + // Insert rows - + for(int i=0;i some timestamp + * Scan for columns > some timestamp */ @Test public void testJiraTest1182() throws Exception { @@ -1989,11 +1989,11 @@ public class TestFromClientSide { byte [] TABLE = Bytes.toBytes("testJiraTest1182"); byte [][] VALUES = makeNAscii(VALUE, 7); long [] STAMPS = makeStamps(7); - + HTable ht = TEST_UTIL.createTable(TABLE, FAMILY, 10); - + // Insert lots versions - + Put put = new Put(ROW); put.add(FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); put.add(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); @@ -2002,27 +2002,27 @@ public class TestFromClientSide { put.add(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); put.add(FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); ht.put(put); - + getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 5); getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); - + scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 5); scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); - + // Try same from storefile TEST_UTIL.flush(); getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 5); getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); - + scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 5); scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); } - + /** * HBASE-52 * Add a means of scanning over all versions @@ -2032,11 +2032,11 @@ public class TestFromClientSide { byte [] TABLE = Bytes.toBytes("testJiraTest52"); byte [][] VALUES = makeNAscii(VALUE, 7); long [] STAMPS = makeStamps(7); - + HTable ht = TEST_UTIL.createTable(TABLE, FAMILY, 10); - + // Insert lots versions - + Put put = new Put(ROW); put.add(FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); put.add(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); @@ -2045,25 +2045,25 @@ public class TestFromClientSide { put.add(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); put.add(FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); ht.put(put); - + getAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - + scanAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); // Try same from storefile TEST_UTIL.flush(); getAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - + scanAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); } // // Bulk Testers // - - private void getVersionRangeAndVerifyGreaterThan(HTable ht, byte [] row, - byte [] family, byte [] qualifier, long [] stamps, byte [][] values, + + private void getVersionRangeAndVerifyGreaterThan(HTable ht, byte [] row, + byte [] 
family, byte [] qualifier, long [] stamps, byte [][] values, int start, int end) throws IOException { Get get = new Get(row); @@ -2073,7 +2073,7 @@ public class TestFromClientSide { Result result = ht.get(get); assertNResult(result, row, family, qualifier, stamps, values, start+1, end); } - + private void getVersionRangeAndVerify(HTable ht, byte [] row, byte [] family, byte [] qualifier, long [] stamps, byte [][] values, int start, int end) throws IOException { @@ -2084,7 +2084,7 @@ public class TestFromClientSide { Result result = ht.get(get); assertNResult(result, row, family, qualifier, stamps, values, start, end); } - + private void getAllVersionsAndVerify(HTable ht, byte [] row, byte [] family, byte [] qualifier, long [] stamps, byte [][] values, int start, int end) throws IOException { @@ -2094,9 +2094,9 @@ public class TestFromClientSide { Result result = ht.get(get); assertNResult(result, row, family, qualifier, stamps, values, start, end); } - - private void scanVersionRangeAndVerifyGreaterThan(HTable ht, byte [] row, - byte [] family, byte [] qualifier, long [] stamps, byte [][] values, + + private void scanVersionRangeAndVerifyGreaterThan(HTable ht, byte [] row, + byte [] family, byte [] qualifier, long [] stamps, byte [][] values, int start, int end) throws IOException { Scan scan = new Scan(row); @@ -2106,7 +2106,7 @@ public class TestFromClientSide { Result result = getSingleScanResult(ht, scan); assertNResult(result, row, family, qualifier, stamps, values, start+1, end); } - + private void scanVersionRangeAndVerify(HTable ht, byte [] row, byte [] family, byte [] qualifier, long [] stamps, byte [][] values, int start, int end) throws IOException { @@ -2127,7 +2127,7 @@ public class TestFromClientSide { Result result = getSingleScanResult(ht, scan); assertNResult(result, row, family, qualifier, stamps, values, start, end); } - + private void getVersionAndVerify(HTable ht, byte [] row, byte [] family, byte [] qualifier, long stamp, byte [] value) throws Exception { @@ -2138,7 +2138,7 @@ public class TestFromClientSide { Result result = ht.get(get); assertSingleResult(result, row, family, qualifier, stamp, value); } - + private void getVersionAndVerifyMissing(HTable ht, byte [] row, byte [] family, byte [] qualifier, long stamp) throws Exception { @@ -2149,7 +2149,7 @@ public class TestFromClientSide { Result result = ht.get(get); assertEmptyResult(result); } - + private void scanVersionAndVerify(HTable ht, byte [] row, byte [] family, byte [] qualifier, long stamp, byte [] value) throws Exception { @@ -2160,8 +2160,8 @@ public class TestFromClientSide { Result result = getSingleScanResult(ht, scan); assertSingleResult(result, row, family, qualifier, stamp, value); } - - private void scanVersionAndVerifyMissing(HTable ht, byte [] row, + + private void scanVersionAndVerifyMissing(HTable ht, byte [] row, byte [] family, byte [] qualifier, long stamp) throws Exception { Scan scan = new Scan(row); @@ -2171,79 +2171,79 @@ public class TestFromClientSide { Result result = getSingleScanResult(ht, scan); assertNullResult(result); } - - private void getTestNull(HTable ht, byte [] row, byte [] family, + + private void getTestNull(HTable ht, byte [] row, byte [] family, byte [] value) throws Exception { - + Get get = new Get(row); get.addColumn(family, null); Result result = ht.get(get); assertSingleResult(result, row, family, null, value); - + get = new Get(row); get.addColumn(family, HConstants.EMPTY_BYTE_ARRAY); result = ht.get(get); assertSingleResult(result, row, family, 
HConstants.EMPTY_BYTE_ARRAY, value); - + get = new Get(row); get.addFamily(family); result = ht.get(get); assertSingleResult(result, row, family, HConstants.EMPTY_BYTE_ARRAY, value); - + get = new Get(row); result = ht.get(get); assertSingleResult(result, row, family, HConstants.EMPTY_BYTE_ARRAY, value); - + } - - private void scanTestNull(HTable ht, byte [] row, byte [] family, + + private void scanTestNull(HTable ht, byte [] row, byte [] family, byte [] value) throws Exception { - + Scan scan = new Scan(); scan.addColumn(family, null); Result result = getSingleScanResult(ht, scan); assertSingleResult(result, row, family, HConstants.EMPTY_BYTE_ARRAY, value); - + scan = new Scan(); scan.addColumn(family, HConstants.EMPTY_BYTE_ARRAY); result = getSingleScanResult(ht, scan); assertSingleResult(result, row, family, HConstants.EMPTY_BYTE_ARRAY, value); - + scan = new Scan(); scan.addFamily(family); result = getSingleScanResult(ht, scan); assertSingleResult(result, row, family, HConstants.EMPTY_BYTE_ARRAY, value); - + scan = new Scan(); result = getSingleScanResult(ht, scan); assertSingleResult(result, row, family, HConstants.EMPTY_BYTE_ARRAY, value); - + } - - private void singleRowGetTest(HTable ht, byte [][] ROWS, byte [][] FAMILIES, + + private void singleRowGetTest(HTable ht, byte [][] ROWS, byte [][] FAMILIES, byte [][] QUALIFIERS, byte [][] VALUES) throws Exception { - + // Single column from memstore Get get = new Get(ROWS[0]); get.addColumn(FAMILIES[4], QUALIFIERS[0]); Result result = ht.get(get); assertSingleResult(result, ROWS[0], FAMILIES[4], QUALIFIERS[0], VALUES[0]); - + // Single column from storefile get = new Get(ROWS[0]); get.addColumn(FAMILIES[2], QUALIFIERS[2]); result = ht.get(get); assertSingleResult(result, ROWS[0], FAMILIES[2], QUALIFIERS[2], VALUES[2]); - + // Single column from storefile, family match get = new Get(ROWS[0]); get.addFamily(FAMILIES[7]); result = ht.get(get); assertSingleResult(result, ROWS[0], FAMILIES[7], QUALIFIERS[7], VALUES[7]); - + // Two columns, one from memstore one from storefile, same family, // wildcard match get = new Get(ROWS[0]); @@ -2251,7 +2251,7 @@ public class TestFromClientSide { result = ht.get(get); assertDoubleResult(result, ROWS[0], FAMILIES[4], QUALIFIERS[0], VALUES[0], FAMILIES[4], QUALIFIERS[4], VALUES[4]); - + // Two columns, one from memstore one from storefile, same family, // explicit match get = new Get(ROWS[0]); @@ -2260,7 +2260,7 @@ public class TestFromClientSide { result = ht.get(get); assertDoubleResult(result, ROWS[0], FAMILIES[4], QUALIFIERS[0], VALUES[0], FAMILIES[4], QUALIFIERS[4], VALUES[4]); - + // Three column, one from memstore two from storefile, different families, // wildcard match get = new Get(ROWS[0]); @@ -2269,7 +2269,7 @@ public class TestFromClientSide { result = ht.get(get); assertNResult(result, ROWS[0], FAMILIES, QUALIFIERS, VALUES, new int [][] { {4, 0, 0}, {4, 4, 4}, {7, 7, 7} }); - + // Multiple columns from everywhere storefile, many family, wildcard get = new Get(ROWS[0]); get.addFamily(FAMILIES[2]); @@ -2278,10 +2278,10 @@ public class TestFromClientSide { get.addFamily(FAMILIES[7]); result = ht.get(get); assertNResult(result, ROWS[0], FAMILIES, QUALIFIERS, VALUES, - new int [][] { + new int [][] { {2, 2, 2}, {2, 4, 4}, {4, 0, 0}, {4, 4, 4}, {6, 6, 6}, {6, 7, 7}, {7, 7, 7} }); - + // Multiple columns from everywhere storefile, many family, wildcard get = new Get(ROWS[0]); get.addColumn(FAMILIES[2], QUALIFIERS[2]); @@ -2294,54 +2294,54 @@ public class TestFromClientSide { 
get.addColumn(FAMILIES[7], QUALIFIERS[8]); result = ht.get(get); assertNResult(result, ROWS[0], FAMILIES, QUALIFIERS, VALUES, - new int [][] { + new int [][] { {2, 2, 2}, {2, 4, 4}, {4, 0, 0}, {4, 4, 4}, {6, 6, 6}, {6, 7, 7}, {7, 7, 7} }); - + // Everything get = new Get(ROWS[0]); result = ht.get(get); assertNResult(result, ROWS[0], FAMILIES, QUALIFIERS, VALUES, - new int [][] { + new int [][] { {2, 2, 2}, {2, 4, 4}, {4, 0, 0}, {4, 4, 4}, {6, 6, 6}, {6, 7, 7}, {7, 7, 7}, {9, 0, 0} }); - + // Get around inserted columns - + get = new Get(ROWS[1]); result = ht.get(get); assertEmptyResult(result); - + get = new Get(ROWS[0]); get.addColumn(FAMILIES[4], QUALIFIERS[3]); get.addColumn(FAMILIES[2], QUALIFIERS[3]); result = ht.get(get); assertEmptyResult(result); - + } - - private void singleRowScanTest(HTable ht, byte [][] ROWS, byte [][] FAMILIES, + + private void singleRowScanTest(HTable ht, byte [][] ROWS, byte [][] FAMILIES, byte [][] QUALIFIERS, byte [][] VALUES) throws Exception { - + // Single column from memstore Scan scan = new Scan(); scan.addColumn(FAMILIES[4], QUALIFIERS[0]); Result result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[0], FAMILIES[4], QUALIFIERS[0], VALUES[0]); - + // Single column from storefile scan = new Scan(); scan.addColumn(FAMILIES[2], QUALIFIERS[2]); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[0], FAMILIES[2], QUALIFIERS[2], VALUES[2]); - + // Single column from storefile, family match scan = new Scan(); scan.addFamily(FAMILIES[7]); result = getSingleScanResult(ht, scan); assertSingleResult(result, ROWS[0], FAMILIES[7], QUALIFIERS[7], VALUES[7]); - + // Two columns, one from memstore one from storefile, same family, // wildcard match scan = new Scan(); @@ -2349,7 +2349,7 @@ public class TestFromClientSide { result = getSingleScanResult(ht, scan); assertDoubleResult(result, ROWS[0], FAMILIES[4], QUALIFIERS[0], VALUES[0], FAMILIES[4], QUALIFIERS[4], VALUES[4]); - + // Two columns, one from memstore one from storefile, same family, // explicit match scan = new Scan(); @@ -2358,7 +2358,7 @@ public class TestFromClientSide { result = getSingleScanResult(ht, scan); assertDoubleResult(result, ROWS[0], FAMILIES[4], QUALIFIERS[0], VALUES[0], FAMILIES[4], QUALIFIERS[4], VALUES[4]); - + // Three column, one from memstore two from storefile, different families, // wildcard match scan = new Scan(); @@ -2367,7 +2367,7 @@ public class TestFromClientSide { result = getSingleScanResult(ht, scan); assertNResult(result, ROWS[0], FAMILIES, QUALIFIERS, VALUES, new int [][] { {4, 0, 0}, {4, 4, 4}, {7, 7, 7} }); - + // Multiple columns from everywhere storefile, many family, wildcard scan = new Scan(); scan.addFamily(FAMILIES[2]); @@ -2376,10 +2376,10 @@ public class TestFromClientSide { scan.addFamily(FAMILIES[7]); result = getSingleScanResult(ht, scan); assertNResult(result, ROWS[0], FAMILIES, QUALIFIERS, VALUES, - new int [][] { + new int [][] { {2, 2, 2}, {2, 4, 4}, {4, 0, 0}, {4, 4, 4}, {6, 6, 6}, {6, 7, 7}, {7, 7, 7} }); - + // Multiple columns from everywhere storefile, many family, wildcard scan = new Scan(); scan.addColumn(FAMILIES[2], QUALIFIERS[2]); @@ -2392,24 +2392,24 @@ public class TestFromClientSide { scan.addColumn(FAMILIES[7], QUALIFIERS[8]); result = getSingleScanResult(ht, scan); assertNResult(result, ROWS[0], FAMILIES, QUALIFIERS, VALUES, - new int [][] { + new int [][] { {2, 2, 2}, {2, 4, 4}, {4, 0, 0}, {4, 4, 4}, {6, 6, 6}, {6, 7, 7}, {7, 7, 7} }); - + // Everything scan = new Scan(); result = 
getSingleScanResult(ht, scan); assertNResult(result, ROWS[0], FAMILIES, QUALIFIERS, VALUES, - new int [][] { + new int [][] { {2, 2, 2}, {2, 4, 4}, {4, 0, 0}, {4, 4, 4}, {6, 6, 6}, {6, 7, 7}, {7, 7, 7}, {9, 0, 0} }); - + // Scan around inserted columns - + scan = new Scan(ROWS[1]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + scan = new Scan(); scan.addColumn(FAMILIES[4], QUALIFIERS[3]); scan.addColumn(FAMILIES[2], QUALIFIERS[3]); @@ -2419,7 +2419,7 @@ public class TestFromClientSide { /** * Verify a single column using gets. - * Expects family and qualifier arrays to be valid for at least + * Expects family and qualifier arrays to be valid for at least * the range: idx-2 < idx < idx+2 */ private void getVerifySingleColumn(HTable ht, @@ -2428,39 +2428,39 @@ public class TestFromClientSide { byte [][] QUALIFIERS, int QUALIFIERIDX, byte [][] VALUES, int VALUEIDX) throws Exception { - + Get get = new Get(ROWS[ROWIDX]); Result result = ht.get(get); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); - + get = new Get(ROWS[ROWIDX]); get.addFamily(FAMILIES[FAMILYIDX]); result = ht.get(get); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); - + get = new Get(ROWS[ROWIDX]); get.addFamily(FAMILIES[FAMILYIDX-2]); get.addFamily(FAMILIES[FAMILYIDX]); get.addFamily(FAMILIES[FAMILYIDX+2]); result = ht.get(get); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); - + get = new Get(ROWS[ROWIDX]); get.addColumn(FAMILIES[FAMILYIDX], QUALIFIERS[0]); result = ht.get(get); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); - + get = new Get(ROWS[ROWIDX]); get.addColumn(FAMILIES[FAMILYIDX], QUALIFIERS[1]); get.addFamily(FAMILIES[FAMILYIDX]); result = ht.get(get); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); - + get = new Get(ROWS[ROWIDX]); get.addFamily(FAMILIES[FAMILYIDX]); get.addColumn(FAMILIES[FAMILYIDX+1], QUALIFIERS[1]); @@ -2468,17 +2468,17 @@ public class TestFromClientSide { get.addFamily(FAMILIES[FAMILYIDX-1]); get.addFamily(FAMILIES[FAMILYIDX+2]); result = ht.get(get); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); - + } - - + + /** * Verify a single column using scanners. 
- * Expects family and qualifier arrays to be valid for at least + * Expects family and qualifier arrays to be valid for at least * the range: idx-2 to idx+2 - * Expects row array to be valid for at least idx to idx+2 + * Expects row array to be valid for at least idx to idx+2 */ private void scanVerifySingleColumn(HTable ht, byte [][] ROWS, int ROWIDX, @@ -2489,112 +2489,112 @@ public class TestFromClientSide { Scan scan = new Scan(); Result result = getSingleScanResult(ht, scan); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); - + scan = new Scan(ROWS[ROWIDX]); result = getSingleScanResult(ht, scan); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); - + scan = new Scan(ROWS[ROWIDX], ROWS[ROWIDX+1]); result = getSingleScanResult(ht, scan); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); - + scan = new Scan(HConstants.EMPTY_START_ROW, ROWS[ROWIDX+1]); result = getSingleScanResult(ht, scan); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); - + scan = new Scan(); scan.addFamily(FAMILIES[FAMILYIDX]); result = getSingleScanResult(ht, scan); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); - + scan = new Scan(); scan.addColumn(FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX]); result = getSingleScanResult(ht, scan); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); - + scan = new Scan(); scan.addColumn(FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX+1]); scan.addFamily(FAMILIES[FAMILYIDX]); result = getSingleScanResult(ht, scan); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); - + scan = new Scan(); scan.addColumn(FAMILIES[FAMILYIDX-1], QUALIFIERS[QUALIFIERIDX+1]); scan.addColumn(FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX]); scan.addFamily(FAMILIES[FAMILYIDX+1]); result = getSingleScanResult(ht, scan); - assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], + assertSingleResult(result, ROWS[ROWIDX], FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX], VALUES[VALUEIDX]); } - + /** * Verify we do not read any values by accident around a single column * Same requirements as getVerifySingleColumn */ - private void getVerifySingleEmpty(HTable ht, - byte [][] ROWS, int ROWIDX, - byte [][] FAMILIES, int FAMILYIDX, + private void getVerifySingleEmpty(HTable ht, + byte [][] ROWS, int ROWIDX, + byte [][] FAMILIES, int FAMILYIDX, byte [][] QUALIFIERS, int QUALIFIERIDX) throws Exception { - + Get get = new Get(ROWS[ROWIDX]); get.addFamily(FAMILIES[4]); get.addColumn(FAMILIES[4], QUALIFIERS[1]); Result result = ht.get(get); assertEmptyResult(result); - + get = new Get(ROWS[ROWIDX]); get.addFamily(FAMILIES[4]); get.addColumn(FAMILIES[4], QUALIFIERS[2]); result = ht.get(get); assertEmptyResult(result); - + get = new Get(ROWS[ROWIDX]); 
get.addFamily(FAMILIES[3]); get.addColumn(FAMILIES[4], QUALIFIERS[2]); get.addFamily(FAMILIES[5]); result = ht.get(get); assertEmptyResult(result); - + get = new Get(ROWS[ROWIDX+1]); result = ht.get(get); assertEmptyResult(result); - + } - private void scanVerifySingleEmpty(HTable ht, - byte [][] ROWS, int ROWIDX, - byte [][] FAMILIES, int FAMILYIDX, + private void scanVerifySingleEmpty(HTable ht, + byte [][] ROWS, int ROWIDX, + byte [][] FAMILIES, int FAMILYIDX, byte [][] QUALIFIERS, int QUALIFIERIDX) throws Exception { - - Scan scan = new Scan(ROWS[ROWIDX+1]); + + Scan scan = new Scan(ROWS[ROWIDX+1]); Result result = getSingleScanResult(ht, scan); assertNullResult(result); - + scan = new Scan(ROWS[ROWIDX+1],ROWS[ROWIDX+2]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + scan = new Scan(HConstants.EMPTY_START_ROW, ROWS[ROWIDX]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + scan = new Scan(); scan.addColumn(FAMILIES[FAMILYIDX], QUALIFIERS[QUALIFIERIDX+1]); scan.addFamily(FAMILIES[FAMILYIDX-1]); result = getSingleScanResult(ht, scan); assertNullResult(result); - + } // @@ -2617,24 +2617,24 @@ public class TestFromClientSide { "Got value [" + Bytes.toString(key.getValue()) + "]", equals(value, key.getValue())); } - + private void assertNumKeys(Result result, int n) throws Exception { assertTrue("Expected " + n + " keys but got " + result.size(), result.size() == n); } - private void assertNResult(Result result, byte [] row, + private void assertNResult(Result result, byte [] row, byte [][] families, byte [][] qualifiers, byte [][] values, int [][] idxs) throws Exception { assertTrue("Expected row [" + Bytes.toString(row) + "] " + "Got row [" + Bytes.toString(result.getRow()) +"]", equals(row, result.getRow())); - assertTrue("Expected " + idxs.length + " keys but result contains " + assertTrue("Expected " + idxs.length + " keys but result contains " + result.size(), result.size() == idxs.length); - + KeyValue [] keys = result.sorted(); - + for(int i=0;i 256) { return makeNBig(base, n); @@ -2795,7 +2795,7 @@ public class TestFromClientSide { } return ret; } - + private byte [][] makeN(byte [] base, int n) { if (n > 256) { return makeNBig(base, n); @@ -2806,7 +2806,7 @@ public class TestFromClientSide { } return ret; } - + private byte [][] makeNBig(byte [] base, int n) { byte [][] ret = new byte[n][]; for(int i=0;i times[i]); } } - + // Flush data to disk and try again TEST_UTIL.flush(); - + // Reset times for(int i=0;i>> map = r.getMap(); - NavigableMap> familyMap = + NavigableMap> familyMap = map.get(CONTENTS); NavigableMap versionMap = familyMap.get(CONTENTS); assertTrue(versionMap.size() == 2); diff --git a/core/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java b/core/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java index 394fd67..4813052 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java +++ b/core/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java @@ -39,18 +39,18 @@ import org.junit.Test; public class TestHTablePool { private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - + @BeforeClass - public static void beforeClass() throws Exception { + public static void beforeClass() throws Exception { TEST_UTIL.startMiniCluster(1); } - + @AfterClass - public static void afterClass() throws IOException { + public static void afterClass() throws IOException { TEST_UTIL.shutdownMiniCluster(); } - + @Test public void testTableWithStringName() { HTablePool 
pool = new HTablePool((HBaseConfiguration)null, Integer.MAX_VALUE); @@ -131,11 +131,11 @@ public class TestHTablePool { Assert.assertSame(table1, sameTable1); Assert.assertSame(table2, sameTable2); } - - + + @Test - public void testCloseTablePool() throws IOException { - + public void testCloseTablePool() throws IOException { + HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 4); String tableName = "testTable"; HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); @@ -149,24 +149,24 @@ public class TestHTablePool { tableDescriptor.addFamily(new HColumnDescriptor("randomFamily")); admin.createTable(tableDescriptor); - + // Request tables from an empty pool HTableInterface[] tables = new HTableInterface[4]; for (int i = 0; i < 4; ++i ) { tables[i] = pool.getTable(tableName); } - + pool.closeTablePool(tableName); - + for (int i = 0; i < 4; ++i ) { pool.putTable(tables[i]); } Assert.assertEquals(4, pool.getCurrentPoolSize(tableName)); - + pool.closeTablePool(tableName); Assert.assertEquals(0, pool.getCurrentPoolSize(tableName)); - + } } diff --git a/core/src/test/java/org/apache/hadoop/hbase/client/TestTimestamp.java b/core/src/test/java/org/apache/hadoop/hbase/client/TestTimestamp.java index bcefa7c..db42192 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/client/TestTimestamp.java +++ b/core/src/test/java/org/apache/hadoop/hbase/client/TestTimestamp.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.TimestampTestBase; */ public class TestTimestamp extends HBaseClusterTestCase { public static String COLUMN_NAME = "colfamily1"; - + /** constructor */ public TestTimestamp() { super(); @@ -51,7 +51,7 @@ public class TestTimestamp extends HBaseClusterTestCase { cluster.flushcache(); } }); - + // Perhaps drop and readd the table between tests so the former does // not pollute this latter? Or put into separate tests. TimestampTestBase.doTestTimestampScanning(incommon, new FlushCache() { @@ -60,8 +60,8 @@ public class TestTimestamp extends HBaseClusterTestCase { } }); } - - /* + + /* * Create a table named TABLE_NAME. * @return An instance of an HTable connected to the created table. * @throws IOException diff --git a/core/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java b/core/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java index 0a47f95..2c8af2a 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java +++ b/core/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java @@ -39,19 +39,19 @@ public class TestColumnPaginationFilter extends TestCase private static final byte[] COLUMN_FAMILY = Bytes.toBytes("test"); private static final byte[] VAL_1 = Bytes.toBytes("a"); private static final byte [] COLUMN_QUALIFIER = Bytes.toBytes("foo"); - + private Filter columnPaginationFilter; - + @Override protected void setUp() throws Exception { super.setUp(); columnPaginationFilter = getColumnPaginationFilter(); - + } private Filter getColumnPaginationFilter() { return new ColumnPaginationFilter(1,0); } - + private Filter serializationTest(Filter filter) throws Exception { ByteArrayOutputStream stream = new ByteArrayOutputStream(); DataOutputStream out = new DataOutputStream(stream); @@ -66,29 +66,29 @@ public class TestColumnPaginationFilter extends TestCase return newFilter; } - - + + /** * The more specific functionality tests are contained within the TestFilters class. 
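[Editorial sketch] The TestHTablePool hunks above exercise the pool's checkout/return cycle. A minimal sketch of that cycle, using only the pool methods visible in this patch (the table name and the configuration setup are placeholders, not part of the change), might look like:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.client.HTablePool;

    public class HTablePoolSketch {
      public static void main(String[] args) throws Exception {
        // Keep at most 4 cached table references per table name.
        HTablePool pool = new HTablePool(new HBaseConfiguration(), 4);
        HTableInterface table = pool.getTable("testTable");  // check a table out
        try {
          // ... issue gets/puts against 'table' here (placeholder) ...
        } finally {
          pool.putTable(table);            // return it so it can be reused
        }
        pool.closeTablePool("testTable");  // release everything pooled for this table
      }
    }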
This class is mainly for testing * serialization - * + * * @param filter * @throws Exception */ - private void basicFilterTests(ColumnPaginationFilter filter) throws Exception + private void basicFilterTests(ColumnPaginationFilter filter) throws Exception { KeyValue kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_1); assertTrue("basicFilter1", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); } - + /** * Tests serialization * @throws Exception - */ + */ public void testSerialization() throws Exception { Filter newFilter = serializationTest(columnPaginationFilter); basicFilterTests((ColumnPaginationFilter)newFilter); - } - + } + } diff --git a/core/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java b/core/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java index ab0846d..ecc6114 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java +++ b/core/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java @@ -50,7 +50,7 @@ import org.apache.hadoop.hbase.util.Bytes; public class TestFilter extends HBaseTestCase { private final Log LOG = LogFactory.getLog(this.getClass()); private HRegion region; - + // // Rows, Qualifiers, and Values are in two groups, One and Two. // @@ -64,7 +64,7 @@ public class TestFilter extends HBaseTestCase { Bytes.toBytes("testRowTwo-0"), Bytes.toBytes("testRowTwo-1"), Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3") }; - + private static final byte [][] FAMILIES = { Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo") }; @@ -73,20 +73,20 @@ public class TestFilter extends HBaseTestCase { Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"), Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3") }; - + private static final byte [][] QUALIFIERS_TWO = { Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"), Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3") }; - + private static final byte [][] VALUES = { Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo") }; - + private long numRows = ROWS_ONE.length + ROWS_TWO.length; private long colsPerRow = FAMILIES.length * QUALIFIERS_ONE.length; - - + + protected void setUp() throws Exception { super.setUp(); HTableDescriptor htd = new HTableDescriptor(getName()); @@ -94,7 +94,7 @@ public class TestFilter extends HBaseTestCase { htd.addFamily(new HColumnDescriptor(FAMILIES[1])); HRegionInfo info = new HRegionInfo(htd, null, null, false); this.region = HRegion.createHRegion(info, this.testDir, this.conf); - + // Insert first half for(byte [] ROW : ROWS_ONE) { Put p = new Put(ROW); @@ -110,10 +110,10 @@ public class TestFilter extends HBaseTestCase { } this.region.put(p); } - + // Flush this.region.flushcache(); - + // Insert second half (reverse families) for(byte [] ROW : ROWS_ONE) { Put p = new Put(ROW); @@ -129,14 +129,14 @@ public class TestFilter extends HBaseTestCase { } this.region.put(p); } - + // Delete the second qualifier from all rows and families for(byte [] ROW : ROWS_ONE) { Delete d = new Delete(ROW); d.deleteColumns(FAMILIES[0], QUALIFIERS_ONE[1]); d.deleteColumns(FAMILIES[1], QUALIFIERS_ONE[1]); this.region.delete(d, null, false); - } + } for(byte [] ROW : ROWS_TWO) { Delete d = new Delete(ROW); d.deleteColumns(FAMILIES[0], QUALIFIERS_TWO[1]); @@ -144,7 +144,7 @@ public class TestFilter extends HBaseTestCase { this.region.delete(d, null, false); } colsPerRow -= 2; - + // Delete the second rows from both groups, one column at a time for(byte [] 
QUALIFIER : QUALIFIERS_ONE) { Delete d = new Delete(ROWS_ONE[1]); @@ -170,7 +170,7 @@ public class TestFilter extends HBaseTestCase { // No filter long expectedRows = this.numRows; long expectedKeys = this.colsPerRow; - + // Both families Scan s = new Scan(); verifyScan(s, expectedRows, expectedKeys); @@ -180,7 +180,7 @@ public class TestFilter extends HBaseTestCase { s.addFamily(FAMILIES[0]); verifyScan(s, expectedRows, expectedKeys/2); } - + public void testPrefixFilter() throws Exception { // Grab rows from group one (half of total) long expectedRows = this.numRows / 2; @@ -189,9 +189,9 @@ public class TestFilter extends HBaseTestCase { s.setFilter(new PrefixFilter(Bytes.toBytes("testRowOne"))); verifyScan(s, expectedRows, expectedKeys); } - + public void testPageFilter() throws Exception { - + // KVs in first 6 rows KeyValue [] expectedKVs = { // testRowOne-0 @@ -237,7 +237,7 @@ public class TestFilter extends HBaseTestCase { new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) }; - + // Grab all 6 rows long expectedRows = 6; long expectedKeys = this.colsPerRow; @@ -246,7 +246,7 @@ public class TestFilter extends HBaseTestCase { verifyScan(s, expectedRows, expectedKeys); s.setFilter(new PageFilter(expectedRows)); verifyScanFull(s, expectedKVs); - + // Grab first 4 rows (6 cols per row) expectedRows = 4; expectedKeys = this.colsPerRow; @@ -255,7 +255,7 @@ public class TestFilter extends HBaseTestCase { verifyScan(s, expectedRows, expectedKeys); s.setFilter(new PageFilter(expectedRows)); verifyScanFull(s, Arrays.copyOf(expectedKVs, 24)); - + // Grab first 2 rows expectedRows = 2; expectedKeys = this.colsPerRow; @@ -273,7 +273,7 @@ public class TestFilter extends HBaseTestCase { verifyScan(s, expectedRows, expectedKeys); s.setFilter(new PageFilter(expectedRows)); verifyScanFull(s, Arrays.copyOf(expectedKVs, 6)); - + } /** @@ -362,18 +362,18 @@ public class TestFilter extends HBaseTestCase { } } } - + public void testInclusiveStopFilter() throws IOException { // Grab rows from group one - + // If we just use start/stop row, we get total/2 - 1 rows long expectedRows = (this.numRows / 2) - 1; long expectedKeys = this.colsPerRow; - Scan s = new Scan(Bytes.toBytes("testRowOne-0"), + Scan s = new Scan(Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-3")); verifyScan(s, expectedRows, expectedKeys); - + // Now use start row with inclusive stop filter expectedRows = this.numRows / 2; s = new Scan(Bytes.toBytes("testRowOne-0")); @@ -381,14 +381,14 @@ public class TestFilter extends HBaseTestCase { verifyScan(s, expectedRows, expectedKeys); // Grab rows from group two - + // If we just use start/stop row, we get total/2 - 1 rows expectedRows = (this.numRows / 2) - 1; expectedKeys = this.colsPerRow; - s = new Scan(Bytes.toBytes("testRowTwo-0"), + s = new Scan(Bytes.toBytes("testRowTwo-0"), Bytes.toBytes("testRowTwo-3")); verifyScan(s, expectedRows, expectedKeys); - + // Now use start row with inclusive stop filter expectedRows = this.numRows / 2; s = new Scan(Bytes.toBytes("testRowTwo-0")); @@ -396,9 +396,9 @@ public class TestFilter extends HBaseTestCase { verifyScan(s, expectedRows, expectedKeys); } - + public void testQualifierFilter() throws IOException { - + // Match two keys (one from each family) in half the rows long expectedRows = this.numRows / 2; long expectedKeys = 2; @@ -407,7 +407,7 @@ public class TestFilter extends HBaseTestCase { Scan s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, 
expectedRows, expectedKeys); - + // Match keys less than same qualifier // Expect only two keys (one from each family) in half the rows expectedRows = this.numRows / 2; @@ -417,7 +417,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match keys less than or equal // Expect four keys (two from each family) in half the rows expectedRows = this.numRows / 2; @@ -427,7 +427,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match keys not equal // Expect four keys (two from each family) // Only look in first group of rows @@ -438,7 +438,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match keys greater or equal // Expect four keys (two from each family) // Only look in first group of rows @@ -449,7 +449,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match keys greater // Expect two keys (one from each family) // Only look in first group of rows @@ -460,7 +460,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match keys not equal to // Look across rows and fully validate the keys and ordering // Expect varied numbers of keys, 4 per row in group one, 6 per row in group two @@ -468,7 +468,7 @@ public class TestFilter extends HBaseTestCase { new BinaryComparator(QUALIFIERS_ONE[2])); s = new Scan(); s.setFilter(f); - + KeyValue [] kvs = { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), @@ -508,8 +508,8 @@ public class TestFilter extends HBaseTestCase { new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); - - + + // Test across rows and groups with a regex // Filter out "test*-2" // Expect 4 keys per row across both groups @@ -517,7 +517,7 @@ public class TestFilter extends HBaseTestCase { new RegexStringComparator("test.+-2")); s = new Scan(); s.setFilter(f); - + kvs = new KeyValue [] { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), @@ -551,9 +551,9 @@ public class TestFilter extends HBaseTestCase { new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); - + } - + public void testRowFilter() throws IOException { // Match a single row, all keys @@ -564,7 +564,7 @@ public class TestFilter extends HBaseTestCase { Scan s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match a two rows, one from each group, using regex expectedRows = 2; expectedKeys = this.colsPerRow; @@ -573,7 +573,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match rows less than // Expect all keys in one row expectedRows = 1; @@ -583,7 +583,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match rows less than or equal // Expect all keys in two rows expectedRows = 2; @@ -593,7 +593,7 @@ public class TestFilter 
extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match rows not equal // Expect all keys in all but one row expectedRows = this.numRows - 1; @@ -603,7 +603,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match keys greater or equal // Expect all keys in all but one row expectedRows = this.numRows - 1; @@ -613,7 +613,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match keys greater // Expect all keys in all but two rows expectedRows = this.numRows - 2; @@ -623,7 +623,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match rows not equal to testRowTwo-2 // Look across rows and fully validate the keys and ordering // Should see all keys in all rows but testRowTwo-2 @@ -631,7 +631,7 @@ public class TestFilter extends HBaseTestCase { new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); - + KeyValue [] kvs = { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), @@ -670,8 +670,8 @@ public class TestFilter extends HBaseTestCase { new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); - - + + // Test across rows and groups with a regex // Filter out everything that doesn't match "*-2" // Expect all keys in two rows @@ -679,7 +679,7 @@ public class TestFilter extends HBaseTestCase { new RegexStringComparator(".+-2")); s = new Scan(); s.setFilter(f); - + kvs = new KeyValue [] { // testRowOne-2 new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), @@ -697,11 +697,11 @@ public class TestFilter extends HBaseTestCase { new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) }; verifyScanFull(s, kvs); - + } - + public void testValueFilter() throws IOException { - + // Match group one rows long expectedRows = this.numRows / 2; long expectedKeys = this.colsPerRow; @@ -719,7 +719,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match all values using regex expectedRows = this.numRows; expectedKeys = this.colsPerRow; @@ -728,7 +728,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match values less than // Expect group one rows expectedRows = this.numRows / 2; @@ -738,7 +738,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match values less than or equal // Expect all rows expectedRows = this.numRows; @@ -758,7 +758,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match values not equal // Expect half the rows expectedRows = this.numRows / 2; @@ -768,7 +768,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match values greater or equal // Expect all rows expectedRows = this.numRows; @@ -778,7 +778,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match values 
greater // Expect half rows expectedRows = this.numRows / 2; @@ -788,7 +788,7 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); - + // Match values not equal to testValueOne // Look across rows and fully validate the keys and ordering // Should see all keys in all group two rows @@ -796,7 +796,7 @@ public class TestFilter extends HBaseTestCase { new BinaryComparator(Bytes.toBytes("testValueOne"))); s = new Scan(); s.setFilter(f); - + KeyValue [] kvs = { // testRowTwo-0 new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), @@ -822,16 +822,16 @@ public class TestFilter extends HBaseTestCase { }; verifyScanFull(s, kvs); } - + public void testSkipFilter() throws IOException { - + // Test for qualifier regex: "testQualifierOne-2" // Should only get rows from second group, and all keys Filter f = new SkipFilter(new QualifierFilter(CompareOp.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("testQualifierOne-2")))); Scan s = new Scan(); s.setFilter(f); - + KeyValue [] kvs = { // testRowTwo-0 new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), @@ -857,12 +857,12 @@ public class TestFilter extends HBaseTestCase { }; verifyScanFull(s, kvs); } - + // TODO: This is important... need many more tests for ordering, etc // There are limited tests elsewhere but we need HRegion level ones here public void testFilterList() throws IOException { - - // Test getting a single row, single key using Row, Qualifier, and Value + + // Test getting a single row, single key using Row, Qualifier, and Value // regular expression and substring filters // Use must pass all List filters = new ArrayList(); @@ -888,10 +888,10 @@ public class TestFilter extends HBaseTestCase { s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, this.numRows, this.colsPerRow); - - + + } - + public void testFirstKeyOnlyFilter() throws IOException { Scan s = new Scan(); s.setFilter(new FirstKeyOnlyFilter()); @@ -906,18 +906,18 @@ public class TestFilter extends HBaseTestCase { }; verifyScanFull(s, kvs); } - + public void testSingleColumnValueFilter() throws IOException { - + // From HBASE-1821 // Desired action is to combine two SCVF in a FilterList // Want to return only rows that match both conditions - + // Need to change one of the group one columns to use group two value Put p = new Put(ROWS_ONE[2]); p.add(FAMILIES[0], QUALIFIERS_ONE[2], VALUES[1]); this.region.put(p); - + // Now let's grab rows that have Q_ONE[0](VALUES[0]) and Q_ONE[2](VALUES[1]) // Since group two rows don't have these qualifiers, they will pass // so limiting scan to group one @@ -938,7 +938,7 @@ public class TestFilter extends HBaseTestCase { }; verifyScanNoEarlyOut(s, 1, 3); verifyScanFull(s, kvs); - + // In order to get expected behavior without limiting to group one // need to wrap SCVFs in SkipFilters filters = new ArrayList(); @@ -955,7 +955,7 @@ public class TestFilter extends HBaseTestCase { verifyScanFull(s, kvs); // More tests from HBASE-1821 for Clint and filterIfMissing flag - + byte [][] ROWS_THREE = { Bytes.toBytes("rowThree-0"), Bytes.toBytes("rowThree-1"), Bytes.toBytes("rowThree-2"), Bytes.toBytes("rowThree-3") @@ -963,28 +963,28 @@ public class TestFilter extends HBaseTestCase { // Give row 0 and 2 QUALIFIERS_ONE[0] (VALUE[0] VALUE[1]) // Give row 1 and 3 QUALIFIERS_ONE[1] (VALUE[0] VALUE[1]) - + KeyValue [] srcKVs = new KeyValue [] { new KeyValue(ROWS_THREE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_THREE[1], 
FAMILIES[0], QUALIFIERS_ONE[0], VALUES[1]), new KeyValue(ROWS_THREE[2], FAMILIES[0], QUALIFIERS_ONE[1], VALUES[0]), new KeyValue(ROWS_THREE[3], FAMILIES[0], QUALIFIERS_ONE[1], VALUES[1]) }; - + for(KeyValue kv : srcKVs) { this.region.put(new Put(kv.getRow()).add(kv)); } - + // Match VALUES[0] against QUALIFIERS_ONE[0] with filterIfMissing = false // Expect 3 rows (0, 2, 3) - SingleColumnValueFilter scvf = new SingleColumnValueFilter(FAMILIES[0], + SingleColumnValueFilter scvf = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], CompareOp.EQUAL, VALUES[0]); s = new Scan(ROWS_THREE[0], Bytes.toBytes("rowThree-4")); s.addFamily(FAMILIES[0]); s.setFilter(scvf); kvs = new KeyValue [] { srcKVs[0], srcKVs[2], srcKVs[3] }; verifyScanFull(s, kvs); - + // Match VALUES[0] against QUALIFIERS_ONE[0] with filterIfMissing = true // Expect 1 row (0) scvf = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], @@ -995,10 +995,10 @@ public class TestFilter extends HBaseTestCase { s.setFilter(scvf); kvs = new KeyValue [] { srcKVs[0] }; verifyScanFull(s, kvs); - + // Match VALUES[1] against QUALIFIERS_ONE[1] with filterIfMissing = true // Expect 1 row (3) - scvf = new SingleColumnValueFilter(FAMILIES[0], + scvf = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[1], CompareOp.EQUAL, VALUES[1]); scvf.setFilterIfMissing(true); s = new Scan(ROWS_THREE[0], Bytes.toBytes("rowThree-4")); @@ -1006,14 +1006,14 @@ public class TestFilter extends HBaseTestCase { s.setFilter(scvf); kvs = new KeyValue [] { srcKVs[3] }; verifyScanFull(s, kvs); - + // Add QUALIFIERS_ONE[1] to ROWS_THREE[0] with VALUES[0] KeyValue kvA = new KeyValue(ROWS_THREE[0], FAMILIES[0], QUALIFIERS_ONE[1], VALUES[0]); this.region.put(new Put(kvA.getRow()).add(kvA)); - + // Match VALUES[1] against QUALIFIERS_ONE[1] with filterIfMissing = true // Expect 1 row (3) - scvf = new SingleColumnValueFilter(FAMILIES[0], + scvf = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[1], CompareOp.EQUAL, VALUES[1]); scvf.setFilterIfMissing(true); s = new Scan(ROWS_THREE[0], Bytes.toBytes("rowThree-4")); @@ -1021,10 +1021,10 @@ public class TestFilter extends HBaseTestCase { s.setFilter(scvf); kvs = new KeyValue [] { srcKVs[3] }; verifyScanFull(s, kvs); - + } - - private void verifyScan(Scan s, long expectedRows, long expectedKeys) + + private void verifyScan(Scan s, long expectedRows, long expectedKeys) throws IOException { InternalScanner scanner = this.region.getScanner(s); List results = new ArrayList(); @@ -1035,7 +1035,7 @@ public class TestFilter extends HBaseTestCase { KeyValue.COMPARATOR); LOG.info("counter=" + i + ", " + results); if (results.isEmpty()) break; - assertTrue("Scanned too many rows! Only expected " + expectedRows + + assertTrue("Scanned too many rows! Only expected " + expectedRows + " total but already scanned " + (i+1), expectedRows > i); assertEquals("Expected " + expectedKeys + " keys per row but " + "returned " + results.size(), expectedKeys, results.size()); @@ -1046,9 +1046,9 @@ public class TestFilter extends HBaseTestCase { } - - private void verifyScanNoEarlyOut(Scan s, long expectedRows, - long expectedKeys) + + private void verifyScanNoEarlyOut(Scan s, long expectedRows, + long expectedKeys) throws IOException { InternalScanner scanner = this.region.getScanner(s); List results = new ArrayList(); @@ -1059,7 +1059,7 @@ public class TestFilter extends HBaseTestCase { KeyValue.COMPARATOR); LOG.info("counter=" + i + ", " + results); if(results.isEmpty()) break; - assertTrue("Scanned too many rows! 
Only expected " + expectedRows + + assertTrue("Scanned too many rows! Only expected " + expectedRows + " total but already scanned " + (i+1), expectedRows > i); assertEquals("Expected " + expectedKeys + " keys per row but " + "returned " + results.size(), expectedKeys, results.size()); @@ -1080,20 +1080,20 @@ public class TestFilter extends HBaseTestCase { Arrays.sort(results.toArray(new KeyValue[results.size()]), KeyValue.COMPARATOR); if(results.isEmpty()) break; - assertTrue("Scanned too many keys! Only expected " + kvs.length + - " total but already scanned " + (results.size() + idx) + - (results.isEmpty() ? "" : "(" + results.get(0).toString() + ")"), + assertTrue("Scanned too many keys! Only expected " + kvs.length + + " total but already scanned " + (results.size() + idx) + + (results.isEmpty() ? "" : "(" + results.get(0).toString() + ")"), kvs.length >= idx + results.size()); for(KeyValue kv : results) { - LOG.info("row=" + row + ", result=" + kv.toString() + + LOG.info("row=" + row + ", result=" + kv.toString() + ", match=" + kvs[idx].toString()); - assertTrue("Row mismatch", + assertTrue("Row mismatch", Bytes.equals(kv.getRow(), kvs[idx].getRow())); - assertTrue("Family mismatch", + assertTrue("Family mismatch", Bytes.equals(kv.getFamily(), kvs[idx].getFamily())); - assertTrue("Qualifier mismatch", + assertTrue("Qualifier mismatch", Bytes.equals(kv.getQualifier(), kvs[idx].getQualifier())); - assertTrue("Value mismatch", + assertTrue("Value mismatch", Bytes.equals(kv.getValue(), kvs[idx].getValue())); idx++; } @@ -1103,10 +1103,10 @@ public class TestFilter extends HBaseTestCase { assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, kvs.length, idx); } - - + + public void testColumnPaginationFilter() throws Exception { - + // Set of KVs (page: 1; pageSize: 1) - the first set of 1 column per row KeyValue [] expectedKVs = { // testRowOne-0 @@ -1122,7 +1122,7 @@ public class TestFilter extends HBaseTestCase { // testRowTwo-3 new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]) }; - + // Set of KVs (page: 3; pageSize: 1) - the third set of 1 column per row KeyValue [] expectedKVs2 = { @@ -1139,7 +1139,7 @@ public class TestFilter extends HBaseTestCase { // testRowTwo-3 new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), }; - + // Set of KVs (page: 2; pageSize 2) - the 2nd set of 2 columns per row KeyValue [] expectedKVs3 = { // testRowOne-0 @@ -1161,8 +1161,8 @@ public class TestFilter extends HBaseTestCase { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), }; - - + + // Set of KVs (page: 2; pageSize 2) - the 2nd set of 2 columns per row KeyValue [] expectedKVs4 = { @@ -1171,8 +1171,8 @@ public class TestFilter extends HBaseTestCase { long expectedRows = this.numRows; long expectedKeys = 1; Scan s = new Scan(); - - + + // Page 1; 1 Column per page (Limit 1, Offset 0) s.setFilter(new ColumnPaginationFilter(1,0)); verifyScan(s, expectedRows, expectedKeys); @@ -1182,7 +1182,7 @@ public class TestFilter extends HBaseTestCase { s.setFilter(new ColumnPaginationFilter(1,2)); verifyScan(s, expectedRows, expectedKeys); this.verifyScanFull(s, expectedKVs2); - + // Page 2; 2 Results per page (Limit 2, Offset 2) s.setFilter(new ColumnPaginationFilter(2,2)); expectedKeys = 2; @@ -1194,6 +1194,6 @@ public class TestFilter extends HBaseTestCase { expectedKeys = 0; expectedRows = 0; verifyScan(s, expectedRows, 0); - this.verifyScanFull(s, expectedKVs4); + 
this.verifyScanFull(s, expectedKVs4); } } diff --git a/core/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java b/core/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java index 6101b6c..b39ca3a 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java +++ b/core/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java @@ -67,7 +67,7 @@ public class TestFilterList extends TestCase { */ filterMPONE.reset(); assertFalse(filterMPONE.filterAllRemaining()); - + /* Will pass both */ byte [] rowkey = Bytes.toBytes("yyyyyyyyy"); for (int i = 0; i < MAX_PAGES - 1; i++) { @@ -85,7 +85,7 @@ public class TestFilterList extends TestCase { KeyValue kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(0), Bytes.toBytes(0)); assertTrue(Filter.ReturnCode.INCLUDE == filterMPONE.filterKeyValue(kv)); - + /* PageFilter will fail now, but should pass because we match yyy */ rowkey = Bytes.toBytes("yyy"); assertFalse(filterMPONE.filterRowKey(rowkey, 0, rowkey.length)); @@ -93,7 +93,7 @@ public class TestFilterList extends TestCase { kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(0), Bytes.toBytes(0)); assertTrue(Filter.ReturnCode.INCLUDE == filterMPONE.filterKeyValue(kv)); - + /* We should filter any row */ rowkey = Bytes.toBytes("z"); assertTrue(filterMPONE.filterRowKey(rowkey, 0, rowkey.length)); @@ -168,7 +168,7 @@ public class TestFilterList extends TestCase { */ filterMPONE.reset(); assertFalse(filterMPONE.filterAllRemaining()); - + /* We should be able to fill MAX_PAGES without incrementing page counter */ byte [] rowkey = Bytes.toBytes("yyyyyyyy"); for (int i = 0; i < MAX_PAGES; i++) { @@ -178,7 +178,7 @@ public class TestFilterList extends TestCase { assertTrue(Filter.ReturnCode.INCLUDE == filterMPONE.filterKeyValue(kv)); assertFalse(filterMPONE.filterRow()); } - + /* Now let's fill the page filter */ rowkey = Bytes.toBytes("xxxxxxx"); for (int i = 0; i < MAX_PAGES; i++) { @@ -188,7 +188,7 @@ public class TestFilterList extends TestCase { assertTrue(Filter.ReturnCode.INCLUDE == filterMPONE.filterKeyValue(kv)); assertFalse(filterMPONE.filterRow()); } - + /* We should still be able to include even though page filter is at max */ rowkey = Bytes.toBytes("yyy"); for (int i = 0; i < MAX_PAGES; i++) { diff --git a/core/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java b/core/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java index 74cb656..f47ba90 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java +++ b/core/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java @@ -40,7 +40,7 @@ public class TestPageFilter extends TestCase { Filter f = new PageFilter(ROW_LIMIT); pageSizeTests(f); } - + /** * Test filter serialization * @throws Exception @@ -57,33 +57,33 @@ public class TestPageFilter extends TestCase { DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer)); Filter newFilter = new PageFilter(); newFilter.readFields(in); - + // Ensure the serialization preserved the filter by running a full test. 
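[Editorial sketch] Several of these filter tests share the same Writable round-trip: serialize the filter to bytes, read it back into a fresh instance, and re-run the behavioural checks. A compact sketch of that pattern, mirroring the calls in the hunks above (shown here for PageFilter; the page size value is arbitrary):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.hbase.filter.PageFilter;

    public class FilterRoundTripSketch {
      public static Filter roundTrip() throws IOException {
        Filter original = new PageFilter(3);               // arbitrary page size

        // Decompose the filter to bytes.
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(stream);
        original.write(out);
        out.close();
        byte[] buffer = stream.toByteArray();

        // Recompose it into a fresh instance.
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer));
        Filter copy = new PageFilter();
        copy.readFields(in);
        return copy;                                        // should behave like 'original'
      }
    }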
pageSizeTests(newFilter); } - + private void pageSizeTests(Filter f) throws Exception { testFiltersBeyondPageSize(f, ROW_LIMIT); } - + private void testFiltersBeyondPageSize(final Filter f, final int pageSize) { int count = 0; for (int i = 0; i < (pageSize * 2); i++) { boolean filterOut = f.filterRow(); - + if(filterOut) { break; } else { count++; } - + // If at last row, should tell us to skip all remaining if(count == pageSize) { assertTrue(f.filterAllRemaining()); } else { assertFalse(f.filterAllRemaining()); } - + } assertEquals(pageSize, count); } diff --git a/core/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java b/core/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java index 81249cb..4a1b576 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java +++ b/core/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java @@ -26,10 +26,10 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.util.Bytes; /** - * Tests for {@link SingleColumnValueExcludeFilter}. Because this filter + * Tests for {@link SingleColumnValueExcludeFilter}. Because this filter * extends {@link SingleColumnValueFilter}, only the added functionality is * tested. That is, method filterKeyValue(KeyValue). - * + * * @author ferdy * */ @@ -48,7 +48,7 @@ public class TestSingleColumnValueExcludeFilter extends TestCase { public void testFilterKeyValue() throws Exception { Filter filter = new SingleColumnValueExcludeFilter(COLUMN_FAMILY, COLUMN_QUALIFIER, CompareOp.EQUAL, VAL_1); - + // A 'match' situation KeyValue kv; kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1); @@ -61,7 +61,7 @@ public class TestSingleColumnValueExcludeFilter extends TestCase { kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1); assertTrue("otherColumn", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); assertFalse("allRemainingWhenMatch", filter.filterAllRemaining()); - + // A 'mismatch' situation filter.reset(); // INCLUDE expected because test column has not yet passed @@ -73,6 +73,6 @@ public class TestSingleColumnValueExcludeFilter extends TestCase { // After a mismatch (at least with LatestVersionOnly), subsequent columns are EXCLUDE kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1); assertTrue("otherColumn", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW); - } - + } + } diff --git a/core/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java b/core/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java index 25d18be..677a625 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java +++ b/core/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java @@ -41,9 +41,9 @@ public class TestSingleColumnValueFilter extends TestCase { private static final byte[] VAL_2 = Bytes.toBytes("ab"); private static final byte[] VAL_3 = Bytes.toBytes("abc"); private static final byte[] VAL_4 = Bytes.toBytes("abcd"); - private static final byte[] FULLSTRING_1 = + private static final byte[] FULLSTRING_1 = Bytes.toBytes("The quick brown fox jumps over the lazy dog."); - private static final byte[] FULLSTRING_2 = + private static final byte[] FULLSTRING_2 = Bytes.toBytes("The slow grey fox trips over the lazy dog."); private static final String QUICK_SUBSTR = "quick"; private static final String QUICK_REGEX = ".+quick.+"; 
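[Editorial sketch] The single-column-value hunks above, together with TestFilter.testSingleColumnValueFilter earlier in this patch, combine these filters on a Scan. A sketch of that combination with placeholder row/family/qualifier names; the FilterList construction is an assumption filled in from the partially elided test body:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SingleColumnValueScanSketch {
      public static Scan buildScan() {
        byte[] family = Bytes.toBytes("testFamilyOne");     // placeholder names
        SingleColumnValueFilter a = new SingleColumnValueFilter(
            family, Bytes.toBytes("qualA"), CompareOp.EQUAL, Bytes.toBytes("testValueOne"));
        SingleColumnValueFilter b = new SingleColumnValueFilter(
            family, Bytes.toBytes("qualB"), CompareOp.EQUAL, Bytes.toBytes("testValueTwo"));
        a.setFilterIfMissing(true);  // drop rows that do not carry the column at all
        b.setFilterIfMissing(true);

        List<Filter> filters = new ArrayList<Filter>();
        filters.add(a);
        filters.add(b);

        Scan scan = new Scan();
        scan.addFamily(family);      // limit the scan to one family, as the test does
        scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, filters));
        return scan;
      }
    }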
@@ -102,7 +102,7 @@ public class TestSingleColumnValueFilter extends TestCase { assertFalse("basicFilterNotNull", filter.filterRow()); } - private void substrFilterTests(Filter filter) + private void substrFilterTests(Filter filter) throws Exception { KeyValue kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, FULLSTRING_1); @@ -115,7 +115,7 @@ public class TestSingleColumnValueFilter extends TestCase { assertFalse("substrFilterNotNull", filter.filterRow()); } - private void regexFilterTests(Filter filter) + private void regexFilterTests(Filter filter) throws Exception { KeyValue kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, FULLSTRING_1); @@ -126,8 +126,8 @@ public class TestSingleColumnValueFilter extends TestCase { assertTrue("regexFalse", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); assertFalse("regexFilterAllRemaining", filter.filterAllRemaining()); assertFalse("regexFilterNotNull", filter.filterRow()); - } - + } + private Filter serializationTest(Filter filter) throws Exception { // Decompose filter to bytes. @@ -136,13 +136,13 @@ public class TestSingleColumnValueFilter extends TestCase { filter.write(out); out.close(); byte[] buffer = stream.toByteArray(); - + // Recompose filter. DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer)); Filter newFilter = new SingleColumnValueFilter(); newFilter.readFields(in); - + return newFilter; } @@ -154,12 +154,12 @@ public class TestSingleColumnValueFilter extends TestCase { basicFilterTests((SingleColumnValueFilter)basicFilter); substrFilterTests(substrFilter); regexFilterTests(regexFilter); - } + } /** * Tests serialization * @throws Exception - */ + */ public void testSerialization() throws Exception { Filter newFilter = serializationTest(basicFilter); basicFilterTests((SingleColumnValueFilter)newFilter); @@ -167,5 +167,5 @@ public class TestSingleColumnValueFilter extends TestCase { substrFilterTests(newFilter); newFilter = serializationTest(regexFilter); regexFilterTests(newFilter); - } + } } diff --git a/core/src/test/java/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java b/core/src/test/java/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java index ae920f6..18a4654 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java +++ b/core/src/test/java/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java @@ -98,7 +98,7 @@ public class TestHbaseObjectWritable extends TestCase { PrefixFilter.class); assertTrue(obj instanceof PrefixFilter); } - + private Object doType(final HBaseConfiguration conf, final Object value, final Class clazz) throws IOException { @@ -113,5 +113,5 @@ public class TestHbaseObjectWritable extends TestCase { dis.close(); return product; } - + } \ No newline at end of file diff --git a/core/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/core/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 0f5fc1f..804880d 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/core/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -49,14 +49,14 @@ import org.apache.hadoop.hbase.util.ClassSize; /** * Testing the sizing that HeapSize offers and compares to the size given by - * ClassSize. + * ClassSize. 
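[Editorial sketch] TestHeapSize, whose hunks follow, compares each class's own HeapSize accounting against ClassSize's reflection-based estimate. A minimal sketch of that comparison for KeyValue; the key bytes are placeholders, and the two numbers are printed only for illustration since the instance's reported size also covers its backing array:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.ClassSize;

    public class HeapSizeSketch {
      public static void main(String[] args) {
        // Reflection-based estimate of KeyValue's shallow footprint.
        long estimated = ClassSize.estimateBase(KeyValue.class, false);

        // What the class itself reports through the HeapSize interface.
        KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"),
            Bytes.toBytes("qual"), Bytes.toBytes("value"));
        long reported = kv.heapSize();

        System.out.println("estimateBase=" + estimated + " heapSize=" + reported);
      }
    }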
*/ public class TestHeapSize extends TestCase { static final Log LOG = LogFactory.getLog(TestHeapSize.class); // List of classes implementing HeapSize // BatchOperation, BatchUpdate, BlockIndex, Entry, Entry, HStoreKey // KeyValue, LruBlockCache, LruHashMap, Put, HLogKey - + /** * Test our hard-coded sizing of native java objects */ @@ -65,7 +65,7 @@ public class TestHeapSize extends TestCase { Class cl = null; long expected = 0L; long actual = 0L; - + // ArrayList cl = ArrayList.class; expected = ClassSize.estimateBase(cl, false); @@ -74,7 +74,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - + // ByteBuffer cl = ByteBuffer.class; expected = ClassSize.estimateBase(cl, false); @@ -83,7 +83,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - + // Integer cl = Integer.class; expected = ClassSize.estimateBase(cl, false); @@ -92,7 +92,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - + // Map.Entry // Interface is public, all others are not. Hard to size via ClassSize // cl = Map.Entry.class; @@ -102,7 +102,7 @@ public class TestHeapSize extends TestCase { // ClassSize.estimateBase(cl, true); // assertEquals(expected, actual); // } - + // Object cl = Object.class; expected = ClassSize.estimateBase(cl, false); @@ -111,7 +111,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - + // TreeMap cl = TreeMap.class; expected = ClassSize.estimateBase(cl, false); @@ -120,7 +120,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - + // String cl = String.class; expected = ClassSize.estimateBase(cl, false); @@ -183,7 +183,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - + // CopyOnWriteArraySet cl = CopyOnWriteArraySet.class; expected = ClassSize.estimateBase(cl, false); @@ -192,7 +192,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - + // CopyOnWriteArrayList cl = CopyOnWriteArrayList.class; expected = ClassSize.estimateBase(cl, false); @@ -201,22 +201,22 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - - + + } - + /** - * Testing the classes that implements HeapSize and are a part of 0.20. - * Some are not tested here for example BlockIndex which is tested in + * Testing the classes that implements HeapSize and are a part of 0.20. 
+ * Some are not tested here for example BlockIndex which is tested in * TestHFile since it is a non public class - * @throws IOException + * @throws IOException */ @SuppressWarnings("unchecked") public void testSizes() throws IOException { Class cl = null; long expected = 0L; long actual = 0L; - + //KeyValue cl = KeyValue.class; expected = ClassSize.estimateBase(cl, false); @@ -226,7 +226,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - + //Put cl = Put.class; expected = ClassSize.estimateBase(cl, false); @@ -238,7 +238,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - + //LruBlockCache Overhead cl = LruBlockCache.class; actual = LruBlockCache.CACHE_FIXED_OVERHEAD; @@ -247,7 +247,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - + // CachedBlock Fixed Overhead // We really need "deep" sizing but ClassSize does not do this. // Perhaps we should do all these more in this style.... @@ -262,7 +262,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(ByteBuffer.class, true); assertEquals(expected, actual); } - + // MemStore Overhead cl = MemStore.class; actual = MemStore.FIXED_OVERHEAD; @@ -271,7 +271,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - + // MemStore Deep Overhead actual = MemStore.DEEP_OVERHEAD; expected = ClassSize.estimateBase(cl, false); @@ -290,7 +290,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(CopyOnWriteArrayList.class, true); assertEquals(expected, actual); } - + // Store Overhead cl = Store.class; actual = Store.FIXED_OVERHEAD; @@ -299,7 +299,7 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - + // Region Overhead cl = HRegion.class; actual = HRegion.FIXED_OVERHEAD; @@ -308,12 +308,12 @@ public class TestHeapSize extends TestCase { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - + // Currently NOT testing Deep Overheads of many of these classes. // Deep overheads cover a vast majority of stuff, but will not be 100% // accurate because it's unclear when we're referencing stuff that's already // accounted for. But we have satisfied our two core requirements. - // Sizing is quite accurate now, and our tests will throw errors if + // Sizing is quite accurate now, and our tests will throw errors if // any of these classes are modified without updating overhead sizes. } diff --git a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/KVGenerator.java b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/KVGenerator.java index ca8b80a..b22cb8c 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/KVGenerator.java +++ b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/KVGenerator.java @@ -5,9 +5,9 @@ * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -55,7 +55,7 @@ class KVGenerator { lastKey = new BytesWritable(); fillKey(lastKey); } - + private void fillKey(BytesWritable o) { int len = keyLenRNG.nextInt(); if (len < MIN_KEY_LEN) len = MIN_KEY_LEN; @@ -89,16 +89,16 @@ class KVGenerator { n += l; } } - + private void incrementPrefix() { for (int i = MIN_KEY_LEN - 1; i >= 0; --i) { ++prefix[i]; if (prefix[i] != 0) return; } - + throw new RuntimeException("Prefix overflown"); } - + public void next(BytesWritable key, BytesWritable value, boolean dupKey) { if (dupKey) { key.set(lastKey); diff --git a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/KeySampler.java b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/KeySampler.java index e6cf763..2489029 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/KeySampler.java +++ b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/KeySampler.java @@ -5,9 +5,9 @@ * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -48,7 +48,7 @@ class KeySampler { return (b[o] & 0xff) << 24 | (b[o + 1] & 0xff) << 16 | (b[o + 2] & 0xff) << 8 | (b[o + 3] & 0xff); } - + public void next(BytesWritable key) { key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt())); random.nextBytes(key.get()); diff --git a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java index 1312da0..a133cb4 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java +++ b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java @@ -5,9 +5,9 @@ * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -31,7 +31,7 @@ public class NanoTimer { /** * Constructor - * + * * @param start * Start the timer upon construction. */ @@ -41,7 +41,7 @@ public class NanoTimer { /** * Start the timer. - * + * * Note: No effect if timer is already started. */ public void start() { @@ -53,7 +53,7 @@ public class NanoTimer { /** * Stop the timer. - * + * * Note: No effect if timer is already stopped. */ public void stop() { @@ -65,7 +65,7 @@ public class NanoTimer { /** * Read the timer. - * + * * @return the elapsed time in nano-seconds. Note: If the timer is never * started before, -1 is returned. */ @@ -86,7 +86,7 @@ public class NanoTimer { /** * Checking whether the timer is started - * + * * @return true if timer is started. */ public boolean isStarted() { @@ -95,7 +95,7 @@ public class NanoTimer { /** * Format the elapsed time to a human understandable string. - * + * * Note: If timer is never started, "ERR" will be returned. 
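[Editorial sketch] NanoTimer's contract, as described in the javadoc above, is a simple start/stop/read cycle. A usage sketch; the timed work is a placeholder:

    import org.apache.hadoop.hbase.io.hfile.NanoTimer;

    public class NanoTimerSketch {
      public static void main(String[] args) {
        NanoTimer timer = new NanoTimer(false); // do not start on construction
        timer.start();
        doSomeWork();                           // placeholder for the code being measured
        timer.stop();
        long elapsedNanos = timer.read();       // -1 if the timer was never started
        System.out.println("elapsed: " + elapsedNanos + "ns (" + timer + ")");
      }

      private static void doSomeWork() {
        for (int i = 0; i < 1000000; i++) { /* spin */ }
      }
    }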
*/ public String toString() { @@ -109,7 +109,7 @@ public class NanoTimer { /** * A utility method to format a time duration in nano seconds into a human * understandable stirng. - * + * * @param t * Time duration in nano seconds. * @return String representation. @@ -161,19 +161,19 @@ public class NanoTimer { /** * StringBuilder sb = new StringBuilder(); String sep = ""; - * + * * if (dd > 0) { String unit = (dd > 1) ? "days" : "day"; * sb.append(String.format("%s%d%s", sep, dd, unit)); sep = " "; } - * + * * if (hh > 0) { String unit = (hh > 1) ? "hrs" : "hr"; * sb.append(String.format("%s%d%s", sep, hh, unit)); sep = " "; } - * + * * if (mm > 0) { String unit = (mm > 1) ? "mins" : "min"; * sb.append(String.format("%s%d%s", sep, mm, unit)); sep = " "; } - * + * * if (ss > 0) { String unit = (ss > 1) ? "secs" : "sec"; * sb.append(String.format("%s%.3f%s", sep, ss, unit)); sep = " "; } - * + * * return sb.toString(); */ } @@ -184,7 +184,7 @@ public class NanoTimer { /** * Simple tester. - * + * * @param args */ public static void main(String[] args) { diff --git a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java index 3219664..7232cad 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java +++ b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java @@ -5,9 +5,9 @@ * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -36,7 +36,7 @@ public class RandomDistribution { public static interface DiscreteRNG { /** * Get the next random number - * + * * @return the next random number. */ public int nextInt(); @@ -53,14 +53,14 @@ public class RandomDistribution { /** * Generate random integers from min (inclusive) to max (exclusive) * following even distribution. - * + * * @param random * The basic random number generator. * @param min * Minimum integer * @param max * maximum integer (exclusive). - * + * */ public Flat(Random random, int min, int max) { if (min >= max) { @@ -70,7 +70,7 @@ public class RandomDistribution { this.min = min; this.max = max; } - + /** * @see DiscreteRNG#nextInt() */ @@ -83,7 +83,7 @@ public class RandomDistribution { /** * Zipf distribution. The ratio of the probabilities of integer i and j is * defined as follows: - * + * * P(i)/P(j)=((j-min+1)/(i-min+1))^sigma. */ public static final class Zipf implements DiscreteRNG { @@ -94,7 +94,7 @@ public class RandomDistribution { /** * Constructor - * + * * @param r * The random number generator. * @param min @@ -110,7 +110,7 @@ public class RandomDistribution { /** * Constructor. - * + * * @param r * The random number generator. * @param min @@ -186,9 +186,9 @@ public class RandomDistribution { /** * Binomial distribution. 
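[Editorial sketch] RandomDistribution wraps several integer distributions behind the DiscreteRNG interface shown above. A sketch of drawing from the flat (uniform) generator; the Zipf and Binomial variants are constructed analogously with their extra shape parameters. The seed and bounds here are arbitrary, and Flat's public visibility is assumed from its public constructor:

    import java.util.Random;
    import org.apache.hadoop.hbase.io.hfile.RandomDistribution;

    public class DiscreteRngSketch {
      public static void main(String[] args) {
        Random random = new Random(12345L);
        // Uniform integers in [10, 20), per the Flat constructor's contract.
        RandomDistribution.DiscreteRNG keyLen =
            new RandomDistribution.Flat(random, 10, 20);
        for (int i = 0; i < 5; i++) {
          System.out.println(keyLen.nextInt());
        }
      }
    }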
- * + * * P(k)=select(n, k)*p^k*(1-p)^(n-k) (k = 0, 1, ..., n) - * + * * P(k)=select(max-min-1, k-min)*p^(k-min)*(1-p)^(k-min)*(1-p)^(max-k-1) */ public static final class Binomial implements DiscreteRNG { @@ -204,7 +204,7 @@ public class RandomDistribution { } return ret; } - + private static double power(double p, int k) { return Math.exp(k * Math.log(p)); } @@ -212,7 +212,7 @@ public class RandomDistribution { /** * Generate random integers from min (inclusive) to max (exclusive) * following Binomial distribution. - * + * * @param random * The basic random number generator. * @param min @@ -221,7 +221,7 @@ public class RandomDistribution { * maximum integer (exclusive). * @param p * parameter. - * + * */ public Binomial(Random random, int min, int max, double p) { if (min >= max) { diff --git a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java index 8e55719..61b20ca 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java +++ b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java @@ -56,14 +56,14 @@ public class RandomSeek { return "2" + Integer.toString(7+r.nextInt(2)) + Integer.toString(r.nextInt(100)); //return new String(r.nextInt(100)); } - + public static void main(String [] argv) throws IOException { Configuration conf = new Configuration(); conf.setInt("io.file.buffer.size", 64*1024); RawLocalFileSystem rlfs = new RawLocalFileSystem(); rlfs.setConf(conf); LocalFileSystem lfs = new LocalFileSystem(rlfs); - + Path path = new Path("/Users/ryan/rfile.big.txt"); long start = System.currentTimeMillis(); SimpleBlockCache cache = new SimpleBlockCache(); @@ -72,11 +72,11 @@ public class RandomSeek { reader.loadFileInfo(); System.out.println(reader.trailer); long end = System.currentTimeMillis(); - + System.out.println("Index read time: " + (end - start)); List keys = slurp("/Users/ryan/xaa.50k"); - + // Get a scanner that doesn't cache and that uses pread. 
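[Editorial sketch] RandomSeek, in the hunks that follow, loads the reader's file info and then asks for a scanner that bypasses the block cache and uses positional reads. A sketch of that read loop, written against an already-open HFile.Reader; construction and error handling are omitted, and the seekTo/next/getKey/getValue signatures are assumed from the usage visible in this patch:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileScanner;

    public class HFilePreadSketch {
      /** Sums key/value bytes with a non-caching, pread-based scanner (assumed API). */
      static long countBytes(HFile.Reader reader) throws IOException {
        reader.loadFileInfo();                                  // trailer + file info first
        HFileScanner scanner = reader.getScanner(false, true);  // no caching, use pread
        if (!scanner.seekTo()) {
          return 0;                                             // empty file
        }
        long totalBytes = 0;
        do {
          ByteBuffer k = scanner.getKey();
          ByteBuffer v = scanner.getValue();
          totalBytes += k.limit() + v.limit();
        } while (scanner.next());
        return totalBytes;
      }
    }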
HFileScanner scanner = reader.getScanner(false, true); int count; @@ -108,17 +108,17 @@ public class RandomSeek { totalBytes += k.limit(); totalBytes += v.limit(); } - + if ( count % 1000 == 0 ) { end = System.nanoTime(); - + System.out.println("Cache block count: " + cache.size() + " dumped: "+ cache.dumps); //System.out.println("Cache size: " + cache.heapSize()); double msTime = ((end - start) / 1000000.0); - System.out.println("Seeked: "+ count + " in " + msTime + " (ms) " + System.out.println("Seeked: "+ count + " in " + msTime + " (ms) " + (1000.0 / msTime ) + " seeks/ms " + (msTime / 1000.0) + " ms/seek"); - + start = System.nanoTime(); } } diff --git a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java index 5d1cf38..0c6e162 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java +++ b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java @@ -23,7 +23,7 @@ import java.nio.ByteBuffer; import junit.framework.TestCase; public class TestCachedBlockQueue extends TestCase { - + public void testQueue() throws Exception { CachedBlock cb1 = new CachedBlock(1000, "cb1", 1); @@ -36,9 +36,9 @@ public class TestCachedBlockQueue extends TestCase { CachedBlock cb8 = new CachedBlock(1500, "cb8", 8); CachedBlock cb9 = new CachedBlock(1000, "cb9", 9); CachedBlock cb10 = new CachedBlock(1500, "cb10", 10); - + CachedBlockQueue queue = new CachedBlockQueue(10000,1000); - + queue.add(cb1); queue.add(cb2); queue.add(cb3); @@ -49,14 +49,14 @@ public class TestCachedBlockQueue extends TestCase { queue.add(cb8); queue.add(cb9); queue.add(cb10); - + // We expect cb1 through cb8 to be in the queue long expectedSize = cb1.heapSize() + cb2.heapSize() + cb3.heapSize() + cb4.heapSize() + cb5.heapSize() + cb6.heapSize() + cb7.heapSize() + cb8.heapSize(); - + assertEquals(queue.heapSize(), expectedSize); - + org.apache.hadoop.hbase.io.hfile.CachedBlock [] blocks = queue.get(); assertEquals(blocks[0].getName(), "cb1"); assertEquals(blocks[1].getName(), "cb2"); @@ -66,9 +66,9 @@ public class TestCachedBlockQueue extends TestCase { assertEquals(blocks[5].getName(), "cb6"); assertEquals(blocks[6].getName(), "cb7"); assertEquals(blocks[7].getName(), "cb8"); - + } - + public void testQueueSmallBlockEdgeCase() throws Exception { CachedBlock cb1 = new CachedBlock(1000, "cb1", 1); @@ -81,9 +81,9 @@ public class TestCachedBlockQueue extends TestCase { CachedBlock cb8 = new CachedBlock(1500, "cb8", 8); CachedBlock cb9 = new CachedBlock(1000, "cb9", 9); CachedBlock cb10 = new CachedBlock(1500, "cb10", 10); - + CachedBlockQueue queue = new CachedBlockQueue(10000,1000); - + queue.add(cb1); queue.add(cb2); queue.add(cb3); @@ -94,21 +94,21 @@ public class TestCachedBlockQueue extends TestCase { queue.add(cb8); queue.add(cb9); queue.add(cb10); - + CachedBlock cb0 = new CachedBlock(10 + CachedBlock.PER_BLOCK_OVERHEAD, "cb0", 0); queue.add(cb0); - + // This is older so we must include it, but it will not end up kicking // anything out because (heapSize - cb8.heapSize + cb0.heapSize < maxSize) // and we must always maintain heapSize >= maxSize once we achieve it. 
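[Editorial sketch] TestCachedBlockQueue, in the hunks that follow, feeds blocks into a CachedBlockQueue and checks which ones survive. A sketch of that bookkeeping, placed in the same package so the test-visible types resolve; the caller is assumed to supply already-built CachedBlock instances, since the block constructor used by the test is a private test helper:

    package org.apache.hadoop.hbase.io.hfile;

    public class CachedBlockQueueSketch {
      /** Retains roughly maxHeap bytes of the oldest blocks, as the test verifies. */
      static CachedBlock[] oldest(CachedBlock[] candidates, long maxHeap, long blockSize) {
        CachedBlockQueue queue = new CachedBlockQueue(maxHeap, blockSize);
        for (CachedBlock cb : candidates) {
          queue.add(cb);               // the queue tracks the accumulated heapSize()
        }
        // Per the test, the returned blocks are ordered by access time, oldest first.
        return queue.get();
      }
    }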
- + // We expect cb0 through cb8 to be in the queue long expectedSize = cb1.heapSize() + cb2.heapSize() + cb3.heapSize() + cb4.heapSize() + cb5.heapSize() + cb6.heapSize() + cb7.heapSize() + cb8.heapSize() + cb0.heapSize(); - + assertEquals(queue.heapSize(), expectedSize); - + org.apache.hadoop.hbase.io.hfile.CachedBlock [] blocks = queue.get(); assertEquals(blocks[0].getName(), "cb0"); assertEquals(blocks[1].getName(), "cb1"); @@ -119,9 +119,9 @@ public class TestCachedBlockQueue extends TestCase { assertEquals(blocks[6].getName(), "cb6"); assertEquals(blocks[7].getName(), "cb7"); assertEquals(blocks[8].getName(), "cb8"); - + } - + private static class CachedBlock extends org.apache.hadoop.hbase.io.hfile.CachedBlock { public CachedBlock(long heapSize, String name, long accessTime) { @@ -130,5 +130,5 @@ public class TestCachedBlockQueue extends TestCase { accessTime,false); } } - + } diff --git a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java index ff94c07..6b32b25 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -46,7 +46,7 @@ import org.apache.hadoop.io.RawComparator; */ public class TestHFile extends HBaseTestCase { static final Log LOG = LogFactory.getLog(TestHFile.class); - + private static String ROOT_DIR = System.getProperty("test.build.data", "/tmp/TestHFile"); private final int minBlockSize = 512; @@ -55,7 +55,7 @@ public class TestHFile extends HBaseTestCase { /** * Test empty HFile. * Test all features work reasonably when hfile is empty of entries. - * @throws IOException + * @throws IOException */ public void testEmptyHFile() throws IOException { Path f = new Path(ROOT_DIR, getName()); @@ -216,7 +216,7 @@ public class TestHFile extends HBaseTestCase { metablocks("none"); metablocks("gz"); } - + public void testNullMetaBlocks() throws Exception { Path mFile = new Path(ROOT_DIR, "nometa.hfile"); FSDataOutputStream fout = createFSOutput(mFile); @@ -238,8 +238,8 @@ public class TestHFile extends HBaseTestCase { assertTrue(Compression.Algorithm.GZ.ordinal() == 1); assertTrue(Compression.Algorithm.NONE.ordinal() == 2); } - - + + public void testComparator() throws IOException { Path mFile = new Path(ROOT_DIR, "meta.tfile"); FSDataOutputStream fout = createFSOutput(mFile); @@ -249,7 +249,7 @@ public class TestHFile extends HBaseTestCase { public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { return -Bytes.compareTo(b1, s1, l1, b2, s2, l2); - + } @Override public int compare(byte[] o1, byte[] o2) { @@ -261,7 +261,7 @@ public class TestHFile extends HBaseTestCase { writer.append("1".getBytes(), "0".getBytes()); writer.close(); } - + /** * Checks if the HeapSize calculator is within reason */ @@ -270,7 +270,7 @@ public class TestHFile extends HBaseTestCase { Class cl = null; long expected = 0L; long actual = 0L; - + cl = BlockIndex.class; expected = ClassSize.estimateBase(cl, false); BlockIndex bi = new BlockIndex(Bytes.BYTES_RAWCOMPARATOR); @@ -284,5 +284,5 @@ public class TestHFile extends HBaseTestCase { assertEquals(expected, actual); } } - + } diff --git a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java index 2dc9b6c..7edfa34 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java +++ 
b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
@@ -239,7 +239,7 @@ public class TestHFilePerformance extends TestCase {
         fs.getFileStatus(path).getLen(), null, false);
     reader.loadFileInfo();
     switch (method) {
-      
+
       case 0:
       case 1:
       default:
diff --git a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
index 7f63980..de6fd79 100644
--- a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
+++ b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
@@ -5,9 +5,9 @@
  * licenses this file to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
- * 
+ *
  * http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
@@ -49,7 +49,7 @@ import org.apache.hadoop.io.BytesWritable;
  * Remove after tfile is committed and use the tfile version of this class
  * instead.
    */ -public class TestHFileSeek extends TestCase { +public class TestHFileSeek extends TestCase { private MyOptions options; private Configuration conf; private Path path; @@ -85,7 +85,7 @@ public class TestHFileSeek extends TestCase { new KVGenerator(rng, true, keyLenGen, valLenGen, wordLenGen, options.dictSize); } - + @Override public void tearDown() { try { @@ -95,7 +95,7 @@ public class TestHFileSeek extends TestCase { // Nothing } } - + private static FSDataOutputStream createFSOutput(Path name, FileSystem fs) throws IOException { if (fs.exists(name)) { @@ -149,7 +149,7 @@ public class TestHFileSeek extends TestCase { System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n", timer.toString(), (double) fsize / 1024 / 1024, fsize / duration); } - + public void seekTFile() throws IOException { int miss = 0; long totalBytes = 0; @@ -186,7 +186,7 @@ public class TestHFileSeek extends TestCase { (double) totalBytes / 1024 / (options.seekCount - miss)); } - + public void testSeeks() throws IOException { if (options.doCreate()) { createTFile(); @@ -200,7 +200,7 @@ public class TestHFileSeek extends TestCase { fs.delete(path, true); } } - + private static class IntegerRange { private final int from, to; @@ -233,7 +233,7 @@ public class TestHFileSeek extends TestCase { int dictSize = 1000; int minWordLen = 5; int maxWordLen = 20; - + String rootDir = System.getProperty("test.build.data", "/tmp/TestTFileSeek"); String file = "TestTFileSeek"; @@ -391,7 +391,7 @@ public class TestHFileSeek extends TestCase { if (line.hasOption('o')) { fsOutputBufferSize = Integer.parseInt(line.getOptionValue('o')); } - + if (line.hasOption('n')) { seekCount = Integer.parseInt(line.getOptionValue('n')); } @@ -415,7 +415,7 @@ public class TestHFileSeek extends TestCase { if (line.hasOption('r')) { rootDir = line.getOptionValue('r'); } - + if (line.hasOption('f')) { file = line.getOptionValue('f'); } @@ -478,11 +478,11 @@ public class TestHFileSeek extends TestCase { return (op & OP_READ) != 0; } } - + public static void main(String[] argv) throws IOException { TestHFileSeek testCase = new TestHFileSeek(); MyOptions options = new MyOptions(argv); - + if (options.proceed == false) { return; } diff --git a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 1bca651..bcde04a 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/core/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -29,27 +29,27 @@ import junit.framework.TestCase; /** * Tests the concurrent LruBlockCache.
    - * + * * Tests will ensure it grows and shrinks in size properly, - * evictions run when they're supposed to and do what they should, + * evictions run when they're supposed to and do what they should, * and that cached blocks are accessible when expected to be. */ public class TestLruBlockCache extends TestCase { - + public void testBackgroundEvictionThread() throws Exception { long maxSize = 100000; long blockSize = calculateBlockSizeDefault(maxSize, 9); // room for 9, will evict - + LruBlockCache cache = new LruBlockCache(maxSize,blockSize); - + Block [] blocks = generateFixedBlocks(10, blockSize, "block"); - + // Add all the blocks for(Block block : blocks) { cache.cacheBlock(block.blockName, block.buf); } - + // Let the eviction run int n = 0; while(cache.getEvictionCount() == 0) { @@ -58,43 +58,43 @@ public class TestLruBlockCache extends TestCase { assertTrue(n++ < 2); } System.out.println("Background Evictions run: " + cache.getEvictionCount()); - + // A single eviction run should have occurred assertEquals(cache.getEvictionCount(), 1); } - + public void testCacheSimple() throws Exception { - + long maxSize = 1000000; long blockSize = calculateBlockSizeDefault(maxSize, 101); - + LruBlockCache cache = new LruBlockCache(maxSize, blockSize); Block [] blocks = generateRandomBlocks(100, blockSize); - + long expectedCacheSize = cache.heapSize(); - + // Confirm empty for(Block block : blocks) { assertTrue(cache.getBlock(block.blockName) == null); } - + // Add blocks for(Block block : blocks) { cache.cacheBlock(block.blockName, block.buf); expectedCacheSize += block.heapSize(); } - + // Verify correctly calculated cache heap size assertEquals(expectedCacheSize, cache.heapSize()); - + // Check if all blocks are properly cached and retrieved for(Block block : blocks) { ByteBuffer buf = cache.getBlock(block.blockName); assertTrue(buf != null); assertEquals(buf.capacity(), block.buf.capacity()); } - + // Re-add same blocks and ensure nothing has changed for(Block block : blocks) { try { @@ -104,52 +104,52 @@ public class TestLruBlockCache extends TestCase { // expected } } - + // Verify correctly calculated cache heap size assertEquals(expectedCacheSize, cache.heapSize()); - + // Check if all blocks are properly cached and retrieved for(Block block : blocks) { ByteBuffer buf = cache.getBlock(block.blockName); assertTrue(buf != null); assertEquals(buf.capacity(), block.buf.capacity()); } - + // Expect no evictions assertEquals(0, cache.getEvictionCount()); } - + public void testCacheEvictionSimple() throws Exception { - + long maxSize = 100000; long blockSize = calculateBlockSizeDefault(maxSize, 10); - + LruBlockCache cache = new LruBlockCache(maxSize,blockSize,false); - + Block [] blocks = generateFixedBlocks(10, blockSize, "block"); - + long expectedCacheSize = cache.heapSize(); - + // Add all the blocks for(Block block : blocks) { cache.cacheBlock(block.blockName, block.buf); expectedCacheSize += block.heapSize(); } - + // A single eviction run should have occurred assertEquals(1, cache.getEvictionCount()); - + // Our expected size overruns acceptable limit - assertTrue(expectedCacheSize > + assertTrue(expectedCacheSize > (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); - + // But the cache did not grow beyond max assertTrue(cache.heapSize() < maxSize); - + // And is still below the acceptable limit - assertTrue(cache.heapSize() < + assertTrue(cache.heapSize() < (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); - + // All blocks except block 0 and 1 should be in the cache 
assertTrue(cache.getBlock(blocks[0].blockName) == null); assertTrue(cache.getBlock(blocks[1].blockName) == null); @@ -160,308 +160,308 @@ public class TestLruBlockCache extends TestCase { } public void testCacheEvictionTwoPriorities() throws Exception { - + long maxSize = 100000; long blockSize = calculateBlockSizeDefault(maxSize, 10); - + LruBlockCache cache = new LruBlockCache(maxSize,blockSize,false); - + Block [] singleBlocks = generateFixedBlocks(5, 10000, "single"); Block [] multiBlocks = generateFixedBlocks(5, 10000, "multi"); - + long expectedCacheSize = cache.heapSize(); - + // Add and get the multi blocks for(Block block : multiBlocks) { cache.cacheBlock(block.blockName, block.buf); expectedCacheSize += block.heapSize(); assertEquals(cache.getBlock(block.blockName), block.buf); } - + // Add the single blocks (no get) for(Block block : singleBlocks) { cache.cacheBlock(block.blockName, block.buf); expectedCacheSize += block.heapSize(); } - + // A single eviction run should have occurred assertEquals(cache.getEvictionCount(), 1); - + // We expect two entries evicted assertEquals(cache.getEvictedCount(), 2); - + // Our expected size overruns acceptable limit - assertTrue(expectedCacheSize > + assertTrue(expectedCacheSize > (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); - + // But the cache did not grow beyond max assertTrue(cache.heapSize() <= maxSize); - + // And is now below the acceptable limit - assertTrue(cache.heapSize() <= + assertTrue(cache.heapSize() <= (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); - + // We expect fairness across the two priorities. // This test makes multi go barely over its limit, in-memory // empty, and the rest in single. Two single evictions and // one multi eviction expected. assertTrue(cache.getBlock(singleBlocks[0].blockName) == null); assertTrue(cache.getBlock(multiBlocks[0].blockName) == null); - + // And all others to be cached for(int i=1;i<4;i++) { - assertEquals(cache.getBlock(singleBlocks[i].blockName), + assertEquals(cache.getBlock(singleBlocks[i].blockName), singleBlocks[i].buf); - assertEquals(cache.getBlock(multiBlocks[i].blockName), + assertEquals(cache.getBlock(multiBlocks[i].blockName), multiBlocks[i].buf); } } public void testCacheEvictionThreePriorities() throws Exception { - + long maxSize = 100000; long blockSize = calculateBlockSize(maxSize, 10); - + LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false, (int)Math.ceil(1.2*maxSize/blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.98f, // min 0.99f, // acceptable 0.33f, // single 0.33f, // multi 0.34f);// memory - - + + Block [] singleBlocks = generateFixedBlocks(5, blockSize, "single"); Block [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); Block [] memoryBlocks = generateFixedBlocks(5, blockSize, "memory"); - + long expectedCacheSize = cache.heapSize(); - + // Add 3 blocks from each priority for(int i=0;i<3;i++) { - + // Just add single blocks cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i].buf); expectedCacheSize += singleBlocks[i].heapSize(); - + // Add and get multi blocks cache.cacheBlock(multiBlocks[i].blockName, multiBlocks[i].buf); expectedCacheSize += multiBlocks[i].heapSize(); cache.getBlock(multiBlocks[i].blockName); - + // Add memory blocks as such cache.cacheBlock(memoryBlocks[i].blockName, memoryBlocks[i].buf, true); expectedCacheSize += memoryBlocks[i].heapSize(); - + } - + // Do not expect any evictions yet assertEquals(0, 
cache.getEvictionCount()); - + // Verify cache size assertEquals(expectedCacheSize, cache.heapSize()); - + // Insert a single block, oldest single should be evicted cache.cacheBlock(singleBlocks[3].blockName, singleBlocks[3].buf); - + // Single eviction, one thing evicted assertEquals(1, cache.getEvictionCount()); assertEquals(1, cache.getEvictedCount()); - + // Verify oldest single block is the one evicted assertEquals(null, cache.getBlock(singleBlocks[0].blockName)); - + // Change the oldest remaining single block to a multi cache.getBlock(singleBlocks[1].blockName); - + // Insert another single block cache.cacheBlock(singleBlocks[4].blockName, singleBlocks[4].buf); - + // Two evictions, two evicted. assertEquals(2, cache.getEvictionCount()); assertEquals(2, cache.getEvictedCount()); - + // Oldest multi block should be evicted now assertEquals(null, cache.getBlock(multiBlocks[0].blockName)); - + // Insert another memory block cache.cacheBlock(memoryBlocks[3].blockName, memoryBlocks[3].buf, true); - + // Three evictions, three evicted. assertEquals(3, cache.getEvictionCount()); assertEquals(3, cache.getEvictedCount()); - + // Oldest memory block should be evicted now assertEquals(null, cache.getBlock(memoryBlocks[0].blockName)); - + // Add a block that is twice as big (should force two evictions) Block [] bigBlocks = generateFixedBlocks(3, blockSize*3, "big"); cache.cacheBlock(bigBlocks[0].blockName, bigBlocks[0].buf); - + // Four evictions, six evicted (inserted block 3X size, expect +3 evicted) assertEquals(4, cache.getEvictionCount()); assertEquals(6, cache.getEvictedCount()); - + // Expect three remaining singles to be evicted assertEquals(null, cache.getBlock(singleBlocks[2].blockName)); assertEquals(null, cache.getBlock(singleBlocks[3].blockName)); assertEquals(null, cache.getBlock(singleBlocks[4].blockName)); - + // Make the big block a multi block cache.getBlock(bigBlocks[0].blockName); - + // Cache another single big block cache.cacheBlock(bigBlocks[1].blockName, bigBlocks[1].buf); - + // Five evictions, nine evicted (3 new) assertEquals(5, cache.getEvictionCount()); assertEquals(9, cache.getEvictedCount()); - + // Expect three remaining multis to be evicted assertEquals(null, cache.getBlock(singleBlocks[1].blockName)); assertEquals(null, cache.getBlock(multiBlocks[1].blockName)); assertEquals(null, cache.getBlock(multiBlocks[2].blockName)); - + // Cache a big memory block cache.cacheBlock(bigBlocks[2].blockName, bigBlocks[2].buf, true); - + // Six evictions, twelve evicted (3 new) assertEquals(6, cache.getEvictionCount()); assertEquals(12, cache.getEvictedCount()); - + // Expect three remaining in-memory to be evicted assertEquals(null, cache.getBlock(memoryBlocks[1].blockName)); assertEquals(null, cache.getBlock(memoryBlocks[2].blockName)); assertEquals(null, cache.getBlock(memoryBlocks[3].blockName)); - - + + } - + // test scan resistance public void testScanResistance() throws Exception { long maxSize = 100000; long blockSize = calculateBlockSize(maxSize, 10); - + LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false, (int)Math.ceil(1.2*maxSize/blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min 0.99f, // acceptable 0.33f, // single 0.33f, // multi - 0.34f);// memory - + 0.34f);// memory + Block [] singleBlocks = generateFixedBlocks(20, blockSize, "single"); Block [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); - + // Add 5 multi blocks for(Block block : 
multiBlocks) { cache.cacheBlock(block.blockName, block.buf); cache.getBlock(block.blockName); } - + // Add 5 single blocks for(int i=0;i<5;i++) { cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i].buf); } - + // An eviction ran assertEquals(1, cache.getEvictionCount()); - + // To drop down to 2/3 capacity, we'll need to evict 4 blocks assertEquals(4, cache.getEvictedCount()); - + // Should have been taken off equally from single and multi assertEquals(null, cache.getBlock(singleBlocks[0].blockName)); assertEquals(null, cache.getBlock(singleBlocks[1].blockName)); assertEquals(null, cache.getBlock(multiBlocks[0].blockName)); assertEquals(null, cache.getBlock(multiBlocks[1].blockName)); - + // Let's keep "scanning" by adding single blocks. From here on we only // expect evictions from the single bucket. - + // Every time we reach 10 total blocks (every 4 inserts) we get 4 single // blocks evicted. Inserting 13 blocks should yield 3 more evictions and // 12 more evicted. - + for(int i=5;i<18;i++) { cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i].buf); } - + // 4 total evictions, 16 total evicted assertEquals(4, cache.getEvictionCount()); assertEquals(16, cache.getEvictedCount()); - + // Should now have 7 total blocks assertEquals(7, cache.size()); } - + // test setMaxSize public void testResizeBlockCache() throws Exception { - + long maxSize = 300000; long blockSize = calculateBlockSize(maxSize, 31); - + LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false, (int)Math.ceil(1.2*maxSize/blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.98f, // min 0.99f, // acceptable 0.33f, // single 0.33f, // multi 0.34f);// memory - + Block [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); Block [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); Block [] memoryBlocks = generateFixedBlocks(10, blockSize, "memory"); - + // Add all blocks from all priorities for(int i=0;i<10;i++) { - + // Just add single blocks cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i].buf); - + // Add and get multi blocks cache.cacheBlock(multiBlocks[i].blockName, multiBlocks[i].buf); cache.getBlock(multiBlocks[i].blockName); - + // Add memory blocks as such cache.cacheBlock(memoryBlocks[i].blockName, memoryBlocks[i].buf, true); } - + // Do not expect any evictions yet assertEquals(0, cache.getEvictionCount()); - + // Resize to half capacity plus an extra block (otherwise we evict an extra) cache.setMaxSize((long)(maxSize * 0.5f)); - + // Should have run a single eviction assertEquals(1, cache.getEvictionCount()); // And we expect 1/2 of the blocks to be evicted assertEquals(15, cache.getEvictedCount()); - + // And the oldest 5 blocks from each category should be gone for(int i=0;i<5;i++) { assertEquals(null, cache.getBlock(singleBlocks[i].blockName)); assertEquals(null, cache.getBlock(multiBlocks[i].blockName)); assertEquals(null, cache.getBlock(memoryBlocks[i].blockName)); } - + // And the newest 5 blocks should still be accessible for(int i=5;i<10;i++) { assertEquals(singleBlocks[i].buf, cache.getBlock(singleBlocks[i].blockName)); assertEquals(multiBlocks[i].buf, cache.getBlock(multiBlocks[i].blockName)); assertEquals(memoryBlocks[i].buf, cache.getBlock(memoryBlocks[i].blockName)); - } + } } - + private Block [] generateFixedBlocks(int numBlocks, int size, String pfx) { Block [] blocks = new Block[numBlocks]; for(int i=0;i() { - // Starts at a particular row + // Starts at a particular row 
private int counter = startrow; private ImmutableBytesWritable key; private ImmutableBytesWritable value; private final Random random = new Random(System.currentTimeMillis()); - + public void close() throws IOException { // Nothing to do. } @@ -124,7 +124,7 @@ public class TestHFileOutputFormat extends HBaseTestCase { public void initialize(InputSplit arg0, TaskAttemptContext arg1) throws IOException, InterruptedException { // Nothing to do. - + } public boolean nextKeyValue() throws IOException, InterruptedException { diff --git a/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java b/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java index a816278..d61f6ba 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java +++ b/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java @@ -51,10 +51,10 @@ import static org.junit.Assert.assertTrue; /** * Tests various scan start and stop row scenarios. This is set in a scan and * tested in a MapReduce job to see if that is handed over and done properly - * too. + * too. */ public class TestTableInputFormatScan { - + static final Log LOG = LogFactory.getLog(TestTableInputFormatScan.class); static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -62,9 +62,9 @@ public class TestTableInputFormatScan { static final byte[] INPUT_FAMILY = Bytes.toBytes("contents"); static final String KEY_STARTROW = "startRow"; static final String KEY_LASTROW = "stpRow"; - + private static HTable table = null; - + @BeforeClass public static void setUpBeforeClass() throws Exception { // switch TIF to log at DEBUG level @@ -90,7 +90,7 @@ public class TestTableInputFormatScan { public void setUp() throws Exception { // nothing } - + /** * @throws java.lang.Exception */ @@ -105,229 +105,229 @@ public class TestTableInputFormatScan { */ public static class ScanMapper extends TableMapper { - + /** * Pass the key and value to reduce. - * - * @param key The key, here "aaa", "aab" etc. + * + * @param key The key, here "aaa", "aab" etc. * @param value The value is the same as the key. * @param context The task context. * @throws IOException When reading the rows fails. */ @Override public void map(ImmutableBytesWritable key, Result value, - Context context) + Context context) throws IOException, InterruptedException { if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> + Map>> cf = value.getMap(); if(!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + + throw new IOException("Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } String val = Bytes.toStringBinary(value.getValue(INPUT_FAMILY, null)); - LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) + + LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); context.write(key, key); } - + } - + /** * Checks the last and first key seen against the scanner boundaries. 
*/ - public static class ScanReducer - extends Reducer { - + private String first = null; private String last = null; - - protected void reduce(ImmutableBytesWritable key, - Iterable values, Context context) + + protected void reduce(ImmutableBytesWritable key, + Iterable values, Context context) throws IOException ,InterruptedException { int count = 0; for (ImmutableBytesWritable value : values) { String val = Bytes.toStringBinary(value.get()); - LOG.info("reduce: key[" + count + "] -> " + + LOG.info("reduce: key[" + count + "] -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); if (first == null) first = val; last = val; count++; } } - - protected void cleanup(Context context) + + protected void cleanup(Context context) throws IOException, InterruptedException { Configuration c = context.getConfiguration(); - String startRow = c.get(KEY_STARTROW); + String startRow = c.get(KEY_STARTROW); String lastRow = c.get(KEY_LASTROW); LOG.info("cleanup: first -> \"" + first + "\", start row -> \"" + startRow + "\""); LOG.info("cleanup: last -> \"" + last + "\", last row -> \"" + lastRow + "\""); - if (startRow != null && startRow.length() > 0) { + if (startRow != null && startRow.length() > 0) { assertEquals(startRow, first); } - if (lastRow != null && lastRow.length() > 0) { + if (lastRow != null && lastRow.length() > 0) { assertEquals(lastRow, last); } } - + } - + /** * Tests a MR scan using specific start and stop rows. - * + * * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * @throws ClassNotFoundException + * @throws InterruptedException */ @Test - public void testScanEmptyToEmpty() + public void testScanEmptyToEmpty() throws IOException, InterruptedException, ClassNotFoundException { testScan(null, null, null); } /** * Tests a MR scan using specific start and stop rows. - * + * * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * @throws ClassNotFoundException + * @throws InterruptedException */ @Test - public void testScanEmptyToAPP() + public void testScanEmptyToAPP() throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "app", "apo"); } - + /** * Tests a MR scan using specific start and stop rows. - * + * * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * @throws ClassNotFoundException + * @throws InterruptedException */ @Test - public void testScanEmptyToBBA() + public void testScanEmptyToBBA() throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "bba", "baz"); } - + /** * Tests a MR scan using specific start and stop rows. - * + * * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * @throws ClassNotFoundException + * @throws InterruptedException */ @Test - public void testScanEmptyToBBB() + public void testScanEmptyToBBB() throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "bbb", "bba"); } - + /** * Tests a MR scan using specific start and stop rows. - * + * * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * @throws ClassNotFoundException + * @throws InterruptedException */ @Test - public void testScanEmptyToOPP() + public void testScanEmptyToOPP() throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "opp", "opo"); } - + /** * Tests a MR scan using specific start and stop rows. 
- * + * * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * @throws ClassNotFoundException + * @throws InterruptedException */ @Test - public void testScanOBBToOPP() + public void testScanOBBToOPP() throws IOException, InterruptedException, ClassNotFoundException { testScan("obb", "opp", "opo"); } /** * Tests a MR scan using specific start and stop rows. - * + * * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * @throws ClassNotFoundException + * @throws InterruptedException */ @Test - public void testScanOBBToQPP() + public void testScanOBBToQPP() throws IOException, InterruptedException, ClassNotFoundException { testScan("obb", "qpp", "qpo"); } - + /** * Tests a MR scan using specific start and stop rows. - * + * * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * @throws ClassNotFoundException + * @throws InterruptedException */ @Test - public void testScanOPPToEmpty() + public void testScanOPPToEmpty() throws IOException, InterruptedException, ClassNotFoundException { testScan("opp", null, "zzz"); } - + /** * Tests a MR scan using specific start and stop rows. - * + * * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * @throws ClassNotFoundException + * @throws InterruptedException */ @Test - public void testScanYYXToEmpty() + public void testScanYYXToEmpty() throws IOException, InterruptedException, ClassNotFoundException { testScan("yyx", null, "zzz"); } /** * Tests a MR scan using specific start and stop rows. - * + * * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * @throws ClassNotFoundException + * @throws InterruptedException */ @Test - public void testScanYYYToEmpty() + public void testScanYYYToEmpty() throws IOException, InterruptedException, ClassNotFoundException { testScan("yyy", null, "zzz"); } /** * Tests a MR scan using specific start and stop rows. - * + * * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * @throws ClassNotFoundException + * @throws InterruptedException */ @Test - public void testScanYZYToEmpty() + public void testScanYZYToEmpty() throws IOException, InterruptedException, ClassNotFoundException { testScan("yzy", null, "zzz"); } /** * Tests a MR scan using specific start and stop rows. - * + * * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * @throws ClassNotFoundException + * @throws InterruptedException */ @SuppressWarnings("deprecation") - private void testScan(String start, String stop, String last) + private void testScan(String start, String stop, String last) throws IOException, InterruptedException, ClassNotFoundException { String jobName = "Scan" + (start != null ? start.toUpperCase() : "Empty") + "To" + (stop != null ? 
stop.toUpperCase() : "Empty"); @@ -346,11 +346,11 @@ public class TestTableInputFormatScan { LOG.info("scan before: " + scan); Job job = new Job(c, jobName); TableMapReduceUtil.initTableMapperJob( - Bytes.toString(TABLE_NAME), scan, ScanMapper.class, + Bytes.toString(TABLE_NAME), scan, ScanMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); job.setReducerClass(ScanReducer.class); job.setNumReduceTasks(1); // one to get final "first" and "last" key - FileOutputFormat.setOutputPath(job, new Path(job.getJobName())); + FileOutputFormat.setOutputPath(job, new Path(job.getJobName())); LOG.info("Started " + job.getJobName()); job.waitForCompletion(true); assertTrue(job.isComplete()); diff --git a/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java b/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java index 4a7d62c..d8b11db 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java +++ b/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java @@ -50,14 +50,14 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; * a particular cell, and write it back to the table. */ public class TestTableMapReduce extends MultiRegionTable { - + private static final Log LOG = LogFactory.getLog(TestTableMapReduce.class); static final String MULTI_REGION_TABLE_NAME = "mrtest"; static final byte[] INPUT_FAMILY = Bytes.toBytes("contents"); static final byte[] OUTPUT_FAMILY = Bytes.toBytes("text"); - - /** constructor */ + + /** constructor */ public TestTableMapReduce() { super(Bytes.toString(INPUT_FAMILY)); desc = new HTableDescriptor(MULTI_REGION_TABLE_NAME); @@ -70,30 +70,30 @@ public class TestTableMapReduce extends MultiRegionTable { */ public static class ProcessContentsMapper extends TableMapper { - + /** * Pass the key, and reversed value to reduce - * - * @param key - * @param value + * + * @param key + * @param value * @param context - * @throws IOException + * @throws IOException */ public void map(ImmutableBytesWritable key, Result value, - Context context) + Context context) throws IOException, InterruptedException { if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> + Map>> cf = value.getMap(); if(!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + + throw new IOException("Wrong input columns. 
Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } // Get the original value and reverse it - String originalValue = new String(value.getValue(INPUT_FAMILY, null), + String originalValue = new String(value.getValue(INPUT_FAMILY, null), HConstants.UTF8_ENCODING); StringBuilder newValue = new StringBuilder(originalValue); newValue.reverse(); @@ -103,19 +103,19 @@ public class TestTableMapReduce extends MultiRegionTable { context.write(key, outval); } } - + /** * Test a map/reduce against a multi-region table * @throws IOException - * @throws ClassNotFoundException - * @throws InterruptedException + * @throws ClassNotFoundException + * @throws InterruptedException */ - public void testMultiRegionTable() + public void testMultiRegionTable() throws IOException, InterruptedException, ClassNotFoundException { runTestOnTable(new HTable(conf, MULTI_REGION_TABLE_NAME)); } - private void runTestOnTable(HTable table) + private void runTestOnTable(HTable table) throws IOException, InterruptedException, ClassNotFoundException { MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1); @@ -128,12 +128,12 @@ public class TestTableMapReduce extends MultiRegionTable { scan.addFamily(INPUT_FAMILY); TableMapReduceUtil.initTableMapperJob( Bytes.toString(table.getTableName()), scan, - ProcessContentsMapper.class, ImmutableBytesWritable.class, + ProcessContentsMapper.class, ImmutableBytesWritable.class, Put.class, job); TableMapReduceUtil.initTableReducerJob( Bytes.toString(table.getTableName()), IdentityTableReducer.class, job); - FileOutputFormat.setOutputPath(job, new Path("test")); + FileOutputFormat.setOutputPath(job, new Path("test")); LOG.info("Started " + Bytes.toString(table.getTableName())); job.waitForCompletion(true); LOG.info("After map/reduce completion"); @@ -177,7 +177,7 @@ public class TestTableMapReduce extends MultiRegionTable { /** * Looks at every value of the mapreduce output and verifies that indeed * the values have been reversed. - * + * * @param table Table to scan. 
* @throws IOException * @throws NullPointerException if we failed to find a cell value @@ -210,14 +210,14 @@ public class TestTableMapReduce extends MultiRegionTable { break; } } - + String first = ""; if (firstValue == null) { throw new NullPointerException(Bytes.toString(r.getRow()) + ": first value is null"); } first = new String(firstValue, HConstants.UTF8_ENCODING); - + String second = ""; if (secondValue == null) { throw new NullPointerException(Bytes.toString(r.getRow()) + diff --git a/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java b/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java index 667d43b..516d567 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java +++ b/core/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java @@ -52,11 +52,11 @@ import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; public class TestTimeRangeMapRed extends HBaseClusterTestCase { - + private final static Log log = LogFactory.getLog(TestTimeRangeMapRed.class); - + private static final byte [] KEY = Bytes.toBytes("row1"); - private static final NavigableMap TIMESTAMP = + private static final NavigableMap TIMESTAMP = new TreeMap(); static { TIMESTAMP.put((long)1245620000, false); @@ -69,22 +69,22 @@ public class TestTimeRangeMapRed extends HBaseClusterTestCase { } static final long MINSTAMP = 1245620005; static final long MAXSTAMP = 1245620100 + 1; // maxStamp itself is excluded. so increment it. - + static final byte[] TABLE_NAME = Bytes.toBytes("table123"); static final byte[] FAMILY_NAME = Bytes.toBytes("text"); static final byte[] COLUMN_NAME = Bytes.toBytes("input"); - + protected HTableDescriptor desc; protected HTable table; - + public TestTimeRangeMapRed() { super(); System.setProperty("hadoop.log.dir", conf.get("hadoop.log.dir")); conf.set("mapred.output.dir", conf.get("hadoop.tmp.dir")); this.setOpenMetaTable(true); } - - @Override + + @Override public void setUp() throws Exception { super.setUp(); desc = new HTableDescriptor(TABLE_NAME); @@ -95,14 +95,14 @@ public class TestTimeRangeMapRed extends HBaseClusterTestCase { admin.createTable(desc); table = new HTable(conf, desc.getName()); } - - private static class ProcessTimeRangeMapper + + private static class ProcessTimeRangeMapper extends TableMapper implements Configurable { - + private Configuration conf = null; private HTable table = null; - + @Override public void map(ImmutableBytesWritable key, Result result, Context context) @@ -111,7 +111,7 @@ public class TestTimeRangeMapRed extends HBaseClusterTestCase { for (KeyValue kv : result.sorted()) { tsList.add(kv.getTimestamp()); } - + for (Long ts : tsList) { Put put = new Put(key.get()); put.add(FAMILY_NAME, COLUMN_NAME, ts, Bytes.toBytes(true)); @@ -134,10 +134,10 @@ public class TestTimeRangeMapRed extends HBaseClusterTestCase { e.printStackTrace(); } } - + } - - public void testTimeRangeMapRed() + + public void testTimeRangeMapRed() throws IOException, InterruptedException, ClassNotFoundException { prepareTest(); runTestOnTable(); @@ -149,11 +149,11 @@ public class TestTimeRangeMapRed extends HBaseClusterTestCase { Put put = new Put(KEY); put.add(FAMILY_NAME, COLUMN_NAME, entry.getKey(), Bytes.toBytes(false)); table.put(put); - } + } table.flushCommits(); } - - private void runTestOnTable() + + private void runTestOnTable() throws IOException, InterruptedException, ClassNotFoundException { MiniMRCluster mrCluster = new 
MiniMRCluster(2, fs.getUri().toString(), 1); Job job = null; @@ -165,7 +165,7 @@ public class TestTimeRangeMapRed extends HBaseClusterTestCase { scan.addColumn(FAMILY_NAME, COLUMN_NAME); scan.setTimeRange(MINSTAMP, MAXSTAMP); scan.setMaxVersions(); - TableMapReduceUtil.initTableMapperJob(Bytes.toString(TABLE_NAME), + TableMapReduceUtil.initTableMapperJob(Bytes.toString(TABLE_NAME), scan, ProcessTimeRangeMapper.class, Text.class, Text.class, job); job.waitForCompletion(true); } catch (IOException e) { @@ -177,7 +177,7 @@ public class TestTimeRangeMapRed extends HBaseClusterTestCase { FileUtil.fullyDelete( new File(job.getConfiguration().get("hadoop.tmp.dir"))); } - } + } } private void verify() throws IOException { diff --git a/core/src/test/java/org/apache/hadoop/hbase/master/OOMEHMaster.java b/core/src/test/java/org/apache/hadoop/hbase/master/OOMEHMaster.java index 5d16bfd..607a2f0 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/master/OOMEHMaster.java +++ b/core/src/test/java/org/apache/hadoop/hbase/master/OOMEHMaster.java @@ -36,13 +36,13 @@ import org.apache.hadoop.hbase.HServerInfo; */ public class OOMEHMaster extends HMaster { private List retainer = new ArrayList(); - + public OOMEHMaster(HBaseConfiguration conf) throws IOException { super(conf); } - + @Override - public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg[] msgs, + public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg[] msgs, HRegionInfo[] mostLoadedRegions) throws IOException { // Retain 1M. diff --git a/core/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransistions.java b/core/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransistions.java index 99241dc..f5cebb0 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransistions.java +++ b/core/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransistions.java @@ -108,7 +108,7 @@ public class TestMasterTransistions { this.copyOfOnlineRegions = this.victim.getCopyOfOnlineRegionsSortedBySize().values(); } - + @Override public boolean process(HServerInfo serverInfo, HMsg incomingMsg) { if (!victim.getServerInfo().equals(serverInfo) || @@ -161,7 +161,7 @@ public class TestMasterTransistions { * we kill it. We then wait on all regions to combe back on line. If bug * is fixed, this should happen soon as the processing of the killed server is * done. - * @see HBASE-2482 + * @see HBASE-2482 */ @Test public void testKillRSWithOpeningRegion2482() throws Exception { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); @@ -209,7 +209,7 @@ public class TestMasterTransistions { * @param cluster * @param hrs * @return Count of regions closed. - * @throws IOException + * @throws IOException */ private int closeAlltNonCatalogRegions(final MiniHBaseCluster cluster, final MiniHBaseCluster.MiniHBaseClusterRegionServer hrs) @@ -247,7 +247,7 @@ public class TestMasterTransistions { private int closeCount = 0; static final int SERVER_DURATION = 3 * 1000; static final int CLOSE_DURATION = 1 * 1000; - + HBase2428Listener(final MiniHBaseCluster c, final HServerAddress metaAddress, final HRegionInfo closingHRI, final int otherServerIndex) { this.cluster = c; @@ -332,7 +332,7 @@ public class TestMasterTransistions { /** * In 2428, the meta region has just been set offline and then a close comes * in. 
- * @see HBASE-2428 + * @see HBASE-2428 */ @Test public void testRegionCloseWhenNoMetaHBase2428() throws Exception { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); @@ -351,7 +351,7 @@ public class TestMasterTransistions { // Get a region out on the otherServer. final HRegionInfo hri = otherServer.getOnlineRegions().iterator().next().getRegionInfo(); - + // Add our ReionServerOperationsListener HBase2428Listener listener = new HBase2428Listener(cluster, metaHRS.getHServerInfo().getServerAddress(), hri, otherServerIndex); @@ -414,10 +414,10 @@ public class TestMasterTransistions { // If I get to here and all rows have a Server, then all have been assigned. if (rows == countOfRegions) break; LOG.info("Found=" + rows); - Threads.sleep(1000); + Threads.sleep(1000); } } - + /* * @return Count of regions in meta table. * @throws IOException diff --git a/core/src/test/java/org/apache/hadoop/hbase/master/TestMinimumServerCount.java b/core/src/test/java/org/apache/hadoop/hbase/master/TestMinimumServerCount.java index 0490951..d6f2c02 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/master/TestMinimumServerCount.java +++ b/core/src/test/java/org/apache/hadoop/hbase/master/TestMinimumServerCount.java @@ -76,7 +76,7 @@ public class TestMinimumServerCount extends HBaseClusterTestCase { } Thread.sleep(10 * 1000); assertFalse(admin.isTableAvailable(TABLE_NAME)); - + // now start another region server cluster.startRegionServer(); diff --git a/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionManager.java b/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionManager.java index b3c3c41..c7c8b06 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionManager.java +++ b/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionManager.java @@ -44,11 +44,11 @@ public class TestRegionManager extends HBaseClusterTestCase { // 1st .META. region will be something like .META.,,1253625700761 HRegionInfo metaRegionInfo0 = new HRegionInfo(metaTableDesc, Bytes.toBytes(""), regionInfo0.getRegionName()); MetaRegion meta0 = new MetaRegion(address, metaRegionInfo0); - + byte[] startKey1 = Bytes.toBytes("j"); byte[] endKey1 = Bytes.toBytes("m"); HRegionInfo regionInfo1 = new HRegionInfo(tableDesc, startKey1, endKey1); - // 2nd .META. region will be something like .META.,_MY_TABLE_,f,1253625700761,1253625700761 + // 2nd .META. 
region will be something like .META.,_MY_TABLE_,f,1253625700761,1253625700761 HRegionInfo metaRegionInfo1 = new HRegionInfo(metaTableDesc, regionInfo0.getRegionName(), regionInfo1.getRegionName()); MetaRegion meta1 = new MetaRegion(address, metaRegionInfo1); @@ -60,13 +60,13 @@ public class TestRegionManager extends HBaseClusterTestCase { byte[] startKeyX = Bytes.toBytes("h"); byte[] endKeyX = Bytes.toBytes("j"); HRegionInfo regionInfoX = new HRegionInfo(tableDesc, startKeyX, endKeyX); - - + + master.getRegionManager().offlineMetaRegion(startKey0); master.getRegionManager().putMetaRegionOnline(meta0); master.getRegionManager().putMetaRegionOnline(meta1); master.getRegionManager().putMetaRegionOnline(meta2); - + // for (byte[] b : master.regionManager.getOnlineMetaRegions().keySet()) { // System.out.println("FROM TEST KEY " + b +" " +new String(b)); // } diff --git a/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java b/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java index 4dfef22..dea4edd 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java +++ b/core/src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java @@ -45,7 +45,7 @@ public class TestRegionServerOperationQueue { @After public void tearDown() throws Exception { } - + @Test public void testNothing() throws Exception { } diff --git a/core/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java b/core/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java index 6c6ecdc..cd939cf 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java +++ b/core/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java @@ -43,14 +43,14 @@ public class TestMetricsMBeanBase extends TestCase { super(registry, "TestStatistics"); } } - + private MetricsRegistry registry; private MetricsRecord metricsRecord; private TestStatistics stats; private MetricsRate metricsRate; private MetricsIntValue intValue; private MetricsTimeVaryingRate varyRate; - + public void setUp() { this.registry = new MetricsRegistry(); this.metricsRate = new MetricsRate("metricsRate", registry, "test"); @@ -61,13 +61,13 @@ public class TestMetricsMBeanBase extends TestCase { this.metricsRecord = MetricsUtil.createRecord(context, "test"); this.metricsRecord.setTag("TestStatistics", "test"); //context.registerUpdater(this); - + } - + public void tearDown() { - + } - + public void testGetAttribute() throws Exception { this.metricsRate.inc(2); this.metricsRate.pushMetric(this.metricsRecord); @@ -76,8 +76,8 @@ public class TestMetricsMBeanBase extends TestCase { this.varyRate.inc(10); this.varyRate.inc(50); this.varyRate.pushMetric(this.metricsRecord); - - + + assertEquals( 2.0, (Float)this.stats.getAttribute("metricsRate"), 0.001 ); assertEquals( 5, this.stats.getAttribute("intValue") ); assertEquals( 10L, this.stats.getAttribute("varyRateMinTime") ); @@ -85,17 +85,17 @@ public class TestMetricsMBeanBase extends TestCase { assertEquals( 30L, this.stats.getAttribute("varyRateAvgTime") ); assertEquals( 2, this.stats.getAttribute("varyRateNumOps") ); } - + public void testGetMBeanInfo() { MBeanInfo info = this.stats.getMBeanInfo(); MBeanAttributeInfo[] attributes = info.getAttributes(); assertEquals( 6, attributes.length ); - - Map attributeByName = + + Map attributeByName = new HashMap(attributes.length); for (MBeanAttributeInfo attr : attributes) 
attributeByName.put(attr.getName(), attr); - + assertAttribute( attributeByName.get("metricsRate"), "metricsRate", "java.lang.Float", "test"); assertAttribute( attributeByName.get("intValue"), @@ -109,10 +109,10 @@ public class TestMetricsMBeanBase extends TestCase { assertAttribute( attributeByName.get("varyRateNumOps"), "varyRateNumOps", "java.lang.Integer", "test"); } - + protected void assertAttribute(MBeanAttributeInfo attr, String name, String type, String description) { - + assertEquals(attr.getName(), name); assertEquals(attr.getType(), type); assertEquals(attr.getDescription(), description); diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java index f1b425b..e90df70 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java @@ -51,11 +51,11 @@ public class DisabledTestRegionServerExit extends HBaseClusterTestCase { public DisabledTestRegionServerExit() { super(2); conf.setInt("ipc.client.connect.max.retries", 5); // reduce ipc retries - conf.setInt("ipc.client.timeout", 10000); // and ipc timeout + conf.setInt("ipc.client.timeout", 10000); // and ipc timeout conf.setInt("hbase.client.pause", 10000); // increase client timeout conf.setInt("hbase.client.retries.number", 10); // increase HBase retries } - + /** * Test abort of region server. * @throws IOException @@ -77,7 +77,7 @@ public class DisabledTestRegionServerExit extends HBaseClusterTestCase { t.start(); threadDumpingJoin(t); } - + /** * Test abort of region server. * Test is flakey up on hudson. Needs work. @@ -100,7 +100,7 @@ public class DisabledTestRegionServerExit extends HBaseClusterTestCase { t.start(); threadDumpingJoin(t); } - + private byte [] createTableAndAddRow(final String tableName) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); @@ -119,14 +119,14 @@ public class DisabledTestRegionServerExit extends HBaseClusterTestCase { /* * Stop the region server serving the meta region and wait for the meta region * to get reassigned. This is always the most problematic case. - * + * * @param abort set to true if region server should be aborted, if false it * is just shut down. */ private void stopOrAbortMetaRegionServer(boolean abort) { List regionThreads = cluster.getRegionServerThreads(); - + int server = -1; for (int i = 0; i < regionThreads.size() && server == -1; i++) { HRegionServer s = regionThreads.get(i).getRegionServer(); @@ -144,14 +144,14 @@ public class DisabledTestRegionServerExit extends HBaseClusterTestCase { } if (abort) { this.cluster.abortRegionServer(server); - + } else { this.cluster.stopRegionServer(server); } LOG.info(this.cluster.waitOnRegionServer(server) + " has been " + (abort ? 
"aborted" : "shut down")); } - + /* * Run verification in a thread so I can concurrently run a thread-dumper * while we're waiting (because in this test sometimes the meta scanner @@ -173,7 +173,7 @@ public class DisabledTestRegionServerExit extends HBaseClusterTestCase { ResultScanner s = t.getScanner(scan); s.close(); - + } catch (IOException e) { LOG.fatal("could not re-open meta table because", e); fail(); diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java index 5c83b42..fd1d1bf 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java @@ -39,7 +39,7 @@ public class OOMERegionServer extends HRegionServer { public OOMERegionServer(HBaseConfiguration conf) throws IOException { super(conf); } - + public void put(byte [] regionName, Put put) throws IOException { super.put(regionName, put); @@ -48,7 +48,7 @@ public class OOMERegionServer extends HRegionServer { this.retainer.add(put); } } - + public static void main(String[] args) { HRegionServer.doMain(args, OOMERegionServer.class); } diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index afe7b5b..f1566d3 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -54,17 +54,17 @@ public class TestCompaction extends HBaseTestCase { private static final int COMPACTION_THRESHOLD = MAXVERSIONS; private MiniDFSCluster cluster; - + /** constructor */ public TestCompaction() { super(); - + // Set cache flush size to 1MB conf.setInt("hbase.hregion.memstore.flush.size", 1024*1024); conf.setInt("hbase.hregion.memstore.block.multiplier", 10); this.cluster = null; } - + @Override public void setUp() throws Exception { this.cluster = new MiniDFSCluster(conf, 2, true, (String[])null); @@ -75,10 +75,10 @@ public class TestCompaction extends HBaseTestCase { HTableDescriptor htd = createTableDescriptor(getName()); this.r = createNewHRegion(htd, null, null); this.compactionDir = HRegion.getCompactionDir(this.r.getBaseDir()); - this.regionCompactionDir = new Path(this.compactionDir, + this.regionCompactionDir = new Path(this.compactionDir, Integer.toString(this.r.getRegionInfo().getEncodedName())); } - + @Override public void tearDown() throws Exception { HLog hlog = r.getLog(); @@ -139,7 +139,7 @@ public class TestCompaction extends HBaseTestCase { // Assert == 3 when we ask for versions. addContent(new HRegionIncommon(r), Bytes.toString(COLUMN_FAMILY)); - + // FIX!! // Cell[] cellValues = // Cell.createSingleCellArray(r.get(STARTROW, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/)); @@ -177,7 +177,7 @@ public class TestCompaction extends HBaseTestCase { byte [][] famAndQf = {COLUMN_FAMILY, null}; delete.deleteFamily(famAndQf[0]); r.delete(delete, null, true); - + // Assert deleted. 
result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null ); @@ -258,7 +258,7 @@ public class TestCompaction extends HBaseTestCase { } private void createSmallerStoreFile(final HRegion region) throws IOException { - HRegionIncommon loader = new HRegionIncommon(region); + HRegionIncommon loader = new HRegionIncommon(region); addContent(loader, Bytes.toString(COLUMN_FAMILY), ("" + "bbb").getBytes(), null); loader.flushcache(); diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteCompare.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteCompare.java index 5370b95..6ea4a1c 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteCompare.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteCompare.java @@ -73,21 +73,21 @@ public class TestDeleteCompare extends TestCase { int deleteQualifierLen = delete.getQualifierLength(); int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen; byte deleteType = deleteBuffer[deleteTimestampOffset +Bytes.SIZEOF_LONG]; - + List actual = new ArrayList(); for(KeyValue mem : memstore){ actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset, deleteRowLen, deleteQualifierOffset, deleteQualifierLen, deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR)); - + } - + assertEquals(expected.size(), actual.size()); for(int i=0; i memstore = new TreeSet(KeyValue.COMPARATOR); @@ -103,7 +103,7 @@ public class TestDeleteCompare extends TestCase { expected.add(DeleteCode.DELETE); expected.add(DeleteCode.DELETE); expected.add(DeleteCode.DONE); - + KeyValue delete = KeyValueTestUtil.create("row11", "fam", "col1", 2, KeyValue.Type.DeleteColumn, "dont-care"); byte [] deleteBuffer = delete.getBuffer(); @@ -113,22 +113,22 @@ public class TestDeleteCompare extends TestCase { int deleteQualifierLen = delete.getQualifierLength(); int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen; byte deleteType = deleteBuffer[deleteTimestampOffset +Bytes.SIZEOF_LONG]; - + List actual = new ArrayList(); for(KeyValue mem : memstore){ actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset, deleteRowLen, deleteQualifierOffset, deleteQualifierLen, deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR)); - + } - + assertEquals(expected.size(), actual.size()); for(int i=0; i memstore = new TreeSet(KeyValue.COMPARATOR); @@ -141,7 +141,7 @@ public class TestDeleteCompare extends TestCase { expected.add(DeleteCode.SKIP); expected.add(DeleteCode.DELETE); expected.add(DeleteCode.DONE); - + KeyValue delete = KeyValueTestUtil.create("row11", "fam", "col1", 2, KeyValue.Type.Delete, "dont-care"); byte [] deleteBuffer = delete.getBuffer(); @@ -151,20 +151,20 @@ public class TestDeleteCompare extends TestCase { int deleteQualifierLen = delete.getQualifierLength(); int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen; byte deleteType = deleteBuffer[deleteTimestampOffset +Bytes.SIZEOF_LONG]; - + List actual = new ArrayList(); for(KeyValue mem : memstore){ actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset, deleteRowLen, deleteQualifierOffset, deleteQualifierLen, deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR)); } - + assertEquals(expected.size(), actual.size()); for(int i=0; i memstore = new TreeSet(KeyValue.COMPARATOR); @@ -194,15 +194,15 @@ public class TestDeleteCompare extends TestCase { int deleteQualifierLen = delete.getQualifierLength(); int 
deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen; byte deleteType = deleteBuffer[deleteTimestampOffset +Bytes.SIZEOF_LONG]; - + List actual = new ArrayList(); for(KeyValue mem : memstore){ actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset, deleteRowLen, deleteQualifierOffset, deleteQualifierLen, deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR)); - + } - + assertEquals(expected.size(), actual.size()); for(int i=0; i scanner = new ArrayList(); scanner.add(col1); @@ -68,15 +68,15 @@ implements HConstants { scanner.add(col3); scanner.add(col4); scanner.add(col5); - + //Initialize result - List result = new ArrayList(); - + List result = new ArrayList(); + //"Match" for(byte [] col : scanner){ result.add(exp.checkColumn(col, 0, col.length)); } - + assertEquals(expected.size(), result.size()); for(int i=0; i< expected.size(); i++){ assertEquals(expected.get(i), result.get(i)); @@ -86,18 +86,18 @@ implements HConstants { } } } - + public void testGet_MultiVersion(){ if(PRINT){ System.out.println("\nMultiVersion"); } - + //Create tracker TreeSet columns = new TreeSet(Bytes.BYTES_COMPARATOR); //Looking for every other columns.add(col2); columns.add(col4); - + List expected = new ArrayList(); expected.add(MatchCode.SKIP); expected.add(MatchCode.SKIP); @@ -119,9 +119,9 @@ implements HConstants { expected.add(MatchCode.DONE); expected.add(MatchCode.DONE); int maxVersions = 2; - + ColumnTracker exp = new ExplicitColumnTracker(columns, maxVersions); - + //Create "Scanner" List scanner = new ArrayList(); scanner.add(col1); @@ -139,15 +139,15 @@ implements HConstants { scanner.add(col5); scanner.add(col5); scanner.add(col5); - + //Initialize result - List result = new ArrayList(); - + List result = new ArrayList(); + //"Match" for(byte [] col : scanner){ result.add(exp.checkColumn(col, 0, col.length)); } - + assertEquals(expected.size(), result.size()); for(int i=0; i< expected.size(); i++){ assertEquals(expected.get(i), result.get(i)); @@ -182,5 +182,5 @@ implements HConstants { } } - + } diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index 95dded5..dcb7252 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -42,12 +42,12 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; /** * {@link TestGet} is a medley of tests of get all done up as a single test. 
- * This class + * This class */ public class TestGetClosestAtOrBefore extends HBaseTestCase implements HConstants { static final Log LOG = LogFactory.getLog(TestGetClosestAtOrBefore.class); private MiniDFSCluster miniHdfs; - + private static final byte [] T00 = Bytes.toBytes("000"); private static final byte [] T10 = Bytes.toBytes("010"); private static final byte [] T11 = Bytes.toBytes("011"); @@ -184,36 +184,36 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase implements HConstant try { HTableDescriptor htd = createTableDescriptor(getName()); region = createNewHRegion(htd, null, null); - + Put p = new Put(T00); p.add(c0, c0, T00); region.put(p); - + p = new Put(T10); p.add(c0, c0, T10); region.put(p); - + p = new Put(T20); p.add(c0, c0, T20); region.put(p); - + Result r = region.getClosestRowBefore(T20, c0); assertTrue(Bytes.equals(T20, r.getRow())); - + Delete d = new Delete(T20); d.deleteColumn(c0, c0); region.delete(d, null, false); - + r = region.getClosestRowBefore(T20, c0); assertTrue(Bytes.equals(T10, r.getRow())); - + p = new Put(T30); p.add(c0, c0, T30); region.put(p); - + r = region.getClosestRowBefore(T30, c0); assertTrue(Bytes.equals(T30, r.getRow())); - + d = new Delete(T30); d.deleteColumn(c0, c0); region.delete(d, null, false); @@ -230,7 +230,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase implements HConstant assertTrue(Bytes.equals(T10, r.getRow())); r = region.getClosestRowBefore(T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); - + // Put into a different column family. Should make it so I still get t10 p = new Put(T20); p.add(c1, c1, T20); @@ -240,14 +240,14 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase implements HConstant assertTrue(Bytes.equals(T10, r.getRow())); r = region.getClosestRowBefore(T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); - + region.flushcache(); - + r = region.getClosestRowBefore(T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); r = region.getClosestRowBefore(T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); - + // Now try combo of memcache and mapfiles. Delete the t20 COLUMS[1] // in memory; make sure we get back t10 again. d = new Delete(T20); @@ -255,14 +255,14 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase implements HConstant region.delete(d, null, false); r = region.getClosestRowBefore(T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); - + // Ask for a value off the end of the file. Should return t10. r = region.getClosestRowBefore(T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); region.flushcache(); r = region.getClosestRowBefore(T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); - + // Ok. Let the candidate come out of hfile but have delete of // the candidate be in memory. 
p = new Put(T11); @@ -291,15 +291,15 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase implements HConstant try { HTableDescriptor htd = createTableDescriptor(getName()); region = createNewHRegion(htd, null, null); - + Put p = new Put(T10); p.add(c0, c0, T10); region.put(p); - + p = new Put(T30); p.add(c0, c0, T30); region.put(p); - + p = new Put(T40); p.add(c0, c0, T40); region.put(p); @@ -317,11 +317,11 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase implements HConstant p = new Put(T20); p.add(c0, c0, T20); region.put(p); - + // try finding "035" r = region.getClosestRowBefore(T35, c0); assertTrue(Bytes.equals(T30, r.getRow())); - + region.flushcache(); // try finding "035" diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java index 36c9843..fe96822 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java @@ -31,37 +31,37 @@ import org.apache.hadoop.hbase.util.Bytes; public class TestGetDeleteTracker extends HBaseTestCase implements HConstants { - + private static final boolean PRINT = true; - + private byte [] col1 = null; private byte [] col2 = null; - + private int col1Len = 0; private int col2Len = 0; private byte [] empty = null; - + private long ts1 = 0L; private long ts2 = 0L; private long ts3 = 0L; - - + + private Delete del10 = null; private Delete del11 = null; private Delete delQf10 = null; private Delete delQf11 = null; private Delete delFam10 = null; - + private Delete del20 = null; private Delete del21 = null; private Delete delQf20 = null; private Delete delQf21 = null; private Delete delFam20 = null; - - + + private Delete del30 = null; - + GetDeleteTracker dt = null; private byte del = KeyValue.Type.Delete.getCode(); private byte delCol = KeyValue.Type.DeleteColumn.getCode(); @@ -74,7 +74,7 @@ public class TestGetDeleteTracker extends HBaseTestCase implements HConstants { col2 = "col2".getBytes(); col1Len = col1.length; col2Len = col2.length; - + empty = new byte[0]; //ts1 @@ -84,7 +84,7 @@ public class TestGetDeleteTracker extends HBaseTestCase implements HConstants { delQf10 = new Delete(col1, 0, col1Len, delCol, ts1); delQf11 = new Delete(col2, 0, col2Len, delCol, ts1); delFam10 = new Delete(empty, 0, 0, delFam, ts1); - + //ts2 ts2 = System.nanoTime(); del20 = new Delete(col1, 0, col1Len, del, ts2); @@ -92,109 +92,109 @@ public class TestGetDeleteTracker extends HBaseTestCase implements HConstants { delQf20 = new Delete(col1, 0, col1Len, delCol, ts2); delQf21 = new Delete(col2, 0, col2Len, delCol, ts2); delFam20 = new Delete(empty, 0, 0, delFam, ts1); - + //ts3 ts3 = System.nanoTime(); del30 = new Delete(col1, 0, col1Len, del, ts3); } - + public void testUpdate_CompareDeletes() { GetDeleteTracker.DeleteCompare res = null; - - + + //Testing Delete and Delete res = dt.compareDeletes(del10, del10); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_BOTH, res); - - //Testing Delete qf1 and Delete qf2 and <==> + + //Testing Delete qf1 and Delete qf2 and <==> res = dt.compareDeletes(del10, del11); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res); res = dt.compareDeletes(del11, del10); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res); - - //Testing Delete ts1 and Delete ts2 and <==> + + //Testing Delete ts1 and Delete ts2 and <==> res = 
dt.compareDeletes(del10, del20); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res); res = dt.compareDeletes(del20, del10); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res); - - - + + + //Testing DeleteColumn and DeleteColumn res = dt.compareDeletes(delQf10, delQf10); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_BOTH, res); - - //Testing DeleteColumn qf1 and DeleteColumn qf2 and <==> + + //Testing DeleteColumn qf1 and DeleteColumn qf2 and <==> res = dt.compareDeletes(delQf10, delQf11); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res); res = dt.compareDeletes(delQf11, delQf10); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res); - - //Testing DeleteColumn ts1 and DeleteColumn ts2 and <==> + + //Testing DeleteColumn ts1 and DeleteColumn ts2 and <==> res = dt.compareDeletes(delQf10, delQf20); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_BOTH, res); res = dt.compareDeletes(delQf20, delQf10); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_BOTH, res); - - - - //Testing Delete and DeleteColumn and <==> + + + + //Testing Delete and DeleteColumn and <==> res = dt.compareDeletes(del10, delQf10); assertEquals(DeleteTracker.DeleteCompare.NEXT_OLD, res); res = dt.compareDeletes(delQf10, del10); assertEquals(DeleteTracker.DeleteCompare.NEXT_NEW, res); - //Testing Delete qf1 and DeleteColumn qf2 and <==> + //Testing Delete qf1 and DeleteColumn qf2 and <==> res = dt.compareDeletes(del10, delQf11); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res); res = dt.compareDeletes(delQf11, del10); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res); - - //Testing Delete qf2 and DeleteColumn qf1 and <==> + + //Testing Delete qf2 and DeleteColumn qf1 and <==> res = dt.compareDeletes(del11, delQf10); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res); res = dt.compareDeletes(delQf10, del11); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res); - - //Testing Delete ts2 and DeleteColumn ts1 and <==> + + //Testing Delete ts2 and DeleteColumn ts1 and <==> res = dt.compareDeletes(del20, delQf10); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_OLD_NEXT_OLD, res); res = dt.compareDeletes(delQf10, del20); assertEquals(DeleteTracker.DeleteCompare.INCLUDE_NEW_NEXT_NEW, res); - - //Testing Delete ts1 and DeleteColumn ts2 and <==> + + //Testing Delete ts1 and DeleteColumn ts2 and <==> res = dt.compareDeletes(del10, delQf20); assertEquals(DeleteTracker.DeleteCompare.NEXT_OLD, res); res = dt.compareDeletes(delQf20, del10); assertEquals(DeleteTracker.DeleteCompare.NEXT_NEW, res); - + } - + public void testUpdate(){ //Building lists List dels1 = new ArrayList(); dels1.add(delQf10); dels1.add(del21); - + List dels2 = new ArrayList(); dels2.add(delFam10); dels2.add(del30); dels2.add(delQf20); - + List res = new ArrayList(); res.add(del30); res.add(delQf20); res.add(del21); - + //Adding entries for(Delete del : dels1){ dt.add(del.buffer, del.qualifierOffset, del.qualifierLength, del.timestamp, del.type); } - + //update() dt.update(); - + //Check deleteList List delList = dt.deletes; assertEquals(dels1.size(), delList.size()); @@ -206,7 +206,7 @@ public class TestGetDeleteTracker extends HBaseTestCase implements HConstants { assertEquals(dels1.get(i).timestamp, delList.get(i).timestamp); assertEquals(dels1.get(i).type, delList.get(i).type); } - + //Add more entries for(Delete del : dels2){ dt.add(del.buffer, del.qualifierOffset, del.qualifierLength, @@ -214,7 
+214,7 @@ public class TestGetDeleteTracker extends HBaseTestCase implements HConstants { } //Update() dt.update(); - + //Check deleteList delList = dt.deletes; @@ -226,14 +226,14 @@ public class TestGetDeleteTracker extends HBaseTestCase implements HConstants { assertEquals(res.get(i).timestamp, delList.get(i).timestamp); assertEquals(res.get(i).type, delList.get(i).type); if(PRINT){ - System.out.println("Qf " +new String(delList.get(i).buffer) + - ", timestamp, " +delList.get(i).timestamp+ + System.out.println("Qf " +new String(delList.get(i).buffer) + + ", timestamp, " +delList.get(i).timestamp+ ", type " +KeyValue.Type.codeToType(delList.get(i).type)); } } - + } - + /** * Test if a KeyValue is in the lists of deletes already. Cases that needs to * be tested are: @@ -247,7 +247,7 @@ public class TestGetDeleteTracker extends HBaseTestCase implements HConstants { List dels = new ArrayList(); dels.add(delQf10); dels.add(del21); - + //Adding entries for(Delete del : dels){ dt.add(del.buffer, del.qualifierOffset, del.qualifierLength, @@ -262,50 +262,50 @@ public class TestGetDeleteTracker extends HBaseTestCase implements HConstants { //Building lists List dels = new ArrayList(); dels.add(del21); - + //Adding entries for(Delete del : dels){ - dt.add(del.buffer, del.qualifierOffset, del.qualifierLength, + dt.add(del.buffer, del.qualifierOffset, del.qualifierLength, del.timestamp, del.type); } - + //update() dt.update(); - + assertEquals(true, dt.isDeleted(col2, 0, col2Len, ts2)); } - + public void testIsDeleted_DeleteColumn(){ //Building lists List dels = new ArrayList(); dels.add(delQf21); - + //Adding entries for(Delete del : dels){ dt.add(del.buffer, del.qualifierOffset, del.qualifierLength, del.timestamp, del.type); } - + //update() dt.update(); - + assertEquals(true, dt.isDeleted(col2, 0, col2Len, ts1)); } - + public void testIsDeleted_DeleteFamily(){ //Building lists List dels = new ArrayList(); dels.add(delFam20); - + //Adding entries for(Delete del : dels){ dt.add(del.buffer, del.qualifierOffset, del.qualifierLength, del.timestamp, del.type); } - + //update() dt.update(); - + assertEquals(true, dt.isDeleted(col2, 0, col2Len, ts1)); } @@ -323,5 +323,5 @@ public class TestGetDeleteTracker extends HBaseTestCase implements HConstants { dt.update(); assertEquals(false, dt.isDeleted(col2, 0, col2Len, 7000000)); } - + } diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index ac5f753..616ba67 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** * Basic stand-alone testing of HRegion. - * + * * A lot of the meta information for an HRegion now lives inside other * HRegions or in the HBaseMaster, so only basic testing is possible. */ @@ -60,7 +60,7 @@ public class TestHRegion extends HBaseTestCase { HRegion region = null; private final String DIR = "test/build/data/TestHRegion/"; - + private final int MAX_VERSIONS = 2; // Test names @@ -81,11 +81,11 @@ public class TestHRegion extends HBaseTestCase { } ////////////////////////////////////////////////////////////////////////////// - // New tests that doesn't spin up a mini cluster but rather just test the + // New tests that doesn't spin up a mini cluster but rather just test the // individual code pieces in the HRegion. 
Putting files locally in // /tmp/testtable ////////////////////////////////////////////////////////////////////////////// - + /** * An involved filter test. Has multiple column families and deletes in mix. @@ -235,7 +235,7 @@ public class TestHRegion extends HBaseTestCase { byte [] val1 = Bytes.toBytes("value1"); byte [] val2 = Bytes.toBytes("value2"); Integer lockId = null; - + //Setting up region String method = this.getName(); initHRegion(tableName, method, fam1); @@ -258,7 +258,7 @@ public class TestHRegion extends HBaseTestCase { res = region.checkAndPut(row1, fam1, qf1, val1, put, lockId, true); assertTrue(res); } - + public void testCheckAndPut_WithWrongValue() throws IOException{ byte [] tableName = Bytes.toBytes("testtable"); byte [] row1 = Bytes.toBytes("row1"); @@ -276,7 +276,7 @@ public class TestHRegion extends HBaseTestCase { Put put = new Put(row1); put.add(fam1, qf1, val1); region.put(put); - + //checkAndPut with wrong value boolean res = region.checkAndPut(row1, fam1, qf1, val2, put, lockId, true); assertEquals(false, res); @@ -298,12 +298,12 @@ public class TestHRegion extends HBaseTestCase { Put put = new Put(row1); put.add(fam1, qf1, val1); region.put(put); - + //checkAndPut with correct value boolean res = region.checkAndPut(row1, fam1, qf1, val1, put, lockId, true); assertEquals(true, res); } - + public void testCheckAndPut_ThatPutWasWritten() throws IOException{ byte [] tableName = Bytes.toBytes("testtable"); byte [] row1 = Bytes.toBytes("row1"); @@ -315,7 +315,7 @@ public class TestHRegion extends HBaseTestCase { Integer lockId = null; byte [][] families = {fam1, fam2}; - + //Setting up region String method = this.getName(); initHRegion(tableName, method, families); @@ -324,34 +324,34 @@ public class TestHRegion extends HBaseTestCase { Put put = new Put(row1); put.add(fam1, qf1, val1); region.put(put); - + //Creating put to add long ts = System.currentTimeMillis(); KeyValue kv = new KeyValue(row1, fam2, qf1, ts, KeyValue.Type.Put, val2); put = new Put(row1); put.add(kv); - + //checkAndPut with wrong value Store store = region.getStore(fam1); store.memstore.kvset.size(); - + boolean res = region.checkAndPut(row1, fam1, qf1, val1, put, lockId, true); assertEquals(true, res); store.memstore.kvset.size(); - + Get get = new Get(row1); get.addColumn(fam2, qf1); KeyValue [] actual = region.get(get, null).raw(); - + KeyValue [] expected = {kv}; - + assertEquals(expected.length, actual.length); for(int i=0; i> deleteMap = new HashMap>(); @@ -447,12 +447,12 @@ public class TestHRegion extends HBaseTestCase { get = new Get(row); result = region.get(get, null); assertEquals(3, result.size()); - + // Now delete all... 
then test I can add stuff back delete = new Delete(row); region.delete(delete, null, false); assertEquals(0, region.get(get, null).size()); - try { + try { Thread.sleep(10); } catch (InterruptedException e) { e.printStackTrace(); @@ -461,7 +461,7 @@ public class TestHRegion extends HBaseTestCase { result = region.get(get, null); assertEquals(1, result.size()); } - + public void testDeleteRowWithFutureTs() throws IOException { byte [] tableName = Bytes.toBytes("testtable"); byte [] fam = Bytes.toBytes("info"); @@ -486,7 +486,7 @@ public class TestHRegion extends HBaseTestCase { Get get = new Get(row).addColumn(fam, serverinfo); Result result = region.get(get, null); assertEquals(1, result.size()); - + // delete the future row delete = new Delete(row,HConstants.LATEST_TIMESTAMP-3,null); region.delete(delete, null, true); @@ -557,7 +557,7 @@ public class TestHRegion extends HBaseTestCase { region.put(put); Thread.sleep(10); - + // now delete the value: region.delete(delete, null, true); @@ -593,19 +593,19 @@ public class TestHRegion extends HBaseTestCase { } - + public void testDelete_CheckTimestampUpdated() throws IOException { byte [] row1 = Bytes.toBytes("row1"); byte [] col1 = Bytes.toBytes("col1"); byte [] col2 = Bytes.toBytes("col2"); byte [] col3 = Bytes.toBytes("col3"); - + //Setting up region String method = this.getName(); initHRegion(tableName, method, fam1); - - //Building checkerList + + //Building checkerList List kvs = new ArrayList(); kvs.add(new KeyValue(row1, fam1, col1, null)); kvs.add(new KeyValue(row1, fam1, col2, null)); @@ -626,7 +626,7 @@ public class TestHRegion extends HBaseTestCase { now = kv.getTimestamp(); } } - + ////////////////////////////////////////////////////////////////////////////// // Get tests ////////////////////////////////////////////////////////////////////////////// @@ -636,14 +636,14 @@ public class TestHRegion extends HBaseTestCase { byte [] fam1 = Bytes.toBytes("fam1"); byte [] fam2 = Bytes.toBytes("False"); byte [] col1 = Bytes.toBytes("col1"); - + //Setting up region String method = this.getName(); initHRegion(tableName, method, fam1); - + Get get = new Get(row1); get.addColumn(fam2, col1); - + //Test try { region.get(get, null); @@ -663,11 +663,11 @@ public class TestHRegion extends HBaseTestCase { byte [] col3 = Bytes.toBytes("col3"); byte [] col4 = Bytes.toBytes("col4"); byte [] col5 = Bytes.toBytes("col5"); - + //Setting up region String method = this.getName(); initHRegion(tableName, method, fam1); - + //Add to memstore Put put = new Put(row1); put.add(fam1, col1, null); @@ -710,17 +710,17 @@ public class TestHRegion extends HBaseTestCase { byte [] tableName = Bytes.toBytes("emptytable"); byte [] row = Bytes.toBytes("row"); byte [] fam = Bytes.toBytes("fam"); - + String method = this.getName(); initHRegion(tableName, method, fam); - + Get get = new Get(row); get.addFamily(fam); Result r = region.get(get, null); - + assertTrue(r.isEmpty()); } - + //Test that checked if there was anything special when reading from the ROOT //table. To be able to use this test you need to comment the part in //HTableDescriptor that checks for '-' and '.'. 
You also need to remove the @@ -734,7 +734,7 @@ public class TestHRegion extends HBaseTestCase { Put put = new Put(HConstants.EMPTY_START_ROW); put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, null); region.put(put); - + Get get = new Get(HConstants.EMPTY_START_ROW); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); @@ -742,10 +742,10 @@ public class TestHRegion extends HBaseTestCase { KeyValue kv1 = new KeyValue(HConstants.EMPTY_START_ROW, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); KeyValue [] expected = {kv1}; - + //Test from memstore Result res = region.get(get, null); - + assertEquals(expected.length, res.size()); for(int i=0; i result = new ArrayList(); s.next(result); - + assertEquals(expected.length, result.size()); for(int i=0; ithreads = new ArrayList(threadCount); for (int i = 0; i < threadCount; i++) { threads.add(new Thread(Integer.toString(i)) { @@ -827,7 +827,7 @@ public class TestHRegion extends HBaseTestCase { } LOG.debug(getName() + " set " + Integer.toString(lockCount) + " locks"); - + // Abort outstanding locks. for (int i = lockCount - 1; i >= 0; i--) { region.releaseRowLock(lockids[i]); @@ -838,12 +838,12 @@ public class TestHRegion extends HBaseTestCase { } }); } - + // Startup all our threads. for (Thread t : threads) { t.start(); } - + // Now wait around till all are done. for (Thread t: threads) { while (t.isAlive()) { @@ -856,19 +856,19 @@ public class TestHRegion extends HBaseTestCase { } LOG.info("locks completed."); } - + ////////////////////////////////////////////////////////////////////////////// // Merge test - ////////////////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////////////////// public void testMerge() throws IOException { byte [] tableName = Bytes.toBytes("testtable"); byte [][] families = {fam1, fam2, fam3}; - + HBaseConfiguration hc = initSplit(); //Setting up region String method = this.getName(); initHRegion(tableName, method, hc, families); - + try { LOG.info("" + addContent(region, fam3)); region.flushcache(); @@ -923,7 +923,7 @@ public class TestHRegion extends HBaseTestCase { } } } - + ////////////////////////////////////////////////////////////////////////////// // Scanner tests ////////////////////////////////////////////////////////////////////////////// @@ -931,13 +931,13 @@ public class TestHRegion extends HBaseTestCase { byte [] tableName = Bytes.toBytes("testtable"); byte [] fam1 = Bytes.toBytes("fam1"); byte [] fam2 = Bytes.toBytes("fam2"); - + byte [][] families = {fam1, fam2}; - + //Setting up region String method = this.getName(); initHRegion(tableName, method, families); - + Scan scan = new Scan(); scan.addFamily(fam1); scan.addFamily(fam2); @@ -947,7 +947,7 @@ public class TestHRegion extends HBaseTestCase { assertTrue("Families could not be found in Region", false); } } - + public void testGetScanner_WithNotOkFamilies() throws IOException { byte [] tableName = Bytes.toBytes("testtable"); byte [] fam1 = Bytes.toBytes("fam1"); @@ -969,7 +969,7 @@ public class TestHRegion extends HBaseTestCase { } assertTrue("Families could not be found in Region", ok); } - + public void testGetScanner_WithNoFamilies() throws IOException { byte [] tableName = Bytes.toBytes("testtable"); byte [] row1 = Bytes.toBytes("row1"); @@ -977,13 +977,13 @@ public class TestHRegion extends HBaseTestCase { byte [] fam2 = Bytes.toBytes("fam2"); byte [] fam3 = Bytes.toBytes("fam3"); byte [] fam4 = 
Bytes.toBytes("fam4"); - + byte [][] families = {fam1, fam2, fam3, fam4}; - + //Setting up region String method = this.getName(); initHRegion(tableName, method, families); - + //Putting data in Region Put put = new Put(row1); put.add(fam1, null, null); @@ -991,21 +991,21 @@ public class TestHRegion extends HBaseTestCase { put.add(fam3, null, null); put.add(fam4, null, null); region.put(put); - + Scan scan = null; InternalScanner is = null; - - //Testing to see how many scanners that is produced by getScanner, starting + + //Testing to see how many scanners that is produced by getScanner, starting //with known number, 2 - current = 1 scan = new Scan(); scan.addFamily(fam2); scan.addFamily(fam4); is = region.getScanner(scan); assertEquals(1, ((RegionScanner)is).getStoreHeap().getHeap().size()); - + scan = new Scan(); is = region.getScanner(scan); - assertEquals(families.length -1, + assertEquals(families.length -1, ((RegionScanner)is).getStoreHeap().getHeap().size()); } @@ -1017,14 +1017,14 @@ public class TestHRegion extends HBaseTestCase { byte [] fam2 = Bytes.toBytes("fam2"); byte [] fam3 = Bytes.toBytes("fam3"); byte [] fam4 = Bytes.toBytes("fam4"); - + byte [][] families = {fam1, fam2, fam3, fam4}; long ts = System.currentTimeMillis(); - + //Setting up region String method = this.getName(); initHRegion(tableName, method, families); - + //Putting data in Region Put put = null; put = new Put(row1); @@ -1040,39 +1040,39 @@ public class TestHRegion extends HBaseTestCase { put.add(fam3, null, ts, null); put.add(fam4, null, ts, null); region.put(put); - + Scan scan = new Scan(); scan.addFamily(fam2); scan.addFamily(fam4); InternalScanner is = region.getScanner(scan); - + List res = null; - + //Result 1 List expected1 = new ArrayList(); expected1.add(new KeyValue(row1, fam2, null, ts, KeyValue.Type.Put, null)); expected1.add(new KeyValue(row1, fam4, null, ts, KeyValue.Type.Put, null)); - + res = new ArrayList(); is.next(res); for(int i=0; i expected2 = new ArrayList(); expected2.add(new KeyValue(row2, fam2, null, ts, KeyValue.Type.Put, null)); expected2.add(new KeyValue(row2, fam4, null, ts, KeyValue.Type.Put, null)); - + res = new ArrayList(); is.next(res); for(int i=0; i expected = new ArrayList(); expected.add(kv13); expected.add(kv12); - + Scan scan = new Scan(row1); scan.addColumn(fam1, qf1); scan.setMaxVersions(MAX_VERSIONS); List actual = new ArrayList(); InternalScanner scanner = region.getScanner(scan); - + boolean hasNext = scanner.next(actual); assertEquals(false, hasNext); - + //Verify result for(int i=0; i expected = new ArrayList(); expected.add(kv13); expected.add(kv12); expected.add(kv23); expected.add(kv22); - + Scan scan = new Scan(row1); scan.addColumn(fam1, qf1); scan.addColumn(fam1, qf2); scan.setMaxVersions(MAX_VERSIONS); List actual = new ArrayList(); InternalScanner scanner = region.getScanner(scan); - + boolean hasNext = scanner.next(actual); assertEquals(false, hasNext); - + //Verify result for(int i=0; i expected = new ArrayList(); expected.add(kv14); @@ -1249,7 +1249,7 @@ public class TestHRegion extends HBaseTestCase { expected.add(kv24); expected.add(kv23); expected.add(kv22); - + Scan scan = new Scan(row1); scan.addColumn(fam1, qf1); scan.addColumn(fam1, qf2); @@ -1257,17 +1257,17 @@ public class TestHRegion extends HBaseTestCase { scan.setMaxVersions(versions); List actual = new ArrayList(); InternalScanner scanner = region.getScanner(scan); - + boolean hasNext = scanner.next(actual); assertEquals(false, hasNext); - + //Verify result for(int i=0; i expected = 
new ArrayList(); expected.add(kv13); expected.add(kv12); expected.add(kv23); expected.add(kv22); - + Scan scan = new Scan(row1); scan.addFamily(fam1); scan.setMaxVersions(MAX_VERSIONS); List actual = new ArrayList(); InternalScanner scanner = region.getScanner(scan); - + boolean hasNext = scanner.next(actual); assertEquals(false, hasNext); - + //Verify result for(int i=0; i expected = new ArrayList(); expected.add(kv13); expected.add(kv12); expected.add(kv23); expected.add(kv22); - + Scan scan = new Scan(row1); scan.addFamily(fam1); scan.setMaxVersions(MAX_VERSIONS); List actual = new ArrayList(); InternalScanner scanner = region.getScanner(scan); - + boolean hasNext = scanner.next(actual); assertEquals(false, hasNext); - + //Verify result for(int i=0; i expected = new ArrayList(); expected.add(kv14); @@ -1647,22 +1647,22 @@ public class TestHRegion extends HBaseTestCase { expected.add(kv24); expected.add(kv23); expected.add(kv22); - + Scan scan = new Scan(row1); int versions = 3; scan.setMaxVersions(versions); List actual = new ArrayList(); InternalScanner scanner = region.getScanner(scan); - + boolean hasNext = scanner.next(actual); assertEquals(false, hasNext); - + //Verify result for(int i=0; ifirstValue. * @param r @@ -1877,7 +1877,7 @@ public class TestHRegion extends HBaseTestCase { s.close(); } } - + protected HRegion [] split(final HRegion r, final byte [] splitRow) throws IOException { // Assert can get mid key from passed region. @@ -1886,17 +1886,17 @@ public class TestHRegion extends HBaseTestCase { assertEquals(regions.length, 2); return regions; } - + private HBaseConfiguration initSplit() { HBaseConfiguration conf = new HBaseConfiguration(); // Always compact if there is more than one store file. conf.setInt("hbase.hstore.compactionThreshold", 2); - + // Make lease timeout longer, lease checks less frequent conf.setInt("hbase.master.lease.thread.wakefrequency", 5 * 1000); - + conf.setInt(HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY, 10 * 1000); - + // Increase the amount of time between client retries conf.setLong("hbase.client.pause", 15 * 1000); @@ -1904,14 +1904,14 @@ public class TestHRegion extends HBaseTestCase { // below. After adding all data, the first region is 1.3M conf.setLong("hbase.hregion.max.filesize", 1024 * 128); return conf; - } + } private void initHRegion (byte [] tableName, String callingMethod, byte[] ... families) throws IOException { initHRegion(tableName, callingMethod, new HBaseConfiguration(), families); } - + private void initHRegion (byte [] tableName, String callingMethod, HBaseConfiguration conf, byte [] ... 
families) throws IOException{ @@ -1920,7 +1920,7 @@ public class TestHRegion extends HBaseTestCase { htd.addFamily(new HColumnDescriptor(family)); } HRegionInfo info = new HRegionInfo(htd, null, null, false); - Path path = new Path(DIR + callingMethod); + Path path = new Path(DIR + callingMethod); region = HRegion.createHRegion(info, path, conf); } } diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java index 3bc87fe..8c1428b 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes; public class TestKeyValueHeap extends HBaseTestCase implements HConstants { private static final boolean PRINT = false; - + List scanners = new ArrayList(); private byte [] row1; @@ -103,7 +103,7 @@ implements HConstants { //Creating KeyValueHeap KeyValueHeap kvh = new KeyValueHeap(scanners.toArray(new Scanner[0]), KeyValue.COMPARATOR); - + List actual = new ArrayList(); while(kvh.peek() != null){ actual.add(kvh.next()); @@ -117,20 +117,20 @@ implements HConstants { "\nactual " +actual.get(i) +"\n"); } } - + //Check if result is sorted according to Comparator for(int i=0; i l1 = new ArrayList(); l1.add(new KeyValue(row1, fam1, col5, data)); l1.add(new KeyValue(row2, fam1, col1, data)); @@ -152,17 +152,17 @@ implements HConstants { List expected = new ArrayList(); expected.add(new KeyValue(row2, fam1, col1, data)); - + //Creating KeyValueHeap KeyValueHeap kvh = new KeyValueHeap(scanners.toArray(new Scanner[0]), KeyValue.COMPARATOR); - + KeyValue seekKv = new KeyValue(row2, fam1, null, null); kvh.seek(seekKv); - + List actual = new ArrayList(); actual.add(kvh.peek()); - + assertEquals(expected.size(), actual.size()); for(int i=0; i l1 = new ArrayList(); l1.add(new KeyValue(row1, fam1, col5, data)); l1.add(new KeyValue(row2, fam1, col1, data)); @@ -195,21 +195,21 @@ implements HConstants { l3.add(new KeyValue(row1, fam2, col2, data)); l3.add(new KeyValue(row2, fam1, col3, data)); scanners.add(new Scanner(l3)); - + List l4 = new ArrayList(); scanners.add(new Scanner(l4)); //Creating KeyValueHeap KeyValueHeap kvh = new KeyValueHeap(scanners.toArray(new Scanner[0]), KeyValue.COMPARATOR); - + while(kvh.next() != null); - + for(Scanner scanner : scanners) { assertTrue(scanner.isClosed()); } } - + private static class Scanner implements KeyValueScanner { private Iterator iter; private KeyValue current; @@ -220,9 +220,9 @@ implements HConstants { iter = list.iterator(); if(iter.hasNext()){ current = iter.next(); - } + } } - + public KeyValue peek() { return current; } @@ -240,11 +240,11 @@ implements HConstants { public void close(){ closed = true; } - + public boolean isClosed() { return closed; } - + public boolean seek(KeyValue seekKv) { while(iter.hasNext()){ KeyValue next = iter.next(); diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java index 8f939dd..7c5c7ad 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java @@ -67,7 +67,7 @@ public class TestMemStore extends TestCase { found.getValue())); } - /** + /** * Test memstore snapshot happening while scanning. 
* @throws IOException */ @@ -150,7 +150,7 @@ public class TestMemStore extends TestCase { assertEquals(rowCount, count); } - /** + /** * Test memstore snapshots * @throws IOException */ @@ -176,12 +176,12 @@ public class TestMemStore extends TestCase { KeyValue key0 = new KeyValue(row, family, qf, stamps[0], values[0]); KeyValue key1 = new KeyValue(row, family, qf, stamps[1], values[1]); KeyValue key2 = new KeyValue(row, family, qf, stamps[2], values[2]); - + m.add(key0); m.add(key1); m.add(key2); - - assertTrue("Expected memstore to hold 3 values, actually has " + + + assertTrue("Expected memstore to hold 3 values, actually has " + m.kvset.size(), m.kvset.size() == 3); } @@ -222,7 +222,7 @@ public class TestMemStore extends TestCase { ////////////////////////////////////////////////////////////////////////////// /** Test getNextRow from memstore - * @throws InterruptedException + * @throws InterruptedException */ public void testGetNextRow() throws Exception { addRows(this.memstore); @@ -265,7 +265,7 @@ public class TestMemStore extends TestCase { } } } - + public void testGet_Basic_Found() throws IOException { byte [] row = Bytes.toBytes("testrow"); byte [] fam = Bytes.toBytes("testfamily"); @@ -273,7 +273,7 @@ public class TestMemStore extends TestCase { byte [] qf2 = Bytes.toBytes("testqualifier2"); byte [] qf3 = Bytes.toBytes("testqualifier3"); byte [] val = Bytes.toBytes("testval"); - + //Setting up memstore KeyValue add1 = new KeyValue(row, fam ,qf1, val); KeyValue add2 = new KeyValue(row, fam ,qf2, val); @@ -281,7 +281,7 @@ public class TestMemStore extends TestCase { memstore.add(add1); memstore.add(add2); memstore.add(add3); - + //test Get get = new Get(row); NavigableSet columns = new TreeSet(Bytes.BYTES_COMPARATOR); @@ -290,12 +290,12 @@ public class TestMemStore extends TestCase { QueryMatcher matcher = new QueryMatcher(get, fam, columns, ttl, KeyValue.KEY_COMPARATOR, 1); - + List result = new ArrayList(); boolean res = memstore.get(matcher, result); assertEquals(true, res); } - + public void testGet_Basic_NotFound() throws IOException { byte [] row = Bytes.toBytes("testrow"); byte [] fam = Bytes.toBytes("testfamily"); @@ -303,13 +303,13 @@ public class TestMemStore extends TestCase { byte [] qf2 = Bytes.toBytes("testqualifier2"); byte [] qf3 = Bytes.toBytes("testqualifier3"); byte [] val = Bytes.toBytes("testval"); - + //Setting up memstore KeyValue add1 = new KeyValue(row, fam ,qf1, val); KeyValue add3 = new KeyValue(row, fam ,qf3, val); memstore.add(add1); memstore.add(add3); - + //test Get get = new Get(row); NavigableSet columns = new TreeSet(Bytes.BYTES_COMPARATOR); @@ -318,7 +318,7 @@ public class TestMemStore extends TestCase { QueryMatcher matcher = new QueryMatcher(get, fam, columns, ttl, KeyValue.KEY_COMPARATOR, 1); - + List result = new ArrayList(); boolean res = memstore.get(matcher, result); assertEquals(false, res); @@ -333,7 +333,7 @@ public class TestMemStore extends TestCase { byte [] qf4 = Bytes.toBytes("testqualifier4"); byte [] qf5 = Bytes.toBytes("testqualifier5"); byte [] val = Bytes.toBytes("testval"); - + //Creating get Get get = new Get(row); NavigableSet columns = new TreeSet(Bytes.BYTES_COMPARATOR); @@ -343,7 +343,7 @@ public class TestMemStore extends TestCase { QueryMatcher matcher = new QueryMatcher(get, fam, columns, ttl, KeyValue.KEY_COMPARATOR, 1); - + //Setting up memstore memstore.add(new KeyValue(row, fam ,qf1, val)); memstore.add(new KeyValue(row, fam ,qf2, val)); @@ -356,12 +356,12 @@ public class TestMemStore extends TestCase { 
memstore.add(new KeyValue(row, fam ,qf4, val)); memstore.add(new KeyValue(row, fam ,qf5, val)); assertEquals(2, memstore.kvset.size()); - + List result = new ArrayList(); boolean res = memstore.get(matcher, result); assertEquals(true, res); } - + public void testGet_SpecificTimeStamp() throws IOException { byte [] row = Bytes.toBytes("testrow"); byte [] fam = Bytes.toBytes("testfamily"); @@ -369,11 +369,11 @@ public class TestMemStore extends TestCase { byte [] qf2 = Bytes.toBytes("testqualifier2"); byte [] qf3 = Bytes.toBytes("testqualifier3"); byte [] val = Bytes.toBytes("testval"); - + long ts1 = System.currentTimeMillis(); long ts2 = ts1++; long ts3 = ts2++; - + //Creating get Get get = new Get(row); get.setTimeStamp(ts2); @@ -385,7 +385,7 @@ public class TestMemStore extends TestCase { QueryMatcher matcher = new QueryMatcher(get, fam, columns, ttl, KeyValue.KEY_COMPARATOR, 1); - + //Setting up expected List expected = new ArrayList(); KeyValue kv1 = new KeyValue(row, fam ,qf1, ts2, val); @@ -394,7 +394,7 @@ public class TestMemStore extends TestCase { expected.add(kv1); expected.add(kv2); expected.add(kv3); - + //Setting up memstore memstore.add(new KeyValue(row, fam ,qf1, ts1, val)); memstore.add(new KeyValue(row, fam ,qf2, ts1, val)); @@ -405,11 +405,11 @@ public class TestMemStore extends TestCase { memstore.add(new KeyValue(row, fam ,qf1, ts3, val)); memstore.add(new KeyValue(row, fam ,qf2, ts3, val)); memstore.add(new KeyValue(row, fam ,qf3, ts3, val)); - + //Get List result = new ArrayList(); memstore.get(matcher, result); - + assertEquals(expected.size(), result.size()); for(int i=0; i expected = new ArrayList(); expected.add(put3); expected.add(del2); - + assertEquals(2, memstore.kvset.size()); int i = 0; for (KeyValue kv: memstore.kvset) { assertEquals(expected.get(i++), kv); } } - - + + public void testGetWithDeleteFamily() throws IOException { byte [] row = Bytes.toBytes("testrow"); byte [] fam = Bytes.toBytes("testfamily"); @@ -494,7 +494,7 @@ public class TestMemStore extends TestCase { byte [] qf3 = Bytes.toBytes("testqualifier3"); byte [] val = Bytes.toBytes("testval"); long ts = System.nanoTime(); - + KeyValue put1 = new KeyValue(row, fam, qf1, ts, val); KeyValue put2 = new KeyValue(row, fam, qf2, ts, val); KeyValue put3 = new KeyValue(row, fam, qf3, ts, val); @@ -504,22 +504,22 @@ public class TestMemStore extends TestCase { memstore.add(put2); memstore.add(put3); memstore.add(put4); - - KeyValue del = + + KeyValue del = new KeyValue(row, fam, null, ts, KeyValue.Type.DeleteFamily, val); memstore.delete(del); List expected = new ArrayList(); expected.add(del); expected.add(put4); - + assertEquals(2, memstore.kvset.size()); int i = 0; for (KeyValue kv: memstore.kvset) { assertEquals(expected.get(i++), kv); } } - + public void testKeepDeleteInmemstore() { byte [] row = Bytes.toBytes("testrow"); byte [] fam = Bytes.toBytes("testfamily"); @@ -570,30 +570,30 @@ public class TestMemStore extends TestCase { assertEquals(delete, memstore.kvset.first()); } - + ////////////////////////////////////////////////////////////////////////////// // Helpers - ////////////////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////////////////// private byte [] makeQualifier(final int i1, final int i2){ return Bytes.toBytes(Integer.toString(i1) + ";" + Integer.toString(i2)); } - + /** * Adds {@link #ROW_COUNT} rows and {@link #COLUMNS_COUNT} * @param hmc Instance to add rows to. * @return How many rows we added. 
- * @throws IOException + * @throws IOException */ private int addRows(final MemStore hmc) { return addRows(hmc, HConstants.LATEST_TIMESTAMP); } - + /** * Adds {@link #ROW_COUNT} rows and {@link #COLUMNS_COUNT} * @param hmc Instance to add rows to. * @return How many rows we added. - * @throws IOException + * @throws IOException */ private int addRows(final MemStore hmc, final long ts) { for (int i = 0; i < ROW_COUNT; i++) { diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java index 564ea2d..7602b2f 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.util.Bytes; public class TestQueryMatcher extends HBaseTestCase implements HConstants { private static final boolean PRINT = false; - + private byte [] row1; private byte [] row2; private byte [] fam1; @@ -80,10 +80,10 @@ implements HConstants { } - public void testMatch_ExplicitColumns() + public void testMatch_ExplicitColumns() throws IOException { //Moving up from the Tracker by using Gets and List instead - //of just byte [] + //of just byte [] //Expected result List expected = new ArrayList(); @@ -116,17 +116,17 @@ implements HConstants { for(int i=0; i< expected.size(); i++){ assertEquals(expected.get(i), actual.get(i)); if(PRINT){ - System.out.println("expected "+expected.get(i)+ + System.out.println("expected "+expected.get(i)+ ", actual " +actual.get(i)); } } } - public void testMatch_Wildcard() + public void testMatch_Wildcard() throws IOException { //Moving up from the Tracker by using Gets and List instead - //of just byte [] + //of just byte [] //Expected result List expected = new ArrayList(); @@ -157,24 +157,24 @@ implements HConstants { for(int i=0; i< expected.size(); i++){ assertEquals(expected.get(i), actual.get(i)); if(PRINT){ - System.out.println("expected "+expected.get(i)+ + System.out.println("expected "+expected.get(i)+ ", actual " +actual.get(i)); } } } - + /** - * Verify that {@link QueryMatcher} only skips expired KeyValue - * instances and does not exit early from the row (skipping + * Verify that {@link QueryMatcher} only skips expired KeyValue + * instances and does not exit early from the row (skipping * later non-expired KeyValues). This version mimics a Get with * explicitly specified column qualifiers. 
- * + * * @throws IOException */ public void testMatch_ExpiredExplicit() throws IOException { - + long testTTL = 1000; MatchCode [] expected = new MatchCode[] { MatchCode.SKIP, @@ -184,10 +184,10 @@ implements HConstants { MatchCode.SKIP, MatchCode.NEXT }; - + QueryMatcher qm = new QueryMatcher(get, fam2, get.getFamilyMap().get(fam2), testTTL, rowComparator, 1); - + long now = System.currentTimeMillis(); KeyValue [] kvs = new KeyValue[] { new KeyValue(row1, fam2, col1, now-100, data), @@ -195,36 +195,36 @@ implements HConstants { new KeyValue(row1, fam2, col3, now-5000, data), new KeyValue(row1, fam2, col4, now-500, data), new KeyValue(row1, fam2, col5, now-10000, data), - new KeyValue(row2, fam1, col1, now-10, data) + new KeyValue(row2, fam1, col1, now-10, data) }; List actual = new ArrayList(kvs.length); for (KeyValue kv : kvs) { actual.add( qm.match(kv) ); } - + assertEquals(expected.length, actual.size()); for (int i=0; i actual = new ArrayList(kvs.length); for (KeyValue kv : kvs) { actual.add( qm.match(kv) ); } - + assertEquals(expected.length, actual.size()); for (int i=0; i qualifiers = new ArrayList(); qualifiers.add(Bytes.toBytes("qualifer1")); qualifiers.add(Bytes.toBytes("qualifer2")); qualifiers.add(Bytes.toBytes("qualifer3")); qualifiers.add(Bytes.toBytes("qualifer4")); - + //Setting up expected result List expected = new ArrayList(); expected.add(MatchCode.INCLUDE); expected.add(MatchCode.INCLUDE); expected.add(MatchCode.INCLUDE); expected.add(MatchCode.INCLUDE); - + List actual = new ArrayList(); - + for(byte [] qualifier : qualifiers) { MatchCode mc = tracker.checkColumn(qualifier, 0, qualifier.length); actual.add(mc); @@ -62,28 +62,28 @@ public class TestScanWildcardColumnTracker extends HBaseTestCase { assertEquals(expected.get(i), actual.get(i)); } } - + public void testCheckColumn_EnforceVersions() { //Create a WildcardColumnTracker - ScanWildcardColumnTracker tracker = + ScanWildcardColumnTracker tracker = new ScanWildcardColumnTracker(VERSIONS); - + //Create list of qualifiers List qualifiers = new ArrayList(); qualifiers.add(Bytes.toBytes("qualifer1")); qualifiers.add(Bytes.toBytes("qualifer1")); qualifiers.add(Bytes.toBytes("qualifer1")); qualifiers.add(Bytes.toBytes("qualifer2")); - + //Setting up expected result List expected = new ArrayList(); expected.add(MatchCode.INCLUDE); expected.add(MatchCode.INCLUDE); expected.add(MatchCode.SKIP); expected.add(MatchCode.INCLUDE); - + List actual = new ArrayList(); - + for(byte [] qualifier : qualifiers) { MatchCode mc = tracker.checkColumn(qualifier, 0, qualifier.length); actual.add(mc); @@ -94,19 +94,19 @@ public class TestScanWildcardColumnTracker extends HBaseTestCase { assertEquals(expected.get(i), actual.get(i)); } } - + public void DisabledTestCheckColumn_WrongOrder() { //Create a WildcardColumnTracker - ScanWildcardColumnTracker tracker = + ScanWildcardColumnTracker tracker = new ScanWildcardColumnTracker(VERSIONS); - + //Create list of qualifiers List qualifiers = new ArrayList(); qualifiers.add(Bytes.toBytes("qualifer2")); qualifiers.add(Bytes.toBytes("qualifer1")); - + boolean ok = false; - + try { for(byte [] qualifier : qualifiers) { tracker.checkColumn(qualifier, 0, qualifier.length); @@ -117,5 +117,5 @@ public class TestScanWildcardColumnTracker extends HBaseTestCase { assertEquals(true, ok); } - + } diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java index d36ce5a..f168b9c 100644 --- 
a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java @@ -52,7 +52,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; */ public class TestScanner extends HBaseTestCase { private final Log LOG = LogFactory.getLog(this.getClass()); - + private static final byte [] FIRST_ROW = HConstants.EMPTY_START_ROW; private static final byte [][] COLS = { HConstants.CATALOG_FAMILY }; private static final byte [][] EXPLICIT_COLS = { @@ -60,7 +60,7 @@ public class TestScanner extends HBaseTestCase { // TODO ryan //HConstants.STARTCODE_QUALIFIER }; - + static final HTableDescriptor TESTTABLEDESC = new HTableDescriptor("testscanner"); static { @@ -73,9 +73,9 @@ public class TestScanner extends HBaseTestCase { public static final HRegionInfo REGION_INFO = new HRegionInfo(TESTTABLEDESC, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY); - + private static final byte [] ROW_KEY = REGION_INFO.getRegionName(); - + private static final long START_CODE = Long.MAX_VALUE; private MiniDFSCluster cluster = null; @@ -89,12 +89,12 @@ public class TestScanner extends HBaseTestCase { this.conf.set(HConstants.HBASE_DIR, this.cluster.getFileSystem().getHomeDirectory().toString()); super.setUp(); - + } /** * Test basic stop row filter works. - * @throws Exception + * @throws Exception */ public void testStopRow() throws Exception { byte [] startrow = Bytes.toBytes("bbb"); @@ -140,7 +140,7 @@ public class TestScanner extends HBaseTestCase { shutdownDfs(this.cluster); } } - + void rowPrefixFilter(Scan scan) throws IOException { List results = new ArrayList(); scan.addFamily(HConstants.CATALOG_FAMILY); @@ -156,7 +156,7 @@ public class TestScanner extends HBaseTestCase { } s.close(); } - + void rowInclusiveStopFilter(Scan scan, byte[] stopRow) throws IOException { List results = new ArrayList(); scan.addFamily(HConstants.CATALOG_FAMILY); @@ -171,7 +171,7 @@ public class TestScanner extends HBaseTestCase { } s.close(); } - + public void testFilters() throws IOException { try { this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null); @@ -181,7 +181,7 @@ public class TestScanner extends HBaseTestCase { Scan scan = new Scan(); scan.setFilter(newFilter); rowPrefixFilter(scan); - + byte[] stopRow = Bytes.toBytes("bbc"); newFilter = new WhileMatchFilter(new InclusiveStopFilter(stopRow)); scan = new Scan(); @@ -202,7 +202,7 @@ public class TestScanner extends HBaseTestCase { try { r = createNewHRegion(TESTTABLEDESC, null, null); region = new HRegionIncommon(r); - + // Write information to the meta table Put put = new Put(ROW_KEY, System.currentTimeMillis(), null); @@ -216,23 +216,23 @@ public class TestScanner extends HBaseTestCase { // What we just committed is in the memstore. Verify that we can get // it back both with scanning and get - + scan(false, null); getRegionInfo(); - + // Close and re-open - + r.close(); r = openClosedRegion(r); region = new HRegionIncommon(r); // Verify we can get the data back now that it is on disk. 
- + scan(false, null); getRegionInfo(); - + // Store some new information - + HServerAddress address = new HServerAddress("foo.bar.com:1234"); put = new Put(ROW_KEY, System.currentTimeMillis(), null); @@ -242,45 +242,45 @@ public class TestScanner extends HBaseTestCase { // put.add(HConstants.COL_STARTCODE, Bytes.toBytes(START_CODE)); region.put(put); - + // Validate that we can still get the HRegionInfo, even though it is in // an older row on disk and there is a newer row in the memstore - + scan(true, address.toString()); getRegionInfo(); - + // flush cache region.flushcache(); // Validate again - + scan(true, address.toString()); getRegionInfo(); // Close and reopen - + r.close(); r = openClosedRegion(r); region = new HRegionIncommon(r); // Validate again - + scan(true, address.toString()); getRegionInfo(); // Now update the information again address = new HServerAddress("bar.foo.com:4321"); - + put = new Put(ROW_KEY, System.currentTimeMillis(), null); put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes.toBytes(address.toString())); region.put(put); - + // Validate again - + scan(true, address.toString()); getRegionInfo(); @@ -289,26 +289,26 @@ public class TestScanner extends HBaseTestCase { region.flushcache(); // Validate again - + scan(true, address.toString()); getRegionInfo(); // Close and reopen - + r.close(); r = openClosedRegion(r); region = new HRegionIncommon(r); // Validate again - + scan(true, address.toString()); getRegionInfo(); - + // clean up - + r.close(); r.getLog().closeAndDelete(); - + } finally { shutdownDfs(cluster); } @@ -318,17 +318,17 @@ public class TestScanner extends HBaseTestCase { private void validateRegionInfo(byte [] regionBytes) throws IOException { HRegionInfo info = (HRegionInfo) Writables.getWritable(regionBytes, new HRegionInfo()); - + assertEquals(REGION_INFO.getRegionId(), info.getRegionId()); assertEquals(0, info.getStartKey().length); assertEquals(0, info.getEndKey().length); assertEquals(0, Bytes.compareTo(info.getRegionName(), REGION_INFO.getRegionName())); assertEquals(0, info.getTableDesc().compareTo(REGION_INFO.getTableDesc())); } - + /** Use a scanner to get the region info and then validate the results */ private void scan(boolean validateStartcode, String serverName) - throws IOException { + throws IOException { InternalScanner scanner = null; Scan scan = null; List results = new ArrayList(); @@ -336,7 +336,7 @@ public class TestScanner extends HBaseTestCase { COLS, EXPLICIT_COLS }; - + for(int i = 0; i < scanColumns.length; i++) { try { scan = new Scan(FIRST_ROW); @@ -345,26 +345,26 @@ public class TestScanner extends HBaseTestCase { } scanner = r.getScanner(scan); while (scanner.next(results)) { - assertTrue(hasColumn(results, HConstants.CATALOG_FAMILY, + assertTrue(hasColumn(results, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER)); - byte [] val = getColumn(results, HConstants.CATALOG_FAMILY, + byte [] val = getColumn(results, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER).getValue(); validateRegionInfo(val); if(validateStartcode) { -// assertTrue(hasColumn(results, HConstants.CATALOG_FAMILY, +// assertTrue(hasColumn(results, HConstants.CATALOG_FAMILY, // HConstants.STARTCODE_QUALIFIER)); -// val = getColumn(results, HConstants.CATALOG_FAMILY, +// val = getColumn(results, HConstants.CATALOG_FAMILY, // HConstants.STARTCODE_QUALIFIER).getValue(); assertNotNull(val); assertFalse(val.length == 0); long startCode = Bytes.toLong(val); assertEquals(START_CODE, startCode); } - + if(serverName != 
null) { - assertTrue(hasColumn(results, HConstants.CATALOG_FAMILY, + assertTrue(hasColumn(results, HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER)); - val = getColumn(results, HConstants.CATALOG_FAMILY, + val = getColumn(results, HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER).getValue(); assertNotNull(val); assertFalse(val.length == 0); @@ -389,9 +389,9 @@ public class TestScanner extends HBaseTestCase { return true; } } - return false; + return false; } - + private KeyValue getColumn(final List kvs, final byte [] family, final byte [] qualifier) { for (KeyValue kv: kvs) { @@ -401,15 +401,15 @@ public class TestScanner extends HBaseTestCase { } return null; } - - + + /** Use get to retrieve the HRegionInfo and validate it */ private void getRegionInfo() throws IOException { Get get = new Get(ROW_KEY); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); Result result = region.get(get, null); byte [] bytes = result.value(); - validateRegionInfo(bytes); + validateRegionInfo(bytes); } /** @@ -460,7 +460,7 @@ public class TestScanner extends HBaseTestCase { } } - + /* * @param hri Region * @param flushIndex At what row we start the flush. diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java index 997a462..ea43491 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java @@ -45,7 +45,7 @@ import java.util.TreeSet; import java.util.concurrent.ConcurrentSkipListSet; /** - * Test class fosr the Store + * Test class fosr the Store */ public class TestStore extends TestCase { Store store; @@ -97,7 +97,7 @@ public class TestStore extends TestCase { HColumnDescriptor hcd = new HColumnDescriptor(family); HBaseConfiguration conf = new HBaseConfiguration(); FileSystem fs = FileSystem.get(conf); - Path reconstructionLog = null; + Path reconstructionLog = null; Progressable reporter = null; fs.delete(logdir, true); @@ -107,12 +107,12 @@ public class TestStore extends TestCase { HRegionInfo info = new HRegionInfo(htd, null, null, false); HLog hlog = new HLog(fs, logdir, oldLogDir, conf, null); HRegion region = new HRegion(basedir, hlog, fs, conf, info, null); - + store = new Store(basedir, region, hcd, fs, reconstructionLog, conf, reporter); } - + ////////////////////////////////////////////////////////////////////////////// // Get tests ////////////////////////////////////////////////////////////////////////////// @@ -154,7 +154,7 @@ public class TestStore extends TestCase { */ public void testGet_FromMemStoreOnly() throws IOException { init(this.getName()); - + //Put data in memstore this.store.add(new KeyValue(row, family, qf1, null)); this.store.add(new KeyValue(row, family, qf2, null)); diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index fa7d103..0c6efde 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; public class TestStoreFile extends HBaseTestCase { static final Log LOG = LogFactory.getLog(TestStoreFile.class); private MiniDFSCluster cluster; - + @Override public void setUp() throws Exception { try { @@ -53,7 +53,7 @@ public class TestStoreFile extends 
HBaseTestCase { } super.setUp(); } - + @Override public void tearDown() throws Exception { super.tearDown(); @@ -75,7 +75,7 @@ public class TestStoreFile extends HBaseTestCase { writeStoreFile(writer); checkHalfHFile(new StoreFile(this.fs, writer.getPath(), true, conf, false)); } - + /* * Writes HStoreKey and ImmutableBytes data to passed writer and * then closes it. @@ -98,7 +98,7 @@ public class TestStoreFile extends HBaseTestCase { writer.close(); } } - + /** * Test that our mechanism of writing store files in one region to reference * store files in other regions works. @@ -175,7 +175,7 @@ public class TestStoreFile extends HBaseTestCase { while ((!topScanner.isSeeked() && topScanner.seekTo()) || (topScanner.isSeeked() && topScanner.next())) { key = topScanner.getKey(); - + assertTrue(topScanner.getReader().getComparator().compare(key.array(), key.arrayOffset(), key.limit(), midkey, 0, midkey.length) >= 0); if (first) { @@ -184,7 +184,7 @@ public class TestStoreFile extends HBaseTestCase { } } LOG.info("Last in top: " + Bytes.toString(Bytes.toBytes(key))); - + first = true; HFileScanner bottomScanner = bottom.getScanner(false, false); while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java index ea1f93e..47d57fd 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java @@ -50,7 +50,7 @@ public class TestStoreScanner extends TestCase { } return cols; } - + public void testScanTimeRange() throws IOException { String r1 = "R1"; // returns only 1 of these 2 even though same timestamp @@ -102,7 +102,7 @@ public class TestStoreScanner extends TestCase { results = new ArrayList(); assertEquals(true, scan.next(results)); assertEquals(3, results.size()); - + } public void testScanSameTimestamp() throws IOException { @@ -336,7 +336,7 @@ public class TestStoreScanner extends TestCase { KeyValueTestUtil.create("R1", "cf", "g", 11, KeyValue.Type.Delete, "dont-care"), KeyValueTestUtil.create("R1", "cf", "h", 11, KeyValue.Type.Put, "dont-care"), KeyValueTestUtil.create("R1", "cf", "i", 11, KeyValue.Type.Put, "dont-care"), - KeyValueTestUtil.create("R2", "cf", "a", 11, KeyValue.Type.Put, "dont-care"), + KeyValueTestUtil.create("R2", "cf", "a", 11, KeyValue.Type.Put, "dont-care"), }; KeyValueScanner [] scanners = new KeyValueScanner[] { new KeyValueScanFixture(KeyValue.COMPARATOR, kvs) @@ -407,9 +407,9 @@ public class TestStoreScanner extends TestCase { results.clear(); assertEquals(false, scan.next(results)); } - + /** - * Test expiration of KeyValues in combination with a configured TTL for + * Test expiration of KeyValues in combination with a configured TTL for * a column family (as should be triggered in a major compaction). 
*/ public void testWildCardTtlScan() throws IOException { @@ -439,14 +439,14 @@ public class TestStoreScanner extends TestCase { assertEquals(kvs[1], results.get(0)); assertEquals(kvs[2], results.get(1)); results.clear(); - + assertEquals(true, scanner.next(results)); assertEquals(3, results.size()); assertEquals(kvs[4], results.get(0)); assertEquals(kvs[5], results.get(1)); assertEquals(kvs[6], results.get(2)); results.clear(); - + assertEquals(false, scanner.next(results)); } } diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java index c1ba67b..7a7ec33 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java @@ -65,10 +65,10 @@ public class TestWideScanner extends HBaseTestCase { // Set the hbase.rootdir to be the home directory in mini dfs. this.conf.set(HConstants.HBASE_DIR, this.cluster.getFileSystem().getHomeDirectory().toString()); - super.setUp(); + super.setUp(); } - private int addWideContent(HRegion region, byte[] family) + private int addWideContent(HRegion region, byte[] family) throws IOException { int count = 0; // add a few rows of 2500 columns (we'll use batch of 1000) to make things diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java index a6072c6..7b309ce 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java @@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.util.Bytes; public class TestWildcardColumnTracker extends HBaseTestCase implements HConstants { - private boolean PRINT = false; - + private boolean PRINT = false; + public void testGet_SingleVersion() { if(PRINT) { System.out.println("SingleVersion"); @@ -41,7 +41,7 @@ implements HConstants { byte [] col3 = Bytes.toBytes("col3"); byte [] col4 = Bytes.toBytes("col4"); byte [] col5 = Bytes.toBytes("col5"); - + //Create tracker List expected = new ArrayList(); expected.add(MatchCode.INCLUDE); @@ -50,9 +50,9 @@ implements HConstants { expected.add(MatchCode.INCLUDE); expected.add(MatchCode.INCLUDE); int maxVersions = 1; - + ColumnTracker exp = new WildcardColumnTracker(maxVersions); - + //Create "Scanner" List scanner = new ArrayList(); scanner.add(col1); @@ -60,15 +60,15 @@ implements HConstants { scanner.add(col3); scanner.add(col4); scanner.add(col5); - + //Initialize result - List result = new ArrayList(); - + List result = new ArrayList(); + //"Match" for(byte [] col : scanner){ result.add(exp.checkColumn(col, 0, col.length)); } - + assertEquals(expected.size(), result.size()); for(int i=0; i< expected.size(); i++){ assertEquals(expected.get(i), result.get(i)); @@ -79,7 +79,7 @@ implements HConstants { } } - + public void testGet_MultiVersion() { if(PRINT) { System.out.println("\nMultiVersion"); @@ -89,7 +89,7 @@ implements HConstants { byte [] col3 = Bytes.toBytes("col3"); byte [] col4 = Bytes.toBytes("col4"); byte [] col5 = Bytes.toBytes("col5"); - + //Create tracker List expected = new ArrayList(); int size = 5; @@ -99,9 +99,9 @@ implements HConstants { expected.add(MatchCode.SKIP); } int maxVersions = 2; - + ColumnTracker exp = new WildcardColumnTracker(maxVersions); - + //Create "Scanner" List scanner = new 
ArrayList(); scanner.add(col1); @@ -119,15 +119,15 @@ implements HConstants { scanner.add(col5); scanner.add(col5); scanner.add(col5); - + //Initialize result - List result = new ArrayList(); - + List result = new ArrayList(); + //"Match" for(byte [] col : scanner){ result.add(exp.checkColumn(col, 0, col.length)); } - + assertEquals(expected.size(), result.size()); for(int i=0; i< expected.size(); i++){ assertEquals(expected.get(i), result.get(i)); @@ -137,7 +137,7 @@ implements HConstants { } } } - + public void testUpdate_SameColumns(){ if(PRINT) { System.out.println("\nUpdate_SameColumns"); @@ -147,7 +147,7 @@ implements HConstants { byte [] col3 = Bytes.toBytes("col3"); byte [] col4 = Bytes.toBytes("col4"); byte [] col5 = Bytes.toBytes("col5"); - + //Create tracker List expected = new ArrayList(); int size = 10; @@ -157,11 +157,11 @@ implements HConstants { for(int i=0; i<5; i++){ expected.add(MatchCode.SKIP); } - + int maxVersions = 2; - + ColumnTracker wild = new WildcardColumnTracker(maxVersions); - + //Create "Scanner" List scanner = new ArrayList(); scanner.add(col1); @@ -169,10 +169,10 @@ implements HConstants { scanner.add(col3); scanner.add(col4); scanner.add(col5); - + //Initialize result - List result = new ArrayList(); - + List result = new ArrayList(); + //"Match" for(int i=0; i<3; i++){ for(byte [] col : scanner){ @@ -180,7 +180,7 @@ implements HConstants { } wild.update(); } - + assertEquals(expected.size(), result.size()); for(int i=0; i expected = new ArrayList(); int size = 10; @@ -217,11 +217,11 @@ implements HConstants { for(int i=0; i<5; i++){ expected.add(MatchCode.SKIP); } - + int maxVersions = 1; - + ColumnTracker wild = new WildcardColumnTracker(maxVersions); - + //Create "Scanner" List scanner = new ArrayList(); scanner.add(col0); @@ -229,10 +229,10 @@ implements HConstants { scanner.add(col2); scanner.add(col3); scanner.add(col4); - + //Initialize result - List result = new ArrayList(); - + List result = new ArrayList(); + for(byte [] col : scanner){ result.add(wild.checkColumn(col, 0, col.length)); } @@ -253,8 +253,8 @@ implements HConstants { //Scanner again for(byte [] col : scanner){ result.add(wild.checkColumn(col, 0, col.length)); - } - + } + //"Match" assertEquals(expected.size(), result.size()); for(int i=0; i expected = new ArrayList(); int size = 5; @@ -298,11 +298,11 @@ implements HConstants { for(int i=0; i scanner = new ArrayList(); scanner.add(col0); @@ -310,10 +310,10 @@ implements HConstants { scanner.add(col4); scanner.add(col6); scanner.add(col8); - + //Initialize result - List result = new ArrayList(); - + List result = new ArrayList(); + for(int i=0; i<2; i++){ for(byte [] col : scanner){ result.add(wild.checkColumn(col, 0, col.length)); @@ -336,11 +336,11 @@ implements HConstants { //Scanner again for(byte [] col : scanner){ result.add(wild.checkColumn(col, 0, col.length)); - } - + } + //"Match" assertEquals(expected.size(), result.size()); - + for(int i=0; i splits, final int howmany) throws IOException { assertEquals(howmany, splits.size()); @@ -270,7 +270,7 @@ public class TestHLog extends HBaseTestCase implements HConstants { long timestamp = System.currentTimeMillis(); WALEdit cols = new WALEdit(); for (int i = 0; i < COL_COUNT; i++) { - cols.add(new KeyValue(row, Bytes.toBytes("column"), + cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)), timestamp, new byte[] { (byte)(i + '0') })); } diff --git a/core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java 
b/core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index aaa348c..785a89f 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ b/core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -45,7 +45,7 @@ public class TestLogRolling extends HBaseClusterTestCase { private HLog log; private String tableName; private byte[] value; - + /** * constructor * @throws Exception @@ -58,14 +58,14 @@ public class TestLogRolling extends HBaseClusterTestCase { this.log = null; this.tableName = null; this.value = null; - + String className = this.getClass().getName(); StringBuilder v = new StringBuilder(className); while (v.length() < 1000) { v.append(className); } value = Bytes.toBytes(v.toString()); - + } catch (Exception e) { LOG.fatal("error in constructor", e); throw e; @@ -95,13 +95,13 @@ public class TestLogRolling extends HBaseClusterTestCase { // a chance to run. conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000); } - + private void startAndWriteData() throws Exception { // When the META table can be opened, the region servers are running new HTable(conf, HConstants.META_TABLE_NAME); this.server = cluster.getRegionServerThreads().get(0).getRegionServer(); this.log = server.getLog(); - + // Create the test table and open it HTableDescriptor desc = new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); @@ -125,7 +125,7 @@ public class TestLogRolling extends HBaseClusterTestCase { /** * Tests that logs are deleted - * + * * @throws Exception */ public void testLogRolling() throws Exception { @@ -133,18 +133,18 @@ public class TestLogRolling extends HBaseClusterTestCase { try { startAndWriteData(); LOG.info("after writing there are " + log.getNumLogFiles() + " log files"); - + // flush all regions - + List regions = new ArrayList(server.getOnlineRegions()); for (HRegion r: regions) { r.flushcache(); } - + // Now roll the log log.rollWriter(); - + int count = log.getNumLogFiles(); LOG.info("after flushing all regions and rolling logs there are " + log.getNumLogFiles() + " log files"); diff --git a/core/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java b/core/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java index 2f6ff85..7b7831f 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java +++ b/core/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java @@ -31,8 +31,8 @@ import org.apache.hadoop.hbase.thrift.generated.TRowResult; import org.apache.hadoop.hbase.util.Bytes; /** - * Unit testing for ThriftServer.HBaseHandler, a part of the - * org.apache.hadoop.hbase.thrift package. + * Unit testing for ThriftServer.HBaseHandler, a part of the + * org.apache.hadoop.hbase.thrift package. */ public class TestThriftServer extends HBaseClusterTestCase { @@ -49,11 +49,11 @@ public class TestThriftServer extends HBaseClusterTestCase { private static byte[] valueDname = Bytes.toBytes("valueD"); /** - * Runs all of the tests under a single JUnit test method. We + * Runs all of the tests under a single JUnit test method. We * consolidate all testing to one method because HBaseClusterTestCase - * is prone to OutOfMemoryExceptions when there are three or more + * is prone to OutOfMemoryExceptions when there are three or more * JUnit test methods. 
- * + * * @throws Exception */ public void testAll() throws Exception { @@ -65,10 +65,10 @@ public class TestThriftServer extends HBaseClusterTestCase { } /** - * Tests for creating, enabling, disabling, and deleting tables. Also - * tests that creating a table with an invalid column name yields an + * Tests for creating, enabling, disabling, and deleting tables. Also + * tests that creating a table with an invalid column name yields an * IllegalArgument exception. - * + * * @throws Exception */ public void doTestTableCreateDrop() throws Exception { @@ -95,10 +95,10 @@ public class TestThriftServer extends HBaseClusterTestCase { } /** - * Tests adding a series of Mutations and BatchMutations, including a - * delete mutation. Also tests data retrieval, and getting back multiple - * versions. - * + * Tests adding a series of Mutations and BatchMutations, including a + * delete mutation. Also tests data retrieval, and getting back multiple + * versions. + * * @throws Exception */ public void doTestTableMutations() throws Exception { @@ -159,10 +159,10 @@ public class TestThriftServer extends HBaseClusterTestCase { } /** - * Similar to testTableMutations(), except Mutations are applied with - * specific timestamps and data retrieval uses these timestamps to - * extract specific versions of data. - * + * Similar to testTableMutations(), except Mutations are applied with + * specific timestamps and data retrieval uses these timestamps to + * extract specific versions of data. + * * @throws Exception */ public void doTestTableTimestampsAndColumns() throws Exception { @@ -199,10 +199,10 @@ public class TestThriftServer extends HBaseClusterTestCase { //assertTrue(Bytes.equals(rowResult1.columns.get(columnAname).value, valueAname)); assertTrue(Bytes.equals(rowResult1.columns.get(columnBname).value, valueBname)); assertTrue(Bytes.equals(rowResult2.columns.get(columnBname).value, valueCname)); - + // ColumnAname has been deleted, and will never be visible even with a getRowTs() assertFalse(rowResult2.columns.containsKey(columnAname)); - + List columns = new ArrayList(); columns.add(columnBname); @@ -213,7 +213,7 @@ public class TestThriftServer extends HBaseClusterTestCase { rowResult1 = handler.getRowWithColumnsTs(tableAname, rowAname, columns, time1).get(0); assertTrue(Bytes.equals(rowResult1.columns.get(columnBname).value, valueBname)); assertFalse(rowResult1.columns.containsKey(columnAname)); - + // Apply some timestamped deletes // this actually deletes _everything_. // nukes everything in columnB: forever. @@ -238,9 +238,9 @@ public class TestThriftServer extends HBaseClusterTestCase { } /** - * Tests the four different scanner-opening methods (with and without - * a stoprow, with and without a timestamp). - * + * Tests the four different scanner-opening methods (with and without + * a stoprow, with and without a timestamp). 
+ * * @throws Exception */ public void doTestTableScanners() throws Exception { @@ -289,12 +289,12 @@ public class TestThriftServer extends HBaseClusterTestCase { closeScanner(scanner2, handler); // Test a scanner on the first row and first column only, no timestamp - int scanner3 = handler.scannerOpenWithStop(tableAname, rowAname, rowBname, + int scanner3 = handler.scannerOpenWithStop(tableAname, rowAname, rowBname, getColumnList(true, false)); closeScanner(scanner3, handler); // Test a scanner on the first row and second column only, with timestamp - int scanner4 = handler.scannerOpenWithStopTs(tableAname, rowAname, rowBname, + int scanner4 = handler.scannerOpenWithStopTs(tableAname, rowAname, rowBname, getColumnList(false, true), time1); TRowResult rowResult4a = handler.scannerGet(scanner4).get(0); assertEquals(rowResult4a.columns.size(), 1); @@ -306,8 +306,8 @@ public class TestThriftServer extends HBaseClusterTestCase { } /** - * - * @return a List of ColumnDescriptors for use in creating a table. Has one + * + * @return a List of ColumnDescriptors for use in creating a table. Has one * default ColumnDescriptor and one ColumnDescriptor with fewer versions */ private List getColumnDescriptors() { @@ -319,7 +319,7 @@ public class TestThriftServer extends HBaseClusterTestCase { cDescriptors.add(cDescA); // A slightly customized ColumnDescriptor (only 2 versions) - ColumnDescriptor cDescB = new ColumnDescriptor(columnBname, 2, "NONE", + ColumnDescriptor cDescB = new ColumnDescriptor(columnBname, 2, "NONE", false, "NONE", 0, 0, false, -1); cDescriptors.add(cDescB); @@ -327,7 +327,7 @@ public class TestThriftServer extends HBaseClusterTestCase { } /** - * + * * @param includeA whether or not to include columnA * @param includeB whether or not to include columnB * @return a List of column names for use in retrieving a scanner @@ -340,8 +340,8 @@ public class TestThriftServer extends HBaseClusterTestCase { } /** - * - * @return a List of Mutations for a row, with columnA having valueA + * + * @return a List of Mutations for a row, with columnA having valueA * and columnB having valueB */ private List getMutations() { @@ -352,12 +352,12 @@ public class TestThriftServer extends HBaseClusterTestCase { } /** - * + * * @return a List of BatchMutations with the following effects: * (rowA, columnA): delete * (rowA, columnB): place valueC * (rowB, columnA): place valueC - * (rowB, columnB): place valueD + * (rowB, columnB): place valueD */ private List getBatchMutations() { List batchMutations = new ArrayList(); @@ -381,9 +381,9 @@ public class TestThriftServer extends HBaseClusterTestCase { } /** - * Asserts that the passed scanner is exhausted, and then closes + * Asserts that the passed scanner is exhausted, and then closes * the scanner. 
- * + * * @param scannerId the scanner to close * @param handler the HBaseHandler interfacing to HBase * @throws Exception diff --git a/core/src/test/java/org/apache/hadoop/hbase/util/SoftValueSortedMapTest.java b/core/src/test/java/org/apache/hadoop/hbase/util/SoftValueSortedMapTest.java index 90e2515..15f2ca8 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/util/SoftValueSortedMapTest.java +++ b/core/src/test/java/org/apache/hadoop/hbase/util/SoftValueSortedMapTest.java @@ -33,9 +33,9 @@ public class SoftValueSortedMapTest { byte[] block = new byte[849*1024*1024]; // FindBugs DLS_DEAD_LOCAL_STORE System.out.println(map.size()); } - + public static void main(String[] args) { testMap(new SoftValueSortedMap()); - testMap(new TreeMap()); + testMap(new TreeMap()); } } \ No newline at end of file diff --git a/core/src/test/java/org/apache/hadoop/hbase/util/TestBase64.java b/core/src/test/java/org/apache/hadoop/hbase/util/TestBase64.java index 20382be..9be5c0c 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/util/TestBase64.java +++ b/core/src/test/java/org/apache/hadoop/hbase/util/TestBase64.java @@ -52,7 +52,7 @@ public class TestBase64 extends TestCase { */ public void testBase64() throws UnsupportedEncodingException { TreeMap sorted = new TreeMap(); - + for (int i = 0; i < uris.length; i++) { byte[] bytes = uris[i].getBytes("UTF-8"); sorted.put(Base64.encodeBytes(bytes, Base64.ORDERED), uris[i]); diff --git a/core/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java b/core/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java index 023de88..ec9785f 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java +++ b/core/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java @@ -109,7 +109,7 @@ public class TestBytes extends TestCase { byte [] key2 = {4,9}; byte [] key2_2 = {4}; byte [] key3 = {5,11}; - + assertEquals(1, Bytes.binarySearch(arr, key1, 0, 1, Bytes.BYTES_RAWCOMPARATOR)); assertEquals(0, Bytes.binarySearch(arr, key1, 1, 1, @@ -125,7 +125,7 @@ public class TestBytes extends TestCase { assertEquals(5, Bytes.binarySearch(arr, key3, 1, 1, Bytes.BYTES_RAWCOMPARATOR)); } - + public void testIncrementBytes() throws IOException { assertTrue(checkTestIncrementBytes(10, 1)); @@ -146,8 +146,8 @@ public class TestBytes extends TestCase { assertTrue(checkTestIncrementBytes(-12, -34565445)); assertTrue(checkTestIncrementBytes(-1546543452, -34565445)); } - - private static boolean checkTestIncrementBytes(long val, long amount) + + private static boolean checkTestIncrementBytes(long val, long amount) throws IOException { byte[] value = Bytes.toBytes(val); byte [] testValue = {-1, -1, -1, -1, -1, -1, -1, -1}; diff --git a/core/src/test/java/org/apache/hadoop/hbase/util/TestKeying.java b/core/src/test/java/org/apache/hadoop/hbase/util/TestKeying.java index 14106aa..7ce5520 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/util/TestKeying.java +++ b/core/src/test/java/org/apache/hadoop/hbase/util/TestKeying.java @@ -52,7 +52,7 @@ public class TestKeying extends TestCase { checkTransform("filename"); } - + private void checkTransform(final String u) { String k = Keying.createKey(u); String uri = Keying.keyToUri(k); diff --git a/core/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java b/core/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java index 5140cb4..3d6f0ec 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java +++ b/core/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java @@ -49,13 +49,13 @@ 
public class TestMergeTool extends HBaseTestCase { // static final byte [] COLUMN_NAME = Bytes.toBytes("contents:"); static final byte [] FAMILY = Bytes.toBytes("contents"); static final byte [] QUALIFIER = Bytes.toBytes("dc"); - + private final HRegionInfo[] sourceRegions = new HRegionInfo[5]; private final HRegion[] regions = new HRegion[5]; private HTableDescriptor desc; private byte [][][] rows; private MiniDFSCluster dfsCluster = null; - + @Override public void setUp() throws Exception { this.conf.set("hbase.hstore.compactionThreshold", "2"); @@ -70,45 +70,45 @@ public class TestMergeTool extends HBaseTestCase { // Region 0 will contain the key range [row_0200,row_0300) sourceRegions[0] = new HRegionInfo(this.desc, Bytes.toBytes("row_0200"), Bytes.toBytes("row_0300")); - + // Region 1 will contain the key range [row_0250,row_0400) and overlaps // with Region 0 sourceRegions[1] = new HRegionInfo(this.desc, Bytes.toBytes("row_0250"), Bytes.toBytes("row_0400")); - + // Region 2 will contain the key range [row_0100,row_0200) and is adjacent // to Region 0 or the region resulting from the merge of Regions 0 and 1 sourceRegions[2] = - new HRegionInfo(this.desc, Bytes.toBytes("row_0100"), + new HRegionInfo(this.desc, Bytes.toBytes("row_0100"), Bytes.toBytes("row_0200")); - + // Region 3 will contain the key range [row_0500,row_0600) and is not // adjacent to any of Regions 0, 1, 2 or the merged result of any or all // of those regions sourceRegions[3] = - new HRegionInfo(this.desc, Bytes.toBytes("row_0500"), + new HRegionInfo(this.desc, Bytes.toBytes("row_0500"), Bytes.toBytes("row_0600")); - + // Region 4 will have empty start and end keys and overlaps all regions. sourceRegions[4] = - new HRegionInfo(this.desc, HConstants.EMPTY_BYTE_ARRAY, + new HRegionInfo(this.desc, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY); - + /* * Now create some row keys */ this.rows = new byte [5][][]; this.rows[0] = Bytes.toByteArrays(new String[] { "row_0210", "row_0280" }); - this.rows[1] = Bytes.toByteArrays(new String[] { "row_0260", "row_0350", + this.rows[1] = Bytes.toByteArrays(new String[] { "row_0260", "row_0350", "row_035" }); - this.rows[2] = Bytes.toByteArrays(new String[] { "row_0110", "row_0175", + this.rows[2] = Bytes.toByteArrays(new String[] { "row_0110", "row_0175", "row_0175", "row_0175"}); - this.rows[3] = Bytes.toByteArrays(new String[] { "row_0525", "row_0560", + this.rows[3] = Bytes.toByteArrays(new String[] { "row_0525", "row_0560", "row_0560", "row_0560", "row_0560"}); - this.rows[4] = Bytes.toByteArrays(new String[] { "row_0050", "row_1000", + this.rows[4] = Bytes.toByteArrays(new String[] { "row_0050", "row_1000", "row_1000", "row_1000", "row_1000", "row_1000" }); - + // Start up dfs this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null); this.fs = this.dfsCluster.getFileSystem(); @@ -121,7 +121,7 @@ public class TestMergeTool extends HBaseTestCase { // Note: we must call super.setUp after starting the mini cluster or // we will end up with a local file system - + super.setUp(); try { // Create root and meta regions @@ -145,7 +145,7 @@ public class TestMergeTool extends HBaseTestCase { } // Close root and meta regions closeRootAndMeta(); - + } catch (Exception e) { shutdownDfs(dfsCluster); throw e; @@ -157,7 +157,7 @@ public class TestMergeTool extends HBaseTestCase { super.tearDown(); shutdownDfs(dfsCluster); } - + /* * @param msg Message that describes this merge * @param regionName1 @@ -178,7 +178,7 @@ public class TestMergeTool extends HBaseTestCase { ); 
assertTrue("'" + msg + "' failed", errCode == 0); HRegionInfo mergedInfo = merger.getMergedHRegionInfo(); - + // Now verify that we can read all the rows from regions 0, 1 // in the new merged region. HRegion merged = @@ -188,7 +188,7 @@ public class TestMergeTool extends HBaseTestCase { LOG.info("Verified " + msg); return merged; } - + private void verifyMerge(final HRegion merged, final int upperbound) throws IOException { //Test @@ -207,9 +207,9 @@ public class TestMergeTool extends HBaseTestCase { } finally { scanner.close(); } - + //!Test - + for (int i = 0; i < upperbound; i++) { for (int j = 0; j < rows[i].length; j++) { Get get = new Get(rows[i][j]); @@ -265,7 +265,7 @@ public class TestMergeTool extends HBaseTestCase { merged = mergeAndVerify("merging regions 0+1+2 and 3", merged.getRegionInfo().getRegionNameAsString(), this.sourceRegions[3].getRegionNameAsString(), log, 4); - + // Merge the result of merging regions 0, 1, 2 and 3 with region 4 merged = mergeAndVerify("merging regions 0+1+2+3 and 4", merged.getRegionInfo().getRegionNameAsString(), diff --git a/core/src/test/java/org/apache/hadoop/hbase/util/TestRootPath.java b/core/src/test/java/org/apache/hadoop/hbase/util/TestRootPath.java index 8a80038..10c9926 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/util/TestRootPath.java +++ b/core/src/test/java/org/apache/hadoop/hbase/util/TestRootPath.java @@ -34,7 +34,7 @@ import org.apache.hadoop.fs.Path; */ public class TestRootPath extends TestCase { private static final Log LOG = LogFactory.getLog(TestRootPath.class); - + /** The test */ public void testRootPath() { try { diff --git a/core/src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java b/core/src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java index 25c5b36..f0d90c0 100644 --- a/core/src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java +++ b/core/src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java @@ -120,7 +120,7 @@ public class TestHQuorumPeer extends HBaseTestCase { server = servers.get(Long.valueOf(0)); assertEquals("foo.bar", server.addr.getHostName()); } - + /** * Test Case for HBASE-2305 */ diff --git a/core/src/test/resources/log4j.properties b/core/src/test/resources/log4j.properties index 4b8f2c4..9d42f74 100644 --- a/core/src/test/resources/log4j.properties +++ b/core/src/test/resources/log4j.properties @@ -31,7 +31,7 @@ log4j.appender.DRFA.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n # # console -# Add "console" to rootlogger above if you want to use this +# Add "console" to rootlogger above if you want to use this # log4j.appender.console=org.apache.log4j.ConsoleAppender log4j.appender.console.target=System.err -- 1.7.1.4.g83535