From f2feb6697aa442934c883d1aac65e8b3bb8bde60 Mon Sep 17 00:00:00 2001 From: Benoit Sigoure Date: Wed, 5 May 2010 15:32:47 -0700 Subject: [PATCH] HBASE-2518 Kill all trailing whitespaces in src/java. --- src/java/org/apache/hadoop/hbase/Chore.java | 10 +- .../org/apache/hadoop/hbase/ClusterStatus.java | 2 +- .../apache/hadoop/hbase/HBaseConfiguration.java | 16 +- .../org/apache/hadoop/hbase/HColumnDescriptor.java | 46 +- src/java/org/apache/hadoop/hbase/HConstants.java | 90 +- src/java/org/apache/hadoop/hbase/HMerge.java | 52 +- src/java/org/apache/hadoop/hbase/HMsg.java | 36 +- src/java/org/apache/hadoop/hbase/HRegionInfo.java | 44 +- .../org/apache/hadoop/hbase/HRegionLocation.java | 6 +- .../org/apache/hadoop/hbase/HServerAddress.java | 24 +- src/java/org/apache/hadoop/hbase/HServerInfo.java | 28 +- src/java/org/apache/hadoop/hbase/HServerLoad.java | 14 +- src/java/org/apache/hadoop/hbase/HStoreKey.java | 68 +- .../org/apache/hadoop/hbase/HTableDescriptor.java | 26 +- src/java/org/apache/hadoop/hbase/KeyValue.java | 194 +++--- .../org/apache/hadoop/hbase/LeaseListener.java | 4 +- src/java/org/apache/hadoop/hbase/Leases.java | 34 +- .../org/apache/hadoop/hbase/LocalHBaseCluster.java | 6 +- .../hadoop/hbase/NotServingRegionException.java | 2 +- .../hadoop/hbase/RemoteExceptionHandler.java | 18 +- .../apache/hadoop/hbase/TableExistsException.java | 2 +- .../hadoop/hbase/ValueOverMaxLengthException.java | 2 +- .../org/apache/hadoop/hbase/VersionAnnotation.java | 10 +- .../org/apache/hadoop/hbase/client/Delete.java | 58 +- src/java/org/apache/hadoop/hbase/client/Get.java | 18 +- .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 114 ++-- .../apache/hadoop/hbase/client/HConnection.java | 44 +- .../hadoop/hbase/client/HConnectionManager.java | 128 ++-- .../org/apache/hadoop/hbase/client/HTable.java | 386 +++++----- .../org/apache/hadoop/hbase/client/HTablePool.java | 12 +- .../apache/hadoop/hbase/client/MetaScanner.java | 16 +- .../org/apache/hadoop/hbase/client/MultiPut.java | 2 +- .../hadoop/hbase/client/MultiPutResponse.java | 2 +- src/java/org/apache/hadoop/hbase/client/Put.java | 128 ++-- .../org/apache/hadoop/hbase/client/Result.java | 82 +- .../apache/hadoop/hbase/client/ResultScanner.java | 8 +- .../hbase/client/RetriesExhaustedException.java | 10 +- .../org/apache/hadoop/hbase/client/RowLock.java | 2 +- src/java/org/apache/hadoop/hbase/client/Scan.java | 92 +- .../org/apache/hadoop/hbase/client/Scanner.java | 6 +- .../hadoop/hbase/client/ScannerCallable.java | 10 +- .../hbase/client/ScannerTimeoutException.java | 2 +- .../apache/hadoop/hbase/client/ServerCallable.java | 8 +- .../hadoop/hbase/client/ServerConnection.java | 4 +- .../hbase/client/UnmodifyableHRegionInfo.java | 4 +- .../hbase/client/UnmodifyableHTableDescriptor.java | 8 +- .../hadoop/hbase/filter/BinaryComparator.java | 2 +- .../hbase/filter/BinaryPrefixComparator.java | 4 +- .../hbase/filter/ColumnPaginationFilter.java | 6 +- .../hadoop/hbase/filter/ColumnValueFilter.java | 26 +- .../apache/hadoop/hbase/filter/CompareFilter.java | 14 +- .../org/apache/hadoop/hbase/filter/Filter.java | 2 +- .../org/apache/hadoop/hbase/filter/FilterList.java | 16 +- .../hadoop/hbase/filter/InclusiveStopFilter.java | 2 +- .../hbase/filter/InclusiveStopRowFilter.java | 2 +- .../org/apache/hadoop/hbase/filter/PageFilter.java | 2 +- .../apache/hadoop/hbase/filter/PageRowFilter.java | 4 +- .../hadoop/hbase/filter/PrefixRowFilter.java | 18 +- .../hadoop/hbase/filter/QualifierFilter.java | 6 +- .../hadoop/hbase/filter/RegExpRowFilter.java | 14 
+- .../hadoop/hbase/filter/RegexStringComparator.java | 2 +- .../org/apache/hadoop/hbase/filter/RowFilter.java | 6 +- .../hadoop/hbase/filter/RowFilterInterface.java | 64 +- .../apache/hadoop/hbase/filter/RowFilterSet.java | 30 +- .../hbase/filter/SingleColumnValueFilter.java | 26 +- .../org/apache/hadoop/hbase/filter/SkipFilter.java | 4 +- .../apache/hadoop/hbase/filter/StopRowFilter.java | 24 +- .../hadoop/hbase/filter/SubstringComparator.java | 2 +- .../apache/hadoop/hbase/filter/ValueFilter.java | 6 +- .../hadoop/hbase/filter/WhileMatchFilter.java | 2 +- .../hadoop/hbase/filter/WhileMatchRowFilter.java | 40 +- .../apache/hadoop/hbase/filter/package-info.java | 2 +- .../org/apache/hadoop/hbase/io/BatchOperation.java | 12 +- .../org/apache/hadoop/hbase/io/BatchUpdate.java | 66 +- src/java/org/apache/hadoop/hbase/io/Cell.java | 10 +- .../apache/hadoop/hbase/io/CodeToClassAndBack.java | 6 +- .../apache/hadoop/hbase/io/HalfHFileReader.java | 8 +- .../apache/hadoop/hbase/io/HbaseMapWritable.java | 14 +- .../hadoop/hbase/io/HbaseObjectWritable.java | 46 +- src/java/org/apache/hadoop/hbase/io/HeapSize.java | 4 +- .../hadoop/hbase/io/ImmutableBytesWritable.java | 36 +- src/java/org/apache/hadoop/hbase/io/Reference.java | 6 +- src/java/org/apache/hadoop/hbase/io/RowResult.java | 36 +- src/java/org/apache/hadoop/hbase/io/TimeRange.java | 30 +- .../apache/hadoop/hbase/io/hfile/BlockCache.java | 4 +- .../apache/hadoop/hbase/io/hfile/CachedBlock.java | 30 +- .../hadoop/hbase/io/hfile/CachedBlockQueue.java | 18 +- .../apache/hadoop/hbase/io/hfile/Compression.java | 4 +- .../org/apache/hadoop/hbase/io/hfile/HFile.java | 128 ++-- .../apache/hadoop/hbase/io/hfile/HFileScanner.java | 8 +- .../hadoop/hbase/io/hfile/LruBlockCache.java | 202 +++--- .../hadoop/hbase/io/hfile/SimpleBlockCache.java | 12 +- .../org/apache/hadoop/hbase/ipc/HBaseClient.java | 140 ++-- src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java | 56 +- .../hadoop/hbase/ipc/HBaseRPCProtocolVersion.java | 2 +- .../hadoop/hbase/ipc/HBaseRPCStatistics.java | 4 +- .../apache/hadoop/hbase/ipc/HBaseRpcMetrics.java | 12 +- .../org/apache/hadoop/hbase/ipc/HBaseServer.java | 228 +++--- .../apache/hadoop/hbase/ipc/HMasterInterface.java | 26 +- .../hadoop/hbase/ipc/HMasterRegionInterface.java | 12 +- .../apache/hadoop/hbase/ipc/HRegionInterface.java | 58 +- .../hadoop/hbase/mapred/BuildTableIndex.java | 2 +- .../org/apache/hadoop/hbase/mapred/Driver.java | 2 +- .../hadoop/hbase/mapred/GroupingTableMap.java | 32 +- .../hadoop/hbase/mapred/HRegionPartitioner.java | 14 +- .../hadoop/hbase/mapred/IdentityTableMap.java | 16 +- .../hadoop/hbase/mapred/IdentityTableReduce.java | 14 +- .../hadoop/hbase/mapred/IndexOutputFormat.java | 2 +- .../hadoop/hbase/mapred/IndexTableReduce.java | 2 +- .../org/apache/hadoop/hbase/mapred/RowCounter.java | 6 +- .../hadoop/hbase/mapred/TableInputFormatBase.java | 4 +- .../hadoop/hbase/mapred/TableMapReduceUtil.java | 68 +- .../hadoop/hbase/mapred/TableOutputFormat.java | 14 +- .../apache/hadoop/hbase/mapred/package-info.java | 20 +- .../hadoop/hbase/mapreduce/BuildTableIndex.java | 20 +- .../org/apache/hadoop/hbase/mapreduce/Driver.java | 2 +- .../org/apache/hadoop/hbase/mapreduce/Export.java | 6 +- .../hbase/mapreduce/GroupingTableMapper.java | 36 +- .../hadoop/hbase/mapreduce/HFileOutputFormat.java | 4 +- .../hadoop/hbase/mapreduce/HRegionPartitioner.java | 22 +- .../hbase/mapreduce/IdentityTableMapper.java | 14 +- .../hbase/mapreduce/IdentityTableReducer.java | 38 +- .../org/apache/hadoop/hbase/mapreduce/Import.java | 10 
+- .../hadoop/hbase/mapreduce/IndexConfiguration.java | 2 +- .../hadoop/hbase/mapreduce/IndexOutputFormat.java | 10 +- .../hadoop/hbase/mapreduce/IndexRecordWriter.java | 34 +- .../hadoop/hbase/mapreduce/IndexTableReducer.java | 22 +- .../hbase/mapreduce/LuceneDocumentWrapper.java | 2 +- .../apache/hadoop/hbase/mapreduce/RowCounter.java | 14 +- .../hadoop/hbase/mapreduce/TableInputFormat.java | 30 +- .../hbase/mapreduce/TableInputFormatBase.java | 58 +- .../hadoop/hbase/mapreduce/TableMapReduceUtil.java | 56 +- .../apache/hadoop/hbase/mapreduce/TableMapper.java | 4 +- .../hadoop/hbase/mapreduce/TableOutputFormat.java | 42 +- .../hadoop/hbase/mapreduce/TableReducer.java | 12 +- .../apache/hadoop/hbase/mapreduce/TableSplit.java | 38 +- .../hadoop/hbase/mapreduce/package-info.java | 6 +- .../org/apache/hadoop/hbase/master/AddColumn.java | 4 +- .../apache/hadoop/hbase/master/BaseScanner.java | 56 +- .../hadoop/hbase/master/ChangeTableState.java | 4 +- .../hadoop/hbase/master/ColumnOperation.java | 4 +- .../apache/hadoop/hbase/master/DeleteColumn.java | 4 +- .../org/apache/hadoop/hbase/master/HMaster.java | 112 ++-- .../org/apache/hadoop/hbase/master/MetaRegion.java | 6 +- .../apache/hadoop/hbase/master/MetaScanner.java | 20 +- .../apache/hadoop/hbase/master/ModifyColumn.java | 8 +- .../hadoop/hbase/master/ModifyTableMeta.java | 4 +- .../hadoop/hbase/master/ProcessRegionClose.java | 8 +- .../hadoop/hbase/master/ProcessRegionOpen.java | 4 +- .../hbase/master/ProcessRegionStatusChange.java | 4 +- .../hadoop/hbase/master/ProcessServerShutdown.java | 20 +- .../apache/hadoop/hbase/master/RegionManager.java | 262 +++--- .../hadoop/hbase/master/RegionServerOperation.java | 12 +- .../hbase/master/RegionServerOperationQueue.java | 4 +- .../hbase/master/RetryableMetaOperation.java | 10 +- .../apache/hadoop/hbase/master/RootScanner.java | 2 +- .../apache/hadoop/hbase/master/ServerManager.java | 92 +- .../apache/hadoop/hbase/master/TableDelete.java | 6 +- .../apache/hadoop/hbase/master/TableOperation.java | 6 +- .../hbase/master/ZKMasterAddressWatcher.java | 2 +- .../hadoop/hbase/master/metrics/MasterMetrics.java | 14 +- .../hbase/master/metrics/MasterStatistics.java | 2 +- .../hadoop/hbase/metrics/MetricsMBeanBase.java | 40 +- .../apache/hadoop/hbase/metrics/MetricsRate.java | 20 +- .../metrics/file/TimeStampingFileContext.java | 2 +- .../migration/nineteen/HStoreFileToStoreFile.java | 2 +- .../hadoop/hbase/migration/nineteen/HStoreKey.java | 136 ++-- .../migration/nineteen/io/BloomFilterMapFile.java | 26 +- .../hbase/migration/nineteen/io/HBaseMapFile.java | 12 +- .../migration/nineteen/io/HalfMapFileReader.java | 20 +- .../hbase/migration/nineteen/io/Reference.java | 16 +- .../nineteen/onelab/filter/BloomFilter.java | 62 +- .../onelab/filter/CountingBloomFilter.java | 64 +- .../nineteen/onelab/filter/DynamicBloomFilter.java | 54 +- .../migration/nineteen/onelab/filter/Filter.java | 56 +- .../nineteen/onelab/filter/HashFunction.java | 52 +- .../migration/nineteen/onelab/filter/Key.java | 56 +- .../nineteen/onelab/filter/RemoveScheme.java | 42 +- .../onelab/filter/RetouchedBloomFilter.java | 60 +- .../nineteen/regionserver/HStoreFile.java | 66 +- .../hadoop/hbase/regionserver/ColumnCount.java | 18 +- .../hadoop/hbase/regionserver/ColumnTracker.java | 12 +- .../hbase/regionserver/CompactSplitThread.java | 26 +- .../hadoop/hbase/regionserver/DeleteCompare.java | 10 +- .../hadoop/hbase/regionserver/DeleteTracker.java | 18 +- .../hbase/regionserver/ExplicitColumnTracker.java | 14 +- 
.../regionserver/FailedLogCloseException.java | 2 +- .../hadoop/hbase/regionserver/FlushRequester.java | 2 +- .../regionserver/GetClosestRowBeforeTracker.java | 4 +- .../hbase/regionserver/GetDeleteTracker.java | 14 +- .../org/apache/hadoop/hbase/regionserver/HLog.java | 182 ++-- .../apache/hadoop/hbase/regionserver/HLogKey.java | 20 +- .../apache/hadoop/hbase/regionserver/HRegion.java | 312 ++++---- .../hadoop/hbase/regionserver/HRegionServer.java | 210 +++--- .../hadoop/hbase/regionserver/InternalScanner.java | 8 +- .../hadoop/hbase/regionserver/KeyValueHeap.java | 16 +- .../hadoop/hbase/regionserver/KeyValueScanner.java | 8 +- .../hbase/regionserver/KeyValueSkipListSet.java | 2 +- .../hadoop/hbase/regionserver/LogRoller.java | 4 +- .../hadoop/hbase/regionserver/LruHashMap.java | 140 ++-- .../apache/hadoop/hbase/regionserver/MemStore.java | 22 +- .../hadoop/hbase/regionserver/MemStoreFlusher.java | 36 +- .../hadoop/hbase/regionserver/QueryMatcher.java | 74 +- .../regionserver/ReadWriteConsistencyControl.java | 2 +- .../regionserver/RegionServerRunningException.java | 2 +- .../hbase/regionserver/ScanDeleteTracker.java | 10 +- .../hbase/regionserver/ScanQueryMatcher.java | 20 +- .../regionserver/ScanWildcardColumnTracker.java | 8 +- .../apache/hadoop/hbase/regionserver/Store.java | 116 ++-- .../hadoop/hbase/regionserver/StoreFile.java | 24 +- .../hbase/regionserver/StoreFileGetScan.java | 12 +- .../hbase/regionserver/StoreFileScanner.java | 16 +- .../hadoop/hbase/regionserver/StoreScanner.java | 8 +- .../apache/hadoop/hbase/regionserver/WALEdit.java | 32 +- .../hbase/regionserver/WildcardColumnTracker.java | 42 +- .../regionserver/metrics/RegionServerMetrics.java | 20 +- .../metrics/RegionServerStatistics.java | 4 +- .../hadoop/hbase/rest/AbstractController.java | 2 +- .../org/apache/hadoop/hbase/rest/Dispatcher.java | 22 +- .../apache/hadoop/hbase/rest/RowController.java | 2 +- .../org/apache/hadoop/hbase/rest/RowModel.java | 8 +- .../hadoop/hbase/rest/ScannerController.java | 14 +- .../org/apache/hadoop/hbase/rest/ScannerModel.java | 10 +- src/java/org/apache/hadoop/hbase/rest/Status.java | 14 +- .../apache/hadoop/hbase/rest/TableController.java | 4 +- .../org/apache/hadoop/hbase/rest/TableModel.java | 18 +- .../apache/hadoop/hbase/rest/TimestampModel.java | 10 +- .../hadoop/hbase/rest/descriptors/RestCell.java | 16 +- .../rest/descriptors/RowUpdateDescriptor.java | 6 +- .../hbase/rest/descriptors/ScannerDescriptor.java | 4 +- .../hbase/rest/descriptors/ScannerIdentifier.java | 4 +- .../rest/descriptors/TimestampsDescriptor.java | 4 +- .../hbase/rest/exception/HBaseRestException.java | 2 +- .../rest/filter/ColumnValueFilterFactory.java | 4 +- .../hadoop/hbase/rest/filter/FilterFactory.java | 8 +- .../rest/filter/InclusiveStopRowFilterFactory.java | 2 +- .../hbase/rest/filter/RowFilterSetFactory.java | 6 +- .../hbase/rest/filter/StopRowFilterFactory.java | 2 +- .../rest/filter/WhileMatchRowFilterFactory.java | 2 +- .../hbase/rest/parser/HBaseRestParserFactory.java | 2 +- .../hadoop/hbase/rest/parser/IHBaseRestParser.java | 4 +- .../hadoop/hbase/rest/parser/XMLRestParser.java | 10 +- .../rest/serializer/AbstractRestSerializer.java | 6 +- .../hbase/rest/serializer/IRestSerializer.java | 54 +- .../hbase/rest/serializer/ISerializable.java | 4 +- .../rest/serializer/RestSerializerFactory.java | 4 +- .../hbase/rest/serializer/SimpleXMLSerializer.java | 46 +- .../apache/hadoop/hbase/thrift/ThriftServer.java | 118 ++-- .../hadoop/hbase/thrift/ThriftUtilities.java | 18 +- 
.../hbase/thrift/generated/AlreadyExists.java | 6 +- .../hbase/thrift/generated/BatchMutation.java | 12 +- .../hbase/thrift/generated/ColumnDescriptor.java | 38 +- .../hadoop/hbase/thrift/generated/Hbase.java | 898 ++++++++++---------- .../hadoop/hbase/thrift/generated/IOError.java | 6 +- .../hbase/thrift/generated/IllegalArgument.java | 6 +- .../hadoop/hbase/thrift/generated/Mutation.java | 14 +- .../hadoop/hbase/thrift/generated/TCell.java | 10 +- .../hadoop/hbase/thrift/generated/TRegionInfo.java | 22 +- .../hadoop/hbase/thrift/generated/TRowResult.java | 14 +- src/java/org/apache/hadoop/hbase/util/Base64.java | 170 ++-- src/java/org/apache/hadoop/hbase/util/Bytes.java | 2 +- .../org/apache/hadoop/hbase/util/ClassSize.java | 118 ++-- src/java/org/apache/hadoop/hbase/util/FSUtils.java | 46 +- src/java/org/apache/hadoop/hbase/util/Hash.java | 14 +- .../org/apache/hadoop/hbase/util/InfoServer.java | 10 +- .../apache/hadoop/hbase/util/JVMClusterUtil.java | 8 +- .../org/apache/hadoop/hbase/util/JenkinsHash.java | 50 +- src/java/org/apache/hadoop/hbase/util/Keying.java | 14 +- src/java/org/apache/hadoop/hbase/util/Merge.java | 54 +- .../org/apache/hadoop/hbase/util/MetaUtils.java | 66 +- src/java/org/apache/hadoop/hbase/util/Migrate.java | 36 +- .../org/apache/hadoop/hbase/util/MurmurHash.java | 6 +- src/java/org/apache/hadoop/hbase/util/Sleeper.java | 8 +- .../org/apache/hadoop/hbase/util/SoftValueMap.java | 16 +- .../hadoop/hbase/util/SoftValueSortedMap.java | 24 +- src/java/org/apache/hadoop/hbase/util/Strings.java | 2 +- src/java/org/apache/hadoop/hbase/util/Threads.java | 4 +- .../org/apache/hadoop/hbase/util/VersionInfo.java | 14 +- .../org/apache/hadoop/hbase/util/Writables.java | 6 +- .../hadoop/hbase/zookeeper/ZooKeeperWrapper.java | 36 +- 280 files changed, 4593 insertions(+), 4593 deletions(-) diff --git a/src/java/org/apache/hadoop/hbase/Chore.java b/src/java/org/apache/hadoop/hbase/Chore.java index 48fde89..5b425ff 100644 --- a/src/java/org/apache/hadoop/hbase/Chore.java +++ b/src/java/org/apache/hadoop/hbase/Chore.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.util.Sleeper; * If an unhandled exception, the threads exit is logged. * Implementers just need to add checking if there is work to be done and if * so, do it. Its the base of most of the chore threads in hbase. - * + * * Don't subclass Chore if the task relies on being woken up for something to * do, such as an entry being added to a queue, etc. */ @@ -39,7 +39,7 @@ public abstract class Chore extends Thread { private final Log LOG = LogFactory.getLog(this.getClass()); private final Sleeper sleeper; protected volatile AtomicBoolean stop; - + /** * @param p Period at which we should run. Will be adjusted appropriately * should we find work and it takes time to complete. @@ -82,7 +82,7 @@ public abstract class Chore extends Thread { LOG.info(getName() + " exiting"); } } - + /** * If the thread is currently sleeping, trigger the core to happen immediately. * If it's in the middle of its operation, will begin another operation @@ -91,7 +91,7 @@ public abstract class Chore extends Thread { public void triggerNow() { this.sleeper.skipSleepCycle(); } - + /** * Override to run a task before we start looping. * @return true if initial chore was successful @@ -100,7 +100,7 @@ public abstract class Chore extends Thread { // Default does nothing. return true; } - + /** * Look for chores. If any found, do them else just return. 
*/ diff --git a/src/java/org/apache/hadoop/hbase/ClusterStatus.java b/src/java/org/apache/hadoop/hbase/ClusterStatus.java index 74bbadc..2649301 100644 --- a/src/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/src/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -177,7 +177,7 @@ public class ClusterStatus extends VersionedWritable { /** * Returns detailed region server information: A list of * {@link HServerInfo}, containing server load and resource usage - * statistics as {@link HServerLoad}, containing per-region + * statistics as {@link HServerLoad}, containing per-region * statistics as {@link HServerLoad.RegionLoad}. * @return region server information */ diff --git a/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java index 03e65aa..5e2e295 100644 --- a/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java +++ b/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java @@ -33,7 +33,7 @@ public class HBaseConfiguration extends Configuration { super(); addHbaseResources(); } - + /** * Create a clone of passed configuration. * @param c Configuration to clone. @@ -44,16 +44,16 @@ public class HBaseConfiguration extends Configuration { set(e.getKey(), e.getValue()); } } - + private void addHbaseResources() { addResource("hbase-default.xml"); addResource("hbase-site.xml"); } - + /** * Returns the hash code value for this HBaseConfiguration. The hash code of a * HBaseConfiguration is defined by the xor of the hash codes of its entries. - * + * * @see Configuration#iterator() How the entries are obtained. */ @Override @@ -75,7 +75,7 @@ public class HBaseConfiguration extends Configuration { return false; if (!(obj instanceof HBaseConfiguration)) return false; - + HBaseConfiguration otherConf = (HBaseConfiguration) obj; if (size() != otherConf.size()) { return false; @@ -89,9 +89,9 @@ public class HBaseConfiguration extends Configuration { return false; } } - + return true; } - - + + } diff --git a/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java index b2fc413..a149483 100644 --- a/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -39,7 +39,7 @@ import org.apache.hadoop.io.WritableComparable; /** * An HColumnDescriptor contains information about a column family such as the * number of versions, compression settings, etc. - * + * * It is used as input when creating a table or adding a column. Once set, the * parameters that specify a column cannot be changed without deleting the * column and recreating it. If there is data stored in the column, it will be @@ -55,7 +55,7 @@ public class HColumnDescriptor implements ISerializable, WritableComparable: */ public HColumnDescriptor(final String familyName) { this(Bytes.toBytes(familyName)); } - + /** - * Construct a column descriptor specifying only the family name + * Construct a column descriptor specifying only the family name * The other attributes are defaulted. - * + * * @param familyName Column family name. 
Must be 'printable' -- digit or * letter -- and end in a : */ @@ -170,7 +170,7 @@ public class HColumnDescriptor implements ISerializable, WritableComparable[a-zA-Z_0-9] and does not * end in a : * @throws IllegalArgumentException if the number of versions is <= 0 @@ -212,7 +212,7 @@ public class HColumnDescriptor implements ISerializable, WritableComparable: * @param maxVersions Maximum number of versions to keep @@ -225,8 +225,8 @@ public class HColumnDescriptor implements ISerializable, WritableComparable[a-zA-Z_0-9] and does not * end in a : * @throws IllegalArgumentException if the number of versions is <= 0 @@ -235,12 +235,12 @@ public class HColumnDescriptor implements ISerializable, WritableComparable[a-zA-Z_0-9] and does not * end in a : * @throws IllegalArgumentException if the number of versions is <= 0 @@ -398,7 +398,7 @@ public class HColumnDescriptor implements ISerializable, WritableComparable metaRegions = new ArrayList(); private final HRegion root; - + OfflineMerger(HBaseConfiguration conf, FileSystem fs) throws IOException { - + super(conf, fs, META_TABLE_NAME); Path rootTableDir = HTableDescriptor.getTableDir( @@ -323,16 +323,16 @@ class HMerge implements HConstants { ROOT_TABLE_NAME); // Scan root region to find all the meta regions - + root = HRegion.newHRegion(rootTableDir, hlog, fs, conf, HRegionInfo.ROOT_REGIONINFO, null); root.initialize(null, null); Scan scan = new Scan(); scan.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER); - InternalScanner rootScanner = + InternalScanner rootScanner = root.getScanner(scan); - + try { List results = new ArrayList(); while(rootScanner.next(results)) { @@ -347,7 +347,7 @@ class HMerge implements HConstants { rootScanner.close(); try { root.close(); - + } catch(IOException e) { LOG.error(e); } @@ -382,7 +382,7 @@ class HMerge implements HConstants { delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER); root.delete(delete, null, true); - + if(LOG.isDebugEnabled()) { LOG.debug("updated columns in row: " + Bytes.toString(regionsToDelete[r])); } diff --git a/src/java/org/apache/hadoop/hbase/HMsg.java b/src/java/org/apache/hadoop/hbase/HMsg.java index 6a350b3..1632732 100644 --- a/src/java/org/apache/hadoop/hbase/HMsg.java +++ b/src/java/org/apache/hadoop/hbase/HMsg.java @@ -27,9 +27,9 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; /** - * HMsg is for communicating instructions between the HMaster and the + * HMsg is for communicating instructions between the HMaster and the * HRegionServers. - * + * * Most of the time the messages are simple but some messages are accompanied * by the region affected. HMsg may also carry optional message. */ @@ -40,11 +40,11 @@ public class HMsg implements Writable { public static enum Type { /** null message */ MSG_NONE, - + // Message types sent from master to region server /** Start serving the specified region */ MSG_REGION_OPEN, - + /** Stop serving the specified region */ MSG_REGION_CLOSE, @@ -56,22 +56,22 @@ public class HMsg implements Writable { /** Region server is unknown to master. 
Restart */ MSG_CALL_SERVER_STARTUP, - + /** Master tells region server to stop */ MSG_REGIONSERVER_STOP, - + /** Stop serving the specified region and don't report back that it's * closed */ MSG_REGION_CLOSE_WITHOUT_REPORT, - + /** Stop serving user regions */ MSG_REGIONSERVER_QUIESCE, // Message types sent from the region server to the master /** region server is now serving the specified region */ MSG_REPORT_OPEN, - + /** region server is no longer serving the specified region */ MSG_REPORT_CLOSE, @@ -80,7 +80,7 @@ public class HMsg implements Writable { /** * Region server split the region associated with this message. - * + * * Note that this message is immediately followed by two MSG_REPORT_OPEN * messages, one for each of the new regions resulting from the split * @deprecated See MSG_REPORT_SPLIT_INCLUDES_DAUGHTERS @@ -89,7 +89,7 @@ public class HMsg implements Writable { /** * Region server is shutting down - * + * * Note that this message is followed by MSG_REPORT_CLOSE messages for each * region the region server was serving, unless it was told to quiesce. */ @@ -99,12 +99,12 @@ public class HMsg implements Writable { * regions */ MSG_REPORT_QUIESCED, - + /** * Flush */ MSG_REGION_FLUSH, - + /** * Run Major Compaction */ @@ -112,7 +112,7 @@ public class HMsg implements Writable { /** * Region server split the region associated with this message. - * + * * Its like MSG_REPORT_SPLIT only it carries the daughters in the message * rather than send them individually in MSG_REPORT_OPEN messages. */ @@ -144,7 +144,7 @@ public class HMsg implements Writable { public HMsg(final HMsg.Type type) { this(type, new HRegionInfo(), null); } - + /** * Construct a message with the specified message and HRegionInfo * @param type Message type @@ -156,7 +156,7 @@ public class HMsg implements Writable { /** * Construct a message with the specified message and HRegionInfo - * + * * @param type Message type * @param hri Region to which message type applies. Cannot be * null. If no info associated, used other Constructor. @@ -168,7 +168,7 @@ public class HMsg implements Writable { /** * Construct a message with the specified message and HRegionInfo - * + * * @param type Message type * @param hri Region to which message type applies. Cannot be * null. If no info associated, used other Constructor. 
@@ -202,7 +202,7 @@ public class HMsg implements Writable { public Type getType() { return this.type; } - + /** * @param other Message type to compare to * @return True if we are of same message type as other @@ -281,7 +281,7 @@ public class HMsg implements Writable { } return result; } - + // //////////////////////////////////////////////////////////////////////////// // Writable ////////////////////////////////////////////////////////////////////////////// diff --git a/src/java/org/apache/hadoop/hbase/HRegionInfo.java b/src/java/org/apache/hadoop/hbase/HRegionInfo.java index 0aa0580..29b0cd6 100644 --- a/src/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/src/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -79,7 +79,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable result ^= this.tableDesc.hashCode(); this.hashCode = result; } - + /** * Private constructor used constructing HRegionInfo for the catalog root and * first meta regions @@ -98,10 +98,10 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable super(); this.tableDesc = new HTableDescriptor(); } - + /** * Construct HRegionInfo with explicit parameters - * + * * @param tableDesc the table descriptor * @param startKey first key in region * @param endKey end of key range @@ -115,7 +115,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable /** * Construct HRegionInfo with explicit parameters - * + * * @param tableDesc the table descriptor * @param startKey first key in region * @param endKey end of key range @@ -131,7 +131,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable /** * Construct HRegionInfo with explicit parameters - * + * * @param tableDesc the table descriptor * @param startKey first key in region * @param endKey end of key range @@ -158,10 +158,10 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable this.tableDesc = tableDesc; setHashCode(); } - + /** * Costruct a copy of another HRegionInfo - * + * * @param other */ public HRegionInfo(HRegionInfo other) { @@ -177,7 +177,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable this.hashCode = other.hashCode(); this.encodedName = other.getEncodedName(); } - + private static byte [] createRegionName(final byte [] tableName, final byte [] startKey, final long regionid) { return createRegionName(tableName, startKey, Long.toString(regionid)); @@ -216,7 +216,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable System.arraycopy(id, 0, b, offset, id.length); return b; } - + /** * Separate elements of a regionName. 
* @param regionName @@ -246,11 +246,11 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; if(offset != tableName.length + 1) { startKey = new byte[offset - tableName.length - 1]; - System.arraycopy(regionName, tableName.length + 1, startKey, 0, + System.arraycopy(regionName, tableName.length + 1, startKey, 0, offset - tableName.length - 1); } byte [] id = new byte[regionName.length - offset - 1]; - System.arraycopy(regionName, offset + 1, id, 0, + System.arraycopy(regionName, offset + 1, id, 0, regionName.length - offset - 1); byte [][] elements = new byte[3][]; elements[0] = tableName; @@ -258,7 +258,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable elements[2] = id; return elements; } - + /** @return the endKey */ public byte [] getEndKey(){ return endKey; @@ -283,7 +283,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable public String getRegionNameAsString() { return this.regionNameStr; } - + /** @return the encoded region name */ public synchronized int getEncodedName() { if (this.encodedName == NO_HASH) { @@ -313,7 +313,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable public boolean isRootRegion() { return this.tableDesc.isRootRegion(); } - + /** @return true if this is the meta table */ public boolean isMetaTable() { return this.tableDesc.isMetaTable(); @@ -323,14 +323,14 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable public boolean isMetaRegion() { return this.tableDesc.isMetaRegion(); } - + /** * @return True if has been split and has daughters. */ public boolean isSplit() { return this.split; } - + /** * @param split set split status */ @@ -363,7 +363,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + Bytes.toStringBinary(this.endKey) + "', ENCODED => " + getEncodedName() + "," + - (isOffline()? " OFFLINE => true,": "") + + (isOffline()? " OFFLINE => true,": "") + (isSplit()? " SPLIT => true,": "") + " TABLE => {" + this.tableDesc.toString() + "}"; } @@ -415,7 +415,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable tableDesc.write(out); out.writeInt(hashCode); } - + @Override public void readFields(DataInput in) throws IOException { super.readFields(in); @@ -429,16 +429,16 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable this.tableDesc.readFields(in); this.hashCode = in.readInt(); } - + // // Comparable // - + public int compareTo(HRegionInfo o) { if (o == null) { return 1; } - + // Are regions of same table? int result = this.tableDesc.compareTo(o.tableDesc); if (result != 0) { @@ -450,7 +450,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable if (result != 0) { return result; } - + // Compare end keys. 
return Bytes.compareTo(this.endKey, o.endKey); } diff --git a/src/java/org/apache/hadoop/hbase/HRegionLocation.java b/src/java/org/apache/hadoop/hbase/HRegionLocation.java index 6be0cff..722991b 100644 --- a/src/java/org/apache/hadoop/hbase/HRegionLocation.java +++ b/src/java/org/apache/hadoop/hbase/HRegionLocation.java @@ -29,7 +29,7 @@ public class HRegionLocation implements Comparable { /** * Constructor - * + * * @param regionInfo the HRegionInfo for the region * @param serverAddress the HServerAddress for the region server */ @@ -73,7 +73,7 @@ public class HRegionLocation implements Comparable { result ^= this.serverAddress.hashCode(); return result; } - + /** @return HRegionInfo */ public HRegionInfo getRegionInfo(){ return regionInfo; @@ -87,7 +87,7 @@ public class HRegionLocation implements Comparable { // // Comparable // - + public int compareTo(HRegionLocation o) { int result = this.regionInfo.compareTo(o.regionInfo); if(result == 0) { diff --git a/src/java/org/apache/hadoop/hbase/HServerAddress.java b/src/java/org/apache/hadoop/hbase/HServerAddress.java index 90e627b..1edc944 100644 --- a/src/java/org/apache/hadoop/hbase/HServerAddress.java +++ b/src/java/org/apache/hadoop/hbase/HServerAddress.java @@ -49,10 +49,10 @@ public class HServerAddress implements WritableComparable { this.stringValue = address.getAddress().getHostAddress() + ":" + address.getPort(); } - + /** * Construct a HServerAddress from a string of the form hostname:port - * + * * @param hostAndPort format 'hostname:port' */ public HServerAddress(String hostAndPort) { @@ -66,7 +66,7 @@ public class HServerAddress implements WritableComparable { this.address = new InetSocketAddress(host, port); this.stringValue = hostAndPort; } - + /** * Construct a HServerAddress from hostname, port number * @param bindAddress host name @@ -76,10 +76,10 @@ public class HServerAddress implements WritableComparable { this.address = new InetSocketAddress(bindAddress, port); this.stringValue = bindAddress + ":" + port; } - + /** * Construct a HServerAddress from another HServerAddress - * + * * @param other the HServerAddress to copy from */ public HServerAddress(HServerAddress other) { @@ -98,7 +98,7 @@ public class HServerAddress implements WritableComparable { public int getPort() { return address.getPort(); } - + /** @return host name */ public String getHostname() { return address.getHostName(); @@ -143,7 +143,7 @@ public class HServerAddress implements WritableComparable { result ^= this.stringValue.hashCode(); return result; } - + // // Writable // @@ -151,11 +151,11 @@ public class HServerAddress implements WritableComparable { public void readFields(DataInput in) throws IOException { String bindAddress = in.readUTF(); int port = in.readInt(); - + if(bindAddress == null || bindAddress.length() == 0) { address = null; stringValue = null; - + } else { address = new InetSocketAddress(bindAddress, port); stringValue = bindAddress + ":" + port; @@ -166,17 +166,17 @@ public class HServerAddress implements WritableComparable { if (address == null) { out.writeUTF(""); out.writeInt(0); - + } else { out.writeUTF(address.getAddress().getHostAddress()); out.writeInt(address.getPort()); } } - + // // Comparable // - + public int compareTo(HServerAddress o) { // Addresses as Strings may not compare though address is for the one // server with only difference being that one address has hostname diff --git a/src/java/org/apache/hadoop/hbase/HServerInfo.java b/src/java/org/apache/hadoop/hbase/HServerInfo.java index 322f8f4..424e809 100644 
--- a/src/java/org/apache/hadoop/hbase/HServerInfo.java +++ b/src/java/org/apache/hadoop/hbase/HServerInfo.java @@ -31,7 +31,7 @@ import org.apache.hadoop.io.WritableComparable; /** * HServerInfo contains metainfo about an HRegionServer, Currently it only * contains the server start code. - * + * * In the future it will contain information about the source machine and * load statistics. */ @@ -46,10 +46,10 @@ public class HServerInfo implements WritableComparable { /** default constructor - used by Writable */ public HServerInfo() { - this(new HServerAddress(), 0, + this(new HServerAddress(), 0, HConstants.DEFAULT_REGIONSERVER_INFOPORT, "default name"); } - + /** * Constructor * @param serverAddress @@ -64,7 +64,7 @@ public class HServerInfo implements WritableComparable { this.infoPort = infoPort; this.name = name; } - + /** * Construct a new object using another as input (like a copy constructor) * @param other @@ -95,7 +95,7 @@ public class HServerInfo implements WritableComparable { public synchronized HServerAddress getServerAddress() { return new HServerAddress(serverAddress); } - + /** * Change the server address. * @param serverAddress New server address @@ -104,26 +104,26 @@ public class HServerInfo implements WritableComparable { this.serverAddress = serverAddress; this.serverName = null; } - + /** @return the server start code */ public synchronized long getStartCode() { return startCode; } - + /** * @return Port the info server is listening on. */ public int getInfoPort() { return this.infoPort; } - + /** * @param infoPort - new port of info server */ public void setInfoPort(int infoPort) { this.infoPort = infoPort; } - + /** * @param startCode the startCode to set */ @@ -131,7 +131,7 @@ public class HServerInfo implements WritableComparable { this.startCode = startCode; this.serverName = null; } - + /** * @return the server name in the form hostname_startcode_port */ @@ -148,7 +148,7 @@ public class HServerInfo implements WritableComparable { } return this.serverName; } - + /** * Get the hostname of the server * @return hostname @@ -156,7 +156,7 @@ public class HServerInfo implements WritableComparable { public String getName() { return name; } - + /** * Set the hostname of the server * @param name hostname @@ -201,7 +201,7 @@ public class HServerInfo implements WritableComparable { // Writable - + public void readFields(DataInput in) throws IOException { this.serverAddress.readFields(in); this.startCode = in.readLong(); @@ -229,7 +229,7 @@ public class HServerInfo implements WritableComparable { private static String getServerName(HServerInfo info) { return getServerName(info.getServerAddress(), info.getStartCode()); } - + /** * @param serverAddress in the form hostname:port * @param startCode diff --git a/src/java/org/apache/hadoop/hbase/HServerLoad.java b/src/java/org/apache/hadoop/hbase/HServerLoad.java index 853e356..efa7e0e 100644 --- a/src/java/org/apache/hadoop/hbase/HServerLoad.java +++ b/src/java/org/apache/hadoop/hbase/HServerLoad.java @@ -49,7 +49,7 @@ public class HServerLoad implements WritableComparable { /** per-region load metrics */ private ArrayList regionLoad = new ArrayList(); - /** + /** * Encapsulates per-region loading metrics. 
*/ public static class RegionLoad implements Writable { @@ -82,7 +82,7 @@ public class HServerLoad implements WritableComparable { * @param storefileIndexSizeMB */ public RegionLoad(final byte[] name, final int stores, - final int storefiles, final int storefileSizeMB, + final int storefiles, final int storefileSizeMB, final int memstoreSizeMB, final int storefileIndexSizeMB) { this.name = name; this.stores = stores; @@ -239,7 +239,7 @@ public class HServerLoad implements WritableComparable { public HServerLoad() { super(); } - + /** * Constructor * @param numberOfRequests @@ -265,7 +265,7 @@ public class HServerLoad implements WritableComparable { /** * Originally, this method factored in the effect of requests going to the * server as well. However, this does not interact very well with the current - * region rebalancing code, which only factors number of regions. For the + * region rebalancing code, which only factors number of regions. For the * interim, until we can figure out how to make rebalancing use all the info * available, we're just going to make load purely the number of regions. * @@ -285,7 +285,7 @@ public class HServerLoad implements WritableComparable { public String toString() { return toString(1); } - + /** * Returns toString() with the number of requests divided by the message * interval in seconds @@ -330,9 +330,9 @@ public class HServerLoad implements WritableComparable { result ^= Integer.valueOf(numberOfRegions).hashCode(); return result; } - + // Getters - + /** * @return the numberOfRegions */ diff --git a/src/java/org/apache/hadoop/hbase/HStoreKey.java b/src/java/org/apache/hadoop/hbase/HStoreKey.java index 5068431..4545e1b 100644 --- a/src/java/org/apache/hadoop/hbase/HStoreKey.java +++ b/src/java/org/apache/hadoop/hbase/HStoreKey.java @@ -67,12 +67,12 @@ public class HStoreKey implements WritableComparable, HeapSize { public HStoreKey() { super(); } - + /** * Create an HStoreKey specifying only the row * The column defaults to the empty string, the time stamp defaults to * Long.MAX_VALUE and the table defaults to empty string - * + * * @param row - row key */ public HStoreKey(final byte [] row) { @@ -83,17 +83,17 @@ public class HStoreKey implements WritableComparable, HeapSize { * Create an HStoreKey specifying only the row * The column defaults to the empty string, the time stamp defaults to * Long.MAX_VALUE and the table defaults to empty string - * + * * @param row - row key */ public HStoreKey(final String row) { this(Bytes.toBytes(row), Long.MAX_VALUE); } - + /** * Create an HStoreKey specifying the row and timestamp * The column and table names default to the empty string - * + * * @param row row key * @param timestamp timestamp value */ @@ -105,7 +105,7 @@ public class HStoreKey implements WritableComparable, HeapSize { * Create an HStoreKey specifying the row and column names * The timestamp defaults to LATEST_TIMESTAMP * and table name defaults to the empty string - * + * * @param row row key * @param column column key */ @@ -117,7 +117,7 @@ public class HStoreKey implements WritableComparable, HeapSize { * Create an HStoreKey specifying the row and column names * The timestamp defaults to LATEST_TIMESTAMP * and table name defaults to the empty string - * + * * @param row row key * @param column column key */ @@ -127,7 +127,7 @@ public class HStoreKey implements WritableComparable, HeapSize { /** * Create an HStoreKey specifying all the fields - * Does not make copies of the passed byte arrays. 
Presumes the passed + * Does not make copies of the passed byte arrays. Presumes the passed * arrays immutable. * @param row row key * @param column column key @@ -139,7 +139,7 @@ public class HStoreKey implements WritableComparable, HeapSize { /** * Create an HStoreKey specifying all the fields with specified table - * Does not make copies of the passed byte arrays. Presumes the passed + * Does not make copies of the passed byte arrays. Presumes the passed * arrays immutable. * @param row row key * @param column column key @@ -154,7 +154,7 @@ public class HStoreKey implements WritableComparable, HeapSize { /** * Constructs a new HStoreKey from another - * + * * @param other the source key */ public HStoreKey(final HStoreKey other) { @@ -167,16 +167,16 @@ public class HStoreKey implements WritableComparable, HeapSize { /** * Change the value of the row key - * + * * @param newrow new row key value */ public void setRow(final byte [] newrow) { this.row = newrow; } - + /** * Change the value of the column in this key - * + * * @param c new column family value */ public void setColumn(final byte [] c) { @@ -185,16 +185,16 @@ public class HStoreKey implements WritableComparable, HeapSize { /** * Change the value of the timestamp field - * + * * @param timestamp new timestamp value */ public void setVersion(final long timestamp) { this.timestamp = timestamp; } - + /** * Set the value of this HStoreKey from the supplied key - * + * * @param k key value to copy */ public void set(final HStoreKey k) { @@ -202,12 +202,12 @@ public class HStoreKey implements WritableComparable, HeapSize { this.column = k.getColumn(); this.timestamp = k.getTimestamp(); } - + /** @return value of row key */ public byte [] getRow() { return row; } - + /** @return value of column */ public byte [] getColumn() { return this.column; @@ -224,17 +224,17 @@ public class HStoreKey implements WritableComparable, HeapSize { * @return True if same row and column. * @see #matchesWithoutColumn(HStoreKey) * @see #matchesRowFamily(HStoreKey) - */ + */ public boolean matchesRowCol(final HStoreKey other) { return HStoreKey.equalsTwoRowKeys(getRow(), other.getRow()) && Bytes.equals(getColumn(), other.getColumn()); } - + /** * Compares the row and timestamp of two keys - * + * * @param other Key to copmare against. Compares row and timestamp. - * + * * @return True if same row and timestamp is greater than other * @see #matchesRowCol(HStoreKey) * @see #matchesRowFamily(HStoreKey) @@ -246,9 +246,9 @@ public class HStoreKey implements WritableComparable, HeapSize { /** * Compares the row and column family of two keys - * + * * @param that Key to compare against. Compares row and column family - * + * * @return true if same row and column family * @see #matchesRowCol(HStoreKey) * @see #matchesWithoutColumn(HStoreKey) @@ -307,7 +307,7 @@ public class HStoreKey implements WritableComparable, HeapSize { // Comparable /** - * @param o + * @param o * @return int * @deprecated Use Comparators instead. This can give wrong results. */ @@ -329,7 +329,7 @@ public class HStoreKey implements WritableComparable, HeapSize { if (left == null && right == null) return 0; if (left == null) return -1; if (right == null) return 1; - + int result = Bytes.compareTo(left.getRow(), right.getRow()); if (result != 0) { return result; @@ -357,7 +357,7 @@ public class HStoreKey implements WritableComparable, HeapSize { * @param column * @return New byte array that holds column family prefix only * (Does not include the colon DELIMITER). 
- * @throws ColumnNameParseException + * @throws ColumnNameParseException * @see #parseColumn(byte[]) */ public static byte [] getFamily(final byte [] column) @@ -372,7 +372,7 @@ public class HStoreKey implements WritableComparable, HeapSize { System.arraycopy(column, 0, result, 0, index); return result; } - + /** * @param column * @return Return hash of family portion of passed column. @@ -383,7 +383,7 @@ public class HStoreKey implements WritableComparable, HeapSize { // delimiter return Bytes.mapKey(column, index > 0? index: column.length); } - + /** * @param family * @param column @@ -429,7 +429,7 @@ public class HStoreKey implements WritableComparable, HeapSize { * @return Return array of size two whose first element has the family * prefix of passed column c and whose second element is the * column qualifier. - * @throws ColumnNameParseException + * @throws ColumnNameParseException */ public static byte [][] parseColumn(final byte [] c) throws ColumnNameParseException { @@ -559,7 +559,7 @@ public class HStoreKey implements WritableComparable, HeapSize { } /** - * @return The bytes of hsk gotten by running its + * @return The bytes of hsk gotten by running its * {@link Writable#write(java.io.DataOutput)} method. * @throws IOException */ @@ -573,7 +573,7 @@ public class HStoreKey implements WritableComparable, HeapSize { * row and column. This is a customized version of * {@link Writables#getBytes(Writable)} * @param hsk Instance - * @return The bytes of hsk gotten by running its + * @return The bytes of hsk gotten by running its * {@link Writable#write(java.io.DataOutput)} method. * @throws IOException */ @@ -796,7 +796,7 @@ public class HStoreKey implements WritableComparable, HeapSize { if (left == null && right == null) return 0; if (left == null) return -1; if (right == null) return 1; - + byte [] lrow = left.getRow(); byte [] rrow = right.getRow(); int result = compareRows(lrow, 0, lrow.length, rrow, 0, rrow.length); @@ -962,7 +962,7 @@ public class HStoreKey implements WritableComparable, HeapSize { } /** - * RawComparator for plain -- i.e. non-catalog table keys such as + * RawComparator for plain -- i.e. non-catalog table keys such as * -ROOT- and .META. -- HStoreKeys. Compares at byte level. Knows how to * handle the vints that introduce row and columns in the HSK byte array * representation. Adds diff --git a/src/java/org/apache/hadoop/hbase/HTableDescriptor.java b/src/java/org/apache/hadoop/hbase/HTableDescriptor.java index eeaa938..fc5eba4 100644 --- a/src/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/src/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -91,18 +91,18 @@ ISerializable { public static final boolean DEFAULT_READONLY = false; public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*64L; - + public static final long DEFAULT_MAX_FILESIZE = 1024*1024*256L; - + private volatile Boolean meta = null; private volatile Boolean root = null; // Key is hash of the family name. public final Map families = new TreeMap(Bytes.BYTES_RAWCOMPARATOR); - + /** - * Private constructor used internally creating table descriptors for + * Private constructor used internally creating table descriptors for * catalog tables: e.g. .META. and -ROOT-. */ protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families) { @@ -115,7 +115,7 @@ ISerializable { } /** - * Private constructor used internally creating table descriptors for + * Private constructor used internally creating table descriptors for * catalog tables: e.g. .META. and -ROOT-. 
*/ protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families, @@ -131,8 +131,8 @@ ISerializable { this.values.put(entry.getKey(), entry.getValue()); } } - - + + /** * Constructs an empty object. * For deserializing an HTableDescriptor instance only. @@ -172,7 +172,7 @@ ISerializable { /** * Constructor. *

- * Makes a deep copy of the supplied descriptor. + * Makes a deep copy of the supplied descriptor. * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor. * @param desc The descriptor. */ @@ -287,7 +287,7 @@ ISerializable { public byte[] getValue(byte[] key) { return getValue(new ImmutableBytesWritable(key)); } - + private byte[] getValue(final ImmutableBytesWritable key) { ImmutableBytesWritable ibw = values.get(key); if (ibw == null) @@ -320,7 +320,7 @@ ISerializable { public void setValue(byte[] key, byte[] value) { setValue(new ImmutableBytesWritable(key), value); } - + /* * @param key The key. * @param value The value. @@ -409,7 +409,7 @@ ISerializable { return Long.valueOf(Bytes.toString(value)).longValue(); return DEFAULT_MEMSTORE_FLUSH_SIZE; } - + /** * @param memstoreFlushSize memory cache flush size for each hregion */ @@ -600,7 +600,7 @@ ISerializable { public Collection getFamilies() { return Collections.unmodifiableCollection(this.families.values()); } - + /** * @return Immutable sorted set of the keys of the families. */ @@ -646,7 +646,7 @@ ISerializable { 10, // Ten is arbitrary number. Keep versions to help debuggging. Compression.Algorithm.NONE.getName(), true, true, 8 * 1024, HConstants.FOREVER, false) }); - + /** Table descriptor for .META. catalog table */ public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor( HConstants.META_TABLE_NAME, new HColumnDescriptor[] { diff --git a/src/java/org/apache/hadoop/hbase/KeyValue.java b/src/java/org/apache/hadoop/hbase/KeyValue.java index f2d8538..e7b6e0e 100644 --- a/src/java/org/apache/hadoop/hbase/KeyValue.java +++ b/src/java/org/apache/hadoop/hbase/KeyValue.java @@ -35,19 +35,19 @@ import org.apache.hadoop.io.Writable; /** * An HBase Key/Value. - * + * *

If being used client-side, the primary methods to access individual fields - * are {@link #getRow()}, {@link #getFamily()}, {@link #getQualifier()}, + * are {@link #getRow()}, {@link #getFamily()}, {@link #getQualifier()}, * {@link #getTimestamp()}, and {@link #getValue()}. These methods allocate new * byte arrays and return copies so they should be avoided server-side. - * + * *

Instances of this class are immutable. They are not * comparable but Comparators are provided. Comparators change with context, * whether user table or a catalog table comparison context. Its * important that you use the appropriate comparator comparing rows in * particular. There are Comparators for KeyValue instances and then for * just the Key portion of a KeyValue used mostly in {@link HFile}. - * + * *

KeyValue wraps a byte array and has offset and length for passed array * at where to start interpreting the content as a KeyValue blob. The KeyValue * blob format inside the byte array is: @@ -57,7 +57,7 @@ import org.apache.hadoop.io.Writable; * Rowlength maximum is Short.MAX_SIZE, column family length maximum is * Byte.MAX_SIZE, and column qualifier + key length must be < Integer.MAX_SIZE. * The column does not contain the family/qualifier delimiter. - * + * *

TODO: Group Key-only comparators and operations into a Key class, just * for neatness sake, if can figure what to call it. */ @@ -71,7 +71,7 @@ public class KeyValue implements Writable, HeapSize { public static final byte[] COLUMN_FAMILY_DELIM_ARRAY = new byte[]{COLUMN_FAMILY_DELIMITER}; - + /** * Comparator for plain key/values; i.e. non-catalog table key/values. */ @@ -109,10 +109,10 @@ public class KeyValue implements Writable, HeapSize { /** * Get the appropriate row comparator for the specified table. - * + * * Hopefully we can get rid of this, I added this here because it's replacing * something in HSK. We should move completely off of that. - * + * * @param tableName The table name. * @return The comparator. */ @@ -161,13 +161,13 @@ public class KeyValue implements Writable, HeapSize { // Maximum is used when searching; you look from maximum on down. Maximum((byte)255); - + private final byte code; - + Type(final byte c) { this.code = c; } - + public byte getCode() { return this.code; } @@ -193,9 +193,9 @@ public class KeyValue implements Writable, HeapSize { * Makes a Key with highest possible Timestamp, empty row and column. No * key can be equal or lower than this one in memstore or in store file. */ - public static final KeyValue LOWESTKEY = + public static final KeyValue LOWESTKEY = new KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP); - + private byte [] bytes = null; private int offset = 0; private int length = 0; @@ -216,7 +216,7 @@ public class KeyValue implements Writable, HeapSize { /** Dragon time over, return to normal business */ - + /** Writable Constructor -- DO NOT USE */ public KeyValue() {} @@ -254,23 +254,23 @@ public class KeyValue implements Writable, HeapSize { } /** Temporary constructors until 880/1249 is committed to remove deps */ - + /** * Temporary. */ public KeyValue(final byte [] row, final byte [] column) { this(row, column, HConstants.LATEST_TIMESTAMP, null); } - + public KeyValue(final byte [] row, final byte [] column, long ts) { this(row, column, ts, null); } - + public KeyValue(final byte [] row, final byte [] column, long ts, byte [] value) { this(row, column, ts, Type.Put, value); } - + public KeyValue(final byte [] row, final byte [] column, long ts, Type type, byte [] value) { int rlength = row == null ? 0 : row.length; @@ -281,9 +281,9 @@ public class KeyValue implements Writable, HeapSize { this.length = this.bytes.length; this.offset = 0; } - + /** Constructors that build a new backing byte array from fields */ - + /** * Constructs KeyValue structure filled with null value. 
* Sets type to {@link KeyValue.Type#Maximum} @@ -310,7 +310,7 @@ public class KeyValue implements Writable, HeapSize { * @param family family name * @param qualifier column qualifier */ - public KeyValue(final byte [] row, final byte [] family, + public KeyValue(final byte [] row, final byte [] family, final byte [] qualifier) { this(row, family, qualifier, HConstants.LATEST_TIMESTAMP, Type.Maximum); } @@ -321,7 +321,7 @@ public class KeyValue implements Writable, HeapSize { * @param family family name * @param qualifier column qualifier */ - public KeyValue(final byte [] row, final byte [] family, + public KeyValue(final byte [] row, final byte [] family, final byte [] qualifier, final byte [] value) { this(row, family, qualifier, HConstants.LATEST_TIMESTAMP, Type.Put, value); } @@ -339,7 +339,7 @@ public class KeyValue implements Writable, HeapSize { final byte[] qualifier, final long timestamp, Type type) { this(row, family, qualifier, timestamp, type, null); } - + /** * Constructs KeyValue structure filled with specified values. * @param row row key @@ -353,7 +353,7 @@ public class KeyValue implements Writable, HeapSize { final byte[] qualifier, final long timestamp, final byte[] value) { this(row, family, qualifier, timestamp, Type.Put, value); } - + /** * Constructs KeyValue structure filled with specified values. * @param row row key @@ -367,9 +367,9 @@ public class KeyValue implements Writable, HeapSize { public KeyValue(final byte[] row, final byte[] family, final byte[] qualifier, final long timestamp, Type type, final byte[] value) { - this(row, family, qualifier, 0, qualifier==null ? 0 : qualifier.length, + this(row, family, qualifier, 0, qualifier==null ? 0 : qualifier.length, timestamp, type, value, 0, value==null ? 0 : value.length); - } + } /** * Constructs KeyValue structure filled with specified values. @@ -385,12 +385,12 @@ public class KeyValue implements Writable, HeapSize { * @param vlength value length * @throws IllegalArgumentException */ - public KeyValue(byte [] row, byte [] family, - byte [] qualifier, int qoffset, int qlength, long timestamp, Type type, + public KeyValue(byte [] row, byte [] family, + byte [] qualifier, int qoffset, int qlength, long timestamp, Type type, byte [] value, int voffset, int vlength) { - this(row, 0, row==null ? 0 : row.length, + this(row, 0, row==null ? 0 : row.length, family, 0, family==null ? 0 : family.length, - qualifier, qoffset, qlength, timestamp, type, + qualifier, qoffset, qlength, timestamp, type, value, voffset, vlength); } @@ -419,7 +419,7 @@ public class KeyValue implements Writable, HeapSize { final byte [] qualifier, final int qoffset, final int qlength, final long timestamp, final Type type, final byte [] value, final int voffset, final int vlength) { - this.bytes = createByteArray(row, roffset, rlength, + this.bytes = createByteArray(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, qlength, timestamp, type, value, voffset, vlength); this.length = bytes.length; @@ -428,7 +428,7 @@ public class KeyValue implements Writable, HeapSize { /** * Write KeyValue format into a byte array. - * + * * @param row row key * @param roffset row offset * @param rlength row length @@ -443,7 +443,7 @@ public class KeyValue implements Writable, HeapSize { * @param value column value * @param voffset value offset * @param vlength value length - * @return The newly created byte array. + * @return The newly created byte array. 
*/ static byte [] createByteArray(final byte [] row, final int roffset, final int rlength, final byte [] family, final int foffset, int flength, @@ -476,10 +476,10 @@ public class KeyValue implements Writable, HeapSize { // Value length vlength = value == null? 0 : vlength; if (vlength > HConstants.MAXIMUM_VALUE_LENGTH) { - throw new IllegalArgumentException("Valuer > " + + throw new IllegalArgumentException("Valuer > " + HConstants.MAXIMUM_VALUE_LENGTH); } - + // Allocate right-sized byte array. byte [] bytes = new byte[KEYVALUE_INFRASTRUCTURE_SIZE + keylength + vlength]; // Write key, value and key row length. @@ -502,7 +502,7 @@ public class KeyValue implements Writable, HeapSize { } return bytes; } - + /** * Write KeyValue format into a byte array. *

@@ -518,7 +518,7 @@ public class KeyValue implements Writable, HeapSize { * @param value * @param voffset * @param vlength - * @return The newly created byte array. + * @return The newly created byte array. */ static byte [] createByteArray(final byte [] row, final int roffset, final int rlength, @@ -560,7 +560,7 @@ public class KeyValue implements Writable, HeapSize { // KeyValue cloning // //--------------------------------------------------------------------------- - + /** * Clones a KeyValue. This creates a copy, re-allocating the buffer. * @return Fully copied clone of this KeyValue @@ -576,7 +576,7 @@ public class KeyValue implements Writable, HeapSize { // String representation // //--------------------------------------------------------------------------- - + public String toString() { if (this.bytes == null || this.bytes.length == 0) { return "empty"; @@ -627,7 +627,7 @@ public class KeyValue implements Writable, HeapSize { // Public Member Accessors // //--------------------------------------------------------------------------- - + /** * @return The byte array backing this KeyValue. */ @@ -654,7 +654,7 @@ public class KeyValue implements Writable, HeapSize { // Length and Offset Calculators // //--------------------------------------------------------------------------- - + /** * Determines the total length of the KeyValue stored in the specified * byte array and offset. Includes all headers. @@ -663,7 +663,7 @@ public class KeyValue implements Writable, HeapSize { * @return length of entire KeyValue, in bytes */ private static int getLength(byte [] bytes, int offset) { - return (2 * Bytes.SIZEOF_INT) + + return (2 * Bytes.SIZEOF_INT) + Bytes.toInt(bytes, offset) + Bytes.toInt(bytes, offset + Bytes.SIZEOF_INT); } @@ -692,7 +692,7 @@ public class KeyValue implements Writable, HeapSize { public int getValueOffset() { return getKeyOffset() + getKeyLength(); } - + /** * @return Value length */ @@ -706,7 +706,7 @@ public class KeyValue implements Writable, HeapSize { public int getRowOffset() { return getKeyOffset() + Bytes.SIZEOF_SHORT; } - + /** * @return Row length */ @@ -720,21 +720,21 @@ public class KeyValue implements Writable, HeapSize { public int getFamilyOffset() { return getFamilyOffset(getRowLength()); } - + /** * @return Family offset */ public int getFamilyOffset(int rlength) { return this.offset + ROW_OFFSET + Bytes.SIZEOF_SHORT + rlength + Bytes.SIZEOF_BYTE; } - + /** * @return Family length */ public byte getFamilyLength() { return getFamilyLength(getFamilyOffset()); } - + /** * @return Family length */ @@ -748,29 +748,29 @@ public class KeyValue implements Writable, HeapSize { public int getQualifierOffset() { return getQualifierOffset(getFamilyOffset()); } - + /** * @return Qualifier offset */ public int getQualifierOffset(int foffset) { return foffset + getFamilyLength(foffset); } - + /** * @return Qualifier length */ public int getQualifierLength() { return getQualifierLength(getRowLength(),getFamilyLength()); } - + /** * @return Qualifier length */ public int getQualifierLength(int rlength, int flength) { - return getKeyLength() - + return getKeyLength() - (KEY_INFRASTRUCTURE_SIZE + rlength + flength); } - + /** * @return Column (family + qualifier) length */ @@ -779,7 +779,7 @@ public class KeyValue implements Writable, HeapSize { int foffset = getFamilyOffset(rlength); return getTotalColumnLength(rlength,foffset); } - + /** * @return Column (family + qualifier) length */ @@ -788,14 +788,14 @@ public class KeyValue implements Writable, HeapSize { int qlength 
= getQualifierLength(rlength,flength); return flength + qlength; } - + /** * @return Timestamp offset */ public int getTimestampOffset() { return getTimestampOffset(getKeyLength()); } - + /** * @param keylength Pass if you have it to save on a int creation. * @return Timestamp offset @@ -808,7 +808,7 @@ public class KeyValue implements Writable, HeapSize { * @return True if this KeyValue has a LATEST_TIMESTAMP timestamp. */ public boolean isLatestTimestamp() { - return Bytes.compareTo(getBuffer(), getTimestampOffset(), Bytes.SIZEOF_LONG, + return Bytes.compareTo(getBuffer(), getTimestampOffset(), Bytes.SIZEOF_LONG, HConstants.LATEST_TIMESTAMP_BYTES, 0, Bytes.SIZEOF_LONG) == 0; } @@ -820,17 +820,17 @@ public class KeyValue implements Writable, HeapSize { } return false; } - + //--------------------------------------------------------------------------- // // Methods that return copies of fields // //--------------------------------------------------------------------------- - + /** * Do not use unless you have to. Used internally for compacting and testing. - * - * Use {@link #getRow()}, {@link #getFamily()}, {@link #getQualifier()}, and + * + * Use {@link #getRow()}, {@link #getFamily()}, {@link #getQualifier()}, and * {@link #getValue()} if accessing a KeyValue client-side. * @return Copy of the key portion only. */ @@ -840,7 +840,7 @@ public class KeyValue implements Writable, HeapSize { System.arraycopy(getBuffer(), getKeyOffset(), key, 0, keylength); return key; } - + /** * Returns value in a new byte array. * Primarily for use client-side. If server-side, use @@ -855,12 +855,12 @@ public class KeyValue implements Writable, HeapSize { System.arraycopy(getBuffer(), o, result, 0, l); return result; } - + /** * Primarily for use client-side. Returns the row of this KeyValue in a new * byte array.

- * - * If server-side, use {@link #getBuffer()} with appropriate offsets and + * + * If server-side, use {@link #getBuffer()} with appropriate offsets and * lengths instead. * @return Row in a new byte array. */ @@ -873,7 +873,7 @@ public class KeyValue implements Writable, HeapSize { } /** - * + * * @return Timestamp */ public long getTimestamp() { @@ -931,8 +931,8 @@ public class KeyValue implements Writable, HeapSize { /** * Primarily for use client-side. Returns the column of this KeyValue in the * deprecated format: family:qualifier, and in a new byte array.

- * - * If server-side, use {@link #getBuffer()} with appropriate offsets and + * + * If server-side, use {@link #getBuffer()} with appropriate offsets and * lengths instead. * @return Returns column. Makes a copy. Inserts delimiter. */ @@ -948,10 +948,10 @@ public class KeyValue implements Writable, HeapSize { } /** - * Primarily for use client-side. Returns the family of this KeyValue in a + * Primarily for use client-side. Returns the family of this KeyValue in a * new byte array.

- * - * If server-side, use {@link #getBuffer()} with appropriate offsets and + * + * If server-side, use {@link #getBuffer()} with appropriate offsets and * lengths instead. * @return Returns family. Makes a copy. */ @@ -964,10 +964,10 @@ public class KeyValue implements Writable, HeapSize { } /** - * Primarily for use client-side. Returns the column qualifier of this + * Primarily for use client-side. Returns the column qualifier of this * KeyValue in a new byte array.

- * - * If server-side, use {@link #getBuffer()} with appropriate offsets and + * + * If server-side, use {@link #getBuffer()} with appropriate offsets and * lengths instead. * Use {@link #getBuffer()} with appropriate offsets and lengths instead. * @return Returns qualifier. Makes a copy. @@ -985,7 +985,7 @@ public class KeyValue implements Writable, HeapSize { // KeyValue splitter // //--------------------------------------------------------------------------- - + /** * Utility class that splits a KeyValue buffer into separate byte arrays. *

@@ -1009,7 +1009,7 @@ public class KeyValue implements Writable, HeapSize { public byte [] getType() { return this.split[4]; } public byte [] getValue() { return this.split[5]; } } - + public SplitKeyValue split() { SplitKeyValue split = new SplitKeyValue(); int splitOffset = this.offset; @@ -1049,13 +1049,13 @@ public class KeyValue implements Writable, HeapSize { split.setValue(value); return split; } - + //--------------------------------------------------------------------------- // - // Compare specified fields against those contained in this KeyValue + // Compare specified fields against those contained in this KeyValue // //--------------------------------------------------------------------------- - + /** * @param family * @return True if matching families. @@ -1076,7 +1076,7 @@ public class KeyValue implements Writable, HeapSize { public boolean matchingQualifier(final byte [] qualifier) { int o = getQualifierOffset(); int l = getQualifierLength(); - return Bytes.compareTo(qualifier, 0, qualifier.length, + return Bytes.compareTo(qualifier, 0, qualifier.length, this.bytes, o, l) == 0; } @@ -1178,7 +1178,7 @@ public class KeyValue implements Writable, HeapSize { /** * Splits a column in family:qualifier form into separate byte arrays. - * + * * @param c The column. * @return The parsed column. */ @@ -1199,7 +1199,7 @@ public class KeyValue implements Writable, HeapSize { len); return result; } - + /** * @param b * @return Index of the family-qualifier colon delimiter character in passed @@ -1278,7 +1278,7 @@ public class KeyValue implements Writable, HeapSize { */ public static class RootComparator extends MetaComparator { private final KeyComparator rawcomparator = new RootKeyComparator(); - + public KeyComparator getRawComparator() { return this.rawcomparator; } @@ -1349,7 +1349,7 @@ public class KeyValue implements Writable, HeapSize { * @return Result comparing rows. */ public int compareRows(final KeyValue left, final KeyValue right) { - return compareRows(left, left.getRowLength(), right, + return compareRows(left, left.getRowLength(), right, right.getRowLength()); } @@ -1382,7 +1382,7 @@ public class KeyValue implements Writable, HeapSize { return getRawComparator().compareRows(left, loffset, llength, right, roffset, rlength); } - + public int compareColumns(final KeyValue left, final byte [] right, final int roffset, final int rlength, final int rfamilyoffset) { int offset = left.getFamilyOffset(); @@ -1460,7 +1460,7 @@ public class KeyValue implements Writable, HeapSize { public boolean matchingRows(final byte [] left, final int loffset, final int llength, final byte [] right, final int roffset, final int rlength) { - int compare = compareRows(left, loffset, llength, + int compare = compareRows(left, loffset, llength, right, roffset, rlength); if (compare != 0) { return false; @@ -1489,7 +1489,7 @@ public class KeyValue implements Writable, HeapSize { protected Object clone() throws CloneNotSupportedException { return new KVComparator(); } - + /** * @return Comparator that ignores timestamps; useful counting versions. */ @@ -1534,7 +1534,7 @@ public class KeyValue implements Writable, HeapSize { * Create a KeyValue that is smaller than all other possible KeyValues * for the given row. That is any (valid) KeyValue on 'row' would sort * _after_ the result. 
- * + * * @param row - row key (arbitrary byte array) * @return First possible KeyValue on passed row */ @@ -1593,7 +1593,7 @@ public class KeyValue implements Writable, HeapSize { final byte [] q, final long ts) { return new KeyValue(row, f, q, ts, Type.Maximum); } - + /** * @param b * @param o @@ -1622,7 +1622,7 @@ public class KeyValue implements Writable, HeapSize { // "---" + Bytes.toString(right, roffset, rlength)); final int metalength = 7; // '.META.' length int lmetaOffsetPlusDelimiter = loffset + metalength; - int leftFarDelimiter = getDelimiterInReverse(left, + int leftFarDelimiter = getDelimiterInReverse(left, lmetaOffsetPlusDelimiter, llength - metalength, HRegionInfo.DELIMITER); int rmetaOffsetPlusDelimiter = roffset + metalength; @@ -1756,7 +1756,7 @@ public class KeyValue implements Writable, HeapSize { // if row matches, and no column in the 'left' AND put type is 'minimum', // then return that left is larger than right. - + // This supports 'last key on a row' - the magic is if there is no column in the // left operand, and the left operand has a type of '0' - magical value, // then we say the left is bigger. This will let us seek to the last key in @@ -1778,7 +1778,7 @@ public class KeyValue implements Writable, HeapSize { if (compare != 0) { return compare; } - + if (!this.ignoreTimestamp) { // Get timestamps. long ltimestamp = Bytes.toLong(left, @@ -1828,15 +1828,15 @@ public class KeyValue implements Writable, HeapSize { return 0; } } - + // HeapSize public long heapSize() { - return ClassSize.align(ClassSize.OBJECT + ClassSize.REFERENCE + - ClassSize.align(ClassSize.ARRAY + length) + + return ClassSize.align(ClassSize.OBJECT + ClassSize.REFERENCE + + ClassSize.align(ClassSize.ARRAY + length) + (2 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_LONG); } - + // this overload assumes that the length bytes have already been read, // and it expects the length of the KeyValue to be explicitly passed // to it. @@ -1846,13 +1846,13 @@ public class KeyValue implements Writable, HeapSize { this.bytes = new byte[this.length]; in.readFully(this.bytes, 0, this.length); } - + // Writable public void readFields(final DataInput in) throws IOException { int length = in.readInt(); readFields(length, in); } - + public void write(final DataOutput out) throws IOException { out.writeInt(this.length); out.write(this.bytes, this.offset, this.length); diff --git a/src/java/org/apache/hadoop/hbase/LeaseListener.java b/src/java/org/apache/hadoop/hbase/LeaseListener.java index 90a32ef..54b3452 100644 --- a/src/java/org/apache/hadoop/hbase/LeaseListener.java +++ b/src/java/org/apache/hadoop/hbase/LeaseListener.java @@ -21,11 +21,11 @@ package org.apache.hadoop.hbase; /** - * LeaseListener is an interface meant to be implemented by users of the Leases + * LeaseListener is an interface meant to be implemented by users of the Leases * class. * * It receives events from the Leases class about the status of its accompanying - * lease. Users of the Leases class can use a LeaseListener subclass to, for + * lease. Users of the Leases class can use a LeaseListener subclass to, for * example, clean up resources after a lease has expired. 
*/ public interface LeaseListener { diff --git a/src/java/org/apache/hadoop/hbase/Leases.java b/src/java/org/apache/hadoop/hbase/Leases.java index a78ffef..f2410d5 100644 --- a/src/java/org/apache/hadoop/hbase/Leases.java +++ b/src/java/org/apache/hadoop/hbase/Leases.java @@ -36,15 +36,15 @@ import java.io.IOException; * * There are several server classes in HBase that need to track external * clients that occasionally send heartbeats. - * + * *

These external clients hold resources in the server class. * Those resources need to be released if the external client fails to send a * heartbeat after some interval of time passes. * *

The Leases class is a general reusable class for this kind of pattern. - * An instance of the Leases class will create a thread to do its dirty work. + * An instance of the Leases class will create a thread to do its dirty work. * You should close() the instance if you want to clean up the thread properly. - * + * *

* NOTE: This class extends Thread rather than Chore because the sleep time * can be interrupted when there is something to do, rather than the Chore @@ -60,7 +60,7 @@ public class Leases extends Thread { /** * Creates a lease monitor - * + * * @param leasePeriod - length of time (milliseconds) that the lease is valid * @param leaseCheckFrequency - how often the lease should be checked * (milliseconds) @@ -114,9 +114,9 @@ public class Leases extends Thread { public void closeAfterLeasesExpire() { this.stopRequested = true; } - + /** - * Shut down this Leases instance. All pending leases will be destroyed, + * Shut down this Leases instance. All pending leases will be destroyed, * without any cancellation calls. */ public void close() { @@ -132,10 +132,10 @@ public class Leases extends Thread { /** * Obtain a lease - * + * * @param leaseName name of the lease * @param listener listener that will process lease expirations - * @throws LeaseStillHeldException + * @throws LeaseStillHeldException */ public void createLease(String leaseName, final LeaseListener listener) throws LeaseStillHeldException { @@ -160,25 +160,25 @@ public class Leases extends Thread { @SuppressWarnings("serial") public static class LeaseStillHeldException extends IOException { private final String leaseName; - + /** * @param name */ public LeaseStillHeldException(final String name) { this.leaseName = name; } - + /** @return name of lease */ public String getName() { return this.leaseName; } } - + /** * Renew a lease - * + * * @param leaseName name of lease - * @throws LeaseException + * @throws LeaseException */ public void renewLease(final String leaseName) throws LeaseException { synchronized (leaseQueue) { @@ -197,9 +197,9 @@ public class Leases extends Thread { /** * Client explicitly cancels a lease. - * + * * @param leaseName name of lease - * @throws LeaseException + * @throws LeaseException */ public void cancelLease(final String leaseName) throws LeaseException { synchronized (leaseQueue) { @@ -227,7 +227,7 @@ public class Leases extends Thread { public String getLeaseName() { return leaseName; } - + /** @return listener */ public LeaseListener getListener() { return this.listener; @@ -246,7 +246,7 @@ public class Leases extends Thread { } return this.hashCode() == ((Lease) obj).hashCode(); } - + @Override public int hashCode() { return this.leaseName.hashCode(); diff --git a/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index 2a3af51..7abf37c 100644 --- a/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ b/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -36,18 +36,18 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil; /** * This class creates a single process HBase cluster. One thread is created for * a master and one per region server. - * + * * Call {@link #startup()} to start the cluster running and {@link #shutdown()} * to close it all down. {@link #join} the cluster is you want to wait on * shutdown completion. - * + * *

Runs master on port 60000 by default. Because we can't just kill the * process -- not till HADOOP-1700 gets fixed and even then.... -- we need to * be able to find the master with a remote client to run shutdown. To use a * port other than 60000, set the hbase.master to a value of 'local:PORT': * that is 'local', not 'localhost', and the port number the master should use * instead of 60000. - * + * *

To make 'local' mode more responsive, make values such as * hbase.regionserver.msginterval, * hbase.master.meta.thread.rescanfrequency, and diff --git a/src/java/org/apache/hadoop/hbase/NotServingRegionException.java b/src/java/org/apache/hadoop/hbase/NotServingRegionException.java index 5c93ebe..32da8cb 100644 --- a/src/java/org/apache/hadoop/hbase/NotServingRegionException.java +++ b/src/java/org/apache/hadoop/hbase/NotServingRegionException.java @@ -42,7 +42,7 @@ public class NotServingRegionException extends IOException { public NotServingRegionException(String s) { super(s); } - + /** * Constructor * @param s message diff --git a/src/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java b/src/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java index 6fc8e57..c73ff53 100644 --- a/src/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java +++ b/src/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java @@ -25,14 +25,14 @@ import java.lang.reflect.InvocationTargetException; import org.apache.hadoop.ipc.RemoteException; -/** +/** * An immutable class which contains a static method for handling * org.apache.hadoop.ipc.RemoteException exceptions. */ public class RemoteExceptionHandler { /* Not instantiable */ private RemoteExceptionHandler() {super();} - + /** * Examine passed Throwable. See if its carrying a RemoteException. If so, * run {@link #decodeRemoteException(RemoteException)} on it. Otherwise, @@ -53,7 +53,7 @@ public class RemoteExceptionHandler { } return result; } - + /** * Examine passed IOException. See if its carrying a RemoteException. If so, * run {@link #decodeRemoteException(RemoteException)} on it. Otherwise, @@ -66,17 +66,17 @@ public class RemoteExceptionHandler { Throwable t = checkThrowable(e); return t instanceof IOException? (IOException)t: new IOException(t); } - + /** * Converts org.apache.hadoop.ipc.RemoteException into original exception, * if possible. If the original exception is an Error or a RuntimeException, * throws the original exception. - * + * * @param re original exception * @return decoded RemoteException if it is an instance of or a subclass of * IOException, or the original RemoteException if it cannot be decoded. - * - * @throws IOException indicating a server error ocurred if the decoded + * + * @throws IOException indicating a server error ocurred if the decoded * exception is not an IOException. The decoded exception is set as * the cause. 
*/ @@ -89,10 +89,10 @@ public class RemoteExceptionHandler { Class[] parameterTypes = { String.class }; Constructor ctor = c.getConstructor(parameterTypes); - + Object[] arguments = { re.getMessage() }; Throwable t = (Throwable) ctor.newInstance(arguments); - + if (t instanceof IOException) { i = (IOException) t; diff --git a/src/java/org/apache/hadoop/hbase/TableExistsException.java b/src/java/org/apache/hadoop/hbase/TableExistsException.java index bbcc295..5fde219 100644 --- a/src/java/org/apache/hadoop/hbase/TableExistsException.java +++ b/src/java/org/apache/hadoop/hbase/TableExistsException.java @@ -29,7 +29,7 @@ public class TableExistsException extends IOException { /** * Constructor - * + * * @param s message */ public TableExistsException(String s) { diff --git a/src/java/org/apache/hadoop/hbase/ValueOverMaxLengthException.java b/src/java/org/apache/hadoop/hbase/ValueOverMaxLengthException.java index 2bc136d..383c9db 100644 --- a/src/java/org/apache/hadoop/hbase/ValueOverMaxLengthException.java +++ b/src/java/org/apache/hadoop/hbase/ValueOverMaxLengthException.java @@ -23,7 +23,7 @@ package org.apache.hadoop.hbase; * Thrown when a value is longer than the specified LENGTH */ public class ValueOverMaxLengthException extends DoNotRetryIOException { - + private static final long serialVersionUID = -5525656352372008316L; /** diff --git a/src/java/org/apache/hadoop/hbase/VersionAnnotation.java b/src/java/org/apache/hadoop/hbase/VersionAnnotation.java index bf29adf..ecea580 100644 --- a/src/java/org/apache/hadoop/hbase/VersionAnnotation.java +++ b/src/java/org/apache/hadoop/hbase/VersionAnnotation.java @@ -26,29 +26,29 @@ import java.lang.annotation.*; @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.PACKAGE) public @interface VersionAnnotation { - + /** * Get the Hadoop version * @return the version string "0.6.3-dev" */ String version(); - + /** * Get the username that compiled Hadoop. */ String user(); - + /** * Get the date when Hadoop was compiled. * @return the date in unix 'date' format */ String date(); - + /** * Get the url for the subversion repository. */ String url(); - + /** * Get the subversion revision. * @return the revision number as a string (eg. "451451") diff --git a/src/java/org/apache/hadoop/hbase/client/Delete.java b/src/java/org/apache/hadoop/hbase/client/Delete.java index 1dfa863..677e58d 100644 --- a/src/java/org/apache/hadoop/hbase/client/Delete.java +++ b/src/java/org/apache/hadoop/hbase/client/Delete.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** * Used to perform Delete operations on a single row. *

- * To delete an entire row, instantiate a Delete object with the row + * To delete an entire row, instantiate a Delete object with the row * to delete. To further define the scope of what to delete, perform * additional methods as outlined below. *

@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.util.Bytes; *

* To delete multiple versions of specific columns, execute * {@link #deleteColumns(byte [],byte []) deleteColumns} - * for each column to delete. + * for each column to delete. *

* To delete specific versions of specific columns, execute * {@link #deleteColumn(byte [],byte [],long) deleteColumn} @@ -67,10 +67,10 @@ import org.apache.hadoop.hbase.util.Bytes; */ public class Delete implements Writable, Row, Comparable { private byte [] row = null; - // This ts is only used when doing a deleteRow. Anything less, + // This ts is only used when doing a deleteRow. Anything less, private long ts; private long lockId = -1L; - private final Map> familyMap = + private final Map> familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); /** Constructor for Writable. DO NOT USE */ @@ -93,12 +93,12 @@ public class Delete implements Writable, Row, Comparable { /** * Create a Delete operation for the specified row and timestamp, using * an optional row lock.

- * + * * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the + * families of the specified row with a timestamp less than or equal to the * specified timestamp.

- * - * This timestamp is ONLY used for a delete row operation. If specifying + * + * This timestamp is ONLY used for a delete row operation. If specifying * families or columns, you must specify each timestamp individually. * @param row row key * @param timestamp maximum version timestamp (only for delete row) @@ -166,7 +166,7 @@ public class Delete implements Writable, Row, Comparable { familyMap.put(family, list); return this; } - + /** * Delete all versions of the specified column. * @param family family name @@ -176,7 +176,7 @@ public class Delete implements Writable, Row, Comparable { this.deleteColumns(family, qualifier, HConstants.LATEST_TIMESTAMP); return this; } - + /** * Delete all versions of the specified column with a timestamp less than * or equal to the specified timestamp. @@ -194,20 +194,20 @@ public class Delete implements Writable, Row, Comparable { familyMap.put(family, list); return this; } - + /** - * Delete all versions of the specified column, given in + * Delete all versions of the specified column, given in * family:qualifier notation, and with a timestamp less than - * or equal to the specified timestamp. + * or equal to the specified timestamp. * @param column colon-delimited family and qualifier - * @param timestamp maximum version timestamp + * @param timestamp maximum version timestamp */ public Delete deleteColumns(byte [] column, long timestamp) { byte [][] parts = KeyValue.parseColumn(column); this.deleteColumns(parts[0], parts[1], timestamp); return this; } - + /** * Delete the latest version of the specified column. * This is an expensive call in that on the server-side, it first does a @@ -220,7 +220,7 @@ public class Delete implements Writable, Row, Comparable { this.deleteColumn(family, qualifier, HConstants.LATEST_TIMESTAMP); return this; } - + /** * Delete the specified version of the specified column. * @param family family name @@ -242,26 +242,26 @@ public class Delete implements Writable, Row, Comparable { byte [][] parts = KeyValue.parseColumn(column); this.deleteColumns(parts[0], parts[1]); } - + /** * Delete the latest version of the specified column, given in * family:qualifier notation. - * @param column colon-delimited family and qualifier + * @param column colon-delimited family and qualifier */ public Delete deleteColumn(byte [] column) { byte [][] parts = KeyValue.parseColumn(column); this.deleteColumn(parts[0], parts[1], HConstants.LATEST_TIMESTAMP); return this; } - + /** - * Method for retrieving the delete's familyMap + * Method for retrieving the delete's familyMap * @return familyMap */ public Map> getFamilyMap() { return this.familyMap; } - + /** * Method for retrieving the delete's row * @return row @@ -269,7 +269,7 @@ public class Delete implements Writable, Row, Comparable { public byte [] getRow() { return this.row; } - + /** * Method for retrieving the delete's RowLock * @return RowLock @@ -277,16 +277,16 @@ public class Delete implements Writable, Row, Comparable { public RowLock getRowLock() { return new RowLock(this.row, this.lockId); } - + /** * Method for retrieving the delete's lock ID. - * + * * @return The lock ID. 
*/ public long getLockId() { return this.lockId; } - + /** * Method for retrieving the delete's timestamp * @return timestamp @@ -294,7 +294,7 @@ public class Delete implements Writable, Row, Comparable { public long getTimeStamp() { return this.ts; } - + /** * @return string */ @@ -330,7 +330,7 @@ public class Delete implements Writable, Row, Comparable { sb.append("}"); return sb.toString(); } - + //Writable public void readFields(final DataInput in) throws IOException { this.row = Bytes.readByteArray(in); @@ -349,8 +349,8 @@ public class Delete implements Writable, Row, Comparable { } this.familyMap.put(family, list); } - } - + } + public void write(final DataOutput out) throws IOException { Bytes.writeByteArray(out, this.row); out.writeLong(this.ts); diff --git a/src/java/org/apache/hadoop/hbase/client/Get.java b/src/java/org/apache/hadoop/hbase/client/Get.java index 7efad3e..ff0bdd5 100644 --- a/src/java/org/apache/hadoop/hbase/client/Get.java +++ b/src/java/org/apache/hadoop/hbase/client/Get.java @@ -41,7 +41,7 @@ import org.apache.hadoop.io.WritableFactories; * Used to perform Get operations on a single row. *

* To get everything for a row, instantiate a Get object with the row to get. - * To further define the scope of what to get, perform additional methods as + * To further define the scope of what to get, perform additional methods as * outlined below. *

* To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} @@ -67,7 +67,7 @@ public class Get implements Writable { private int maxVersions = 1; private Filter filter = null; private TimeRange tr = new TimeRange(); - private Map> familyMap = + private Map> familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); /** Constructor for Writable. DO NOT USE */ @@ -176,7 +176,7 @@ public class Get implements Writable { /** * Get versions of columns with the specified timestamp. - * @param timestamp version timestamp + * @param timestamp version timestamp */ public Get setTimeStamp(long timestamp) { try { @@ -230,7 +230,7 @@ public class Get implements Writable { /** * Method for retrieving the get's row - * @return row + * @return row */ public byte [] getRow() { return this.row; @@ -258,7 +258,7 @@ public class Get implements Writable { */ public int getMaxVersions() { return this.maxVersions; - } + } /** * Method for retrieving the get's TimeRange @@ -318,7 +318,7 @@ public class Get implements Writable { return sb.toString(); } boolean moreThanOne = false; - for(Map.Entry> entry : + for(Map.Entry> entry : this.familyMap.entrySet()) { if(moreThanOne) { sb.append("), "); @@ -363,7 +363,7 @@ public class Get implements Writable { this.tr = new TimeRange(); tr.readFields(in); int numFamilies = in.readInt(); - this.familyMap = + this.familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); for(int i=0; i> entry : + for(Map.Entry> entry : familyMap.entrySet()) { Bytes.writeByteArray(out, entry.getKey()); NavigableSet columnSet = entry.getValue(); @@ -419,6 +419,6 @@ public class Get implements Writable { return WritableFactories.newInstance(clazz, new Configuration()); } catch (ClassNotFoundException e) { throw new RuntimeException("Can't find class " + className); - } + } } } diff --git a/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 810f21c..e7407f3 100644 --- a/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -61,7 +61,7 @@ public class HBaseAdmin { /** * Constructor - * + * * @param conf Configuration object * @throws MasterNotRunningException */ @@ -85,7 +85,7 @@ public class HBaseAdmin { public HMasterInterface getMaster() throws MasterNotRunningException{ return this.connection.getMaster(); } - + /** @return - true if the master server is running */ public boolean isMasterRunning() { return this.connection.isMasterRunning(); @@ -121,14 +121,14 @@ public class HBaseAdmin { * catalog table that just contains table names and their descriptors. * Right now, it only exists as part of the META table's region info. * - * @return - returns an array of HTableDescriptors + * @return - returns an array of HTableDescriptors * @throws IOException */ public HTableDescriptor[] listTables() throws IOException { return this.connection.listTables(); } - + /** * Method for getting the tableDescriptor * @param tableName as a byte [] @@ -139,7 +139,7 @@ public class HBaseAdmin { throws IOException { return this.connection.getHTableDescriptor(tableName); } - + private long getPauseTime(int tries) { int triesCount = tries; if (triesCount >= HConstants.RETRY_BACKOFF.length) @@ -150,9 +150,9 @@ public class HBaseAdmin { /** * Creates a new table. * Synchronous operation. 
- * + * * @param desc table descriptor for table - * + * * @throws IllegalArgumentException if the table name is reserved * @throws MasterNotRunningException if master is not running * @throws TableExistsException if table already exists (If concurrent @@ -169,19 +169,19 @@ public class HBaseAdmin { * Creates a new table with the specified number of regions. The start key * specified will become the end key of the first region of the table, and * the end key specified will become the start key of the last region of the - * table (the first region has a null start key and the last region has a + * table (the first region has a null start key and the last region has a * null end key). - * + * * BigInteger math will be used to divide the key range specified into * enough segments to make the required number of total regions. - * + * * Synchronous operation. - * + * * @param desc table descriptor for table * @param startKey beginning of key range * @param endKey end of key range * @param numRegions the total number of regions to create - * + * * @throws IllegalArgumentException if the table name is reserved * @throws MasterNotRunningException if master is not running * @throws TableExistsException if table already exists (If concurrent @@ -189,7 +189,7 @@ public class HBaseAdmin { * and attempt-at-creation). * @throws IOException */ - public void createTable(HTableDescriptor desc, byte [] startKey, + public void createTable(HTableDescriptor desc, byte [] startKey, byte [] endKey, int numRegions) throws IOException { HTableDescriptor.isLegalTableName(desc.getName()); @@ -211,10 +211,10 @@ public class HBaseAdmin { * number of split keys plus one (the first region has a null start key and * the last region has a null end key). * Synchronous operation. - * + * * @param desc table descriptor for table * @param splitKeys array of split keys for the initial regions of the table - * + * * @throws IllegalArgumentException if the table name is reserved * @throws MasterNotRunningException if master is not running * @throws TableExistsException if table already exists (If concurrent @@ -242,7 +242,7 @@ public class HBaseAdmin { // Wait for new table to come on-line connection.locateRegion(desc.getName(), HConstants.EMPTY_START_ROW); break; - + } catch (RegionException e) { if (tries == numRetries - 1) { // Ran out of tries @@ -256,13 +256,13 @@ public class HBaseAdmin { } } } - + /** * Creates a new table but does not block and wait for it to come online. * Asynchronous operation. - * + * * @param desc table descriptor for table - * + * * @throws IllegalArgumentException Bad table name. * @throws MasterNotRunningException if master is not running * @throws TableExistsException if table already exists (If concurrent @@ -286,7 +286,7 @@ public class HBaseAdmin { /** * Deletes a table. * Synchronous operation. - * + * * @param tableName name of table to delete * @throws IOException */ @@ -297,7 +297,7 @@ public class HBaseAdmin { /** * Deletes a table. * Synchronous operation. - * + * * @param tableName name of table to delete * @throws IOException */ @@ -376,12 +376,12 @@ public class HBaseAdmin { LOG.info("Deleted " + Bytes.toString(tableName)); } - + /** * Brings a table on-line (enables it). * Synchronous operation. - * + * * @param tableName name of the table * @throws IOException */ @@ -392,7 +392,7 @@ public class HBaseAdmin { /** * Brings a table on-line (enables it). * Synchronous operation. 
- * + * * @param tableName name of the table * @throws IOException */ @@ -437,7 +437,7 @@ public class HBaseAdmin { * Disables a table (takes it off-line) If it is being served, the master * will tell the servers to stop serving it. * Synchronous operation. - * + * * @param tableName name of table * @throws IOException */ @@ -449,7 +449,7 @@ public class HBaseAdmin { * Disables a table (takes it off-line) If it is being served, the master * will tell the servers to stop serving it. * Synchronous operation. - * + * * @param tableName name of table * @throws IOException */ @@ -489,7 +489,7 @@ public class HBaseAdmin { } LOG.info("Disabled " + Bytes.toString(tableName)); } - + /** * @param tableName name of table to check * @return true if table is on-line @@ -506,7 +506,7 @@ public class HBaseAdmin { public boolean isTableEnabled(byte[] tableName) throws IOException { return connection.isTableEnabled(tableName); } - + /** * @param tableName name of table to check * @return true if table is off-line @@ -537,7 +537,7 @@ public class HBaseAdmin { /** * Add a column to an existing table. * Asynchronous operation. - * + * * @param tableName name of the table to add column to * @param column column descriptor of column to be added * @throws IOException @@ -550,7 +550,7 @@ public class HBaseAdmin { /** * Add a column to an existing table. * Asynchronous operation. - * + * * @param tableName name of the table to add column to * @param column column descriptor of column to be added * @throws IOException @@ -571,7 +571,7 @@ public class HBaseAdmin { /** * Delete a column from a table. * Asynchronous operation. - * + * * @param tableName name of table * @param columnName name of column to be deleted * @throws IOException @@ -584,7 +584,7 @@ public class HBaseAdmin { /** * Delete a column from a table. * Asynchronous operation. - * + * * @param tableName name of table * @param columnName name of column to be deleted * @throws IOException @@ -605,13 +605,13 @@ public class HBaseAdmin { /** * Modify an existing column family on a table. * Asynchronous operation. - * + * * @param tableName name of table * @param columnName name of column to be modified * @param descriptor new column descriptor to use * @throws IOException */ - public void modifyColumn(final String tableName, final String columnName, + public void modifyColumn(final String tableName, final String columnName, HColumnDescriptor descriptor) throws IOException { modifyColumn(Bytes.toBytes(tableName), Bytes.toBytes(columnName), @@ -621,13 +621,13 @@ public class HBaseAdmin { /** * Modify an existing column family on a table. * Asynchronous operation. - * + * * @param tableName name of table * @param columnName name of column to be modified * @param descriptor new column descriptor to use * @throws IOException */ - public void modifyColumn(final byte [] tableName, final byte [] columnName, + public void modifyColumn(final byte [] tableName, final byte [] columnName, HColumnDescriptor descriptor) throws IOException { if (this.master == null) { @@ -644,7 +644,7 @@ public class HBaseAdmin { /** * Close a region. For expert-admins. * Asynchronous operation. - * + * * @param regionname * @param args Optional server name. Otherwise, we'll send close to the * server registered in .META. @@ -658,7 +658,7 @@ public class HBaseAdmin { /** * Close a region. For expert-admins. * Asynchronous operation. - * + * * @param regionname * @param args Optional server name. Otherwise, we'll send close to the * server registered in .META. 
@@ -679,11 +679,11 @@ public class HBaseAdmin { modifyTable(HConstants.META_TABLE_NAME, HConstants.Modify.CLOSE_REGION, newargs); } - + /** * Flush a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName * @throws IOException */ @@ -694,7 +694,7 @@ public class HBaseAdmin { /** * Flush a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName * @throws IOException */ @@ -705,7 +705,7 @@ public class HBaseAdmin { /** * Compact a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName * @throws IOException */ @@ -716,18 +716,18 @@ public class HBaseAdmin { /** * Compact a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName * @throws IOException */ public void compact(final byte [] tableNameOrRegionName) throws IOException { modifyTable(tableNameOrRegionName, HConstants.Modify.TABLE_COMPACT); } - + /** * Major compact a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName * @throws IOException */ @@ -739,7 +739,7 @@ public class HBaseAdmin { /** * Major compact a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName * @throws IOException */ @@ -751,7 +751,7 @@ public class HBaseAdmin { /** * Split a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName * @throws IOException */ @@ -762,7 +762,7 @@ public class HBaseAdmin { /** * Split a table or an individual region. * Asynchronous operation. - * + * * @param tableNameOrRegionName * @throws IOException */ @@ -777,7 +777,7 @@ public class HBaseAdmin { * @param op * @throws IOException */ - private void modifyTable(final byte [] tableNameOrRegionName, + private void modifyTable(final byte [] tableNameOrRegionName, final HConstants.Modify op) throws IOException { if (tableNameOrRegionName == null) { @@ -789,16 +789,16 @@ public class HBaseAdmin { Object [] args = regionName == null? null: new byte [][] {regionName}; modifyTable(tableName == null? null: tableName, op, args); } - + /** * Modify an existing table, more IRB friendly version. * Asynchronous operation. - * + * * @param tableName name of table. * @param htd modified description of the table * @throws IOException */ - public void modifyTable(final byte [] tableName, HTableDescriptor htd) + public void modifyTable(final byte [] tableName, HTableDescriptor htd) throws IOException { modifyTable(tableName, HConstants.Modify.TABLE_SET_HTD, htd); } @@ -806,14 +806,14 @@ public class HBaseAdmin { /** * Modify an existing table. * Asynchronous operation. - * + * * @param tableName name of table. May be null if we are operating on a * region. * @param op table modification operation * @param args operation specific arguments * @throws IOException */ - public void modifyTable(final byte [] tableName, HConstants.Modify op, + public void modifyTable(final byte [] tableName, HConstants.Modify op, Object... 
args) throws IOException { if (this.master == null) { @@ -828,7 +828,7 @@ public class HBaseAdmin { try { switch (op) { case TABLE_SET_HTD: - if (args == null || args.length < 1 || + if (args == null || args.length < 1 || !(args[0] instanceof HTableDescriptor)) { throw new IllegalArgumentException("SET_HTD requires a HTableDescriptor"); } @@ -887,8 +887,8 @@ public class HBaseAdmin { } } - /** - * Shuts down the HBase instance + /** + * Shuts down the HBase instance * @throws IOException */ public synchronized void shutdown() throws IOException { diff --git a/src/java/org/apache/hadoop/hbase/client/HConnection.java b/src/java/org/apache/hadoop/hbase/client/HConnection.java index c072b16..e9188d6 100644 --- a/src/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/src/java/org/apache/hadoop/hbase/client/HConnection.java @@ -52,7 +52,7 @@ public interface HConnection { /** @return - true if the master server is running */ public boolean isMasterRunning(); - + /** * Checks if tableName exists. * @param tableName Table to check. @@ -71,7 +71,7 @@ public interface HConnection { * @throws IOException */ public boolean isTableEnabled(byte[] tableName) throws IOException; - + /** * @param tableName * @return true if the table is disabled, false otherwise @@ -93,14 +93,14 @@ public interface HConnection { * catalog table that just contains table names and their descriptors. * Right now, it only exists as part of the META table's region info. * - * @return - returns an array of HTableDescriptors + * @return - returns an array of HTableDescriptors * @throws IOException */ public HTableDescriptor[] listTables() throws IOException; - + /** * @param tableName - * @return table metadata + * @return table metadata * @throws IOException */ public HTableDescriptor getHTableDescriptor(byte[] tableName) @@ -109,35 +109,35 @@ public interface HConnection { /** * Allows flushing the region cache. */ - public void clearRegionCache(); + public void clearRegionCache(); /** * Find the location of the region of tableName that row * lives in. * @param tableName name of the table row is in * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the reigon in + * @return HRegionLocation that describes where to find the reigon in * question * @throws IOException */ public HRegionLocation locateRegion(final byte [] tableName, final byte [] row) throws IOException; - + /** * Find the location of the region of tableName that row * lives in, ignoring any value that might be in the cache. * @param tableName name of the table row is in * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the reigon in + * @return HRegionLocation that describes where to find the reigon in * question * @throws IOException */ public HRegionLocation relocateRegion(final byte [] tableName, final byte [] row) - throws IOException; - - /** + throws IOException; + + /** * Establishes a connection to the region server at the specified address. * @param regionServer - the server to connect to * @return proxy for HRegionServer @@ -145,8 +145,8 @@ public interface HConnection { */ public HRegionInterface getHRegionConnection(HServerAddress regionServer) throws IOException; - - /** + + /** * Establishes a connection to the region server at the specified address. 
* @param regionServer - the server to connect to * @param getMaster - do we check if master is alive @@ -156,7 +156,7 @@ public interface HConnection { public HRegionInterface getHRegionConnection( HServerAddress regionServer, boolean getMaster) throws IOException; - + /** * Find region location hosting passed row * @param tableName @@ -170,8 +170,8 @@ public interface HConnection { throws IOException; /** - * Pass in a ServerCallable with your particular bit of logic defined and - * this method will manage the process of doing retries with timed waits + * Pass in a ServerCallable with your particular bit of logic defined and + * this method will manage the process of doing retries with timed waits * and refinds of missing regions. * * @param the type of the return value @@ -180,9 +180,9 @@ public interface HConnection { * @throws IOException * @throws RuntimeException */ - public T getRegionServerWithRetries(ServerCallable callable) + public T getRegionServerWithRetries(ServerCallable callable) throws IOException, RuntimeException; - + /** * Pass in a ServerCallable with your particular bit of logic defined and * this method will pass it to the defined region server. @@ -194,8 +194,8 @@ public interface HConnection { */ public T getRegionServerWithoutRetries(ServerCallable callable) throws IOException, RuntimeException; - - + + /** * Process a batch of Puts. Does the retries. * @param list A batch of Puts to process. @@ -215,7 +215,7 @@ public interface HConnection { */ public int processBatchOfDeletes(List list, byte[] tableName) throws IOException; - + public void processBatchOfPuts(List list, final byte[] tableName, ExecutorService pool) throws IOException; diff --git a/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 7f60686..02b7281 100644 --- a/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -66,7 +66,7 @@ import org.apache.zookeeper.Watcher.Event.KeeperState; /** * A non-instantiable class that manages connections to multiple tables in * multiple HBase instances. - * + * * Used by {@link HTable} and {@link HBaseAdmin} */ public class HConnectionManager implements HConstants { @@ -89,13 +89,13 @@ public class HConnectionManager implements HConstants { protected HConnectionManager() { super(); } - + private static final int MAX_CACHED_HBASE_INSTANCES=31; - // A LRU Map of master HBaseConfiguration -> connection information for that + // A LRU Map of master HBaseConfiguration -> connection information for that // instance. The objects it contains are mutable and hence require // synchronized access to them. We set instances to 31. The zk default max // connections is 30 so should run into zk issues before hit this value of 31. 
- private static + private static final Map HBASE_INSTANCES = new LinkedHashMap ((int) (MAX_CACHED_HBASE_INSTANCES/0.75F)+1, 0.75F, true) { @@ -104,10 +104,10 @@ public class HConnectionManager implements HConstants { return size() > MAX_CACHED_HBASE_INSTANCES; } }; - - private static final Map ZK_WRAPPERS = + + private static final Map ZK_WRAPPERS = new HashMap(); - + /** * Get the connection object for the instance specified by the configuration * If no current connection exists, create a new connection for that instance @@ -125,7 +125,7 @@ public class HConnectionManager implements HConstants { } return connection; } - + /** * Delete connection information for the instance specified by configuration * @param conf @@ -176,7 +176,7 @@ public class HConnectionManager implements HConstants { } return ZK_WRAPPERS.get(conf.get(HConstants.ZOOKEEPER_QUORUM)); } - + /** * This class is responsible to handle connection and reconnection * to a zookeeper quorum. @@ -217,7 +217,7 @@ public class HConnectionManager implements HConstants { resetZooKeeper(); } } - + /** * Get this watcher's ZKW, instanciate it if necessary. * @return ZKW @@ -225,10 +225,10 @@ public class HConnectionManager implements HConstants { public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException { if(zooKeeperWrapper == null) { zooKeeperWrapper = new ZooKeeperWrapper(conf, this); - } + } return zooKeeperWrapper; } - + /** * Clear this connection to zookeeper. */ @@ -253,25 +253,25 @@ public class HConnectionManager implements HConstants { private volatile boolean closed; private volatile HMasterInterface master; private volatile boolean masterChecked; - + private final Object rootRegionLock = new Object(); private final Object metaRegionLock = new Object(); private final Object userRegionLock = new Object(); - + private volatile HBaseConfiguration conf; - - // Known region HServerAddress.toString() -> HRegionInterface + + // Known region HServerAddress.toString() -> HRegionInterface private final Map servers = new ConcurrentHashMap(); // Used by master and region servers during safe mode only - private volatile HRegionLocation rootRegionLocation; - - private final Map> + private volatile HRegionLocation rootRegionLocation; + + private final Map> cachedRegionLocations = new HashMap>(); - /** + /** * constructor * @param conf Configuration object */ @@ -283,11 +283,11 @@ public class HConnectionManager implements HConstants { conf.get(REGION_SERVER_CLASS, DEFAULT_REGION_SERVER_CLASS); this.closed = false; - + try { this.serverInterfaceClass = (Class) Class.forName(serverClassName); - + } catch (ClassNotFoundException e) { throw new UnsupportedOperationException( "Unable to find region server interface " + serverClassName, e); @@ -297,7 +297,7 @@ public class HConnectionManager implements HConstants { this.numRetries = conf.getInt("hbase.client.retries.number", 10); this.maxRPCAttempts = conf.getInt("hbase.client.rpc.maxattempts", 1); this.rpcTimeout = conf.getLong("hbase.regionserver.lease.period", 60000); - + this.master = null; this.masterChecked = false; } @@ -313,7 +313,7 @@ public class HConnectionManager implements HConstants { public void unsetRootRegionLocation() { this.rootRegionLocation = null; } - + // Used by master and region servers during safe mode only public void setRootRegionLocation(HRegionLocation rootRegion) { if (rootRegion == null) { @@ -322,7 +322,7 @@ public class HConnectionManager implements HConstants { } this.rootRegionLocation = rootRegion; } - + public HMasterInterface 
getMaster() throws MasterNotRunningException { ZooKeeperWrapper zk = null; try { @@ -343,15 +343,15 @@ public class HConnectionManager implements HConstants { masterLocation = zk.readMasterAddressOrThrow(); HMasterInterface tryMaster = (HMasterInterface)HBaseRPC.getProxy( - HMasterInterface.class, HBaseRPCProtocolVersion.versionID, + HMasterInterface.class, HBaseRPCProtocolVersion.versionID, masterLocation.getInetSocketAddress(), this.conf); - + if (tryMaster.isMasterRunning()) { this.master = tryMaster; this.masterLock.notifyAll(); break; } - + } catch (IOException e) { if (tries == numRetries - 1) { // This was our last chance - don't bother sleeping @@ -386,7 +386,7 @@ public class HConnectionManager implements HConstants { if (this.master == null) { try { getMaster(); - + } catch (MasterNotRunningException e) { return false; } @@ -416,7 +416,7 @@ public class HConnectionManager implements HConstants { } return exists; } - + /* * @param n * @return Truen if passed tablename n is equal to the name @@ -459,11 +459,11 @@ public class HConnectionManager implements HConstants { return uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]); } - + public boolean isTableEnabled(byte[] tableName) throws IOException { return testTableOnlineState(tableName, true); } - + public boolean isTableDisabled(byte[] tableName) throws IOException { return testTableOnlineState(tableName, false); } @@ -485,7 +485,7 @@ public class HConnectionManager implements HConstants { } } return true; - } + } }; MetaScanner.metaScan(conf, visitor); return available.get(); @@ -518,7 +518,7 @@ public class HConnectionManager implements HConstants { scan.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER); int rows = this.conf.getInt("hbase.meta.scanner.caching", 100); scan.setCaching(rows); - ScannerCallable s = new ScannerCallable(this, + ScannerCallable s = new ScannerCallable(this, (Bytes.equals(tableName, HConstants.META_TABLE_NAME) ? HConstants.ROOT_TABLE_NAME : HConstants.META_TABLE_NAME), scan); try { @@ -560,7 +560,7 @@ public class HConnectionManager implements HConstants { return rowsScanned > 0 && onOffLine; } - private static class HTableDescriptorFinder + private static class HTableDescriptorFinder implements MetaScanner.MetaScannerVisitor { byte[] tableName; HTableDescriptor result; @@ -618,18 +618,18 @@ public class HConnectionManager implements HConstants { throw new IllegalArgumentException( "table name cannot be null or zero length"); } - + if (Bytes.equals(tableName, ROOT_TABLE_NAME)) { synchronized (rootRegionLock) { // This block guards against two threads trying to find the root - // region at the same time. One will go do the find while the + // region at the same time. One will go do the find while the // second waits. The second thread will not do find. - + if (!useCache || rootRegionLocation == null) { this.rootRegionLocation = locateRootRegion(); } return this.rootRegionLocation; - } + } } else if (Bytes.equals(tableName, META_TABLE_NAME)) { return locateRegionInMeta(ROOT_TABLE_NAME, tableName, row, useCache, metaRegionLock); @@ -659,7 +659,7 @@ public class HConnectionManager implements HConstants { } else { deleteCachedLocation(tableName, row); } - + // build the key of the meta region we should be looking for. // the extra 9's on the end are necessary to allow "exact" matches // without knowing the precise region names. 
@@ -667,7 +667,7 @@ public class HConnectionManager implements HConstants { HConstants.NINES); for (int tries = 0; true; tries++) { if (tries >= numRetries) { - throw new NoServerForRegionException("Unable to find region for " + throw new NoServerForRegionException("Unable to find region for " + Bytes.toStringBinary(row) + " after " + numRetries + " tries."); } @@ -704,10 +704,10 @@ public class HConnectionManager implements HConstants { throw new TableNotFoundException(Bytes.toString(tableName)); } - byte [] value = regionInfoRow.getValue(CATALOG_FAMILY, + byte [] value = regionInfoRow.getValue(CATALOG_FAMILY, REGIONINFO_QUALIFIER); if (value == null || value.length == 0) { - throw new IOException("HRegionInfo was null or empty in " + + throw new IOException("HRegionInfo was null or empty in " + Bytes.toString(parentTable)); } // convert the row result into the HRegionLocation we need! @@ -719,21 +719,21 @@ public class HConnectionManager implements HConstants { "Table '" + Bytes.toString(tableName) + "' was not found."); } if (regionInfo.isOffline()) { - throw new RegionOfflineException("region offline: " + + throw new RegionOfflineException("region offline: " + regionInfo.getRegionNameAsString()); } - + value = regionInfoRow.getValue(CATALOG_FAMILY, SERVER_QUALIFIER); String serverAddress = ""; if(value != null) { serverAddress = Bytes.toString(value); } - if (serverAddress.equals("")) { + if (serverAddress.equals("")) { throw new NoServerForRegionException("No server address listed " + "in " + Bytes.toString(parentTable) + " for region " + regionInfo.getRegionNameAsString()); } - + // instantiate the location location = new HRegionLocation(regionInfo, new HServerAddress(serverAddress)); @@ -765,7 +765,7 @@ public class HConnectionManager implements HConstants { } } try{ - Thread.sleep(getPauseTime(tries)); + Thread.sleep(getPauseTime(tries)); } catch (InterruptedException e){ // continue } @@ -775,10 +775,10 @@ public class HConnectionManager implements HConstants { /* * Search the cache for a location that fits our table and row key. * Return null if no suitable region is located. TODO: synchronization note - * + * *

TODO: This method during writing consumes 15% of CPU doing lookup * into the Soft Reference SortedMap. Improve. - * + * * @param tableName * @param row * @return Null or region location found in cache. @@ -846,7 +846,7 @@ public class HConnectionManager implements HConstants { * Allows flushing the region cache. */ public void clearRegionCache() { - this.cachedRegionLocations.clear(); + this.cachedRegionLocations.clear(); } /* @@ -893,7 +893,7 @@ public class HConnectionManager implements HConstants { } } } - + /* * @param tableName * @return Map of cached locations for passed tableName @@ -931,7 +931,7 @@ public class HConnectionManager implements HConstants { } public HRegionInterface getHRegionConnection( - HServerAddress regionServer, boolean getMaster) + HServerAddress regionServer, boolean getMaster) throws IOException { if (getMaster) { getMaster(); @@ -944,7 +944,7 @@ public class HConnectionManager implements HConstants { try { server = (HRegionInterface)HBaseRPC.waitForProxy( serverInterfaceClass, HBaseRPCProtocolVersion.versionID, - regionServer.getInetSocketAddress(), this.conf, + regionServer.getInetSocketAddress(), this.conf, this.maxRPCAttempts, this.rpcTimeout); } catch (RemoteException e) { throw RemoteExceptionHandler.decodeRemoteException(e); @@ -954,9 +954,9 @@ public class HConnectionManager implements HConstants { } return server; } - + public HRegionInterface getHRegionConnection( - HServerAddress regionServer) + HServerAddress regionServer) throws IOException { return getHRegionConnection(regionServer, false); } @@ -972,7 +972,7 @@ public class HConnectionManager implements HConstants { * @return HRegionLocation for root region if found * @throws NoServerForRegionException - if the root region can not be * located after retrying - * @throws IOException + * @throws IOException */ private HRegionLocation locateRootRegion() throws IOException { @@ -1028,7 +1028,7 @@ public class HConnectionManager implements HConstants { throw new NoServerForRegionException("Timed out trying to locate "+ "root region because: " + t.getMessage()); } - + // Sleep and retry finding root region. try { if (LOG.isDebugEnabled()) { @@ -1042,23 +1042,23 @@ public class HConnectionManager implements HConstants { // continue } } - + rootRegionAddress = null; } - + // if the address is null by this point, then the retries have failed, // and we're sort of sunk if (rootRegionAddress == null) { throw new NoServerForRegionException( "unable to locate root region server"); } - + // return the region location return new HRegionLocation( HRegionInfo.ROOT_REGIONINFO, rootRegionAddress); } - public T getRegionServerWithRetries(ServerCallable callable) + public T getRegionServerWithRetries(ServerCallable callable) throws IOException, RuntimeException { List exceptions = new ArrayList(); for(int tries = 0; tries < numRetries; tries++) { @@ -1079,9 +1079,9 @@ public class HConnectionManager implements HConstants { // continue } } - return null; + return null; } - + public T getRegionServerWithoutRetries(ServerCallable callable) throws IOException, RuntimeException { try { diff --git a/src/java/org/apache/hadoop/hbase/client/HTable.java b/src/java/org/apache/hadoop/hbase/client/HTable.java index 2d2ee0d..cdcec4c 100644 --- a/src/java/org/apache/hadoop/hbase/client/HTable.java +++ b/src/java/org/apache/hadoop/hbase/client/HTable.java @@ -62,15 +62,15 @@ import org.apache.hadoop.hbase.util.Writables; /** * Used to communicate with a single HBase table. * This class is not thread safe. 
Use one instance per thread. - * - * Puts, deletes, checkAndPut and incrementColumnValue are - * done in an exclusive (and thus serial) fashion for each row. + * + * Puts, deletes, checkAndPut and incrementColumnValue are + * done in an exclusive (and thus serial) fashion for each row. * These calls acquire a row lock which is shared with the lockRow - * calls. - * - * Gets and Scans will not return half written data. That is, + * calls. + * + * Gets and Scans will not return half written data. That is, * all mutation operations are atomic on a row basis with - * respect to other concurrent readers and writers. + * respect to other concurrent readers and writers. */ public class HTable { private final HConnection connection; @@ -85,7 +85,7 @@ public class HTable { private int maxKeyValueSize; private long maxScannerResultSize; - + /** * Creates an object to access a HBase table. * @@ -110,7 +110,7 @@ public class HTable { /** * Creates an object to access a HBase table. - * + * * @param conf Configuration object to use. * @param tableName Name of the table. * @throws IOException if a remote or network exception occurs @@ -122,7 +122,7 @@ public class HTable { /** * Creates an object to access a HBase table. - * + * * @param conf Configuration object to use. * @param tableName Name of the table. * @throws IOException if a remote or network exception occurs @@ -144,9 +144,9 @@ public class HTable { this.autoFlush = true; this.currentWriteBufferSize = 0; this.scannerCaching = conf.getInt("hbase.client.scanner.caching", 1); - + this.maxScannerResultSize = conf.getLong( - HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, + HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); this.maxKeyValueSize = conf.getInt("hbase.client.keyvalue.maxsize", -1); @@ -197,7 +197,7 @@ public class HTable { public static boolean isTableEnabled(byte[] tableName) throws IOException { return isTableEnabled(new HBaseConfiguration(), tableName); } - + /** * Tells whether or not a table is enabled or not. * @param conf The Configuration object to use. @@ -262,7 +262,7 @@ public class HTable { public HConnection getConnection() { return this.connection; } - + /** * Gets the number of rows that a scanner will fetch at once. *
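The HTable class comment above (one instance per thread, row-level atomicity for puts, deletes, checkAndPut and incrementColumnValue) is the core of the client API this patch touches. A minimal usage sketch, not part of the patch, against the 0.20-era client shown here; it assumes a reachable cluster, and the table name "mytable" and family/qualifier "cf"/"q" are invented for illustration.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class HTableUsageSketch {
  public static void main(String[] args) throws IOException {
    // One HTable instance per thread, as the class javadoc above requires.
    HBaseConfiguration conf = new HBaseConfiguration();
    HTable table = new HTable(conf, "mytable");   // hypothetical table

    // Single-row write; the row lock makes it atomic w.r.t. concurrent readers.
    Put put = new Put(Bytes.toBytes("row1"));
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
    table.put(put);

    // Read it back; per the comment above, a Get never sees half-written data.
    Get get = new Get(Bytes.toBytes("row1"));
    get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    Result result = table.get(get);
    byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    System.out.println(value == null ? "<no value>" : Bytes.toString(value));

    table.close();
  }
}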

@@ -332,7 +332,7 @@ public class HTable { MetaScannerVisitor visitor = new MetaScannerVisitor() { public boolean processRow(Result rowResult) throws IOException { HRegionInfo info = Writables.getHRegionInfo( - rowResult.getValue(HConstants.CATALOG_FAMILY, + rowResult.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER)); if (Bytes.equals(info.getTableDesc().getName(), getTableName())) { if (!(info.isOffline() || info.isSplit())) { @@ -362,21 +362,21 @@ public class HTable { MetaScannerVisitor visitor = new MetaScannerVisitor() { public boolean processRow(Result rowResult) throws IOException { HRegionInfo info = Writables.getHRegionInfo( - rowResult.getValue(HConstants.CATALOG_FAMILY, + rowResult.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER)); - + if (!(Bytes.equals(info.getTableDesc().getName(), getTableName()))) { return false; } HServerAddress server = new HServerAddress(); - byte [] value = rowResult.getValue(HConstants.CATALOG_FAMILY, + byte [] value = rowResult.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); if (value != null && value.length > 0) { String address = Bytes.toString(value); server = new HServerAddress(address); } - + if (!(info.isOffline() || info.isSplit())) { regionMap.put(new UnmodifyableHRegionInfo(info), server); } @@ -389,9 +389,9 @@ public class HTable { } /** - * Return the row that matches row exactly, + * Return the row that matches row exactly, * or the one that immediately precedes it. - * + * * @param row A row key. * @param family Column family to include in the {@link Result}. * @throws IOException if a remote or network exception occurs. @@ -409,9 +409,9 @@ public class HTable { } /** - * Return the row that matches row exactly, + * Return the row that matches row exactly, * or the one that immediately preceeds it. - * + * * @param row row key * @param family Column family to look for row in. * @return map of values @@ -426,7 +426,7 @@ public class HTable { return r == null || r.isEmpty()? null: r.getRowResult(); } - /** + /** * Returns a scanner on the current table as specified by the {@link Scan} * object. * @@ -443,7 +443,7 @@ public class HTable { /** * Gets a scanner on the current table for the given family. - * + * * @param family The column family to scan. * @return A scanner. * @throws IOException if a remote or network exception occurs. @@ -454,10 +454,10 @@ public class HTable { scan.addFamily(family); return getScanner(scan); } - + /** * Gets a scanner on the current table for the given family and qualifier. - * + * * @param family The column family to scan. * @param qualifier The column qualifier to scan. * @return A scanner. @@ -489,10 +489,10 @@ public class HTable { } ); } - + /** * Deletes the specified cells/row. - * + * * @param delete The object that specifies what to delete. * @throws IOException if a remote or network exception occurs. * @since 0.20.0 @@ -508,7 +508,7 @@ public class HTable { } ); } - + /** * Deletes the specified cells/rows in bulk. * @param deletes List of things to delete. List gets modified by this @@ -547,7 +547,7 @@ public class HTable { flushCommits(); } } - + /** * Puts some data in the table, in batch. *
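The hunk above also reformats the javadoc for getScanner(Scan) and the family/qualifier scanner helpers. A short scan sketch, not part of the patch; it assumes getScanner(Scan) returns a ResultScanner that can be iterated and closed, as in the 0.20 client, and the table/family names are invented.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanSketch {
  public static void main(String[] args) throws IOException {
    HTable table = new HTable(new HBaseConfiguration(), "mytable"); // hypothetical table
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));
    scan.setCaching(100);   // rows fetched per RPC, as in the caching hunks above
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        byte[] v = r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
        System.out.println(Bytes.toStringBinary(r.getRow()) + " => "
            + (v == null ? "<none>" : Bytes.toString(v)));
      }
    } finally {
      scanner.close();      // always release the server-side scanner
    }
    table.close();
  }
}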

@@ -572,7 +572,7 @@ public class HTable { flushCommits(); } } - + /** * Atomically increments a column value. *

@@ -587,7 +587,7 @@ public class HTable { * @return The new value, post increment. * @throws IOException if a remote or network exception occurs. */ - public long incrementColumnValue(final byte [] row, final byte [] family, + public long incrementColumnValue(final byte [] row, final byte [] family, final byte [] qualifier, final long amount) throws IOException { return incrementColumnValue(row, family, qualifier, amount, true); @@ -598,7 +598,7 @@ public class HTable { * and is not a big-endian long, this could throw an exception. If the column * value does not yet exist it is initialized to amount and * written to the specified column. - * + * *

Setting writeToWAL to false means that in a fail scenario, you will lose * any increments that have not been flushed. * @param row The row that contains the cell to increment. @@ -616,7 +616,7 @@ public class HTable { * @return The new value, post increment. * @throws IOException if a remote or network exception occurs. */ - public long incrementColumnValue(final byte [] row, final byte [] family, + public long incrementColumnValue(final byte [] row, final byte [] family, final byte [] qualifier, final long amount, final boolean writeToWAL) throws IOException { NullPointerException npe = null; @@ -634,7 +634,7 @@ public class HTable { new ServerCallable(connection, tableName, row) { public Long call() throws IOException { return server.incrementColumnValue( - location.getRegionInfo().getRegionName(), row, family, + location.getRegionInfo().getRegionName(), row, family, qualifier, amount, writeToWAL); } } @@ -644,7 +644,7 @@ public class HTable { /** * Atomically checks if a row/family/qualifier value match the expectedValue. * If it does, it adds the put. - * + * * @param row * @param family * @param qualifier @@ -653,8 +653,8 @@ public class HTable { * @throws IOException * @return true if the new put was execute, false otherwise */ - public boolean checkAndPut(final byte [] row, - final byte [] family, final byte [] qualifier, final byte [] value, + public boolean checkAndPut(final byte [] row, + final byte [] family, final byte [] qualifier, final byte [] value, final Put put) throws IOException { return connection.getRegionServerWithRetries( @@ -666,12 +666,12 @@ public class HTable { } ).booleanValue(); } - + /** * Test for the existence of columns in the table, as specified in the Get.
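Both signatures above, incrementColumnValue and checkAndPut, come straight from this file. A small sketch of how they pair up, not part of the patch, with invented table/family/qualifier names; it assumes the counter cell is either absent or a big-endian long, as the javadoc above requires.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class CounterAndCasSketch {
  public static void main(String[] args) throws IOException {
    HTable table = new HTable(new HBaseConfiguration(), "mytable"); // hypothetical table
    byte[] row = Bytes.toBytes("row1");
    byte[] cf  = Bytes.toBytes("cf");

    // Atomic counter; the stored value must be a big-endian long (or absent).
    long hits = table.incrementColumnValue(row, cf, Bytes.toBytes("hits"), 1);

    // Same increment, skipping the WAL: faster, but lost on a region server crash.
    table.incrementColumnValue(row, cf, Bytes.toBytes("hits"), 1, false);

    // Compare-and-set: the Put is applied only if the current value matches.
    Put update = new Put(row);
    update.add(cf, Bytes.toBytes("state"), Bytes.toBytes("done"));
    boolean applied = table.checkAndPut(row, cf, Bytes.toBytes("state"),
        Bytes.toBytes("pending"), update);

    System.out.println("counter=" + hits + " casApplied=" + applied);
    table.close();
  }
}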

- * + * * This will return true if the Get matches one or more keys, false if not.

- * + * * This is a server-side call so it prevents any data from being transfered * to the client. * @param get @@ -688,7 +688,7 @@ public class HTable { } ).booleanValue(); } - + /** * Executes all the buffered {@link Put} operations. *
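flushCommits(), described above, drains the client-side write buffer that put() fills when auto-flush is off. A buffered-write sketch, not part of the patch; setAutoFlush(boolean) is assumed from the 'auto-flush' accessor that appears in a later hunk, setWriteBufferSize comes from this file, and the table/family names are invented.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedPutSketch {
  public static void main(String[] args) throws IOException {
    HTable table = new HTable(new HBaseConfiguration(), "mytable"); // hypothetical table
    table.setAutoFlush(false);                  // buffer puts client-side (assumed setter)
    table.setWriteBufferSize(2 * 1024 * 1024);  // flush once ~2MB of edits accumulate

    List<Put> puts = new ArrayList<Put>();
    for (int i = 0; i < 1000; i++) {
      Put p = new Put(Bytes.toBytes("row-" + i));
      p.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v" + i));
      puts.add(p);
    }
    table.put(puts);       // batched; buffered until the write buffer fills
    table.flushCommits();  // push anything still sitting in the buffer
    table.close();
  }
}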

@@ -712,17 +712,17 @@ public class HTable { /** * Releases any resources help or pending changes in internal buffers. - * + * * @throws IOException if a remote or network exception occurs. */ public void close() throws IOException{ flushCommits(); this.pool.shutdownNow(); } - + /** * Utility method that verifies Put is well formed. - * + * * @param put * @throws IllegalArgumentException */ @@ -731,7 +731,7 @@ public class HTable { throw new IllegalArgumentException("No columns to insert"); } } - + /** * Obtains a lock on a row. * @@ -775,7 +775,7 @@ public class HTable { } ); } - + /** * Tells whether or not 'auto-flush' is turned on. * @@ -807,7 +807,7 @@ public class HTable { /** * Set the size of the buffer in bytes. - * If the new size is lower than the current size of data in the + * If the new size is lower than the current size of data in the * write buffer, the buffer is flushed. * @param writeBufferSize * @throws IOException @@ -828,10 +828,10 @@ public class HTable { } // Old API. Pre-hbase-880, hbase-1304. - + /** * Get a single value for the specified row and column - * + * * @param row row key * @param column column name * @return value for specified row/column @@ -843,7 +843,7 @@ public class HTable { return get(Bytes.toBytes(row), Bytes.toBytes(column)); } - /** + /** * Get a single value for the specified row and column * * @param row row key @@ -858,7 +858,7 @@ public class HTable { return get(Bytes.toBytes(row), Bytes.toBytes(column), numVersions); } - /** + /** * Get a single value for the specified row and column * * @param row row key @@ -876,7 +876,7 @@ public class HTable { return r == null || r.size() <= 0? null: r.getCellValue(); } - /** + /** * Get the specified number of versions of the specified row and column * @param row row key * @param column column name @@ -891,7 +891,7 @@ public class HTable { return get(row, column, HConstants.LATEST_TIMESTAMP, numVersions); } - /** + /** * Get the specified number of versions of the specified row and column with * the specified timestamp. * @@ -909,7 +909,7 @@ public class HTable { return get(Bytes.toBytes(row), Bytes.toBytes(column), timestamp, numVersions); } - /** + /** * Get the specified number of versions of the specified row and column with * the specified timestamp. * @@ -932,15 +932,15 @@ public class HTable { g.addColumn(fq[0], fq[1]); } g.setMaxVersions(numVersions); - g.setTimeRange(0, + g.setTimeRange(0, timestamp == HConstants.LATEST_TIMESTAMP ? timestamp : timestamp+1); Result r = get(g); return r == null || r.size() <= 0? null: r.getCellValues(); } - /** + /** * Get all the data for the specified row at the latest timestamp - * + * * @param row row key * @return RowResult is null if row does not exist. * @throws IOException @@ -950,9 +950,9 @@ public class HTable { return getRow(Bytes.toBytes(row)); } - /** + /** * Get all the data for the specified row at the latest timestamp - * + * * @param row row key * @return RowResult is null if row does not exist. * @throws IOException @@ -961,10 +961,10 @@ public class HTable { public RowResult getRow(final byte [] row) throws IOException { return getRow(row, HConstants.LATEST_TIMESTAMP); } - - /** + + /** * Get more than one version of all columns for the specified row - * + * * @param row row key * @param numVersions number of versions to return * @return RowResult is null if row does not exist. 
@@ -973,13 +973,13 @@ public class HTable { */ public RowResult getRow(final String row, final int numVersions) throws IOException { - return getRow(Bytes.toBytes(row), null, + return getRow(Bytes.toBytes(row), null, HConstants.LATEST_TIMESTAMP, numVersions, null); } - /** + /** * Get more than one version of all columns for the specified row - * + * * @param row row key * @param numVersions number of versions to return * @return RowResult is null if row does not exist. @@ -991,38 +991,38 @@ public class HTable { return getRow(row, null, HConstants.LATEST_TIMESTAMP, numVersions, null); } - /** + /** * Get all the data for the specified row at a specified timestamp - * + * * @param row row key * @param ts timestamp * @return RowResult is null if row does not exist. * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #get(Get)} */ - public RowResult getRow(final String row, final long ts) + public RowResult getRow(final String row, final long ts) throws IOException { return getRow(Bytes.toBytes(row), ts); } - /** + /** * Get all the data for the specified row at a specified timestamp - * + * * @param row row key * @param ts timestamp * @return RowResult is null if row does not exist. * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #get(Get)} */ - public RowResult getRow(final byte [] row, final long ts) + public RowResult getRow(final byte [] row, final long ts) throws IOException { return getRow(row,null,ts); } - - /** + + /** * Get more than one version of all columns for the specified row * at a specified timestamp - * + * * @param row row key * @param ts timestamp * @param numVersions number of versions to return @@ -1034,11 +1034,11 @@ public class HTable { final int numVersions) throws IOException { return getRow(Bytes.toBytes(row), null, ts, numVersions, null); } - - /** + + /** * Get more than one version of all columns for the specified row * at a specified timestamp - * + * * @param row row key * @param timestamp timestamp * @param numVersions number of versions to return @@ -1051,37 +1051,37 @@ public class HTable { return getRow(row, null, timestamp, numVersions, null); } - /** + /** * Get selected columns for the specified row at the latest timestamp - * + * * @param row row key * @param columns Array of column names and families you want to retrieve. * @return RowResult is null if row does not exist. * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #get(Get)} */ - public RowResult getRow(final String row, final String [] columns) + public RowResult getRow(final String row, final String [] columns) throws IOException { return getRow(Bytes.toBytes(row), Bytes.toByteArrays(columns)); } - /** + /** * Get selected columns for the specified row at the latest timestamp - * + * * @param row row key * @param columns Array of column names and families you want to retrieve. * @return RowResult is null if row does not exist. * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #get(Get)} */ - public RowResult getRow(final byte [] row, final byte [][] columns) + public RowResult getRow(final byte [] row, final byte [][] columns) throws IOException { return getRow(row, columns, HConstants.LATEST_TIMESTAMP); } - - /** + + /** * Get more than one version of selected columns for the specified row - * + * * @param row row key * @param columns Array of column names and families you want to retrieve. 
* @param numVersions number of versions to return @@ -1094,10 +1094,10 @@ public class HTable { return getRow(Bytes.toBytes(row), Bytes.toByteArrays(columns), HConstants.LATEST_TIMESTAMP, numVersions, null); } - - /** + + /** * Get more than one version of selected columns for the specified row - * + * * @param row row key * @param columns Array of column names and families you want to retrieve. * @param numVersions number of versions to return @@ -1110,9 +1110,9 @@ public class HTable { return getRow(row, columns, HConstants.LATEST_TIMESTAMP, numVersions, null); } - /** + /** * Get selected columns for the specified row at a specified timestamp - * + * * @param row row key * @param columns Array of column names and families you want to retrieve. * @param ts timestamp @@ -1120,15 +1120,15 @@ public class HTable { * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #get(Get)} */ - public RowResult getRow(final String row, final String [] columns, - final long ts) - throws IOException { + public RowResult getRow(final String row, final String [] columns, + final long ts) + throws IOException { return getRow(Bytes.toBytes(row), Bytes.toByteArrays(columns), ts); } - /** + /** * Get selected columns for the specified row at a specified timestamp - * + * * @param row row key * @param columns Array of column names and families you want to retrieve. * @param ts timestamp @@ -1136,16 +1136,16 @@ public class HTable { * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #get(Get)} */ - public RowResult getRow(final byte [] row, final byte [][] columns, - final long ts) - throws IOException { + public RowResult getRow(final byte [] row, final byte [][] columns, + final long ts) + throws IOException { return getRow(row,columns,ts,1,null); } - - /** + + /** * Get more than one version of selected columns for the specified row, * using an existing row lock. - * + * * @param row row key * @param columns Array of column names and families you want to retrieve. * @param numVersions number of versions to return @@ -1161,21 +1161,21 @@ public class HTable { numVersions, rowLock); } - /** + /** * Get selected columns for the specified row at a specified timestamp * using existing row lock. - * + * * @param row row key * @param columns Array of column names and families you want to retrieve. * @param ts timestamp - * @param numVersions + * @param numVersions * @param rl row lock * @return RowResult is null if row does not exist. * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #get(Get)} */ - public RowResult getRow(final byte [] row, final byte [][] columns, - final long ts, final int numVersions, final RowLock rl) + public RowResult getRow(final byte [] row, final byte [][] columns, + final long ts, final int numVersions, final RowLock rl) throws IOException { Get g = rl != null? new Get(row, rl): new Get(row); if (columns != null) { @@ -1189,13 +1189,13 @@ public class HTable { } } g.setMaxVersions(numVersions); - g.setTimeRange(0, + g.setTimeRange(0, ts == HConstants.LATEST_TIMESTAMP ? ts : ts+1); Result r = get(g); return r == null || r.size() <= 0? null: r.getRowResult(); } - /** + /** * Get a scanner on the current table starting at first row. * Return the specified columns. * @@ -1213,7 +1213,7 @@ public class HTable { return getScanner(Bytes.toByteArrays(columns), HConstants.EMPTY_START_ROW); } - /** + /** * Get a scanner on the current table starting at the specified row. * Return the specified columns. 
* @@ -1232,7 +1232,7 @@ public class HTable { return getScanner(Bytes.toByteArrays(columns), Bytes.toBytes(startRow)); } - /** + /** * Get a scanner on the current table starting at first row. * Return the specified columns. * @@ -1251,7 +1251,7 @@ public class HTable { HConstants.LATEST_TIMESTAMP, null); } - /** + /** * Get a scanner on the current table starting at the specified row. * Return the specified columns. * @@ -1269,8 +1269,8 @@ public class HTable { throws IOException { return getScanner(columns, startRow, HConstants.LATEST_TIMESTAMP, null); } - - /** + + /** * Get a scanner on the current table starting at the specified row. * Return the specified columns. * @@ -1290,8 +1290,8 @@ public class HTable { throws IOException { return getScanner(columns, startRow, timestamp, null); } - - /** + + /** * Get a scanner on the current table starting at the specified row. * Return the specified columns. * @@ -1308,11 +1308,11 @@ public class HTable { */ public Scanner getScanner(final byte[][] columns, final byte [] startRow, RowFilterInterface filter) - throws IOException { + throws IOException { return getScanner(columns, startRow, HConstants.LATEST_TIMESTAMP, filter); } - - /** + + /** * Get a scanner on the current table starting at the specified row and * ending just before stopRow. * Return the specified columns. @@ -1336,7 +1336,7 @@ public class HTable { return getScanner(columns, startRow, stopRow, HConstants.LATEST_TIMESTAMP); } - /** + /** * Get a scanner on the current table starting at the specified row and * ending just before stopRow. * Return the specified columns. @@ -1362,7 +1362,7 @@ public class HTable { Bytes.toBytes(stopRow), timestamp); } - /** + /** * Get a scanner on the current table starting at the specified row and * ending just before stopRow. * Return the specified columns. @@ -1388,7 +1388,7 @@ public class HTable { new WhileMatchRowFilter(new StopRowFilter(stopRow))); } - /** + /** * Get a scanner on the current table starting at the specified row. * Return the specified columns. * @@ -1411,7 +1411,7 @@ public class HTable { timestamp, filter); } - /** + /** * Get a scanner on the current table starting at the specified row. * Return the specified columns. * @@ -1444,7 +1444,7 @@ public class HTable { scan.addColumn(splits[0], splits[1]); } } - scan.setTimeRange(0, + scan.setTimeRange(0, timestamp == HConstants.LATEST_TIMESTAMP ? timestamp : timestamp+1); OldClientScanner s = new OldClientScanner(new ClientScanner(scan)); s.initialize(); @@ -1472,7 +1472,7 @@ public class HTable { public void deleteAll(final String row) throws IOException { deleteAll(row, null); } - + /** * Completely delete the row's cells. * @@ -1512,11 +1512,11 @@ public class HTable { deleteAll(row, null, ts); } - /** + /** * Delete all cells that match the passed row and column. * @param row Row to update * @param column name of column whose value is to be deleted - * @throws IOException + * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ public void deleteAll(final String row, final String column) @@ -1524,13 +1524,13 @@ public class HTable { deleteAll(row, column, HConstants.LATEST_TIMESTAMP); } - /** + /** * Delete all cells that match the passed row and column and whose * timestamp is equal-to or older than the passed timestamp. * @param row Row to update * @param column name of column whose value is to be deleted * @param ts Delete all cells of the same timestamp or older. 
- * @throws IOException + * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ public void deleteAll(final String row, final String column, final long ts) @@ -1539,13 +1539,13 @@ public class HTable { column != null? Bytes.toBytes(column): null, ts); } - /** + /** * Delete all cells that match the passed row and column and whose * timestamp is equal-to or older than the passed timestamp. * @param row Row to update * @param column name of column whose value is to be deleted * @param ts Delete all cells of the same timestamp or older. - * @throws IOException + * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ public void deleteAll(final byte [] row, final byte [] column, final long ts) @@ -1553,7 +1553,7 @@ public class HTable { deleteAll(row,column,ts,null); } - /** + /** * Delete all cells that match the passed row and column and whose * timestamp is equal-to or older than the passed timestamp, using an * existing row lock. @@ -1561,7 +1561,7 @@ public class HTable { * @param column name of column whose value is to be deleted * @param ts Delete all cells of the same timestamp or older. * @param rl Existing row lock - * @throws IOException + * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ public void deleteAll(final byte [] row, final byte [] column, final long ts, @@ -1573,12 +1573,12 @@ public class HTable { } delete(d); } - - /** + + /** * Delete all cells that match the passed row and column. * @param row Row to update * @param colRegex column regex expression - * @throws IOException + * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ public void deleteAllByRegex(final String row, final String colRegex) @@ -1586,35 +1586,35 @@ public class HTable { deleteAllByRegex(row, colRegex, HConstants.LATEST_TIMESTAMP); } - /** + /** * Delete all cells that match the passed row and column and whose * timestamp is equal-to or older than the passed timestamp. * @param row Row to update * @param colRegex Column Regex expression * @param ts Delete all cells of the same timestamp or older. - * @throws IOException + * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ - public void deleteAllByRegex(final String row, final String colRegex, + public void deleteAllByRegex(final String row, final String colRegex, final long ts) throws IOException { deleteAllByRegex(Bytes.toBytes(row), colRegex, ts); } - /** + /** * Delete all cells that match the passed row and column and whose * timestamp is equal-to or older than the passed timestamp. * @param row Row to update * @param colRegex Column Regex expression * @param ts Delete all cells of the same timestamp or older. - * @throws IOException + * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ - public void deleteAllByRegex(final byte [] row, final String colRegex, + public void deleteAllByRegex(final byte [] row, final String colRegex, final long ts) throws IOException { deleteAllByRegex(row, colRegex, ts, null); } - - /** + + /** * Delete all cells that match the passed row and column and whose * timestamp is equal-to or older than the passed timestamp, using an * existing row lock. @@ -1622,10 +1622,10 @@ public class HTable { * @param colRegex Column regex expression * @param ts Delete all cells of the same timestamp or older. 
* @param rl Existing row lock - * @throws IOException + * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ - public void deleteAllByRegex(final byte [] row, final String colRegex, + public void deleteAllByRegex(final byte [] row, final String colRegex, final long ts, final RowLock rl) throws IOException { throw new UnsupportedOperationException("TODO: Not yet implemented"); @@ -1639,7 +1639,7 @@ public class HTable { * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ - public void deleteFamily(final String row, final String family) + public void deleteFamily(final String row, final String family) throws IOException { deleteFamily(row, family, HConstants.LATEST_TIMESTAMP); } @@ -1652,7 +1652,7 @@ public class HTable { * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ - public void deleteFamily(final byte[] row, final byte[] family) + public void deleteFamily(final byte[] row, final byte[] family) throws IOException { deleteFamily(row, family, HConstants.LATEST_TIMESTAMP); } @@ -1666,7 +1666,7 @@ public class HTable { * @param timestamp Timestamp to match * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} - */ + */ public void deleteFamily(final String row, final String family, final long timestamp) throws IOException{ @@ -1683,7 +1683,7 @@ public class HTable { * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ - public void deleteFamily(final byte [] row, final byte [] family, + public void deleteFamily(final byte [] row, final byte [] family, final long timestamp) throws IOException { deleteFamily(row,family,timestamp,null); @@ -1700,16 +1700,16 @@ public class HTable { * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ - public void deleteFamily(final byte [] row, final byte [] family, + public void deleteFamily(final byte [] row, final byte [] family, final long timestamp, final RowLock rl) throws IOException { Delete d = new Delete(row, HConstants.LATEST_TIMESTAMP, rl); d.deleteFamily(stripColon(family), timestamp); delete(d); } - + /** - * Delete all cells for a row with matching column family regex + * Delete all cells for a row with matching column family regex * at all timestamps. * * @param row The row to operate on @@ -1717,13 +1717,13 @@ public class HTable { * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ - public void deleteFamilyByRegex(final String row, final String familyRegex) + public void deleteFamilyByRegex(final String row, final String familyRegex) throws IOException { deleteFamilyByRegex(row, familyRegex, HConstants.LATEST_TIMESTAMP); } /** - * Delete all cells for a row with matching column family regex + * Delete all cells for a row with matching column family regex * at all timestamps. 
* * @param row The row to operate on @@ -1731,7 +1731,7 @@ public class HTable { * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ - public void deleteFamilyByRegex(final byte[] row, final String familyRegex) + public void deleteFamilyByRegex(final byte[] row, final String familyRegex) throws IOException { deleteFamilyByRegex(row, familyRegex, HConstants.LATEST_TIMESTAMP); } @@ -1745,7 +1745,7 @@ public class HTable { * @param timestamp Timestamp to match * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} - */ + */ public void deleteFamilyByRegex(final String row, final String familyRegex, final long timestamp) throws IOException{ @@ -1762,17 +1762,17 @@ public class HTable { * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} */ - public void deleteFamilyByRegex(final byte [] row, final String familyRegex, + public void deleteFamilyByRegex(final byte [] row, final String familyRegex, final long timestamp) throws IOException { deleteFamilyByRegex(row,familyRegex,timestamp,null); } - + /** * Delete all cells for a row with matching column family regex with * timestamps less than or equal to timestamp, using existing * row lock. - * + * * @param row The row to operate on * @param familyRegex Column Family Regex * @param timestamp Timestamp to match @@ -1788,7 +1788,7 @@ public class HTable { /** * Test for the existence of a row in the table. - * + * * @param row The row * @return true if the row exists, false otherwise * @throws IOException @@ -1800,7 +1800,7 @@ public class HTable { /** * Test for the existence of a row and column in the table. - * + * * @param row The row * @param column The column * @return true if the row exists, false otherwise @@ -1814,7 +1814,7 @@ public class HTable { /** * Test for the existence of a coordinate in the table. - * + * * @param row The row * @param column The column * @param timestamp The timestamp @@ -1829,7 +1829,7 @@ public class HTable { /** * Test for the existence of a coordinate in the table. - * + * * @param row The row * @param column The column * @param timestamp The timestamp @@ -1842,7 +1842,7 @@ public class HTable { final long timestamp, final RowLock rl) throws IOException { final Get g = new Get(row, rl); g.addColumn(column); - g.setTimeRange(0, + g.setTimeRange(0, timestamp == HConstants.LATEST_TIMESTAMP ? timestamp : timestamp+1); return exists(g); } @@ -1854,12 +1854,12 @@ public class HTable { * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} or * {@link #put(Put)} - */ - public synchronized void commit(final BatchUpdate batchUpdate) + */ + public synchronized void commit(final BatchUpdate batchUpdate) throws IOException { commit(batchUpdate, null); } - + /** * Commit a BatchUpdate to the table using existing row lock. 
* If autoFlush is false, the update is buffered @@ -1868,9 +1868,9 @@ public class HTable { * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} or * {@link #put(Put)} - */ + */ public synchronized void commit(final BatchUpdate batchUpdate, - final RowLock rl) + final RowLock rl) throws IOException { for (BatchOperation bo: batchUpdate) { if (!bo.isPut()) throw new IOException("Only Puts in BU as of 0.20.0"); @@ -1887,7 +1887,7 @@ public class HTable { * @throws IOException * @deprecated As of hbase 0.20.0, replaced by {@link #delete(Delete)} or * {@link #put(List)} - */ + */ public synchronized void commit(final List batchUpdates) throws IOException { // Am I breaking something here in old API by doing this? @@ -1895,11 +1895,11 @@ public class HTable { commit(bu); } } - + /** - * Atomically checks if a row's values match the expectedValues. + * Atomically checks if a row's values match the expectedValues. * If it does, it uses the batchUpdate to update the row.

- * + * * This operation is not currently supported, use {@link #checkAndPut} * @param batchUpdate batchupdate to apply if check is successful * @param expectedValues values to check @@ -1932,11 +1932,11 @@ public class HTable { private long lastNext; // Keep lastResult returned successfully in case we have to reset scanner. private Result lastResult = null; - + protected ClientScanner(final Scan scan) { if (CLIENT_LOG.isDebugEnabled()) { - CLIENT_LOG.debug("Creating scanner over " - + Bytes.toString(getTableName()) + CLIENT_LOG.debug("Creating scanner over " + + Bytes.toString(getTableName()) + " starting at key '" + Bytes.toStringBinary(scan.getStartRow()) + "'"); } this.scan = scan; @@ -1964,7 +1964,7 @@ public class HTable { protected Scan getScan() { return scan; } - + protected long getTimestamp() { return lastNext; } @@ -2005,7 +2005,7 @@ public class HTable { getConnection().getRegionServerWithRetries(callable); this.callable = null; } - + // Where to start the next scanner byte [] localStartKey = null; @@ -2033,10 +2033,10 @@ public class HTable { if (CLIENT_LOG.isDebugEnabled()) { CLIENT_LOG.debug("Advancing internal scanner to startKey at '" + Bytes.toStringBinary(localStartKey) + "'"); - } + } try { callable = getScannerCallable(localStartKey, nbRows); - // Open a scanner on the region server starting at the + // Open a scanner on the region server starting at the // beginning of the region getConnection().getRegionServerWithRetries(callable); this.currentRegion = callable.getHRegionInfo(); @@ -2046,11 +2046,11 @@ public class HTable { } return true; } - + protected ScannerCallable getScannerCallable(byte [] localStartKey, int nbRows) { scan.setStartRow(localStartKey); - ScannerCallable s = new ScannerCallable(getConnection(), + ScannerCallable s = new ScannerCallable(getConnection(), getTableName(), scan); s.setCaching(nbRows); return s; @@ -2066,7 +2066,7 @@ public class HTable { Result [] values = null; long remainingResultSize = maxScannerResultSize; int countdown = this.caching; - // We need to reset it if it's a new callable that was created + // We need to reset it if it's a new callable that was created // with a countdown in nextScanner callable.setCaching(this.caching); // This flag is set when we want to skip the result returned. We do @@ -2174,7 +2174,7 @@ public class HTable { return new Iterator() { // The next RowResult, possibly pre-read Result next = null; - + // return true if there is another item pending, false if there isn't. // this method is where the actual advancing takes place, but you need // to call next() to consume it. hasNext() will only advance if there @@ -2199,7 +2199,7 @@ public class HTable { if (!hasNext()) { return null; } - + // if we get to here, then hasNext() has given us an item to return. // we want to return the item and then null out the next pointer, so // we use a temporary variable. @@ -2220,7 +2220,7 @@ public class HTable { */ protected class OldClientScanner implements Scanner { private final ClientScanner cs; - + OldClientScanner(final ClientScanner cs) { this.cs = cs; } @@ -2256,7 +2256,7 @@ public class HTable { return new Iterator() { // The next RowResult, possibly pre-read RowResult next = null; - + // return true if there is another item pending, false if there isn't. // this method is where the actual advancing takes place, but you need // to call next() to consume it. 
hasNext() will only advance if there @@ -2268,7 +2268,7 @@ public class HTable { return next != null; } catch (IOException e) { throw new RuntimeException(e); - } + } } return true; } @@ -2281,7 +2281,7 @@ public class HTable { if (!hasNext()) { return null; } - + // if we get to here, then hasNext() has given us an item to return. // we want to return the item and then null out the next pointer, so // we use a temporary variable. @@ -2296,7 +2296,7 @@ public class HTable { }; } } - + private static byte [] stripColon(final byte [] n) { byte col = n[n.length-1]; if (col == ':') { diff --git a/src/java/org/apache/hadoop/hbase/client/HTablePool.java b/src/java/org/apache/hadoop/hbase/client/HTablePool.java index 3683e46..a68b3c7 100755 --- a/src/java/org/apache/hadoop/hbase/client/HTablePool.java +++ b/src/java/org/apache/hadoop/hbase/client/HTablePool.java @@ -30,17 +30,17 @@ import org.apache.hadoop.hbase.util.Bytes; /** * A simple pool of HTable instances.

- * + * * Each HTablePool acts as a pool for all tables. To use, instantiate an * HTablePool and use {@link #getTable(String)} to get an HTable from the pool. * Once you are done with it, return it to the pool with {@link #putTable(HTable)}.

- * + * * A pool can be created with a maxSize which defines the most HTable * references that will ever be retained for each table. Otherwise the default * is {@link Integer#MAX_VALUE}.
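The HTablePool description above is the whole contract: getTable(String) checks an instance out, putTable(HTable) returns it, and maxSize caps how many references are retained per table. A checkout/return sketch, not part of the patch; the (HBaseConfiguration, maxSize) constructor is assumed from the fields shown just below, and the table name is invented.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class HTablePoolSketch {
  public static void main(String[] args) throws IOException {
    // Retain at most 10 HTable references per table name.
    HTablePool pool = new HTablePool(new HBaseConfiguration(), 10);
    HTable table = pool.getTable("mytable");   // hypothetical table
    try {
      Put p = new Put(Bytes.toBytes("row1"));
      p.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(p);
    } finally {
      pool.putTable(table);  // hand the instance back instead of closing it
    }
  }
}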

*/ public class HTablePool { - private final Map<String, LinkedList<HTable>> tables = + private final Map<String, LinkedList<HTable>> tables = Collections.synchronizedMap(new HashMap<String, LinkedList<HTable>>()); private final HBaseConfiguration config; private final int maxSize; @@ -64,7 +64,7 @@ public class HTablePool { /** * Get a reference to the specified table from the pool.

- * + * * Create a new one if one is not available. * @param tableName * @return a reference to the specified table @@ -89,7 +89,7 @@ public class HTablePool { /** * Get a reference to the specified table from the pool.

- * + * * Create a new one if one is not available. * @param tableName * @return a reference to the specified table @@ -101,7 +101,7 @@ public class HTablePool { /** * Puts the specified HTable back into the pool.

- * + * * If the pool already contains maxSize references to the table, * then nothing happens. * @param table diff --git a/src/java/org/apache/hadoop/hbase/client/MetaScanner.java b/src/java/org/apache/hadoop/hbase/client/MetaScanner.java index dd2464e..f3ff282 100644 --- a/src/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ b/src/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.util.Bytes; /** - * Scanner class that contains the .META. table scanning logic + * Scanner class that contains the .META. table scanning logic * and uses a Retryable scanner. Provided visitors will be called * for each row. */ @@ -37,7 +37,7 @@ class MetaScanner implements HConstants { /** * Scans the meta table and calls a visitor on each RowResult and uses a empty * start row value as table name. - * + * * @param configuration * @param visitor A custom visitor * @throws IOException @@ -51,7 +51,7 @@ class MetaScanner implements HConstants { /** * Scans the meta table and calls a visitor on each RowResult. Uses a table * name to locate meta regions. - * + * * @param configuration * @param visitor * @param tableName @@ -62,12 +62,12 @@ class MetaScanner implements HConstants { throws IOException { HConnection connection = HConnectionManager.getConnection(configuration); byte [] startRow = tableName == null || tableName.length == 0 ? - HConstants.EMPTY_START_ROW : + HConstants.EMPTY_START_ROW : HRegionInfo.createRegionName(tableName, null, ZEROES); - + // Scan over each meta region ScannerCallable callable = null; - int rows = configuration.getInt("hbase.meta.scanner.caching", 100); + int rows = configuration.getInt("hbase.meta.scanner.caching", 100); do { Scan scan = new Scan(startRow).addFamily(CATALOG_FAMILY); callable = new ScannerCallable(connection, META_TABLE_NAME, scan); @@ -76,7 +76,7 @@ class MetaScanner implements HConstants { try { callable.setCaching(rows); done: do { - //we have all the rows here + //we have all the rows here Result [] rrs = connection.getRegionServerWithRetries(callable); if (rrs == null || rrs.length == 0 || rrs[0].size() == 0) { break done; //exit completely @@ -105,7 +105,7 @@ class MetaScanner implements HConstants { * Visitor method that accepts a RowResult and the meta region location. * Implementations can return false to stop the region's loop if it becomes * unnecessary for some reason. 
- * + * * @param rowResult * @return A boolean to know if it should continue to loop in the region * @throws IOException diff --git a/src/java/org/apache/hadoop/hbase/client/MultiPut.java b/src/java/org/apache/hadoop/hbase/client/MultiPut.java index 073d132..b0cc5ba 100644 --- a/src/java/org/apache/hadoop/hbase/client/MultiPut.java +++ b/src/java/org/apache/hadoop/hbase/client/MultiPut.java @@ -52,7 +52,7 @@ public class MultiPut implements Writable { } return size; } - + public void add(byte[] regionName, Put aPut) { List rsput = puts.get(regionName); if (rsput == null) { diff --git a/src/java/org/apache/hadoop/hbase/client/MultiPutResponse.java b/src/java/org/apache/hadoop/hbase/client/MultiPutResponse.java index 198c964..bd7cc80 100644 --- a/src/java/org/apache/hadoop/hbase/client/MultiPutResponse.java +++ b/src/java/org/apache/hadoop/hbase/client/MultiPutResponse.java @@ -46,7 +46,7 @@ public class MultiPutResponse implements Writable { public Integer getAnswer(byte[] region) { return answers.get(region); } - + @Override public void write(DataOutput out) throws IOException { out.writeInt(answers.size()); diff --git a/src/java/org/apache/hadoop/hbase/client/Put.java b/src/java/org/apache/hadoop/hbase/client/Put.java index 1a189d1..aceaac7 100644 --- a/src/java/org/apache/hadoop/hbase/client/Put.java +++ b/src/java/org/apache/hadoop/hbase/client/Put.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; -/** +/** * Used to perform Put operations for a single row. *

* To perform a Put, instantiate a Put object with the row to insert to and @@ -49,18 +49,18 @@ public class Put implements HeapSize, Writable, Row, Comparable { private long timestamp = HConstants.LATEST_TIMESTAMP; private long lockId = -1L; private boolean writeToWAL = true; - + private Map> familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); - + private static final long OVERHEAD = ClassSize.align( - ClassSize.OBJECT + ClassSize.REFERENCE + - 2 * Bytes.SIZEOF_LONG + Bytes.SIZEOF_BOOLEAN + + ClassSize.OBJECT + ClassSize.REFERENCE + + 2 * Bytes.SIZEOF_LONG + Bytes.SIZEOF_BOOLEAN + ClassSize.REFERENCE + ClassSize.TREEMAP); - + /** Constructor for Writable. DO NOT USE */ public Put() {} - + /** * Create a Put operation for the specified row. * @param row row key @@ -92,7 +92,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { this(putToCopy.getRow(), putToCopy.getRowLock()); this.timestamp = putToCopy.timestamp; this.writeToWAL = putToCopy.writeToWAL; - this.familyMap = + this.familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); for(Map.Entry> entry : putToCopy.getFamilyMap().entrySet()) { @@ -111,7 +111,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { } /** - * Add the specified column and value, with the specified timestamp as + * Add the specified column and value, with the specified timestamp as * its version to this Put operation. * @param column Old style column name with family and qualifier put together * with a colon. @@ -124,7 +124,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { } /** - * Add the specified column and value, with the specified timestamp as + * Add the specified column and value, with the specified timestamp as * its version to this Put operation. * @param family family name * @param qualifier column qualifier @@ -138,9 +138,9 @@ public class Put implements HeapSize, Writable, Row, Comparable { familyMap.put(kv.getFamily(), list); return this; } - + /** - * Add the specified KeyValue to this Put operation. Operation assumes that + * Add the specified KeyValue to this Put operation. Operation assumes that * the passed KeyValue is immutable and its backing array will not be modified * for the duration of this Put. * @param kv @@ -149,12 +149,12 @@ public class Put implements HeapSize, Writable, Row, Comparable { byte [] family = kv.getFamily(); List list = getKeyValueList(family); //Checking that the row of the kv is the same as the put - int res = Bytes.compareTo(this.row, 0, row.length, + int res = Bytes.compareTo(this.row, 0, row.length, kv.getBuffer(), kv.getRowOffset(), kv.getRowLength()); if(res != 0) { - throw new IOException("The row in the recently added KeyValue " + - Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(), - kv.getRowLength()) + " doesn't match the original one " + + throw new IOException("The row in the recently added KeyValue " + + Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(), + kv.getRowLength()) + " doesn't match the original one " + Bytes.toStringBinary(this.row)); } list.add(kv); @@ -164,7 +164,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { /** * Create a KeyValue with this objects row key and the Put identifier. 
- * + * * @param family * @param qualifier * @param ts @@ -173,15 +173,15 @@ public class Put implements HeapSize, Writable, Row, Comparable { */ private KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value) { - return new KeyValue(this.row, family, qualifier, ts, KeyValue.Type.Put, + return new KeyValue(this.row, family, qualifier, ts, KeyValue.Type.Put, value); } - + /** - * A convenience method to determine if this object's familyMap contains + * A convenience method to determine if this object's familyMap contains * a value assigned to the given family & qualifier. * Both given arguments must match the KeyValue object to return true. - * + * * @param family * @param qualifier * @return returns true if the given family and qualifier already has an @@ -190,12 +190,12 @@ public class Put implements HeapSize, Writable, Row, Comparable { public boolean has(byte [] family, byte [] qualifier) { return has(family, qualifier, this.timestamp, new byte[0], true, true); } - + /** - * A convenience method to determine if this object's familyMap contains + * A convenience method to determine if this object's familyMap contains * a value assigned to the given family, qualifier and timestamp. * All 3 given arguments must match the KeyValue object to return true. - * + * * @param family * @param qualifier * @param ts @@ -205,12 +205,12 @@ public class Put implements HeapSize, Writable, Row, Comparable { public boolean has(byte [] family, byte [] qualifier, long ts) { return has(family, qualifier, ts, new byte[0], false, true); } - + /** - * A convenience method to determine if this object's familyMap contains + * A convenience method to determine if this object's familyMap contains * a value assigned to the given family, qualifier and timestamp. * All 3 given arguments must match the KeyValue object to return true. - * + * * @param family * @param qualifier * @param value @@ -220,38 +220,38 @@ public class Put implements HeapSize, Writable, Row, Comparable { public boolean has(byte [] family, byte [] qualifier, byte [] value) { return has(family, qualifier, this.timestamp, value, true, false); } - + /** - * A convenience method to determine if this object's familyMap contains + * A convenience method to determine if this object's familyMap contains * the given value assigned to the given family, qualifier and timestamp. * All 4 given arguments must match the KeyValue object to return true. - * + * * @param family * @param qualifier * @param ts * @param value - * @return returns true if the given family, qualifier timestamp and value + * @return returns true if the given family, qualifier timestamp and value * already has an existing KeyValue object in the family map. */ public boolean has(byte [] family, byte [] qualifier, long ts, byte [] value) { return has(family, qualifier, ts, value, false, false); } - + /** - * Private method to determine if this object's familyMap contains + * Private method to determine if this object's familyMap contains * the given value assigned to the given family, qualifier and timestamp * respecting the 2 boolean arguments - * + * * @param family * @param qualifier * @param ts * @param value * @param ignoreTS * @param ignoreValue - * @return returns true if the given family, qualifier timestamp and value + * @return returns true if the given family, qualifier timestamp and value * already has an existing KeyValue object in the family map. 
*/ - private boolean has(byte [] family, byte [] qualifier, long ts, byte [] value, + private boolean has(byte [] family, byte [] qualifier, long ts, byte [] value, boolean ignoreTS, boolean ignoreValue) { List list = getKeyValueList(family); if (list.size() == 0 ) { @@ -284,13 +284,13 @@ public class Put implements HeapSize, Writable, Row, Comparable { } return false; } - + /** * Returns a list of all KeyValue objects with matching column family and qualifier. - * + * * @param family * @param qualifier - * @return a list of KeyValue objects with the matching family and qualifier, + * @return a list of KeyValue objects with the matching family and qualifier, * returns an empty list if one doesnt exist for the given family. */ public List get(byte[] family, byte[] qualifier) { @@ -306,7 +306,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { /** * Creates an empty list if one doesnt exist for the given column family * or else it returns the associated list of KeyValue objects. - * + * * @param family * @return a list of KeyValue objects, returns an empty list if one doesnt exist. */ @@ -317,7 +317,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { } return list; } - + /** * Method for retrieving the put's familyMap * @return familyMap @@ -325,15 +325,15 @@ public class Put implements HeapSize, Writable, Row, Comparable { public Map> getFamilyMap() { return this.familyMap; } - + /** * Method for retrieving the put's row - * @return row + * @return row */ public byte [] getRow() { return this.row; } - + /** * Method for retrieving the put's RowLock * @return RowLock @@ -341,7 +341,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { public RowLock getRowLock() { return new RowLock(this.row, this.lockId); } - + /** * Method for retrieving the put's lockId * @return lockId @@ -349,7 +349,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { public long getLockId() { return this.lockId; } - + /** * Method to check if the familyMap is empty * @return true if empty, false otherwise @@ -357,14 +357,14 @@ public class Put implements HeapSize, Writable, Row, Comparable { public boolean isEmpty() { return familyMap.isEmpty(); } - + /** * @return Timestamp */ public long getTimeStamp() { return this.timestamp; } - + /** * Method for setting the timestamp * NOTE - This does not affect the timestamp for values previously added to this Put. @@ -375,9 +375,9 @@ public class Put implements HeapSize, Writable, Row, Comparable { this.timestamp = timestamp; return this; } - + /** - * @return the number of different families included in this put + * @return the number of different families included in this put */ public int numFamilies() { return familyMap.size(); @@ -393,14 +393,14 @@ public class Put implements HeapSize, Writable, Row, Comparable { } return size; } - + /** * @return true if edits should be applied to WAL, false if not */ public boolean getWriteToWAL() { return this.writeToWAL; } - + /** * Set whether this Put should be written to the WAL or not. * Not writing the WAL means you may lose edits on server crash. 
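The Put hunks above cover add(), the has() lookups, and setWriteToWAL(). A small sketch tying them together, not part of the patch, with invented row/family/qualifier names; skipping the WAL trades durability for speed exactly as the comment above warns. This one needs no running cluster since a Put is built entirely client-side.

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  public static void main(String[] args) {
    byte[] cf = Bytes.toBytes("cf");
    Put put = new Put(Bytes.toBytes("row1"));
    put.add(cf, Bytes.toBytes("a"), Bytes.toBytes("v1"));              // latest timestamp
    put.add(cf, Bytes.toBytes("b"), 1234567890L, Bytes.toBytes("v2")); // explicit timestamp

    // has() checks the family map that add() populates (family+qualifier variant above).
    System.out.println("has cf:a = " + put.has(cf, Bytes.toBytes("a")));

    put.setWriteToWAL(false);   // lose-on-crash trade-off described in the hunk above
    System.out.println("cells=" + put.size()
        + " families=" + put.numFamilies()
        + " heap=" + put.heapSize());
    // On a live cluster the Put would then be handed to HTable.put(put).
  }
}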
@@ -409,9 +409,9 @@ public class Put implements HeapSize, Writable, Row, Comparable { public void setWriteToWAL(boolean write) { this.writeToWAL = write; } - + /** - * @return String + * @return String */ @Override public String toString() { @@ -443,40 +443,40 @@ public class Put implements HeapSize, Writable, Row, Comparable { sb.append("}"); return sb.toString(); } - + public int compareTo(Row p) { return Bytes.compareTo(this.getRow(), p.getRow()); } - + //HeapSize public long heapSize() { long heapsize = OVERHEAD; //Adding row heapsize += ClassSize.align(ClassSize.ARRAY + this.row.length); - + //Adding map overhead - heapsize += + heapsize += ClassSize.align(this.familyMap.size() * ClassSize.MAP_ENTRY); for(Map.Entry> entry : this.familyMap.entrySet()) { //Adding key overhead - heapsize += + heapsize += ClassSize.align(ClassSize.ARRAY + entry.getKey().length); - + //This part is kinds tricky since the JVM can reuse references if you //store the same value, but have a good match with SizeOf at the moment //Adding value overhead heapsize += ClassSize.align(ClassSize.ARRAYLIST); int size = entry.getValue().size(); - heapsize += ClassSize.align(ClassSize.ARRAY + + heapsize += ClassSize.align(ClassSize.ARRAY + size * ClassSize.REFERENCE); - + for(KeyValue kv : entry.getValue()) { heapsize += kv.heapSize(); } } return ClassSize.align((int)heapsize); } - + //Writable public void readFields(final DataInput in) throws IOException { @@ -502,7 +502,7 @@ public class Put implements HeapSize, Writable, Row, Comparable { this.familyMap.put(family, keys); } } - + public void write(final DataOutput out) throws IOException { Bytes.writeByteArray(out, this.row); diff --git a/src/java/org/apache/hadoop/hbase/client/Result.java b/src/java/org/apache/hadoop/hbase/client/Result.java index 83ec906..5d311a9 100644 --- a/src/java/org/apache/hadoop/hbase/client/Result.java +++ b/src/java/org/apache/hadoop/hbase/client/Result.java @@ -41,32 +41,32 @@ import org.apache.hadoop.io.Writable; /** * Single row result of a {@link Get} or {@link Scan} query.

- * + * * Convenience methods are available that return various {@link Map} * structures and values directly.

- * - * To get a complete mapping of all cells in the Result, which can include + * + * To get a complete mapping of all cells in the Result, which can include * multiple families and multiple versions, use {@link #getMap()}.

- * - * To get a mapping of each family to its columns (qualifiers and values), + * + * To get a mapping of each family to its columns (qualifiers and values), * including only the latest version of each, use {@link #getNoVersionMap()}. - * - * To get a mapping of qualifiers to latest values for an individual family use + * + * To get a mapping of qualifiers to latest values for an individual family use * {@link #getFamilyMap(byte[])}.

- * + * * To get the latest value for a specific family and qualifier use {@link #getValue(byte[], byte[])}. * * A Result is backed by an array of {@link KeyValue} objects, each representing * an HBase cell defined by the row, family, qualifier, timestamp, and value.

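As a rough sketch, the accessors described above combine like this when reading back a single row (family and qualifier names are placeholders; assumes java.util.NavigableMap plus the client and Bytes imports from the Put sketch earlier):

    Get get = new Get(Bytes.toBytes("row1"));
    Result r = table.get(get);
    byte [] latest = r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("qual"));   // newest value of one cell
    NavigableMap<byte[], byte[]> latestPerQualifier =
      r.getFamilyMap(Bytes.toBytes("cf"));                                     // qualifier -> latest value
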
- * + * * The underlying {@link KeyValue} objects can be accessed through the methods * {@link #sorted()} and {@link #list()}. Each KeyValue can then be accessed - * through {@link KeyValue#getRow()}, {@link KeyValue#getFamily()}, {@link KeyValue#getQualifier()}, + * through {@link KeyValue#getRow()}, {@link KeyValue#getFamily()}, {@link KeyValue#getQualifier()}, * {@link KeyValue#getTimestamp()}, and {@link KeyValue#getValue()}. */ public class Result implements Writable { private KeyValue [] kvs = null; - private NavigableMap>> familyMap = null; // We're not using java serialization. Transient here is just a marker to say // that this is where we cache row if we're ever asked for it. @@ -95,7 +95,7 @@ public class Result implements Writable { public Result(List kvs) { this(kvs.toArray(new KeyValue[0])); } - + /** * Instantiate a Result from the specified raw binary format. * @param bytes raw binary format of Result @@ -131,7 +131,7 @@ public class Result implements Writable { /** * Create a sorted list of the KeyValue's in this result. - * + * * @return The sorted list of KeyValue's. */ public List list() { @@ -159,10 +159,10 @@ public class Result implements Writable { /** * Map of families to all versions of its qualifiers and values. *

- * Returns a three level Map of the form: + * Returns a three level Map of the form: * Map<family, Map<qualifier, Map<timestamp, value>>> *

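Walking that three-level structure for one column might look like the following sketch (family and qualifier are assumed to exist in the row; needs java.util.Map and java.util.NavigableMap):

    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> all = r.getMap();
    NavigableMap<Long, byte[]> versions =
      all.get(Bytes.toBytes("cf")).get(Bytes.toBytes("qual"));   // either lookup may return null if absent
    for (Map.Entry<Long, byte[]> e : versions.entrySet()) {
      long ts = e.getKey();          // version timestamp
      byte [] value = e.getValue();  // value stored at that timestamp
    }
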
- * Note: All other map returning methods make use of this map internally. + * Note: All other map returning methods make use of this map internally. * @return map from families to qualifiers to versions */ public NavigableMap>> getMap() { @@ -178,7 +178,7 @@ public class Result implements Writable { for(KeyValue kv : this.kvs) { SplitKeyValue splitKV = kv.split(); byte [] family = splitKV.getFamily(); - NavigableMap> columnMap = + NavigableMap> columnMap = familyMap.get(family); if(columnMap == null) { columnMap = new TreeMap> @@ -217,15 +217,15 @@ public class Result implements Writable { if(isEmpty()) { return null; } - NavigableMap> returnMap = + NavigableMap> returnMap = new TreeMap>(Bytes.BYTES_COMPARATOR); - for(Map.Entry>> + for(Map.Entry>> familyEntry : familyMap.entrySet()) { - NavigableMap qualifierMap = + NavigableMap qualifierMap = new TreeMap(Bytes.BYTES_COMPARATOR); for(Map.Entry> qualifierEntry : familyEntry.getValue().entrySet()) { - byte [] value = + byte [] value = qualifierEntry.getValue().get(qualifierEntry.getValue().firstKey()); qualifierMap.put(qualifierEntry.getKey(), value); } @@ -247,16 +247,16 @@ public class Result implements Writable { if(isEmpty()) { return null; } - NavigableMap returnMap = + NavigableMap returnMap = new TreeMap(Bytes.BYTES_COMPARATOR); - NavigableMap> qualifierMap = + NavigableMap> qualifierMap = familyMap.get(family); if(qualifierMap == null) { return returnMap; } - for(Map.Entry> entry : + for(Map.Entry> entry : qualifierMap.entrySet()) { - byte [] value = + byte [] value = entry.getValue().get(entry.getValue().firstKey()); returnMap.put(entry.getKey(), value); } @@ -319,13 +319,13 @@ public class Result implements Writable { } return versionMap.firstEntry(); } - + private NavigableMap getVersionMap( NavigableMap> qualifierMap, byte [] qualifier) { return qualifier != null? qualifierMap.get(qualifier): qualifierMap.get(new byte[0]); } - + /** * Get the latest version of the specified column, * using

family:qualifier
notation. @@ -340,7 +340,7 @@ public class Result implements Writable { return null; } } - + /** * Checks for existence of the specified column. * @param family family name @@ -365,7 +365,7 @@ public class Result implements Writable { } return true; } - + /** * Returns this Result in the old return format, {@link RowResult}. * @return a RowResult @@ -376,7 +376,7 @@ public class Result implements Writable { } return RowResult.createRowResult(Arrays.asList(kvs)); } - + /** * Returns the value of the first column in the Result. * @return value of the first column @@ -387,19 +387,19 @@ public class Result implements Writable { } return kvs[0].getValue(); } - + /** * Returns the raw binary encoding of this Result.

- * + * * Please note, there may be an offset into the underlying byte array of the - * returned ImmutableBytesWritable. Be sure to use both + * returned ImmutableBytesWritable. Be sure to use both * {@link ImmutableBytesWritable#get()} and {@link ImmutableBytesWritable#getOffset()} * @return pointer to raw binary of Result */ public ImmutableBytesWritable getBytes() { return this.bytes; } - + /** * Check if the underlying KeyValue [] is empty or not * @return true if empty @@ -410,7 +410,7 @@ public class Result implements Writable { } return this.kvs == null || this.kvs.length == 0; } - + /** * @return the size of the underlying KeyValue [] */ @@ -420,7 +420,7 @@ public class Result implements Writable { } return this.kvs == null? 0: this.kvs.length; } - + /** * @return String */ @@ -445,7 +445,7 @@ public class Result implements Writable { sb.append("}"); return sb.toString(); } - + //Writable public void readFields(final DataInput in) throws IOException { @@ -461,7 +461,7 @@ public class Result implements Writable { in.readFully(raw, 0, totalBuffer); bytes = new ImmutableBytesWritable(raw, 0, totalBuffer); } - + //Create KeyValue[] when needed private void readFields() { if (bytes == null) { @@ -480,7 +480,7 @@ public class Result implements Writable { } this.kvs = kvs.toArray(new KeyValue[kvs.size()]); } - + public void write(final DataOutput out) throws IOException { if(isEmpty()) { @@ -497,7 +497,7 @@ public class Result implements Writable { } } } - + public static void writeArray(final DataOutput out, Result [] results) throws IOException { if(results == null || results.length == 0) { @@ -528,7 +528,7 @@ public class Result implements Writable { } } } - + public static Result [] readArray(final DataInput in) throws IOException { int numResults = in.readInt(); @@ -555,7 +555,7 @@ public class Result implements Writable { offset += keyLen; } int totalLength = offset - initialOffset; - results[i] = new Result(new ImmutableBytesWritable(buf, initialOffset, + results[i] = new Result(new ImmutableBytesWritable(buf, initialOffset, totalLength)); } return results; diff --git a/src/java/org/apache/hadoop/hbase/client/ResultScanner.java b/src/java/org/apache/hadoop/hbase/client/ResultScanner.java index 28f6112..dfc9591 100644 --- a/src/java/org/apache/hadoop/hbase/client/ResultScanner.java +++ b/src/java/org/apache/hadoop/hbase/client/ResultScanner.java @@ -35,18 +35,18 @@ public interface ResultScanner extends Closeable, Iterable { * @return Result object if there is another row, null if the scanner is * exhausted. * @throws IOException - */ + */ public Result next() throws IOException; - + /** * @param nbRows number of rows to return * @return Between zero and nbRows Results * @throws IOException */ public Result [] next(int nbRows) throws IOException; - + /** * Closes the scanner and releases any resources it has allocated */ - public void close(); + public void close(); } \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java b/src/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java index 6b659aa..d0773d9 100644 --- a/src/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java +++ b/src/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java @@ -20,10 +20,10 @@ import java.util.List; import org.apache.hadoop.hbase.util.Bytes; -/** +/** * Exception thrown by HTable methods when an attempt to do something (like - * commit changes) fails after a bunch of retries. 
- */ + * commit changes) fails after a bunch of retries. + */ public class RetriesExhaustedException extends IOException { private static final long serialVersionUID = 1876775844L; @@ -31,14 +31,14 @@ public class RetriesExhaustedException extends IOException { super(msg); } - /** + /** * Create a new RetriesExhaustedException from the list of prior failures. * @param serverName name of HRegionServer * @param regionName name of region * @param row The row we were pursuing when we ran out of retries * @param numTries The number of tries we made * @param exceptions List of exceptions that failed before giving up - */ + */ public RetriesExhaustedException(String serverName, final byte [] regionName, final byte [] row, int numTries, List exceptions) { super(getMessage(serverName, regionName, row, numTries, exceptions)); diff --git a/src/java/org/apache/hadoop/hbase/client/RowLock.java b/src/java/org/apache/hadoop/hbase/client/RowLock.java index 3c8c461..5abb3a4 100644 --- a/src/java/org/apache/hadoop/hbase/client/RowLock.java +++ b/src/java/org/apache/hadoop/hbase/client/RowLock.java @@ -35,7 +35,7 @@ public class RowLock { this.row = row; this.lockId = lockId; } - + /** * Creates a RowLock with only a lock id * @param lockId diff --git a/src/java/org/apache/hadoop/hbase/client/Scan.java b/src/java/org/apache/hadoop/hbase/client/Scan.java index eddefad..b28b32e 100644 --- a/src/java/org/apache/hadoop/hbase/client/Scan.java +++ b/src/java/org/apache/hadoop/hbase/client/Scan.java @@ -55,7 +55,7 @@ import org.apache.hadoop.io.WritableFactories; *

* To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}. *

- * To further define the scope of what to get when scanning, perform additional + * To further define the scope of what to get when scanning, perform additional * methods as outlined below. *

* To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} @@ -73,12 +73,12 @@ import org.apache.hadoop.io.WritableFactories; * To limit the number of versions of each column to be returned, execute * {@link #setMaxVersions(int) setMaxVersions}. *

- * To limit the maximum number of values returned for each call to next(), execute + * To limit the maximum number of values returned for each call to next(), execute * {@link #setBatch(int) setBatch}. *

* To add a filter, execute {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}. *

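Put together, a typical client-side use of these setters looks roughly like this (start/stop rows, family and sizes are arbitrary; table as in the earlier sketches):

    Scan scan = new Scan(Bytes.toBytes("row-0000"), Bytes.toBytes("row-9999"));
    scan.addFamily(Bytes.toBytes("cf"));
    scan.setMaxVersions(2);
    scan.setCaching(100);                 // rows fetched per round trip, for this scan only
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        // process r
      }
    } finally {
      scanner.close();                    // releases the server-side scanner
    }
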
- * Expert: To explicitly disable server-side block caching for this scan, + * Expert: To explicitly disable server-side block caching for this scan, * execute {@link #setCacheBlocks(boolean)}. */ public class Scan implements Writable { @@ -105,7 +105,7 @@ public class Scan implements Writable { // additional data for the scan protected Map values = new HashMap(); - + /** * Create a Scan operation across all rows. */ @@ -115,7 +115,7 @@ public class Scan implements Writable { this(startRow); this.filter = filter; } - + /** * Create a Scan operation starting at the specified row. *

@@ -126,7 +126,7 @@ public class Scan implements Writable { public Scan(byte [] startRow) { this.startRow = startRow; } - + /** * Create a Scan operation for the range of rows specified. * @param startRow row to start scanner at or after (inclusive) @@ -136,10 +136,10 @@ public class Scan implements Writable { this.startRow = startRow; this.stopRow = stopRow; } - + /** * Creates a new instance of this class while copying all values. - * + * * @param scan The scan instance to copy from. * @throws IOException When copying the values fails. */ @@ -197,7 +197,7 @@ public class Scan implements Writable { familyMap.put(family, EMPTY_NAVIGABLE_SET); return this; } - + /** * Get the column from the specified family with the specified qualifier. *

@@ -218,12 +218,12 @@ public class Scan implements Writable { } /** - * Parses a combined family and qualifier and adds either both or just the - * family in case there is not qualifier. This assumes the older colon + * Parses a combined family and qualifier and adds either both or just the + * family in case there is not qualifier. This assumes the older colon * divided notation, e.g. "data:contents" or "meta:". *

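In other words, under that notation the following calls behave the same way (using the example families from the sentence above):

    scan.addColumn(Bytes.toBytes("data:contents"));                    // old colon-delimited form
    scan.addColumn(Bytes.toBytes("data"), Bytes.toBytes("contents"));  // equivalent two-argument form
    scan.addColumn(Bytes.toBytes("meta:"));                            // no qualifier: falls back to addFamily
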
* Note: It will throw an error when the colon is missing. - * + * @param familyAndQualifier * @return A reference to this instance. * @throws IllegalArgumentException When the colon is missing. */ @@ -231,18 +231,18 @@ public class Scan implements Writable { public Scan addColumn(byte[] familyAndQualifier) { byte [][] fq = KeyValue.parseColumn(familyAndQualifier); if (fq.length > 1 && fq[1] != null && fq[1].length > 0) { - addColumn(fq[0], fq[1]); + addColumn(fq[0], fq[1]); } else { addFamily(fq[0]); } return this; } - + /** * Adds an array of columns specified using old format, family:qualifier. *

* Overrides previous calls to addFamily for any families in the input. - * + * * @param columns array of columns, formatted as

family:qualifier
*/ public Scan addColumns(byte [][] columns) { @@ -257,7 +257,7 @@ public class Scan implements Writable { * command line) column definitions, e.g. "data:contents mime:". The columns * must be space delimited and always have a colon (":") to denote family * and qualifier. - * + * * @param columns The columns to parse. * @return A reference to this instance. */ @@ -270,15 +270,15 @@ public class Scan implements Writable { } /** - * Helps to convert the binary column families and qualifiers to a text + * Helps to convert the binary column families and qualifiers to a text * representation, e.g. "data:mimetype data:contents meta:". Binary values * are properly encoded using {@link Bytes#toBytesBinary(String)}. - * + * * @return The columns in an old style string format. */ public String getInputColumns() { StringBuilder cols = new StringBuilder(); - for (Map.Entry> e : + for (Map.Entry> e : familyMap.entrySet()) { byte[] fam = e.getKey(); if (cols.length() > 0) { @@ -297,14 +297,14 @@ public class Scan implements Writable { cols.append(Bytes.toStringBinary(qual)); } } else { - // only add the family but with old style delimiter + // only add the family but with old style delimiter cols.append(Bytes.toStringBinary(fam)); cols.append(":"); } } return cols.toString(); } - + /** * Get versions of columns only within the specified timestamp range, * [minStamp, maxStamp). Note, default maximum versions to return is 1. If @@ -321,7 +321,7 @@ public class Scan implements Writable { tr = new TimeRange(minStamp, maxStamp); return this; } - + /** * Get versions of columns with the specified timestamp. Note, default maximum * versions to return is 1. If your time range spans more than one version @@ -348,7 +348,7 @@ public class Scan implements Writable { this.startRow = startRow; return this; } - + /** * Set the stop row. * @param stopRow @@ -357,7 +357,7 @@ public class Scan implements Writable { this.stopRow = stopRow; return this; } - + /** * Get all available versions. */ @@ -405,16 +405,16 @@ public class Scan implements Writable { /** * Set an old-style filter interface to use. Note: not all features of the * old style filters are supported. - * + * * @deprecated * @param filter - * @return The scan instance. + * @return The scan instance. 
*/ public Scan setOldFilter(RowFilterInterface filter) { oldFilter = filter; return this; } - + /** * Setting the familyMap * @param familyMap @@ -423,7 +423,7 @@ public class Scan implements Writable { this.familyMap = familyMap; return this; } - + /** * Getting the familyMap * @return familyMap @@ -431,7 +431,7 @@ public class Scan implements Writable { public Map> getFamilyMap() { return this.familyMap; } - + /** * @return the number of families in familyMap */ @@ -448,7 +448,7 @@ public class Scan implements Writable { public boolean hasFamilies() { return !this.familyMap.isEmpty(); } - + /** * @return the keys of the familyMap */ @@ -458,7 +458,7 @@ public class Scan implements Writable { } return null; } - + /** * @return the startrow */ @@ -472,13 +472,13 @@ public class Scan implements Writable { public byte [] getStopRow() { return this.stopRow; } - + /** * @return the max number of versions to fetch */ public int getMaxVersions() { return this.maxVersions; - } + } /** * @return maximum number of values to return for a single call to next() @@ -492,15 +492,15 @@ public class Scan implements Writable { */ public int getCaching() { return this.caching; - } + } /** * @return TimeRange */ public TimeRange getTimeRange() { return this.tr; - } - + } + /** * @return RowFilter */ @@ -516,28 +516,28 @@ public class Scan implements Writable { public RowFilterInterface getOldFilter() { return oldFilter; } - + /** * @return true is a filter has been specified, false if not */ public boolean hasFilter() { return filter != null || oldFilter != null; } - + /** * Set whether blocks should be cached for this Scan. *

* This is true by default. When true, default settings of the table and * family are used (this will never override caching blocks if the block * cache is disabled for that family or entirely). - * + * * @param cacheBlocks if false, default settings are overridden and blocks * will not be cached */ public void setCacheBlocks(boolean cacheBlocks) { this.cacheBlocks = cacheBlocks; } - + /** * Get whether blocks should be cached for this Scan. * @return true if default caching should be used, false if blocks should not @@ -620,7 +620,7 @@ public class Scan implements Writable { public void remove(final byte [] key) { values.remove(new ImmutableBytesWritable(key)); } - + /** * @return String */ @@ -674,7 +674,7 @@ public class Scan implements Writable { } } sb.append("}"); - + for (Map.Entry e: values.entrySet()) { String key = Bytes.toString(e.getKey().get()); @@ -691,7 +691,7 @@ public class Scan implements Writable { return sb.toString(); } - + @SuppressWarnings("unchecked") private Writable createForName(String className) { try { @@ -700,9 +700,9 @@ public class Scan implements Writable { return WritableFactories.newInstance(clazz, new Configuration()); } catch (ClassNotFoundException e) { throw new RuntimeException("Can't find class " + className); - } + } } - + //Writable public void readFields(final DataInput in) throws IOException { @@ -730,7 +730,7 @@ public class Scan implements Writable { this.tr = new TimeRange(); tr.readFields(in); int numFamilies = in.readInt(); - this.familyMap = + this.familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); for(int i=0; i { * @return RowResult object if there is another row, null if the scanner is * exhausted. * @throws IOException - */ + */ public RowResult next() throws IOException; - + /** * @param nbRows number of rows to return * @return Between zero and nbRows Results * @throws IOException */ public RowResult [] next(int nbRows) throws IOException; - + /** * Closes the scanner and releases any resources it has allocated */ diff --git a/src/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/src/java/org/apache/hadoop/hbase/client/ScannerCallable.java index 4b99052..c2ffcd9 100644 --- a/src/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ b/src/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -51,7 +51,7 @@ public class ScannerCallable extends ServerCallable { super(connection, tableName, scan.getStartRow()); this.scan = scan; } - + /** * @param reload * @throws IOException @@ -96,7 +96,7 @@ public class ScannerCallable extends ServerCallable { } return null; } - + private void close() { if (this.scannerId == -1L) { return; @@ -113,18 +113,18 @@ public class ScannerCallable extends ServerCallable { return this.server.openScanner(this.location.getRegionInfo().getRegionName(), this.scan); } - + protected Scan getScan() { return scan; } - + /** * Call this when the next invocation of call should close the scanner */ public void setClose() { this.closed = true; } - + /** * @return the HRegionInfo for the current region */ diff --git a/src/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java b/src/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java index 7b31935..40df867 100644 --- a/src/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java +++ b/src/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java @@ -23,7 +23,7 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.DoNotRetryIOException; /** - * Thrown when a scanner has timed out. 
+ * Thrown when a scanner has timed out. */ public class ScannerTimeoutException extends DoNotRetryIOException { diff --git a/src/java/org/apache/hadoop/hbase/client/ServerCallable.java b/src/java/org/apache/hadoop/hbase/client/ServerCallable.java index a26a96a..a7b3cf8 100644 --- a/src/java/org/apache/hadoop/hbase/client/ServerCallable.java +++ b/src/java/org/apache/hadoop/hbase/client/ServerCallable.java @@ -47,9 +47,9 @@ public abstract class ServerCallable implements Callable { this.tableName = tableName; this.row = row; } - + /** - * + * * @param reload set this to true if connection should re-find the region * @throws IOException */ @@ -65,7 +65,7 @@ public abstract class ServerCallable implements Callable { } return location.getServerAddress().toString(); } - + /** @return the region name */ public byte[] getRegionName() { if (location == null) { @@ -73,7 +73,7 @@ public abstract class ServerCallable implements Callable { } return location.getRegionInfo().getRegionName(); } - + /** @return the row */ public byte [] getRow() { return row; diff --git a/src/java/org/apache/hadoop/hbase/client/ServerConnection.java b/src/java/org/apache/hadoop/hbase/client/ServerConnection.java index 1edfb60..630b266 100644 --- a/src/java/org/apache/hadoop/hbase/client/ServerConnection.java +++ b/src/java/org/apache/hadoop/hbase/client/ServerConnection.java @@ -32,9 +32,9 @@ public interface ServerConnection extends HConnection { * @param rootRegion */ public void setRootRegionLocation(HRegionLocation rootRegion); - + /** - * Unset the root region location in the connection. Called by + * Unset the root region location in the connection. Called by * ServerManager.processRegionClose. */ public void unsetRootRegionLocation(); diff --git a/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java b/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java index 2519a07..543badf 100644 --- a/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java +++ b/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java @@ -25,14 +25,14 @@ import org.apache.hadoop.hbase.HRegionInfo; class UnmodifyableHRegionInfo extends HRegionInfo { /* * Creates an unmodifyable copy of an HRegionInfo - * + * * @param info */ UnmodifyableHRegionInfo(HRegionInfo info) { super(info); this.tableDesc = new UnmodifyableHTableDescriptor(info.getTableDesc()); } - + /** * @param split set split status */ diff --git a/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java b/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java index 6ed3769..4850927 100644 --- a/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java +++ b/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java @@ -40,8 +40,8 @@ public class UnmodifyableHTableDescriptor extends HTableDescriptor { UnmodifyableHTableDescriptor(final HTableDescriptor desc) { super(desc.getName(), getUnmodifyableFamilies(desc), desc.getValues()); } - - + + /* * @param desc * @return Families as unmodifiable array. 
@@ -74,7 +74,7 @@ public class UnmodifyableHTableDescriptor extends HTableDescriptor { public HColumnDescriptor removeFamily(final byte [] column) { throw new UnsupportedOperationException("HTableDescriptor is read-only"); } - + /** * @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean) */ @@ -120,6 +120,6 @@ public class UnmodifyableHTableDescriptor extends HTableDescriptor { // */ // @Override // public void addIndex(IndexSpecification index) { -// throw new UnsupportedOperationException("HTableDescriptor is read-only"); +// throw new UnsupportedOperationException("HTableDescriptor is read-only"); // } } diff --git a/src/java/org/apache/hadoop/hbase/filter/BinaryComparator.java b/src/java/org/apache/hadoop/hbase/filter/BinaryComparator.java index e127632..1491e19 100644 --- a/src/java/org/apache/hadoop/hbase/filter/BinaryComparator.java +++ b/src/java/org/apache/hadoop/hbase/filter/BinaryComparator.java @@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.filter; /** - * A binary comparator which lexicographically compares against the specified + * A binary comparator which lexicographically compares against the specified * byte array using {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. */ public class BinaryComparator extends WritableByteArrayComparable { diff --git a/src/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java b/src/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java index 1db00b0..70f9855 100644 --- a/src/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java +++ b/src/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.util.Bytes; * A comparator which compares against a specified byte array, but only compares * up to the length of this byte array. For the rest it is similar to * {@link BinaryComparator}. - */ + */ public class BinaryPrefixComparator extends WritableByteArrayComparable { /** Nullary constructor for Writable, do not use */ @@ -42,7 +42,7 @@ public class BinaryPrefixComparator extends WritableByteArrayComparable { @Override public int compareTo(byte [] value) { - return Bytes.compareTo(this.value, 0, this.value.length, value, 0, + return Bytes.compareTo(this.value, 0, this.value.length, value, 0, this.value.length); } diff --git a/src/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java b/src/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java index 4d12b55..d050bf7 100644 --- a/src/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.KeyValue; /** * A filter, based on the ColumnCountGetFilter, takes two arguments: limit and offset. - * This filter can be used for row-based indexing, where references to other tables are stored across many columns, + * This filter can be used for row-based indexing, where references to other tables are stored across many columns, * in order to efficient lookups and paginated results for end users. */ public class ColumnPaginationFilter implements Filter @@ -57,11 +57,11 @@ public class ColumnPaginationFilter implements Filter public ReturnCode filterKeyValue(KeyValue v) { - if(count >= offset + limit) + if(count >= offset + limit) { return ReturnCode.NEXT_ROW; } - + ReturnCode code = count < offset ? 
ReturnCode.SKIP : ReturnCode.INCLUDE; count++; return code; diff --git a/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java b/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java index fcd67a2..e307ddd 100644 --- a/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java @@ -34,7 +34,7 @@ import org.apache.hadoop.io.ObjectWritable; /** * This filter is a no-op in HBase 0.20. Don't use it. - * + * * This filter is used to filter based on the value of a given column. It takes * an operator (equal, greater, not equal, etc) and either a byte [] value or a * byte [] comparator. If we have a byte [] value then we just do a @@ -72,7 +72,7 @@ public class ColumnValueFilter implements RowFilterInterface { /** * Constructor. - * + * * @param columnName name of column * @param compareOp operator * @param value value to compare column values against @@ -81,14 +81,14 @@ public class ColumnValueFilter implements RowFilterInterface { final byte[] value) { this(columnName, compareOp, value, true); } - + /** * Constructor. - * + * * @param columnName name of column * @param compareOp operator * @param value value to compare column values against - * @param filterIfColumnMissing if true then we will filter rows that don't have the column. + * @param filterIfColumnMissing if true then we will filter rows that don't have the column. */ public ColumnValueFilter(final byte[] columnName, final CompareOp compareOp, final byte[] value, boolean filterIfColumnMissing) { @@ -100,7 +100,7 @@ public class ColumnValueFilter implements RowFilterInterface { /** * Constructor. - * + * * @param columnName name of column * @param compareOp operator * @param comparator Comparator to use. @@ -109,14 +109,14 @@ public class ColumnValueFilter implements RowFilterInterface { final WritableByteArrayComparable comparator) { this(columnName, compareOp, comparator, true); } - + /** * Constructor. - * + * * @param columnName name of column * @param compareOp operator * @param comparator Comparator to use. - * @param filterIfColumnMissing if true then we will filter rows that don't have the column. + * @param filterIfColumnMissing if true then we will filter rows that don't have the column. 
*/ public ColumnValueFilter(final byte[] columnName, final CompareOp compareOp, final WritableByteArrayComparable comparator, boolean filterIfColumnMissing) { @@ -134,7 +134,7 @@ public class ColumnValueFilter implements RowFilterInterface { return false; } - + public boolean filterColumn(final byte[] rowKey, final byte[] colKey, final byte[] data) { if (!filterIfColumnMissing) { @@ -143,7 +143,7 @@ public class ColumnValueFilter implements RowFilterInterface { if (!Arrays.equals(colKey, columnName)) { return false; } - return filterColumnValue(data, 0, data.length); + return filterColumnValue(data, 0, data.length); } @@ -186,7 +186,7 @@ public class ColumnValueFilter implements RowFilterInterface { throw new RuntimeException("Unknown Compare op " + compareOp.name()); } } - + public boolean filterAllRemaining() { return false; } @@ -196,7 +196,7 @@ public class ColumnValueFilter implements RowFilterInterface { return false; if (filterIfColumnMissing) { return !columns.containsKey(columnName); - } + } // Otherwise we must do the filter here Cell colCell = columns.get(columnName); if (colCell == null) { diff --git a/src/java/org/apache/hadoop/hbase/filter/CompareFilter.java b/src/java/org/apache/hadoop/hbase/filter/CompareFilter.java index 3360bfa..91937c9 100644 --- a/src/java/org/apache/hadoop/hbase/filter/CompareFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/CompareFilter.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.HbaseObjectWritable; /** - * This is a generic filter to be used to filter by comparison. It takes an + * This is a generic filter to be used to filter by comparison. It takes an * operator (equal, greater, not equal, etc) and a byte [] comparator. *

* To filter by row key, use {@link RowFilter}. @@ -60,7 +60,7 @@ public abstract class CompareFilter implements Filter { /** greater than */ GREATER; } - + protected CompareOp compareOp; protected WritableByteArrayComparable comparator; @@ -75,7 +75,7 @@ public abstract class CompareFilter implements Filter { * @param compareOp the compare op for row matching * @param comparator the comparator for row matching */ - public CompareFilter(final CompareOp compareOp, + public CompareFilter(final CompareOp compareOp, final WritableByteArrayComparable comparator) { this.compareOp = compareOp; this.comparator = comparator; @@ -101,7 +101,7 @@ public abstract class CompareFilter implements Filter { public ReturnCode filterKeyValue(KeyValue v) { return ReturnCode.INCLUDE; } - + public boolean filterRowKey(byte[] data, int offset, int length) { return false; } @@ -109,7 +109,7 @@ public abstract class CompareFilter implements Filter { public boolean filterRow() { return false; } - + public boolean filterAllRemaining() { return false; } @@ -117,8 +117,8 @@ public abstract class CompareFilter implements Filter { protected boolean doCompare(final CompareOp compareOp, final WritableByteArrayComparable comparator, final byte [] data, final int offset, final int length) { - int compareResult = - comparator.compareTo(Arrays.copyOfRange(data, offset, + int compareResult = + comparator.compareTo(Arrays.copyOfRange(data, offset, offset + length)); switch (compareOp) { case LESS: diff --git a/src/java/org/apache/hadoop/hbase/filter/Filter.java b/src/java/org/apache/hadoop/hbase/filter/Filter.java index e4ab857..4833084 100644 --- a/src/java/org/apache/hadoop/hbase/filter/Filter.java +++ b/src/java/org/apache/hadoop/hbase/filter/Filter.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.KeyValue; *

* {@link #reset()}
* {@link #filterAllRemaining()} -> true indicates scan is over, false, keep going on.
- * {@link #filterRowKey(byte[],int,int)} -> true to drop this row,
+ * {@link #filterRowKey(byte[],int,int)} -> true to drop this row,
* if false, we will also call
* {@link #filterKeyValue(KeyValue)} -> true to drop this key/value
*
  • {@link #filterRow()} -> last chance to drop entire row based on the sequence of diff --git a/src/java/org/apache/hadoop/hbase/filter/FilterList.java b/src/java/org/apache/hadoop/hbase/filter/FilterList.java index 108f579..582b987 100644 --- a/src/java/org/apache/hadoop/hbase/filter/FilterList.java +++ b/src/java/org/apache/hadoop/hbase/filter/FilterList.java @@ -34,12 +34,12 @@ import org.apache.hadoop.io.Writable; /** * Implementation of {@link Filter} that represents an ordered List of Filters - * which will be evaluated with a specified boolean operator {@link Operator#MUST_PASS_ALL} + * which will be evaluated with a specified boolean operator {@link Operator#MUST_PASS_ALL} * (!AND) or {@link Operator#MUST_PASS_ONE} (!OR). * Since you can use Filter Lists as children of Filter Lists, you can create a * hierarchy of filters to be evaluated. * Defaults to {@link Operator#MUST_PASS_ALL}. - *

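A short sketch of composing two of the filters touched by this patch (the page size and stop row are arbitrary; assumes java.util.ArrayList, java.util.List and the filter classes are imported):

    List<Filter> filters = new ArrayList<Filter>();
    filters.add(new PageFilter(25));                                 // cap the number of filter-passed rows
    filters.add(new InclusiveStopFilter(Bytes.toBytes("row-500")));  // stop at "row-500", inclusive
    Filter combined = new FilterList(FilterList.Operator.MUST_PASS_ALL, filters);
    scan.setFilter(combined);
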
    TODO: Fix creation of Configuration on serialization and deserialization. + *

    TODO: Fix creation of Configuration on serialization and deserialization. */ public class FilterList implements Filter { /** set operator */ @@ -65,7 +65,7 @@ public class FilterList implements Filter { /** * Constructor that takes a set of {@link Filter}s. The default operator * MUST_PASS_ALL is assumed. - * + * * @param rowFilters */ public FilterList(final List rowFilters) { @@ -74,7 +74,7 @@ public class FilterList implements Filter { /** * Constructor that takes an operator. - * + * * @param operator Operator to process filter set with. */ public FilterList(final Operator operator) { @@ -83,7 +83,7 @@ public class FilterList implements Filter { /** * Constructor that takes a set of {@link Filter}s and an operator. - * + * * @param operator Operator to process filter set with. * @param rowFilters Set of row filters. */ @@ -94,7 +94,7 @@ public class FilterList implements Filter { /** * Get the operator. - * + * * @return operator */ public Operator getOperator() { @@ -103,7 +103,7 @@ public class FilterList implements Filter { /** * Get the filters. - * + * * @return filters */ public List getFilters() { @@ -112,7 +112,7 @@ public class FilterList implements Filter { /** * Add a filter. - * + * * @param filter */ public void addFilter(Filter filter) { diff --git a/src/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java b/src/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java index 372ecd1..0675736 100644 --- a/src/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java @@ -63,7 +63,7 @@ public class InclusiveStopFilter implements Filter { // if stopRowKey is <= buffer, then true, filter row. int cmp = Bytes.compareTo(stopRowKey, 0, stopRowKey.length, buffer, offset, length); - + if(cmp < 0) { done = true; } diff --git a/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java b/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java index 4360b12..4a0b793 100644 --- a/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java @@ -36,7 +36,7 @@ public class InclusiveStopRowFilter extends StopRowFilter { /** * Constructor that takes a stopRowKey on which to filter - * + * * @param stopRowKey rowKey to filter on. */ public InclusiveStopRowFilter(final byte [] stopRowKey) { diff --git a/src/java/org/apache/hadoop/hbase/filter/PageFilter.java b/src/java/org/apache/hadoop/hbase/filter/PageFilter.java index a7007bf..d956d3a 100644 --- a/src/java/org/apache/hadoop/hbase/filter/PageFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/PageFilter.java @@ -50,7 +50,7 @@ public class PageFilter implements Filter { /** * Constructor that takes a maximum page size. - * + * * @param pageSize Maximum result size. */ public PageFilter(final long pageSize) { diff --git a/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java b/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java index 5d9eca7..6f53d41 100644 --- a/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.io.Cell; * Implementation of RowFilterInterface that limits results to a specific page * size. It terminates scanning once the number of filter-passed results is >= * the given page size. - * + * *

    * Note that this filter cannot guarantee that the number of results returned * to a client are <= page size. This is because the filter is applied @@ -58,7 +58,7 @@ public class PageRowFilter implements RowFilterInterface { /** * Constructor that takes a maximum page size. - * + * * @param pageSize Maximum result size. */ public PageRowFilter(final long pageSize) { diff --git a/src/java/org/apache/hadoop/hbase/filter/PrefixRowFilter.java b/src/java/org/apache/hadoop/hbase/filter/PrefixRowFilter.java index b7ef415..e643a07 100644 --- a/src/java/org/apache/hadoop/hbase/filter/PrefixRowFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/PrefixRowFilter.java @@ -36,21 +36,21 @@ import org.apache.hadoop.hbase.util.Bytes; */ public class PrefixRowFilter implements RowFilterInterface { protected byte[] prefix; - + /** * Constructor that takes a row prefix to filter on - * @param prefix + * @param prefix */ public PrefixRowFilter(byte[] prefix) { this.prefix = prefix; } - + /** * Default Constructor, filters nothing. Required for RPC * deserialization */ public PrefixRowFilter() { } - + public void reset() { // Nothing to reset } @@ -62,15 +62,15 @@ public class PrefixRowFilter implements RowFilterInterface { public void rowProcessed(boolean filtered, byte[] key, int offset, int length) { // does not care } - + public boolean processAlways() { return false; } - + public boolean filterAllRemaining() { return false; } - + public boolean filterRowKey(final byte [] rowKey) { return filterRowKey(rowKey, 0, rowKey.length); } @@ -109,11 +109,11 @@ public class PrefixRowFilter implements RowFilterInterface { public void validate(final byte [][] columns) { // does not do this } - + public void readFields(final DataInput in) throws IOException { prefix = Bytes.readByteArray(in); } - + public void write(final DataOutput out) throws IOException { Bytes.writeByteArray(out, prefix); } diff --git a/src/java/org/apache/hadoop/hbase/filter/QualifierFilter.java b/src/java/org/apache/hadoop/hbase/filter/QualifierFilter.java index e50470b..d9d6e84 100644 --- a/src/java/org/apache/hadoop/hbase/filter/QualifierFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/QualifierFilter.java @@ -24,8 +24,8 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Get; /** - * This filter is used to filter based on the column qualifier. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator for the + * This filter is used to filter based on the column qualifier. It takes an + * operator (equal, greater, not equal, etc) and a byte [] comparator for the * column qualifier portion of a key. *

    * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter} @@ -58,7 +58,7 @@ public class QualifierFilter extends CompareFilter { public ReturnCode filterKeyValue(KeyValue v) { int qualifierLength = v.getQualifierLength(); if (qualifierLength > 0) { - if (doCompare(this.compareOp, this.comparator, v.getBuffer(), + if (doCompare(this.compareOp, this.comparator, v.getBuffer(), v.getQualifierOffset(), qualifierLength)) { return ReturnCode.SKIP; } diff --git a/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java b/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java index 8dbc34b..e6a287d 100644 --- a/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.util.Bytes; * Implementation of RowFilterInterface that can filter by rowkey regular * expression and/or individual column values (equals comparison only). Multiple * column filters imply an implicit conjunction of filter criteria. - * + * * Note that column value filtering in this interface has been replaced by * {@link ColumnValueFilter}. * @deprecated This interface doesn't work well in new KeyValue world. @@ -65,7 +65,7 @@ public class RegExpRowFilter implements RowFilterInterface { /** * Constructor that takes a row key regular expression to filter on. - * + * * @param rowKeyRegExp */ public RegExpRowFilter(final String rowKeyRegExp) { @@ -75,7 +75,7 @@ public class RegExpRowFilter implements RowFilterInterface { /** * @deprecated Column filtering has been replaced by {@link ColumnValueFilter} * Constructor that takes a row key regular expression to filter on. - * + * * @param rowKeyRegExp * @param columnFilter */ @@ -85,7 +85,7 @@ public class RegExpRowFilter implements RowFilterInterface { this.rowKeyRegExp = rowKeyRegExp; this.setColumnFilters(columnFilter); } - + public void rowProcessed(boolean filtered, byte [] rowKey) { rowProcessed(filtered, rowKey, 0, rowKey.length); } @@ -98,11 +98,11 @@ public class RegExpRowFilter implements RowFilterInterface { public boolean processAlways() { return false; } - + /** * @deprecated Column filtering has been replaced by {@link ColumnValueFilter} * Specify a value that must be matched for the given column. - * + * * @param colKey * the column to match on * @param value @@ -120,7 +120,7 @@ public class RegExpRowFilter implements RowFilterInterface { /** * @deprecated Column filtering has been replaced by {@link ColumnValueFilter} * Set column filters for a number of columns. - * + * * @param columnFilter * Map of columns with value criteria. */ diff --git a/src/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java b/src/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java index 89665d2..7987999 100644 --- a/src/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java +++ b/src/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java @@ -38,7 +38,7 @@ import java.nio.charset.IllegalCharsetNameException; * regular expression matches a cell value in the column. *

    * Only EQUAL or NOT_EQUAL {@link org.apache.hadoop.hbase.filter.CompareFilter.CompareOp} - * comparisons are valid with this comparator. + * comparisons are valid with this comparator. *

    * For example: *

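The example referenced above falls outside this hunk; a comparable sketch, here matching row keys rather than cell values and using an invented pattern, would be:

    RowFilter f = new RowFilter(CompareFilter.CompareOp.EQUAL,
        new RegexStringComparator("key-[0-9]+"));   // keep rows whose key matches the expression
    scan.setFilter(f);
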
    diff --git a/src/java/org/apache/hadoop/hbase/filter/RowFilter.java b/src/java/org/apache/hadoop/hbase/filter/RowFilter.java index a094db4..162a78c 100644 --- a/src/java/org/apache/hadoop/hbase/filter/RowFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/RowFilter.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.client.Scan; /** * This filter is used to filter based on the key. It takes an operator - * (equal, greater, not equal, etc) and a byte [] comparator for the row, + * (equal, greater, not equal, etc) and a byte [] comparator for the row, * and column qualifier portions of a key. *

    * This filter can be wrapped with {@link WhileMatchFilter} to add more control. @@ -51,7 +51,7 @@ public class RowFilter extends CompareFilter { * @param rowCompareOp the compare op for row matching * @param rowComparator the comparator for row matching */ - public RowFilter(final CompareOp rowCompareOp, + public RowFilter(final CompareOp rowCompareOp, final WritableByteArrayComparable rowComparator) { super(rowCompareOp, rowComparator); } @@ -68,7 +68,7 @@ public class RowFilter extends CompareFilter { } return ReturnCode.INCLUDE; } - + @Override public boolean filterRowKey(byte[] data, int offset, int length) { if(doCompare(this.compareOp, this.comparator, data, offset, length)) { diff --git a/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java b/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java index 76763d5..7cb85b2 100644 --- a/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java +++ b/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.io.Writable; /** - * + * * Interface used for row-level filters applied to HRegion.HScanner scan * results during calls to next(). * @@ -46,17 +46,17 @@ import org.apache.hadoop.io.Writable; public interface RowFilterInterface extends Writable { /** * Resets the state of the filter. Used prior to the start of a Region scan. - * + * */ void reset(); /** - * Called to let filter know the final decision (to pass or filter) on a - * given row. With out HScanner calling this, the filter does not know if a - * row passed filtering even if it passed the row itself because other - * filters may have failed the row. E.g. when this filter is a member of a + * Called to let filter know the final decision (to pass or filter) on a + * given row. With out HScanner calling this, the filter does not know if a + * row passed filtering even if it passed the row itself because other + * filters may have failed the row. E.g. when this filter is a member of a * RowFilterSet with an OR operator. - * + * * @see RowFilterSet * @param filtered * @param key @@ -65,12 +65,12 @@ public interface RowFilterInterface extends Writable { void rowProcessed(boolean filtered, byte [] key); /** - * Called to let filter know the final decision (to pass or filter) on a - * given row. With out HScanner calling this, the filter does not know if a - * row passed filtering even if it passed the row itself because other - * filters may have failed the row. E.g. when this filter is a member of a + * Called to let filter know the final decision (to pass or filter) on a + * given row. With out HScanner calling this, the filter does not know if a + * row passed filtering even if it passed the row itself because other + * filters may have failed the row. E.g. when this filter is a member of a * RowFilterSet with an OR operator. - * + * * @see RowFilterSet * @param filtered * @param key @@ -80,31 +80,31 @@ public interface RowFilterInterface extends Writable { void rowProcessed(boolean filtered, byte [] key, int offset, int length); /** - * Returns whether or not the filter should always be processed in any - * filtering call. This precaution is necessary for filters that maintain - * state and need to be updated according to their response to filtering - * calls (see WhileMatchRowFilter for an example). At times, filters nested - * in RowFilterSets may or may not be called because the RowFilterSet - * determines a result as fast as possible. 
Returning true for + * Returns whether or not the filter should always be processed in any + * filtering call. This precaution is necessary for filters that maintain + * state and need to be updated according to their response to filtering + * calls (see WhileMatchRowFilter for an example). At times, filters nested + * in RowFilterSets may or may not be called because the RowFilterSet + * determines a result as fast as possible. Returning true for * processAlways() ensures that the filter will always be called. - * + * * @return whether or not to always process the filter */ boolean processAlways(); - + /** * Determines if the filter has decided that all remaining results should be * filtered (skipped). This is used to prevent the scanner from scanning a * the rest of the HRegion when for sure the filter will exclude all * remaining rows. - * + * * @return true if the filter intends to filter all remaining rows. */ boolean filterAllRemaining(); /** * Filters on just a row key. This is the first chance to stop a row. - * + * * @param rowKey * @return true if given row key is filtered and row should not be processed. * @deprecated Use {@link #filterRowKey(byte[], int, int)} instead. @@ -113,7 +113,7 @@ public interface RowFilterInterface extends Writable { /** * Filters on just a row key. This is the first chance to stop a row. - * + * * @param rowKey * @param offset * @param length @@ -122,9 +122,9 @@ public interface RowFilterInterface extends Writable { boolean filterRowKey(final byte [] rowKey, final int offset, final int length); /** - * Filters on row key, column name, and column value. This will take individual columns out of a row, + * Filters on row key, column name, and column value. This will take individual columns out of a row, * but the rest of the row will still get through. - * + * * @param rowKey row key to filter on. * @param columnName column name to filter on * @param columnValue column value to filter on @@ -137,9 +137,9 @@ public interface RowFilterInterface extends Writable { final byte [] columnValue); /** - * Filters on row key, column name, and column value. This will take individual columns out of a row, + * Filters on row key, column name, and column value. This will take individual columns out of a row, * but the rest of the row will still get through. - * + * * @param rowKey row key to filter on. * @param colunmName column name to filter on * @param columnValue column value to filter on @@ -151,16 +151,16 @@ public interface RowFilterInterface extends Writable { final int vlength); /** - * Filter on the fully assembled row. This is the last chance to stop a row. - * + * Filter on the fully assembled row. This is the last chance to stop a row. + * * @param columns * @return true if row filtered and should not be processed. */ boolean filterRow(final SortedMap columns); /** - * Filter on the fully assembled row. This is the last chance to stop a row. - * + * Filter on the fully assembled row. This is the last chance to stop a row. + * * @param results * @return true if row filtered and should not be processed. */ @@ -174,7 +174,7 @@ public interface RowFilterInterface extends Writable { * list will be ignored. In the case of null value filters, all rows will pass * the filter. This behavior should be 'undefined' for the user and therefore * not permitted. 
- * + * * @param columns */ void validate(final byte [][] columns); diff --git a/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java b/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java index c9872ef..7208ad1 100644 --- a/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java +++ b/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java @@ -35,8 +35,8 @@ import org.apache.hadoop.io.ObjectWritable; /** * Implementation of RowFilterInterface that represents a set of RowFilters - * which will be evaluated with a specified boolean operator MUST_PASS_ALL - * (!AND) or MUST_PASS_ONE (!OR). Since you can use RowFilterSets as children + * which will be evaluated with a specified boolean operator MUST_PASS_ALL + * (!AND) or MUST_PASS_ONE (!OR). Since you can use RowFilterSets as children * of RowFilterSet, you can create a hierarchy of filters to be evaluated. * * It is highly likely this construct will no longer work! @@ -65,9 +65,9 @@ public class RowFilterSet implements RowFilterInterface { } /** - * Constructor that takes a set of RowFilters. The default operator + * Constructor that takes a set of RowFilters. The default operator * MUST_PASS_ALL is assumed. - * + * * @param rowFilters */ public RowFilterSet(final Set rowFilters) { @@ -76,7 +76,7 @@ public class RowFilterSet implements RowFilterInterface { /** * Constructor that takes a set of RowFilters and an operator. - * + * * @param operator Operator to process filter set with. * @param rowFilters Set of row filters. */ @@ -87,29 +87,29 @@ public class RowFilterSet implements RowFilterInterface { } /** Get the operator. - * + * * @return operator */ public Operator getOperator() { return operator; } - + /** Get the filters. - * + * * @return filters */ public Set getFilters() { return filters; } - + /** Add a filter. 
- * + * * @param filter */ public void addFilter(RowFilterInterface filter) { this.filters.add(filter); } - + public void validate(final byte [][] columns) { for (RowFilterInterface filter : filters) { filter.validate(columns); @@ -140,7 +140,7 @@ public class RowFilterSet implements RowFilterInterface { } return false; } - + public boolean filterAllRemaining() { boolean result = operator == Operator.MUST_PASS_ONE; for (RowFilterInterface filter : filters) { @@ -187,7 +187,7 @@ public class RowFilterSet implements RowFilterInterface { return result; } - public boolean filterColumn(final byte [] rowKey, final byte [] colKey, + public boolean filterColumn(final byte [] rowKey, final byte [] colKey, final byte[] data) { return filterColumn(rowKey, 0, rowKey.length, colKey, 0, colKey.length, data, 0, data.length); @@ -201,14 +201,14 @@ public class RowFilterSet implements RowFilterInterface { for (RowFilterInterface filter : filters) { if (!resultFound) { if (operator == Operator.MUST_PASS_ALL) { - if (filter.filterAllRemaining() || + if (filter.filterAllRemaining() || filter.filterColumn(rowKey, roffset, rlength, columnName, coffset, clength, columnValue, voffset, vlength)) { result = true; resultFound = true; } } else if (operator == Operator.MUST_PASS_ONE) { - if (!filter.filterAllRemaining() && + if (!filter.filterAllRemaining() && !filter.filterColumn(rowKey, roffset, rlength, columnName, coffset, clength, columnValue, voffset, vlength)) { result = false; diff --git a/src/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/src/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java index 03dd83b..4c482e7 100644 --- a/src/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java @@ -35,19 +35,19 @@ import org.apache.hadoop.hbase.util.Bytes; /** * This filter is used to filter cells based on value. It takes a - * {@link org.apache.hadoop.hbase.filter.CompareFilter.CompareOp} - * operator (equal, greater, not equal, etc), and either a byte [] value or + * {@link org.apache.hadoop.hbase.filter.CompareFilter.CompareOp} + * operator (equal, greater, not equal, etc), and either a byte [] value or * a {@link org.apache.hadoop.hbase.filter.WritableByteArrayComparable}. *

    - * If we have a byte [] value then we just do a lexicographic compare. For - * example, if passed value is 'b' and cell has 'a' and the compare operator - * is LESS, then we will filter out this cell (return true). If this is not - * sufficient (eg you want to deserialize a long and then compare it to a fixed + * If we have a byte [] value then we just do a lexicographic compare. For + * example, if passed value is 'b' and cell has 'a' and the compare operator + * is LESS, then we will filter out this cell (return true). If this is not + * sufficient (eg you want to deserialize a long and then compare it to a fixed * long value), then you can pass in your own comparator instead. *

    - * You must also specify a family and qualifier. Only the value of this column - * will be tested. When using this filter on a {@link Scan} with specified - * inputs, the column to be tested should also be added as input (otherwise + * You must also specify a family and qualifier. Only the value of this column + * will be tested. When using this filter on a {@link Scan} with specified + * inputs, the column to be tested should also be added as input (otherwise * the filter will regard the column as missing). *

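As a rough illustration of the above (not part of this patch; "info", "status" and "ok" are placeholder names, and the usual Bytes/Scan/filter imports are assumed), a binary-compare instance might be built and attached to a Scan like this, remembering to add the tested column so the filter can see it:

  // Hedged sketch: pass rows whose info:status column equals "ok".
  SingleColumnValueFilter filter = new SingleColumnValueFilter(
      Bytes.toBytes("info"), Bytes.toBytes("status"),
      CompareFilter.CompareOp.EQUAL, Bytes.toBytes("ok"));
  Scan scan = new Scan();
  scan.addColumn(Bytes.toBytes("info"), Bytes.toBytes("status")); // otherwise the column is regarded as missing
  scan.setFilter(filter);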
    * To prevent the entire row from being emitted if the column is not found @@ -65,7 +65,7 @@ public class SingleColumnValueFilter implements Filter { static final Log LOG = LogFactory.getLog(SingleColumnValueFilter.class); private byte [] columnFamily; - private byte [] columnQualifier; + private byte [] columnQualifier; private CompareOp compareOp; private WritableByteArrayComparable comparator; private boolean foundColumn = false; @@ -78,13 +78,13 @@ public class SingleColumnValueFilter implements Filter { */ public SingleColumnValueFilter() { } - + /** * Constructor for binary compare of the value of a single column. If the * column is found and the condition passes, all columns of the row will be * emitted. If the column is not found or the condition fails, the row will * not be emitted. - * + * * @param family name of column family * @param qualifier name of column qualifier * @param compareOp operator @@ -103,7 +103,7 @@ public class SingleColumnValueFilter implements Filter { * Use the filterIfColumnMissing flag to set whether the rest of the columns * in a row will be emitted if the specified column to check is not found in * the row. - * + * * @param family name of column family * @param qualifier name of column qualifier * @param compareOp operator diff --git a/src/java/org/apache/hadoop/hbase/filter/SkipFilter.java b/src/java/org/apache/hadoop/hbase/filter/SkipFilter.java index 3a9c5cf..17e1a37 100644 --- a/src/java/org/apache/hadoop/hbase/filter/SkipFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/SkipFilter.java @@ -27,7 +27,7 @@ import java.io.IOException; import java.io.DataInput; /** - * A wrapper filter that filters an entire row if any of the KeyValue checks do + * A wrapper filter that filters an entire row if any of the KeyValue checks do * not pass. *

    * For example, if all columns in a row represent weights of different things, @@ -41,7 +41,7 @@ import java.io.DataInput; * new BinaryComparator(Bytes.toBytes(0)))); * * Any row which contained a column whose value was 0 will be filtered out. - * Without this filter, the other non-zero valued columns in the row would still + * Without this filter, the other non-zero valued columns in the row would still * be emitted. */ public class SkipFilter implements Filter { diff --git a/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java b/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java index 38884a3..9755529 100644 --- a/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java @@ -30,14 +30,14 @@ import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.hbase.util.Bytes; /** - * Implementation of RowFilterInterface that filters out rows greater than or + * Implementation of RowFilterInterface that filters out rows greater than or * equal to a specified rowKey. * * @deprecated Use filters that are rooted on @{link Filter} instead */ public class StopRowFilter implements RowFilterInterface { private byte [] stopRowKey; - + /** * Default constructor, filters nothing. Required though for RPC * deserialization. @@ -48,16 +48,16 @@ public class StopRowFilter implements RowFilterInterface { /** * Constructor that takes a stopRowKey on which to filter - * + * * @param stopRowKey rowKey to filter on. */ public StopRowFilter(final byte [] stopRowKey) { this.stopRowKey = stopRowKey; } - + /** * An accessor for the stopRowKey - * + * * @return the filter's stopRowKey */ public byte [] getStopRowKey() { @@ -83,7 +83,7 @@ public class StopRowFilter implements RowFilterInterface { public boolean processAlways() { return false; } - + public boolean filterAllRemaining() { return false; } @@ -104,11 +104,11 @@ public class StopRowFilter implements RowFilterInterface { } /** - * Because StopRowFilter does not examine column information, this method + * Because StopRowFilter does not examine column information, this method * defaults to calling the rowKey-only version of filter. - * @param rowKey - * @param colKey - * @param data + * @param rowKey + * @param colKey + * @param data * @return boolean */ public boolean filterColumn(final byte [] rowKey, final byte [] colKey, @@ -123,9 +123,9 @@ public class StopRowFilter implements RowFilterInterface { } /** - * Because StopRowFilter does not examine column information, this method + * Because StopRowFilter does not examine column information, this method * defaults to calling filterAllRemaining(). - * @param columns + * @param columns * @return boolean */ public boolean filterRow(final SortedMap columns) { diff --git a/src/java/org/apache/hadoop/hbase/filter/SubstringComparator.java b/src/java/org/apache/hadoop/hbase/filter/SubstringComparator.java index 4690940..b1d111f 100644 --- a/src/java/org/apache/hadoop/hbase/filter/SubstringComparator.java +++ b/src/java/org/apache/hadoop/hbase/filter/SubstringComparator.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.util.Bytes; * the value of a given column. Use it to test if a given substring appears * in a cell value in the column. The comparison is case insensitive. *

- * Only EQUAL or NOT_EQUAL tests are valid with this comparator.
+ * Only EQUAL or NOT_EQUAL tests are valid with this comparator.
 *

    * For example: *

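The hunk ends before the example itself, so here is a hedged sketch of typical usage (the family, qualifier and substring values are placeholders, and the comparator-taking SingleColumnValueFilter constructor from the previous file is assumed):

  // Case-insensitive substring match; only EQUAL / NOT_EQUAL are meaningful here.
  SubstringComparator comp = new SubstringComparator("substr");
  SingleColumnValueFilter filter = new SingleColumnValueFilter(
      Bytes.toBytes("family"), Bytes.toBytes("qualifier"),
      CompareFilter.CompareOp.EQUAL, comp);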
    diff --git a/src/java/org/apache/hadoop/hbase/filter/ValueFilter.java b/src/java/org/apache/hadoop/hbase/filter/ValueFilter.java index f70517e..77196c0 100644 --- a/src/java/org/apache/hadoop/hbase/filter/ValueFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/ValueFilter.java @@ -24,8 +24,8 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Get; /** - * This filter is used to filter based on column value. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator for the + * This filter is used to filter based on column value. It takes an + * operator (equal, greater, not equal, etc) and a byte [] comparator for the * cell value. *

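A minimal sketch of that contract, assuming ValueFilter exposes the usual CompareFilter-style (operator, comparator) constructor; the zero value is only illustrative:

  // Skip KeyValues whose value equals the serialized int 0, keep everything else.
  Filter valueFilter = new ValueFilter(CompareFilter.CompareOp.NOT_EQUAL,
      new BinaryComparator(Bytes.toBytes(0)));
  Scan scan = new Scan();
  scan.setFilter(valueFilter);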
    * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter} @@ -56,7 +56,7 @@ public class ValueFilter extends CompareFilter { @Override public ReturnCode filterKeyValue(KeyValue v) { - if (doCompare(this.compareOp, this.comparator, v.getBuffer(), + if (doCompare(this.compareOp, this.comparator, v.getBuffer(), v.getValueOffset(), v.getValueLength())) { return ReturnCode.SKIP; } diff --git a/src/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java b/src/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java index 728cce9..a696120 100644 --- a/src/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java @@ -30,7 +30,7 @@ import java.io.DataInput; * A wrapper filter that returns true from {@link #filterAllRemaining()} as soon * as the wrapped filters {@link Filter#filterRowKey(byte[], int, int)}, * {@link Filter#filterKeyValue(org.apache.hadoop.hbase.KeyValue)}, - * {@link org.apache.hadoop.hbase.filter.Filter#filterRow()} or + * {@link org.apache.hadoop.hbase.filter.Filter#filterRow()} or * {@link org.apache.hadoop.hbase.filter.Filter#filterAllRemaining()} methods * returns true. */ diff --git a/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java b/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java index 9f0f937..8b911b9 100644 --- a/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java +++ b/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java @@ -29,10 +29,10 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.Cell; /** - * WhileMatchRowFilter is a wrapper filter that filters everything after the - * first filtered row. Once the nested filter returns true for either of it's - * filter(..) methods or filterNotNull(SortedMap), this wrapper's - * filterAllRemaining() will return true. All filtering methods will + * WhileMatchRowFilter is a wrapper filter that filters everything after the + * first filtered row. Once the nested filter returns true for either of it's + * filter(..) methods or filterNotNull(SortedMap), this wrapper's + * filterAllRemaining() will return true. All filtering methods will * thereafter defer to the result of filterAllRemaining(). * * @deprecated Use filters that are rooted on @{link Filter} instead @@ -48,7 +48,7 @@ public class WhileMatchRowFilter implements RowFilterInterface { public WhileMatchRowFilter() { super(); } - + /** * Constructor * @param filter @@ -56,16 +56,16 @@ public class WhileMatchRowFilter implements RowFilterInterface { public WhileMatchRowFilter(RowFilterInterface filter) { this.filter = filter; } - + /** * Returns the internal filter being wrapped - * + * * @return the internal filter */ public RowFilterInterface getInternalFilter() { return this.filter; } - + public void reset() { this.filterAllRemaining = false; this.filter.reset(); @@ -74,18 +74,18 @@ public class WhileMatchRowFilter implements RowFilterInterface { public boolean processAlways() { return true; } - + /** - * Returns true once the nested filter has filtered out a row (returned true + * Returns true once the nested filter has filtered out a row (returned true * on a call to one of it's filtering methods). Until then it returns false. - * - * @return true/false whether the nested filter has returned true on a filter + * + * @return true/false whether the nested filter has returned true on a filter * call. 
*/ public boolean filterAllRemaining() { return this.filterAllRemaining || this.filter.filterAllRemaining(); } - + public boolean filterRowKey(final byte [] rowKey) { changeFAR(this.filter.filterRowKey(rowKey, 0, rowKey.length)); return filterAllRemaining(); @@ -101,7 +101,7 @@ public class WhileMatchRowFilter implements RowFilterInterface { changeFAR(this.filter.filterColumn(rowKey, colKey, data)); return filterAllRemaining(); } - + public boolean filterRow(final SortedMap columns) { changeFAR(this.filter.filterRow(columns)); return filterAllRemaining(); @@ -113,9 +113,9 @@ public class WhileMatchRowFilter implements RowFilterInterface { } /** - * Change filterAllRemaining from false to true if value is true, otherwise + * Change filterAllRemaining from false to true if value is true, otherwise * leave as is. - * + * * @param value */ private void changeFAR(boolean value) { @@ -129,14 +129,14 @@ public class WhileMatchRowFilter implements RowFilterInterface { public void rowProcessed(boolean filtered, byte[] key, int offset, int length) { this.filter.rowProcessed(filtered, key, offset, length); } - + public void validate(final byte [][] columns) { this.filter.validate(columns); } - + public void readFields(DataInput in) throws IOException { String className = in.readUTF(); - + try { this.filter = (RowFilterInterface)(Class.forName(className). newInstance()); @@ -152,7 +152,7 @@ public class WhileMatchRowFilter implements RowFilterInterface { e); } } - + public void write(DataOutput out) throws IOException { out.writeUTF(this.filter.getClass().getName()); this.filter.write(out); diff --git a/src/java/org/apache/hadoop/hbase/filter/package-info.java b/src/java/org/apache/hadoop/hbase/filter/package-info.java index 965f786..e9db40d 100644 --- a/src/java/org/apache/hadoop/hbase/filter/package-info.java +++ b/src/java/org/apache/hadoop/hbase/filter/package-info.java @@ -18,7 +18,7 @@ * limitations under the License. */ /**Provides row-level filters applied to HRegion scan results during calls to - * {@link org.apache.hadoop.hbase.client.ResultScanner#next()}. + * {@link org.apache.hadoop.hbase.client.ResultScanner#next()}.

    Since HBase 0.20.0, {@link org.apache.hadoop.hbase.filter.Filter} is the new Interface used filtering. It replaces the deprecated diff --git a/src/java/org/apache/hadoop/hbase/io/BatchOperation.java b/src/java/org/apache/hadoop/hbase/io/BatchOperation.java index e52b0f6..5d19cb2 100644 --- a/src/java/org/apache/hadoop/hbase/io/BatchOperation.java +++ b/src/java/org/apache/hadoop/hbase/io/BatchOperation.java @@ -28,7 +28,7 @@ import org.apache.hadoop.io.Writable; /** * Batch update operation. - * + * * If value is null, its a DELETE operation. If its non-null, its a PUT. * This object is purposely bare-bones because many instances are created * during bulk uploads. We have one class for DELETEs and PUTs rather than @@ -42,12 +42,12 @@ public class BatchOperation implements Writable, HeapSize { */ // JHat says this is 32 bytes. public final int ESTIMATED_HEAP_TAX = 36; - + private byte [] column = null; - + // A null value defines DELETE operations. private byte [] value = null; - + /** * Default constructor */ @@ -118,7 +118,7 @@ public class BatchOperation implements Writable, HeapSize { public String toString() { return "column => " + Bytes.toString(this.column) + ", value => '...'"; } - + // Writable methods // This is a hotspot when updating deserializing incoming client submissions. @@ -142,7 +142,7 @@ public class BatchOperation implements Writable, HeapSize { out.write(value); } } - + public long heapSize() { return Bytes.ESTIMATED_HEAP_TAX * 2 + this.column.length + this.value.length + ESTIMATED_HEAP_TAX; diff --git a/src/java/org/apache/hadoop/hbase/io/BatchUpdate.java b/src/java/org/apache/hadoop/hbase/io/BatchUpdate.java index 65d5c0e..5d5c4c4 100644 --- a/src/java/org/apache/hadoop/hbase/io/BatchUpdate.java +++ b/src/java/org/apache/hadoop/hbase/io/BatchUpdate.java @@ -38,7 +38,7 @@ import org.apache.hadoop.io.WritableComparable; /** * A Writable object that contains a series of BatchOperations - * + * * There is one BatchUpdate object per server, so a series of batch operations * can result in multiple BatchUpdate objects if the batch contains rows that * are served by multiple region servers. @@ -47,25 +47,25 @@ import org.apache.hadoop.io.WritableComparable; public class BatchUpdate implements WritableComparable, Iterable, HeapSize { private static final Log LOG = LogFactory.getLog(BatchUpdate.class); - + /** * Estimated 'shallow size' of this object not counting payload. */ // Shallow size is 56. Add 32 for the arraylist below. public static final int ESTIMATED_HEAP_TAX = 56 + 32; - + // the row being updated private byte [] row = null; private long size = 0; - + // the batched operations private ArrayList operations = new ArrayList(); - + private long timestamp = HConstants.LATEST_TIMESTAMP; - + private long rowLock = -1l; - + /** * Default constructor used serializing. Do not use directly. */ @@ -76,7 +76,7 @@ implements WritableComparable, Iterable, HeapSize { /** * Initialize a BatchUpdate operation on a row. Timestamp is assumed to be * now. - * + * * @param row */ public BatchUpdate(final String row) { @@ -86,7 +86,7 @@ implements WritableComparable, Iterable, HeapSize { /** * Initialize a BatchUpdate operation on a row. Timestamp is assumed to be * now. - * + * * @param row */ public BatchUpdate(final byte [] row) { @@ -95,14 +95,14 @@ implements WritableComparable, Iterable, HeapSize { /** * Initialize a BatchUpdate operation on a row with a specific timestamp. 
- * + * * @param row * @param timestamp */ public BatchUpdate(final String row, long timestamp){ this(Bytes.toBytes(row), timestamp); } - + /** * Recopy constructor * @param buToCopy BatchUpdate to copy @@ -122,7 +122,7 @@ implements WritableComparable, Iterable, HeapSize { /** * Initialize a BatchUpdate operation on a row with a specific timestamp. - * + * * @param row * @param timestamp */ @@ -132,7 +132,7 @@ implements WritableComparable, Iterable, HeapSize { this.operations = new ArrayList(); this.size = (row == null)? 0: row.length; } - + /** * Create a batch operation. * @param rr the RowResult @@ -143,7 +143,7 @@ implements WritableComparable, Iterable, HeapSize { this.put(entry.getKey(), entry.getValue().getValue()); } } - + /** * Get the row lock associated with this update * @return the row lock @@ -172,29 +172,29 @@ implements WritableComparable, Iterable, HeapSize { public long getTimestamp() { return timestamp; } - + /** * Set this BatchUpdate's timestamp. - * + * * @param timestamp - */ + */ public void setTimestamp(long timestamp) { this.timestamp = timestamp; } - + /** * Get the current value of the specified column - * + * * @param column column name * @return byte[] the cell value, returns null if the column does not exist. */ public synchronized byte[] get(final String column) { return get(Bytes.toBytes(column)); } - + /** - * Get the current value of the specified column - * + * Get the current value of the specified column + * * @param column column name * @return byte[] the cell value, returns null if the column does not exist. */ @@ -209,7 +209,7 @@ implements WritableComparable, Iterable, HeapSize { /** * Get the current columns - * + * * @return byte[][] an array of byte[] columns */ public synchronized byte[][] getColumns() { @@ -222,17 +222,17 @@ implements WritableComparable, Iterable, HeapSize { /** * Check if the specified column is currently assigned a value - * + * * @param column column to check for * @return boolean true if the given column exists */ public synchronized boolean hasColumn(String column) { return hasColumn(Bytes.toBytes(column)); } - + /** * Check if the specified column is currently assigned a value - * + * * @param column column to check for * @return boolean true if the given column exists */ @@ -243,8 +243,8 @@ implements WritableComparable, Iterable, HeapSize { } return true; } - - /** + + /** * Change a value for the specified column * * @param column column whose value is being set @@ -254,7 +254,7 @@ implements WritableComparable, Iterable, HeapSize { put(Bytes.toBytes(column), val); } - /** + /** * Change a value for the specified column * * @param column column whose value is being set @@ -270,7 +270,7 @@ implements WritableComparable, Iterable, HeapSize { operations.add(bo); } - /** + /** * Delete the value for a column * Deletes the cell whose row/column/commit-timestamp match those of the * delete. @@ -280,7 +280,7 @@ implements WritableComparable, Iterable, HeapSize { delete(Bytes.toBytes(column)); } - /** + /** * Delete the value for a column * Deletes the cell whose row/column/commit-timestamp match those of the * delete. @@ -293,7 +293,7 @@ implements WritableComparable, Iterable, HeapSize { // // Iterable // - + /** * @return Iterator */ @@ -363,7 +363,7 @@ implements WritableComparable, Iterable, HeapSize { return this.row.length + Bytes.ESTIMATED_HEAP_TAX + this.size + ESTIMATED_HEAP_TAX; } - + /** * Code to test sizes of BatchUpdate arrays. 
* @param args diff --git a/src/java/org/apache/hadoop/hbase/io/Cell.java b/src/java/org/apache/hadoop/hbase/io/Cell.java index f918cfd..ec7f267 100644 --- a/src/java/org/apache/hadoop/hbase/io/Cell.java +++ b/src/java/org/apache/hadoop/hbase/io/Cell.java @@ -62,7 +62,7 @@ public class Cell implements Writable, Iterable>, /** * Create a new Cell with a given value and timestamp. Used by HStore. - * + * * @param value * @param timestamp */ @@ -72,7 +72,7 @@ public class Cell implements Writable, Iterable>, /** * Create a new Cell with a given value and timestamp. Used by HStore. - * + * * @param value * @param timestamp */ @@ -82,7 +82,7 @@ public class Cell implements Writable, Iterable>, /** * Create a new Cell with a given value and timestamp. Used by HStore. - * + * * @param bb * @param timestamp */ @@ -134,7 +134,7 @@ public class Cell implements Writable, Iterable>, /** * Add a new timestamp and value to this cell provided timestamp does not * already exist - * + * * @param val * @param ts */ @@ -264,7 +264,7 @@ public class Cell implements Writable, Iterable>, /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.ISerializable#restSerialize(org * .apache.hadoop.hbase.rest.serializer.IRestSerializer) diff --git a/src/java/org/apache/hadoop/hbase/io/CodeToClassAndBack.java b/src/java/org/apache/hadoop/hbase/io/CodeToClassAndBack.java index d7495e9..ead3bbb 100644 --- a/src/java/org/apache/hadoop/hbase/io/CodeToClassAndBack.java +++ b/src/java/org/apache/hadoop/hbase/io/CodeToClassAndBack.java @@ -41,12 +41,12 @@ public interface CodeToClassAndBack { */ public static final Map, Byte> CLASS_TO_CODE = new HashMap, Byte>(); - + /** * Class list for supported classes */ public Class[] classList = {byte[].class, Cell.class}; - + /** * The static loader that is used instead of the static constructor in * HbaseMapWritable. @@ -55,7 +55,7 @@ public interface CodeToClassAndBack { new InternalStaticLoader(classList, CODE_TO_CLASS, CLASS_TO_CODE); /** - * Class that loads the static maps with their values. + * Class that loads the static maps with their values. */ public class InternalStaticLoader{ InternalStaticLoader(Class[] classList, diff --git a/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java b/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java index 0485d5f..c657630 100644 --- a/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java +++ b/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java @@ -40,10 +40,10 @@ import org.apache.hadoop.hbase.util.Bytes; * of the file with keys that sort greater than those of the bottom half. * The top includes the split files midkey, of the key that follows if it does * not exist in the file. - * + * *

This type works in tandem with the {@link Reference} type. This class * is used for reading while Reference is used for writing. - *

    This file is not splitable. Calls to {@link #midkey()} return null. */ public class HalfHFileReader extends HFile.Reader { @@ -116,7 +116,7 @@ public class HalfHFileReader extends HFile.Reader { public boolean next() throws IOException { if (atEnd) return false; - + boolean b = delegate.next(); if (!b) { return b; @@ -215,7 +215,7 @@ public class HalfHFileReader extends HFile.Reader { @Override public byte[] getLastKey() { if (top) { - return super.getLastKey(); + return super.getLastKey(); } // Get a scanner that caches the block and that uses pread. HFileScanner scanner = getScanner(true, true); diff --git a/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java b/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java index a549913..45eb495 100644 --- a/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java +++ b/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java @@ -59,15 +59,15 @@ implements SortedMap, Configurable, Writable, CodeToClassAndBack{ /** * Contructor where another SortedMap can be used - * - * @param map the SortedMap to be used + * + * @param map the SortedMap to be used */ public HbaseMapWritable(SortedMap map){ conf = new AtomicReference(); instance = map; } - - + + /** @return the conf */ public Configuration getConf() { return conf.get(); @@ -97,7 +97,7 @@ implements SortedMap, Configurable, Writable, CodeToClassAndBack{ public V get(Object key) { return instance.get(key); } - + public boolean isEmpty() { return instance.isEmpty(); } @@ -149,7 +149,7 @@ implements SortedMap, Configurable, Writable, CodeToClassAndBack{ public SortedMap tailMap(byte[] fromKey) { return this.instance.tailMap(fromKey); } - + // Writable /** @return the Class class for the specified id */ @@ -167,7 +167,7 @@ implements SortedMap, Configurable, Writable, CodeToClassAndBack{ } return b; } - + /** * @see java.lang.Object#toString() */ diff --git a/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java b/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java index 4c68d33..ad8b2ec 100644 --- a/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java +++ b/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java @@ -58,7 +58,7 @@ import org.apache.hadoop.io.WritableFactories; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.util.Bytes; -/** +/** * This is a customized version of the polymorphic hadoop * {@link ObjectWritable}. It removes UTF8 (HADOOP-414). * Using {@link Text} intead of UTF-8 saves ~2% CPU between reading and writing @@ -75,7 +75,7 @@ import org.apache.hadoop.hbase.util.Bytes; */ public class HbaseObjectWritable implements Writable, Configurable { protected final static Log LOG = LogFactory.getLog(HbaseObjectWritable.class); - + // Here we maintain two static maps of classes to code and vice versa. // Add new classes+codes as wanted or figure way to auto-generate these // maps from the HMasterInterface. 
@@ -98,12 +98,12 @@ public class HbaseObjectWritable implements Writable, Configurable { addToMap(Float.TYPE, code++); addToMap(Double.TYPE, code++); addToMap(Void.TYPE, code++); - + // Other java types addToMap(String.class, code++); addToMap(byte [].class, code++); addToMap(byte [][].class, code++); - + // Hadoop types addToMap(Text.class, code++); addToMap(Writable.class, code++); @@ -126,7 +126,7 @@ public class HbaseObjectWritable implements Writable, Configurable { addToMap(HServerInfo.class, code++); addToMap(HTableDescriptor.class, code++); addToMap(MapWritable.class, code++); - + // // HBASE-880 // @@ -162,10 +162,10 @@ public class HbaseObjectWritable implements Writable, Configurable { addToMap(MultiPut.class, code++); addToMap(MultiPutResponse.class, code++); - // List + // List addToMap(List.class, code++); } - + private Class declaredClass; private Object instance; private Configuration conf; @@ -174,7 +174,7 @@ public class HbaseObjectWritable implements Writable, Configurable { public HbaseObjectWritable() { super(); } - + /** * @param instance */ @@ -193,10 +193,10 @@ public class HbaseObjectWritable implements Writable, Configurable { /** @return the instance, or null if none. */ public Object get() { return instance; } - + /** @return the class this is meant to be. */ public Class getDeclaredClass() { return declaredClass; } - + /** * Reset the instance. * @param instance @@ -214,11 +214,11 @@ public class HbaseObjectWritable implements Writable, Configurable { return "OW[class=" + declaredClass + ",value=" + instance + "]"; } - + public void readFields(DataInput in) throws IOException { readObject(in, this, this.conf); } - + public void write(DataOutput out) throws IOException { writeObject(out, instance, declaredClass, conf); } @@ -227,7 +227,7 @@ public class HbaseObjectWritable implements Writable, Configurable { Class declaredClass; /** default constructor for writable */ public NullInstance() { super(null); } - + /** * @param declaredClass * @param conf @@ -236,16 +236,16 @@ public class HbaseObjectWritable implements Writable, Configurable { super(conf); this.declaredClass = declaredClass; } - + public void readFields(DataInput in) throws IOException { this.declaredClass = CODE_TO_CLASS.get(in.readByte()); } - + public void write(DataOutput out) throws IOException { writeClassCode(out, this.declaredClass); } } - + /** * Write out the code byte for passed Class. * @param out @@ -279,13 +279,13 @@ public class HbaseObjectWritable implements Writable, Configurable { */ @SuppressWarnings("unchecked") public static void writeObject(DataOutput out, Object instance, - Class declaredClass, + Class declaredClass, Configuration conf) throws IOException { Object instanceObj = instance; Class declClass = declaredClass; - + if (instanceObj == null) { // null instanceObj = new NullInstance(declClass, conf); declClass = Writable.class; @@ -345,8 +345,8 @@ public class HbaseObjectWritable implements Writable, Configurable { throw new IOException("Can't write: "+instanceObj+" as "+declClass); } } - - + + /** * Read a {@link Writable}, {@link String}, primitive type, or an array of * the preceding. @@ -359,7 +359,7 @@ public class HbaseObjectWritable implements Writable, Configurable { throws IOException { return readObject(in, null, conf); } - + /** * Read a {@link Writable}, {@link String}, primitive type, or an array of * the preceding. 
@@ -443,7 +443,7 @@ public class HbaseObjectWritable implements Writable, Configurable { } @SuppressWarnings("unchecked") - private static Class getClassByName(Configuration conf, String className) + private static Class getClassByName(Configuration conf, String className) throws ClassNotFoundException { if(conf != null) { return conf.getClassByName(className); @@ -454,7 +454,7 @@ public class HbaseObjectWritable implements Writable, Configurable { } return Class.forName(className, true, cl); } - + private static void addToMap(final Class clazz, final byte code) { CLASS_TO_CODE.put(clazz, code); CODE_TO_CLASS.put(code, clazz); diff --git a/src/java/org/apache/hadoop/hbase/io/HeapSize.java b/src/java/org/apache/hadoop/hbase/io/HeapSize.java index d7b737c..bd78846 100644 --- a/src/java/org/apache/hadoop/hbase/io/HeapSize.java +++ b/src/java/org/apache/hadoop/hbase/io/HeapSize.java @@ -31,7 +31,7 @@ package org.apache.hadoop.hbase.io; * For example: *

      * public class SampleObject implements HeapSize {
    - *   
    + *
      *   int [] numbers;
      *   int x;
      * }
    @@ -43,5 +43,5 @@ public interface HeapSize {
        * count of payload and hosting object sizings.
       */
       public long heapSize();
    -  
    +
     }
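To make the SampleObject sketch above concrete, here is a hedged, deliberately approximate implementation of heapSize(); the ClassSize/Bytes constants are the ones used elsewhere in this patch, and the exact alignment math is illustrative rather than authoritative:

public class SampleObject implements HeapSize {
  int [] numbers;
  int x;

  public long heapSize() {
    // Object header + the int field + the array reference, aligned,
    // plus the array header and its int payload when the array is present.
    long size = ClassSize.align(ClassSize.OBJECT + Bytes.SIZEOF_INT + ClassSize.REFERENCE);
    if (numbers != null) {
      size += ClassSize.align(ClassSize.ARRAY + numbers.length * Bytes.SIZEOF_INT);
    }
    return size;
  }
}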
    diff --git a/src/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java b/src/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
    index cc048c3..20a9318 100644
    --- a/src/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
    +++ b/src/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
    @@ -29,7 +29,7 @@ import org.apache.hadoop.io.BytesWritable;
     import org.apache.hadoop.io.WritableComparable;
     import org.apache.hadoop.io.WritableComparator;
     
    -/** 
    +/**
      * A byte sequence that is usable as a key or value.  Based on
      * {@link org.apache.hadoop.io.BytesWritable} only this class is NOT resizable
 * and DOES NOT distinguish between the size of the sequence and the current
    @@ -43,14 +43,14 @@ implements WritableComparable {
       private byte[] bytes;
       private int offset;
       private int length;
    -  
    +
       /**
        * Create a zero-size sequence.
        */
       public ImmutableBytesWritable() {
         super();
       }
    -  
    +
       /**
        * Create a ImmutableBytesWritable using the byte array as the initial value.
        * @param bytes This array becomes the backing storage for the object.
    @@ -58,7 +58,7 @@ implements WritableComparable {
       public ImmutableBytesWritable(byte[] bytes) {
         this(bytes, 0, bytes.length);
       }
    -  
    +
       /**
        * Set the new ImmutableBytesWritable to the contents of the passed
        * ibw.
    @@ -67,7 +67,7 @@ implements WritableComparable {
       public ImmutableBytesWritable(final ImmutableBytesWritable ibw) {
         this(ibw.get(), 0, ibw.getSize());
       }
    -  
    +
       /**
        * Set the value to a given byte range
        * @param bytes the new byte range to set to
    @@ -80,7 +80,7 @@ implements WritableComparable {
         this.offset = offset;
         this.length = length;
       }
    -  
    +
       /**
        * Get the data from the BytesWritable.
        * @return The data is only valid between 0 and getSize() - 1.
    @@ -92,7 +92,7 @@ implements WritableComparable {
         }
         return this.bytes;
       }
    -  
    +
       /**
        * @param b Use passed bytes as backing array for this instance.
        */
    @@ -110,7 +110,7 @@ implements WritableComparable {
         this.offset = offset;
         this.length = length;
       }
    -  
    +
       /**
        * @return the current size of the buffer.
        */
    @@ -121,7 +121,7 @@ implements WritableComparable {
         }
         return this.length;
       }
    - 
    +
       /**
        * @return the current length of the buffer. same as getSize()
        */
    @@ -134,7 +134,7 @@ implements WritableComparable {
         }
         return this.length;
       }
    -  
    +
       /**
        * @return offset
        */
    @@ -148,19 +148,19 @@ implements WritableComparable {
         in.readFully(this.bytes, 0, this.length);
         this.offset = 0;
       }
    -  
    +
       public void write(final DataOutput out) throws IOException {
         out.writeInt(this.length);
         out.write(this.bytes, this.offset, this.length);
       }
    -  
    +
       // Below methods copied from BytesWritable
     
       @Override
       public int hashCode() {
         return WritableComparator.hashBytes(bytes, this.length);
       }
    -  
    +
       /**
        * Define the sort order of the BytesWritable.
        * @param right_obj The other bytes writable
    @@ -170,7 +170,7 @@ implements WritableComparable {
       public int compareTo(ImmutableBytesWritable right_obj) {
         return compareTo(right_obj.get());
       }
    -  
    +
       /**
        * Compares the bytes in this object to the specified byte array
        * @param that
    @@ -200,7 +200,7 @@ implements WritableComparable {
        * @see java.lang.Object#toString()
        */
       @Override
    -  public String toString() { 
    +  public String toString() {
         StringBuffer sb = new StringBuffer(3*this.bytes.length);
         for (int idx = 0; idx < this.bytes.length; idx++) {
           // if not the first, put a blank separator in
    @@ -218,7 +218,7 @@ implements WritableComparable {
       }
     
       /** A Comparator optimized for ImmutableBytesWritable.
    -   */ 
    +   */
       public static class Comparator extends WritableComparator {
         private BytesWritable.Comparator comparator =
           new BytesWritable.Comparator();
    @@ -236,11 +236,11 @@ implements WritableComparable {
           return comparator.compare(b1, s1, l1, b2, s2, l2);
         }
       }
    -  
    +
       static { // register this comparator
         WritableComparator.define(ImmutableBytesWritable.class, new Comparator());
       }
    -  
    +
       /**
        * @param array List of byte [].
        * @return Array of byte [].
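A short usage sketch for the class patched above (not part of the patch; the row values are placeholders). The byte array handed to the constructor becomes the backing storage, and ordering is plain lexicographic byte order:

  ImmutableBytesWritable a = new ImmutableBytesWritable(Bytes.toBytes("row-1"));
  ImmutableBytesWritable b = new ImmutableBytesWritable(Bytes.toBytes("row-2"));
  int cmp = a.compareTo(b);     // negative: "row-1" sorts before "row-2"
  byte [] backing = a.get();    // the backing array itself, not a copy
  int valid = a.getSize();      // number of valid bytes in that array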
    diff --git a/src/java/org/apache/hadoop/hbase/io/Reference.java b/src/java/org/apache/hadoop/hbase/io/Reference.java
    index 8cf7b66..5d4d72e 100644
    --- a/src/java/org/apache/hadoop/hbase/io/Reference.java
    +++ b/src/java/org/apache/hadoop/hbase/io/Reference.java
    @@ -36,7 +36,7 @@ import org.apache.hadoop.io.Writable;
     /**
      * A reference to the top or bottom half of a store file.  The file referenced
      * lives under a different region.  References are made at region split time.
    - * 
    + *
      * 

    References work with a special half store file type. References know how * to write out the reference format in the file system and are whats juggled * when references are mixed in with direct store files. The half store file @@ -53,7 +53,7 @@ public class Reference implements Writable { private byte [] splitkey; private Range region; - /** + /** * For split HStoreFiles, it specifies if the file covers the lower half or * the upper half of the key range */ @@ -83,7 +83,7 @@ public class Reference implements Writable { } /** - * + * * @return Range */ public Range getFileRegion() { diff --git a/src/java/org/apache/hadoop/hbase/io/RowResult.java b/src/java/org/apache/hadoop/hbase/io/RowResult.java index a703e04..746144a 100644 --- a/src/java/org/apache/hadoop/hbase/io/RowResult.java +++ b/src/java/org/apache/hadoop/hbase/io/RowResult.java @@ -66,7 +66,7 @@ public class RowResult implements Writable, SortedMap, this.row = row; this.cells = m; } - + /** * Get the row for this RowResult * @return the row @@ -75,9 +75,9 @@ public class RowResult implements Writable, SortedMap, return row; } - // + // // Map interface - // + // public Cell put(byte [] key, Cell value) { throw new UnsupportedOperationException("RowResult is read-only!"); @@ -99,7 +99,7 @@ public class RowResult implements Writable, SortedMap, public boolean containsKey(Object key) { return cells.containsKey(key); } - + /** * Check if the key can be found in this RowResult * @param key @@ -136,10 +136,10 @@ public class RowResult implements Writable, SortedMap, public Set> entrySet() { return Collections.unmodifiableSet(this.cells.entrySet()); } - + /** * This method used solely for the REST serialization - * + * * @return Cells */ public RestCell[] getCells() { @@ -159,7 +159,7 @@ public class RowResult implements Writable, SortedMap, } return result; } - + /** * Get the Cell that corresponds to column * @param column @@ -168,7 +168,7 @@ public class RowResult implements Writable, SortedMap, public Cell get(byte [] column) { return this.cells.get(column); } - + /** * Get the Cell that corresponds to column, using a String key * @param key @@ -187,7 +187,7 @@ public class RowResult implements Writable, SortedMap, public Cell get(byte [] family, byte [] columnQualifier) { return get(Bytes.add(family, KeyValue.COLUMN_FAMILY_DELIM_ARRAY, columnQualifier)); } - + public Comparator comparator() { return this.cells.comparator(); @@ -219,25 +219,25 @@ public class RowResult implements Writable, SortedMap, public class Entry implements Map.Entry { private final byte [] column; private final Cell cell; - + Entry(byte [] row, Cell cell) { this.column = row; this.cell = cell; } - + public Cell setValue(Cell c) { throw new UnsupportedOperationException("RowResult is read-only!"); } - + public byte [] getKey() { return column; } - + public Cell getValue() { return cell; } } - + @Override public String toString() { StringBuilder sb = new StringBuilder(); @@ -264,20 +264,20 @@ public class RowResult implements Writable, SortedMap, sb.append(ioe.toString()); } } else { - sb.append(Bytes.toStringBinary(v)); + sb.append(Bytes.toStringBinary(v)); } sb.append(")"); } sb.append("}"); return sb.toString(); } - + /* (non-Javadoc) * @see org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML() */ public void restSerialize(IRestSerializer serializer) throws HBaseRestException { serializer.serializeRowResult(this); - } + } /** * @param l @@ -322,7 +322,7 @@ public class RowResult implements Writable, SortedMap, Bytes.writeByteArray(out, this.row); 
this.cells.write(out); } - + // // Comparable // diff --git a/src/java/org/apache/hadoop/hbase/io/TimeRange.java b/src/java/org/apache/hadoop/hbase/io/TimeRange.java index 404ddf9..18cf86b 100644 --- a/src/java/org/apache/hadoop/hbase/io/TimeRange.java +++ b/src/java/org/apache/hadoop/hbase/io/TimeRange.java @@ -48,7 +48,7 @@ public class TimeRange implements Writable { public TimeRange() { allTime = true; } - + /** * Represents interval [minStamp, Long.MAX_VALUE) * @param minStamp the minimum timestamp value, inclusive @@ -56,7 +56,7 @@ public class TimeRange implements Writable { public TimeRange(long minStamp) { this.minStamp = minStamp; } - + /** * Represents interval [minStamp, Long.MAX_VALUE) * @param minStamp the minimum timestamp value, inclusive @@ -64,9 +64,9 @@ public class TimeRange implements Writable { public TimeRange(byte [] minStamp) { this.minStamp = Bytes.toLong(minStamp); } - + /** - * Represents interval [minStamp, maxStamp) + * Represents interval [minStamp, maxStamp) * @param minStamp the minimum timestamp, inclusive * @param maxStamp the maximum timestamp, exclusive * @throws IOException @@ -81,7 +81,7 @@ public class TimeRange implements Writable { } /** - * Represents interval [minStamp, maxStamp) + * Represents interval [minStamp, maxStamp) * @param minStamp the minimum timestamp, inclusive * @param maxStamp the maximum timestamp, exclusive * @throws IOException @@ -90,7 +90,7 @@ public class TimeRange implements Writable { throws IOException { this(Bytes.toLong(minStamp), Bytes.toLong(maxStamp)); } - + /** * @return the smallest timestamp that should be considered */ @@ -104,11 +104,11 @@ public class TimeRange implements Writable { public long getMax() { return maxStamp; } - + /** * Check if the specified timestamp is within this TimeRange. *

    - * Returns true if within interval [minStamp, maxStamp), false + * Returns true if within interval [minStamp, maxStamp), false * if not. * @param bytes timestamp to check * @param offset offset into the bytes @@ -118,11 +118,11 @@ public class TimeRange implements Writable { if(allTime) return true; return withinTimeRange(Bytes.toLong(bytes, offset)); } - + /** * Check if the specified timestamp is within this TimeRange. *

    - * Returns true if within interval [minStamp, maxStamp), false + * Returns true if within interval [minStamp, maxStamp), false * if not. * @param timestamp timestamp to check * @return true if within TimeRange, false if not @@ -132,11 +132,11 @@ public class TimeRange implements Writable { // check if >= minStamp return (minStamp <= timestamp && timestamp < maxStamp); } - + /** * Check if the specified timestamp is within this TimeRange. *

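As a hedged illustration of the half-open interval these withinTimeRange variants describe (the two-argument constructor in this file is declared to throw IOException when maxStamp is less than minStamp, so a real caller must handle that):

  TimeRange tr = new TimeRange(10L, 20L);   // represents [10, 20)
  boolean in  = tr.withinTimeRange(19L);    // true: 10 <= 19 < 20
  boolean out = tr.withinTimeRange(20L);    // false: maxStamp is exclusive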
    - * Returns true if within interval [minStamp, maxStamp), false + * Returns true if within interval [minStamp, maxStamp), false * if not. * @param timestamp timestamp to check * @return true if within TimeRange, false if not @@ -146,7 +146,7 @@ public class TimeRange implements Writable { // check if >= minStamp return (timestamp >= minStamp); } - + @Override public String toString() { StringBuffer sb = new StringBuffer(); @@ -156,14 +156,14 @@ public class TimeRange implements Writable { sb.append(this.minStamp); return sb.toString(); } - + //Writable public void readFields(final DataInput in) throws IOException { this.minStamp = in.readLong(); this.maxStamp = in.readLong(); this.allTime = in.readBoolean(); } - + public void write(final DataOutput out) throws IOException { out.writeLong(minStamp); out.writeLong(maxStamp); diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/src/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index 18392cb..a41343e 100644 --- a/src/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/src/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -33,14 +33,14 @@ public interface BlockCache { * @param inMemory Whether block should be treated as in-memory */ public void cacheBlock(String blockName, ByteBuffer buf, boolean inMemory); - + /** * Add block to cache (defaults to not in-memory). * @param blockName Zero-based file block number. * @param buf The block contents wrapped in a ByteBuffer. */ public void cacheBlock(String blockName, ByteBuffer buf); - + /** * Fetch block from cache. * @param blockName Block number to fetch. diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java b/src/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java index 6657107..9300c13 100644 --- a/src/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java +++ b/src/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java @@ -27,49 +27,49 @@ import org.apache.hadoop.hbase.util.ClassSize; /** * Represents an entry in the {@link LruBlockCache}. - * + * *

    Makes the block memory-aware with {@link HeapSize} and Comparable * to sort by access time for the LRU. It also takes care of priority by * either instantiating as in-memory or handling the transition from single * to multiple access. */ public class CachedBlock implements HeapSize, Comparable { - + public final static long PER_BLOCK_OVERHEAD = ClassSize.align( ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG) + ClassSize.STRING + ClassSize.BYTE_BUFFER); - - static enum BlockPriority { + + static enum BlockPriority { /** * Accessed a single time (used for scan-resistance) */ - SINGLE, + SINGLE, /** * Accessed multiple times */ - MULTI, + MULTI, /** * Block from in-memory store */ MEMORY }; - + private final String blockName; private final ByteBuffer buf; private volatile long accessTime; private long size; private BlockPriority priority; - + public CachedBlock(String blockName, ByteBuffer buf, long accessTime) { this(blockName, buf, accessTime, false); } - + public CachedBlock(String blockName, ByteBuffer buf, long accessTime, boolean inMemory) { this.blockName = blockName; this.buf = buf; this.accessTime = accessTime; - this.size = ClassSize.align(blockName.length()) + + this.size = ClassSize.align(blockName.length()) + ClassSize.align(buf.capacity()) + PER_BLOCK_OVERHEAD; if(inMemory) { this.priority = BlockPriority.MEMORY; @@ -77,7 +77,7 @@ public class CachedBlock implements HeapSize, Comparable { this.priority = BlockPriority.SINGLE; } } - + /** * Block has been accessed. Update its local access time. */ @@ -87,7 +87,7 @@ public class CachedBlock implements HeapSize, Comparable { this.priority = BlockPriority.MULTI; } } - + public long heapSize() { return size; } @@ -96,15 +96,15 @@ public class CachedBlock implements HeapSize, Comparable { if(this.accessTime == that.accessTime) return 0; return this.accessTime < that.accessTime ? 1 : -1; } - + public ByteBuffer getBuffer() { return this.buf; } - + public String getName() { return this.blockName; } - + public BlockPriority getPriority() { return this.priority; } diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java b/src/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java index e11a43d..73452d7 100644 --- a/src/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java +++ b/src/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java @@ -28,22 +28,22 @@ import org.apache.hadoop.hbase.io.HeapSize; * A memory-bound queue that will grow until an element brings * total size >= maxSize. From then on, only entries that are sorted larger * than the smallest current entry will be inserted/replaced. - * + * *

    Use this when you want to find the largest elements (according to their * ordering, not their heap size) that consume as close to the specified * maxSize as possible. Default behavior is to grow just above rather than * just below specified max. - * + * *

    Object used in this queue must implement {@link HeapSize} as well as * {@link Comparable}. */ public class CachedBlockQueue implements HeapSize { - + private PriorityQueue queue; - + private long heapSize; private long maxSize; - + /** * @param maxSize the target size of elements in the queue * @param blockSize expected average size of blocks @@ -55,10 +55,10 @@ public class CachedBlockQueue implements HeapSize { heapSize = 0; this.maxSize = maxSize; } - + /** * Attempt to add the specified cached block to this queue. - * + * *

    If the queue is smaller than the max size, or if the specified element * is ordered before the smallest element in the queue, the element will be * added to the queue. Otherwise, there is no side effect of this call. @@ -82,7 +82,7 @@ public class CachedBlockQueue implements HeapSize { } } } - + /** * Get a sorted List of all elements in this queue, in descending order. * @return list of cached elements in descending order @@ -94,7 +94,7 @@ public class CachedBlockQueue implements HeapSize { } return blocks.toArray(new CachedBlock[blocks.size()]); } - + /** * Total size of all elements in this queue. * @return size of all elements currently in queue, in bytes diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/Compression.java b/src/java/org/apache/hadoop/hbase/io/hfile/Compression.java index b3b8c95..fd09dc8 100644 --- a/src/java/org/apache/hadoop/hbase/io/hfile/Compression.java +++ b/src/java/org/apache/hadoop/hbase/io/hfile/Compression.java @@ -6,9 +6,9 @@ * "License"); you may not use this file except in compliance with the * License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 7962ec0..2951996 100644 --- a/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -101,7 +101,7 @@ import org.apache.hadoop.io.compress.Decompressor; * compression ratio over "lzo" but requires 4x CPU to compress and 2x CPU to * decompress, comparing to "lzo". *

- * + * * For more on the background behind HFile, see HBASE-61. *

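As a hedged sketch of how the reader side described above is opened (mirroring calls that appear later in this file's diff; fs and path are assumed to already exist):

  // Open an HFile, load its file info, then ask for a scanner.
  HFile.Reader reader = new HFile.Reader(fs, path, null, false); // no block cache, not in-memory
  Map fileInfo = reader.loadFileInfo();                   // must be called before the reader is used
  HFileScanner scanner = reader.getScanner(false, true);  // no block caching, use pread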
@@ -123,7 +123,7 @@ import org.apache.hadoop.io.compress.Decompressor; public class HFile { static final Log LOG = LogFactory.getLog(HFile.class); - /* These values are more or less arbitrary, and they are used as a + /* These values are more or less arbitrary, and they are used as a * form of check to make sure the file isn't completely corrupt. */ final static byte [] DATABLOCKMAGIC = @@ -132,14 +132,14 @@ public class HFile { { 'I', 'D', 'X', 'B', 'L', 'K', 41, 43 }; final static byte [] METABLOCKMAGIC = { 'M', 'E', 'T', 'A', 'B', 'L', 'K', 99 }; - final static byte [] TRAILERBLOCKMAGIC = + final static byte [] TRAILERBLOCKMAGIC = { 'T', 'R', 'A', 'B', 'L', 'K', 34, 36 }; /** * Maximum length of key in HFile. */ public final static int MAXIMUM_KEY_LENGTH = Integer.MAX_VALUE; - + /** * Default blocksize for hfile. */ @@ -268,7 +268,7 @@ public class HFile { * @param blocksize * @param compress * @param comparator - * @throws IOException + * @throws IOException * @throws IOException */ public Writer(FileSystem fs, Path path, int blocksize, @@ -313,7 +313,7 @@ public class HFile { this(ostream, blocksize, Compression.getCompressionAlgorithmByName(compress), c); } - + /** * Constructor that takes a stream. * @param ostream Stream to use. @@ -398,13 +398,13 @@ public class HFile { this.compressor, 0); return new DataOutputStream(os); } - + /* * Let go of block compressor and compressing stream gotten in call * {@link #getCompressingStream}. * @param dos * @return How much was written on this stream since it was taken out. - * @see #getCompressingStream() + * @see #getCompressingStream() * @throws IOException */ private int releaseCompressingStream(final DataOutputStream dos) @@ -434,7 +434,7 @@ public class HFile { * from {@link Reader#loadFileInfo()}. * @param k Key * @param v Value - * @throws IOException + * @throws IOException */ public void appendFileInfo(final byte [] k, final byte [] v) throws IOException { @@ -577,7 +577,7 @@ public class HFile { finishBlock(); FixedFileTrailer trailer = new FixedFileTrailer(); - + // Write out the metadata blocks if any. ArrayList metaOffsets = null; ArrayList metaDataSizes = null; @@ -608,10 +608,10 @@ public class HFile { // Now finish off the trailer. trailer.dataIndexCount = blockKeys.size(); trailer.metaIndexCount = metaNames.size(); - + trailer.totalUncompressedBytes = totalBytes; trailer.entryCount = entryCount; - + trailer.compressionCodec = this.compressAlgo.ordinal(); trailer.serialize(outputStream); @@ -680,7 +680,7 @@ public class HFile { private BlockIndex metaIndex; FixedFileTrailer trailer; private volatile boolean fileInfoLoaded = false; - + // Filled when we read in the trailer. private Compression.Algorithm compressAlgo; @@ -689,7 +689,7 @@ public class HFile { // Stats read in when we load file info. private int avgKeyLen = -1; private int avgValueLen = -1; - + // Used to ensure we seek correctly. RawComparator comparator; @@ -700,7 +700,7 @@ public class HFile { private final BlockCache cache; public int cacheHits = 0; public int blockLoads = 0; - + // Whether file is from in-memory store private boolean inMemory = false; @@ -717,8 +717,8 @@ public class HFile { this(null, -1, null, false); } - /** - * Opens a HFile. You must load the file info before you can + /** + * Opens a HFile. You must load the file info before you can * use it by calling {@link #loadFileInfo()}. * * @param fs filesystem to load from @@ -733,8 +733,8 @@ public class HFile { this.name = path.toString(); } - /** - * Opens a HFile. 
You must load the index before you can + /** + * Opens a HFile. You must load the index before you can * use it by calling {@link #loadFileInfo()}. * * @param fsdis input stream. Caller is responsible for closing the passed @@ -778,7 +778,7 @@ public class HFile { public long length() { return this.fileSize; } - + public boolean inMemory() { return this.inMemory; } @@ -899,7 +899,7 @@ public class HFile { } else { blockSize = metaIndex.blockOffsets[block+1] - metaIndex.blockOffsets[block]; } - + ByteBuffer buf = decompress(metaIndex.blockOffsets[block], longToInt(blockSize), metaIndex.blockDataSizes[block], true); byte [] magic = new byte[METABLOCKMAGIC.length]; @@ -995,18 +995,18 @@ public class HFile { * @param offset * @param compressedSize * @param decompressedSize - * + * * @return * @throws IOException */ private ByteBuffer decompress(final long offset, final int compressedSize, - final int decompressedSize, final boolean pread) + final int decompressedSize, final boolean pread) throws IOException { Decompressor decompressor = null; ByteBuffer buf = null; try { decompressor = this.compressAlgo.getDecompressor(); - // My guess is that the bounded range fis is needed to stop the + // My guess is that the bounded range fis is needed to stop the // decompressor reading into next block -- IIRC, it just grabs a // bunch of data w/o regard to whether decompressor is coming to end of a // decompression. @@ -1016,15 +1016,15 @@ public class HFile { decompressor, 0); buf = ByteBuffer.allocate(decompressedSize); IOUtils.readFully(is, buf.array(), 0, buf.capacity()); - is.close(); + is.close(); } finally { if (null != decompressor) { - this.compressAlgo.returnDecompressor(decompressor); + this.compressAlgo.returnDecompressor(decompressor); } } return buf; } - + /** * @return First key in the file. May be null if file has no entries. */ @@ -1066,7 +1066,7 @@ public class HFile { return (this.blockIndex != null? this.blockIndex.heapSize(): 0) + ((this.metaIndex != null)? this.metaIndex.heapSize(): 0); } - + /** * @return Midkey for this file. We work with block boundaries only so * returned midkey is an approximation only. @@ -1107,7 +1107,7 @@ public class HFile { this.cacheBlocks = cacheBlocks; this.pread = pread; } - + public KeyValue getKeyValue() { if(this.block == null) { return null; @@ -1169,25 +1169,25 @@ public class HFile { currValueLen = block.getInt(); return true; } - + public int seekTo(byte [] key) throws IOException { return seekTo(key, 0, key.length); } - + public int seekTo(byte[] key, int offset, int length) throws IOException { int b = reader.blockContainingKey(key, offset, length); if (b < 0) return -1; // falls before the beginning of the file! :-( // Avoid re-reading the same block (that'd be dumb). loadBlock(b); - + return blockSeek(key, offset, length, false); } /** * Within a loaded block, seek looking for the first key * that is smaller than (or equal to?) the key we are interested in. - * + * * A note on the seekBefore - if you have seekBefore = true, AND the * first key in the block = key, then you'll get thrown exceptions. 
* @param key to find @@ -1235,7 +1235,7 @@ public class HFile { public boolean seekBefore(byte [] key) throws IOException { return seekBefore(key, 0, key.length); } - + public boolean seekBefore(byte[] key, int offset, int length) throws IOException { int b = reader.blockContainingKey(key, offset, length); @@ -1294,7 +1294,7 @@ public class HFile { blockFetches++; return true; } - + private void loadBlock(int bloc) throws IOException { if (block == null) { block = reader.readBlock(bloc, this.cacheBlocks, this.pread); @@ -1317,7 +1317,7 @@ public class HFile { return trailer.toString(); } } - + /* * The RFile has a fixed trailer which contains offsets to other variable * parts of the file. Also includes basic metadata on this file. @@ -1337,14 +1337,14 @@ public class HFile { int entryCount; int compressionCodec; int version = 1; - + FixedFileTrailer() { super(); } static int trailerSize() { // Keep this up to date... - return + return ( Bytes.SIZEOF_INT * 5 ) + ( Bytes.SIZEOF_LONG * 4 ) + TRAILERBLOCKMAGIC.length; @@ -1376,7 +1376,7 @@ public class HFile { metaIndexOffset = inputStream.readLong(); metaIndexCount = inputStream.readInt(); - + totalUncompressedBytes = inputStream.readLong(); entryCount = inputStream.readInt(); compressionCodec = inputStream.readInt(); @@ -1415,7 +1415,7 @@ public class HFile { /* Needed doing lookup on blocks. */ final RawComparator comparator; - + /* * Shutdown default constructor */ @@ -1443,7 +1443,7 @@ public class HFile { /** * Adds a new entry in the block index. - * + * * @param key Last key in the block * @param offset file offset where the block is stored * @param dataSize the uncompressed data size @@ -1474,13 +1474,13 @@ public class HFile { // the block with a firstKey < key. This means the value we want is potentially // in the next block. pos --; // in previous block. - + return pos; } // wow, a perfect hit, how unlikely? return pos; } - + /* * @return File midkey. Inexact. Operates on block boundaries. Does * not go into blocks. @@ -1571,12 +1571,12 @@ public class HFile { } public long heapSize() { - long heapsize = ClassSize.align(ClassSize.OBJECT + + long heapsize = ClassSize.align(ClassSize.OBJECT + 2 * Bytes.SIZEOF_INT + (3 + 1) * ClassSize.REFERENCE); - //Calculating the size of blockKeys + //Calculating the size of blockKeys if(blockKeys != null) { //Adding array + references overhead - heapsize += ClassSize.align(ClassSize.ARRAY + + heapsize += ClassSize.align(ClassSize.ARRAY + blockKeys.length * ClassSize.REFERENCE); //Adding bytes for(byte [] bs : blockKeys) { @@ -1584,17 +1584,17 @@ public class HFile { } } if(blockOffsets != null) { - heapsize += ClassSize.align(ClassSize.ARRAY + + heapsize += ClassSize.align(ClassSize.ARRAY + blockOffsets.length * Bytes.SIZEOF_LONG); } if(blockDataSizes != null) { - heapsize += ClassSize.align(ClassSize.ARRAY + + heapsize += ClassSize.align(ClassSize.ARRAY + blockDataSizes.length * Bytes.SIZEOF_INT); } - + return ClassSize.align(heapsize); } - + } /* @@ -1621,7 +1621,7 @@ public class HFile { /** * Get names of supported compression algorithms. The names are acceptable by * HFile.Writer. - * + * * @return Array of strings, each represents a supported compression * algorithm. Currently, the following compression algorithms are * supported. @@ -1648,13 +1648,13 @@ public class HFile { /** * Returns all files belonging to the given region directory. Could return an * empty list. - * + * * @param fs The file system reference. * @param regionDir The region directory to scan. 
* @return The list of files found. * @throws IOException When scanning the files fails. */ - static List getStoreFiles(FileSystem fs, Path regionDir) + static List getStoreFiles(FileSystem fs, Path regionDir) throws IOException { List res = new ArrayList(); PathFilter dirFilter = new FSUtils.DirFilter(fs); @@ -1669,7 +1669,7 @@ public class HFile { } return res; } - + public static void main(String []args) throws IOException { try { // create options @@ -1715,7 +1715,7 @@ public class HFile { Path regionDir = new Path(tableDir, Integer.toString(enc)); if (verbose) System.out.println("region dir -> " + regionDir); List regionFiles = getStoreFiles(fs, regionDir); - if (verbose) System.out.println("Number of region files found -> " + + if (verbose) System.out.println("Number of region files found -> " + regionFiles.size()); if (verbose) { int i = 1; @@ -1732,7 +1732,7 @@ public class HFile { System.err.println("ERROR, file doesnt exist: " + file); continue; } - // create reader and load file info + // create reader and load file info HFile.Reader reader = new HFile.Reader(fs, file, null, false); Map fileInfo = reader.loadFileInfo(); // scan over file and read key/value's and check if requested @@ -1750,9 +1750,9 @@ public class HFile { // check if rows are in order if (checkRow && pkv != null) { if (Bytes.compareTo(pkv.getRow(), kv.getRow()) > 0) { - System.err.println("WARNING, previous row is greater then" + - " current row\n\tfilename -> " + file + - "\n\tprevious -> " + Bytes.toStringBinary(pkv.getKey()) + + System.err.println("WARNING, previous row is greater then" + + " current row\n\tfilename -> " + file + + "\n\tprevious -> " + Bytes.toStringBinary(pkv.getKey()) + "\n\tcurrent -> " + Bytes.toStringBinary(kv.getKey())); } } @@ -1760,14 +1760,14 @@ public class HFile { if (checkFamily) { String fam = Bytes.toString(kv.getFamily()); if (!file.toString().contains(fam)) { - System.err.println("WARNING, filename does not match kv family," + - "\n\tfilename -> " + file + + System.err.println("WARNING, filename does not match kv family," + + "\n\tfilename -> " + file + "\n\tkeyvalue -> " + Bytes.toStringBinary(kv.getKey())); } if (pkv != null && Bytes.compareTo(pkv.getFamily(), kv.getFamily()) != 0) { System.err.println("WARNING, previous kv has different family" + - " compared to current key\n\tfilename -> " + file + - "\n\tprevious -> " + Bytes.toStringBinary(pkv.getKey()) + + " compared to current key\n\tfilename -> " + file + + "\n\tprevious -> " + Bytes.toStringBinary(pkv.getKey()) + "\n\tcurrent -> " + Bytes.toStringBinary(kv.getKey())); } } @@ -1777,7 +1777,7 @@ public class HFile { if (verbose || printKeyValue) { System.out.println("Scanned kv count -> " + count); } - // print meta data + // print meta data if (printMeta) { System.out.println("Block index size as per heapsize: " + reader.indexSize()); System.out.println(reader.toString()); diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java index 6b9673d..9d891c6 100644 --- a/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java +++ b/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.KeyValue; /** * A scanner allows you to position yourself within a HFile and * scan through it. It allows you to reposition yourself as well. - * + * *

A scanner doesn't always have a key/value that it is pointing to * when it is first created and before * {@link #seekTo()}/{@link #seekTo(byte[])} are called. @@ -40,7 +40,7 @@ public interface HFileScanner { /** * SeekTo or just before the passed key. Examine the return * code to figure whether we found the key or not. - * Consider the key stream of all the keys in the file, + * Consider the key stream of all the keys in the file, * k[0] .. k[n], where there are n keys in the file. * @param key Key to find. * @return -1, if key < k[0], no position; @@ -53,7 +53,7 @@ public interface HFileScanner { public int seekTo(byte[] key) throws IOException; public int seekTo(byte[] key, int offset, int length) throws IOException; /** - * Consider the key stream of all the keys in the file, + * Consider the key stream of all the keys in the file, * k[0] .. k[n], where there are n keys in the file. * @param key Key to find * @return false if key <= k[0] or true with scanner in position 'i' such @@ -87,7 +87,7 @@ public interface HFileScanner { /** * Gets a buffer view to the current value. You must call * {@link #seekTo(byte[])} before this method. - * + * * @return byte buffer for the value. The limit is set to the value size, and * the position is 0, the start of the buffer view. */ diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 4116786..0fde664 100644 --- a/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -40,35 +40,35 @@ import org.apache.hadoop.hbase.util.ClassSize; * memory-bound using an LRU eviction algorithm, and concurrent: backed by a * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving * constant-time {@link #cacheBlock} and {@link #getBlock} operations.

- * + * * Contains three levels of block priority to allow for * scan-resistance and in-memory families. A block is added with an inMemory * flag if necessary, otherwise a block becomes a single access priority. Once * a blocked is accessed again, it changes to multiple access. This is used * to prevent scans from thrashing the cache, adding a least-frequently-used * element to the eviction algorithm.

- * + * * Each priority is given its own chunk of the total cache to ensure * fairness during eviction. Each priority will retain close to its maximum * size, however, if any priority is not using its entire chunk the others * are able to grow beyond their chunk size.

- * + * * Instantiated at a minimum with the total size and average block size. - * All sizes are in bytes. The block size is not especially important as this + * All sizes are in bytes. The block size is not especially important as this * cache is fully dynamic in its sizing of blocks. It is only used for * pre-allocating data structures and in initial heap estimation of the map.

- * + * * The detailed constructor defines the sizes for the three priorities (they * should total to the maximum size defined). It also sets the levels that * trigger and control the eviction thread.

- * + * * The acceptable size is the cache size level which triggers the eviction * process to start. It evicts enough blocks to get the size below the * minimum size specified.

- * + * * Eviction happens in a separate thread and involves a single full-scan * of the map. It determines how many bytes must be freed to reach the minimum - * size, and then while scanning determines the fewest least-recently-used + * size, and then while scanning determines the fewest least-recently-used * blocks necessary from each of the three priorities (would be 3 times bytes * to free). It then uses the priority chunk sizes to evict fairly according * to the relative sizes and usage. @@ -76,81 +76,81 @@ import org.apache.hadoop.hbase.util.ClassSize; public class LruBlockCache implements BlockCache, HeapSize { static final Log LOG = LogFactory.getLog(LruBlockCache.class); - + /** Default Configuration Parameters*/ - + /** Backing Concurrent Map Configuration */ static final float DEFAULT_LOAD_FACTOR = 0.75f; static final int DEFAULT_CONCURRENCY_LEVEL = 16; - + /** Eviction thresholds */ static final float DEFAULT_MIN_FACTOR = 0.75f; static final float DEFAULT_ACCEPTABLE_FACTOR = 0.85f; - + /** Priority buckets */ static final float DEFAULT_SINGLE_FACTOR = 0.25f; static final float DEFAULT_MULTI_FACTOR = 0.50f; static final float DEFAULT_MEMORY_FACTOR = 0.25f; - + /** Statistics thread */ static final int statThreadPeriod = 60; - + /** Concurrent map (the cache) */ private final ConcurrentHashMap map; - + /** Eviction lock (locked when eviction in process) */ private final ReentrantLock evictionLock = new ReentrantLock(true); - + /** Volatile boolean to track if we are in an eviction process or not */ private volatile boolean evictionInProgress = false; - + /** Eviction thread */ private final EvictionThread evictionThread; - + /** Statistics thread schedule pool (for heavy debugging, could remove) */ private final ScheduledExecutorService scheduleThreadPool = Executors.newScheduledThreadPool(1); - + /** Current size of cache */ private final AtomicLong size; - + /** Current number of cached elements */ private final AtomicLong elements; - + /** Cache access count (sequential ID) */ private final AtomicLong count; - + /** Cache statistics */ private final CacheStats stats; - + /** Maximum allowable size of cache (block put if size > max, evict) */ private long maxSize; /** Approximate block size */ private long blockSize; - + /** Acceptable size of cache (no evictions if size < acceptable) */ private float acceptableFactor; - + /** Minimum threshold of cache (when evicting, evict until size < min) */ private float minFactor; - + /** Single access bucket size */ private float singleFactor; - + /** Multiple access bucket size */ private float multiFactor; - + /** In-memory bucket size */ private float memoryFactor; - + /** Overhead of the structure itself */ private long overhead; - + /** * Default constructor. Specify maximum size and expected average block * size (approximation is fine). - * + * *

All other factors will be calculated based on defaults specified in * this class. * @param maxSize maximum size of cache, in bytes @@ -159,7 +159,7 @@ public class LruBlockCache implements BlockCache, HeapSize { public LruBlockCache(long maxSize, long blockSize) { this(maxSize, blockSize, true); } - + /** * Constructor used for testing. Allows disabling of the eviction thread. */ @@ -171,7 +171,7 @@ public class LruBlockCache implements BlockCache, HeapSize { DEFAULT_SINGLE_FACTOR, DEFAULT_MULTI_FACTOR, DEFAULT_MEMORY_FACTOR); } - + /** * Configurable constructor. Use this constructor if not using defaults. * @param maxSize maximum size of this cache, in bytes @@ -191,7 +191,7 @@ public class LruBlockCache implements BlockCache, HeapSize { float minFactor, float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor) { if(singleFactor + multiFactor + memoryFactor != 1) { - throw new IllegalArgumentException("Single, multi, and memory factors " + + throw new IllegalArgumentException("Single, multi, and memory factors " + " should total 1.0"); } if(minFactor >= acceptableFactor) { @@ -223,16 +223,16 @@ public class LruBlockCache implements BlockCache, HeapSize { this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), statThreadPeriod, statThreadPeriod, TimeUnit.SECONDS); } - + public void setMaxSize(long maxSize) { this.maxSize = maxSize; if(this.size.get() > acceptableSize() && !evictionInProgress) { runEviction(); } } - + // BlockCache implementation - + /** * Cache the block with the specified name and buffer. *

@@ -295,7 +295,7 @@ public class LruBlockCache implements BlockCache, HeapSize { stats.evicted(); return block.heapSize(); } - + /** * Multi-threaded call to run the eviction process. */ @@ -306,7 +306,7 @@ public class LruBlockCache implements BlockCache, HeapSize { evictionThread.evict(); } } - + /** * Eviction method. */ @@ -314,25 +314,25 @@ public class LruBlockCache implements BlockCache, HeapSize { // Ensure only one eviction at a time if(!evictionLock.tryLock()) return; - + try { evictionInProgress = true; - + long bytesToFree = size.get() - minSize(); - - LOG.debug("Block cache LRU eviction started. Attempting to free " + + + LOG.debug("Block cache LRU eviction started. Attempting to free " + bytesToFree + " bytes"); - + if(bytesToFree <= 0) return; - + // Instantiate priority buckets - BlockBucket bucketSingle = new BlockBucket(bytesToFree, blockSize, + BlockBucket bucketSingle = new BlockBucket(bytesToFree, blockSize, singleSize(), "single"); - BlockBucket bucketMulti = new BlockBucket(bytesToFree, blockSize, + BlockBucket bucketMulti = new BlockBucket(bytesToFree, blockSize, multiSize(), "multi"); - BlockBucket bucketMemory = new BlockBucket(bytesToFree, blockSize, + BlockBucket bucketMemory = new BlockBucket(bytesToFree, blockSize, memorySize(), "memory"); - + // Scan entire map putting into appropriate buckets for(CachedBlock cachedBlock : map.values()) { switch(cachedBlock.getPriority()) { @@ -350,17 +350,17 @@ public class LruBlockCache implements BlockCache, HeapSize { } } } - - PriorityQueue bucketQueue = + + PriorityQueue bucketQueue = new PriorityQueue(3); - + bucketQueue.add(bucketSingle); bucketQueue.add(bucketMulti); bucketQueue.add(bucketMemory); - + int remainingBuckets = 3; long bytesFreed = 0; - + BlockBucket bucket; while((bucket = bucketQueue.poll()) != null) { long overflow = bucket.overflow(); @@ -368,28 +368,28 @@ public class LruBlockCache implements BlockCache, HeapSize { long bucketBytesToFree = Math.min(overflow, (long)Math.ceil((bytesToFree - bytesFreed) / remainingBuckets)); bytesFreed += bucket.free(bucketBytesToFree); - } + } remainingBuckets--; } - + float singleMB = ((float)bucketSingle.totalSize())/((float)(1024*1024)); float multiMB = ((float)bucketMulti.totalSize())/((float)(1024*1024)); float memoryMB = ((float)bucketMemory.totalSize())/((float)(1024*1024)); - - LOG.debug("Block cache LRU eviction completed. " + + + LOG.debug("Block cache LRU eviction completed. " + "Freed " + bytesFreed + " bytes. " + "Priority Sizes: " + "Single=" + singleMB + "MB (" + bucketSingle.totalSize() + "), " + "Multi=" + multiMB + "MB (" + bucketMulti.totalSize() + ")," + "Memory=" + memoryMB + "MB (" + bucketMemory.totalSize() + ")"); - + } finally { stats.evict(); evictionInProgress = false; evictionLock.unlock(); } } - + /** * Used to group blocks into priority buckets. There will be a BlockBucket * for each priority (single, multi, memory). 
Once bucketed, the eviction @@ -402,20 +402,20 @@ public class LruBlockCache implements BlockCache, HeapSize { private long totalSize = 0; private long bucketSize; String name; - - public BlockBucket(long bytesToFree, long blockSize, long bucketSize, + + public BlockBucket(long bytesToFree, long blockSize, long bucketSize, String name) { this.bucketSize = bucketSize; queue = new CachedBlockQueue(bytesToFree, blockSize); totalSize = 0; this.name = name; } - + public void add(CachedBlock block) { totalSize += block.heapSize(); queue.add(block); } - + public long free(long toFree) { CachedBlock [] blocks = queue.get(); long freedBytes = 0; @@ -427,21 +427,21 @@ public class LruBlockCache implements BlockCache, HeapSize { } return freedBytes; } - + public long overflow() { return totalSize - bucketSize; } - + public long totalSize() { return totalSize; } - + public int compareTo(BlockBucket that) { if(this.overflow() == that.overflow()) return 0; return this.overflow() > that.overflow() ? 1 : -1; } } - + /** * Get the maximum size of this cache. * @return max size in bytes @@ -449,7 +449,7 @@ public class LruBlockCache implements BlockCache, HeapSize { public long getMaxSize() { return this.maxSize; } - + /** * Get the current size of this cache. * @return current size in bytes @@ -457,7 +457,7 @@ public class LruBlockCache implements BlockCache, HeapSize { public long getCurrentSize() { return this.size.get(); } - + /** * Get the current size of this cache. * @return current size in bytes @@ -465,7 +465,7 @@ public class LruBlockCache implements BlockCache, HeapSize { public long getFreeSize() { return getMaxSize() - getCurrentSize(); } - + /** * Get the size of this cache (number of cached blocks) * @return number of cached blocks @@ -473,14 +473,14 @@ public class LruBlockCache implements BlockCache, HeapSize { public long size() { return this.elements.get(); } - + /** * Get the number of eviction runs that have occurred */ public long getEvictionCount() { return this.stats.getEvictionCount(); } - + /** * Get the number of blocks that have been evicted during the lifetime * of this cache. @@ -488,22 +488,22 @@ public class LruBlockCache implements BlockCache, HeapSize { public long getEvictedCount() { return this.stats.getEvictedCount(); } - + /* * Eviction thread. Sits in waiting state until an eviction is triggered * when the cache size grows above the acceptable level.

- * + * * Thread is triggered into action by {@link LruBlockCache#runEviction()} */ private static class EvictionThread extends Thread { private WeakReference cache; - + public EvictionThread(LruBlockCache cache) { super("LruBlockCache.EvictionThread"); setDaemon(true); this.cache = new WeakReference(cache); } - + @Override public void run() { while(true) { @@ -523,7 +523,7 @@ public class LruBlockCache implements BlockCache, HeapSize { } } } - + /* * Statistics thread. Periodically prints the cache statistics to the log. */ @@ -540,7 +540,7 @@ public class LruBlockCache implements BlockCache, HeapSize { lru.logStats(); } } - + public void logStats() { // Log size long totalSize = heapSize(); @@ -548,7 +548,7 @@ public class LruBlockCache implements BlockCache, HeapSize { float sizeMB = ((float)totalSize)/((float)(1024*1024)); float freeMB = ((float)freeSize)/((float)(1024*1024)); float maxMB = ((float)maxSize)/((float)(1024*1024)); - LruBlockCache.LOG.debug("Cache Stats: Sizes: " + + LruBlockCache.LOG.debug("Cache Stats: Sizes: " + "Total=" + sizeMB + "MB (" + totalSize + "), " + "Free=" + freeMB + "MB (" + freeSize + "), " + "Max=" + maxMB + "MB (" + maxSize +")" + @@ -564,46 +564,46 @@ public class LruBlockCache implements BlockCache, HeapSize { "Miss Ratio=" + stats.getMissRatio()*100 + "%, " + "Evicted/Run=" + stats.evictedPerEviction()); } - + /** * Get counter statistics for this cache. - * + * *

Includes: total accesses, hits, misses, evicted blocks, and runs * of the eviction processes. */ public CacheStats getStats() { return this.stats; } - + public static class CacheStats { private final AtomicLong accessCount = new AtomicLong(0); private final AtomicLong hitCount = new AtomicLong(0); private final AtomicLong missCount = new AtomicLong(0); private final AtomicLong evictionCount = new AtomicLong(0); private final AtomicLong evictedCount = new AtomicLong(0); - + public void miss() { missCount.incrementAndGet(); accessCount.incrementAndGet(); } - + public void hit() { hitCount.incrementAndGet(); accessCount.incrementAndGet(); } - + public void evict() { evictionCount.incrementAndGet(); } - + public void evicted() { evictedCount.incrementAndGet(); } - + public long getRequestCount() { return accessCount.get(); } - + public long getMissCount() { return missCount.get(); } @@ -611,47 +611,47 @@ public class LruBlockCache implements BlockCache, HeapSize { public long getHitCount() { return hitCount.get(); } - + public long getEvictionCount() { return evictionCount.get(); } - + public long getEvictedCount() { return evictedCount.get(); } - + public double getHitRatio() { return ((float)getHitCount()/(float)getRequestCount()); } - + public double getMissRatio() { return ((float)getMissCount()/(float)getRequestCount()); } - + public double evictedPerEviction() { return (float)((float)getEvictedCount()/(float)getEvictionCount()); } } - + public final static long CACHE_FIXED_OVERHEAD = ClassSize.align( - (3 * Bytes.SIZEOF_LONG) + (8 * ClassSize.REFERENCE) + + (3 * Bytes.SIZEOF_LONG) + (8 * ClassSize.REFERENCE) + (5 * Bytes.SIZEOF_FLOAT) + Bytes.SIZEOF_BOOLEAN + ClassSize.OBJECT); - + // HeapSize implementation public long heapSize() { return getCurrentSize(); } - + public static long calculateOverhead(long maxSize, long blockSize, int concurrency){ return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP + - ((int)Math.ceil(maxSize*1.2/blockSize) + ((int)Math.ceil(maxSize*1.2/blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY) + (concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT); } - + // Simple calculators of sizes given factors and maxSize - + private long acceptableSize() { return (long)Math.floor(this.maxSize * this.acceptableFactor); } diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java b/src/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java index fbdb2af..561b715 100644 --- a/src/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java +++ b/src/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java @@ -6,9 +6,9 @@ * "License"); you may not use this file except in compliance with the * License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -36,19 +36,19 @@ public class SimpleBlockCache implements BlockCache { this.blockId = blockId; } } - private Map cache = + private Map cache = new HashMap(); private ReferenceQueue q = new ReferenceQueue(); public int dumps = 0; - + /** * Constructor */ public SimpleBlockCache() { super(); } - + void processQueue() { Ref r; while ( (r = (Ref)q.poll()) != null) { @@ -77,7 +77,7 @@ public class SimpleBlockCache implements BlockCache { cache.put(blockName, new Ref(blockName, buf, q)); } - public synchronized void cacheBlock(String blockName, ByteBuffer buf, + public synchronized void cacheBlock(String blockName, ByteBuffer buf, boolean inMemory) { cache.put(blockName, new Ref(blockName, buf, q)); } diff --git a/src/java/org/apache/hadoop/hbase/ipc/HBaseClient.java b/src/java/org/apache/hadoop/hbase/ipc/HBaseClient.java index cdce106..a0a88c1 100644 --- a/src/java/org/apache/hadoop/hbase/ipc/HBaseClient.java +++ b/src/java/org/apache/hadoop/hbase/ipc/HBaseClient.java @@ -56,14 +56,14 @@ import org.apache.hadoop.util.ReflectionUtils; /** A client for an IPC service. IPC calls take a single {@link Writable} as a * parameter, and return a {@link Writable} as their value. A service runs on * a port and is defined by a parameter class and a value class. - * + * *

This is the org.apache.hadoop.ipc.Client renamed as HBaseClient and * moved into this package so can access package-private methods. - * + * * @see HBaseServer */ public class HBaseClient { - + public static final Log LOG = LogFactory.getLog("org.apache.hadoop.ipc.HBaseClient"); protected Hashtable connections = @@ -73,7 +73,7 @@ public class HBaseClient { protected int counter; // counter for call ids protected AtomicBoolean running = new AtomicBoolean(true); // if client runs final protected Configuration conf; - final protected int maxIdleTime; //connections will be culled if it was idle for + final protected int maxIdleTime; //connections will be culled if it was idle for //maxIdleTime msecs final protected int maxRetries; //the max. no. of retries for socket connections final protected long failureSleep; // Time to sleep before retry on failure. @@ -83,14 +83,14 @@ public class HBaseClient { protected SocketFactory socketFactory; // how to create sockets private int refCount = 1; - + final private static String PING_INTERVAL_NAME = "ipc.ping.interval"; final static int DEFAULT_PING_INTERVAL = 60000; // 1 min final static int PING_CALL_ID = -1; - + /** * set the ping interval value in configuration - * + * * @param conf Configuration * @param pingInterval the ping interval */ @@ -101,14 +101,14 @@ public class HBaseClient { /** * Get the ping interval from configuration; * If not set in the configuration, return the default value. - * + * * @param conf Configuration * @return the ping interval */ final static int getPingInterval(Configuration conf) { return conf.getInt(PING_INTERVAL_NAME, DEFAULT_PING_INTERVAL); } - + /** * Increment this client's reference count * @@ -116,7 +116,7 @@ public class HBaseClient { synchronized void incCount() { refCount++; } - + /** * Decrement this client's reference count * @@ -124,10 +124,10 @@ public class HBaseClient { synchronized void decCount() { refCount--; } - + /** * Return if this client has no reference - * + * * @return true if this client has no reference; false otherwise */ synchronized boolean isZeroReference() { @@ -158,17 +158,17 @@ public class HBaseClient { /** Set the exception when there is an error. * Notify the caller the call is done. - * + * * @param error exception thrown by the call; either local or remote */ public synchronized void setException(IOException error) { this.error = error; callComplete(); } - - /** Set the return value when there is no error. + + /** Set the return value when there is no error. * Notify the caller the call is done. - * + * * @param value return value of the call. */ public synchronized void setValue(Writable value) { @@ -185,7 +185,7 @@ public class HBaseClient { private Socket socket = null; // connected socket private DataInputStream in; private DataOutputStream out; - + // currently active calls private Hashtable calls = new Hashtable(); private AtomicLong lastActivity = new AtomicLong();// last I/O activity time @@ -195,10 +195,10 @@ public class HBaseClient { public Connection(InetSocketAddress address) throws IOException { this(new ConnectionId(address, null)); } - + public Connection(ConnectionId remoteId) throws IOException { if (remoteId.getAddress().isUnresolved()) { - throw new UnknownHostException("unknown host: " + + throw new UnknownHostException("unknown host: " + remoteId.getAddress().getHostName()); } this.remoteId = remoteId; @@ -249,7 +249,7 @@ public class HBaseClient { } sendPing(); } - + /** Read a byte from the stream. * Send a ping if timeout on read. 
Retries if no failure is detected * until a byte is read. @@ -269,7 +269,7 @@ public class HBaseClient { /** Read bytes into a buffer starting from offset off * Send a ping if timeout on read. Retries if no failure is detected * until a byte is read. - * + * * @return the total number of bytes read; -1 if the connection is closed. */ @Override @@ -283,7 +283,7 @@ public class HBaseClient { } while (true); } } - + /** Connect to the server and set up the I/O streams. It then sends * a header to the server and starts * the connection thread that waits for responses. @@ -292,7 +292,7 @@ public class HBaseClient { if (socket != null || shouldCloseConnection.get()) { return; } - + short ioFailures = 0; short timeoutFailures = 0; try { @@ -370,8 +370,8 @@ public class HBaseClient { try { Thread.sleep(failureSleep); } catch (InterruptedException ignored) {} - - LOG.info("Retrying connect to server: " + remoteId.getAddress() + + + LOG.info("Retrying connect to server: " + remoteId.getAddress() + " after sleeping " + failureSleep + "ms. Already tried " + curRetries + " time(s)."); } @@ -384,17 +384,17 @@ public class HBaseClient { out.write(HBaseServer.CURRENT_VERSION); //When there are more fields we can have ConnectionHeader Writable. DataOutputBuffer buf = new DataOutputBuffer(); - ObjectWritable.writeObject(buf, remoteId.getTicket(), + ObjectWritable.writeObject(buf, remoteId.getTicket(), UserGroupInformation.class, conf); int bufLen = buf.getLength(); out.writeInt(bufLen); out.write(buf.getData(), 0, bufLen); } - + /* wait till someone signals us to start reading RPC response or - * it is idle too long, it is marked as to be closed, + * it is idle too long, it is marked as to be closed, * or the client is marked as not running. - * + * * Return true if it is time to read a response; false otherwise. */ private synchronized boolean waitForWork() { @@ -407,7 +407,7 @@ public class HBaseClient { } catch (InterruptedException e) {} } } - + if (!calls.isEmpty() && !shouldCloseConnection.get() && running.get()) { return true; } else if (shouldCloseConnection.get()) { @@ -415,7 +415,7 @@ public class HBaseClient { } else if (calls.isEmpty()) { // idle connection closed or stopped markClosed(null); return false; - } else { // get stopped but there are still pending requests + } else { // get stopped but there are still pending requests markClosed((IOException)new IOException().initCause( new InterruptedException())); return false; @@ -426,7 +426,7 @@ public class HBaseClient { return remoteId.getAddress(); } - /* Send a ping to the server if the time elapsed + /* Send a ping to the server if the time elapsed * since last I/O activity is equal to or greater than the ping interval */ protected synchronized void sendPing() throws IOException { @@ -443,7 +443,7 @@ public class HBaseClient { @Override public void run() { if (LOG.isDebugEnabled()) - LOG.debug(getName() + ": starting, having connections " + LOG.debug(getName() + ": starting, having connections " + connections.size()); try { @@ -456,7 +456,7 @@ public class HBaseClient { } close(); - + if (LOG.isDebugEnabled()) LOG.debug(getName() + ": stopped, remaining connections " + connections.size()); @@ -477,7 +477,7 @@ public class HBaseClient { synchronized (this.out) { if (LOG.isDebugEnabled()) LOG.debug(getName() + " sending #" + call.id); - + //for serializing the //data to be written d = new DataOutputBuffer(); @@ -496,7 +496,7 @@ public class HBaseClient { // close early IOUtils.closeStream(d); } - } + } /* Receive a response. 
* Because only one receiver, so no synchronization on in. @@ -506,7 +506,7 @@ public class HBaseClient { return; } touch(); - + try { int id = in.readInt(); // try to read an id @@ -529,14 +529,14 @@ public class HBaseClient { markClosed(e); } } - + private synchronized void markClosed(IOException e) { if (shouldCloseConnection.compareAndSet(false, true)) { closeException = e; notifyAll(); } } - + /** Close the connection. */ private synchronized void close() { if (!shouldCloseConnection.get()) { @@ -579,14 +579,14 @@ public class HBaseClient { if (LOG.isDebugEnabled()) LOG.debug(getName() + ": closed"); } - + /* Cleanup all calls and mark them as done */ private void cleanupCalls() { Iterator> itor = calls.entrySet().iterator() ; while (itor.hasNext()) { - Call c = itor.next().getValue(); + Call c = itor.next().getValue(); c.setException(closeException); // local exception - itor.remove(); + itor.remove(); } } } @@ -595,7 +595,7 @@ public class HBaseClient { private class ParallelCall extends Call { private ParallelResults results; protected int index; - + public ParallelCall(Writable param, ParallelResults results, int index) { super(param); this.results = results; @@ -639,10 +639,10 @@ public class HBaseClient { * @param conf * @param factory */ - public HBaseClient(Class valueClass, Configuration conf, + public HBaseClient(Class valueClass, Configuration conf, SocketFactory factory) { this.valueClass = valueClass; - this.maxIdleTime = + this.maxIdleTime = conf.getInt("hbase.ipc.client.connection.maxidletime", 10000); //10s this.maxRetries = conf.getInt("hbase.ipc.client.connect.max.retries", 0); this.failureSleep = conf.getInt("hbase.client.pause", 2000); @@ -664,7 +664,7 @@ public class HBaseClient { public HBaseClient(Class valueClass, Configuration conf) { this(valueClass, conf, NetUtils.getDefaultSocketFactory(conf)); } - + /** Return the socket factory of this client * * @return this client's socket factory @@ -683,14 +683,14 @@ public class HBaseClient { if (!running.compareAndSet(true, false)) { return; } - + // wake up all connections synchronized (connections) { for (Connection conn : connections.values()) { conn.interrupt(); } } - + // wait until all connections are closed while (!connections.isEmpty()) { try { @@ -702,19 +702,19 @@ public class HBaseClient { /** Make a call, passing param, to the IPC server running at * address, returning the value. Throws exceptions if there are - * network problems or if the remote code threw an exception. - * @param param - * @param address - * @return Writable + * network problems or if the remote code threw an exception. + * @param param + * @param address + * @return Writable * @throws IOException */ public Writable call(Writable param, InetSocketAddress address) throws IOException { return call(param, address, null); } - - public Writable call(Writable param, InetSocketAddress addr, - UserGroupInformation ticket) + + public Writable call(Writable param, InetSocketAddress addr, + UserGroupInformation ticket) throws IOException { Call call = new Call(param); Connection connection = getConnection(addr, ticket, call); @@ -750,11 +750,11 @@ public class HBaseClient { /** * Take an IOException and the address we were trying to connect to * and return an IOException with the input exception as the cause. - * The new exception provides the stack trace of the place where + * The new exception provides the stack trace of the place where * the exception is thrown and some extra diagnostics information. 
- * If the exception is ConnectException or SocketTimeoutException, + * If the exception is ConnectException or SocketTimeoutException, * return a new one of the same type; Otherwise return an IOException. - * + * * @param addr target address * @param exception the relevant exception * @return an exception to throw @@ -781,9 +781,9 @@ public class HBaseClient { /** Makes a set of calls in parallel. Each parameter is sent to the * corresponding address. When all values are available, or have timed out * or errored, the collected results are returned in an array. The array - * contains nulls for calls that timed out or errored. - * @param params - * @param addresses + * contains nulls for calls that timed out or errored. + * @param params + * @param addresses * @return Writable[] * @throws IOException */ @@ -800,7 +800,7 @@ public class HBaseClient { connection.sendParam(call); // send each parameter } catch (IOException e) { // log errors - LOG.info("Calling "+addresses[i]+" caught: " + + LOG.info("Calling "+addresses[i]+" caught: " + e.getMessage(),e); results.size--; // wait for one fewer result } @@ -817,7 +817,7 @@ public class HBaseClient { /** Get a connection from the pool, or create a new one and add it to the * pool. Connections to a given host/port are reused. */ - private Connection getConnection(InetSocketAddress addr, + private Connection getConnection(InetSocketAddress addr, UserGroupInformation ticket, Call call) throws IOException { @@ -826,7 +826,7 @@ public class HBaseClient { throw new IOException("The client is stopped"); } Connection connection; - /* we could avoid this allocation for each RPC by having a + /* we could avoid this allocation for each RPC by having a * connectionsId object and with set() method. We need to manage the * refs for keys in HashMap properly. For now its ok. */ @@ -840,7 +840,7 @@ public class HBaseClient { } } } while (!connection.addCall(call)); - + //we don't invoke the method below inside "synchronized (connections)" //block above. The reason for that is if the server happens to be slow, //it will take longer to establish a connection and that will slow the @@ -856,19 +856,19 @@ public class HBaseClient { private static class ConnectionId { InetSocketAddress address; UserGroupInformation ticket; - + ConnectionId(InetSocketAddress address, UserGroupInformation ticket) { this.address = address; this.ticket = ticket; } - + InetSocketAddress getAddress() { return address; } UserGroupInformation getTicket() { return ticket; } - + @Override public boolean equals(Object obj) { if (obj instanceof ConnectionId) { @@ -878,10 +878,10 @@ public class HBaseClient { } return false; } - + @Override public int hashCode() { return address.hashCode() ^ System.identityHashCode(ticket); } - } + } } diff --git a/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java b/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java index d30e884..b136010 100644 --- a/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java +++ b/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java @@ -56,7 +56,7 @@ import org.apache.hadoop.security.UserGroupInformation; * optimizations like using our own version of ObjectWritable. Class has been * renamed to avoid confusing it w/ hadoop versions. *

- * + * * * A protocol is a Java interface. All parameters and return types must * be one of: @@ -170,9 +170,9 @@ public class HBaseRPC { protected ClientCache() {} /** - * Construct & cache an IPC client with the user-provided SocketFactory + * Construct & cache an IPC client with the user-provided SocketFactory * if no cached client exists. - * + * * @param conf Configuration * @return an IPC client */ @@ -195,9 +195,9 @@ public class HBaseRPC { } /** - * Construct & cache an IPC client with the default SocketFactory + * Construct & cache an IPC client with the default SocketFactory * if no cached client exists. - * + * * @param conf Configuration * @return an IPC client */ @@ -206,7 +206,7 @@ public class HBaseRPC { } /** - * Stop a RPC client connection + * Stop a RPC client connection * A RPC client is closed only when its reference count becomes zero. */ protected void stopClient(HBaseClient client) { @@ -223,7 +223,7 @@ public class HBaseRPC { } protected final static ClientCache CLIENTS = new ClientCache(); - + private static class Invoker implements InvocationHandler { private InetSocketAddress address; private UserGroupInformation ticket; @@ -236,7 +236,7 @@ public class HBaseRPC { * @param conf * @param factory */ - public Invoker(InetSocketAddress address, UserGroupInformation ticket, + public Invoker(InetSocketAddress address, UserGroupInformation ticket, Configuration conf, SocketFactory factory) { this.address = address; this.ticket = ticket; @@ -258,8 +258,8 @@ public class HBaseRPC { } return value.get(); } - - /* close the IPC client that's responsible for this invoker's RPCs */ + + /* close the IPC client that's responsible for this invoker's RPCs */ synchronized protected void close() { if (!isClosed) { isClosed = true; @@ -276,7 +276,7 @@ public class HBaseRPC { private String interfaceName; private long clientVersion; private long serverVersion; - + /** * Create a version mismatch exception * @param interfaceName the name of the protocol mismatch @@ -291,23 +291,23 @@ public class HBaseRPC { this.clientVersion = clientVersion; this.serverVersion = serverVersion; } - + /** * Get the interface name - * @return the java class name + * @return the java class name * (eg. org.apache.hadoop.mapred.InterTrackerProtocol) */ public String getInterfaceName() { return interfaceName; } - + /** * @return the client's preferred version */ public long getClientVersion() { return clientVersion; } - + /** * @return the server's agreed to version. */ @@ -315,7 +315,7 @@ public class HBaseRPC { return serverVersion; } } - + /** * @param protocol * @param clientVersion @@ -384,7 +384,7 @@ public class HBaseRPC { SocketFactory factory) throws IOException { return getProxy(protocol, clientVersion, addr, null, conf, factory); } - + /** * Construct a client-side proxy object that implements the named protocol, * talking to a server at the named address. 
@@ -401,23 +401,23 @@ public class HBaseRPC { public static VersionedProtocol getProxy(Class protocol, long clientVersion, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory) - throws IOException { + throws IOException { VersionedProtocol proxy = (VersionedProtocol) Proxy.newProxyInstance( protocol.getClassLoader(), new Class[] { protocol }, new Invoker(addr, ticket, conf, factory)); - long serverVersion = proxy.getProtocolVersion(protocol.getName(), + long serverVersion = proxy.getProtocolVersion(protocol.getName(), clientVersion); if (serverVersion == clientVersion) { return proxy; } - throw new VersionMismatch(protocol.getName(), clientVersion, + throw new VersionMismatch(protocol.getName(), clientVersion, serverVersion); } /** * Construct a client-side proxy object with the default SocketFactory - * + * * @param protocol * @param clientVersion * @param addr @@ -463,7 +463,7 @@ public class HBaseRPC { HBaseClient client = CLIENTS.getClient(conf); try { Writable[] wrappedValues = client.call(invocations, addrs); - + if (method.getReturnType() == Void.TYPE) { return null; } @@ -473,7 +473,7 @@ public class HBaseRPC { for (int i = 0; i < values.length; i++) if (wrappedValues[i] != null) values[i] = ((HbaseObjectWritable)wrappedValues[i]).get(); - + return values; } finally { CLIENTS.stopClient(client); @@ -491,7 +491,7 @@ public class HBaseRPC { * @return Server * @throws IOException */ - public static Server getServer(final Object instance, final String bindAddress, final int port, Configuration conf) + public static Server getServer(final Object instance, final String bindAddress, final int port, Configuration conf) throws IOException { return getServer(instance, bindAddress, port, 1, false, conf); } @@ -511,7 +511,7 @@ public class HBaseRPC { */ public static Server getServer(final Object instance, final String bindAddress, final int port, final int numHandlers, - final boolean verbose, Configuration conf) + final boolean verbose, Configuration conf) throws IOException { return new Server(instance, conf, bindAddress, port, numHandlers, verbose); } @@ -530,11 +530,11 @@ public class HBaseRPC { * @param port the port to listen for connections on * @throws IOException */ - public Server(Object instance, Configuration conf, String bindAddress, int port) + public Server(Object instance, Configuration conf, String bindAddress, int port) throws IOException { this(instance, conf, bindAddress, port, 1, false); } - + private static String classNameBase(String className) { String[] names = className.split("\\.", -1); if (names == null || names.length == 0) { @@ -542,7 +542,7 @@ public class HBaseRPC { } return names[names.length-1]; } - + /** Construct an RPC server. * @param instance the instance whose methods will be called * @param conf the configuration to use diff --git a/src/java/org/apache/hadoop/hbase/ipc/HBaseRPCProtocolVersion.java b/src/java/org/apache/hadoop/hbase/ipc/HBaseRPCProtocolVersion.java index f407886..3e31714 100644 --- a/src/java/org/apache/hadoop/hbase/ipc/HBaseRPCProtocolVersion.java +++ b/src/java/org/apache/hadoop/hbase/ipc/HBaseRPCProtocolVersion.java @@ -29,7 +29,7 @@ import org.apache.hadoop.ipc.VersionedProtocol; public interface HBaseRPCProtocolVersion extends VersionedProtocol { /** * Interface version. - * + * * HMasterInterface version history: *

    *
  • Version was incremented to 2 when we brought the hadoop RPC local to diff --git a/src/java/org/apache/hadoop/hbase/ipc/HBaseRPCStatistics.java b/src/java/org/apache/hadoop/hbase/ipc/HBaseRPCStatistics.java index e11f828..584c280 100644 --- a/src/java/org/apache/hadoop/hbase/ipc/HBaseRPCStatistics.java +++ b/src/java/org/apache/hadoop/hbase/ipc/HBaseRPCStatistics.java @@ -30,11 +30,11 @@ import org.apache.hadoop.metrics.util.MetricsRegistry; public class HBaseRPCStatistics extends MetricsDynamicMBeanBase { private final ObjectName mbeanName; - public HBaseRPCStatistics(MetricsRegistry registry, + public HBaseRPCStatistics(MetricsRegistry registry, String hostName, String port) { super(registry, "HBaseRPCStatistics"); - String name = String.format("RPCStatistics-%s", + String name = String.format("RPCStatistics-%s", (port != null ? port : "unknown")); mbeanName = MBeanUtil.registerMBean("HBase", name, this); diff --git a/src/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java b/src/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java index 63d5f5c..06cbd72 100644 --- a/src/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java +++ b/src/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java @@ -27,7 +27,7 @@ import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; import org.apache.hadoop.metrics.util.MetricsRegistry; /** - * + * * This class is for maintaining the various RPC statistics * and publishing them through the metrics interfaces. * This also registers the JMX MBean for RPC. @@ -42,22 +42,22 @@ public class HBaseRpcMetrics implements Updater { private MetricsRecord metricsRecord; private static Log LOG = LogFactory.getLog(HBaseRpcMetrics.class); private final HBaseRPCStatistics rpcStatistics; - + public HBaseRpcMetrics(String hostName, String port) { MetricsContext context = MetricsUtil.getContext("rpc"); metricsRecord = MetricsUtil.createRecord(context, "metrics"); metricsRecord.setTag("port", port); - LOG.info("Initializing RPC Metrics with hostName=" + LOG.info("Initializing RPC Metrics with hostName=" + hostName + ", port=" + port); context.registerUpdater(this); - + rpcStatistics = new HBaseRPCStatistics(this.registry, hostName, port); } - - + + /** * The metrics variables are public: * - they can be set directly by calling their set/inc methods diff --git a/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java b/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java index 5090909..19d94a6 100644 --- a/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java +++ b/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java @@ -61,28 +61,28 @@ import org.apache.hadoop.util.StringUtils; /** An abstract IPC service. IPC calls take a single {@link Writable} as a * parameter, and return a {@link Writable} as their value. A service runs on * a port and is defined by a parameter class and a value class. - * - * + * + * *

    Copied local so can fix HBASE-900. - * + * * @see HBaseClient */ public abstract class HBaseServer { - + /** * The first four bytes of Hadoop RPC connections */ public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes()); - + // 1 : Introduce ping and server does not throw away RPCs - // 3 : RPC was refactored in 0.19 + // 3 : RPC was refactored in 0.19 public static final byte CURRENT_VERSION = 3; - + /** * How many calls/handler are allowed in the queue. */ private static final int MAX_QUEUE_SIZE_PER_HANDLER = 100; - + public static final Log LOG = LogFactory.getLog("org.apache.hadoop.ipc.HBaseServer"); @@ -98,13 +98,13 @@ public abstract class HBaseServer { public static HBaseServer get() { return SERVER.get(); } - + /** This is set to Call object before Handler invokes an RPC and reset * after the call returns. */ protected static final ThreadLocal CurCall = new ThreadLocal(); - - /** Returns the remote side ip address when invoked inside an RPC + + /** Returns the remote side ip address when invoked inside an RPC * Returns null incase of an error. * @return InetAddress */ @@ -124,23 +124,23 @@ public abstract class HBaseServer { return (addr == null) ? null : addr.getHostAddress(); } - protected String bindAddress; + protected String bindAddress; protected int port; // port we listen on private int handlerCount; // number of handler threads protected Class paramClass; // class of call parameters - protected int maxIdleTime; // the maximum idle time after + protected int maxIdleTime; // the maximum idle time after // which a client may be // disconnected protected int thresholdIdleConnections; // the number of idle - // connections after which we - // will start cleaning up idle + // connections after which we + // will start cleaning up idle // connections - int maxConnectionsToNuke; // the max number of + int maxConnectionsToNuke; // the max number of // connections to nuke // during a cleanup - + protected HBaseRpcMetrics rpcMetrics; - + protected Configuration conf; private int maxQueueSize; @@ -151,7 +151,7 @@ public abstract class HBaseServer { volatile protected boolean running = true; // true while server runs protected BlockingQueue callQueue; // queued calls - protected List connectionList = + protected List connectionList = Collections.synchronizedList(new LinkedList()); //maintain a list //of client connections @@ -162,7 +162,7 @@ public abstract class HBaseServer { protected HBaseRPCErrorHandler errorHandler = null; /** - * A convenience method to bind to a given address and report + * A convenience method to bind to a given address and report * better exceptions if the address is not a valid host. * @param socket the socket to bind * @param address the address to bind to @@ -171,13 +171,13 @@ public abstract class HBaseServer { * @throws UnknownHostException if the address isn't a valid host name * @throws IOException other random errors from bind */ - public static void bind(ServerSocket socket, InetSocketAddress address, + public static void bind(ServerSocket socket, InetSocketAddress address, int backlog) throws IOException { try { socket.bind(address, backlog); } catch (BindException e) { BindException bindException = - new BindException("Problem binding to " + address + " : " + + new BindException("Problem binding to " + address + " : " + e.getMessage()); bindException.initCause(e); throw bindException; @@ -185,7 +185,7 @@ public abstract class HBaseServer { // If they try to bind to a different host's address, give a better // error message. 
if ("Unresolved address".equals(e.getMessage())) { - throw new UnknownHostException("Invalid hostname for server: " + + throw new UnknownHostException("Invalid hostname for server: " + address.getHostName()); } throw e; @@ -208,7 +208,7 @@ public abstract class HBaseServer { this.timestamp = System.currentTimeMillis(); this.response = null; } - + @Override public String toString() { return param.toString() + " from " + connection.toString(); @@ -221,17 +221,17 @@ public abstract class HBaseServer { /** Listens on the socket. Creates jobs for the handler threads*/ private class Listener extends Thread { - + private ServerSocketChannel acceptChannel = null; //the accept channel private Selector selector = null; //the selector that we use for the server private InetSocketAddress address; //the address we bind at private Random rand = new Random(); private long lastCleanupRunTime = 0; //the last time when a cleanup connec- //-tion (for idle connections) ran - private long cleanupInterval = 10000; //the minimum interval between + private long cleanupInterval = 10000; //the minimum interval between //two cleanup runs private int backlogLength = conf.getInt("ipc.server.listen.queue.size", 128); - + public Listener() throws IOException { address = new InetSocketAddress(bindAddress, port); // Create a new server socket and set to non blocking mode @@ -252,7 +252,7 @@ public abstract class HBaseServer { /** cleanup connections from connectionList. Choose a random range * to scan and also have a limit on the number of the connections * that will be cleanedup per run. The criteria for cleanup is the time - * for which the connection was idle. If 'force' is true then all + * for which the connection was idle. If 'force' is true then all * connections will be looked at for the cleanup. */ private void cleanupConnections(boolean force) { @@ -331,7 +331,7 @@ public abstract class HBaseServer { } } else { // we can run out of memory if we have too many threads - // log the event and sleep for a minute and give + // log the event and sleep for a minute and give // some thread(s) a chance to finish LOG.warn("Out of Memory in server select", e); closeCurrentConnection(key); @@ -358,7 +358,7 @@ public abstract class HBaseServer { selector= null; acceptChannel= null; - + // clean up all connections while (!connectionList.isEmpty()) { closeConnection(connectionList.remove(0)); @@ -381,7 +381,7 @@ public abstract class HBaseServer { InetSocketAddress getAddress() { return (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress(); } - + void doAccept(SelectionKey key) throws IOException, OutOfMemoryError { Connection c = null; ServerSocketChannel server = (ServerSocketChannel) key.channel(); @@ -411,10 +411,10 @@ public abstract class HBaseServer { int count = 0; Connection c = (Connection)key.attachment(); if (c == null) { - return; + return; } c.setLastContact(System.currentTimeMillis()); - + try { count = c.readAndProcess(); } catch (InterruptedException ieo) { @@ -425,7 +425,7 @@ public abstract class HBaseServer { } if (count < 0) { if (LOG.isDebugEnabled()) - LOG.debug(getName() + ": disconnecting client " + + LOG.debug(getName() + ": disconnecting client " + c.getHostAddress() + ". 
Number of active connections: "+ numConnections); closeConnection(c); @@ -434,7 +434,7 @@ public abstract class HBaseServer { else { c.setLastContact(System.currentTimeMillis()); } - } + } synchronized void doStop() { if (selector != null) { @@ -455,7 +455,7 @@ public abstract class HBaseServer { private class Responder extends Thread { private Selector writeSelector; private int pending; // connections waiting to register - + final static int PURGE_INTERVAL = 900000; // 15mins Responder() throws IOException { @@ -498,7 +498,7 @@ public abstract class HBaseServer { // LOG.debug("Checking for old call responses."); ArrayList calls; - + // get the list of channels from list of keys. synchronized (writeSelector.keys()) { calls = new ArrayList(writeSelector.keys().size()); @@ -506,12 +506,12 @@ public abstract class HBaseServer { while (iter.hasNext()) { SelectionKey key = iter.next(); Call call = (Call)key.attachment(); - if (call != null && key.channel() == call.connection.channel) { + if (call != null && key.channel() == call.connection.channel) { calls.add(call); } } } - + for(Call call : calls) { doPurge(call, now); } @@ -531,7 +531,7 @@ public abstract class HBaseServer { try { Thread.sleep(60000); } catch (Exception ie) {} } } catch (Exception e) { - LOG.warn("Exception in Responder " + + LOG.warn("Exception in Responder " + StringUtils.stringifyException(e)); } } @@ -564,7 +564,7 @@ public abstract class HBaseServer { } // - // Remove calls that have been pending in the responseQueue + // Remove calls that have been pending in the responseQueue // for a long time. // private void doPurge(Call call, long now) { @@ -629,18 +629,18 @@ public abstract class HBaseServer { } } else { // - // If we were unable to write the entire response out, then - // insert in Selector queue. + // If we were unable to write the entire response out, then + // insert in Selector queue. // call.connection.responseQueue.addFirst(call); - + if (inHandler) { // set the serve time when the response has to be sent later call.timestamp = System.currentTimeMillis(); - + incPending(); try { - // Wakeup the thread blocked on select, only then can the call + // Wakeup the thread blocked on select, only then can the call // to channel.register() complete. writeSelector.wakeup(); channel.register(writeSelector, SelectionKey.OP_WRITE, call); @@ -653,7 +653,7 @@ public abstract class HBaseServer { } if (LOG.isDebugEnabled()) { LOG.debug(getName() + ": responding to #" + call.id + " from " + - call.connection + " Wrote partial " + numBytes + + call.connection + " Wrote partial " + numBytes + " bytes."); } } @@ -711,7 +711,7 @@ public abstract class HBaseServer { private long lastContact; private int dataLength; protected Socket socket; - // Cache the remote host & port info so that even if the socket is + // Cache the remote host & port info so that even if the socket is // disconnected, we can say where it used to connect to. 
private String hostAddress; private int remotePort; @@ -739,13 +739,13 @@ public abstract class HBaseServer { socketSendBufferSize); } } - } + } @Override public String toString() { - return getHostAddress() + ":" + remotePort; + return getHostAddress() + ":" + remotePort; } - + public String getHostAddress() { return hostAddress; } @@ -762,17 +762,17 @@ public abstract class HBaseServer { private boolean isIdle() { return rpcCount == 0; } - + /* Decrement the outstanding RPC count */ protected void decRpcCount() { rpcCount--; } - + /* Increment the outstanding RPC count */ private void incRpcCount() { rpcCount++; } - + protected boolean timedOut(long currentTime) { if (isIdle() && currentTime - lastContact > maxIdleTime) return true; @@ -783,14 +783,14 @@ public abstract class HBaseServer { while (true) { /* Read at most one RPC. If the header is not read completely yet * then iterate until we read first RPC or until there is no data left. - */ + */ int count = -1; if (dataLengthBuffer.remaining() > 0) { - count = channelRead(channel, dataLengthBuffer); - if (count < 0 || dataLengthBuffer.remaining() > 0) + count = channelRead(channel, dataLengthBuffer); + if (count < 0 || dataLengthBuffer.remaining() > 0) return count; } - + if (!versionRead) { //Every connection is expected to send the header. ByteBuffer versionBuffer = ByteBuffer.allocate(1); @@ -799,13 +799,13 @@ public abstract class HBaseServer { return count; } int version = versionBuffer.get(0); - - dataLengthBuffer.flip(); + + dataLengthBuffer.flip(); if (!HEADER.equals(dataLengthBuffer) || version != CURRENT_VERSION) { //Warning is ok since this is not supposed to happen. - LOG.warn("Incorrect header or version mismatch from " + + LOG.warn("Incorrect header or version mismatch from " + hostAddress + ":" + remotePort + - " got version " + version + + " got version " + version + " expected version " + CURRENT_VERSION); return -1; } @@ -813,11 +813,11 @@ public abstract class HBaseServer { versionRead = true; continue; } - + if (data == null) { dataLengthBuffer.flip(); dataLength = dataLengthBuffer.getInt(); - + if (dataLength == HBaseClient.PING_CALL_ID) { dataLengthBuffer.clear(); return 0; //ping message @@ -825,9 +825,9 @@ public abstract class HBaseServer { data = ByteBuffer.allocate(dataLength); incRpcCount(); // Increment the rpc count } - + count = channelRead(channel, data); - + if (data.remaining() == 0) { dataLengthBuffer.clear(); data.flip(); @@ -840,7 +840,7 @@ public abstract class HBaseServer { headerRead = true; data = null; continue; - } + } return count; } } @@ -854,18 +854,18 @@ public abstract class HBaseServer { new DataInputStream(new ByteArrayInputStream(data.array())); ticket = (UserGroupInformation) ObjectWritable.readObject(in, conf); } - + private void processData() throws IOException, InterruptedException { DataInputStream dis = new DataInputStream(new ByteArrayInputStream(data.array())); int id = dis.readInt(); // try to read an id - + if (LOG.isDebugEnabled()) LOG.debug(" got #" + id); - + Writable param = ReflectionUtils.newInstance(paramClass, conf); // read param - param.readFields(dis); - + param.readFields(dis); + Call call = new Call(id, param, this); callQueue.put(call); // queue the call; maybe blocked here } @@ -903,11 +903,11 @@ public abstract class HBaseServer { if (LOG.isDebugEnabled()) LOG.debug(getName() + ": has #" + call.id + " from " + call.connection); - + String errorClass = null; String error = null; Writable value = null; - + CurCall.set(call); UserGroupInformation previous = 
UserGroupInformation.getCurrentUGI(); UserGroupInformation.setCurrentUGI(call.connection.ticket); @@ -965,22 +965,22 @@ public abstract class HBaseServer { } } - + protected HBaseServer(String bindAddress, int port, - Class paramClass, int handlerCount, + Class paramClass, int handlerCount, Configuration conf) - throws IOException + throws IOException { this(bindAddress, port, paramClass, handlerCount, conf, Integer.toString(port)); } /** Constructs a server listening on the named port and address. Parameters passed must * be of the named class. The handlerCount determines * the number of handler threads that will be used to process calls. - * + * */ - protected HBaseServer(String bindAddress, int port, - Class paramClass, int handlerCount, - Configuration conf, String serverName) + protected HBaseServer(String bindAddress, int port, + Class paramClass, int handlerCount, + Configuration conf, String serverName) throws IOException { this.bindAddress = bindAddress; this.conf = conf; @@ -989,14 +989,14 @@ public abstract class HBaseServer { this.handlerCount = handlerCount; this.socketSendBufferSize = 0; this.maxQueueSize = handlerCount * MAX_QUEUE_SIZE_PER_HANDLER; - this.callQueue = new LinkedBlockingQueue(maxQueueSize); + this.callQueue = new LinkedBlockingQueue(maxQueueSize); this.maxIdleTime = 2*conf.getInt("ipc.client.connection.maxidletime", 1000); this.maxConnectionsToNuke = conf.getInt("ipc.client.kill.max", 10); this.thresholdIdleConnections = conf.getInt("ipc.client.idlethreshold", 4000); - + // Start the listener here and let it bind to the port listener = new Listener(); - this.port = listener.getAddress().getPort(); + this.port = listener.getAddress().getPort(); this.rpcMetrics = new HBaseRpcMetrics(serverName, Integer.toString(this.port)); this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false); @@ -1013,7 +1013,7 @@ public abstract class HBaseServer { } connection.close(); } - + /** Sets the socket buffer size used for responding to RPCs. * @param size */ @@ -1024,7 +1024,7 @@ public abstract class HBaseServer { responder.start(); listener.start(); handlers = new Handler[handlerCount]; - + for (int i = 0; i < handlerCount; i++) { handlers[i] = new Handler(i); handlers[i].start(); @@ -1069,11 +1069,11 @@ public abstract class HBaseServer { public synchronized InetSocketAddress getListenerAddress() { return listener.getAddress(); } - - /** Called for each call. - * @param param - * @param receiveTime - * @return Writable + + /** Called for each call. + * @param param + * @param receiveTime + * @return Writable * @throws IOException */ public abstract Writable call(Writable param, long receiveTime) @@ -1086,7 +1086,7 @@ public abstract class HBaseServer { public int getNumOpenConnections() { return numConnections; } - + /** * The number of rpc calls in the queue. * @return The number of rpc calls in the queue. @@ -1101,26 +1101,26 @@ public abstract class HBaseServer { */ public void setErrorHandler(HBaseRPCErrorHandler handler) { this.errorHandler = handler; - } + } /** - * When the read or write buffer size is larger than this limit, i/o will be + * When the read or write buffer size is larger than this limit, i/o will be * done in chunks of this size. Most RPC requests and responses would be * be smaller. */ private static int NIO_BUFFER_LIMIT = 8*1024; //should not be more than 64KB. - + /** * This is a wrapper around {@link WritableByteChannel#write(ByteBuffer)}. - * If the amount of data is large, it writes to channel in smaller chunks. 
- * This is to avoid jdk from creating many direct buffers as the size of + * If the amount of data is large, it writes to channel in smaller chunks. + * This is to avoid jdk from creating many direct buffers as the size of * buffer increases. This also minimizes extra copies in NIO layer - * as a result of multiple write operations required to write a large - * buffer. + * as a result of multiple write operations required to write a large + * buffer. * * @see WritableByteChannel#write(ByteBuffer) */ - protected static int channelWrite(WritableByteChannel channel, + protected static int channelWrite(WritableByteChannel channel, ByteBuffer buffer) throws IOException { return (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.write(buffer) : channelIO(null, channel, buffer); @@ -1128,13 +1128,13 @@ public abstract class HBaseServer { /** * This is a wrapper around {@link ReadableByteChannel#read(ByteBuffer)}. - * If the amount of data is large, it writes to channel in smaller chunks. - * This is to avoid jdk from creating many direct buffers as the size of + * If the amount of data is large, it writes to channel in smaller chunks. + * This is to avoid jdk from creating many direct buffers as the size of * ByteBuffer increases. There should not be any performance degredation. - * + * * @see ReadableByteChannel#read(ByteBuffer) */ - protected static int channelRead(ReadableByteChannel channel, + protected static int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException { return (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.read(buffer) : channelIO(channel, null, buffer); @@ -1144,35 +1144,35 @@ public abstract class HBaseServer { * Helper for {@link #channelRead(ReadableByteChannel, ByteBuffer)} * and {@link #channelWrite(WritableByteChannel, ByteBuffer)}. Only * one of readCh or writeCh should be non-null. - * + * * @see #channelRead(ReadableByteChannel, ByteBuffer) * @see #channelWrite(WritableByteChannel, ByteBuffer) */ - private static int channelIO(ReadableByteChannel readCh, + private static int channelIO(ReadableByteChannel readCh, WritableByteChannel writeCh, ByteBuffer buf) throws IOException { - + int originalLimit = buf.limit(); int initialRemaining = buf.remaining(); int ret = 0; - + while (buf.remaining() > 0) { try { int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT); buf.limit(buf.position() + ioSize); - - ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf); - + + ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf); + if (ret < ioSize) { break; } } finally { - buf.limit(originalLimit); + buf.limit(originalLimit); } } - int nBytes = initialRemaining - buf.remaining(); + int nBytes = initialRemaining - buf.remaining(); return (nBytes > 0) ? nBytes : ret; - } + } } diff --git a/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java b/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java index 4c22639..2fedd52 100644 --- a/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java +++ b/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java @@ -31,27 +31,27 @@ import org.apache.hadoop.io.Writable; * Clients interact with the HMasterInterface to gain access to meta-level * HBase functionality, like finding an HRegionServer and creating/destroying * tables. - * + * *
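channelWrite()/channelRead() above cap each individual read or write at NIO_BUFFER_LIMIT so the JDK does not allocate (and cache per thread) a temporary direct buffer as large as the caller's heap buffer. A standalone sketch of the same chunking for the write side, assuming the 8 KB chunk size used by the constant above:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

final class ChunkedChannelIO {
  private static final int CHUNK = 8 * 1024;   // mirrors NIO_BUFFER_LIMIT

  static int write(WritableByteChannel ch, ByteBuffer buf) throws IOException {
    if (buf.remaining() <= CHUNK) {
      return ch.write(buf);                    // small buffers go out as-is
    }
    int originalLimit = buf.limit();
    int initialRemaining = buf.remaining();
    int ret = 0;
    while (buf.remaining() > 0) {
      int ioSize = Math.min(buf.remaining(), CHUNK);
      try {
        buf.limit(buf.position() + ioSize);    // expose at most one chunk to the channel
        ret = ch.write(buf);
        if (ret < ioSize) {
          break;                               // socket buffer is full for now
        }
      } finally {
        buf.limit(originalLimit);              // always restore the caller's limit
      }
    }
    int written = initialRemaining - buf.remaining();
    return written > 0 ? written : ret;
  }
}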

    NOTE: if you change the interface, you must change the RPC version * number in HBaseRPCProtocolVersion - * + * */ public interface HMasterInterface extends HBaseRPCProtocolVersion { /** @return true if master is available */ public boolean isMasterRunning(); - + // Admin tools would use these cmds - + /** * Creates a new table. If splitKeys are specified, then the table will be * created with an initial set of multiple regions. If splitKeys is null, * the table will be created with a single region. * @param desc table descriptor - * @param splitKeys + * @param splitKeys * @throws IOException */ - public void createTable(HTableDescriptor desc, byte [][] splitKeys) + public void createTable(HTableDescriptor desc, byte [][] splitKeys) throws IOException; /** @@ -60,7 +60,7 @@ public interface HMasterInterface extends HBaseRPCProtocolVersion { * @throws IOException */ public void deleteTable(final byte [] tableName) throws IOException; - + /** * Adds a column to the specified table * @param tableName @@ -77,8 +77,8 @@ public interface HMasterInterface extends HBaseRPCProtocolVersion { * @param descriptor new column descriptor * @throws IOException */ - public void modifyColumn(final byte [] tableName, final byte [] columnName, - HColumnDescriptor descriptor) + public void modifyColumn(final byte [] tableName, final byte [] columnName, + HColumnDescriptor descriptor) throws IOException; @@ -90,17 +90,17 @@ public interface HMasterInterface extends HBaseRPCProtocolVersion { */ public void deleteColumn(final byte [] tableName, final byte [] columnName) throws IOException; - + /** * Puts the table on-line (only needed if table has been previously taken offline) * @param tableName * @throws IOException */ public void enableTable(final byte [] tableName) throws IOException; - + /** * Take table offline - * + * * @param tableName * @throws IOException */ @@ -108,7 +108,7 @@ public interface HMasterInterface extends HBaseRPCProtocolVersion { /** * Modify a table's metadata - * + * * @param tableName * @param op * @param args diff --git a/src/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java b/src/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java index b7e384c..53fe302 100644 --- a/src/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java +++ b/src/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java @@ -27,13 +27,13 @@ import org.apache.hadoop.hbase.HMsg; import org.apache.hadoop.hbase.HRegionInfo; /** - * HRegionServers interact with the HMasterRegionInterface to report on local + * HRegionServers interact with the HMasterRegionInterface to report on local * goings-on and to obtain data-handling instructions from the HMaster. *

    Changes here need to be reflected in HbaseObjectWritable HbaseRPC#Invoker. - * + * *

    NOTE: if you change the interface, you must change the RPC version * number in HBaseRPCProtocolVersion - * + * */ public interface HMasterRegionInterface extends HBaseRPCProtocolVersion { @@ -49,16 +49,16 @@ public interface HMasterRegionInterface extends HBaseRPCProtocolVersion { /** * Called to renew lease, tell master what the region server is doing and to * receive new instructions from the master - * + * * @param info server's address and start code * @param msgs things the region server wants to tell the master - * @param mostLoadedRegions Array of HRegionInfos that should contain the + * @param mostLoadedRegions Array of HRegionInfos that should contain the * reporting server's most loaded regions. These are candidates for being * rebalanced. * @return instructions from the master to the region server * @throws IOException */ - public HMsg[] regionServerReport(HServerInfo info, HMsg msgs[], + public HMsg[] regionServerReport(HServerInfo info, HMsg msgs[], HRegionInfo mostLoadedRegions[]) throws IOException; } \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java b/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java index 902cf89..d054b48 100644 --- a/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java +++ b/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java @@ -35,27 +35,27 @@ import org.apache.hadoop.hbase.regionserver.HRegion; /** * Clients interact with HRegionServers using a handle to the HRegionInterface. - * + * *

    NOTE: if you change the interface, you must change the RPC version * number in HBaseRPCProtocolVersion - * + * */ public interface HRegionInterface extends HBaseRPCProtocolVersion { - /** + /** * Get metainfo about an HRegion - * + * * @param regionName name of the region * @return HRegionInfo object for region * @throws NotServingRegionException */ public HRegionInfo getRegionInfo(final byte [] regionName) throws NotServingRegionException; - + /** - * Return all the data for the row that matches row exactly, + * Return all the data for the row that matches row exactly, * or the one that immediately preceeds it. - * + * * @param regionName region name * @param row row key * @param family Column family to look for row in. @@ -67,11 +67,11 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { throws IOException; /** - * + * * @return the regions served by this regionserver */ public HRegion [] getOnlineRegionsAsArray(); - + /** * Perform Get operation. * @param regionName name of region to get from @@ -91,17 +91,17 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { public boolean exists(byte [] regionName, Get get) throws IOException; /** - * Put data into the specified region + * Put data into the specified region * @param regionName * @param put the data to be put * @throws IOException */ public void put(final byte [] regionName, final Put put) throws IOException; - + /** * Put an array of puts into the specified region - * + * * @param regionName * @param puts * @return The number of processed put's. Returns -1 if all Puts @@ -112,7 +112,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { throws IOException; /** - * Deletes all the KeyValues that match those found in the Delete object, + * Deletes all the KeyValues that match those found in the Delete object, * if their ts <= to the Delete. In case of a delete with a specific ts it * only deletes that specific KeyValue. * @param regionName @@ -124,7 +124,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { /** * Put an array of deletes into the specified region - * + * * @param regionName * @param deletes * @return The number of processed deletes. Returns -1 if all Deletes @@ -137,7 +137,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { /** * Atomically checks if a row/family/qualifier value match the expectedValue. * If it does, it adds the put. - * + * * @param regionName * @param row * @param family @@ -147,15 +147,15 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { * @throws IOException * @return true if the new put was execute, false otherwise */ - public boolean checkAndPut(final byte[] regionName, final byte [] row, + public boolean checkAndPut(final byte[] regionName, final byte [] row, final byte [] family, final byte [] qualifier, final byte [] value, final Put put) throws IOException; - + /** * Atomically increments a column value. If the column value isn't long-like, * this could throw an exception. 
- * + * * @param regionName * @param row * @param family @@ -165,18 +165,18 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { * @return new incremented column value * @throws IOException */ - public long incrementColumnValue(byte [] regionName, byte [] row, + public long incrementColumnValue(byte [] regionName, byte [] row, byte [] family, byte [] qualifier, long amount, boolean writeToWAL) throws IOException; - - + + // // remote scanner interface // /** * Opens a remote scanner with a RowFilter. - * + * * @param regionName name of region to scan * @param scan configured scan object * @return scannerId scanner identifier used in other calls @@ -184,7 +184,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { */ public long openScanner(final byte [] regionName, final Scan scan) throws IOException; - + /** * Get the next set of values * @param scannerId clientId passed to openScanner @@ -192,7 +192,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { * @throws IOException */ public Result next(long scannerId) throws IOException; - + /** * Get the next set of values * @param scannerId clientId passed to openScanner @@ -203,10 +203,10 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { * @throws IOException */ public Result [] next(long scannerId, int numberOfRows) throws IOException; - + /** * Close a scanner - * + * * @param scannerId the scanner id returned by openScanner * @throws IOException */ @@ -232,15 +232,15 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion { */ public void unlockRow(final byte [] regionName, final long lockId) throws IOException; - - + + /** * Method used when a master is taking the place of another failed one. * @return All regions assigned on this region server * @throws IOException */ public HRegionInfo[] getRegionsAssignment() throws IOException; - + /** * Method used when a master is taking the place of another failed one. * @return The HSI diff --git a/src/java/org/apache/hadoop/hbase/mapred/BuildTableIndex.java b/src/java/org/apache/hadoop/hbase/mapred/BuildTableIndex.java index 3e702c1..22a5bf3 100644 --- a/src/java/org/apache/hadoop/hbase/mapred/BuildTableIndex.java +++ b/src/java/org/apache/hadoop/hbase/mapred/BuildTableIndex.java @@ -35,7 +35,7 @@ import org.apache.hadoop.mapred.JobConf; * Example table column indexing class. Runs a mapreduce job to index * specified table columns. *
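The RPCs declared in this interface (put, checkAndPut, incrementColumnValue, openScanner/next/close) are what the client-side HTable invokes under the covers. A rough usage sketch against the 0.20-era client API; the table, family and qualifier names are made up, and the HTable method shapes are assumed to mirror the region-server methods shown here:

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ClientSketch {
  public static void main(String[] args) throws IOException {
    // "mytable", "info", "qual" and "hits" are illustrative names only.
    HTable table = new HTable(new HBaseConfiguration(), "mytable");

    // put -> HRegionInterface.put(regionName, put)
    Put put = new Put(Bytes.toBytes("row1"));
    put.add(Bytes.toBytes("info"), Bytes.toBytes("qual"), Bytes.toBytes("v1"));
    table.put(put);

    // checkAndPut -> HRegionInterface.checkAndPut(...): only applies the Put
    // if the current cell value still matches the expected value.
    boolean applied = table.checkAndPut(Bytes.toBytes("row1"),
        Bytes.toBytes("info"), Bytes.toBytes("qual"), Bytes.toBytes("v1"), put);

    // incrementColumnValue -> HRegionInterface.incrementColumnValue(...)
    long hits = table.incrementColumnValue(Bytes.toBytes("row1"),
        Bytes.toBytes("info"), Bytes.toBytes("hits"), 1L);

    // openScanner/next/close -> wrapped by a client-side ResultScanner
    ResultScanner scanner = table.getScanner(new Scan());
    for (Result r : scanner) {
      // process r
    }
    scanner.close();

    System.out.println("checkAndPut applied=" + applied + ", hits=" + hits);
  }
}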

    • Each row is modeled as a Lucene document: row key is indexed in - * its untokenized form, column name-value pairs are Lucene field name-value + * its untokenized form, column name-value pairs are Lucene field name-value * pairs.
    • *
    • A file passed on command line is used to populate an * {@link IndexConfiguration} which is used to set various Lucene parameters, diff --git a/src/java/org/apache/hadoop/hbase/mapred/Driver.java b/src/java/org/apache/hadoop/hbase/mapred/Driver.java index c37e357..b5559ea 100644 --- a/src/java/org/apache/hadoop/hbase/mapred/Driver.java +++ b/src/java/org/apache/hadoop/hbase/mapred/Driver.java @@ -29,7 +29,7 @@ import org.apache.hadoop.util.ProgramDriver; public class Driver { /** * @param args - * @throws Throwable + * @throws Throwable */ public static void main(String[] args) throws Throwable { ProgramDriver pgd = new ProgramDriver(); diff --git a/src/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/src/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java index 8110a1e..f7e2727 100644 --- a/src/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java +++ b/src/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java @@ -44,12 +44,12 @@ extends MapReduceBase implements TableMap { /** - * JobConf parameter to specify the columns used to produce the key passed to + * JobConf parameter to specify the columns used to produce the key passed to * collect from the map phase */ public static final String GROUP_COLUMNS = "hbase.mapred.groupingtablemap.columns"; - + protected byte [][] m_columns; /** @@ -64,9 +64,9 @@ implements TableMap { * @param job job configuration object */ @SuppressWarnings("unchecked") - public static void initJob(String table, String columns, String groupColumns, + public static void initJob(String table, String columns, String groupColumns, Class mapper, JobConf job) { - + TableMapReduceUtil.initTableMapJob(table, columns, mapper, ImmutableBytesWritable.class, RowResult.class, job); job.set(GROUP_COLUMNS, groupColumns); @@ -84,19 +84,19 @@ implements TableMap { /** * Extract the grouping columns from value to construct a new key. - * + * * Pass the new key and value to reduce. * If any of the grouping columns are not found in the value, the record is skipped. - * @param key - * @param value - * @param output - * @param reporter - * @throws IOException + * @param key + * @param value + * @param output + * @param reporter + * @throws IOException */ - public void map(ImmutableBytesWritable key, RowResult value, + public void map(ImmutableBytesWritable key, RowResult value, OutputCollector output, Reporter reporter) throws IOException { - + byte[][] keyVals = extractKeyValues(value); if(keyVals != null) { ImmutableBytesWritable tKey = createGroupKey(keyVals); @@ -107,9 +107,9 @@ implements TableMap { /** * Extract columns values from the current record. This method returns * null if any of the columns are not found. - * + * * Override this method if you want to deal with nulls differently. - * + * * @param r * @return array of byte values */ @@ -135,9 +135,9 @@ implements TableMap { } /** - * Create a key by concatenating multiple column values. + * Create a key by concatenating multiple column values. * Override this function in order to produce different types of keys. 
- * + * * @param vals * @return key generated by concatenating multiple column values */ diff --git a/src/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java b/src/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java index 0b83bc6..801285c 100644 --- a/src/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java +++ b/src/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java @@ -35,25 +35,25 @@ import org.apache.hadoop.mapred.Partitioner; * This is used to partition the output keys into groups of keys. * Keys are grouped according to the regions that currently exist * so that each reducer fills a single region so load is distributed. - * + * * @param * @param */ @Deprecated -public class HRegionPartitioner +public class HRegionPartitioner implements Partitioner { private final Log LOG = LogFactory.getLog(TableInputFormat.class); private HTable table; - private byte[][] startKeys; - + private byte[][] startKeys; + public void configure(JobConf job) { try { - this.table = new HTable(new HBaseConfiguration(job), + this.table = new HTable(new HBaseConfiguration(job), job.get(TableOutputFormat.OUTPUT_TABLE)); } catch (IOException e) { LOG.error(e); } - + try { this.startKeys = this.table.getStartKeys(); } catch (IOException e) { @@ -79,7 +79,7 @@ implements Partitioner { if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){ if (i >= numPartitions-1){ // cover if we have less reduces then regions. - return (Integer.toString(i).hashCode() + return (Integer.toString(i).hashCode() & Integer.MAX_VALUE) % numPartitions; } return i; diff --git a/src/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java b/src/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java index 4db9075..5e56ccd 100644 --- a/src/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java +++ b/src/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java @@ -44,7 +44,7 @@ implements TableMap { /** * Use this before submitting a TableMap job. It will * appropriately set up the JobConf. 
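The getPartition() logic in the HRegionPartitioner hunk above routes each output key to the reducer that owns the region containing it, and wraps around by hashing when there are more regions than reducers. A simplified standalone restatement of that idea (the real class looks the row's region up through HTable; here the sorted region start keys are passed in directly):

final class RegionPartitionSketch {

  /** Unsigned lexicographic compare, the same ordering Bytes.compareTo() uses. */
  private static int compare(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) return d;
    }
    return a.length - b.length;
  }

  /** Route a row to the reducer owning its region; fold overflow regions by hash. */
  static int getPartition(byte[] row, byte[][] sortedStartKeys, int numPartitions) {
    int region = 0;
    for (int i = 0; i < sortedStartKeys.length; i++) {
      if (compare(sortedStartKeys[i], row) <= 0) {
        region = i;                       // last start key <= row owns the row
      }
    }
    if (region >= numPartitions - 1) {
      // cover the case of fewer reducers than regions, as the class above does
      return (Integer.toString(region).hashCode() & Integer.MAX_VALUE) % numPartitions;
    }
    return region;
  }
}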
- * + * * @param table table name * @param columns columns to scan * @param mapper mapper class @@ -60,17 +60,17 @@ implements TableMap { /** * Pass the key, value to reduce - * @param key - * @param value - * @param output - * @param reporter - * @throws IOException + * @param key + * @param value + * @param output + * @param reporter + * @throws IOException */ public void map(ImmutableBytesWritable key, RowResult value, OutputCollector output, Reporter reporter) throws IOException { - - // convert + + // convert output.collect(key, value); } } diff --git a/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java b/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java index 48e32cc..3a7821e 100644 --- a/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java +++ b/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java @@ -40,20 +40,20 @@ implements TableReduce { @SuppressWarnings("unused") private static final Log LOG = LogFactory.getLog(IdentityTableReduce.class.getName()); - + /** * No aggregation, output pairs of (key, record) - * @param key - * @param values - * @param output - * @param reporter - * @throws IOException + * @param key + * @param values + * @param output + * @param reporter + * @throws IOException */ public void reduce(ImmutableBytesWritable key, Iterator values, OutputCollector output, Reporter reporter) throws IOException { - + while(values.hasNext()) { output.collect(key, values.next()); } diff --git a/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java b/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java index 7083488..4a830a8 100644 --- a/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java +++ b/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java @@ -105,7 +105,7 @@ public class IndexOutputFormat extends boolean closed; private long docCount = 0; - public void write(ImmutableBytesWritable key, + public void write(ImmutableBytesWritable key, LuceneDocumentWrapper value) throws IOException { // unwrap and index doc diff --git a/src/java/org/apache/hadoop/hbase/mapred/IndexTableReduce.java b/src/java/org/apache/hadoop/hbase/mapred/IndexTableReduce.java index 412fca1..3b0417b 100644 --- a/src/java/org/apache/hadoop/hbase/mapred/IndexTableReduce.java +++ b/src/java/org/apache/hadoop/hbase/mapred/IndexTableReduce.java @@ -98,7 +98,7 @@ public class IndexTableReduce extends MapReduceBase implements Field.Index.NO; // UTF-8 encode value - Field field = new Field(column, Bytes.toString(columnValue), + Field field = new Field(column, Bytes.toString(columnValue), store, index); field.setBoost(indexConf.getBoost(column)); field.setOmitNorms(indexConf.isOmitNorms(column)); diff --git a/src/java/org/apache/hadoop/hbase/mapred/RowCounter.java b/src/java/org/apache/hadoop/hbase/mapred/RowCounter.java index 8a0faa8..ca82860 100644 --- a/src/java/org/apache/hadoop/hbase/mapred/RowCounter.java +++ b/src/java/org/apache/hadoop/hbase/mapred/RowCounter.java @@ -39,7 +39,7 @@ import org.apache.hadoop.util.ToolRunner; /** * A job with a map to count rows. - * Map outputs table rows IF the input row has columns that have content. + * Map outputs table rows IF the input row has columns that have content. 
* Uses an {@link IdentityReducer} */ @Deprecated @@ -108,13 +108,13 @@ public class RowCounter extends Configured implements Tool { FileOutputFormat.setOutputPath(c, new Path(args[0])); return c; } - + static int printUsage() { System.out.println(NAME + " [...]"); return -1; } - + public int run(final String[] args) throws Exception { // Make sure there are at least 3 parameters if (args.length < 3) { diff --git a/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java index fa7127e..7260c1e 100644 --- a/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java +++ b/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java @@ -221,7 +221,7 @@ implements InputFormat { try { result = this.scanner.next(); } catch (UnknownScannerException e) { - LOG.debug("recovered from " + StringUtils.stringifyException(e)); + LOG.debug("recovered from " + StringUtils.stringifyException(e)); restart(lastRow); this.scanner.next(); // skip presumed already mapped row result = this.scanner.next(); @@ -299,7 +299,7 @@ implements InputFormat { int lastPos = startPos + middle; lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos; String regionLocation = table.getRegionLocation(startKeys[startPos]). - getServerAddress().getHostname(); + getServerAddress().getHostname(); splits[i] = new TableSplit(this.table.getTableName(), startKeys[startPos], ((i + 1) < realNumSplits) ? startKeys[lastPos]: HConstants.EMPTY_START_ROW, regionLocation); diff --git a/src/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/src/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java index 0a5fa60..87e758f 100644 --- a/src/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java +++ b/src/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java @@ -36,11 +36,11 @@ import org.apache.hadoop.mapred.JobConf; @Deprecated @SuppressWarnings("unchecked") public class TableMapReduceUtil { - + /** * Use this before submitting a TableMap job. It will * appropriately set up the JobConf. - * + * * @param table The table name to read from. * @param columns The columns to scan. * @param mapper The mapper class to use. @@ -49,10 +49,10 @@ public class TableMapReduceUtil { * @param job The current job configuration to adjust. */ public static void initTableMapJob(String table, String columns, - Class mapper, - Class outputKeyClass, + Class mapper, + Class outputKeyClass, Class outputValueClass, JobConf job) { - + job.setInputFormat(TableInputFormat.class); job.setMapOutputValueClass(outputValueClass); job.setMapOutputKeyClass(outputKeyClass); @@ -60,15 +60,15 @@ public class TableMapReduceUtil { FileInputFormat.addInputPaths(job, table); job.set(TableInputFormat.COLUMN_LIST, columns); } - + /** * Use this before submitting a TableReduce job. It will * appropriately set up the JobConf. - * + * * @param table The output table. * @param reducer The reducer class to use. * @param job The current job configuration to adjust. - * @throws IOException When determining the region count fails. + * @throws IOException When determining the region count fails. */ public static void initTableReduceJob(String table, Class reducer, JobConf job) @@ -79,13 +79,13 @@ public class TableMapReduceUtil { /** * Use this before submitting a TableReduce job. It will * appropriately set up the JobConf. - * + * * @param table The output table. * @param reducer The reducer class to use. * @param job The current job configuration to adjust. 
- * @param partitioner Partitioner to use. Pass null to use + * @param partitioner Partitioner to use. Pass null to use * default partitioner. - * @throws IOException When determining the region count fails. + * @throws IOException When determining the region count fails. */ public static void initTableReduceJob(String table, Class reducer, JobConf job, Class partitioner) @@ -106,17 +106,17 @@ public class TableMapReduceUtil { job.setPartitionerClass(partitioner); } } - + /** - * Ensures that the given number of reduce tasks for the given job - * configuration does not exceed the number of regions for the given table. - * + * Ensures that the given number of reduce tasks for the given job + * configuration does not exceed the number of regions for the given table. + * * @param table The table to get the region count for. * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ - public static void limitNumReduceTasks(String table, JobConf job) - throws IOException { + public static void limitNumReduceTasks(String table, JobConf job) + throws IOException { HTable outputTable = new HTable(new HBaseConfiguration(job), table); int regions = outputTable.getRegionsInfo().size(); if (job.getNumReduceTasks() > regions) @@ -124,15 +124,15 @@ public class TableMapReduceUtil { } /** - * Ensures that the given number of map tasks for the given job - * configuration does not exceed the number of regions for the given table. - * + * Ensures that the given number of map tasks for the given job + * configuration does not exceed the number of regions for the given table. + * * @param table The table to get the region count for. * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ - public static void limitNumMapTasks(String table, JobConf job) - throws IOException { + public static void limitNumMapTasks(String table, JobConf job) + throws IOException { HTable outputTable = new HTable(new HBaseConfiguration(job), table); int regions = outputTable.getRegionsInfo().size(); if (job.getNumMapTasks() > regions) @@ -140,30 +140,30 @@ public class TableMapReduceUtil { } /** - * Sets the number of reduce tasks for the given job configuration to the - * number of regions the given table has. - * + * Sets the number of reduce tasks for the given job configuration to the + * number of regions the given table has. + * * @param table The table to get the region count for. * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ - public static void setNumReduceTasks(String table, JobConf job) - throws IOException { + public static void setNumReduceTasks(String table, JobConf job) + throws IOException { HTable outputTable = new HTable(new HBaseConfiguration(job), table); int regions = outputTable.getRegionsInfo().size(); job.setNumReduceTasks(regions); } - + /** - * Sets the number of map tasks for the given job configuration to the - * number of regions the given table has. - * + * Sets the number of map tasks for the given job configuration to the + * number of regions the given table has. + * * @param table The table to get the region count for. * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. 
*/ - public static void setNumMapTasks(String table, JobConf job) - throws IOException { + public static void setNumMapTasks(String table, JobConf job) + throws IOException { HTable outputTable = new HTable(new HBaseConfiguration(job), table); int regions = outputTable.getRegionsInfo().size(); job.setNumMapTasks(regions); @@ -173,7 +173,7 @@ public class TableMapReduceUtil { * Sets the number of rows to return and cache with each scanner iteration. * Higher caching values will enable faster mapreduce jobs at the expense of * requiring more heap to contain the cached rows. - * + * * @param job The current job configuration to adjust. * @param batchSize The number of rows to return in batch with each scanner * iteration. diff --git a/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java index f7e9e18..5f9482e 100644 --- a/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java +++ b/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java @@ -48,7 +48,7 @@ FileOutputFormat { private final Log LOG = LogFactory.getLog(TableOutputFormat.class); /** - * Convert Reduce output (key, value) to (HStoreKey, KeyedDataArrayWritable) + * Convert Reduce output (key, value) to (HStoreKey, KeyedDataArrayWritable) * and write to an HBase table */ protected static class TableRecordWriter @@ -57,14 +57,14 @@ FileOutputFormat { /** * Instantiate a TableRecordWriter with the HBase HClient for writing. - * + * * @param table */ public TableRecordWriter(HTable table) { m_table = table; } - public void close(Reporter reporter) + public void close(Reporter reporter) throws IOException { m_table.flushCommits(); } @@ -74,14 +74,14 @@ FileOutputFormat { m_table.commit(new BatchUpdate(value)); } } - + @Override @SuppressWarnings("unchecked") public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, String name, Progressable progress) throws IOException { - + // expecting exactly one path - + String tableName = job.get(OUTPUT_TABLE); HTable table = null; try { @@ -97,7 +97,7 @@ FileOutputFormat { @Override public void checkOutputSpecs(FileSystem ignored, JobConf job) throws FileAlreadyExistsException, InvalidJobConfException, IOException { - + String tableName = job.get(OUTPUT_TABLE); if(tableName == null) { throw new IOException("Must specify table name"); diff --git a/src/java/org/apache/hadoop/hbase/mapred/package-info.java b/src/java/org/apache/hadoop/hbase/mapred/package-info.java index 83d458b..56b4d52 100644 --- a/src/java/org/apache/hadoop/hbase/mapred/package-info.java +++ b/src/java/org/apache/hadoop/hbase/mapred/package-info.java @@ -99,7 +99,7 @@ below. If running the reduce step makes sense in your case, its usually better to have lots of reducers so load is spread across the hbase cluster.
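The initTableMapJob()/initTableReduceJob() helpers above wire TableInputFormat, TableOutputFormat and the map output types into a JobConf. A hedged sketch of a map-only dump job built on the map-side helper; the table name, column spec and output path are illustrative, and dumping to SequenceFiles is an assumption of this sketch rather than something the patch shows:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.mapred.IdentityTableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;

public class DumpTable {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(new HBaseConfiguration(), DumpTable.class);
    job.setJobName("dumptable");

    // Scan the "info" family of "mytable", passing rows through unchanged.
    TableMapReduceUtil.initTableMapJob("mytable", "info:",
        IdentityTableMap.class, ImmutableBytesWritable.class,
        RowResult.class, job);

    // Map-only: write the (row, RowResult) pairs straight to sequence files.
    job.setNumReduceTasks(0);
    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(RowResult.class);
    job.setOutputFormat(SequenceFileOutputFormat.class);
    FileOutputFormat.setOutputPath(job, new Path("/tmp/mytable-dump"));

    JobClient.runJob(job);
  }
}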

      There is also a new hbase partitioner that will run as many reducers as -currently existing regions. The +currently existing regions. The {@link org.apache.hadoop.hbase.mapred.HRegionPartitioner} is suitable when your table is large and your upload is not such that it will greatly alter the number of existing regions when done; other use the default @@ -133,7 +133,7 @@ Read the class comment for specification of inputs, prerequisites, etc.

      Example to bulk import/load a text file into an HTable

      -

      Here's a sample program from +

      Here's a sample program from Allen Day that takes an HDFS text file path and an HBase table name as inputs, and loads the contents of the text file to the table all up in the map phase. @@ -180,12 +180,12 @@ public class BulkImport implements Tool { throws IOException { if ( table == null ) throw new IOException("table is null"); - + // Split input line on tab character String [] splits = value.toString().split("\t"); if ( splits.length != 4 ) return; - + String rowID = splits[0]; int timestamp = Integer.parseInt( splits[1] ); String colID = splits[2]; @@ -198,8 +198,8 @@ public class BulkImport implements Tool { if ( timestamp > 0 ) bu.setTimestamp( timestamp ); - bu.put(colID, cellValue.getBytes()); - table.commit( bu ); + bu.put(colID, cellValue.getBytes()); + table.commit( bu ); } public void configure(JobConf job) { @@ -212,7 +212,7 @@ public class BulkImport implements Tool { } } } - + public JobConf createSubmittableJob(String[] args) { JobConf c = new JobConf(getConf(), BulkImport.class); c.setJobName(NAME); @@ -224,7 +224,7 @@ public class BulkImport implements Tool { c.setOutputFormat(NullOutputFormat.class); return c; } - + static int printUsage() { System.err.println("Usage: " + NAME + " <input> <table_name>"); System.err.println("\twhere <input> is a tab-delimited text file with 4 columns."); @@ -233,7 +233,7 @@ public class BulkImport implements Tool { System.err.println("\t\tcolumn 3 = column ID"); System.err.println("\t\tcolumn 4 = cell value"); return -1; - } + } public int run(@SuppressWarnings("unused") String[] args) throws Exception { // Make sure there are exactly 3 parameters left. @@ -246,7 +246,7 @@ public class BulkImport implements Tool { public Configuration getConf() { return this.conf; - } + } public void setConf(final Configuration c) { this.conf = c; diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/BuildTableIndex.java b/src/java/org/apache/hadoop/hbase/mapreduce/BuildTableIndex.java index de39a15..938d78e 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/BuildTableIndex.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/BuildTableIndex.java @@ -36,7 +36,7 @@ import org.apache.hadoop.util.GenericOptionsParser; * Example table column indexing class. Runs a mapreduce job to index * specified table columns. *

      • Each row is modeled as a Lucene document: row key is indexed in - * its untokenized form, column name-value pairs are Lucene field name-value + * its untokenized form, column name-value pairs are Lucene field name-value * pairs.
      • *
      • A file passed on command line is used to populate an * {@link IndexConfiguration} which is used to set various Lucene parameters, @@ -60,7 +60,7 @@ public class BuildTableIndex { /** * Prints the usage message and exists the program. - * + * * @param message The message to print first. */ private static void printUsage(String message) { @@ -71,12 +71,12 @@ public class BuildTableIndex { /** * Creates a new job. - * @param conf - * + * @param conf + * * @param args The command line arguments. * @throws IOException When reading the configuration fails. */ - public static Job createSubmittableJob(Configuration conf, String[] args) + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { if (args.length < 6) { printUsage("Too few arguments"); @@ -129,7 +129,7 @@ public class BuildTableIndex { Scan scan = new Scan(); scan.addColumns(columnNames.toString()); // use identity map (a waste, but just as an example) - IdentityTableMapper.initJob(tableName, scan, + IdentityTableMapper.initJob(tableName, scan, IdentityTableMapper.class, job); // use IndexTableReduce to build a Lucene index job.setReducerClass(IndexTableReducer.class); @@ -142,7 +142,7 @@ public class BuildTableIndex { * Reads xml file of indexing configurations. The xml format is similar to * hbase-default.xml and hadoop-default.xml. For an example configuration, * see the createIndexConfContent method in TestTableIndex. - * + * * @param fileName The file to read. * @return XML configuration read from file. * @throws IOException When the XML is broken. @@ -177,16 +177,16 @@ public class BuildTableIndex { /** * The main entry point. - * + * * @param args The command line arguments. * @throws Exception When running the job fails. */ public static void main(String[] args) throws Exception { HBaseConfiguration conf = new HBaseConfiguration(); - String[] otherArgs = + String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); Job job = createSubmittableJob(conf, otherArgs); System.exit(job.waitForCompletion(true) ? 0 : 1); } - + } \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/Driver.java b/src/java/org/apache/hadoop/hbase/mapreduce/Driver.java index 7521c56..ff5793b 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/Driver.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/Driver.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.migration.nineteen.HStoreFileToStoreFile; public class Driver { /** * @param args - * @throws Throwable + * @throws Throwable */ public static void main(String[] args) throws Throwable { ProgramDriver pgd = new ProgramDriver(); diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/Export.java b/src/java/org/apache/hadoop/hbase/mapreduce/Export.java index f1cbcba..7f37589 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/Export.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/Export.java @@ -51,7 +51,7 @@ public class Export { * @param value The columns. * @param context The current context. * @throws IOException When something is broken with the data. - * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, + * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, * org.apache.hadoop.mapreduce.Mapper.Context) */ @Override @@ -68,7 +68,7 @@ public class Export { /** * Sets up the actual job. - * + * * @param conf The current configuration. * @param args The command line parameters. * @return The newly created job. 
@@ -114,7 +114,7 @@ public class Export { /** * Main entry point. - * + * * @param args The command line parameters. * @throws Exception When running the job fails. */ diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java b/src/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java index c1448c6..c0cf028 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java @@ -40,31 +40,31 @@ public class GroupingTableMapper extends TableMapper implements Configurable { /** - * JobConf parameter to specify the columns used to produce the key passed to + * JobConf parameter to specify the columns used to produce the key passed to * collect from the map phase. */ public static final String GROUP_COLUMNS = "hbase.mapred.groupingtablemap.columns"; - + /** The grouping columns. */ protected byte [][] columns; /** The current configuration. */ private Configuration conf = null; - + /** - * Use this before submitting a TableMap job. It will appropriately set up + * Use this before submitting a TableMap job. It will appropriately set up * the job. * * @param table The table to be processed. * @param scan The scan with the columns etc. - * @param groupColumns A space separated list of columns used to form the + * @param groupColumns A space separated list of columns used to form the * key used in collect. * @param mapper The mapper class. * @param job The current job. * @throws IOException When setting up the job fails. */ @SuppressWarnings("unchecked") - public static void initJob(String table, Scan scan, String groupColumns, + public static void initJob(String table, Scan scan, String groupColumns, Class mapper, Job job) throws IOException { TableMapReduceUtil.initTableMapperJob(table, scan, mapper, ImmutableBytesWritable.class, Result.class, job); @@ -72,18 +72,18 @@ extends TableMapper implements Configurable { } /** - * Extract the grouping columns from value to construct a new key. Pass the - * new key and value to reduce. If any of the grouping columns are not found + * Extract the grouping columns from value to construct a new key. Pass the + * new key and value to reduce. If any of the grouping columns are not found * in the value, the record is skipped. - * - * @param key The current key. + * + * @param key The current key. * @param value The current value. - * @param context The current context. + * @param context The current context. * @throws IOException When writing the record fails. * @throws InterruptedException When the job is aborted. */ @Override - public void map(ImmutableBytesWritable key, Result value, Context context) + public void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException { byte[][] keyVals = extractKeyValues(value); if(keyVals != null) { @@ -97,7 +97,7 @@ extends TableMapper implements Configurable { * null if any of the columns are not found. *

        * Override this method if you want to deal with nulls differently. - * + * * @param r The current values. * @return Array of byte values. */ @@ -124,9 +124,9 @@ extends TableMapper implements Configurable { /** * Create a key by concatenating multiple column values. - *

        + *

        * Override this function in order to produce different types of keys. - * + * * @param vals The current key/values. * @return A key generated by concatenating multiple column values. */ @@ -150,7 +150,7 @@ extends TableMapper implements Configurable { /** * Returns the current configuration. - * + * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -161,7 +161,7 @@ extends TableMapper implements Configurable { /** * Sets the configuration. This is used to set up the grouping details. - * + * * @param configuration The configuration to set. * @see org.apache.hadoop.conf.Configurable#setConf( * org.apache.hadoop.conf.Configuration) @@ -175,5 +175,5 @@ extends TableMapper implements Configurable { columns[i] = Bytes.toBytes(cols[i]); } } - + } diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java b/src/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java index fefb6bc..2c81723 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java @@ -50,7 +50,7 @@ import org.mortbay.log.Log; public class HFileOutputFormat extends FileOutputFormat { public RecordWriter getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException { - // Get the path of the temporary output file + // Get the path of the temporary output file final Path outputPath = FileOutputFormat.getOutputPath(context); final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath(); Configuration conf = context.getConfiguration(); @@ -127,7 +127,7 @@ public class HFileOutputFormat extends FileOutputFormat The type of the key. * @param The type of the value. */ -public class HRegionPartitioner +public class HRegionPartitioner extends Partitioner implements Configurable { - + private final Log LOG = LogFactory.getLog(TableInputFormat.class); private Configuration conf = null; private HTable table; - private byte[][] startKeys; - + private byte[][] startKeys; + /** - * Gets the partition number for a given key (hence record) given the total + * Gets the partition number for a given key (hence record) given the total * number of partitions i.e. number of reduce-tasks for the job. - * + * *

        Typically a hash function on all or a subset of the key.

        * * @param key The key to be partitioned. @@ -80,7 +80,7 @@ implements Configurable { if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){ if (i >= numPartitions-1){ // cover if we have less reduces then regions. - return (Integer.toString(i).hashCode() + return (Integer.toString(i).hashCode() & Integer.MAX_VALUE) % numPartitions; } return i; @@ -92,7 +92,7 @@ implements Configurable { /** * Returns the current configuration. - * + * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -104,7 +104,7 @@ implements Configurable { /** * Sets the configuration. This is used to determine the start keys for the * given table. - * + * * @param configuration The configuration to set. * @see org.apache.hadoop.conf.Configurable#setConf( * org.apache.hadoop.conf.Configuration) @@ -113,7 +113,7 @@ implements Configurable { public void setConf(Configuration configuration) { this.conf = configuration; try { - this.table = new HTable(new HBaseConfiguration(conf), + this.table = new HTable(new HBaseConfiguration(conf), configuration.get(TableOutputFormat.OUTPUT_TABLE)); } catch (IOException e) { LOG.error(e); diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java b/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java index a8cd6e3..fd5d8fe 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java @@ -33,9 +33,9 @@ public class IdentityTableMapper extends TableMapper { /** - * Use this before submitting a TableMap job. It will appropriately set up + * Use this before submitting a TableMap job. It will appropriately set up * the job. - * + * * @param table The table name. * @param scan The scan with the columns to scan. * @param mapper The mapper class. @@ -51,16 +51,16 @@ extends TableMapper { /** * Pass the key, value to reduce. - * - * @param key The current key. + * + * @param key The current key. * @param value The current value. - * @param context The current context. + * @param context The current context. * @throws IOException When writing the record fails. * @throws InterruptedException When the job is aborted. */ - public void map(ImmutableBytesWritable key, Result value, Context context) + public void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException { context.write(key, value); } - + } diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java b/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java index eb7609c..90c0a8e 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java @@ -27,44 +27,44 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.OutputFormat; /** - * Convenience class that simply writes all values (which must be - * {@link org.apache.hadoop.hbase.client.Put Put} or + * Convenience class that simply writes all values (which must be + * {@link org.apache.hadoop.hbase.client.Put Put} or * {@link org.apache.hadoop.hbase.client.Delete Delete} instances) - * passed to it out to the configured HBase table. This works in combination + * passed to it out to the configured HBase table. This works in combination * with {@link TableOutputFormat} which actually does the writing to HBase.

        - * + * * Keys are passed along but ignored in TableOutputFormat. However, they can * be used to control how your values will be divided up amongst the specified * number of reducers.

        - * - * You can also use the {@link TableMapReduceUtil} class to set up the two + * + * You can also use the {@link TableMapReduceUtil} class to set up the two * classes in one step: *

        * TableMapReduceUtil.initTableReducerJob("table", IdentityTableReducer.class, job); *
        * This will also set the proper {@link TableOutputFormat} which is given the - * table parameter. The - * {@link org.apache.hadoop.hbase.client.Put Put} or + * table parameter. The + * {@link org.apache.hadoop.hbase.client.Put Put} or * {@link org.apache.hadoop.hbase.client.Delete Delete} define the * row and columns implicitly. */ -public class IdentityTableReducer +public class IdentityTableReducer extends TableReducer { @SuppressWarnings("unused") private static final Log LOG = LogFactory.getLog(IdentityTableReducer.class); - + /** - * Writes each given record, consisting of the row key and the given values, - * to the configured {@link OutputFormat}. It is emitting the row key and each - * {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs. - * - * @param key The current row key. - * @param values The {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given + * Writes each given record, consisting of the row key and the given values, + * to the configured {@link OutputFormat}. It is emitting the row key and each + * {@link org.apache.hadoop.hbase.client.Put Put} or + * {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs. + * + * @param key The current row key. + * @param values The {@link org.apache.hadoop.hbase.client.Put Put} or + * {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given * row. - * @param context The context of the reduce. + * @param context The context of the reduce. * @throws IOException When writing the record fails. * @throws InterruptedException When the job gets interrupted. */ diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/Import.java b/src/java/org/apache/hadoop/hbase/mapreduce/Import.java index fa2bfc4..22ad9c5 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -49,7 +49,7 @@ public class Import { * @param value The columns. * @param context The current context. * @throws IOException When something is broken with the data. - * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, + * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, * org.apache.hadoop.mapreduce.Mapper.Context) */ @Override @@ -63,7 +63,7 @@ public class Import { } } - private static Put resultToPut(ImmutableBytesWritable key, Result result) + private static Put resultToPut(ImmutableBytesWritable key, Result result) throws IOException { Put put = new Put(key.get()); for (KeyValue kv : result.raw()) { @@ -75,13 +75,13 @@ public class Import { /** * Sets up the actual job. - * + * * @param conf The current configuration. * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ - public static Job createSubmittableJob(Configuration conf, String[] args) + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { String tableName = args[0]; Path inputDir = new Path(args[1]); @@ -109,7 +109,7 @@ public class Import { /** * Main entry point. - * + * * @param args The command line parameters. * @throws Exception When running the job fails. 
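The Import mapper above illustrates the standard new-API pattern: a TableMapper converts each Result into a Put, and IdentityTableReducer plus TableOutputFormat (configured through TableMapReduceUtil) write those Puts to the destination table. A hedged end-to-end sketch of that pattern; the source and destination table names are made up, and only the init methods shown in this patch are used:

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.IdentityTableReducer;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.mapreduce.Job;

public class CopyTable {

  /** Turns each scanned Result into a Put, like Import.resultToPut() above. */
  static class ToPutMapper extends TableMapper<ImmutableBytesWritable, Put> {
    @Override
    public void map(ImmutableBytesWritable row, Result value, Context context)
        throws IOException, InterruptedException {
      Put put = new Put(row.get());
      for (KeyValue kv : value.raw()) {
        put.add(kv);
      }
      context.write(row, put);
    }
  }

  public static void main(String[] args) throws Exception {
    Job job = new Job(new HBaseConfiguration(), "copytable");
    job.setJarByClass(CopyTable.class);
    TableMapReduceUtil.initTableMapperJob("sourcetable", new Scan(),
        ToPutMapper.class, ImmutableBytesWritable.class, Put.class, job);
    TableMapReduceUtil.initTableReducerJob("desttable",
        IdentityTableReducer.class, job);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}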
*/ diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/IndexConfiguration.java b/src/java/org/apache/hadoop/hbase/mapreduce/IndexConfiguration.java index 9d5cb9d..9281829 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/IndexConfiguration.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/IndexConfiguration.java @@ -47,7 +47,7 @@ import org.w3c.dom.Text; * Configuration parameters for building a Lucene index. */ public class IndexConfiguration extends Configuration { - + private static final Log LOG = LogFactory.getLog(IndexConfiguration.class); static final String HBASE_COLUMN_NAME = "hbase.column.name"; diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/IndexOutputFormat.java b/src/java/org/apache/hadoop/hbase/mapreduce/IndexOutputFormat.java index c43a71d..1fa9e8a 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/IndexOutputFormat.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/IndexOutputFormat.java @@ -38,9 +38,9 @@ import org.apache.lucene.search.Similarity; * Create a local index, unwrap Lucene documents created by reduce, add them to * the index, and copy the index to the destination. */ -public class IndexOutputFormat +public class IndexOutputFormat extends FileOutputFormat { - + static final Log LOG = LogFactory.getLog(IndexOutputFormat.class); /** Random generator. */ @@ -48,7 +48,7 @@ extends FileOutputFormat { /** * Returns the record writer. - * + * * @param context The current task context. * @return The record writer. * @throws IOException When there is an issue with the writer. @@ -59,7 +59,7 @@ extends FileOutputFormat { getRecordWriter(TaskAttemptContext context) throws IOException { - final Path perm = new Path(FileOutputFormat.getOutputPath(context), + final Path perm = new Path(FileOutputFormat.getOutputPath(context), FileOutputFormat.getUniqueFile(context, "part", "")); // null for "dirsProp" means no predefined directories final Path temp = context.getConfiguration().getLocalPath( @@ -109,5 +109,5 @@ extends FileOutputFormat { writer.setUseCompoundFile(indexConf.isUseCompoundFile()); return new IndexRecordWriter(context, fs, writer, indexConf, perm, temp); } - + } diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java b/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java index 0092831..bbf58ca 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java @@ -35,11 +35,11 @@ import org.apache.lucene.index.IndexWriter; /** * Writes the records into a Lucene index writer. */ -public class IndexRecordWriter +public class IndexRecordWriter extends RecordWriter { static final Log LOG = LogFactory.getLog(IndexRecordWriter.class); - + private long docCount = 0; private TaskAttemptContext context = null; private FileSystem fs = null; @@ -47,10 +47,10 @@ extends RecordWriter { private IndexConfiguration indexConf = null; private Path perm = null; private Path temp = null; - + /** * Creates a new instance. - * + * * @param context The task context. * @param fs The file system. * @param writer The index writer. @@ -58,7 +58,7 @@ extends RecordWriter { * @param perm The permanent path in the DFS. * @param temp The temporary local path. 
*/ - public IndexRecordWriter(TaskAttemptContext context, FileSystem fs, + public IndexRecordWriter(TaskAttemptContext context, FileSystem fs, IndexWriter writer, IndexConfiguration indexConf, Path perm, Path temp) { this.context = context; this.fs = fs; @@ -67,10 +67,10 @@ extends RecordWriter { this.perm = perm; this.temp = temp; } - + /** * Writes the record into an index. - * + * * @param key The current key. * @param value The current value. * @throws IOException When the index is faulty. @@ -81,14 +81,14 @@ extends RecordWriter { throws IOException { // unwrap and index doc Document doc = value.get(); - writer.addDocument(doc); + writer.addDocument(doc); docCount++; context.progress(); - } + } /** * Closes the writer. - * + * * @param context The current context. * @throws IOException When closing the writer fails. * @see org.apache.hadoop.mapreduce.RecordWriter#close(org.apache.hadoop.mapreduce.TaskAttemptContext) @@ -128,10 +128,10 @@ extends RecordWriter { /** Flag to track when to finish. */ private boolean closed = false; - + /** * Runs the thread. Sending heart beats to the framework. - * + * * @see java.lang.Runnable#run() */ @Override @@ -143,7 +143,7 @@ extends RecordWriter { } while (!closed) { try { - context.progress(); + context.progress(); Thread.sleep(1000); } catch (InterruptedException e) { continue; @@ -152,14 +152,14 @@ extends RecordWriter { } } } - + /** - * Switches the flag. + * Switches the flag. */ public void setClosed() { closed = true; } - + } - + } diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/IndexTableReducer.java b/src/java/org/apache/hadoop/hbase/mapreduce/IndexTableReducer.java index 1abe86e..8cdf0db 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/IndexTableReducer.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/IndexTableReducer.java @@ -37,23 +37,23 @@ import org.apache.commons.logging.LogFactory; * Construct a Lucene document per row, which is consumed by IndexOutputFormat * to build a Lucene index */ -public class IndexTableReducer -extends Reducer implements Configurable { - + private static final Log LOG = LogFactory.getLog(IndexTableReducer.class); - + private IndexConfiguration indexConf; private Configuration conf = null; - + /** * Writes each given record, consisting of the key and the given values, to * the index. - * + * * @param key The current row key. * @param values The values for the given row. - * @param context The context of the reduce. + * @param context The context of the reduce. * @throws IOException When writing the record fails. * @throws InterruptedException When the job gets interrupted. */ @@ -85,7 +85,7 @@ implements Configurable { Field.Index.NO; // UTF-8 encode value - Field field = new Field(column, Bytes.toString(columnValue), + Field field = new Field(column, Bytes.toString(columnValue), store, index); field.setBoost(indexConf.getBoost(column)); field.setOmitNorms(indexConf.isOmitNorms(column)); @@ -98,7 +98,7 @@ implements Configurable { /** * Returns the current configuration. - * + * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -109,7 +109,7 @@ implements Configurable { /** * Sets the configuration. This is used to set up the index configuration. - * + * * @param configuration The configuration to set. 
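
The close() path of IndexRecordWriter above starts a small heartbeat Runnable so the task is not timed out while the finished Lucene index is optimized and copied to the DFS. A standalone sketch of that pattern, with an invented class name, assuming only the TaskAttemptContext.progress() call already visible in the hunk:

import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Hypothetical helper: keeps reporting progress during a long, record-free phase
// so the framework does not kill the task for inactivity.
class ProgressHeartbeat implements Runnable {
  private final TaskAttemptContext context;
  private volatile boolean closed = false;

  ProgressHeartbeat(TaskAttemptContext context) {
    this.context = context;
  }

  public void setClosed() {
    closed = true;               // tells run() to fall out of its loop
  }

  @Override
  public void run() {
    while (!closed) {
      try {
        context.progress();      // heartbeat to the framework
        Thread.sleep(1000);      // roughly once a second, as in the writer above
      } catch (InterruptedException e) {
        // ignore and re-check the closed flag
      }
    }
  }
}

The writer would start this on a Thread before the long-running copy, then call setClosed() and join the thread once the copy completes.
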
* @see org.apache.hadoop.conf.Configurable#setConf( * org.apache.hadoop.conf.Configuration) @@ -126,5 +126,5 @@ implements Configurable { LOG.debug("Index conf: " + indexConf); } } - + } diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/LuceneDocumentWrapper.java b/src/java/org/apache/hadoop/hbase/mapreduce/LuceneDocumentWrapper.java index 6aa7cdd..5564647 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/LuceneDocumentWrapper.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/LuceneDocumentWrapper.java @@ -29,7 +29,7 @@ import org.apache.lucene.document.Document; * It doesn't really serialize/deserialize a lucene document. */ public class LuceneDocumentWrapper implements Writable { - + /** The document to add to the index. */ protected Document doc; diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/src/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java index 8223b00..59c46f6 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java @@ -33,7 +33,7 @@ import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; import org.apache.hadoop.util.GenericOptionsParser; /** - * A job with a just a map phase to count rows. Map outputs table rows IF the + * A job with a just a map phase to count rows. Map outputs table rows IF the * input row has columns that have content. */ public class RowCounter { @@ -45,18 +45,18 @@ public class RowCounter { */ static class RowCounterMapper extends TableMapper { - + /** Counter enumeration to count the actual rows. */ private static enum Counters {ROWS} /** * Maps the data. - * + * * @param row The current table row key. * @param values The columns. * @param context The current context. * @throws IOException When something is broken with the data. - * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, + * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, * org.apache.hadoop.mapreduce.Mapper.Context) */ @Override @@ -75,13 +75,13 @@ public class RowCounter { /** * Sets up the actual job. - * + * * @param conf The current configuration. * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ - public static Job createSubmittableJob(Configuration conf, String[] args) + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { String tableName = args[0]; Job job = new Job(conf, NAME + "_" + tableName); @@ -110,7 +110,7 @@ public class RowCounter { /** * Main entry point. - * + * * @param args The command line parameters. * @throws Exception When running the job fails. */ diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java b/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java index 7b1ed95..aad8545 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java @@ -34,11 +34,11 @@ import org.apache.hadoop.util.StringUtils; /** * Convert HBase tabular data into a format that is consumable by Map/Reduce. */ -public class TableInputFormat extends TableInputFormatBase +public class TableInputFormat extends TableInputFormatBase implements Configurable { - + private final Log LOG = LogFactory.getLog(TableInputFormat.class); - + /** Job parameter that specifies the input table. */ public static final String INPUT_TABLE = "hbase.mapreduce.inputtable"; /** Base-64 encoded scanner. 
All other SCAN_ confs are ignored if this is specified. @@ -61,13 +61,13 @@ implements Configurable { public static final String SCAN_CACHEBLOCKS = "hbase.mapreduce.scan.cacheblocks"; /** The number of rows for caching that will be passed to scanners. */ public static final String SCAN_CACHEDROWS = "hbase.mapreduce.scan.cachedrows"; - + /** The configuration. */ private Configuration conf = null; /** * Returns the current configuration. - * + * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -79,7 +79,7 @@ implements Configurable { /** * Sets the configuration. This is used to set the details for the table to * be scanned. - * + * * @param configuration The configuration to set. * @see org.apache.hadoop.conf.Configurable#setConf( * org.apache.hadoop.conf.Configuration) @@ -93,9 +93,9 @@ implements Configurable { } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); } - + Scan scan = null; - + if (conf.get(SCAN) != null) { try { scan = TableMapReduceUtil.convertStringToScan(conf.get(SCAN)); @@ -105,22 +105,22 @@ implements Configurable { } else { try { scan = new Scan(); - + if (conf.get(SCAN_COLUMNS) != null) { scan.addColumns(conf.get(SCAN_COLUMNS)); } - - if (conf.get(SCAN_COLUMN_FAMILY) != null) { + + if (conf.get(SCAN_COLUMN_FAMILY) != null) { scan.addFamily(Bytes.toBytes(conf.get(SCAN_COLUMN_FAMILY))); } - + if (conf.get(SCAN_TIMESTAMP) != null) { scan.setTimeStamp(Long.parseLong(conf.get(SCAN_TIMESTAMP))); } - + if (conf.get(SCAN_TIMERANGE_START) != null && conf.get(SCAN_TIMERANGE_END) != null) { scan.setTimeRange( - Long.parseLong(conf.get(SCAN_TIMERANGE_START)), + Long.parseLong(conf.get(SCAN_TIMERANGE_START)), Long.parseLong(conf.get(SCAN_TIMERANGE_END))); } @@ -141,5 +141,5 @@ implements Configurable { setScan(scan); } - + } diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java index 7b47199..80c751c 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java @@ -45,8 +45,8 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.StringUtils; /** - * A base for {@link TableInputFormat}s. Receives a {@link HTable}, an - * {@link Scan} instance that defines the input columns etc. Subclasses may use + * A base for {@link TableInputFormat}s. Receives a {@link HTable}, an + * {@link Scan} instance that defines the input columns etc. Subclasses may use * other TableRecordReader implementations. *
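
Because setConf() above builds its Scan from plain configuration keys, TableInputFormat can also be driven without TableMapReduceUtil by populating those keys directly. A hypothetical driver fragment; the table and family names are invented, and SCAN_COLUMN_FAMILY is assumed to be accessible like the INPUT_TABLE and SCAN_CACHEDROWS constants declared public in the hunk:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.mapreduce.Job;

public class ScanDriverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(TableInputFormat.INPUT_TABLE, "accesslog");      // table to read (made up)
    conf.set(TableInputFormat.SCAN_COLUMN_FAMILY, "info");    // limit the scan to one family
    conf.set(TableInputFormat.SCAN_CACHEDROWS, "500");        // rows fetched per scanner call
    Job job = new Job(conf, "scan-accesslog");
    job.setInputFormatClass(TableInputFormat.class);
    // mapper, reducer and output setup omitted; see TableMapReduceUtil further below
  }
}
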

        * An example of a subclass: @@ -74,7 +74,7 @@ import org.apache.hadoop.util.StringUtils; */ public abstract class TableInputFormatBase extends InputFormat { - + final Log LOG = LogFactory.getLog(TableInputFormatBase.class); /** Holds the details for the internal scanner. */ @@ -85,12 +85,12 @@ extends InputFormat { private TableRecordReader tableRecordReader = null; /** - * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) + * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) * pairs. */ protected class TableRecordReader extends RecordReader { - + private ResultScanner scanner = null; private Scan scan = null; private HTable htable = null; @@ -113,7 +113,7 @@ extends InputFormat { /** * Build the scanner. Not done in constructor to allow for extension. * - * @throws IOException When restarting the scan fails. + * @throws IOException When restarting the scan fails. */ public void init() throws IOException { restart(scan.getStartRow()); @@ -121,7 +121,7 @@ extends InputFormat { /** * Sets the HBase table. - * + * * @param htable The {@link HTable} to scan. */ public void setHTable(HTable htable) { @@ -130,7 +130,7 @@ extends InputFormat { /** * Sets the scan defining the actual details like columns etc. - * + * * @param scan The scan to set. */ public void setScan(Scan scan) { @@ -139,7 +139,7 @@ extends InputFormat { /** * Closes the split. - * + * * @see org.apache.hadoop.mapreduce.RecordReader#close() */ @Override @@ -149,7 +149,7 @@ extends InputFormat { /** * Returns the current key. - * + * * @return The current key. * @throws IOException * @throws InterruptedException When the job is aborted. @@ -163,7 +163,7 @@ extends InputFormat { /** * Returns the current value. - * + * * @return The current value. * @throws IOException When the value is faulty. * @throws InterruptedException When the job is aborted. @@ -176,13 +176,13 @@ extends InputFormat { /** * Initializes the reader. - * + * * @param inputsplit The split to work with. * @param context The current task context. * @throws IOException When setting up the reader fails. * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.RecordReader#initialize( - * org.apache.hadoop.mapreduce.InputSplit, + * org.apache.hadoop.mapreduce.InputSplit, * org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override @@ -193,7 +193,7 @@ extends InputFormat { /** * Positions the record reader to the next record. - * + * * @return true if there was another record. * @throws IOException When reading the record failed. * @throws InterruptedException When the job was aborted. @@ -206,7 +206,7 @@ extends InputFormat { try { value = this.scanner.next(); } catch (IOException e) { - LOG.debug("recovered from " + StringUtils.stringifyException(e)); + LOG.debug("recovered from " + StringUtils.stringifyException(e)); restart(lastRow); scanner.next(); // skip presumed already mapped row value = scanner.next(); @@ -221,7 +221,7 @@ extends InputFormat { /** * The current progress of the record reader through its data. - * + * * @return A number between 0.0 and 1.0, the fraction of the data read. * @see org.apache.hadoop.mapreduce.RecordReader#getProgress() */ @@ -235,13 +235,13 @@ extends InputFormat { /** * Builds a TableRecordReader. If no TableRecordReader was provided, uses * the default. - * + * * @param split The split to work with. * @param context The current context. * @return The newly created record reader. * @throws IOException When creating the reader fails. 
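
The class comment above refers to "an example of a subclass". A minimal, hypothetical subclass in that spirit (table and family names invented; it assumes the base class exposes setHTable() alongside the setScan() setter shown further down in this hunk):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical subclass: wires up the table and the scan the record reader will use.
public class ExampleTIF extends TableInputFormatBase {
  public void configure(Configuration conf) throws IOException {
    setHTable(new HTable(new HBaseConfiguration(conf), "exampleTable"));
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("columnA"));   // restrict the input to one family
    setScan(scan);
  }
}

In practice such a configure() method would be invoked from Configurable.setConf(), which is roughly what TableInputFormat's setConf() in the previous hunk does.
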
* @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader( - * org.apache.hadoop.mapreduce.InputSplit, + * org.apache.hadoop.mapreduce.InputSplit, * org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override @@ -276,7 +276,7 @@ extends InputFormat { @Override public List getSplits(JobContext context) throws IOException { Pair keys = table.getStartEndKeys(); - if (keys == null || keys.getFirst() == null || + if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) { throw new IOException("Expecting at least one region."); } @@ -284,7 +284,7 @@ extends InputFormat { throw new IOException("No table was provided."); } int count = 0; - List splits = new ArrayList(keys.getFirst().length); + List splits = new ArrayList(keys.getFirst().length); for (int i = 0; i < keys.getFirst().length; i++) { if ( !includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) { continue; @@ -296,19 +296,19 @@ extends InputFormat { // determine if the given start an stop key fall into the region if ((startRow.length == 0 || keys.getSecond()[i].length == 0 || Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) && - (stopRow.length == 0 || + (stopRow.length == 0 || Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) { - byte[] splitStart = startRow.length == 0 || - Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ? + byte[] splitStart = startRow.length == 0 || + Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ? keys.getFirst()[i] : startRow; - byte[] splitStop = (stopRow.length == 0 || + byte[] splitStop = (stopRow.length == 0 || Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) && - keys.getSecond()[i].length > 0 ? + keys.getSecond()[i].length > 0 ? keys.getSecond()[i] : stopRow; InputSplit split = new TableSplit(table.getTableName(), splitStart, splitStop, regionLocation); splits.add(split); - if (LOG.isDebugEnabled()) + if (LOG.isDebugEnabled()) LOG.debug("getSplits: split -> " + (count++) + " -> " + split); } } @@ -361,7 +361,7 @@ extends InputFormat { /** * Gets the scan defining the actual details like columns etc. - * + * * @return The internal scan instance. */ public Scan getScan() { @@ -371,7 +371,7 @@ extends InputFormat { /** * Sets the scan defining the actual details like columns etc. - * + * * @param scan The scan to set. */ public void setScan(Scan scan) { @@ -381,7 +381,7 @@ extends InputFormat { /** * Allows subclasses to set the {@link TableRecordReader}. * - * @param tableRecordReader A different {@link TableRecordReader} + * @param tableRecordReader A different {@link TableRecordReader} * implementation. */ protected void setTableRecordReader(TableRecordReader tableRecordReader) { diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/src/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index a371a59..0ce6284 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -39,11 +39,11 @@ import org.apache.hadoop.mapreduce.Job; */ @SuppressWarnings("unchecked") public class TableMapReduceUtil { - + /** - * Use this before submitting a TableMap job. It will appropriately set up + * Use this before submitting a TableMap job. It will appropriately set up * the job. - * + * * @param table The table name to read from. * @param scan The scan instance with the columns, time range etc. * @param mapper The mapper class to use. @@ -53,8 +53,8 @@ public class TableMapReduceUtil { * @throws IOException When setting up the details fails. 
*/ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, + Class mapper, + Class outputKeyClass, Class outputValueClass, Job job) throws IOException { job.setInputFormatClass(TableInputFormat.class); if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass); @@ -67,13 +67,13 @@ public class TableMapReduceUtil { /** * Writes the given scan into a Base64 encoded string. - * + * * @param scan The scan to write out. * @return The scan saved in a Base64 encoded string. * @throws IOException When writing the scan fails. */ static String convertScanToString(Scan scan) throws IOException { - ByteArrayOutputStream out = new ByteArrayOutputStream(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(out); scan.write(dos); return Base64.encodeBytes(out.toByteArray()); @@ -81,7 +81,7 @@ public class TableMapReduceUtil { /** * Converts the given Base64 string back into a Scan instance. - * + * * @param base64 The scan details. * @return The newly created Scan instance. * @throws IOException When reading the scan instance fails. @@ -93,15 +93,15 @@ public class TableMapReduceUtil { scan.readFields(dis); return scan; } - + /** * Use this before submitting a TableReduce job. It will * appropriately set up the JobConf. - * + * * @param table The output table. * @param reducer The reducer class to use. * @param job The current job to adjust. - * @throws IOException When determining the region count fails. + * @throws IOException When determining the region count fails. */ public static void initTableReducerJob(String table, Class reducer, Job job) @@ -112,13 +112,13 @@ public class TableMapReduceUtil { /** * Use this before submitting a TableReduce job. It will * appropriately set up the JobConf. - * + * * @param table The output table. * @param reducer The reducer class to use. * @param job The current job to adjust. - * @param partitioner Partitioner to use. Pass null to use + * @param partitioner Partitioner to use. Pass null to use * default partitioner. - * @throws IOException When determining the region count fails. + * @throws IOException When determining the region count fails. */ public static void initTableReducerJob(String table, Class reducer, Job job, Class partitioner) @@ -140,17 +140,17 @@ public class TableMapReduceUtil { job.setPartitionerClass(partitioner); } } - + /** - * Ensures that the given number of reduce tasks for the given job - * configuration does not exceed the number of regions for the given table. - * + * Ensures that the given number of reduce tasks for the given job + * configuration does not exceed the number of regions for the given table. + * * @param table The table to get the region count for. * @param job The current job to adjust. * @throws IOException When retrieving the table details fails. */ - public static void limitNumReduceTasks(String table, Job job) - throws IOException { + public static void limitNumReduceTasks(String table, Job job) + throws IOException { HTable outputTable = new HTable(new HBaseConfiguration( job.getConfiguration()), table); int regions = outputTable.getRegionsInfo().size(); @@ -159,26 +159,26 @@ public class TableMapReduceUtil { } /** - * Sets the number of reduce tasks for the given job configuration to the - * number of regions the given table has. - * + * Sets the number of reduce tasks for the given job configuration to the + * number of regions the given table has. 
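
Taken together, the helpers in this file let a driver wire up a table-in/table-out job in a few lines. A hypothetical driver using only the signatures visible in this hunk; MyTableMapper stands in for a user mapper that emits (ImmutableBytesWritable, Put), and the table names are invented:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.IdentityTableReducer;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class CopyDriverSketch {
  public static void main(String[] args) throws Exception {
    Job job = new Job(new Configuration(), "copy_in_table_to_out_table");
    Scan scan = new Scan();                       // full-table scan; narrow it as needed
    TableMapReduceUtil.initTableMapperJob("in_table", scan,
        MyTableMapper.class, ImmutableBytesWritable.class, Put.class, job);
    TableMapReduceUtil.initTableReducerJob("out_table", IdentityTableReducer.class, job);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
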
+ * * @param table The table to get the region count for. * @param job The current job to adjust. * @throws IOException When retrieving the table details fails. */ - public static void setNumReduceTasks(String table, Job job) - throws IOException { + public static void setNumReduceTasks(String table, Job job) + throws IOException { HTable outputTable = new HTable(new HBaseConfiguration( job.getConfiguration()), table); int regions = outputTable.getRegionsInfo().size(); job.setNumReduceTasks(regions); } - + /** * Sets the number of rows to return and cache with each scanner iteration. * Higher caching values will enable faster mapreduce jobs at the expense of * requiring more heap to contain the cached rows. - * + * * @param job The current job to adjust. * @param batchSize The number of rows to return in batch with each scanner * iteration. @@ -186,5 +186,5 @@ public class TableMapReduceUtil { public static void setScannerCaching(Job job, int batchSize) { job.getConfiguration().setInt("hbase.client.scanner.caching", batchSize); } - + } \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java b/src/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java index ae0af5b..bbceb63 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java @@ -24,9 +24,9 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.Mapper; /** - * Extends the base Mapper class to add the required input key + * Extends the base Mapper class to add the required input key * and value classes. - * + * * @param The type of the key. * @param The type of the value. * @see org.apache.hadoop.mapreduce.Mapper diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/src/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java index b31b0c7..26cfc5d 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java @@ -36,9 +36,9 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; /** * Convert Map/Reduce output and write it to an HBase table. The KEY is ignored - * while the output value must be either a {@link Put} or a - * {@link Delete} instance. - * + * while the output value must be either a {@link Put} or a + * {@link Delete} instance. + * * @param The type of the key. Ignored in this class. */ public class TableOutputFormat extends OutputFormat { @@ -49,18 +49,18 @@ public class TableOutputFormat extends OutputFormat { /** * Writes the reducer output to an HBase table. - * + * * @param The type of the key. */ - protected static class TableRecordWriter + protected static class TableRecordWriter extends RecordWriter { - + /** The table to write to. */ private HTable table; /** * Instantiate a TableRecordWriter with the HBase HClient for writing. - * + * * @param table The table to write to. */ public TableRecordWriter(HTable table) { @@ -69,37 +69,37 @@ public class TableOutputFormat extends OutputFormat { /** * Closes the writer, in this case flush table commits. - * + * * @param context The context. * @throws IOException When closing the writer fails. 
* @see org.apache.hadoop.mapreduce.RecordWriter#close(org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public void close(TaskAttemptContext context) + public void close(TaskAttemptContext context) throws IOException { table.flushCommits(); } /** * Writes a key/value pair into the table. - * + * * @param key The key. * @param value The value. * @throws IOException When writing fails. * @see org.apache.hadoop.mapreduce.RecordWriter#write(java.lang.Object, java.lang.Object) */ @Override - public void write(KEY key, Writable value) + public void write(KEY key, Writable value) throws IOException { if (value instanceof Put) this.table.put(new Put((Put)value)); else if (value instanceof Delete) this.table.delete(new Delete((Delete)value)); else throw new IOException("Pass a Delete or a Put"); } } - + /** * Creates a new record writer. - * + * * @param context The current task context. * @return The newly created writer instance. * @throws IOException When creating the writer fails. @@ -107,13 +107,13 @@ public class TableOutputFormat extends OutputFormat { * @see org.apache.hadoop.mapreduce.lib.output.FileOutputFormat#getRecordWriter(org.apache.hadoop.mapreduce.TaskAttemptContext) */ public RecordWriter getRecordWriter( - TaskAttemptContext context) + TaskAttemptContext context) throws IOException, InterruptedException { // expecting exactly one path String tableName = context.getConfiguration().get(OUTPUT_TABLE); HTable table = null; try { - table = new HTable(new HBaseConfiguration(context.getConfiguration()), + table = new HTable(new HBaseConfiguration(context.getConfiguration()), tableName); } catch(IOException e) { LOG.error(e); @@ -125,9 +125,9 @@ public class TableOutputFormat extends OutputFormat { /** * Checks if the output target exists. - * + * * @param context The current context. - * @throws IOException When the check fails. + * @throws IOException When the check fails. * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.OutputFormat#checkOutputSpecs(org.apache.hadoop.mapreduce.JobContext) */ @@ -135,12 +135,12 @@ public class TableOutputFormat extends OutputFormat { public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException { // TODO Check if the table exists? - + } /** * Returns the output committer. - * + * * @param context The current context. * @return The committer. * @throws IOException When creating the committer fails. @@ -148,9 +148,9 @@ public class TableOutputFormat extends OutputFormat { * @see org.apache.hadoop.mapreduce.OutputFormat#getOutputCommitter(org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public OutputCommitter getOutputCommitter(TaskAttemptContext context) + public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException, InterruptedException { return new TableOutputCommitter(); } - + } diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java b/src/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java index 64540ac..d087f85 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java @@ -24,16 +24,16 @@ import org.apache.hadoop.mapreduce.Reducer; /** * Extends the basic Reducer class to add the required key and - * value input/output classes. 
While the input key and value as well as the - * output key can be anything handed in from the previous map phase the output - * value must be either a {@link org.apache.hadoop.hbase.client.Put Put} + * value input/output classes. While the input key and value as well as the + * output key can be anything handed in from the previous map phase the output + * value must be either a {@link org.apache.hadoop.hbase.client.Put Put} * or a {@link org.apache.hadoop.hbase.client.Delete Delete} instance when * using the {@link TableOutputFormat} class. *
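
A sketch of the kind of subclass this describes: a reducer that folds its input values into a Put so TableOutputFormat can persist them. Class, family and qualifier names are invented; only Put.add(family, qualifier, value) and the TableReducer type parameters documented above are assumed.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;

// Hypothetical reducer: sums per-row counts and writes them back as a Put.
public class CountToTableReducer
extends TableReducer<ImmutableBytesWritable, IntWritable, ImmutableBytesWritable> {
  @Override
  public void reduce(ImmutableBytesWritable key, Iterable<IntWritable> values,
      Context context) throws IOException, InterruptedException {
    int sum = 0;
    for (IntWritable v : values) {
      sum += v.get();
    }
    Put put = new Put(key.get());
    put.add(Bytes.toBytes("stats"), Bytes.toBytes("count"), Bytes.toBytes(sum));
    context.write(key, put);          // TableOutputFormat ignores the key
  }
}
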

        - * This class is extended by {@link IdentityTableReducer} but can also be + * This class is extended by {@link IdentityTableReducer} but can also be * subclassed to implement similar features or any custom code needed. It has - * the advantage to enforce the output value to a specific basic type. - * + * the advantage to enforce the output value to a specific basic type. + * * @param The type of the input key. * @param The type of the input value. * @param The type of the output key. diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java b/src/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java index 9caca06..53c8ebb 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java @@ -32,9 +32,9 @@ import org.apache.hadoop.mapreduce.InputSplit; * A table split corresponds to a key range (low, high). All references to row * below refer to the key of the row. */ -public class TableSplit extends InputSplit +public class TableSplit extends InputSplit implements Writable, Comparable { - + private byte [] tableName; private byte [] startRow; private byte [] endRow; @@ -48,7 +48,7 @@ implements Writable, Comparable { /** * Creates a new instance while assigning all variables. - * + * * @param tableName The name of the current table. * @param startRow The start row of the split. * @param endRow The end row of the split. @@ -64,8 +64,8 @@ implements Writable, Comparable { /** * Returns the table name. - * - * @return The table name. + * + * @return The table name. */ public byte [] getTableName() { return tableName; @@ -73,26 +73,26 @@ implements Writable, Comparable { /** * Returns the start row. - * + * * @return The start row. - */ + */ public byte [] getStartRow() { return startRow; } /** * Returns the end row. - * - * @return The end row. + * + * @return The end row. */ public byte [] getEndRow() { return endRow; } - /** + /** * Returns the region location. - * - * @return The region's location. + * + * @return The region's location. */ public String getRegionLocation() { return regionLocation; @@ -100,7 +100,7 @@ implements Writable, Comparable { /** * Returns the region's location as an array. - * + * * @return The array containing the region location. * @see org.apache.hadoop.mapreduce.InputSplit#getLocations() */ @@ -111,7 +111,7 @@ implements Writable, Comparable { /** * Returns the length of the split. - * + * * @return The length of the split. * @see org.apache.hadoop.mapreduce.InputSplit#getLength() */ @@ -123,7 +123,7 @@ implements Writable, Comparable { /** * Reads the values of each field. - * + * * @param in The input to read from. * @throws IOException When reading the input fails. */ @@ -137,7 +137,7 @@ implements Writable, Comparable { /** * Writes the field values to the output. - * + * * @param out The output to write to. * @throws IOException When writing the values to the output fails. */ @@ -151,7 +151,7 @@ implements Writable, Comparable { /** * Returns the details about this instance as a string. - * + * * @return The values of this instance as a string. * @see java.lang.Object#toString() */ @@ -163,7 +163,7 @@ implements Writable, Comparable { /** * Compares this split against the given one. - * + * * @param split The split to compare to. * @return The result of the comparison. 
* @see java.lang.Comparable#compareTo(java.lang.Object) @@ -172,5 +172,5 @@ implements Writable, Comparable { public int compareTo(TableSplit split) { return Bytes.compareTo(getStartRow(), split.getStartRow()); } - + } diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/package-info.java b/src/java/org/apache/hadoop/hbase/mapreduce/package-info.java index 1ef5e2e..152d8fe 100644 --- a/src/java/org/apache/hadoop/hbase/mapreduce/package-info.java +++ b/src/java/org/apache/hadoop/hbase/mapreduce/package-info.java @@ -97,7 +97,7 @@ below. If running the reduce step makes sense in your case, its usually better to have lots of reducers so load is spread across the hbase cluster.

        There is also a new hbase partitioner that will run as many reducers as -currently existing regions. The +currently existing regions. The {@link org.apache.hadoop.hbase.mapreduce.HRegionPartitioner} is suitable when your table is large and your upload is not such that it will greatly alter the number of existing regions when done; otherwise use the default @@ -117,7 +117,7 @@ The row id must be formatted as a {@link org.apache.hadoop.hbase.io.ImmutableByt value as a {@link org.apache.hadoop.hbase.KeyValue} (A KeyValue holds the value for a cell and its coordinates; row/family/qualifier/timestamp, etc.). Note that you must specify a timestamp when you create the KeyValue in your map task -otherwise the KeyValue will be created with the default LATEST_TIMESTAMP (Long.MAX_VALUE). +otherwise the KeyValue will be created with the default LATEST_TIMESTAMP (Long.MAX_VALUE). Use System.currentTimeMillis() if your data does not inherently bear a timestamp. Your reduce task will also need to emit the KeyValues in order. See {@link org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer} @@ -134,7 +134,7 @@ Given the default hash Partitioner, if the keys were 0-4 (inclusive), and you had configured two reducers, reducer 0 would have get keys 0, 2 and 4 whereas reducer 1 would get keys 1 and 3 (in order). For your bulk import to work, the keys need to be orderd so reducer 0 gets keys 0-2 and reducer 1 gets keys -3-4 (See TotalOrderPartitioner up in hadoop for more on what this means). +3-4 (See TotalOrderPartitioner up in hadoop for more on what this means). To achieve total ordering, you will likely need to write a Partitioner that is intimate with your tables key namespace and that knows how to distribute keys among the reducers so a total order is maintained diff --git a/src/java/org/apache/hadoop/hbase/master/AddColumn.java b/src/java/org/apache/hadoop/hbase/master/AddColumn.java index c46aa41..a2cff9d 100644 --- a/src/java/org/apache/hadoop/hbase/master/AddColumn.java +++ b/src/java/org/apache/hadoop/hbase/master/AddColumn.java @@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface; class AddColumn extends ColumnOperation { private final HColumnDescriptor newColumn; - AddColumn(final HMaster master, final byte [] tableName, - final HColumnDescriptor newColumn) + AddColumn(final HMaster master, final byte [] tableName, + final HColumnDescriptor newColumn) throws IOException { super(master, tableName); this.newColumn = newColumn; diff --git a/src/java/org/apache/hadoop/hbase/master/BaseScanner.java b/src/java/org/apache/hadoop/hbase/master/BaseScanner.java index d0db0c1..f421beb 100644 --- a/src/java/org/apache/hadoop/hbase/master/BaseScanner.java +++ b/src/java/org/apache/hadoop/hbase/master/BaseScanner.java @@ -56,11 +56,11 @@ import org.apache.hadoop.ipc.RemoteException; /** * Base HRegion scanner class. Holds utilty common to ROOT and * META HRegion scanners. - * + * *
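
Referring back to the bulk-import note in the package documentation above: a map task must stamp every KeyValue itself, otherwise the cell is written with LATEST_TIMESTAMP. Inside a map task that could look like the following fragment (row, family, qualifier and value are invented):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

// Give the cell an explicit timestamp; use the record's own timestamp if it has one.
long ts = System.currentTimeMillis();
KeyValue kv = new KeyValue(Bytes.toBytes("row-0001"), Bytes.toBytes("cf"),
    Bytes.toBytes("q"), ts, Bytes.toBytes("value"));
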

        How do we know if all regions are assigned? After the initial scan of * the ROOT and META regions, all regions known at * that time will have been or are in the process of being assigned.

        - * + * *

        When a region is split the region server notifies the master of the * split and the new regions are assigned. But suppose the master loses the * split message? We need to periodically rescan the ROOT and @@ -69,34 +69,34 @@ import org.apache.hadoop.ipc.RemoteException; *

      • If we rescan, any regions that are new but not assigned will have * no server info. Any regions that are not being served by the same * server will get re-assigned.
      • - * + * *
      • Thus a periodic rescan of the root region will find any new * META regions where we missed the META split * message or we failed to detect a server death and consequently need to * assign the region to a new server.
      • - * + * *
      • if we keep track of all the known META regions, then * we can rescan them periodically. If we do this then we can detect any * regions for which we missed a region split message.
      • *
      - * + * * Thus just keeping track of all the META regions permits * periodic rescanning which will detect unassigned regions (new or * otherwise) without the need to keep track of every region.

      - * + * *

      So the ROOT region scanner needs to wake up: *

        *
      1. when the master receives notification that the ROOT * region has been opened.
      2. *
      3. periodically after the first scan
      4. *
      - * + * * The META scanner needs to wake up: *
        *
      1. when a META region comes on line
      2. * periodically to rescan the online META regions *
      - * + * *

      A META region is not 'online' until it has been scanned * once. */ @@ -120,16 +120,16 @@ abstract class BaseScanner extends Chore implements HConstants { } private final boolean rootRegion; protected final HMaster master; - + protected boolean initialScanComplete; - + protected abstract boolean initialScan(); protected abstract void maintenanceScan(); - - // will use this variable to synchronize and make sure we aren't interrupted + + // will use this variable to synchronize and make sure we aren't interrupted // mid-scan final Object scannerLock = new Object(); - + BaseScanner(final HMaster master, final boolean rootRegion, final int period, final AtomicBoolean stop) { super(period, stop); @@ -137,17 +137,17 @@ abstract class BaseScanner extends Chore implements HConstants { this.master = master; this.initialScanComplete = false; } - + /** @return true if initial scan completed successfully */ public boolean isInitialScanComplete() { return initialScanComplete; } - + @Override protected boolean initialChore() { return initialScan(); } - + @Override protected void chore() { maintenanceScan(); @@ -203,7 +203,7 @@ abstract class BaseScanner extends Chore implements HConstants { e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e); if (e instanceof UnknownScannerException) { // Reset scannerId so we do not try closing a scanner the other side - // has lost account of: prevents duplicated stack trace out of the + // has lost account of: prevents duplicated stack trace out of the // below close in the finally. scannerId = -1L; } @@ -221,7 +221,7 @@ abstract class BaseScanner extends Chore implements HConstants { } // Scan is finished. - + // First clean up any meta region rows which had null HRegionInfos if (emptyRows.size() > 0) { LOG.warn("Found " + emptyRows.size() + " rows with empty HRegionInfo " + @@ -282,7 +282,7 @@ abstract class BaseScanner extends Chore implements HConstants { * the filesystem, then a daughters was not added to .META. -- must have been * a crash before their addition. Add them here. * @param metaRegionName Meta region name: e.g. .META.,,1 - * @param server HRegionInterface of meta server to talk to + * @param server HRegionInterface of meta server to talk to * @param parent HRegionInfo of split offlined parent * @param rowContent Content of parent row in * metaRegionName @@ -290,7 +290,7 @@ abstract class BaseScanner extends Chore implements HConstants { * the filesystem. * @throws IOException */ - private boolean cleanupAndVerifySplits(final byte [] metaRegionName, + private boolean cleanupAndVerifySplits(final byte [] metaRegionName, final HRegionInterface srvr, final HRegionInfo parent, Result rowContent) throws IOException { @@ -312,7 +312,7 @@ abstract class BaseScanner extends Chore implements HConstants { return result; } - + /* * See if the passed daughter has references in the filesystem to the parent * and if not, remove the note of daughter region in the parent row: its @@ -328,7 +328,7 @@ abstract class BaseScanner extends Chore implements HConstants { * @return True if this daughter still has references to the parent. 
* @throws IOException */ - private boolean checkDaughter(final byte [] metaRegionName, + private boolean checkDaughter(final byte [] metaRegionName, final HRegionInterface srvr, final HRegionInfo parent, final Result rowContent, final byte [] qualifier) throws IOException { @@ -394,7 +394,7 @@ abstract class BaseScanner extends Chore implements HConstants { * @param daughter * @throws IOException */ - private void addDaughterRowChecked(final byte [] metaRegionName, + private void addDaughterRowChecked(final byte [] metaRegionName, final HRegionInterface srvr, final byte [] parent, final HRegionInfo split, final byte [] daughter) throws IOException { @@ -463,7 +463,7 @@ abstract class BaseScanner extends Chore implements HConstants { * @param qualifier * @throws IOException */ - private void removeDaughterFromParent(final byte [] metaRegionName, + private void removeDaughterFromParent(final byte [] metaRegionName, final HRegionInterface srvr, final HRegionInfo parent, final HRegionInfo split, final byte [] qualifier) throws IOException { @@ -476,20 +476,20 @@ abstract class BaseScanner extends Chore implements HConstants { srvr.delete(metaRegionName, delete); } - /* + /* * Checks if a daughter region -- either splitA or splitB -- still holds * references to parent. If not, removes reference to the split from * the parent meta region row so we don't check it any more. * @param metaRegionName Name of meta region to look in. * @param srvr Where region resides. - * @param parent Parent region name. + * @param parent Parent region name. * @param rowContent Keyed content of the parent row in meta region. * @param split Which column family. * @param qualifier Which of the daughters to look at, splitA or splitB. * @return True if still has references to parent. 
* @throws IOException */ - private boolean hasReferences(final byte [] metaRegionName, + private boolean hasReferences(final byte [] metaRegionName, final HRegionInterface srvr, final HRegionInfo parent, Result rowContent, final HRegionInfo split, byte [] qualifier) throws IOException { @@ -534,7 +534,7 @@ abstract class BaseScanner extends Chore implements HConstants { */ protected void checkAssigned(final HRegionInterface regionServer, final MetaRegion meta, final HRegionInfo info, - final String serverAddress, final long startCode) + final String serverAddress, final long startCode) throws IOException { String sa = serverAddress; long sc = startCode; diff --git a/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java b/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java index ead18d8..277f0ea 100644 --- a/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java +++ b/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java @@ -47,8 +47,8 @@ class ChangeTableState extends TableOperation { new TreeMap>(); protected long lockid; - ChangeTableState(final HMaster master, final byte [] tableName, - final boolean onLine) + ChangeTableState(final HMaster master, final byte [] tableName, + final boolean onLine) throws IOException { super(master, tableName); this.online = onLine; diff --git a/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java b/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java index a424b78..42d47b5 100644 --- a/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java +++ b/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java @@ -31,8 +31,8 @@ import org.apache.hadoop.hbase.util.Writables; abstract class ColumnOperation extends TableOperation { private final Log LOG = LogFactory.getLog(this.getClass()); - - protected ColumnOperation(final HMaster master, final byte [] tableName) + + protected ColumnOperation(final HMaster master, final byte [] tableName) throws IOException { super(master, tableName); } diff --git a/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java b/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java index 75b8cad..46e0f55 100644 --- a/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java +++ b/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java @@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.regionserver.Store; class DeleteColumn extends ColumnOperation { private final byte [] columnName; - DeleteColumn(final HMaster master, final byte [] tableName, - final byte [] columnName) + DeleteColumn(final HMaster master, final byte [] tableName, + final byte [] columnName) throws IOException { super(master, tableName); this.columnName = columnName; diff --git a/src/java/org/apache/hadoop/hbase/master/HMaster.java b/src/java/org/apache/hadoop/hbase/master/HMaster.java index acc2eed..9e1cc11 100644 --- a/src/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/src/java/org/apache/hadoop/hbase/master/HMaster.java @@ -95,12 +95,12 @@ import org.apache.zookeeper.Watcher.Event.KeeperState; /** * HMaster is the "master server" for a HBase. * There is only one HMaster for a single HBase deployment. - * + * * NOTE: This class extends Thread rather than Chore because the sleep time * can be interrupted when there is something to do, rather than the Chore * sleep time which is invariant. 
*/ -public class HMaster extends Thread implements HConstants, HMasterInterface, +public class HMaster extends Thread implements HConstants, HMasterInterface, HMasterRegionInterface, Watcher { static final Log LOG = LogFactory.getLog(HMaster.class.getName()); @@ -120,7 +120,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, private final HBaseConfiguration conf; final FileSystem fs; final Random rand; - final int threadWakeFrequency; + final int threadWakeFrequency; final int numRetries; final long maxRegionOpenTime; final int leaseTimeout; @@ -134,14 +134,14 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, final ServerConnection connection; final int metaRescanInterval; - + // A Sleeper that sleeps for threadWakeFrequency private final Sleeper sleeper; - + // Default access so accesible from unit tests. MASTER is name of the webapp // and the attribute name used stuffing this instance into web context. InfoServer infoServer; - + /** Name of master server */ public static final String MASTER = "master"; @@ -152,14 +152,14 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, ServerManager serverManager; RegionManager regionManager; - + private MasterMetrics metrics; final Lock splitLogLock = new ReentrantLock(); - /** + /** * Build the HMaster out of a raw configuration item. * @param conf configuration - * + * * @throws IOException */ public HMaster(HBaseConfiguration conf) throws IOException { @@ -167,7 +167,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, String addressStr = DNS.getDefaultHost( conf.get("hbase.master.dns.interface","default"), conf.get("hbase.master.dns.nameserver","default")); - addressStr += ":" + + addressStr += ":" + conf.get(MASTER_PORT, Integer.toString(DEFAULT_MASTER_PORT)); HServerAddress hsa = new HServerAddress(addressStr); LOG.info("My address is " + hsa); @@ -196,7 +196,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, try { // Make sure the hbase root directory exists! if (!fs.exists(rootdir)) { - fs.mkdirs(rootdir); + fs.mkdirs(rootdir); FSUtils.setVersion(fs, rootdir); } else { FSUtils.checkVersion(fs, rootdir, true); @@ -218,7 +218,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, this.server = HBaseRPC.getServer(this, hsa.getBindAddress(), hsa.getPort(), conf.getInt("hbase.regionserver.handler.count", 10), false, conf); - + // The rpc-server port can be ephemeral... ensure we have the correct info this.address = new HServerAddress(server.getListenerAddress()); @@ -230,16 +230,16 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, conf.getInt("hbase.master.meta.thread.rescanfrequency", 60 * 1000); this.sleeper = new Sleeper(this.threadWakeFrequency, this.closed); - + zooKeeperWrapper = new ZooKeeperWrapper(conf, this); zkMasterAddressWatcher = new ZKMasterAddressWatcher(this); serverManager = new ServerManager(this); regionManager = new RegionManager(this); - + writeAddressToZooKeeper(true); this.regionServerOperationQueue = new RegionServerOperationQueue(this.conf, this.closed); - + // We're almost open for business this.closed.set(false); LOG.info("HMaster initialized on " + this.address.toString()); @@ -330,7 +330,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, public HServerAddress getMasterAddress() { return address; } - + /** * @return Hbase root dir. 
*/ @@ -359,18 +359,18 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, public Map getServersToLoad() { return serverManager.getServersToLoad(); } - + /** @return The average load */ public double getAverageLoad() { return serverManager.getAverageLoad(); } - + /** @return the number of regions on filesystem */ public int countRegionsOnFS() { try { return regionManager.countRegionsOnFS(); } catch (IOException e) { - LOG.warn("Get count of Regions on FileSystem error : " + + LOG.warn("Get count of Regions on FileSystem error : " + StringUtils.stringifyException(e)); } return -1; @@ -386,14 +386,14 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, } return rootServer; } - + /** * Wait until root region is available */ public void waitForRootRegionLocation() { regionManager.waitForRootRegionLocation(); } - + /** * @return Read-only map of online regions. */ @@ -446,7 +446,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, LOG.fatal("Unhandled exception. Starting shutdown.", t); closed.set(true); } - + // Wait for all the remaining region servers to report in. this.serverManager.letRegionServersShutdown(); @@ -473,14 +473,14 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, /* * Verifies if this instance of HBase is fresh or the master was started * following a failover. In the second case, it inspects the region server - * directory and gets their regions assignment. + * directory and gets their regions assignment. */ private void verifyClusterState() { try { LOG.debug("Checking cluster state..."); HServerAddress rootLocation = zooKeeperWrapper.readRootRegionLocation(); List addresses = zooKeeperWrapper.scanRSDirectory(); - + // Check if this is a fresh start of the cluster if(addresses.size() == 0) { LOG.debug("This is a fresh start, proceeding with normal startup"); @@ -489,13 +489,13 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, } LOG.info("This is a failover, ZK inspection begins..."); boolean isRootRegionAssigned = false; - Map assignedRegions = + Map assignedRegions = new HashMap(); // This is a failover case. We must: // - contact every region server to add them to the regionservers list - // - get their current regions assignment + // - get their current regions assignment for (HServerAddress address : addresses) { - HRegionInterface hri = + HRegionInterface hri = this.connection.getHRegionConnection(address, false); HServerInfo info = hri.getHServerInfo(); LOG.debug("Inspection found server " + info.getName()); @@ -519,14 +519,14 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, assignedRegions.put(region.getRegionName(), region); } } - LOG.info("Inspection found " + assignedRegions.size() + " regions, " + + LOG.info("Inspection found " + assignedRegions.size() + " regions, " + (isRootRegionAssigned ? "with -ROOT-" : "but -ROOT- was MIA")); splitLogAfterStartup(); } catch(IOException ex) { ex.printStackTrace(); } } - + /** * Inspect the log directory to recover any log file without * and active region server. @@ -630,7 +630,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, // Send back some config info return createConfigurationSubset(); } - + /** * @return Subset of configuration to pass initializing regionservers: e.g. * the filesystem to use and root directory to use. 
@@ -642,7 +642,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, if (rsAddress != null) { mw.put(new Text("hbase.regionserver.address"), new Text(rsAddress)); } - + return addConfig(mw, "fs.default.name"); } @@ -651,7 +651,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, return mw; } - public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg msgs[], + public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg msgs[], HRegionInfo[] mostLoadedRegions) throws IOException { return adornRegionServerAnswer(serverInfo, @@ -661,7 +661,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, /** * Override if you'd add messages to return to regionserver hsi * @param messages Messages to add to - * @return Messages to return to + * @return Messages to return to */ protected HMsg [] adornRegionServerAnswer(final HServerInfo hsi, final HMsg [] msgs) { @@ -683,7 +683,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, } public void createTable(HTableDescriptor desc, byte [][] splitKeys) - throws IOException { + throws IOException { if (!isMasterRunning()) { throw new MasterNotRunningException(); } @@ -725,7 +725,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, } } - private synchronized void createTable(final HRegionInfo [] newRegions) + private synchronized void createTable(final HRegionInfo [] newRegions) throws IOException { String tableName = newRegions[0].getTableDesc().getNameAsString(); // 1. Check to see if table already exists. Get meta region where @@ -733,7 +733,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, // for the table we want to create already exists, then table already // created. Throw already-exists exception. MetaRegion m = regionManager.getFirstMetaRegionForRegion(newRegions[0]); - + byte [] metaRegionName = m.getRegionName(); HRegionInterface srvr = connection.getHRegionConnection(m.getServer()); byte[] firstRowInTable = Bytes.toBytes(tableName + ",,"); @@ -767,11 +767,11 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, } public void addColumn(byte [] tableName, HColumnDescriptor column) - throws IOException { + throws IOException { new AddColumn(this, tableName, column).process(); } - public void modifyColumn(byte [] tableName, byte [] columnName, + public void modifyColumn(byte [] tableName, byte [] columnName, HColumnDescriptor descriptor) throws IOException { new ModifyColumn(this, tableName, columnName, descriptor).process(); @@ -898,7 +898,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, } return null; } - + /** * Get row from meta table. * @param row @@ -914,7 +914,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, get.addFamily(family); return srvr.get(meta.getRegionName(), get); } - + /* * @param meta * @return Server connection to meta .META. region. 
@@ -925,12 +925,12 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, return this.connection.getHRegionConnection(meta.getServer()); } - public void modifyTable(final byte[] tableName, HConstants.Modify op, + public void modifyTable(final byte[] tableName, HConstants.Modify op, Writable[] args) throws IOException { switch (op) { case TABLE_SET_HTD: - if (args == null || args.length < 1 || + if (args == null || args.length < 1 || !(args[0] instanceof HTableDescriptor)) throw new IOException("SET_HTD request requires an HTableDescriptor"); HTableDescriptor htd = (HTableDescriptor) args[0]; @@ -977,12 +977,12 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, if (args.length == 2) { servername = Bytes.toString(((ImmutableBytesWritable)args[1]).get()); } - // Need hri + // Need hri Result rr = getFromMETA(regionname, HConstants.CATALOG_FAMILY); HRegionInfo hri = getHRegionInfo(rr.getRow(), rr); if (servername == null) { // Get server from the .META. if it wasn't passed as argument - servername = + servername = Bytes.toString(rr.getValue(CATALOG_FAMILY, SERVER_QUALIFIER)); } // Take region out of the intransistions in case it got stuck there doing @@ -1035,7 +1035,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, public HBaseConfiguration getConfiguration() { return this.conf; } - + // TODO ryan rework this function /* * Get HRegionInfo from passed META map of row values. @@ -1069,12 +1069,12 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, /* * When we find rows in a meta region that has an empty HRegionInfo, we * clean them up here. - * + * * @param s connection to server serving meta region * @param metaRegionName name of the meta region we scanned * @param emptyRows the row keys that had empty HRegionInfos */ - protected void deleteEmptyMetaRows(HRegionInterface s, + protected void deleteEmptyMetaRows(HRegionInterface s, byte [] metaRegionName, List emptyRows) { for (byte [] regionName: emptyRows) { @@ -1089,7 +1089,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, } } } - + /** * Get the ZK wrapper object * @return the zookeeper wrapper @@ -1097,20 +1097,20 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, public ZooKeeperWrapper getZooKeeperWrapper() { return zooKeeperWrapper; } - + /** * @see org.apache.zookeeper.Watcher#process(org.apache.zookeeper.WatchedEvent) */ @Override public void process(WatchedEvent event) { - LOG.debug(("Got event " + event.getType() + + LOG.debug(("Got event " + event.getType() + " with path " + event.getPath())); - // Master should kill itself if its session expired or if its + // Master should kill itself if its session expired or if its // znode was deleted manually (usually for testing purposes) - if(event.getState() == KeeperState.Expired || - (event.getType().equals(EventType.NodeDeleted) && + if(event.getState() == KeeperState.Expired || + (event.getType().equals(EventType.NodeDeleted) && event.getPath().equals( - this.zooKeeperWrapper.getMasterElectionZNode())) + this.zooKeeperWrapper.getMasterElectionZNode())) && !shutdownRequested.get()) { LOG.info("Master lost its znode, trying to get a new one"); @@ -1133,7 +1133,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, } } } - + /* * Main program */ @@ -1251,7 +1251,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface, printUsageAndExit(); } } - + /** * Main program 
* @param args diff --git a/src/java/org/apache/hadoop/hbase/master/MetaRegion.java b/src/java/org/apache/hadoop/hbase/master/MetaRegion.java index b85b413..480a98c 100644 --- a/src/java/org/apache/hadoop/hbase/master/MetaRegion.java +++ b/src/java/org/apache/hadoop/hbase/master/MetaRegion.java @@ -42,7 +42,7 @@ public class MetaRegion implements Comparable { } this.regionInfo = regionInfo; } - + @Override public String toString() { return "{server: " + this.server.toString() + ", regionname: " + @@ -65,13 +65,13 @@ public class MetaRegion implements Comparable { return regionInfo.getStartKey(); } - + /** @return the endKey */ public byte [] getEndKey() { return regionInfo.getEndKey(); } - + public HRegionInfo getRegionInfo() { return regionInfo; } diff --git a/src/java/org/apache/hadoop/hbase/master/MetaScanner.java b/src/java/org/apache/hadoop/hbase/master/MetaScanner.java index 8c619c7..7143827 100644 --- a/src/java/org/apache/hadoop/hbase/master/MetaScanner.java +++ b/src/java/org/apache/hadoop/hbase/master/MetaScanner.java @@ -30,24 +30,24 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler; /** * MetaScanner META table. - * + * * When a META server comes on line, a MetaRegion object is * queued up by regionServerReport() and this thread wakes up. * - * It's important to do this work in a separate thread, or else the blocking + * It's important to do this work in a separate thread, or else the blocking * action would prevent other work from getting done. */ class MetaScanner extends BaseScanner { /** Initial work for the meta scanner is queued up here */ private volatile BlockingQueue metaRegionsToScan = new LinkedBlockingQueue(); - + private final List metaRegionsToRescan = new ArrayList(); - + /** * Constructor - * + * * @param master */ public MetaScanner(HMaster master) { @@ -88,7 +88,7 @@ class MetaScanner extends BaseScanner { // Make sure the file system is still available this.master.checkFileSystem(); } catch (Exception e) { - // If for some reason we get some other kind of exception, + // If for some reason we get some other kind of exception, // at least log it rather than go out silently. LOG.error("Unexpected exception", e); } @@ -102,7 +102,7 @@ class MetaScanner extends BaseScanner { (region == null && metaRegionsToScan.size() > 0) && !metaRegionsScanned()) { try { - region = metaRegionsToScan.poll(this.master.threadWakeFrequency, + region = metaRegionsToScan.poll(this.master.threadWakeFrequency, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { // continue @@ -134,7 +134,7 @@ class MetaScanner extends BaseScanner { } /* - * Called by the meta scanner when it has completed scanning all meta + * Called by the meta scanner when it has completed scanning all meta * regions. This wakes up any threads that were waiting for this to happen. * @param totalRows Total rows scanned. * @param regionCount Count of regions in .META. table. @@ -171,10 +171,10 @@ class MetaScanner extends BaseScanner { } return this.master.closed.get(); } - + /** * Add another meta region to scan to the queue. 
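For illustration only (not from this patch or the HBase APIs): the queue-and-worker hand-off that the MetaScanner comments above describe, sketched with plain java.util.concurrent types; every name below is made up for the example.

// Illustrative sketch only: one thread queues work, a worker polls with a
// timeout so it can also notice shutdown instead of blocking forever.
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

class QueueDrivenWorker implements Runnable {
  private final BlockingQueue<String> work = new LinkedBlockingQueue<String>();
  private final AtomicBoolean closed = new AtomicBoolean(false);

  // Producer side, analogous to queuing up a region to scan.
  void submit(String item) {
    work.add(item);
  }

  public void run() {
    while (!closed.get()) {
      try {
        // Poll with a timeout so the loop can re-check the closed flag,
        // similar in spirit to poll(threadWakeFrequency, MILLISECONDS) above.
        String item = work.poll(1000, TimeUnit.MILLISECONDS);
        if (item != null) {
          process(item);
        }
      } catch (InterruptedException e) {
        // Wake up and re-check the closed flag.
      }
    }
  }

  private void process(String item) {
    System.out.println("scanning " + item);
  }

  void shutdown() {
    closed.set(true);
  }
}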
- */ + */ void addMetaRegionToScan(MetaRegion m) { metaRegionsToScan.add(m); } diff --git a/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java b/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java index c50ca5d..82eb31a 100644 --- a/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java +++ b/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java @@ -29,9 +29,9 @@ import org.apache.hadoop.hbase.HRegionInfo; class ModifyColumn extends ColumnOperation { private final HColumnDescriptor descriptor; private final byte [] columnName; - - ModifyColumn(final HMaster master, final byte [] tableName, - final byte [] columnName, HColumnDescriptor descriptor) + + ModifyColumn(final HMaster master, final byte [] tableName, + final byte [] columnName, HColumnDescriptor descriptor) throws IOException { super(master, tableName); this.descriptor = descriptor; @@ -47,7 +47,7 @@ class ModifyColumn extends ColumnOperation { updateRegionInfo(server, m.getRegionName(), i); } else { // otherwise, we have an error. throw new InvalidColumnNameException("Column family '" + - Bytes.toString(columnName) + + Bytes.toString(columnName) + "' doesn't exist, so cannot be modified."); } } diff --git a/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java b/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java index d066739..6c8875d 100644 --- a/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java +++ b/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java @@ -39,8 +39,8 @@ class ModifyTableMeta extends TableOperation { private HTableDescriptor desc; - ModifyTableMeta(final HMaster master, final byte [] tableName, - HTableDescriptor desc) + ModifyTableMeta(final HMaster master, final byte [] tableName, + HTableDescriptor desc) throws IOException { super(master, tableName); this.desc = desc; diff --git a/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java b/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java index b5d9215..e3d36a8 100644 --- a/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java +++ b/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java @@ -26,10 +26,10 @@ import org.apache.hadoop.hbase.HRegionInfo; /** * ProcessRegionClose is the way we do post-processing on a closed region. We - * only spawn one of these asynchronous tasks when the region needs to be + * only spawn one of these asynchronous tasks when the region needs to be * either offlined or deleted. We used to create one of these tasks whenever * a region was closed, but since closing a region that isn't being offlined - * or deleted doesn't actually require post processing, it's no longer + * or deleted doesn't actually require post processing, it's no longer * necessary. */ class ProcessRegionClose extends ProcessRegionStatusChange { @@ -42,7 +42,7 @@ class ProcessRegionClose extends ProcessRegionStatusChange { * @param offlineRegion if true, set the region to offline in meta * @param reassignRegion if true, region is to be reassigned */ - public ProcessRegionClose(HMaster master, HRegionInfo regionInfo, + public ProcessRegionClose(HMaster master, HRegionInfo regionInfo, boolean offlineRegion, boolean reassignRegion) { super(master, regionInfo); @@ -73,7 +73,7 @@ class ProcessRegionClose extends ProcessRegionStatusChange { // We can't proceed unless the meta region we are going to update // is online. 
metaRegionAvailable() will put this operation on the - // delayedToDoQueue, so return true so the operation is not put + // delayedToDoQueue, so return true so the operation is not put // back on the toDoQueue if(offlineRegion) { diff --git a/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java b/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java index 72c39ef..9cd4a74 100644 --- a/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java +++ b/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java @@ -28,9 +28,9 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.util.Bytes; -/** +/** * ProcessRegionOpen is instantiated when a region server reports that it is - * serving a region. This applies to all meta and user regions except the + * serving a region. This applies to all meta and user regions except the * root region which is handled specially. */ class ProcessRegionOpen extends ProcessRegionStatusChange { diff --git a/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java b/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java index ed02b7e..3fcdfd8 100644 --- a/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java +++ b/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; /** - * Abstract class that performs common operations for + * Abstract class that performs common operations for * @see #ProcessRegionClose and @see #ProcessRegionOpen */ abstract class ProcessRegionStatusChange extends RegionServerOperation { @@ -41,7 +41,7 @@ abstract class ProcessRegionStatusChange extends RegionServerOperation { this.regionInfo = regionInfo; this.isMetaTable = regionInfo.isMetaTable(); } - + protected boolean metaRegionAvailable() { boolean available = true; if (isMetaTable) { diff --git a/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java b/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java index e92a990..bd87f51 100644 --- a/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java +++ b/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.master.RegionManager.RegionState; -/** +/** * Instantiated when a server's lease has expired, meaning it has crashed. * The region server's log file needs to be split up for each region it was * serving, and the regions need to get reassigned. @@ -117,19 +117,19 @@ class ProcessServerShutdown extends RegionServerOperation { return this.deadServerAddress; } - private void closeRegionsInTransition() { + private void closeRegionsInTransition() { Map inTransition = master.regionManager.getRegionsInTransitionOnServer(deadServer); for (Map.Entry entry : inTransition.entrySet()) { String regionName = entry.getKey(); RegionState state = entry.getValue(); - + LOG.info("Region " + regionName + " was in transition " + state + " on dead server " + deadServer + " - marking unassigned"); master.regionManager.setUnassigned(state.getRegionInfo(), true); } } - + @Override public String toString() { return "ProcessServerShutdown of " + this.deadServer; @@ -225,7 +225,7 @@ class ProcessServerShutdown extends RegionServerOperation { } // Scan complete. 
Remove any rows which had empty HRegionInfos - + if (emptyRows.size() > 0) { LOG.warn("Found " + emptyRows.size() + " rows with empty HRegionInfo while scanning meta region " + @@ -269,7 +269,7 @@ class ProcessServerShutdown extends RegionServerOperation { ScanMetaRegions(MetaRegion m, HMaster master) { super(m, master); } - + public Boolean call() throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("process server shutdown scanning " + @@ -289,9 +289,9 @@ class ProcessServerShutdown extends RegionServerOperation { LOG.info("process shutdown of server " + this.deadServer + ": logSplit: " + logSplit + ", rootRescanned: " + rootRescanned + - ", numberOfMetaRegions: " + + ", numberOfMetaRegions: " + master.regionManager.numMetaRegions() + - ", onlineMetaRegions.size(): " + + ", onlineMetaRegions.size(): " + master.regionManager.numOnlineMetaRegions()); if (!logSplit) { // Process the old log file @@ -345,7 +345,7 @@ class ProcessServerShutdown extends RegionServerOperation { if (LOG.isDebugEnabled()) { LOG.debug("process server shutdown scanning root region on " + - master.getRootRegionLocation().getBindAddress() + + master.getRootRegionLocation().getBindAddress() + " finished " + Thread.currentThread().getName()); } rootRescanned = true; @@ -368,7 +368,7 @@ class ProcessServerShutdown extends RegionServerOperation { Bytes.toString(r.getRegionName()) + " on " + r.getServer()); } } - + closeRegionsInTransition(); // Remove this server from dead servers list. Finished splitting logs. diff --git a/src/java/org/apache/hadoop/hbase/master/RegionManager.java b/src/java/org/apache/hadoop/hbase/master/RegionManager.java index 3d5c99e..0ea19f6 100644 --- a/src/java/org/apache/hadoop/hbase/master/RegionManager.java +++ b/src/java/org/apache/hadoop/hbase/master/RegionManager.java @@ -58,18 +58,18 @@ import org.apache.hadoop.hbase.util.Writables; /** * Class to manage assigning regions to servers, state of root and meta, etc. 
- */ + */ class RegionManager implements HConstants { protected static final Log LOG = LogFactory.getLog(RegionManager.class); - + private AtomicReference rootRegionLocation = new AtomicReference(null); - + private volatile boolean safeMode = true; - + private final RootScanner rootScannerThread; final MetaScanner metaScannerThread; - + /** Set by root scanner to indicate the number of meta regions */ private final AtomicInteger numberOfMetaRegions = new AtomicInteger(); @@ -78,20 +78,20 @@ class RegionManager implements HConstants { new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR); private static final byte[] OVERLOADED = Bytes.toBytes("Overloaded"); - + private static final byte [] META_REGION_PREFIX = Bytes.toBytes(".META.,"); /** * Map of region name to RegionState for regions that are in transition such as - * + * * unassigned -> pendingOpen -> open * closing -> pendingClose -> closed; if (closed && !offline) -> unassigned - * + * * At the end of a transition, removeRegion is used to remove the region from * the map (since it is no longer in transition) - * + * * Note: Needs to be SortedMap so we can specify a comparator - * + * * @see RegionState inner-class below */ final SortedMap regionsInTransition = @@ -149,7 +149,7 @@ class RegionManager implements HConstants { Threads.setDaemonThreadRunning(rootScannerThread, "RegionManager.rootScanner"); Threads.setDaemonThreadRunning(metaScannerThread, - "RegionManager.metaScanner"); + "RegionManager.metaScanner"); } void unsetRootRegion() { @@ -160,7 +160,7 @@ class RegionManager implements HConstants { LOG.info("-ROOT- region unset (but not set to be reassigned)"); } } - + void reassignRootRegion() { unsetRootRegion(); if (!master.shutdownRequested.get()) { @@ -173,12 +173,12 @@ class RegionManager implements HConstants { } } } - + /* * Assigns regions to region servers attempting to balance the load across - * all region servers. Note that no synchronization is necessary as the caller + * all region servers. Note that no synchronization is necessary as the caller * (ServerManager.processMsgs) already owns the monitor for the RegionManager. - * + * * @param info * @param mostLoadedRegions * @param returnMsgs @@ -204,21 +204,21 @@ class RegionManager implements HConstants { if (isSingleServer) { assignRegionsToOneServer(regionsToAssign, info, returnMsgs); } else { - // otherwise, give this server a few regions taking into account the + // otherwise, give this server a few regions taking into account the // load of all the other servers. assignRegionsToMultipleServers(thisServersLoad, regionsToAssign, info, returnMsgs); } } } - + /* * Make region assignments taking into account multiple servers' loads. * * Note that no synchronization is needed while we iterate over * regionsInTransition because this method is only called by assignRegions * whose caller owns the monitor for RegionManager - * + * * TODO: This code is unintelligible. REWRITE. Add TESTS! St.Ack 09/30/2009 * @param thisServersLoad * @param regionsToAssign @@ -226,7 +226,7 @@ class RegionManager implements HConstants { * @param returnMsgs */ private void assignRegionsToMultipleServers(final HServerLoad thisServersLoad, - final Set regionsToAssign, final HServerInfo info, + final Set regionsToAssign, final HServerInfo info, final ArrayList returnMsgs) { boolean isMetaAssign = false; for (RegionState s : regionsToAssign) { @@ -306,11 +306,11 @@ class RegionManager implements HConstants { /* * Assign all to the only server. An unlikely case but still possible. 
- * + * * Note that no synchronization is needed on regionsInTransition while - * iterating on it because the only caller is assignRegions whose caller owns + * iterating on it because the only caller is assignRegions whose caller owns * the monitor for RegionManager - * + * * @param regionsToAssign * @param serverName * @param returnMsgs @@ -378,11 +378,11 @@ class RegionManager implements HConstants { /* * Get the set of regions that should be assignable in this pass. - * + * * Note that no synchronization on regionsInTransition is needed because the * only caller (assignRegions, whose caller is ServerManager.processMsgs) owns * the monitor for RegionManager - */ + */ private Set regionsAwaitingAssignment(HServerAddress addr, boolean isSingleServer) { // set of regions we want to assign to this server @@ -441,14 +441,14 @@ class RegionManager implements HConstants { return regionsToAssign; } } - + /* * Figure out the load that is next highest amongst all regionservers. Also, - * return how many servers exist at that load. + * return how many servers exist at that load. */ - private int computeNextHeaviestLoad(HServerLoad referenceLoad, + private int computeNextHeaviestLoad(HServerLoad referenceLoad, HServerLoad heavierLoad) { - + SortedMap> heavyServers = new TreeMap>(); synchronized (master.serverManager.loadToServers) { @@ -479,14 +479,14 @@ class RegionManager implements HConstants { * some or all of its most loaded regions, allowing it to reduce its load. * The closed regions will then get picked up by other underloaded machines. * - * Note that no synchronization is needed because the only caller + * Note that no synchronization is needed because the only caller * (assignRegions) whose caller owns the monitor for RegionManager */ - void unassignSomeRegions(final HServerInfo info, + void unassignSomeRegions(final HServerInfo info, int numRegionsToClose, final HRegionInfo[] mostLoadedRegions, ArrayList returnMsgs) { - LOG.debug("Choosing to reassign " + numRegionsToClose - + " regions. mostLoadedRegions has " + mostLoadedRegions.length + LOG.debug("Choosing to reassign " + numRegionsToClose + + " regions. mostLoadedRegions has " + mostLoadedRegions.length + " regions in it."); int regionIdx = 0; int regionsClosed = 0; @@ -531,23 +531,23 @@ class RegionManager implements HConstants { return !pathname.equals(HLog.HREGION_LOGDIR_NAME) && !pathname.equals(VERSION_FILE_NAME); } - + } /* * PathFilter that accepts all but compaction.dir names. */ static class RegionDirFilter implements PathFilter { - public boolean accept(Path path) { + public boolean accept(Path path) { return !path.getName().equals(HREGION_COMPACTIONDIR_NAME); } } /** * @return the rough number of the regions on fs - * Note: this method simply counts the regions on fs by accumulating all the dirs + * Note: this method simply counts the regions on fs by accumulating all the dirs * in each table dir (${HBASE_ROOT}/$TABLE) and skipping logfiles, compaction dirs. - * @throws IOException + * @throws IOException */ public int countRegionsOnFS() throws IOException { int regions = 0; @@ -563,7 +563,7 @@ class RegionManager implements HConstants { } return regions; } - + /** * @return Read-only map of online regions. */ @@ -584,7 +584,7 @@ class RegionManager implements HConstants { } return false; } - + /** * Return a map of the regions in transition on a server. 
* Returned map entries are region name -> RegionState @@ -619,9 +619,9 @@ class RegionManager implements HConstants { LOG.debug("meta and root scanners notified"); } } - + /** Stop the region assigner */ - public void stop() { + public void stop() { try { if (rootScannerThread.isAlive()) { rootScannerThread.join(); // Wait for the root scanner to finish. @@ -639,7 +639,7 @@ class RegionManager implements HConstants { master.getZooKeeperWrapper().clearRSDirectory(); master.getZooKeeperWrapper().close(); } - + /** * Block until meta regions are online or we're shutting down. * @return true if we found meta regions, false if we're closing. @@ -650,9 +650,9 @@ class RegionManager implements HConstants { numberOfMetaRegions.get() == onlineMetaRegions.size()); } } - + /** - * Search our map of online meta regions to find the first meta region that + * Search our map of online meta regions to find the first meta region that * should contain a pointer to newRegion. * @param newRegion * @return MetaRegion where the newRegion should live @@ -666,13 +666,13 @@ class RegionManager implements HConstants { } else { if (onlineMetaRegions.containsKey(newRegion.getRegionName())) { return onlineMetaRegions.get(newRegion.getRegionName()); - } + } return onlineMetaRegions.get(onlineMetaRegions.headMap( newRegion.getRegionName()).lastKey()); } } } - + /** * Get a set of all the meta regions that contain info about a given table. * @param tableName Table you need to know all the meta regions for @@ -738,8 +738,8 @@ class RegionManager implements HConstants { * written * @throws IOException */ - public void createRegion(HRegionInfo newRegion, HRegionInterface server, - byte [] metaRegionName) + public void createRegion(HRegionInfo newRegion, HRegionInterface server, + byte [] metaRegionName) throws IOException { // 2. Create the HRegion HRegion region = HRegion.createHRegion(newRegion, master.rootdir, @@ -748,12 +748,12 @@ class RegionManager implements HConstants { // 3. Insert into meta HRegionInfo info = region.getRegionInfo(); byte [] regionName = region.getRegionName(); - + Put put = new Put(regionName); - + put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(info)); server.put(metaRegionName, put); - + // 4. Close the new region to flush it to disk. Close its log file too. region.close(); region.getLog().closeAndDelete(); @@ -761,17 +761,17 @@ class RegionManager implements HConstants { // 5. Get it assigned to a server setUnassigned(info, true); } - - /** - * Set a MetaRegion as online. - * @param metaRegion + + /** + * Set a MetaRegion as online. 
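For illustration only (not part of this patch): how a headMap()/lastKey() lookup like the one on onlineMetaRegions above finds the entry whose start key most closely precedes a given row; plain JDK types and invented keys.

// Illustrative sketch only: find the region covering a row when regions are
// kept in a SortedMap keyed by start key.
import java.util.SortedMap;
import java.util.TreeMap;

class CoveringRegionLookup {
  public static void main(String[] args) {
    // start key -> region name, ordered by start key (values are made up)
    SortedMap<String, String> regionsByStartKey = new TreeMap<String, String>();
    regionsByStartKey.put("", "region-1");    // first region, empty start key
    regionsByStartKey.put("foo", "region-2");
    regionsByStartKey.put("pqr", "region-3");

    String row = "mmm";
    String region;
    if (regionsByStartKey.containsKey(row)) {
      // Exact match on a start key.
      region = regionsByStartKey.get(row);
    } else {
      // headMap(row) holds all start keys strictly less than the row;
      // lastKey() is the greatest of them, i.e. the covering region.
      region = regionsByStartKey.get(regionsByStartKey.headMap(row).lastKey());
    }
    System.out.println(row + " is covered by " + region); // prints region-2
  }
}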
+ * @param metaRegion */ public void putMetaRegionOnline(MetaRegion metaRegion) { onlineMetaRegions.put(metaRegion.getStartKey(), metaRegion); } - /** - * Get a list of online MetaRegions + /** + * Get a list of online MetaRegions * @return list of MetaRegion objects */ public List getListOfOnlineMetaRegions() { @@ -781,26 +781,26 @@ class RegionManager implements HConstants { } return regions; } - - /** - * Count of online meta regions + + /** + * Count of online meta regions * @return count of online meta regions */ public int numOnlineMetaRegions() { return onlineMetaRegions.size(); } - - /** - * Check if a meta region is online by its name + + /** + * Check if a meta region is online by its name * @param startKey name of the meta region to check * @return true if the region is online, false otherwise */ public boolean isMetaRegionOnline(byte [] startKey) { return onlineMetaRegions.containsKey(startKey); } - - /** - * Set an online MetaRegion offline - remove it from the map. + + /** + * Set an online MetaRegion offline - remove it from the map. * @param startKey region name * @return the MetaRegion that was taken offline. */ @@ -950,7 +950,7 @@ class RegionManager implements HConstants { /** * Remove a region from the region state map. - * + * * @param info */ public void removeRegion(HRegionInfo info) { @@ -958,7 +958,7 @@ class RegionManager implements HConstants { this.regionsInTransition.remove(info.getRegionNameAsString()); } } - + /** * @param regionName * @return true if the named region is in a transition state @@ -983,8 +983,8 @@ class RegionManager implements HConstants { } } - /** - * Set a region to unassigned + /** + * Set a region to unassigned * @param info Region to set unassigned * @param force if true mark region unassigned whatever its current state */ @@ -1001,8 +1001,8 @@ class RegionManager implements HConstants { s.setUnassigned(); } } - - /** + + /** * Check if a region is on the unassigned list * @param info HRegionInfo to check for * @return true if on the unassigned list, false if it isn't. Note that this @@ -1018,11 +1018,11 @@ class RegionManager implements HConstants { } return false; } - + /** * Check if a region has been assigned and we're waiting for a response from * the region server. - * + * * @param regionName name of the region * @return true if open, false otherwise */ @@ -1048,7 +1048,7 @@ class RegionManager implements HConstants { } } } - + /** * @param regionName * @return true if region is marked to be offlined. 
@@ -1063,8 +1063,8 @@ class RegionManager implements HConstants { return false; } - /** - * Mark a region as closing + /** + * Mark a region as closing * @param serverName * @param regionInfo * @param setOffline @@ -1087,11 +1087,11 @@ class RegionManager implements HConstants { this.regionsInTransition.put(regionInfo.getRegionNameAsString(), s); } } - - /** - * Remove the map of region names to region infos waiting to be offlined for a + + /** + * Remove the map of region names to region infos waiting to be offlined for a * given server - * + * * @param serverName * @return set of infos to close */ @@ -1107,10 +1107,10 @@ class RegionManager implements HConstants { } return result; } - + /** * Called when we have told a region server to close the region - * + * * @param regionName */ public void setPendingClose(String regionName) { @@ -1121,7 +1121,7 @@ class RegionManager implements HConstants { } } } - + /** * @param regionName */ @@ -1140,8 +1140,8 @@ class RegionManager implements HConstants { public void addMetaRegionToScan(MetaRegion m) { metaScannerThread.addMetaRegionToScan(m); } - - /** + + /** * Check if the initial root scan has been completed. * @return true if scan completed, false otherwise */ @@ -1149,10 +1149,10 @@ class RegionManager implements HConstants { return rootScannerThread.isInitialScanComplete(); } - /** + /** * Check if the initial meta scan has been completed. * @return true if meta completed, false otherwise - */ + */ public boolean isInitialMetaScanComplete() { return metaScannerThread.isInitialScanComplete(); } @@ -1172,7 +1172,7 @@ class RegionManager implements HConstants { return false; } - /** + /** * @return true if the initial meta scan is complete and there are no * unassigned or pending regions */ @@ -1191,15 +1191,15 @@ class RegionManager implements HConstants { } return safeMode; } - - /** + + /** * Get the root region location. * @return HServerAddress describing root region server. */ public HServerAddress getRootRegionLocation() { return rootRegionLocation.get(); } - + /** * Block until either the root region location is available or we're shutting * down. @@ -1219,7 +1219,7 @@ class RegionManager implements HConstants { } } } - + /** * Return the number of meta regions. * @return number of meta regions @@ -1227,7 +1227,7 @@ class RegionManager implements HConstants { public int numMetaRegions() { return numberOfMetaRegions.get(); } - + /** * Bump the count of meta regions up one */ @@ -1276,9 +1276,9 @@ class RegionManager implements HConstants { synchronized (rootRegionLocation) { rootRegionLocation.set(new HServerAddress(address)); rootRegionLocation.notifyAll(); - } + } } - + /** * Set the number of meta regions. * @param num Number of meta regions @@ -1372,7 +1372,7 @@ class RegionManager implements HConstants { applyActions(serverInfo, returnMsgs, this.regionsToMajorCompact, HMsg.Type.MSG_REGION_MAJOR_COMPACT); } - + private void applyActions(final HServerInfo serverInfo, final ArrayList returnMsgs, final SortedMap> map, @@ -1397,28 +1397,28 @@ class RegionManager implements HConstants { * Class to balance region servers load. * It keeps Region Servers load in slop range by unassigning Regions * from most loaded servers. - * + * * Equilibrium is reached when load of all serves are in slop range - * [avgLoadMinusSlop, avgLoadPlusSlop], where + * [avgLoadMinusSlop, avgLoadPlusSlop], where * avgLoadPlusSlop = Math.ceil(avgLoad * (1 + this.slop)), and * avgLoadMinusSlop = Math.floor(avgLoad * (1 - this.slop)) - 1. 
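For illustration only (not part of this patch): the slop bounds described in the class comment above, evaluated for concrete numbers; the figures are invented, and a slop of 0.25 is used instead of the 0.3 default just to keep the arithmetic exact.

// Illustrative sketch only: compute the band in which server loads are
// considered balanced.
public class SlopBoundsExample {
  public static void main(String[] args) {
    double avgLoad = 20.0; // average number of regions per server (made up)
    double slop = 0.25;    // stand-in for hbase.regions.slop
    int avgLoadPlusSlop = (int) Math.ceil(avgLoad * (1 + slop));       // 25
    int avgLoadMinusSlop = (int) Math.floor(avgLoad * (1 - slop)) - 1; // 14
    System.out.println("servers are balanced while they hold between "
        + avgLoadMinusSlop + " and " + avgLoadPlusSlop + " regions");
  }
}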
*/ private class LoadBalancer { private float slop; // hbase.regions.slop private final int maxRegToClose; // hbase.regions.close.max - + LoadBalancer(HBaseConfiguration conf) { this.slop = conf.getFloat("hbase.regions.slop", (float)0.3); if (this.slop <= 0) this.slop = 1; //maxRegToClose to constrain balance closing per one iteration - // -1 to turn off + // -1 to turn off // TODO: change default in HBASE-862, need a suggestion this.maxRegToClose = conf.getInt("hbase.regions.close.max", -1); } /** * Balance server load by unassigning some regions. - * + * * @param info - server info * @param mostLoadedRegions - array of most loaded regions * @param returnMsgs - array of return messages */ @@ -1432,27 +1432,27 @@ class RegionManager implements HConstants { if(servLoad.getLoad() <= Math.ceil(avg) || avg <= 2.0) { return; } - + // check if current server is overloaded int numRegionsToClose = balanceFromOverloaded(servLoad, avg); - + // check if we can unload server by low loaded servers if(numRegionsToClose <= 0) { - numRegionsToClose = balanceToLowloaded(info.getServerName(), servLoad, + numRegionsToClose = balanceToLowloaded(info.getServerName(), servLoad, avg); } - + if(maxRegToClose > 0) { numRegionsToClose = Math.min(numRegionsToClose, maxRegToClose); } - + if(numRegionsToClose > 0) { - unassignSomeRegions(info, numRegionsToClose, mostLoadedRegions, + unassignSomeRegions(info, numRegionsToClose, mostLoadedRegions, returnMsgs); } } - /* + /* * Check if the server is overloaded (load > avgLoadPlusSlop). * @return number of regions to unassign. */ @@ -1469,31 +1469,31 @@ class RegionManager implements HConstants { return 0; } - /* - * Check if server is most loaded and can be unloaded to + /* + * Check if server is most loaded and can be unloaded to * low loaded servers (with load < avgLoadMinusSlop). * @return number of regions to unassign.
*/ - private int balanceToLowloaded(String srvName, HServerLoad srvLoad, + private int balanceToLowloaded(String srvName, HServerLoad srvLoad, double avgLoad) { - SortedMap> loadToServers = + SortedMap> loadToServers = master.serverManager.getLoadToServers(); // check if server most loaded if (!loadToServers.get(loadToServers.lastKey()).contains(srvName)) return 0; - + // this server is most loaded, we will try to unload it by lowest // loaded servers int avgLoadMinusSlop = (int)Math.floor(avgLoad * (1 - this.slop)) - 1; int lowestLoad = loadToServers.firstKey().getNumberOfRegions(); - + if(lowestLoad >= avgLoadMinusSlop) return 0; // there is no low loaded servers - + int lowSrvCount = loadToServers.get(loadToServers.firstKey()).size(); int numRegionsToClose = 0; - + int numSrvRegs = srvLoad.getNumberOfRegions(); int numMoveToLowLoaded = (avgLoadMinusSlop - lowestLoad) * lowSrvCount; numRegionsToClose = numSrvRegs - (int)Math.ceil(avgLoad); @@ -1549,7 +1549,7 @@ class RegionManager implements HConstants { */ static class RegionState implements Comparable { private final HRegionInfo regionInfo; - + enum State { UNASSIGNED, // awaiting a server to be assigned PENDING_OPEN, // told a server to open, hasn't opened yet @@ -1557,13 +1557,13 @@ class RegionManager implements HConstants { CLOSING, // a msg has been enqueued to close ths region, but not delivered to RS yet PENDING_CLOSE, // msg has been delivered to RS to close this region CLOSED // region has been closed but not yet marked in meta - + } - + private State state; - + private boolean isOfflined; - + /* Set when region is assigned or closing */ private String serverName = null; @@ -1572,11 +1572,11 @@ class RegionManager implements HConstants { this.regionInfo = info; this.state = state; } - + synchronized HRegionInfo getRegionInfo() { return this.regionInfo; } - + synchronized byte [] getRegionName() { return this.regionInfo.getRegionName(); } @@ -1592,7 +1592,7 @@ class RegionManager implements HConstants { * @return true if the region is being opened */ synchronized boolean isOpening() { - return state == State.UNASSIGNED || + return state == State.UNASSIGNED || state == State.PENDING_OPEN || state == State.OPEN; } @@ -1605,7 +1605,7 @@ class RegionManager implements HConstants { } /* - * Note: callers of this method (reassignRootRegion, + * Note: callers of this method (reassignRootRegion, * regionsAwaitingAssignment, setUnassigned) ensure that this method is not * called unless it is safe to do so. 
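For illustration only (not part of this patch): the transition order that the RegionState comments above spell out, condensed into a bare enum; the OPEN-to-CLOSING step is implied rather than listed in those comments, and every name here is invented.

// Illustrative sketch only: which state changes follow the documented order.
enum AssignmentState {
  UNASSIGNED, PENDING_OPEN, OPEN, CLOSING, PENDING_CLOSE, CLOSED;

  // True if moving from this state to "next" follows the documented life cycle.
  boolean canMoveTo(AssignmentState next, boolean offline) {
    switch (this) {
      case UNASSIGNED:    return next == PENDING_OPEN;
      case PENDING_OPEN:  return next == OPEN;
      case OPEN:          return next == CLOSING; // implied, not listed above
      case CLOSING:       return next == PENDING_CLOSE;
      case PENDING_CLOSE: return next == CLOSED;
      case CLOSED:        return !offline && next == UNASSIGNED;
      default:            return false;
    }
  }
}

class TransitionDemo {
  public static void main(String[] args) {
    System.out.println(AssignmentState.UNASSIGNED.canMoveTo(AssignmentState.PENDING_OPEN, false)); // true
    System.out.println(AssignmentState.CLOSED.canMoveTo(AssignmentState.UNASSIGNED, true));        // false: offlined regions stay out
  }
}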
*/ @@ -1651,7 +1651,7 @@ class RegionManager implements HConstants { this.serverName = serverName; this.isOfflined = setOffline; } - + synchronized boolean isPendingClose() { return state == State.PENDING_CLOSE; } @@ -1667,7 +1667,7 @@ class RegionManager implements HConstants { synchronized boolean isClosed() { return state == State.CLOSED; } - + synchronized void setClosed() { if (state != State.PENDING_CLOSE && state != State.PENDING_OPEN && @@ -1678,7 +1678,7 @@ class RegionManager implements HConstants { } state = State.CLOSED; } - + synchronized boolean isOfflined() { return (state == State.CLOSING || state == State.PENDING_CLOSE) && isOfflined; @@ -1689,7 +1689,7 @@ class RegionManager implements HConstants { return ("name=" + Bytes.toString(getRegionName()) + ", state=" + this.state); } - + @Override public boolean equals(Object o) { if (this == o) { @@ -1700,12 +1700,12 @@ class RegionManager implements HConstants { } return this.compareTo((RegionState) o) == 0; } - + @Override public int hashCode() { return Bytes.toString(getRegionName()).hashCode(); } - + public int compareTo(RegionState o) { if (o == null) { return 1; diff --git a/src/java/org/apache/hadoop/hbase/master/RegionServerOperation.java b/src/java/org/apache/hadoop/hbase/master/RegionServerOperation.java index 1c5796d..9eb05a7 100644 --- a/src/java/org/apache/hadoop/hbase/master/RegionServerOperation.java +++ b/src/java/org/apache/hadoop/hbase/master/RegionServerOperation.java @@ -28,14 +28,14 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HConstants; abstract class RegionServerOperation implements Delayed, HConstants { - protected static final Log LOG = + protected static final Log LOG = LogFactory.getLog(RegionServerOperation.class.getName()); - + private long expire; protected final HMaster master; protected final int numRetries; private int expirationDuration; - + protected RegionServerOperation(HMaster master) { this.master = master; this.numRetries = master.numRetries; @@ -71,7 +71,7 @@ abstract class RegionServerOperation implements Delayed, HConstants { int getExpirationDuration() { return this.expirationDuration; } - + void setExpirationDuration(final int d) { this.expirationDuration = d; } @@ -96,9 +96,9 @@ abstract class RegionServerOperation implements Delayed, HConstants { // in the run queue, put this request on the delay queue to give // other threads the opportunity to get the meta regions on-line. if (LOG.isDebugEnabled()) { - LOG.debug("numberOfMetaRegions: " + + LOG.debug("numberOfMetaRegions: " + master.regionManager.numMetaRegions() + - ", onlineMetaRegions.size(): " + + ", onlineMetaRegions.size(): " + master.regionManager.numOnlineMetaRegions()); LOG.debug("Requeuing because not all meta regions are online"); } diff --git a/src/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java b/src/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java index 5e56040..38b28dd 100644 --- a/src/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java +++ b/src/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java @@ -32,7 +32,7 @@ import org.apache.hadoop.ipc.RemoteException; public class RegionServerOperationQueue { // TODO: Build up the junit test of this class. 
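For illustration only (not part of this patch): the requeue-with-an-expiration idea used by RegionServerOperation above, a Delayed task parked until its delay runs out, shown with a plain java.util.concurrent.DelayQueue; all names are invented.

// Illustrative sketch only: park an operation on a delay queue instead of
// retrying it immediately when a precondition is not yet met.
import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

class DelayedOp implements Delayed {
  private final String name;
  private final long expireAtMillis;

  DelayedOp(String name, long delayMillis) {
    this.name = name;
    this.expireAtMillis = System.currentTimeMillis() + delayMillis;
  }

  public long getDelay(TimeUnit unit) {
    return unit.convert(expireAtMillis - System.currentTimeMillis(),
        TimeUnit.MILLISECONDS);
  }

  public int compareTo(Delayed other) {
    long diff = getDelay(TimeUnit.MILLISECONDS)
        - other.getDelay(TimeUnit.MILLISECONDS);
    return diff < 0 ? -1 : (diff > 0 ? 1 : 0);
  }

  public String toString() {
    return name;
  }
}

class RequeueExample {
  public static void main(String[] args) throws InterruptedException {
    DelayQueue<DelayedOp> delayed = new DelayQueue<DelayedOp>();
    // A precondition is not met, so park the operation with a fresh expiration.
    delayed.put(new DelayedOp("someServerShutdownWork", 500));
    // take() blocks until the delay has elapsed, then hands the op back.
    DelayedOp op = delayed.take();
    System.out.println("retrying " + op);
  }
}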
private final Log LOG = LogFactory.getLog(this.getClass()); - + /** * Enums returned by {@link RegionServerOperationQueue#process()}; */ @@ -95,7 +95,7 @@ public class RegionServerOperationQueue { * @return {@link ProcessingResultCode#PROCESSED}, * {@link ProcessingResultCode#REQUEUED}, * {@link ProcessingResultCode#REQUEUED_BUT_PROBLEM} - */ + */ public synchronized ProcessingResultCode process(final HServerAddress rootRegionLocation) { RegionServerOperation op = null; // Only process the delayed queue if root region is online. If offline, diff --git a/src/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java b/src/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java index e8846f2..fd58006 100644 --- a/src/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java +++ b/src/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java @@ -44,15 +44,15 @@ abstract class RetryableMetaOperation implements Callable { protected final Sleeper sleeper; protected final MetaRegion m; protected final HMaster master; - + protected HRegionInterface server; - + protected RetryableMetaOperation(MetaRegion m, HMaster master) { this.m = m; this.master = master; this.sleeper = new Sleeper(master.threadWakeFrequency, master.closed); } - + protected T doWithRetries() throws IOException, RuntimeException { List exceptions = new ArrayList(); @@ -76,7 +76,7 @@ abstract class RetryableMetaOperation implements Callable { if (tries == master.numRetries - 1) { if (LOG.isDebugEnabled()) { StringBuilder message = new StringBuilder( - "Trying to contact region server for regionName '" + + "Trying to contact region server for regionName '" + Bytes.toString(m.getRegionName()) + "', but failed after " + (tries + 1) + " attempts.\n"); int i = 1; @@ -97,6 +97,6 @@ abstract class RetryableMetaOperation implements Callable { } this.sleeper.sleep(); } - return null; + return null; } } \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/master/RootScanner.java b/src/java/org/apache/hadoop/hbase/master/RootScanner.java index 8b1bdc2..3f1c49f 100644 --- a/src/java/org/apache/hadoop/hbase/master/RootScanner.java +++ b/src/java/org/apache/hadoop/hbase/master/RootScanner.java @@ -61,7 +61,7 @@ class RootScanner extends BaseScanner { // Make sure the file system is still available master.checkFileSystem(); } catch (Exception e) { - // If for some reason we get some other kind of exception, + // If for some reason we get some other kind of exception, // at least log it rather than go out silently. LOG.error("Unexpected exception", e); } diff --git a/src/java/org/apache/hadoop/hbase/master/ServerManager.java b/src/java/org/apache/hadoop/hbase/master/ServerManager.java index 38a4b22..2349507 100644 --- a/src/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/src/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -55,7 +55,7 @@ import org.apache.zookeeper.Watcher; import org.apache.zookeeper.Watcher.Event.EventType; /** - * The ServerManager class manages info about region servers - HServerInfo, + * The ServerManager class manages info about region servers - HServerInfo, * load numbers, dying servers, etc. 
*/ class ServerManager implements HConstants { @@ -68,7 +68,7 @@ class ServerManager implements HConstants { private static final HMsg CALL_SERVER_STARTUP = new HMsg(Type.MSG_CALL_SERVER_STARTUP); private static final HMsg [] EMPTY_HMSG_ARRAY = new HMsg[0]; - + private final AtomicInteger quiescedServers = new AtomicInteger(0); /** The map of known server names to server info */ @@ -77,7 +77,7 @@ class ServerManager implements HConstants { final Map serverAddressToServerInfo = new ConcurrentHashMap(); - + /** * Set of known dead servers. On znode expiration, servers are added here. * This is needed in case of a network partitioning where the server's lease @@ -94,10 +94,10 @@ class ServerManager implements HConstants { /** Map of server names -> server load */ final Map serversToLoad = - new ConcurrentHashMap(); + new ConcurrentHashMap(); protected HMaster master; - + /* The regionserver will not be assigned or asked close regions if it * is currently opening >= this many regions. */ @@ -127,10 +127,10 @@ class ServerManager implements HConstants { sb.append("]"); deadServersList = sb.toString(); } - LOG.info(numServers + " region servers, " + numDeadServers + + LOG.info(numServers + " region servers, " + numDeadServers + " dead, average load " + averageLoad + (deadServersList != null? deadServers: "")); } - + } ServerMonitor serverMonitorThread; @@ -147,13 +147,13 @@ class ServerManager implements HConstants { master.shutdownRequested); this.serverMonitorThread.start(); } - + /** * Let the server manager know a new regionserver has come online * @param serverInfo * @throws Leases.LeaseStillHeldException */ - public void regionServerStartup(final HServerInfo serverInfo) + public void regionServerStartup(final HServerInfo serverInfo) throws Leases.LeaseStillHeldException { HServerInfo info = new HServerInfo(serverInfo); String serverName = info.getServerName(); @@ -164,7 +164,7 @@ class ServerManager implements HConstants { LOG.debug("deadServers.contains: " + deadServers.contains(serverName)); throw new Leases.LeaseStillHeldException(serverName); } - + LOG.info("Received start message from: " + serverName); // Go on to process the regionserver registration. HServerLoad load = serversToLoad.remove(serverName); @@ -192,8 +192,8 @@ class ServerManager implements HConstants { } recordNewServer(info); } - - + + /** * Adds the HSI to the RS list and creates an empty load * @param info The region server informations @@ -201,7 +201,7 @@ class ServerManager implements HConstants { public void recordNewServer(HServerInfo info) { recordNewServer(info, false); } - + /** * Adds the HSI to the RS list * @param info The region server informations @@ -228,18 +228,18 @@ class ServerManager implements HConstants { loadToServers.put(load, servers); } } - + /** * Called to process the messages sent from the region server to the master * along with the heart beat. - * + * * @param serverInfo * @param msgs * @param mostLoadedRegions Array of regions the region server is submitting * as candidates to be rebalanced, should it be overloaded * @return messages from master to region server indicating what region * server should do. - * + * * @throws IOException */ public HMsg [] regionServerReport(final HServerInfo serverInfo, @@ -300,12 +300,12 @@ class ServerManager implements HConstants { // This state is reachable if: // // 1) RegionServer A started - // 2) RegionServer B started on the same machine, then + // 2) RegionServer B started on the same machine, then // clobbered A in regionServerStartup. 
// 3) RegionServer A returns, expecting to work as usual. // // The answer is to ask A to shut down for good. - + if (LOG.isDebugEnabled()) { LOG.debug("region server race condition detected: " + info.getServerName()); @@ -315,7 +315,7 @@ class ServerManager implements HConstants { removeServerInfo(info.getServerName(), info.getServerAddress()); serversToServerInfo.notifyAll(); } - + return new HMsg[] {REGIONSERVER_STOP}; } else { return processRegionServerAllsWell(info, mostLoadedRegions, msgs); @@ -324,13 +324,13 @@ class ServerManager implements HConstants { /** * Region server is exiting with a clean shutdown. - * + * * In this case, the server sends MSG_REPORT_EXITING in msgs[0] followed by - * a MSG_REPORT_CLOSE for each region it was serving. + * a MSG_REPORT_CLOSE for each region it was serving. */ private void processRegionServerExit(HServerInfo serverInfo, HMsg[] msgs) { assert msgs[0].getType() == Type.MSG_REPORT_EXITING; - + synchronized (serversToServerInfo) { try { // This method removes ROOT/META from the list and marks them to be reassigned @@ -362,7 +362,7 @@ class ServerManager implements HConstants { } } } - + // There should not be any regions in transition for this server - the // server should finish transitions itself before closing Map inTransition = @@ -380,7 +380,7 @@ class ServerManager implements HConstants { } finally { serversToServerInfo.notifyAll(); } - } + } } /** @@ -467,7 +467,7 @@ class ServerManager implements HConstants { case MSG_REPORT_PROCESS_OPEN: openingCount++; break; - + case MSG_REPORT_OPEN: processRegionOpen(serverInfo, region, returnMsgs); break; @@ -480,7 +480,7 @@ class ServerManager implements HConstants { processSplitRegion(region, incomingMsgs[++i].getRegionInfo(), incomingMsgs[++i].getRegionInfo()); break; - + case MSG_REPORT_SPLIT_INCLUDES_DAUGHTERS: processSplitRegion(region, incomingMsgs[i].getDaughterA(), incomingMsgs[i].getDaughterB()); @@ -501,9 +501,9 @@ class ServerManager implements HConstants { master.regionManager.setPendingClose(i.getRegionNameAsString()); } - + // Figure out what the RegionServer ought to do, and write back. - + // Should we tell it close regions because its overloaded? If its // currently opening regions, leave it alone till all are open. if (openingCount < this.nobalancingCount) { @@ -516,7 +516,7 @@ class ServerManager implements HConstants { } return returnMsgs.toArray(new HMsg[returnMsgs.size()]); } - + /* * A region has split. * @@ -529,7 +529,7 @@ class ServerManager implements HConstants { synchronized (master.regionManager) { // Cancel any actions pending for the affected region. // This prevents the master from sending a SPLIT message if the table - // has already split by the region server. + // has already split by the region server. master.regionManager.endActions(region.getRegionName()); assignSplitDaughter(a); assignSplitDaughter(b); @@ -574,7 +574,7 @@ class ServerManager implements HConstants { * @param region * @param returnMsgs */ - private void processRegionOpen(HServerInfo serverInfo, + private void processRegionOpen(HServerInfo serverInfo, HRegionInfo region, ArrayList returnMsgs) { boolean duplicateAssignment = false; synchronized (master.regionManager) { @@ -594,7 +594,7 @@ class ServerManager implements HConstants { } } else { // Not root region. If it is not a pending region, then we are - // going to treat it as a duplicate assignment, although we can't + // going to treat it as a duplicate assignment, although we can't // tell for certain that's the case. 
if (master.regionManager.isPendingOpen( region.getRegionNameAsString())) { @@ -604,20 +604,20 @@ class ServerManager implements HConstants { duplicateAssignment = true; } } - + if (duplicateAssignment) { LOG.warn("region server " + serverInfo.getServerAddress().toString() + " should not have opened region " + Bytes.toString(region.getRegionName())); // This Region should not have been opened. - // Ask the server to shut it down, but don't report it as closed. - // Otherwise the HMaster will think the Region was closed on purpose, + // Ask the server to shut it down, but don't report it as closed. + // Otherwise the HMaster will think the Region was closed on purpose, // and then try to reopen it elsewhere; that's not what we want. returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_CLOSE_WITHOUT_REPORT, region, "Duplicate assignment".getBytes())); } else { if (region.isRootRegion()) { - // it was assigned, and it's not a duplicate assignment, so take it out + // it was assigned, and it's not a duplicate assignment, so take it out // of the unassigned list. master.regionManager.removeRegion(region); @@ -676,7 +676,7 @@ class ServerManager implements HConstants { this.master.getRegionServerOperationQueue().put(op); } } - + /** Update a server load information because it's shutting down*/ private boolean removeServerInfo(final String serverName, final HServerAddress serverAddress) { @@ -720,10 +720,10 @@ class ServerManager implements HConstants { } return infoUpdated; } - - /** - * Compute the average load across all region servers. - * Currently, this uses a very naive computation - just uses the number of + + /** + * Compute the average load across all region servers. + * Currently, this uses a very naive computation - just uses the number of * regions being served, ignoring stats about number of requests. * @return the average load */ @@ -745,7 +745,7 @@ class ServerManager implements HConstants { public int numServers() { return serversToServerInfo.size(); } - + /** * @param name server name * @return HServerInfo for the given server address @@ -753,7 +753,7 @@ class ServerManager implements HConstants { public HServerInfo getServerInfo(String name) { return serversToServerInfo.get(name); } - + /** * @return Read-only map of servers to serverinfo. */ @@ -796,7 +796,7 @@ class ServerManager implements HConstants { serversToServerInfo.notifyAll(); } } - + /* * Wait on regionservers to report in * with {@link #regionServerReport(HServerInfo, HMsg[])} so they get notice @@ -821,7 +821,7 @@ class ServerManager implements HConstants { } } } - + /** Watcher triggered when a RS znode is deleted */ private class ServerExpirer implements Watcher { private String server; @@ -870,7 +870,7 @@ class ServerManager implements HConstants { public void removeDeadServer(String serverName) { deadServers.remove(serverName); } - + /** * @param serverName * @return true if server is dead diff --git a/src/java/org/apache/hadoop/hbase/master/TableDelete.java b/src/java/org/apache/hadoop/hbase/master/TableDelete.java index 0bde1b1..70d6d21 100644 --- a/src/java/org/apache/hadoop/hbase/master/TableDelete.java +++ b/src/java/org/apache/hadoop/hbase/master/TableDelete.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.util.Bytes; -/** +/** * Instantiated to delete a table. Table must be offline. 
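For illustration only (not part of this patch): the naive average-load computation that the averageLoad() comment earlier in this hunk describes, total regions divided by the number of servers with request counts ignored; the figures are invented.

// Illustrative sketch only: average regions per server across a cluster.
import java.util.Arrays;
import java.util.List;

class AverageLoadExample {
  public static void main(String[] args) {
    List<Integer> regionsPerServer = Arrays.asList(12, 9, 15, 8);
    int totalRegions = 0;
    for (int regions : regionsPerServer) {
      totalRegions += regions;
    }
    double averageLoad = regionsPerServer.isEmpty()
        ? 0.0 : (double) totalRegions / regionsPerServer.size();
    System.out.println("average load = " + averageLoad); // 11.0
  }
}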
*/ class TableDelete extends TableOperation { @@ -62,13 +62,13 @@ class TableDelete extends TableOperation { try { HRegion.removeRegionFromMETA(server, m.getRegionName(), i.getRegionName()); HRegion.deleteRegion(this.master.fs, this.master.rootdir, i); - + } catch (IOException e) { LOG.error("failed to delete region " + Bytes.toString(i.getRegionName()), RemoteExceptionHandler.checkIOException(e)); } } - + // delete the table's folder from fs. master.fs.delete(new Path(master.rootdir, Bytes.toString(tableName)), true); } diff --git a/src/java/org/apache/hadoop/hbase/master/TableOperation.java b/src/java/org/apache/hadoop/hbase/master/TableOperation.java index 702fdee..c418edb 100644 --- a/src/java/org/apache/hadoop/hbase/master/TableOperation.java +++ b/src/java/org/apache/hadoop/hbase/master/TableOperation.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.util.Bytes; /** - * Abstract base class for operations that need to examine all HRegionInfo + * Abstract base class for operations that need to examine all HRegionInfo * objects in a table. (For a table, operate on each of its rows * in .META.). */ @@ -64,7 +64,7 @@ abstract class TableOperation implements HConstants { // assigned and scanned. if (master.regionManager.metaScannerThread.waitForMetaRegionsOrClose()) { // We're shutting down. Forget it. - throw new MasterNotRunningException(); + throw new MasterNotRunningException(); } } this.metaRegions = master.regionManager.getMetaRegionsForTable(tableName); @@ -156,7 +156,7 @@ abstract class TableOperation implements HConstants { } } } - + protected boolean isBeingServed(String serverName) { boolean result = false; if (serverName != null && serverName.length() > 0) { diff --git a/src/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java b/src/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java index 2d466e7..fa4ab69 100644 --- a/src/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java +++ b/src/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java @@ -67,7 +67,7 @@ public class ZKMasterAddressWatcher implements Watcher { notifyAll(); } } - else if(type.equals(EventType.NodeCreated) && + else if(type.equals(EventType.NodeCreated) && event.getPath().equals(this.zooKeeper.clusterStateZNode)) { LOG.debug("Resetting the watch on the cluster state node."); this.zooKeeper.setClusterStateWatch(this); diff --git a/src/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java b/src/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java index b266721..e139c7d 100644 --- a/src/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java +++ b/src/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java @@ -28,7 +28,7 @@ import org.apache.hadoop.metrics.jvm.JvmMetrics; import org.apache.hadoop.metrics.util.MetricsRegistry; -/** +/** * This class is for maintaining the various master statistics * and publishing them through the metrics interfaces. *

@@ -43,7 +43,7 @@ public class MasterMetrics implements Updater { /* * Count of requests to the cluster since last call to metrics update */ - private final MetricsRate cluster_requests = + private final MetricsRate cluster_requests = new MetricsRate("cluster_requests", registry); public MasterMetrics() { @@ -59,16 +59,16 @@ public class MasterMetrics implements Updater { LOG.info("Initialized"); } - + public void shutdown() { if (masterStatistics != null) masterStatistics.shutdown(); } - + /** * Since this object is a registered updater, this method will be called * periodically, e.g. every 5 seconds. - * @param unused + * @param unused */ public void doUpdates(MetricsContext unused) { synchronized (this) { @@ -76,7 +76,7 @@ public class MasterMetrics implements Updater { } this.metricsRecord.update(); } - + public void resetAllMinMax() { // Nothing to do } @@ -87,7 +87,7 @@ public class MasterMetrics implements Updater { public float getRequests() { return this.cluster_requests.getPreviousIntervalValue(); } - + /** * @param inc How much to add to requests. */ diff --git a/src/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java b/src/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java index ec3e0de..d885348 100644 --- a/src/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java +++ b/src/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java @@ -32,7 +32,7 @@ public class MasterStatistics extends MetricsMBeanBase { public MasterStatistics(MetricsRegistry registry) { super(registry, "MasterStatistics"); - mbeanName = MBeanUtil.registerMBean("Master", "MasterStatistics", this); + mbeanName = MBeanUtil.registerMBean("Master", "MasterStatistics", this); } public void shutdown() { diff --git a/src/java/org/apache/hadoop/hbase/metrics/MetricsMBeanBase.java b/src/java/org/apache/hadoop/hbase/metrics/MetricsMBeanBase.java index 1f852c1..3d09f95 100644 --- a/src/java/org/apache/hadoop/hbase/metrics/MetricsMBeanBase.java +++ b/src/java/org/apache/hadoop/hbase/metrics/MetricsMBeanBase.java @@ -38,8 +38,8 @@ import org.apache.hadoop.metrics.util.MetricsRegistry; /** * Extends the Hadoop MetricsDynamicMBeanBase class to provide JMX support for - * custom HBase MetricsBase implementations. MetricsDynamicMBeanBase ignores - * registered MetricsBase instances that are not instances of one of the + * custom HBase MetricsBase implementations. MetricsDynamicMBeanBase ignores + * registered MetricsBase instances that are not instances of one of the * org.apache.hadoop.metrics.util implementations.
* */ @@ -50,13 +50,13 @@ public class MetricsMBeanBase extends MetricsDynamicMBeanBase { protected final MetricsRegistry registry; protected final String description; protected int registryLength; - /** HBase MetricsBase implementations that MetricsDynamicMBeanBase does - * not understand + /** HBase MetricsBase implementations that MetricsDynamicMBeanBase does + * not understand */ - protected Map extendedAttributes = + protected Map extendedAttributes = new HashMap(); protected MBeanInfo extendedInfo; - + protected MetricsMBeanBase( MetricsRegistry mr, String description ) { super(copyMinusHBaseMetrics(mr), description); this.registry = mr; @@ -87,45 +87,45 @@ public class MetricsMBeanBase extends MetricsDynamicMBeanBase { attributes.add(attr); parentAttributes.add(attr.getName()); } - + this.registryLength = this.registry.getMetricsList().size(); - + for (MetricsBase metric : this.registry.getMetricsList()) { if (metric.getName() == null || parentAttributes.contains(metric.getName())) continue; - + // add on custom HBase metric types if (metric instanceof org.apache.hadoop.hbase.metrics.MetricsRate) { - attributes.add( new MBeanAttributeInfo(metric.getName(), + attributes.add( new MBeanAttributeInfo(metric.getName(), "java.lang.Float", metric.getDescription(), true, false, false) ); extendedAttributes.put(metric.getName(), metric); } // else, its probably a hadoop metric already registered. Skip it. } - this.extendedInfo = new MBeanInfo( this.getClass().getName(), - this.description, attributes.toArray( new MBeanAttributeInfo[0] ), - parentInfo.getConstructors(), parentInfo.getOperations(), + this.extendedInfo = new MBeanInfo( this.getClass().getName(), + this.description, attributes.toArray( new MBeanAttributeInfo[0] ), + parentInfo.getConstructors(), parentInfo.getOperations(), parentInfo.getNotifications() ); } private void checkAndUpdateAttributes() { - if (this.registryLength != this.registry.getMetricsList().size()) + if (this.registryLength != this.registry.getMetricsList().size()) this.init(); } - + @Override public Object getAttribute( String name ) throws AttributeNotFoundException, MBeanException, ReflectionException { - + if (name == null) { throw new IllegalArgumentException("Attribute name is NULL"); } /* * Ugly. Since MetricsDynamicMBeanBase implementation is private, - * we need to first check the parent class for the attribute. + * we need to first check the parent class for the attribute. * In case that the MetricsRegistry contents have changed, this will * allow the parent to update it's internal structures (which we rely on * to update our own. 
@@ -133,9 +133,9 @@ public class MetricsMBeanBase extends MetricsDynamicMBeanBase { try { return super.getAttribute(name); } catch (AttributeNotFoundException ex) { - + checkAndUpdateAttributes(); - + MetricsBase metric = this.extendedAttributes.get(name); if (metric != null) { if (metric instanceof MetricsRate) { @@ -146,7 +146,7 @@ public class MetricsMBeanBase extends MetricsDynamicMBeanBase { } } } - + throw new AttributeNotFoundException(); } diff --git a/src/java/org/apache/hadoop/hbase/metrics/MetricsRate.java b/src/java/org/apache/hadoop/hbase/metrics/MetricsRate.java index 13fe5ef..fc1dc36 100644 --- a/src/java/org/apache/hadoop/hbase/metrics/MetricsRate.java +++ b/src/java/org/apache/hadoop/hbase/metrics/MetricsRate.java @@ -30,12 +30,12 @@ import org.apache.hadoop.util.StringUtils; */ public class MetricsRate extends MetricsBase { private static final Log LOG = LogFactory.getLog("org.apache.hadoop.hbase.metrics"); - + private int value; private float prevRate; private long ts; - - public MetricsRate(final String name, final MetricsRegistry registry, + + public MetricsRate(final String name, final MetricsRegistry registry, final String description) { super(name, description); this.value = 0; @@ -43,19 +43,19 @@ public class MetricsRate extends MetricsBase { this.ts = System.currentTimeMillis(); registry.add(name, this); } - + public MetricsRate(final String name, final MetricsRegistry registry) { this(name, registry, NO_DESCRIPTION); } - + public synchronized void inc(final int incr) { value += incr; } - + public synchronized void inc() { value++; } - + private synchronized void intervalHeartBeat() { long now = System.currentTimeMillis(); long diff = (now-ts)/1000; @@ -64,18 +64,18 @@ public class MetricsRate extends MetricsBase { this.value = 0; this.ts = now; } - + @Override public synchronized void pushMetric(final MetricsRecord mr) { intervalHeartBeat(); try { mr.setMetric(getName(), getPreviousIntervalValue()); } catch (Exception e) { - LOG.info("pushMetric failed for " + getName() + "\n" + + LOG.info("pushMetric failed for " + getName() + "\n" + StringUtils.stringifyException(e)); } } - + public synchronized float getPreviousIntervalValue() { return this.prevRate; } diff --git a/src/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java b/src/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java index a5ffc6e..000e0d3 100644 --- a/src/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java +++ b/src/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java @@ -40,7 +40,7 @@ public class TimeStampingFileContext extends FileContext { private File file = null; private PrintWriter writer = null; private final SimpleDateFormat sdf; - + public TimeStampingFileContext() { super(); this.sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/HStoreFileToStoreFile.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/HStoreFileToStoreFile.java index 354b032..7da96d0 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/HStoreFileToStoreFile.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/HStoreFileToStoreFile.java @@ -91,7 +91,7 @@ public class HStoreFileToStoreFile extends Configured implements Tool { if (out != null) out.close(); } } - + private static void gathermapfiles(final HBaseConfiguration conf, final FileSystem fs, final FSDataOutputStream out) throws IOException { diff --git 
a/src/java/org/apache/hadoop/hbase/migration/nineteen/HStoreKey.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/HStoreKey.java index 47a3b4e..5969f0b 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/HStoreKey.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/HStoreKey.java @@ -40,7 +40,7 @@ public class HStoreKey implements WritableComparable, HeapSize { * Colon character in UTF-8 */ public static final char COLUMN_FAMILY_DELIMITER = ':'; - + private byte [] row = HConstants.EMPTY_BYTE_ARRAY; private byte [] column = HConstants.EMPTY_BYTE_ARRAY; private long timestamp = Long.MAX_VALUE; @@ -50,7 +50,7 @@ public class HStoreKey implements WritableComparable, HeapSize { * It is not serialized. See https://issues.apache.org/jira/browse/HBASE-832 */ private HRegionInfo regionInfo = null; - + /** * Estimated size tax paid for each instance of HSK. Estimate based on * study of jhat and jprofiler numbers. @@ -63,12 +63,12 @@ public class HStoreKey implements WritableComparable, HeapSize { public HStoreKey() { super(); } - + /** * Create an HStoreKey specifying only the row * The column defaults to the empty string, the time stamp defaults to * Long.MAX_VALUE and the table defaults to empty string - * + * * @param row - row key */ public HStoreKey(final byte [] row) { @@ -79,7 +79,7 @@ public class HStoreKey implements WritableComparable, HeapSize { * Create an HStoreKey specifying only the row * The column defaults to the empty string, the time stamp defaults to * Long.MAX_VALUE and the table defaults to empty string - * + * * @param row - row key */ public HStoreKey(final String row) { @@ -89,18 +89,18 @@ public class HStoreKey implements WritableComparable, HeapSize { /** * Create an HStoreKey specifying the row and timestamp * The column and table names default to the empty string - * + * * @param row row key * @param hri */ public HStoreKey(final byte [] row, final HRegionInfo hri) { this(row, HConstants.EMPTY_BYTE_ARRAY, hri); } - + /** * Create an HStoreKey specifying the row and timestamp * The column and table names default to the empty string - * + * * @param row row key * @param timestamp timestamp value * @param hri HRegionInfo @@ -112,7 +112,7 @@ public class HStoreKey implements WritableComparable, HeapSize { /** * Create an HStoreKey specifying the row and timestamp * The column and table names default to the empty string - * + * * @param row row key * @param timestamp timestamp value */ @@ -123,7 +123,7 @@ public class HStoreKey implements WritableComparable, HeapSize { /** * Create an HStoreKey specifying the row and timestamp * The column and table names default to the empty string - * + * * @param row row key * @param timestamp timestamp value */ @@ -135,7 +135,7 @@ public class HStoreKey implements WritableComparable, HeapSize { * Create an HStoreKey specifying the row and column names * The timestamp defaults to LATEST_TIMESTAMP * and table name defaults to the empty string - * + * * @param row row key * @param column column key */ @@ -147,45 +147,45 @@ public class HStoreKey implements WritableComparable, HeapSize { * Create an HStoreKey specifying the row and column names * The timestamp defaults to LATEST_TIMESTAMP * and table name defaults to the empty string - * + * * @param row row key * @param column column key */ public HStoreKey(final byte [] row, final byte [] column) { this(row, column, HConstants.LATEST_TIMESTAMP); } - + /** * Create an HStoreKey specifying the row, column names and table name * The timestamp defaults to 
LATEST_TIMESTAMP - * + * * @param row row key * @param column column key * @param regionInfo region info */ - public HStoreKey(final byte [] row, + public HStoreKey(final byte [] row, final byte [] column, final HRegionInfo regionInfo) { this(row, column, HConstants.LATEST_TIMESTAMP, regionInfo); } /** * Create an HStoreKey specifying all the fields - * Does not make copies of the passed byte arrays. Presumes the passed + * Does not make copies of the passed byte arrays. Presumes the passed * arrays immutable. * @param row row key * @param column column key * @param timestamp timestamp value * @param regionInfo region info */ - public HStoreKey(final String row, + public HStoreKey(final String row, final String column, long timestamp, final HRegionInfo regionInfo) { - this (Bytes.toBytes(row), Bytes.toBytes(column), + this (Bytes.toBytes(row), Bytes.toBytes(column), timestamp, regionInfo); } /** * Create an HStoreKey specifying all the fields with unspecified table - * Does not make copies of the passed byte arrays. Presumes the passed + * Does not make copies of the passed byte arrays. Presumes the passed * arrays immutable. * @param row row key * @param column column key @@ -194,17 +194,17 @@ public class HStoreKey implements WritableComparable, HeapSize { public HStoreKey(final byte [] row, final byte [] column, long timestamp) { this(row, column, timestamp, null); } - + /** * Create an HStoreKey specifying all the fields with specified table - * Does not make copies of the passed byte arrays. Presumes the passed + * Does not make copies of the passed byte arrays. Presumes the passed * arrays immutable. * @param row row key * @param column column key * @param timestamp timestamp value * @param regionInfo region info */ - public HStoreKey(final byte [] row, + public HStoreKey(final byte [] row, final byte [] column, long timestamp, final HRegionInfo regionInfo) { // Make copies this.row = row; @@ -215,26 +215,26 @@ public class HStoreKey implements WritableComparable, HeapSize { /** * Constructs a new HStoreKey from another - * + * * @param other the source key */ public HStoreKey(HStoreKey other) { this(other.getRow(), other.getColumn(), other.getTimestamp(), other.getHRegionInfo()); } - + /** * Change the value of the row key - * + * * @param newrow new row key value */ public void setRow(byte [] newrow) { this.row = newrow; } - + /** * Change the value of the column in this key - * + * * @param c new column family value */ public void setColumn(byte [] c) { @@ -243,16 +243,16 @@ public class HStoreKey implements WritableComparable, HeapSize { /** * Change the value of the timestamp field - * + * * @param timestamp new timestamp value */ public void setVersion(long timestamp) { this.timestamp = timestamp; } - + /** * Set the value of this HStoreKey from the supplied key - * + * * @param k key value to copy */ public void set(HStoreKey k) { @@ -260,12 +260,12 @@ public class HStoreKey implements WritableComparable, HeapSize { this.column = k.getColumn(); this.timestamp = k.getTimestamp(); } - + /** @return value of row key */ public byte [] getRow() { return row; } - + /** @return value of column */ public byte [] getColumn() { return this.column; @@ -275,36 +275,36 @@ public class HStoreKey implements WritableComparable, HeapSize { public long getTimestamp() { return this.timestamp; } - + /** @return value of regioninfo */ public HRegionInfo getHRegionInfo() { return this.regionInfo; } - + /** * @param hri */ public void setHRegionInfo(final HRegionInfo hri) { this.regionInfo = hri; } 
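A minimal, purely illustrative usage sketch of the constructors and defaults documented above; the row and column literals are invented for the example, and the class names refer to the migration-era HStoreKey in this patch.

    import org.apache.hadoop.hbase.migration.nineteen.HStoreKey;
    import org.apache.hadoop.hbase.util.Bytes;

    public class HStoreKeyDefaultsSketch {
      public static void main(String[] args) {
        // Row only: column defaults to the empty array, timestamp to Long.MAX_VALUE.
        HStoreKey rowOnly = new HStoreKey(Bytes.toBytes("row1"));

        // Row and column: timestamp defaults to HConstants.LATEST_TIMESTAMP.
        HStoreKey rowAndColumn = new HStoreKey(Bytes.toBytes("row1"), Bytes.toBytes("info:name"));

        // Fully specified; the passed byte arrays are not copied, so callers must not mutate them.
        HStoreKey explicit = new HStoreKey(Bytes.toBytes("row1"), Bytes.toBytes("info:name"),
            1234567890L, null);

        System.out.println(rowOnly + "  " + rowAndColumn + "  " + explicit);
      }
    }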
- + /** * Compares the row and column of two keys * @param other Key to compare against. Compares row and column. * @return True if same row and column. * @see #matchesWithoutColumn(HStoreKey) * @see #matchesRowFamily(HStoreKey) - */ + */ public boolean matchesRowCol(HStoreKey other) { return HStoreKey.equalsTwoRowKeys(getHRegionInfo(), getRow(), other.getRow()) && Bytes.equals(getColumn(), other.getColumn()); } - + /** * Compares the row and timestamp of two keys - * + * * @param other Key to copmare against. Compares row and timestamp. - * + * * @return True if same row and timestamp is greater than other * @see #matchesRowCol(HStoreKey) * @see #matchesRowFamily(HStoreKey) @@ -313,12 +313,12 @@ public class HStoreKey implements WritableComparable, HeapSize { return equalsTwoRowKeys(getHRegionInfo(), getRow(), other.getRow()) && getTimestamp() >= other.getTimestamp(); } - + /** * Compares the row and column family of two keys - * + * * @param that Key to compare against. Compares row and column family - * + * * @return true if same row and column family * @see #matchesRowCol(HStoreKey) * @see #matchesWithoutColumn(HStoreKey) @@ -329,13 +329,13 @@ public class HStoreKey implements WritableComparable, HeapSize { Bytes.compareTo(getColumn(), 0, delimiterIndex, that.getColumn(), 0, delimiterIndex) == 0; } - + @Override public String toString() { return Bytes.toString(this.row) + "/" + Bytes.toString(this.column) + "/" + timestamp; } - + @Override public boolean equals(Object obj) { if (!(obj instanceof HStoreKey)) { @@ -350,7 +350,7 @@ public class HStoreKey implements WritableComparable, HeapSize { } return compareTo(other) == 0; } - + @Override public int hashCode() { int result = Bytes.hashCode(getRow()); @@ -364,14 +364,14 @@ public class HStoreKey implements WritableComparable, HeapSize { public int compareTo(final HStoreKey o) { return compareTo(this.regionInfo, this, o); } - + static int compareTo(final HRegionInfo hri, final HStoreKey left, final HStoreKey right) { // We can be passed null if (left == null && right == null) return 0; if (left == null) return -1; if (right == null) return 1; - + int result = compareTwoRowKeys(hri, left.getRow(), right.getRow()); if (result != 0) { return result; @@ -403,7 +403,7 @@ public class HStoreKey implements WritableComparable, HeapSize { * @param column * @return New byte array that holds column family prefix only * (Does not include the colon DELIMITER). - * @throws ColumnNameParseException + * @throws ColumnNameParseException * @see #parseColumn(byte[]) */ public static byte [] getFamily(final byte [] column) @@ -418,7 +418,7 @@ public class HStoreKey implements WritableComparable, HeapSize { System.arraycopy(column, 0, result, 0, index); return result; } - + /** * @param column * @return Return hash of family portion of passed column. @@ -429,7 +429,7 @@ public class HStoreKey implements WritableComparable, HeapSize { // delimiter return Bytes.mapKey(column, index > 0? index: column.length); } - + /** * @param family * @param column @@ -444,7 +444,7 @@ public class HStoreKey implements WritableComparable, HeapSize { } return Bytes.compareTo(family, 0, index, column, 0, index) == 0; } - + /** * @param family * @return Return family plus the family delimiter. @@ -475,14 +475,14 @@ public class HStoreKey implements WritableComparable, HeapSize { * @return Return array of size two whose first element has the family * prefix of passed column c and whose second element is the * column qualifier. 
- * @throws ColumnNameParseException + * @throws ColumnNameParseException */ public static byte [][] parseColumn(final byte [] c) throws ColumnNameParseException { byte [][] result = new byte [2][]; int index = getFamilyDelimiterIndex(c); if (index == -1) { - throw new ColumnNameParseException("Impossible column name: " + + throw new ColumnNameParseException("Impossible column name: " + Bytes.toStringBinary(c)); } result[0] = new byte [index]; @@ -493,7 +493,7 @@ public class HStoreKey implements WritableComparable, HeapSize { len); return result; } - + /** * @param b * @return Index of the family-qualifier colon delimiter character in passed @@ -521,7 +521,7 @@ public class HStoreKey implements WritableComparable, HeapSize { public static byte[] getBytes(final HStoreKey hsk) { return Bytes.add(hsk.getRow(), hsk.getColumn()); } - + /** * Utility method to compare two row keys. * This is required because of the meta delimiters. @@ -531,7 +531,7 @@ public class HStoreKey implements WritableComparable, HeapSize { * @param rowB * @return value of the comparison */ - public static int compareTwoRowKeys(HRegionInfo regionInfo, + public static int compareTwoRowKeys(HRegionInfo regionInfo, byte[] rowA, byte[] rowB) { if (regionInfo != null && regionInfo.isMetaRegion()) { byte[][] keysA = stripStartKeyMeta(rowA); @@ -543,7 +543,7 @@ public class HStoreKey implements WritableComparable, HeapSize { } return Bytes.compareTo(rowA, rowB); } - + /** * Utility method to check if two row keys are equal. * This is required because of the meta delimiters @@ -553,13 +553,13 @@ public class HStoreKey implements WritableComparable, HeapSize { * @param rowB * @return if it's equal */ - public static boolean equalsTwoRowKeys(HRegionInfo regionInfo, + public static boolean equalsTwoRowKeys(HRegionInfo regionInfo, byte[] rowA, byte[] rowB) { return ((rowA == null) && (rowB == null)) ? true: (rowA == null) || (rowB == null) || (rowA.length != rowB.length) ? false: compareTwoRowKeys(regionInfo,rowA,rowB) == 0; } - + private static byte[][] stripStartKeyMeta(byte[] rowKey) { int offset = -1; for (int i = rowKey.length - 1; i > 0; i--) { @@ -581,7 +581,7 @@ public class HStoreKey implements WritableComparable, HeapSize { elements[1] = timestamp; return elements; } - + // Writable public void write(DataOutput out) throws IOException { @@ -607,13 +607,13 @@ public class HStoreKey implements WritableComparable, HeapSize { */ public static class HStoreKeyWritableComparator extends WritableComparator { private final HRegionInfo hri; - + /** @param hri */ public HStoreKeyWritableComparator(final HRegionInfo hri) { super(HStoreKey.class); this.hri = hri; } - + @SuppressWarnings("unchecked") @Override public int compare(final WritableComparable left, @@ -621,7 +621,7 @@ public class HStoreKey implements WritableComparable, HeapSize { return compareTo(this.hri, (HStoreKey)left, (HStoreKey)right); } } - + /** * Pass this class into {@link org.apache.hadoop.io.MapFile}.getClosest when * searching for the key that comes BEFORE this one but NOT this one. This @@ -636,19 +636,19 @@ public class HStoreKey implements WritableComparable, HeapSize { private final HStoreKey beforeThisKey; /** - * @param beforeThisKey + * @param beforeThisKey */ public BeforeThisStoreKey(final HStoreKey beforeThisKey) { super(); this.beforeThisKey = beforeThisKey; } - + @Override public int compareTo(final HStoreKey o) { int result = this.beforeThisKey.compareTo(o); return result == 0? 
-1: result; } - + @Override public boolean equals(Object obj) { if (obj == null || !(obj instanceof HStoreKey)) { @@ -731,12 +731,12 @@ public class HStoreKey implements WritableComparable, HeapSize { public void write(DataOutput out) throws IOException { this.beforeThisKey.write(out); } - + @Override public HRegionInfo getHRegionInfo() { return this.beforeThisKey.getHRegionInfo(); } - + @Override public void setHRegionInfo(final HRegionInfo hri) { this.beforeThisKey.setHRegionInfo(hri); diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/io/BloomFilterMapFile.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/io/BloomFilterMapFile.java index a06233c..c7231a1 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/io/BloomFilterMapFile.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/io/BloomFilterMapFile.java @@ -62,7 +62,7 @@ public class BloomFilterMapFile extends HBaseMapFile { * @throws IOException */ public Reader(FileSystem fs, String dirName, Configuration conf, - final boolean filter, final boolean blockCacheEnabled, + final boolean filter, final boolean blockCacheEnabled, HRegionInfo hri) throws IOException { super(fs, dirName, conf, blockCacheEnabled, hri); @@ -89,7 +89,7 @@ public class BloomFilterMapFile extends HBaseMapFile { } return filter; } - + @Override public Writable get(WritableComparable key, Writable val) throws IOException { @@ -126,7 +126,7 @@ public class BloomFilterMapFile extends HBaseMapFile { } return null; } - + /** * @return size of the bloom filter */ @@ -134,13 +134,13 @@ public class BloomFilterMapFile extends HBaseMapFile { return bloomFilter == null ? 0 : bloomFilter.getVectorSize(); } } - + public static class Writer extends HBaseWriter { private static final double DEFAULT_NUMBER_OF_HASH_FUNCTIONS = 4.0; private final BloomFilter bloomFilter; private final String dirName; private final FileSystem fs; - + /** * @param conf * @param fs @@ -159,25 +159,25 @@ public class BloomFilterMapFile extends HBaseMapFile { this.dirName = dirName; this.fs = fs; if (filter) { - /* + /* * There is no way to automatically determine the vector size and the * number of hash functions to use. In particular, bloom filters are * very sensitive to the number of elements inserted into them. For * HBase, the number of entries depends on the size of the data stored * in the column. Currently the default region size is 256MB, so the - * number of entries is approximately + * number of entries is approximately * 256MB / (average value size for column). - * + * * If m denotes the number of bits in the Bloom filter (vectorSize), * n denotes the number of elements inserted into the Bloom filter and * k represents the number of hash functions used (nbHash), then * according to Broder and Mitzenmacher, - * + * * ( http://www.eecs.harvard.edu/~michaelm/NEWWORK/postscripts/BloomFilterSurvey.pdf ) - * + * * the probability of false positives is minimized when k is * approximately m/n ln(2). 
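To make the sizing rule concrete, a rough worked computation of m = (k * n) / ln(2) follows; the 200-byte average value size is an assumption made up for this example, not a figure from the source.

    // Illustrative arithmetic only, following the sizing comment above.
    public class BloomSizingSketch {
      public static void main(String[] args) {
        long regionBytes = 256L * 1024 * 1024; // default region size cited above
        long avgValueSize = 200;               // assumed average value size (example only)
        long n = regionBytes / avgValueSize;   // ~1.34 million entries
        double k = 4.0;                        // matches DEFAULT_NUMBER_OF_HASH_FUNCTIONS
        long m = (long) Math.ceil(k * n / Math.log(2));
        // ~7.7 million bits, i.e. just under 1 MB for the bit vector
        System.out.println(m + " bits, ~" + (m / 8 / 1024) + " KB");
      }
    }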
- * + * * If we fix the number of hash functions and know the number of * entries, then the optimal vector size m = (k * n) / ln(2) */ @@ -215,10 +215,10 @@ public class BloomFilterMapFile extends HBaseMapFile { flushBloomFilter(); } } - + /** * Flushes bloom filter to disk - * + * * @throws IOException */ private void flushBloomFilter() throws IOException { diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/io/HBaseMapFile.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/io/HBaseMapFile.java index 0a77ae5..5391826 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/io/HBaseMapFile.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/io/HBaseMapFile.java @@ -36,7 +36,7 @@ import org.apache.hadoop.io.Writable; public class HBaseMapFile extends MapFile { // TODO not used. remove?! // private static final Log LOG = LogFactory.getLog(HBaseMapFile.class); - + /** * Values are instances of this class. */ @@ -61,7 +61,7 @@ public class HBaseMapFile extends MapFile { throws IOException { this(fs, dirName, conf, false, hri); } - + /** * @param fs * @param dirName @@ -73,11 +73,11 @@ public class HBaseMapFile extends MapFile { public HBaseReader(FileSystem fs, String dirName, Configuration conf, boolean blockCacheEnabled, HRegionInfo hri) throws IOException { - super(fs, dirName, new org.apache.hadoop.hbase.migration.nineteen.HStoreKey.HStoreKeyWritableComparator(hri), + super(fs, dirName, new org.apache.hadoop.hbase.migration.nineteen.HStoreKey.HStoreKeyWritableComparator(hri), conf, false); // defer opening streams this.blockCacheEnabled = blockCacheEnabled; open(fs, dirName, new org.apache.hadoop.hbase.migration.nineteen.HStoreKey.HStoreKeyWritableComparator(hri), conf); - + // Force reading of the mapfile index by calling midKey. Reading the // index will bring the index into memory over here on the client and // then close the index file freeing up socket connection and resources @@ -85,8 +85,8 @@ public class HBaseMapFile extends MapFile { // load the index force the issue in HStoreFile MapFiles because an // access may not happen for some time; meantime we're using up datanode // resources (See HADOOP-2341). midKey() goes to index. Does not seek. - - + + // Disable for migration !!! midKey(); } } diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/io/HalfMapFileReader.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/io/HalfMapFileReader.java index e4b20dc..4c97a40 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/io/HalfMapFileReader.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/io/HalfMapFileReader.java @@ -41,10 +41,10 @@ import org.apache.hadoop.io.WritableComparable; * of the file with keys that sort greater than those of the bottom half. * The top includes the split files midkey, of the key that follows if it does * not exist in the file. - * + * *

      This type works in tandem with the {@link Reference} type. This class * is used reading while Reference is used writing. - * + * *

      This file is not splitable. Calls to {@link #midKey()} return null. */ //TODO should be fixed generic warnings from MapFile methods @@ -54,7 +54,7 @@ public class HalfMapFileReader extends BloomFilterMapFile.Reader { private final boolean top; private final HStoreKey midkey; private boolean firstNextCall = true; - + /** * @param fs * @param dirName @@ -64,14 +64,14 @@ public class HalfMapFileReader extends BloomFilterMapFile.Reader { * @param hri * @throws IOException */ - public HalfMapFileReader(final FileSystem fs, final String dirName, + public HalfMapFileReader(final FileSystem fs, final String dirName, final Configuration conf, final Range r, final WritableComparable mk, final HRegionInfo hri) throws IOException { this(fs, dirName, conf, r, mk, false, false, hri); } - + /** * @param fs * @param dirName @@ -83,7 +83,7 @@ public class HalfMapFileReader extends BloomFilterMapFile.Reader { * @param hri * @throws IOException */ - public HalfMapFileReader(final FileSystem fs, final String dirName, + public HalfMapFileReader(final FileSystem fs, final String dirName, final Configuration conf, final Range r, final WritableComparable mk, final boolean filter, final boolean blockCacheEnabled, @@ -100,7 +100,7 @@ public class HalfMapFileReader extends BloomFilterMapFile.Reader { // Is it top or bottom half? this.top = Reference.isTopFileRegion(r); } - + /* * Check key is not bleeding into wrong half of the file. * @param key @@ -124,7 +124,7 @@ public class HalfMapFileReader extends BloomFilterMapFile.Reader { public synchronized void finalKey(WritableComparable key) throws IOException { if (top) { - super.finalKey(key); + super.finalKey(key); } else { Writable value = new ImmutableBytesWritable(); WritableComparable found = super.getClosest(midkey, value, true); @@ -194,7 +194,7 @@ public class HalfMapFileReader extends BloomFilterMapFile.Reader { Writables.copyWritable(nearest, key); return true; } - return false; + return false; } } boolean result = super.next(key, val); @@ -208,7 +208,7 @@ public class HalfMapFileReader extends BloomFilterMapFile.Reader { } return result; } - + @Override public synchronized void reset() throws IOException { if (top) { diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/io/Reference.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/io/Reference.java index 7f4c52f..a9925b2 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/io/Reference.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/io/Reference.java @@ -33,7 +33,7 @@ import org.apache.hadoop.io.Writable; * half of the file. References are made at region split time. Being lazy * about copying data between the parent of the split and the split daughters * makes splitting faster. - * + * *

      References work with {@link HalfMapFileReader}. References know how to * write out the reference format in the file system and are whats juggled when * references are mixed in with direct store files. The @@ -58,8 +58,8 @@ public class Reference implements Writable { private long fileid; private Range region; private HStoreKey midkey; - - /** + + /** * For split HStoreFiles, it specifies if the file covers the lower half or * the upper half of the key range */ @@ -69,7 +69,7 @@ public class Reference implements Writable { /** HStoreFile contains lower half of key range */ bottom } - + public Reference(final int ern, final long fid, final HStoreKey m, final Range fr) { this.encodedRegionName = ern; @@ -77,7 +77,7 @@ public class Reference implements Writable { this.region = fr; this.midkey = m; } - + public Reference() { this(-1, -1, null, Range.bottom); } @@ -89,11 +89,11 @@ public class Reference implements Writable { public Range getFileRegion() { return region; } - + public HStoreKey getMidkey() { return midkey; } - + public int getEncodedRegionName() { return this.encodedRegionName; } @@ -127,7 +127,7 @@ public class Reference implements Writable { midkey = new HStoreKey(); midkey.readFields(in); } - + public static boolean isTopFileRegion(final Range r) { return r.equals(Range.top); } diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/BloomFilter.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/BloomFilter.java index ef82a27..60c3063 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/BloomFilter.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/BloomFilter.java @@ -2,30 +2,30 @@ * * Copyright (c) 2005, European Commission project OneLab under contract 034819 (http://www.one-lab.org) * All rights reserved. - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following * conditions are met: - * - Redistributions of source code must retain the above copyright + * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL - * nor the names of its contributors may be used to endorse or - * promote products derived from this software without specific prior + * nor the names of its contributors may be used to endorse or + * promote products derived from this software without specific prior * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** @@ -58,15 +58,15 @@ import org.apache.hadoop.hbase.util.Hash; /** * Implements a Bloom filter, as defined by Bloom in 1970. *

      - * The Bloom filter is a data structure that was introduced in 1970 and that has been adopted by + * The Bloom filter is a data structure that was introduced in 1970 and that has been adopted by * the networking research community in the past decade thanks to the bandwidth efficiencies that it - * offers for the transmission of set membership information between networked hosts. A sender encodes - * the information into a bit vector, the Bloom filter, that is more compact than a conventional - * representation. Computation and space costs for construction are linear in the number of elements. - * The receiver uses the filter to test whether various elements are members of the set. Though the - * filter will occasionally return a false positive, it will never return a false negative. When creating - * the filter, the sender can choose its desired point in a trade-off between the false positive rate and the size. - * + * offers for the transmission of set membership information between networked hosts. A sender encodes + * the information into a bit vector, the Bloom filter, that is more compact than a conventional + * representation. Computation and space costs for construction are linear in the number of elements. + * The receiver uses the filter to test whether various elements are members of the set. Though the + * filter will occasionally return a false positive, it will never return a false negative. When creating + * the filter, the sender can choose its desired point in a trade-off between the false positive rate and the size. + * * contract European Commission One-Lab Project 034819. * * @version 1.0 - 2 Feb. 07 @@ -82,7 +82,7 @@ public class BloomFilter extends Filter { (byte)0x40, (byte)0x80 }; - + /** The bit vector. */ BitSet bits; @@ -90,7 +90,7 @@ public class BloomFilter extends Filter { public BloomFilter() { super(); } - + /** * Constructor * @param vectorSize The vector size of this filter. @@ -183,7 +183,7 @@ public class BloomFilter extends Filter { bf.or(this); return bf; }//end clone() - + /** * @return size of the the bloomfilter */ @@ -228,7 +228,7 @@ public class BloomFilter extends Filter { } } } - + /* @return number of bytes needed to hold bit vector */ private int getNBytes() { return (vectorSize + 7) / 8; diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/CountingBloomFilter.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/CountingBloomFilter.java index 5e3471d..3ec9c6a 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/CountingBloomFilter.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/CountingBloomFilter.java @@ -2,30 +2,30 @@ * * Copyright (c) 2005, European Commission project OneLab under contract 034819 (http://www.one-lab.org) * All rights reserved. - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following * conditions are met: - * - Redistributions of source code must retain the above copyright + * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
- * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL - * nor the names of its contributors may be used to endorse or - * promote products derived from this software without specific prior + * nor the names of its contributors may be used to endorse or + * promote products derived from this software without specific prior * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** @@ -58,13 +58,13 @@ import org.apache.hadoop.hbase.util.Hash; * 2000 paper. *

      * A counting Bloom filter is an improvement to standard a Bloom filter as it - * allows dynamic additions and deletions of set membership information. This + * allows dynamic additions and deletions of set membership information. This * is achieved through the use of a counting vector instead of a bit vector. - * + * * contract European Commission One-Lab Project 034819. * * @version 1.1 - 19 Jan. 08 - * + * */ public final class CountingBloomFilter extends Filter implements Cloneable { /** Storage for the counting buckets */ @@ -75,7 +75,7 @@ public final class CountingBloomFilter extends Filter implements Cloneable { /** Default constructor - use with readFields */ public CountingBloomFilter() {} - + /** * Constructor * @param vectorSize The vector size of this filter. @@ -106,10 +106,10 @@ public final class CountingBloomFilter extends Filter implements Cloneable { // find the bucket int wordNum = h[i] >> 4; // div 16 int bucketShift = (h[i] & 0x0f) << 2; // (mod 16) * 4 - + long bucketMask = 15L << bucketShift; long bucketValue = (buckets[wordNum] & bucketMask) >>> bucketShift; - + // only increment if the count in the bucket is less than BUCKET_MAX_VALUE if(bucketValue < BUCKET_MAX_VALUE) { // increment by 1 @@ -139,10 +139,10 @@ public final class CountingBloomFilter extends Filter implements Cloneable { // find the bucket int wordNum = h[i] >> 4; // div 16 int bucketShift = (h[i] & 0x0f) << 2; // (mod 16) * 4 - + long bucketMask = 15L << bucketShift; long bucketValue = (buckets[wordNum] & bucketMask) >>> bucketShift; - + // only decrement if the count in the bucket is between 0 and BUCKET_MAX_VALUE if(bucketValue >= 1 && bucketValue < BUCKET_MAX_VALUE) { // decrement by 1 @@ -160,7 +160,7 @@ public final class CountingBloomFilter extends Filter implements Cloneable { throw new IllegalArgumentException("filters cannot be and-ed"); } CountingBloomFilter cbf = (CountingBloomFilter)filter; - + int sizeInWords = buckets2words(vectorSize); for(int i = 0; i < sizeInWords; i++) { this.buckets[i] &= cbf.buckets[i]; @@ -217,7 +217,7 @@ public final class CountingBloomFilter extends Filter implements Cloneable { // find the bucket int wordNum = h[i] >> 4; // div 16 int bucketShift = (h[i] & 0x0f) << 2; // (mod 16) * 4 - + long bucketMask = 15L << bucketShift; long bucketValue = (buckets[wordNum] & bucketMask) >>> bucketShift; if (bucketValue < res) res = (int)bucketValue; @@ -266,13 +266,13 @@ public final class CountingBloomFilter extends Filter implements Cloneable { if(i > 0) { res.append(" "); } - + int wordNum = i >> 4; // div 16 int bucketShift = (i & 0x0f) << 2; // (mod 16) * 4 - + long bucketMask = 15L << bucketShift; long bucketValue = (buckets[wordNum] & bucketMask) >>> bucketShift; - + res.append(bucketValue); } diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/DynamicBloomFilter.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/DynamicBloomFilter.java index 826f050..e8a88cc 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/DynamicBloomFilter.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/DynamicBloomFilter.java @@ -2,30 +2,30 @@ * * Copyright (c) 2005, European Commission project OneLab under contract 034819 (http://www.one-lab.org) * All rights reserved. 
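The bucket arithmetic above packs sixteen 4-bit counters into each long word. The following standalone sketch shows the same indexing scheme in isolation; the class and field names are hypothetical and not part of the patched code.

    // Illustrative sketch of sixteen 4-bit saturating counters per long word.
    public class FourBitBuckets {
      private static final long BUCKET_MAX_VALUE = 15; // a 4-bit counter saturates at 15
      private final long[] buckets;

      public FourBitBuckets(int nBuckets) {
        this.buckets = new long[(nBuckets + 15) / 16];  // 16 buckets per long word
      }

      /** Read the 4-bit counter at position h. */
      public long get(int h) {
        int wordNum = h >> 4;                // div 16: which long word
        int bucketShift = (h & 0x0f) << 2;   // (mod 16) * 4: bit offset within the word
        long bucketMask = 15L << bucketShift;
        return (buckets[wordNum] & bucketMask) >>> bucketShift;
      }

      /** Increment the counter at position h, saturating at BUCKET_MAX_VALUE. */
      public void inc(int h) {
        if (get(h) < BUCKET_MAX_VALUE) {
          buckets[h >> 4] += 1L << ((h & 0x0f) << 2);
        }
      }
    }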
- * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following * conditions are met: - * - Redistributions of source code must retain the above copyright + * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL - * nor the names of its contributors may be used to endorse or - * promote products derived from this software without specific prior + * nor the names of its contributors may be used to endorse or + * promote products derived from this software without specific prior * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** @@ -57,33 +57,33 @@ import org.apache.hadoop.hbase.util.Hash; * Implements a dynamic Bloom filter, as defined in the INFOCOM 2006 paper. *

      * A dynamic Bloom filter (DBF) makes use of a s * m bit matrix but - * each of the s rows is a standard Bloom filter. The creation + * each of the s rows is a standard Bloom filter. The creation * process of a DBF is iterative. At the start, the DBF is a 1 * m * bit matrix, i.e., it is composed of a single standard Bloom filter. - * It assumes that nr elements are recorded in the + * It assumes that nr elements are recorded in the * initial bit vector, where nr <= n (n is - * the cardinality of the set A to record in the filter). + * the cardinality of the set A to record in the filter). *

      * As the size of A grows during the execution of the application, * several keys must be inserted in the DBF. When inserting a key into the DBF, * one must first get an active Bloom filter in the matrix. A Bloom filter is - * active when the number of recorded keys, nr, is + * active when the number of recorded keys, nr, is * strictly less than the current cardinality of A, n. - * If an active Bloom filter is found, the key is inserted and + * If an active Bloom filter is found, the key is inserted and * nr is incremented by one. On the other hand, if there * is no active Bloom filter, a new one is created (i.e., a new row is added to * the matrix) according to the current size of A and the element * is added in this new Bloom filter and the nr value of * this new Bloom filter is set to one. A given key is said to belong to the * DBF if the k positions are set to one in one of the matrix rows. - * + * * contract European Commission One-Lab Project 034819. * * @version 1.0 - 6 Feb. 07 - * + * */ public class DynamicBloomFilter extends Filter implements Cloneable { - /** + /** * Threshold for the maximum number of key to record in a dynamic Bloom filter row. */ private int nr; diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/Filter.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/Filter.java index 1401479..8a3ceca 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/Filter.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/Filter.java @@ -2,32 +2,32 @@ * * Copyright (c) 2005, European Commission project OneLab under contract 034819 * (http://www.one-lab.org) - * + * * All rights reserved. - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following * conditions are met: - * - Redistributions of source code must retain the above copyright + * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL - * nor the names of its contributors may be used to endorse or - * promote products derived from this software without specific prior + * nor the names of its contributors may be used to endorse or + * promote products derived from this software without specific prior * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** @@ -62,17 +62,17 @@ import org.apache.hadoop.io.Writable; * Defines the general behavior of a filter. *

      * A filter is a data structure which aims at offering a lossy summary of a set A. The - * key idea is to map entries of A (also called keys) into several positions + * key idea is to map entries of A (also called keys) into several positions * in a vector through the use of several hash functions. *

      * Typically, a filter will be implemented as a Bloom filter (or a Bloom filter extension). *

      * It must be extended in order to define the real behavior. - * + * * @version 1.0 - 2 Feb. 07 */ public abstract class Filter implements Writable { - private static final int VERSION = -1; // negative to accommodate for old format + private static final int VERSION = -1; // negative to accommodate for old format /** The vector size of this filter. */ protected int vectorSize; @@ -81,13 +81,13 @@ public abstract class Filter implements Writable { /** The number of hash function to consider. */ protected int nbHash; - + /** Type of hashing function to use. */ protected int hashType; protected Filter() {} - - /** + + /** * Constructor. * @param vectorSize The vector size of this filter. * @param nbHash The number of hash functions to consider. @@ -184,9 +184,9 @@ public abstract class Filter implements Writable { add(keys[i]); } }//end add() - + // Writable interface - + public void write(DataOutput out) throws IOException { out.writeInt(VERSION); out.writeInt(this.nbHash); diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/HashFunction.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/HashFunction.java index bd1adbe..f85bfd7 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/HashFunction.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/HashFunction.java @@ -1,33 +1,33 @@ /** * - * Copyright (c) 2005, European Commission project OneLab under contract 034819 + * Copyright (c) 2005, European Commission project OneLab under contract 034819 * (http://www.one-lab.org) - * + * * All rights reserved. - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following * conditions are met: - * - Redistributions of source code must retain the above copyright + * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL - * nor the names of its contributors may be used to endorse or - * promote products derived from this software without specific prior + * nor the names of its contributors may be used to endorse or + * promote products derived from this software without specific prior * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** @@ -54,8 +54,8 @@ import org.apache.hadoop.hbase.util.Hash; /** * Implements a hash object that returns a certain number of hashed values. *

      - * It is based on the SHA-1 algorithm. - * + * It is based on the SHA-1 algorithm. + * * @version 1.0 - 2 Feb. 07 */ public final class HashFunction { @@ -67,7 +67,7 @@ public final class HashFunction { /** Hashing algorithm to use. */ private Hash hashFunction; - + /** * Constructor. *

      @@ -80,7 +80,7 @@ public final class HashFunction { if(maxValue <= 0) { throw new IllegalArgumentException("maxValue must be > 0"); } - + if(nbHash <= 0) { throw new IllegalArgumentException("nbHash must be > 0"); } @@ -115,6 +115,6 @@ public final class HashFunction { result[i] = Math.abs(initval) % maxValue; } return result; - }//end hash() + }//end hash() }//end class diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/Key.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/Key.java index 5576c4b..11b9b2f 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/Key.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/Key.java @@ -2,30 +2,30 @@ * * Copyright (c) 2005, European Commission project OneLab under contract 034819 (http://www.one-lab.org) * All rights reserved. - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following * conditions are met: - * - Redistributions of source code must retain the above copyright + * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL - * nor the names of its contributors may be used to endorse or - * promote products derived from this software without specific prior + * nor the names of its contributors may be used to endorse or + * promote products derived from this software without specific prior * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** @@ -59,11 +59,11 @@ import org.apache.hadoop.io.WritableComparable; public class Key implements WritableComparable { /** Byte value of key */ byte[] bytes; - + /** * The weight associated to this key. *

      - * Invariant: if it is not specified, each instance of + * Invariant: if it is not specified, each instance of * Key will have a default weight of 1.0 */ double weight; @@ -103,7 +103,7 @@ public class Key implements WritableComparable { this.bytes = value; this.weight = weight; } - + /** @return byte[] The value of this key. */ public byte[] getBytes() { return this.bytes; @@ -115,7 +115,7 @@ public class Key implements WritableComparable { }//end getWeight() /** - * Increments the weight of this key with a specified value. + * Increments the weight of this key with a specified value. * @param weight The increment. */ public void incrementWeight(double weight){ @@ -131,7 +131,7 @@ public class Key implements WritableComparable { public boolean equals(Object o) { return this.compareTo(o) == 0; } - + @Override public int hashCode() { int result = 0; @@ -149,15 +149,15 @@ public class Key implements WritableComparable { out.write(bytes); out.writeDouble(weight); } - + public void readFields(DataInput in) throws IOException { this.bytes = new byte[in.readInt()]; in.readFully(this.bytes); weight = in.readDouble(); } - + // Comparable - + public int compareTo(Object o) { Key other = (Key)o; @@ -165,7 +165,7 @@ public class Key implements WritableComparable { for(int i = 0; result == 0 && i < bytes.length; i++) { result = this.bytes[i] - other.bytes[i]; } - + if(result == 0) { result = Double.valueOf(this.weight - other.weight).intValue(); } diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/RemoveScheme.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/RemoveScheme.java index 4f68358..23c7a69 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/RemoveScheme.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/RemoveScheme.java @@ -2,32 +2,32 @@ * * Copyright (c) 2005, European Commission project OneLab under contract 034819 * (http://www.one-lab.org) - * + * * All rights reserved. - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following * conditions are met: - * - Redistributions of source code must retain the above copyright + * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL - * nor the names of its contributors may be used to endorse or - * promote products derived from this software without specific prior + * nor the names of its contributors may be used to endorse or + * promote products derived from this software without specific prior * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** @@ -51,7 +51,7 @@ package org.apache.hadoop.hbase.migration.nineteen.onelab.filter; /** * Defines the different remove scheme for retouched Bloom filters. - * + * * contract European Commission One-Lab Project 034819. * * @version 1.0 - 7 Feb. 07 diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/RetouchedBloomFilter.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/RetouchedBloomFilter.java index 083d8f3..b4e3265 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/RetouchedBloomFilter.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/RetouchedBloomFilter.java @@ -2,30 +2,30 @@ * * Copyright (c) 2005, European Commission project OneLab under contract 034819 (http://www.one-lab.org) * All rights reserved. - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following * conditions are met: - * - Redistributions of source code must retain the above copyright + * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL - * nor the names of its contributors may be used to endorse or - * promote products derived from this software without specific prior + * nor the names of its contributors may be used to endorse or + * promote products derived from this software without specific prior * written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** @@ -64,11 +64,11 @@ import org.apache.hadoop.hbase.util.Hash; * It allows the removal of selected false positives at the cost of introducing * random false negatives, and with the benefit of eliminating some random false * positives at the same time. - * + * * contract European Commission One-Lab Project 034819. * * @version 1.0 - 7 Feb. 07 - * + * */ public final class RetouchedBloomFilter extends BloomFilter implements RemoveScheme { @@ -86,12 +86,12 @@ implements RemoveScheme { * Ratio vector. */ double[] ratio; - + private Random rand; /** Default constructor - use with readFields */ public RetouchedBloomFilter() {} - + /** * Constructor * @param vectorSize The vector size of this filter. @@ -147,7 +147,7 @@ implements RemoveScheme { if(coll == null) { throw new NullPointerException("Collection can not be null"); } - + for(Key k: coll) { addFalsePositive(k); } @@ -203,19 +203,19 @@ implements RemoveScheme { case RANDOM: index = randomRemove(); break; - + case MINIMUM_FN: index = minimumFnRemove(h); break; - + case MAXIMUM_FP: index = maximumFpRemove(h); break; - + case RATIO: index = ratioRemove(h); break; - + default: throw new AssertionError("Undefined selective clearing scheme"); @@ -375,7 +375,7 @@ implements RemoveScheme { } return weight; } - + /** * Creates and initialises the various vectors. */ @@ -391,7 +391,7 @@ implements RemoveScheme { ratio[i] = 0.0; }//end for -i }//end createVector() - + // Writable @Override diff --git a/src/java/org/apache/hadoop/hbase/migration/nineteen/regionserver/HStoreFile.java b/src/java/org/apache/hadoop/hbase/migration/nineteen/regionserver/HStoreFile.java index 9497425..4576cdf 100644 --- a/src/java/org/apache/hadoop/hbase/migration/nineteen/regionserver/HStoreFile.java +++ b/src/java/org/apache/hadoop/hbase/migration/nineteen/regionserver/HStoreFile.java @@ -45,7 +45,7 @@ import org.apache.hadoop.io.SequenceFile; /** * A HStore data file. 
HStores usually have one or more of these files. They * are produced by flushing the memcache to disk. - * + * *

      This one has been doctored to be used in migrations. Private and * protecteds have been made public, etc. * @@ -53,14 +53,14 @@ import org.apache.hadoop.io.SequenceFile; * mix of the parent dir, the region name, the column name, and a file * identifier. The name may also be a reference to a store file located * elsewhere. This class handles all that path-building stuff for you. - * + * *

      An HStoreFile usually tracks 4 things: its parent dir, the region * identifier, the column family, and the file identifier. If you know those * four things, you know how to obtain the right HStoreFile. HStoreFiles may * also reference store files in another region serving either from * the top-half of the remote file or from the bottom-half. Such references * are made fast splitting regions. - * + * *

      Plain HStoreFiles are named for a randomly generated id as in: * 1278437856009925445 A file by this name is made in both the * mapfiles and info subdirectories of a @@ -70,7 +70,7 @@ import org.apache.hadoop.io.SequenceFile; * file named something like 1278437856009925445, one to hold the * data in 'mapfiles' and one under 'info' that holds the sequence id for this * store file. - * + * *

      References to store files located over in some other region look like * this: * 1278437856009925445.hbaserepository,qAReLZD-OyQORZWq_vqR1k==,959247014679548184: @@ -81,8 +81,8 @@ import org.apache.hadoop.io.SequenceFile; * to serve the top or bottom region of the remote store file. Note, a region * is not splitable if it has instances of store file references (References * are cleaned up by compactions). - * - *

      When merging or splitting HRegions, we might want to modify one of the + * + *

      When merging or splitting HRegions, we might want to modify one of the * params for an HStoreFile (effectively moving it elsewhere). */ public class HStoreFile implements HConstants { @@ -92,7 +92,7 @@ public class HStoreFile implements HConstants { static final String HSTORE_DATFILE_DIR = "mapfiles"; static final String HSTORE_INFO_DIR = "info"; static final String HSTORE_FILTER_DIR = "filter"; - + private final static Random rand = new Random(); private final Path basedir; @@ -124,7 +124,7 @@ public class HStoreFile implements HConstants { throws IOException { this(conf, fs, basedir, encodedName, colFamily, fileId, ref, false); } - + /** * Constructor that fully initializes the object * @param conf Configuration object @@ -147,7 +147,7 @@ public class HStoreFile implements HConstants { this.colFamily = colFamily; // NOT PASSED IN MIGRATIONS this.hri = null; - + long id = fileId; if (id == -1) { Path mapdir = HStoreFile.getMapDir(basedir, encodedRegionName, colFamily); @@ -158,7 +158,7 @@ public class HStoreFile implements HConstants { } while(fs.exists(testpath)); } this.fileId = id; - + // If a reference, construction does not write the pointer files. Thats // done by invocations of writeReferenceFiles(hsf, fs). Happens at split. this.reference = ref; @@ -169,7 +169,7 @@ public class HStoreFile implements HConstants { boolean isReference() { return reference != null; } - + private static final Pattern REF_NAME_PARSER = Pattern.compile("^(\\d+)(?:\\.(.+))?$"); @@ -210,7 +210,7 @@ public class HStoreFile implements HConstants { } // Build full filenames from those components - + /** @return path for MapFile */ Path getMapFilePath() { if (isReference()) { @@ -230,10 +230,10 @@ public class HStoreFile implements HConstants { private Path getMapFilePath(final int encodedName, final long fid) { return getMapFilePath(encodedName, fid, HRegionInfo.NO_HASH); } - + private Path getMapFilePath(final int encodedName, final long fid, final int ern) { - return new Path(HStoreFile.getMapDir(basedir, encodedName, colFamily), + return new Path(HStoreFile.getMapDir(basedir, encodedName, colFamily), createHStoreFilename(fid, ern)); } @@ -242,18 +242,18 @@ public class HStoreFile implements HConstants { if (isReference()) { return getInfoFilePath(encodedRegionName, fileId, reference.getEncodedRegionName()); - + } return getInfoFilePath(encodedRegionName, fileId); } - + private Path getInfoFilePath(final int encodedName, final long fid) { return getInfoFilePath(encodedName, fid, HRegionInfo.NO_HASH); } - + private Path getInfoFilePath(final int encodedName, final long fid, final int ern) { - return new Path(HStoreFile.getInfoDir(basedir, encodedName, colFamily), + return new Path(HStoreFile.getInfoDir(basedir, encodedName, colFamily), createHStoreFilename(fid, ern)); } @@ -282,17 +282,17 @@ public class HStoreFile implements HConstants { dstA.writeReferenceFiles(fs); dstB.writeReferenceFiles(fs); } - + void writeReferenceFiles(final FileSystem fs) throws IOException { createOrFail(fs, getMapFilePath()); writeSplitInfo(fs); } - + /* * If reference, create and write the remote store file id, the midkey and * whether we're going against the top file region of the referent out to - * the info file. + * the info file. * @param p Path to info file. 
* @param hsf * @param fs @@ -336,7 +336,7 @@ public class HStoreFile implements HConstants { } } - /** + /** * Reads in an info file * * @param filesystem file system @@ -370,10 +370,10 @@ public class HStoreFile implements HConstants { in.close(); } } - + /** * Writes the file-identifier to disk - * + * * @param filesystem file system * @param infonum file id * @throws IOException @@ -382,10 +382,10 @@ public class HStoreFile implements HConstants { throws IOException { writeInfo(filesystem, infonum, false); } - + /** * Writes the file-identifier to disk - * + * * @param filesystem file system * @param infonum file id * @param mc True if this file is product of a major compaction @@ -412,13 +412,13 @@ public class HStoreFile implements HConstants { /** * Delete store map files. - * @throws IOException + * @throws IOException */ public void delete() throws IOException { fs.delete(getMapFilePath(), true); fs.delete(getInfoFilePath(), true); } - + /** * Renames the mapfiles and info directories under the passed * hsf directory. @@ -448,7 +448,7 @@ public class HStoreFile implements HConstants { } return success; } - + /** * Get reader for the store file map file. * Client is responsible for closing file when done. @@ -463,7 +463,7 @@ public class HStoreFile implements HConstants { throws IOException { if (isReference()) { return new HalfMapFileReader(fs, - getMapFilePath(reference).toString(), conf, + getMapFilePath(reference).toString(), conf, reference.getFileRegion(), reference.getMidkey(), bloomFilter, blockCacheEnabled, this.hri); } @@ -522,7 +522,7 @@ public class HStoreFile implements HConstants { return encodedRegionName + "/" + Bytes.toString(colFamily) + "/" + fileId + (isReference()? "-" + reference.toString(): ""); } - + /** * @return True if this file was made by a major compaction. */ @@ -532,7 +532,7 @@ public class HStoreFile implements HConstants { private static String createHStoreFilename(final long fid, final int encodedRegionName) { - return Long.toString(fid) + + return Long.toString(fid) + ((encodedRegionName != HRegionInfo.NO_HASH)? "." + encodedRegionName : ""); } @@ -569,7 +569,7 @@ public class HStoreFile implements HConstants { final byte [] f) { return getFamilySubDir(dir, encodedRegionName, f, HSTORE_FILTER_DIR); } - + /* * @param base Base directory * @param encodedRegionName Encoding of region name. 
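
Aside: the HStoreFile.java hunks above quote the store-file naming convention (a numeric file id, optionally followed by "." and the encoded name of the region a reference points at) together with the REF_NAME_PARSER pattern that splits such names apart. The following is a minimal standalone sketch of that convention, not code from this patch; the class name and the NO_HASH placeholder for HRegionInfo.NO_HASH are illustrative assumptions.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class StoreFileNameSketch {
  // Stand-in for HRegionInfo.NO_HASH: marks a plain (non-reference) store file.
  private static final int NO_HASH = -1;
  // Mirrors REF_NAME_PARSER in HStoreFile: "<fileid>" or "<fileid>.<encodedRegionName>".
  private static final Pattern REF_NAME_PARSER =
      Pattern.compile("^(\\d+)(?:\\.(.+))?$");

  // Builds a name the same way createHStoreFilename does: the id alone for a
  // plain file, or id + "." + encoded region name for a reference.
  static String createName(final long fileId, final int encodedRegionName) {
    return Long.toString(fileId)
        + (encodedRegionName != NO_HASH ? "." + encodedRegionName : "");
  }

  // Splits a name back into its file id and optional referenced-region suffix.
  static void parse(final String name) {
    Matcher m = REF_NAME_PARSER.matcher(name);
    if (!m.matches()) {
      throw new IllegalArgumentException(name + " is not a store file name");
    }
    System.out.println("file id = " + m.group(1)
        + (m.group(2) != null ? ", reference to region " + m.group(2) : ", plain file"));
  }

  public static void main(String[] args) {
    parse(createName(1278437856009925445L, NO_HASH));    // plain store file
    parse(createName(1278437856009925445L, 959247014));  // reference-style name
  }
}
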
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java b/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java index e73a900..d86ecd3 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java @@ -29,7 +29,7 @@ public class ColumnCount { private final int offset; private final int length; private int count; - + /** * Constructor * @param column the qualifier to count the versions for @@ -37,7 +37,7 @@ public class ColumnCount { public ColumnCount(byte [] column) { this(column, 0); } - + /** * Constructor * @param column the qualifier to count the versions for @@ -46,7 +46,7 @@ public class ColumnCount { public ColumnCount(byte [] column, int count) { this(column, 0, column.length, count); } - + /** * Constuctor * @param column the qualifier to count the versions for @@ -60,28 +60,28 @@ public class ColumnCount { this.length = length; this.count = count; } - + /** * @return the buffer */ public byte [] getBuffer(){ return this.bytes; } - + /** * @return the offset */ public int getOffset(){ return this.offset; } - + /** * @return the length */ public int getLength(){ return this.length; - } - + } + /** * Decrement the current version count * @return current count @@ -105,7 +105,7 @@ public class ColumnCount { public void setCount(int count) { this.count = count; } - + /** * Check to see if needed to fetch more versions * @param max diff --git a/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java b/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java index dfb3026..35b03a1 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode; /** * Implementing classes of this interface will be used for the tracking - * and enforcement of columns and numbers of versions during the course of a + * and enforcement of columns and numbers of versions during the course of a * Get or Scan operation. *

      * Currently there are two different types of Store/Family-level queries. @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode; * what action should be taken. *

    • {@link #update} is called at the end of every StoreFile or memstore. *

      - * This class is NOT thread-safe as queries are never multi-threaded + * This class is NOT thread-safe as queries are never multi-threaded */ public interface ColumnTracker { /** @@ -49,19 +49,19 @@ public interface ColumnTracker { * @return The match code instance. */ public MatchCode checkColumn(byte [] bytes, int offset, int length); - + /** * Updates internal variables in between files */ public void update(); - + /** * Resets the Matcher */ public void reset(); - + /** - * + * * @return true when done. */ public boolean done(); diff --git a/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 86e7a06..0f1c8c3 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.util.StringUtils; -/** +/** * Compact region on request and then run split if appropriate * * NOTE: This class extends Thread rather than Chore because the sleep time @@ -46,18 +46,18 @@ import org.apache.hadoop.util.StringUtils; */ class CompactSplitThread extends Thread implements HConstants { static final Log LOG = LogFactory.getLog(CompactSplitThread.class); - + private HTable root = null; private HTable meta = null; private final long frequency; private final ReentrantLock lock = new ReentrantLock(); - + private final HRegionServer server; private final HBaseConfiguration conf; - + private final BlockingQueue compactionQueue = new LinkedBlockingQueue(); - + private final HashSet regionsInQueue = new HashSet(); /** @param server */ @@ -69,7 +69,7 @@ class CompactSplitThread extends Thread implements HConstants { conf.getLong("hbase.regionserver.thread.splitcompactcheckfrequency", 20 * 1000); } - + @Override public void run() { while (!this.server.isStopRequested() && this.server.isInSafeMode()) { @@ -154,7 +154,7 @@ class CompactSplitThread extends Thread implements HConstants { } } } - + private void split(final HRegion region, final byte [] midKey) throws IOException { final HRegionInfo oldRegionInfo = region.getRegionInfo(); @@ -164,7 +164,7 @@ class CompactSplitThread extends Thread implements HConstants { // Didn't need to be split return; } - + // When a region is split, the META table needs to updated if we're // splitting a 'normal' region, and the ROOT table needs to be // updated if we are splitting a META region. @@ -189,16 +189,16 @@ class CompactSplitThread extends Thread implements HConstants { oldRegionInfo.setSplit(true); // Inform the HRegionServer that the parent HRegion is no-longer online. this.server.removeFromOnlineRegions(oldRegionInfo); - + Put put = new Put(oldRegionInfo.getRegionName()); - put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, + put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(oldRegionInfo)); put.add(CATALOG_FAMILY, SPLITA_QUALIFIER, Writables.getBytes(newRegions[0].getRegionInfo())); put.add(CATALOG_FAMILY, SPLITB_QUALIFIER, Writables.getBytes(newRegions[1].getRegionInfo())); t.put(put); - + // If we crash here, then the daughters will not be added and we'll have // and offlined parent but no daughters to take up the slack. hbase-2244 // adds fixup to the metascanners. 
@@ -210,7 +210,7 @@ class CompactSplitThread extends Thread implements HConstants { newRegions[i].getRegionInfo())); t.put(put); } - + // If we crash here, the master will not know of the new daughters and they // will not be assigned. The metascanner when it runs will notice and take // care of assigning the new daughters. @@ -228,7 +228,7 @@ class CompactSplitThread extends Thread implements HConstants { /** * Only interrupt once it's done with a run through the work loop. - */ + */ void interruptIfNecessary() { if (lock.tryLock()) { this.interrupt(); diff --git a/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java b/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java index bf7608d..95a4c63 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java @@ -25,10 +25,10 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * Class that provides static method needed when putting deletes into memstore + * Class that provides static method needed when putting deletes into memstore */ public class DeleteCompare { - + /** * Return codes from deleteCompare. */ @@ -37,12 +37,12 @@ public class DeleteCompare { * Do nothing. Move to next KV in memstore */ SKIP, - + /** * Add to the list of deletes. */ DELETE, - + /** * Stop looking at KVs in memstore. Finalize. */ @@ -134,5 +134,5 @@ public class DeleteCompare { } else { return DeleteCode.SKIP; } - } + } } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java b/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java index c5bc695..5d63fd5 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java @@ -26,10 +26,10 @@ package org.apache.hadoop.hbase.regionserver; * This class is utilized through three methods: *

      • {@link #add} when encountering a Delete *
      • {@link #isDeleted} when checking if a Put KeyValue has been deleted - *
      • {@link #update} when reaching the end of a StoreFile + *
      • {@link #update} when reaching the end of a StoreFile */ public interface DeleteTracker { - + /** * Add the specified KeyValue to the list of deletes to check against for * this row operation. @@ -43,7 +43,7 @@ public interface DeleteTracker { */ public void add(byte [] buffer, int qualifierOffset, int qualifierLength, long timestamp, byte type); - + /** * Check if the specified KeyValue buffer has been deleted by a previously * seen delete. @@ -55,12 +55,12 @@ public interface DeleteTracker { */ public boolean isDeleted(byte [] buffer, int qualifierOffset, int qualifierLength, long timestamp); - + /** * @return true if there are no current delete, false otherwise */ public boolean isEmpty(); - + /** * Called at the end of every StoreFile. *

        @@ -68,14 +68,14 @@ public interface DeleteTracker { * when the end of each StoreFile is reached. */ public void update(); - + /** * Called between rows. *

        * This clears everything as if a new DeleteTracker was instantiated. */ public void reset(); - + /** * Return codes for comparison of two Deletes. @@ -85,7 +85,7 @@ public interface DeleteTracker { * INCLUDE means add the specified Delete to the merged list. * NEXT means move to the next element in the specified list(s). */ - enum DeleteCompare { + enum DeleteCompare { INCLUDE_OLD_NEXT_OLD, INCLUDE_OLD_NEXT_BOTH, INCLUDE_NEW_NEXT_NEW, @@ -93,5 +93,5 @@ public interface DeleteTracker { NEXT_OLD, NEXT_NEW } - + } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java b/src/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java index d814f90..fd07ed5 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode; import org.apache.hadoop.hbase.util.Bytes; /** - * This class is used for the tracking and enforcement of columns and numbers + * This class is used for the tracking and enforcement of columns and numbers * of versions during the course of a Get or Scan operation, when explicit * column qualifiers have been asked for in the query. * @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes; * for both scans and gets. The main difference is 'next' and 'done' collapse * for the scan case (since we see all columns in order), and we only reset * between rows. - * + * *

        * This class is utilized by {@link QueryMatcher} through two methods: *

        • {@link #checkColumn} is called when a Put satisfies all other @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.util.Bytes; * what action should be taken. *
        • {@link #update} is called at the end of every StoreFile or memstore. *

          - * This class is NOT thread-safe as queries are never multi-threaded + * This class is NOT thread-safe as queries are never multi-threaded */ public class ExplicitColumnTracker implements ColumnTracker { @@ -51,7 +51,7 @@ public class ExplicitColumnTracker implements ColumnTracker { private final List columnsToReuse; private int index; private ColumnCount column; - + /** * Default constructor. * @param columns columns specified user in query @@ -66,7 +66,7 @@ public class ExplicitColumnTracker implements ColumnTracker { } reset(); } - + /** * Done when there are no more columns to match against. */ @@ -77,7 +77,7 @@ public class ExplicitColumnTracker implements ColumnTracker { public ColumnCount getColumnHint() { return this.column; } - + /** * Checks against the parameters of the query and the columns which have * already been processed by this query. @@ -135,7 +135,7 @@ public class ExplicitColumnTracker implements ColumnTracker { } } while(true); } - + /** * Called at the end of every StoreFile or memstore. */ diff --git a/src/java/org/apache/hadoop/hbase/regionserver/FailedLogCloseException.java b/src/java/org/apache/hadoop/hbase/regionserver/FailedLogCloseException.java index cc2cc86..e4f4a60 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/FailedLogCloseException.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/FailedLogCloseException.java @@ -29,7 +29,7 @@ class FailedLogCloseException extends IOException { private static final long serialVersionUID = 1759152841462990925L; /** - * + * */ public FailedLogCloseException() { super(); diff --git a/src/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java b/src/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java index e1db18f..f4d3230 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java @@ -29,7 +29,7 @@ package org.apache.hadoop.hbase.regionserver; public interface FlushRequester { /** * Tell the listener the cache needs to be flushed. - * + * * @param region the HRegion requesting the cache flush */ void request(HRegion region); diff --git a/src/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java b/src/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java index ecd44f7..52fbbd3 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java @@ -129,7 +129,7 @@ class GetClosestRowBeforeTracker { return isDeleted(kv, rowdeletes); } - /** + /** * Check if the specified KeyValue buffer has been deleted by a previously * seen delete. * @param kv @@ -237,4 +237,4 @@ class GetClosestRowBeforeTracker { this.tablenamePlusDelimiterLength, kv.getBuffer(), kv.getRowOffset(), this.tablenamePlusDelimiterLength) == 0; } -} \ No newline at end of file +} \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java b/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java index b865f50..8e68d65 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.util.Bytes; *

        • {@link #isDeleted} when checking if a Put KeyValue has been deleted *
        • {@link #update} when reaching the end of a StoreFile *

          - * This class is NOT thread-safe as queries are never multi-threaded + * This class is NOT thread-safe as queries are never multi-threaded */ public class GetDeleteTracker implements DeleteTracker { private static long UNSET = -1L; @@ -76,7 +76,7 @@ public class GetDeleteTracker implements DeleteTracker { } } - /** + /** * Check if the specified KeyValue buffer has been deleted by a previously * seen delete. * @param buffer KeyValue buffer @@ -100,7 +100,7 @@ public class GetDeleteTracker implements DeleteTracker { // Check column int ret = Bytes.compareTo(buffer, qualifierOffset, qualifierLength, - this.delete.buffer, this.delete.qualifierOffset, + this.delete.buffer, this.delete.qualifierOffset, this.delete.qualifierLength); while (ret != 0) { if (ret <= -1) { @@ -120,7 +120,7 @@ public class GetDeleteTracker implements DeleteTracker { } } - + // Check Timestamp if(timestamp > this.delete.timestamp) { return false; @@ -186,7 +186,7 @@ public class GetDeleteTracker implements DeleteTracker { } // Merge previous deletes with new deletes - List mergeDeletes = + List mergeDeletes = new ArrayList(this.newDeletes.size()); int oldIndex = 0; int newIndex = 0; @@ -295,7 +295,7 @@ public class GetDeleteTracker implements DeleteTracker { } } - private void mergeDown(List mergeDeletes, List srcDeletes, + private void mergeDown(List mergeDeletes, List srcDeletes, int srcIndex) { int index = srcIndex; while(index < srcDeletes.size()) { @@ -335,7 +335,7 @@ public class GetDeleteTracker implements DeleteTracker { } if(oldDelete.timestamp < newDelete.timestamp) { return DeleteCompare.INCLUDE_NEW_NEXT_BOTH; - } + } return DeleteCompare.INCLUDE_OLD_NEXT_BOTH; } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HLog.java b/src/java/org/apache/hadoop/hbase/regionserver/HLog.java index d6a1012..0113d07 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/HLog.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/HLog.java @@ -75,7 +75,7 @@ import org.apache.hadoop.io.SequenceFile.Reader; import org.apache.hadoop.io.compress.DefaultCodec; /** - * HLog stores all the edits to the HStore. Its the hbase write-ahead-log + * HLog stores all the edits to the HStore. Its the hbase write-ahead-log * implementation. * * It performs logfile-rolling, so external callers are not aware that the @@ -128,7 +128,7 @@ public class HLog implements HConstants, Syncable { private int initialReplication; // initial replication factor of SequenceFile.writer private Method getNumCurrentReplicas; // refers to DFSOutputStream.getNumCurrentReplicas private final static Object [] NO_ARGS = new Object []{}; - + // used to indirectly tell syncFs to force the sync private boolean forceSync = false; @@ -138,7 +138,7 @@ public class HLog implements HConstants, Syncable { SequenceFile.Writer writer; /* - * Map of all log files but the current one. + * Map of all log files but the current one. */ final SortedMap outputfiles = Collections.synchronizedSortedMap(new TreeMap()); @@ -156,11 +156,11 @@ public class HLog implements HConstants, Syncable { // The timestamp (in ms) when the log file was created. private volatile long filenum = -1; - + //number of transactions in the current Hlog. private final AtomicInteger numEntries = new AtomicInteger(0); - // If > than this size, roll the log. This is typically 0.95 times the size + // If > than this size, roll the log. This is typically 0.95 times the size // of the default Hdfs block size. 
private final long logrollsize; @@ -173,7 +173,7 @@ public class HLog implements HConstants, Syncable { private final Object updateLock = new Object(); private final boolean enabled; - + /* * If more than this many logs, force flush of oldest region to oldest edit * goes to disk. If too many and we crash, then will take forever replaying. @@ -274,8 +274,8 @@ public class HLog implements HConstants, Syncable { this.getNumCurrentReplicas = null; if(this.hdfs_out != null) { try { - this.getNumCurrentReplicas = - this.hdfs_out.getClass().getMethod("getNumCurrentReplicas", + this.getNumCurrentReplicas = + this.hdfs_out.getClass().getMethod("getNumCurrentReplicas", new Class []{}); this.getNumCurrentReplicas.setAccessible(true); } catch (NoSuchMethodException e) { @@ -290,7 +290,7 @@ public class HLog implements HConstants, Syncable { } else { LOG.info("getNumCurrentReplicas--HDFS-826 not available" ); } - + // Test if syncfs is available. Method m = null; if (isAppend(conf)) { @@ -306,7 +306,7 @@ public class HLog implements HConstants, Syncable { } } this.syncfs = m; - + logSyncerThread = new LogSyncer(this.optionalFlushInterval); Threads.setDaemonThreadRunning(logSyncerThread, Thread.currentThread().getName() + ".logSyncer"); @@ -328,7 +328,7 @@ public class HLog implements HConstants, Syncable { // Compression makes no sense for commit log. Always return NONE. return CompressionType.NONE; } - + /** * Called by HRegionServer when it opens a new region to ensure that log * sequence numbers are always greater than the latest sequence number of the @@ -345,7 +345,7 @@ public class HLog implements HConstants, Syncable { LOG.debug("Change sequence number from " + logSeqNum + " to " + newvalue); } } - + /** * @return log sequence number */ @@ -398,7 +398,7 @@ public class HLog implements HConstants, Syncable { final Field field = writer.getClass().getDeclaredField("out"); field.setAccessible(true); // get variable: writer.out - FSDataOutputStream writer_out = + FSDataOutputStream writer_out = (FSDataOutputStream)field.get(writer); // writer's OutputStream: writer.out.getWrappedStream() // important: only valid for the lifetime of this.writer @@ -409,7 +409,7 @@ public class HLog implements HConstants, Syncable { LOG.error("Problem obtaining hdfs_out: " + ex); this.hdfs_out = null; } - + LOG.info((oldFile != null? "Roll " + FSUtils.getPath(oldFile) + ", entries=" + this.numEntries.get() + @@ -442,7 +442,7 @@ public class HLog implements HConstants, Syncable { protected SequenceFile.Writer createWriter(Path path) throws IOException { return createWriter(path, HLogKey.class, WALEdit.class); } - + // usage: see TestLogRolling.java OutputStream getOutputStream() { return this.hdfs_out; @@ -457,7 +457,7 @@ public class HLog implements HConstants, Syncable { SequenceFile.CompressionType.NONE, new DefaultCodec(), null, new Metadata()); } - + /* * Clean up old commit logs. * @return If lots of logs, flush the returned region so next time through @@ -566,7 +566,7 @@ public class HLog implements HConstants, Syncable { FailedLogCloseException flce = new FailedLogCloseException("#" + currentfilenum); flce.initCause(e); - throw e; + throw e; } if (currentfilenum >= 0) { oldFile = computeFilename(currentfilenum); @@ -632,7 +632,7 @@ public class HLog implements HConstants, Syncable { } /** Append an entry to the log. - * + * * @param regionInfo * @param logEdit * @param now Time of this edit write. 
@@ -656,11 +656,11 @@ public class HLog implements HConstants, Syncable { protected HLogKey makeKey(byte[] regionName, byte[] tableName, long seqnum, long now) { return new HLogKey(regionName, tableName, seqnum, now); } - - - + + + /** Append an entry to the log. - * + * * @param regionInfo * @param logEdit * @param logKey @@ -749,13 +749,13 @@ public class HLog implements HConstants, Syncable { * it happens. */ class LogSyncer extends Thread { - + // Using fairness to make sure locks are given in order private final ReentrantLock lock = new ReentrantLock(true); // Condition used to wait until we have something to sync private final Condition queueEmpty = lock.newCondition(); - + // Condition used to signal that the sync is done private final Condition syncDone = lock.newCondition(); @@ -774,7 +774,7 @@ public class HLog implements HConstants, Syncable { // awaiting with a timeout doesn't always // throw exceptions on interrupt while(!this.isInterrupted()) { - + // Wait until something has to be hflushed or do it if we waited // enough time (useful if something appends but does not hflush). // 0 or less means that it timed out and maybe waited a bit more. @@ -782,12 +782,12 @@ public class HLog implements HConstants, Syncable { this.optionalFlushInterval*1000000) <= 0)) { forceSync = true; } - + // We got the signal, let's hflush. We currently own the lock so new // writes are waiting to acquire it in addToSyncQueue while the ones // we hflush are waiting on await() hflush(); - + // Release all the clients waiting on the hflush. Notice that we still // own the lock until we get back to await at which point all the // other threads waiting will first acquire and release locks @@ -805,13 +805,13 @@ public class HLog implements HConstants, Syncable { LOG.info(getName() + " exiting"); } } - + /** * This method first signals the thread that there's a sync needed * and then waits for it to happen before returning. */ public void addToSyncQueue(boolean force) { - + // Don't bother if somehow our append was already hflush if (unflushedEntries.get() == 0) { return; @@ -827,7 +827,7 @@ public class HLog implements HConstants, Syncable { } // Wake the thread queueEmpty.signal(); - + // Wait for it to hflush syncDone.await(); } catch (InterruptedException e) { @@ -851,7 +851,7 @@ public class HLog implements HConstants, Syncable { public void sync(boolean force) { logSyncerThread.addToSyncQueue(force); } - + protected void hflush() throws IOException { synchronized (this.updateLock) { if (this.closed) { @@ -867,7 +867,7 @@ public class HLog implements HConstants, Syncable { this.writer.sync(); if (this.syncfs != null) { try { - this.syncfs.invoke(this.writer, NO_ARGS); + this.syncfs.invoke(this.writer, NO_ARGS); } catch (Exception e) { throw new IOException("Reflection", e); } @@ -876,23 +876,23 @@ public class HLog implements HConstants, Syncable { this.syncOps++; this.forceSync = false; this.unflushedEntries.set(0); - - // if the number of replicas in HDFS has fallen below the initial - // value, then roll logs. + + // if the number of replicas in HDFS has fallen below the initial + // value, then roll logs. try { int numCurrentReplicas = getLogReplication(); - if (numCurrentReplicas != 0 && - numCurrentReplicas < this.initialReplication) { - LOG.warn("HDFS pipeline error detected. " + + if (numCurrentReplicas != 0 && + numCurrentReplicas < this.initialReplication) { + LOG.warn("HDFS pipeline error detected. 
" + "Found " + numCurrentReplicas + " replicas but expecting " + - this.initialReplication + " replicas. " + - " Requesting close of hlog."); + this.initialReplication + " replicas. " + + " Requesting close of hlog."); requestLogRoll(); logRollRequested = true; - } - } catch (Exception e) { + } + } catch (Exception e) { LOG.warn("Unable to invoke DFSOutputStream.getNumCurrentReplicas" + e + - " still proceeding ahead..."); + " still proceeding ahead..."); } } catch (IOException e) { LOG.fatal("Could not append. Requesting close of hlog", e); @@ -906,26 +906,26 @@ public class HLog implements HConstants, Syncable { } } } - + /** * This method gets the datanode replication count for the current HLog. * - * If the pipeline isn't started yet or is empty, you will get the default - * replication factor. Therefore, if this function returns 0, it means you + * If the pipeline isn't started yet or is empty, you will get the default + * replication factor. Therefore, if this function returns 0, it means you * are not properly running with the HDFS-826 patch. - * + * * @throws Exception */ int getLogReplication() throws Exception { if(this.getNumCurrentReplicas != null && this.hdfs_out != null) { Object repl = this.getNumCurrentReplicas.invoke(this.hdfs_out, NO_ARGS); - if (repl instanceof Integer) { - return ((Integer)repl).intValue(); + if (repl instanceof Integer) { + return ((Integer)repl).intValue(); } } return 0; } - + boolean canGetCurReplicas() { return this.getNumCurrentReplicas != null; } @@ -935,7 +935,7 @@ public class HLog implements HConstants, Syncable { this.listener.logRollRequested(); } } - + private void doWrite(HLogKey logKey, WALEdit logEdit) throws IOException { if (!this.enabled) { @@ -1029,7 +1029,7 @@ public class HLog implements HConstants, Syncable { KeyValue edit = completeCacheFlushLogEdit(); WALEdit edits = new WALEdit(); edits.add(edit); - this.writer.append(makeKey(regionName, tableName, logSeqId, System.currentTimeMillis()), + this.writer.append(makeKey(regionName, tableName, logSeqId, System.currentTimeMillis()), edits); writeTime += System.currentTimeMillis() - now; writeOps++; @@ -1069,7 +1069,7 @@ public class HLog implements HConstants, Syncable { public static boolean isMetaFamily(byte [] family) { return Bytes.equals(METAFAMILY, family); } - + /** * Split up a bunch of regionserver commit log files that are no longer * being written to, into new files, one per region for region to replay on @@ -1122,13 +1122,13 @@ public class HLog implements HConstants, Syncable { this.w = w; } } - + @SuppressWarnings("unchecked") public static Class getKeyClass(HBaseConfiguration conf) { - return (Class) + return (Class) conf.getClass("hbase.regionserver.hlog.keyclass", HLogKey.class); } - + public static HLogKey newKey(HBaseConfiguration conf) throws IOException { Class keyClass = getKeyClass(conf); try { @@ -1139,7 +1139,7 @@ public class HLog implements HConstants, Syncable { throw new IOException("cannot create hlog key"); } } - + /* * @param rootDir * @param logfiles @@ -1156,13 +1156,13 @@ public class HLog implements HConstants, Syncable { Collections.synchronizedMap( new TreeMap(Bytes.BYTES_COMPARATOR)); List splits = null; - + // Number of threads to use when log splitting to rewrite the logs. // More means faster but bigger mem consumption. 
int logWriterThreads = conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3); - - // Number of logs to read into memory before writing to their appropriate + + // Number of logs to read into memory before writing to their appropriate // regions when log splitting. More means faster but bigger mem consumption int logFilesPerStep = conf.getInt("hbase.regionserver.hlog.splitlog.reader.threads", 3); @@ -1172,21 +1172,21 @@ public class HLog implements HConstants, Syncable { final boolean appendSupport = isAppend(conf); // store corrupt logs for post-mortem analysis (empty string = discard) - final String corruptDir = + final String corruptDir = conf.get("hbase.regionserver.hlog.splitlog.corrupt.dir", ".corrupt"); List finishedFiles = new LinkedList(); List corruptFiles = new LinkedList(); try { - int maxSteps = Double.valueOf(Math.ceil((logfiles.length * 1.0) / + int maxSteps = Double.valueOf(Math.ceil((logfiles.length * 1.0) / logFilesPerStep)).intValue(); for (int step = 0; step < maxSteps; step++) { - + // Step 1: read N log files into memory - final Map> logEntries = + final Map> logEntries = new TreeMap>(Bytes.BYTES_COMPARATOR); - int endIndex = step == maxSteps - 1? logfiles.length: + int endIndex = step == maxSteps - 1? logfiles.length: step * logFilesPerStep + logFilesPerStep; for (int i = (step * logFilesPerStep); i < endIndex; i++) { Path curLogFile = logfiles[i].getPath(); @@ -1229,7 +1229,7 @@ public class HLog implements HConstants, Syncable { LOG.debug("IOE Pushed=" + count + " entries from " + curLogFile); e = RemoteExceptionHandler.checkIOException(e); if (!(e instanceof EOFException)) { - String msg = "Exception processing " + curLogFile + + String msg = "Exception processing " + curLogFile + " -- continuing. Possible DATA LOSS!"; if (corruptDir.length() > 0) { msg += " Storing in hlog corruption directory."; @@ -1262,13 +1262,13 @@ public class HLog implements HConstants, Syncable { } } - // Step 2: Some regionserver log files have been read into memory. + // Step 2: Some regionserver log files have been read into memory. // Assign them to the appropriate region directory. class ThreadWithException extends Thread { ThreadWithException(String name) { super(name); } public IOException exception = null; } - List threadList = + List threadList = new ArrayList(logEntries.size()); ExecutorService threadPool = Executors.newFixedThreadPool(logWriterThreads); @@ -1278,7 +1278,7 @@ public class HLog implements HConstants, Syncable { public void run() { LinkedList entries = logEntries.get(region); LOG.debug("Thread got " + entries.size() + " to process"); - if(entries.size() <= 0) { + if(entries.size() <= 0) { LOG.warn("Got a region with no entries to process."); return; } @@ -1292,20 +1292,20 @@ public class HLog implements HConstants, Syncable { // first write to this region, make new logfile assert entries.size() > 0; Path logfile = new Path(HRegion.getRegionDir(HTableDescriptor - .getTableDir(rootDir, + .getTableDir(rootDir, entries.getFirst().getKey().getTablename()), HRegionInfo.encodeRegionName(region)), HREGION_OLDLOGFILE_NAME); - // If splitLog() was running when the user restarted his - // cluster, then we could already have a 'logfile'. - // Since we don't delete logs until everything is written to + // If splitLog() was running when the user restarted his + // cluster, then we could already have a 'logfile'. + // Since we don't delete logs until everything is written to // their respective regions, we can safely remove this tmp. 
if (fs.exists(logfile)) { - LOG.warn("Deleting old hlog file: " + logfile); + LOG.warn("Deleting old hlog file: " + logfile); fs.delete(logfile, true); } - + // associate an OutputStream with this logfile SequenceFile.Writer w = SequenceFile.createWriter(fs, conf, logfile, @@ -1317,7 +1317,7 @@ public class HLog implements HConstants, Syncable { + logfile + " and region " + Bytes.toStringBinary(region)); } } - + // Items were added to the linkedlist oldest first. Pull them // out in that order. for (ListIterator i = @@ -1327,7 +1327,7 @@ public class HLog implements HConstants, Syncable { wap.w.append(logEntry.getKey(), logEntry.getEdit()); count++; } - + if (LOG.isDebugEnabled()) { LOG.debug("Applied " + count + " total edits to " + Bytes.toStringBinary(region) + " in " @@ -1335,7 +1335,7 @@ public class HLog implements HConstants, Syncable { } } catch (IOException e) { e = RemoteExceptionHandler.checkIOException(e); - LOG.warn("Got while writing region " + LOG.warn("Got while writing region " + Bytes.toStringBinary(region) + " log " + e); e.printStackTrace(); exception = e; @@ -1357,14 +1357,14 @@ public class HLog implements HConstants, Syncable { +"Retaining log files to avoid data loss."); throw new IOException(ex.getMessage(), ex.getCause()); } - + // throw an exception if one of the threads reported one for (ThreadWithException t : threadList) { if (t.exception != null) { throw t.exception; } } - + // End of for loop. Rinse and repeat } } finally { @@ -1398,14 +1398,14 @@ public class HLog implements HConstants, Syncable { fs.mkdirs(cp); } Path newp = new Path(cp, p.getName()); - if (!fs.exists(newp)) { + if (!fs.exists(newp)) { if (!fs.rename(p, newp)) { LOG.warn("Rename of " + p + " to " + newp + " failed."); } else { LOG.warn("Corrupt Hlog (" + p + ") moved to " + newp); } } else { - LOG.warn("Corrupt Hlog (" + p + ") already moved to " + newp + + LOG.warn("Corrupt Hlog (" + p + ") already moved to " + newp + ". Ignoring"); } } else { @@ -1427,7 +1427,7 @@ public class HLog implements HConstants, Syncable { return splits; } - /* + /* * @param conf * @return True if append enabled and we have the syncFs in our path. 
*/ @@ -1444,7 +1444,7 @@ public class HLog implements HConstants, Syncable { } return append; } - + /** * Utility class that lets us keep track of the edit with it's key * Only used when splitting logs @@ -1540,20 +1540,20 @@ public class HLog implements HConstants, Syncable { } LOG.info("Past out lease recovery"); } - + /** * Construct the HLog directory name - * + * * @param info HServerInfo for server * @return the HLog directory name */ public static String getHLogDirectoryName(HServerInfo info) { return getHLogDirectoryName(info.getServerName()); } - + /** * Construct the HLog directory name - * + * * @param serverAddress * @param startCode * @return the HLog directory name @@ -1566,10 +1566,10 @@ public class HLog implements HConstants, Syncable { return getHLogDirectoryName( HServerInfo.getServerName(serverAddress, startCode)); } - + /** * Construct the HLog directory name - * + * * @param serverName * @return the HLog directory name */ @@ -1641,5 +1641,5 @@ public class HLog implements HConstants, Syncable { public static final long FIXED_OVERHEAD = ClassSize.align( ClassSize.OBJECT + (5 * ClassSize.REFERENCE) + ClassSize.ATOMIC_INTEGER + Bytes.SIZEOF_INT + (3 * Bytes.SIZEOF_LONG)); - + } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java b/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java index 2d4f7ee..84f6b62 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java @@ -29,11 +29,11 @@ import java.io.*; /** * A Key for an entry in the change log. - * - * The log intermingles edits to many tables and rows, so each log entry - * identifies the appropriate table and row. Within a table and row, they're + * + * The log intermingles edits to many tables and rows, so each log entry + * identifies the appropriate table and row. Within a table and row, they're * also sorted. - * + * *

          Some Transactional edits (START, COMMIT, ABORT) will not have an * associated row. */ @@ -50,7 +50,7 @@ public class HLogKey implements WritableComparable { public HLogKey() { this(null, null, 0L, HConstants.LATEST_TIMESTAMP); } - + /** * Create the log key! * We maintain the tablename mainly for debugging purposes. @@ -77,7 +77,7 @@ public class HLogKey implements WritableComparable { public byte [] getRegionName() { return regionName; } - + /** @return table name */ public byte [] getTablename() { return tablename; @@ -87,7 +87,7 @@ public class HLogKey implements WritableComparable { public long getLogSeqNum() { return logSeqNum; } - + void setLogSeqNum(long logSeqNum) { this.logSeqNum = logSeqNum; } @@ -104,7 +104,7 @@ public class HLogKey implements WritableComparable { return Bytes.toString(tablename) + "/" + Bytes.toString(regionName) + "/" + logSeqNum; } - + @Override public boolean equals(Object obj) { if (this == obj) { @@ -115,7 +115,7 @@ public class HLogKey implements WritableComparable { } return compareTo((HLogKey)obj) == 0; } - + @Override public int hashCode() { int result = Bytes.hashCode(this.regionName); @@ -149,7 +149,7 @@ public class HLogKey implements WritableComparable { out.writeLong(logSeqNum); out.writeLong(this.writeTime); } - + public void readFields(DataInput in) throws IOException { this.regionName = Bytes.readByteArray(in); this.tablename = Bytes.readByteArray(in); diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 5ef638f..d86aa49 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -78,9 +78,9 @@ package org.apache.hadoop.hbase.regionserver; * for each row. A given table consists of one or more HRegions. * *

          We maintain multiple HStores for a single HRegion. - * + * *

          An Store is a set of rows with some column data; together, - * they make up all the data for the rows. + * they make up all the data for the rows. * *

          Each HRegion has a 'startKey' and 'endKey'. *

          The first is inclusive, the second is exclusive (except for @@ -96,15 +96,15 @@ package org.apache.hadoop.hbase.regionserver; * constructed, it holds a read lock until it is closed. A close takes out a * write lock and consequently will block for ongoing operations and will block * new operations from starting while the close is in progress. - * + * *

          An HRegion is defined by its table and its key extent. - * + * *

          It consists of at least one Store. The number of Stores should be * configurable, so that data which is accessed together is stored in the same - * Store. Right now, we approximate that by building a single Store for - * each column family. (This config info will be communicated via the + * Store. Right now, we approximate that by building a single Store for + * each column family. (This config info will be communicated via the * tabledesc.) - * + * *

          The HTableDescriptor contains metainfo about the HRegion's table. * regionName is a unique identifier for this HRegion. (startKey, endKey] * defines the keyspace for this HRegion. @@ -114,8 +114,8 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ static final String SPLITDIR = "splits"; static final String MERGEDIR = "merges"; final AtomicBoolean closed = new AtomicBoolean(false); - /* Closing can take some time; use the closing flag if there is stuff we don't - * want to do while in closing state; e.g. like offer this region up to the + /* Closing can take some time; use the closing flag if there is stuff we don't + * want to do while in closing state; e.g. like offer this region up to the * master as a region to close if the carrying regionserver is overloaded. * Once set, it is never cleared. */ @@ -134,13 +134,13 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ protected final Map stores = new ConcurrentSkipListMap(Bytes.BYTES_RAWCOMPARATOR); - + //These variable are just used for getting data out of the region, to test on //client side // private int numStores = 0; // private int [] storeSize = null; // private byte [] name = null; - + final AtomicLong memstoreSize = new AtomicLong(0); // This is the table subdirectory. @@ -184,7 +184,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ this.writesEnabled = !onOff; this.readOnly = onOff; } - + boolean isReadOnly() { return this.readOnly; } @@ -204,7 +204,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // Used to guard splits and closes private final ReentrantReadWriteLock splitsAndClosesLock = new ReentrantReadWriteLock(); - private final ReentrantReadWriteLock newScannerLock = + private final ReentrantReadWriteLock newScannerLock = new ReentrantReadWriteLock(); // Stop updates lock @@ -244,7 +244,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ this.regionInfo = null; this.threadWakeFrequency = 0L; } - + /** * HRegion constructor. This constructor should only be used for testing and * extensions. Instances of HRegion should be instantiated with the @@ -259,7 +259,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * appropriate log info for this HRegion. If there is a previous log file * (implying that the HRegion has been written-to before), then read it from * the supplied path. - * @param fs is the filesystem. + * @param fs is the filesystem. * @param conf is global configuration settings. * @param regionInfo - HRegionInfo that describes the region * is new), then read them from the supplied path. @@ -301,7 +301,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Initialize this region and get it ready to roll. * Called after construction. - * + * * @param initialFiles path * @param reporter progressable * @throws IOException e @@ -318,7 +318,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // Load in all the HStores. 
long maxSeqId = -1; long minSeqIdToRecover = Integer.MAX_VALUE; - + for (HColumnDescriptor c : this.regionInfo.getTableDesc().getFamilies()) { Store store = instantiateHStore(this.basedir, c, oldLogFile, reporter); this.stores.put(c.getName(), store); @@ -326,7 +326,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ if (storeSeqId > maxSeqId) { maxSeqId = storeSeqId; } - + long storeSeqIdBeforeRecovery = store.getMaxSeqIdBeforeLogRecovery(); if (storeSeqIdBeforeRecovery < minSeqIdToRecover) { minSeqIdToRecover = storeSeqIdBeforeRecovery; @@ -341,7 +341,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } fs.delete(oldLogFile, false); } - + // Add one to the current maximum sequence id so new edits are beyond. this.minSequenceId = maxSeqId + 1; @@ -400,7 +400,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // Name of this file has two leading and trailing underscores so it doesn't // clash w/ a store/family name. There is possibility, but assumption is // that its slim (don't want to use control character in filename because - // + // Path regioninfo = new Path(this.regiondir, REGIONINFO_FILE); if (this.fs.exists(regioninfo) && this.fs.getFileStatus(regioninfo).getLen() > 0) { @@ -424,7 +424,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ long getMinSequenceId() { return this.minSequenceId; } - + /** @return a HRegionInfo object for this region */ public HRegionInfo getRegionInfo() { return this.regionInfo; @@ -434,7 +434,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ public boolean isClosed() { return this.closed.get(); } - + /** * @return True if closing process has started. */ @@ -445,18 +445,18 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ public ReadWriteConsistencyControl getRWCC() { return rwcc; } - + /** - * Close down this HRegion. Flush the cache, shut down each HStore, don't + * Close down this HRegion. Flush the cache, shut down each HStore, don't * service any more calls. * - *

- * This method could take some time to execute, so don't call it from a 
+ *

          This method could take some time to execute, so don't call it from a * time-sensitive thread. - * - * @return Vector of all the storage files that the HRegion's component + * + * @return Vector of all the storage files that the HRegion's component * HStores make use of. It's a list of all HStoreFile objects. Returns empty * vector if already closed and null if judged that it should not close. - * + * * @throws IOException e */ public List close() throws IOException { @@ -467,14 +467,14 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * Close down this HRegion. Flush the cache unless abort parameter is true, * Shut down each HStore, don't service any more calls. * - * This method could take some time to execute, so don't call it from a + * This method could take some time to execute, so don't call it from a * time-sensitive thread. - * + * * @param abort true if server is aborting (only during testing) - * @return Vector of all the storage files that the HRegion's component + * @return Vector of all the storage files that the HRegion's component * HStores make use of. It's a list of HStoreFile objects. Can be null if * we are not to close at this time or we are already closed. - * + * * @throws IOException e */ public List close(final boolean abort) throws IOException { @@ -522,12 +522,12 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // outstanding updates. waitOnRowLocks(); LOG.debug("No more row locks outstanding on region " + this); - + // Don't flush the cache if we are aborting if (!abort) { internalFlushcache(); } - + List result = new ArrayList(); for (Store store: stores.values()) { result.addAll(store.close()); @@ -611,11 +611,11 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ public long getLastFlushTime() { return this.lastFlushTime; } - + ////////////////////////////////////////////////////////////////////////////// - // HRegion maintenance. + // HRegion maintenance. // - // These methods are meant to be called periodically by the HRegionServer for + // These methods are meant to be called periodically by the HRegionServer for // upkeep. ////////////////////////////////////////////////////////////////////////////// @@ -737,7 +737,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ protected void prepareToSplit() { // nothing } - + /* * @param dir * @return compaction directory for the passed in dir @@ -754,7 +754,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ private void doRegionCompactionPrep() throws IOException { doRegionCompactionCleanup(); } - + /* * Removes the compaction directory for this Store. * @throws IOException @@ -775,13 +775,13 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * Called by compaction thread and after region is opened to compact the * HStores if necessary. * - *

- * This operation could block for a long time, so don't call it from a 
+ *

          This operation could block for a long time, so don't call it from a * time-sensitive thread. * * Note that no locking is necessary at this level because compaction only * conflicts with a region split, and that cannot happen because the region * server does them sequentially and not in parallel. - * + * * @return mid key if split is needed * @throws IOException e */ @@ -795,13 +795,13 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * Called by compaction thread and after region is opened to compact the * HStores if necessary. * - *

- * This operation could block for a long time, so don't call it from a 
+ *

          This operation could block for a long time, so don't call it from a * time-sensitive thread. * * Note that no locking is necessary at this level because compaction only * conflicts with a region split, and that cannot happen because the region * server does them sequentially and not in parallel. - * + * * @param majorCompaction True to force a major compaction regardless of thresholds * @return split row if split is needed * @throws IOException e @@ -829,7 +829,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ return splitRow; } } - LOG.info("Starting" + (majorCompaction? " major " : " ") + + LOG.info("Starting" + (majorCompaction? " major " : " ") + "compaction on region " + this); long startTime = System.currentTimeMillis(); doRegionCompactionPrep(); @@ -842,7 +842,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } doRegionCompactionCleanup(); - String timeTaken = StringUtils.formatTimeDiff(System.currentTimeMillis(), + String timeTaken = StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime); LOG.info("compaction completed on region " + this + " in " + timeTaken); } finally { @@ -859,7 +859,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Flush the cache. - * + * * When this method is called the cache will be flushed unless: *

 *   1. the cache is empty
 *   2.
@@ -868,11 +868,11 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
 *   3. writes are disabled
 *   4.
 *
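A rough sketch of the guard implied by this list (field names are illustrative only; the real method also coordinates through writestate and logs why it skipped): the flush is a no-op when the region is closed, a flush is already running, writes are disabled, or there is nothing in the memstore.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

// Minimal sketch of the "flush unless..." guard; not the HBase implementation.
public class FlushGuardSketch {
  private final AtomicBoolean closed = new AtomicBoolean(false);
  private final AtomicLong memstoreSize = new AtomicLong(0);
  private volatile boolean flushing = false;      // a flush is already in progress
  private volatile boolean writesEnabled = true;  // writes disabled => don't flush

  /** @return true if a flush was actually started */
  synchronized boolean flushcache() {
    if (closed.get() || flushing || !writesEnabled || memstoreSize.get() == 0) {
      return false;   // one of the conditions above holds: skip the flush
    }
    flushing = true;
    try {
      // ... snapshot the memstore and write it out to store files ...
      memstoreSize.set(0);
      return true;
    } finally {
      flushing = false;
    }
  }
}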

- * This method may block for some time, so it should not be called from a 
+ *

          This method may block for some time, so it should not be called from a * time-sensitive thread. - * + * * @return true if cache was flushed - * + * * @throws IOException general io exceptions * @throws DroppedSnapshotException Thrown when replay of hlog is required * because a Snapshot was not properly persisted. @@ -891,7 +891,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ writestate.flushing + ", writesEnabled=" + writestate.writesEnabled); } - return false; + return false; } } try { @@ -920,25 +920,25 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * flushed. (That way, during recovery, we know when we can rely on the * on-disk flushed structures and when we have to recover the memstore from * the log.) - * + * *

 * So, we have a three-step process:
- * 
+ *
 * A. Flush the memstore to the on-disk stores, noting the current
 *    sequence ID for the log.
- * 
+ *
 * B. Write a FLUSHCACHE-COMPLETE message to the log, using the sequence
 *    ID that was current at the time of memstore-flush.
- * 
+ *
 * C. Get rid of the memstore structures that are now redundant, as
 *    they've been flushed to the on-disk HStores.
 *
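The A/B/C sequence could be sketched as follows; the store and log interfaces here are invented stand-ins, and error handling (e.g. DroppedSnapshotException on a failed snapshot) is omitted:

import java.io.IOException;
import java.util.List;

// Illustrative three-step flush: flush memstores to disk, then mark the flush
// complete in the log so older edits can be ignored on recovery.
public class FlushProtocolSketch {

  interface SketchStore {          // stand-in for a per-family Store
    void flushSnapshotToDisk() throws IOException;
    void dropFlushedSnapshot();
  }

  interface SketchLog {            // stand-in for the write-ahead log (HLog)
    long currentSequenceId();
    void completeCacheFlush(long flushedUpToSeqId) throws IOException;
  }

  boolean internalFlushcache(List<SketchStore> stores, SketchLog log) throws IOException {
    // A. Flush the memstore to the on-disk stores, noting the current
    //    sequence id for the log.
    long sequenceId = log.currentSequenceId();
    for (SketchStore store : stores) {
      store.flushSnapshotToDisk();
    }

    // B. Write a FLUSHCACHE-COMPLETE marker to the log using that sequence id,
    //    so recovery can ignore log entries with lower sequence ids.
    log.completeCacheFlush(sequenceId);

    // C. Get rid of the now-redundant memstore snapshots; the data is on disk.
    for (SketchStore store : stores) {
      store.dropFlushedSnapshot();
    }
    return true;   // caller may decide whether a compaction is now needed
  }
}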

 * This method is protected, but can be accessed via several public routes.
- * 
+ *
 *

          This method may block for some time. - * + * * @return true if the region needs compacting - * + * * @throws IOException general io exceptions * @throws DroppedSnapshotException Thrown when replay of hlog is required * because a Snapshot was not properly persisted. @@ -1068,7 +1068,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // B. Write a FLUSHCACHE-COMPLETE message to the log. // This tells future readers that the HStores were emitted correctly, - // and that all updates to the log for this regionName that have lower + // and that all updates to the log for this regionName that have lower // log-sequence-ids can be safely ignored. this.log.completeCacheFlush(getRegionName(), regionInfo.getTableDesc().getName(), completeSequenceId, @@ -1079,7 +1079,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ synchronized (this) { notifyAll(); } - + if (LOG.isDebugEnabled()) { long now = System.currentTimeMillis(); LOG.debug("Finished memstore flush of ~" + @@ -1108,23 +1108,23 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Get the sequence number to be associated with this cache flush. Used by * TransactionalRegion to not complete pending transactions. - * - * + * + * * @param currentSequenceId * @return sequence id to complete the cache flush with - */ + */ protected long getCompleteCacheFlushSequenceId(long currentSequenceId) { return currentSequenceId; } - + ////////////////////////////////////////////////////////////////////////////// // get() methods for client use. ////////////////////////////////////////////////////////////////////////////// /** - * Return all the data for the row that matches row exactly, - * or the one that immediately preceeds it, at or immediately before + * Return all the data for the row that matches row exactly, + * or the one that immediately preceeds it, at or immediately before * ts. - * + * * @param row row key * @return map of values * @throws IOException @@ -1135,10 +1135,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } /** - * Return all the data for the row that matches row exactly, - * or the one that immediately preceeds it, at or immediately before + * Return all the data for the row that matches row exactly, + * or the one that immediately preceeds it, at or immediately before * ts. - * + * * @param row row key * @param family column family to find on * @return map of values @@ -1168,7 +1168,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } /** - * Return an iterator that scans over the HRegion, returning the indicated + * Return an iterator that scans over the HRegion, returning the indicated * columns and rows specified by the {@link Scan}. *

          * This Iterator must be closed by the caller. @@ -1252,8 +1252,8 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ splitsAndClosesLock.readLock().unlock(); } } - - + + /** * @param familyMap map of family to edits for the given family. * @param writeToWAL @@ -1272,10 +1272,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ for (Map.Entry> e : familyMap.entrySet()) { - byte[] family = e.getKey(); + byte[] family = e.getKey(); List kvs = e.getValue(); Map kvCount = new TreeMap(Bytes.BYTES_COMPARATOR); - + Store store = getStore(family); for (KeyValue kv: kvs) { // Check if time is LATEST, change to time of most recent addition if so @@ -1349,9 +1349,9 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ for (Map.Entry> e : familyMap.entrySet()) { - byte[] family = e.getKey(); + byte[] family = e.getKey(); List kvs = e.getValue(); - + Store store = getStore(family); for (KeyValue kv: kvs) { kv.setMemstoreTS(w.getWriteNumber()); @@ -1370,7 +1370,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ requestFlush(); } } - + /** * @param put * @throws IOException @@ -1378,7 +1378,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ public void put(Put put) throws IOException { this.put(put, null, put.getWriteToWAL()); } - + /** * @param put * @param writeToWAL @@ -1408,7 +1408,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ checkReadOnly(); // Do a rough check that we have resources to accept a write. The check is - // 'rough' in that between the resource check and the call to obtain a + // 'rough' in that between the resource check and the call to obtain a // read lock, resources may run out. For now, the thought is that this // will be extremely rare; we'll deal with it when it happens. checkResources(); @@ -1436,13 +1436,13 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } - - //TODO, Think that gets/puts and deletes should be refactored a bit so that + + //TODO, Think that gets/puts and deletes should be refactored a bit so that //the getting of the lock happens before, so that you would just pass it into - //the methods. So in the case of checkAndPut you could just do lockRow, + //the methods. 
So in the case of checkAndPut you could just do lockRow, //get, put, unlockRow or something /** - * + * * @param row * @param family * @param qualifier @@ -1454,10 +1454,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * @return true if the new put was execute, false otherwise */ public boolean checkAndPut(byte [] row, byte [] family, byte [] qualifier, - byte [] expectedValue, Put put, Integer lockId, boolean writeToWAL) + byte [] expectedValue, Put put, Integer lockId, boolean writeToWAL) throws IOException{ checkReadOnly(); - //TODO, add check for value length or maybe even better move this to the + //TODO, add check for value length or maybe even better move this to the //client if this becomes a global setting checkResources(); splitsAndClosesLock.readLock().lock(); @@ -1469,7 +1469,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ byte [] now = Bytes.toBytes(System.currentTimeMillis()); // Lock row - Integer lid = getLock(lockId, get.getRow()); + Integer lid = getLock(lockId, get.getRow()); List result = new ArrayList(); try { result = get(get); @@ -1486,7 +1486,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ if (matches) { // All edits for the given row (across all column families) must happen atomically. put(put.getFamilyMap(), writeToWAL); - return true; + return true; } return false; } finally { @@ -1494,10 +1494,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } finally { splitsAndClosesLock.readLock().unlock(); - } + } } - - + + /** * Checks if any stamps is Long.MAX_VALUE. If so, sets them to now. *
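The checkAndPut flow in this hunk follows the lockRow / get / compare / put / unlockRow ordering mentioned in the TODO above. A toy sketch of that ordering over a single in-memory row (placeholder types, not the HBase API):

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

// Placeholder check-and-put over one in-memory "row"; it only illustrates the
// lockRow / get / compare / put / unlockRow ordering.
public class CheckAndPutSketch {
  private final Map<String, byte[]> cells = new HashMap<String, byte[]>();
  private final Object rowLock = new Object();   // stand-in for the per-row lock

  boolean checkAndPut(String column, byte[] expected, byte[] newValue) {
    synchronized (rowLock) {                      // lockRow
      byte[] current = cells.get(column);         // get
      boolean matches =
          (current == null && (expected == null || expected.length == 0))
          || (current != null && Arrays.equals(current, expected));  // compare
      if (matches) {
        cells.put(column, newValue);              // put, still under the row lock
        return true;
      }
      return false;
    }                                             // unlockRow
  }

  public static void main(String[] args) {
    CheckAndPutSketch row = new CheckAndPutSketch();
    System.out.println(row.checkAndPut("info:count", new byte[0], "1".getBytes()));    // true
    System.out.println(row.checkAndPut("info:count", "2".getBytes(), "3".getBytes())); // false
  }
}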

          @@ -1517,10 +1517,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } return true; } - + // /* -// * Utility method to verify values length. +// * Utility method to verify values length. // * @param batchUpdate The update to verify // * @throws IOException Thrown if a value is too long // */ @@ -1528,7 +1528,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // throws IOException { // Map> families = put.getFamilyMap(); // for(Map.Entry> entry : families.entrySet()) { -// HColumnDescriptor hcd = +// HColumnDescriptor hcd = // this.regionInfo.getTableDesc().getFamily(entry.getKey()); // int maxLen = hcd.getMaxValueLength(); // for(KeyValue kv : entry.getValue()) { @@ -1543,7 +1543,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /* * Check if resources to support an update. - * + * * Here we synchronize on HRegion, a broad scoped lock. Its appropriate * given we're figuring in here whether this region is able to take on * writes. This is only method with a synchronize (at time of writing), @@ -1590,7 +1590,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } - /** + /** * Add updates first to the hlog and then add values to memstore. * Warning: Assumption is caller has lock on passed in row. * @param family @@ -1605,7 +1605,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ this.put(familyMap, true); } - /** + /** * Add updates first to the hlog (if writeToWal) and then add values to memstore. * Warning: Assumption is caller has lock on passed in row. * @param familyMap map of family to edits for the given family. @@ -1622,7 +1622,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ try { WALEdit walEdit = new WALEdit(); - + // check if column families are valid; // check if any timestampupdates are needed; // and if writeToWAL is set, then also collapse edits into a single list. @@ -1636,7 +1636,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // update timestamp on keys if required. if (updateKeys(edits, byteNow)) { if (writeToWAL) { - // bunch up all edits across all column families into a + // bunch up all edits across all column families into a // single WALEdit. for (KeyValue kv : edits) { walEdit.add(kv); @@ -1659,14 +1659,14 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ walEdit, now, this.getRegionInfo().isMetaRegion()); } - + long size = 0; w = rwcc.beginMemstoreInsert(); // now make changes to the memstore for (Map.Entry> e : familyMap.entrySet()) { - byte[] family = e.getKey(); + byte[] family = e.getKey(); List edits = e.getValue(); Store store = getStore(family); @@ -1720,7 +1720,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // Used by subclasses; e.g. THBase. } - protected Store instantiateHStore(Path baseDir, + protected Store instantiateHStore(Path baseDir, HColumnDescriptor c, Path oldLogFile, Progressable reporter) throws IOException { return new Store(baseDir, this, c, this.fs, oldLogFile, @@ -1735,7 +1735,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * TODO: Make this lookup faster. 
*/ public Store getStore(final byte [] column) { - return this.stores.get(column); + return this.stores.get(column); } ////////////////////////////////////////////////////////////////////////////// @@ -1768,10 +1768,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * But it acts as a guard on the client; a miswritten client just can't * submit the name of a row and start writing to it; it must know the correct * lockid, which matches the lock list in memory. - * - *

- * It would be more memory-efficient to assume a correctly-written client, 
+ *

          It would be more memory-efficient to assume a correctly-written client, * which maybe we'll do in the future. - * + * * @param row Name of row to lock. * @throws IOException * @return The id of the held lock. @@ -1794,7 +1794,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ // generate a new lockid. Attempt to insert the new [lockid, row]. // if this lockid already exists in the map then revert and retry // We could have first done a lockIds.get, and if it does not exist only - // then do a lockIds.put, but the hope is that the lockIds.put will + // then do a lockIds.put, but the hope is that the lockIds.put will // mostly return null the first time itself because there won't be // too many lockId collisions. byte [] prev = null; @@ -1816,7 +1816,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ splitsAndClosesLock.readLock().unlock(); } } - + /** * Used by unit tests. * @param lockid @@ -1827,8 +1827,8 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ return lockIds.get(lockid); } } - - /** + + /** * Release the row lock! * @param lockid The lock ID to release. */ @@ -1839,7 +1839,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ lockedRows.notifyAll(); } } - + /** * See if row is currently locked. * @param lockid @@ -1853,14 +1853,14 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ return false; } } - + /** * Returns existing row lock if found, otherwise * obtains a new row lock and returns it. * @param lockid * @return lockid */ - private Integer getLock(Integer lockid, byte [] row) + private Integer getLock(Integer lockid, byte [] row) throws IOException { Integer lid = null; if (lockid == null) { @@ -1873,13 +1873,13 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } return lid; } - + private void waitOnRowLocks() { synchronized (lockedRows) { while (!this.lockedRows.isEmpty()) { if (LOG.isDebugEnabled()) { LOG.debug("Waiting on " + this.lockedRows.size() + " row locks"); - } + } try { this.lockedRows.wait(); } catch (InterruptedException e) { @@ -1888,17 +1888,17 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } } - + @Override public boolean equals(Object o) { return this.hashCode() == ((HRegion)o).hashCode(); } - + @Override public int hashCode() { return this.regionInfo.getRegionName().hashCode(); } - + @Override public String toString() { return this.regionInfo.getRegionNameAsString(); @@ -2097,7 +2097,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } /** - * + * * @param scanner to be closed */ public void close(KeyValueScanner scanner) { @@ -2106,12 +2106,12 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } catch(NullPointerException npe) {} } } - + // Utility methods /** * A utility method to create new instances of HRegion based on the * {@link org.apache.hadoop.hbase.HConstants#REGION_IMPL} configuration - * property. + * property. * @param basedir qualified path of directory where region should be located, * usually the table directory. 
* @param log The HLog is the outbound log for any updates to the HRegion @@ -2158,7 +2158,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * @param rootDir Root directory for HBase instance * @param conf * @return new HRegion - * + * * @throws IOException */ public static HRegion createHRegion(final HRegionInfo info, final Path rootDir, @@ -2175,7 +2175,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ region.initialize(null, null); return region; } - + /** * Convenience method to open a HRegion outside of an HRegionServer context. * @param info Info for region to be opened. @@ -2186,7 +2186,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * up. HRegionStore does this every time it opens a new region. * @param conf * @return new HRegion - * + * * @throws IOException */ public static HRegion openHRegion(final HRegionInfo info, final Path rootDir, @@ -2207,18 +2207,18 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } return r; } - + /** * Inserts a new region's meta information into the passed * meta region. Used by the HMaster bootstrap code adding * new table to ROOT table. - * + * * @param meta META HRegion to be updated * @param r HRegion to add to meta * * @throws IOException */ - public static void addRegionToMETA(HRegion meta, HRegion r) + public static void addRegionToMETA(HRegion meta, HRegion r) throws IOException { meta.checkResources(); // The row key is the region name @@ -2238,7 +2238,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * Delete a region's meta information from the passed * meta region. Removes content in the 'info' column family. * Does not remove region historian info. - * + * * @param srvr META server to be updated * @param metaRegionName Meta region name * @param regionName HRegion to remove from meta @@ -2274,7 +2274,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ srvr.put(metaRegionName, put); cleanRegionInMETA(srvr, metaRegionName, info); } - + /** * Clean COL_SERVER and COL_STARTCODE for passed info in * .META. 
@@ -2294,7 +2294,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Deletes all the files for a HRegion - * + * * @param fs the file system object * @param rootdir qualified path of HBase root directory * @param info HRegionInfo for region to be deleted @@ -2317,7 +2317,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Computes the Path of the HRegion - * + * * @param tabledir qualified path for table * @param name ENCODED region name * @return Path of HRegion directory @@ -2325,10 +2325,10 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ public static Path getRegionDir(final Path tabledir, final int name) { return new Path(tabledir, Integer.toString(name)); } - + /** * Computes the Path of the HRegion - * + * * @param rootdir qualified path of HBase root directory * @param info HRegionInfo for the region * @return qualified path of region directory @@ -2342,7 +2342,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Determines if the specified row is within the row range specified by the * specified HRegionInfo - * + * * @param info HRegionInfo that specifies the row range * @param row row to be checked * @return true if the row is within the range specified by the HRegionInfo @@ -2356,7 +2356,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Make the directories for a specific column family - * + * * @param fs the file system * @param tabledir base directory where region will live (usually the table dir) * @param hri @@ -2374,7 +2374,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Merge two HRegions. The regions must be adjacent and must not overlap. - * + * * @param srcA * @param srcB * @return new merged HRegion @@ -2406,7 +2406,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * Merge two regions whether they are adjacent or not. - * + * * @param a region a * @param b region b * @return new merged region @@ -2421,12 +2421,12 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ FileSystem fs = a.getFilesystem(); // Make sure each region's cache is empty - + a.flushcache(); b.flushcache(); - + // Compact each region so we only have one store file per family - + a.compactStores(true); if (LOG.isDebugEnabled()) { LOG.debug("Files for region: " + a); @@ -2437,12 +2437,12 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ LOG.debug("Files for region: " + b); listPaths(fs, b.getRegionDir()); } - + HBaseConfiguration conf = a.getConf(); HTableDescriptor tabledesc = a.getTableDesc(); HLog log = a.getLog(); Path basedir = a.getBaseDir(); - // Presume both are of same region type -- i.e. both user or catalog + // Presume both are of same region type -- i.e. both user or catalog // table regions. This way can use comparator. final byte [] startKey = a.comparator.matchingRows(a.getStartKey(), 0, a.getStartKey().length, @@ -2450,7 +2450,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ b.comparator.matchingRows(b.getStartKey(), 0, b.getStartKey().length, EMPTY_BYTE_ARRAY, 0, EMPTY_BYTE_ARRAY.length)? EMPTY_BYTE_ARRAY: - a.comparator.compareRows(a.getStartKey(), 0, a.getStartKey().length, + a.comparator.compareRows(a.getStartKey(), 0, a.getStartKey().length, b.getStartKey(), 0, b.getStartKey().length) <= 0? 
a.getStartKey(): b.getStartKey(); final byte [] endKey = a.comparator.matchingRows(a.getEndKey(), 0, @@ -2464,7 +2464,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ HRegionInfo newRegionInfo = new HRegionInfo(tabledesc, startKey, endKey); LOG.info("Creating new region " + newRegionInfo.toString()); - int encodedName = newRegionInfo.getEncodedName(); + int encodedName = newRegionInfo.getEncodedName(); Path newRegionDir = HRegion.getRegionDir(a.getBaseDir(), encodedName); if(fs.exists(newRegionDir)) { throw new IOException("Cannot merge; target file collision at " + @@ -2524,7 +2524,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } /* - * Fills a map with a vector of store files keyed by column family. + * Fills a map with a vector of store files keyed by column family. * @param byFamily Map to fill. * @param storeFiles Store files to process. * @param family @@ -2546,7 +2546,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /** * @return True if needs a mojor compaction. - * @throws IOException + * @throws IOException */ boolean isMajorCompaction() throws IOException { for (Store store: this.stores.values()) { @@ -2559,7 +2559,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ /* * List the files under the specified directory - * + * * @param fs * @param dir * @throws IOException @@ -2582,7 +2582,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } } - + // // HBASE-880 // @@ -2628,7 +2628,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ } /** - * + * * @param row * @param family * @param qualifier @@ -2697,13 +2697,13 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ return result; } - - + + // // New HBASE-880 Helpers // - - private void checkFamily(final byte [] family) + + private void checkFamily(final byte [] family) throws NoSuchColumnFamilyException { if(!regionInfo.getTableDesc().hasFamily(family)) { throw new NoSuchColumnFamilyException("Column family " + @@ -2715,9 +2715,9 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ public static final long FIXED_OVERHEAD = ClassSize.align( (5 * Bytes.SIZEOF_LONG) + Bytes.SIZEOF_BOOLEAN + (21 * ClassSize.REFERENCE) + ClassSize.OBJECT + Bytes.SIZEOF_INT); - + public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD + - ClassSize.OBJECT + (2 * ClassSize.ATOMIC_BOOLEAN) + + ClassSize.OBJECT + (2 * ClassSize.ATOMIC_BOOLEAN) + ClassSize.ATOMIC_LONG + ClassSize.ATOMIC_INTEGER + // Using TreeMap for TreeSet @@ -2730,7 +2730,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ ClassSize.align(ClassSize.OBJECT + (5 * Bytes.SIZEOF_BOOLEAN)) + (3 * ClassSize.REENTRANT_LOCK)); - + public long heapSize() { long heapSize = DEEP_OVERHEAD; for(Store store : this.stores.values()) { @@ -2839,7 +2839,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{ * ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion * * @param args - * @throws IOException + * @throws IOException */ public static void main(String[] args) throws IOException { if (args.length < 1) { diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 5c29c2b..badd42b 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -126,7 +126,7 @@ public class 
HRegionServer implements HConstants, HRegionInterface, // plain boolean so we can pass a reference to Chore threads. Otherwise, // Chore threads need to know about the hosting class. protected final AtomicBoolean stopRequested = new AtomicBoolean(false); - + protected final AtomicBoolean quiesced = new AtomicBoolean(false); protected final AtomicBoolean safeMode = new AtomicBoolean(true); @@ -137,7 +137,7 @@ public class HRegionServer implements HConstants, HRegionInterface, // If false, the file system has become unavailable protected volatile boolean fsOk; - + protected HServerInfo serverInfo; protected final HBaseConfiguration conf; @@ -165,7 +165,7 @@ public class HRegionServer implements HConstants, HRegionInterface, protected final int numRegionsToReport; private final long maxScannerResultSize; - + // Remote HMaster private HMasterRegionInterface hbaseMaster; @@ -175,7 +175,7 @@ public class HRegionServer implements HConstants, HRegionInterface, // Leases private Leases leases; - + // Request counter private volatile AtomicInteger requestCount = new AtomicInteger(); @@ -183,25 +183,25 @@ public class HRegionServer implements HConstants, HRegionInterface, // is name of the webapp and the attribute name used stuffing this instance // into web context. InfoServer infoServer; - + /** region server process name */ public static final String REGIONSERVER = "regionserver"; - + /* * Space is reserved in HRS constructor and then released when aborting * to recover from an OOME. See HBASE-706. TODO: Make this percentage of the * heap or a minimum. */ private final LinkedList reservedSpace = new LinkedList(); - + private RegionServerMetrics metrics; // Compactions CompactSplitThread compactSplitThread; - // Cache flushing + // Cache flushing MemStoreFlusher cacheFlusher; - + /* Check for major compactions. */ Chore majorCompactionChecker; @@ -210,7 +210,7 @@ public class HRegionServer implements HConstants, HRegionInterface, // eclipse warning when accessed by inner classes protected volatile HLog hlog; LogRoller hlogRoller; - + // flag set after we're done setting up server threads (used for testing) protected volatile boolean isOnline; @@ -247,7 +247,7 @@ public class HRegionServer implements HConstants, HRegionInterface, machineName = DNS.getDefaultHost( conf.get("hbase.regionserver.dns.interface","default"), conf.get("hbase.regionserver.dns.nameserver","default")); - String addressStr = machineName + ":" + + String addressStr = machineName + ":" + conf.get(REGIONSERVER_PORT, Integer.toString(DEFAULT_REGIONSERVER_PORT)); // This is not necessarily the address we will run with. The address we // use will be in #serverInfo data member. 
For example, we may have been @@ -260,9 +260,9 @@ public class HRegionServer implements HConstants, HRegionInterface, this.fsOk = true; this.conf = conf; this.connection = ServerConnectionManager.getConnection(conf); - + this.isOnline = false; - + // Config'ed params this.numRetries = conf.getInt("hbase.client.retries.number", 2); this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000); @@ -273,14 +273,14 @@ public class HRegionServer implements HConstants, HRegionInterface, sleeper = new Sleeper(this.msgInterval, this.stopRequested); this.maxScannerResultSize = conf.getLong( - HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, + HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); - + // Task thread to process requests from Master this.worker = new Worker(); - this.numRegionsToReport = - conf.getInt("hbase.regionserver.numregionstoreport", 10); + this.numRegionsToReport = + conf.getInt("hbase.regionserver.numregionstoreport", 10); this.rpcTimeout = conf.getLong("hbase.regionserver.lease.period", 60000); @@ -299,7 +299,7 @@ public class HRegionServer implements HConstants, HRegionInterface, this.shutdownHDFS.set(true); // Server to handle client requests - this.server = HBaseRPC.getServer(this, address.getBindAddress(), + this.server = HBaseRPC.getServer(this, address.getBindAddress(), address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10), false, conf); this.server.setErrorHandler(this); @@ -331,13 +331,13 @@ public class HRegionServer implements HConstants, HRegionInterface, // Cache flushing thread. this.cacheFlusher = new MemStoreFlusher(conf, this); - + // Compaction thread this.compactSplitThread = new CompactSplitThread(this); - + // Log rolling thread this.hlogRoller = new LogRoller(this); - + // Background thread to check for major compactions; needed if region // has not gotten updates in a while. Make it run at a lesser frequency. int multiplier = this.conf.getInt(THREAD_WAKE_FREQUENCY + @@ -422,7 +422,7 @@ public class HRegionServer implements HConstants, HRegionInterface, /** * The HRegionServer sticks in this loop until closed. It repeatedly checks - * in with the HMaster, sending heartbeats & reports, and receiving HRegion + * in with the HMaster, sending heartbeats & reports, and receiving HRegion * load/unload instructions. */ public void run() { @@ -528,7 +528,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } catch (IOException e) { this.abortRequested = true; this.stopRequested.set(true); - e = RemoteExceptionHandler.checkIOException(e); + e = RemoteExceptionHandler.checkIOException(e); LOG.fatal("error restarting server", e); break; } @@ -595,7 +595,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } } now = System.currentTimeMillis(); - HMsg msg = this.outboundMsgs.poll((msgInterval - (now - lastMsg)), + HMsg msg = this.outboundMsgs.poll((msgInterval - (now - lastMsg)), TimeUnit.MILLISECONDS); // If we got something, add it to list of things to send. 
if (msg != null) outboundMessages.add(msg); @@ -682,7 +682,7 @@ public class HRegionServer implements HConstants, HRegionInterface, HBaseRPC.stopProxy(this.hbaseMaster); this.hbaseMaster = null; } - + join(); this.zooKeeperWrapper.close(); if (this.shutdownHDFS.get()) { @@ -827,16 +827,16 @@ public class HRegionServer implements HConstants, HRegionInterface, stores += r.stores.size(); for (Store store: r.stores.values()) { storefiles += store.getStorefilesCount(); - storefileSizeMB += + storefileSizeMB += (int)(store.getStorefilesSize()/1024/1024); - storefileIndexSizeMB += + storefileIndexSizeMB += (int)(store.getStorefilesIndexSize()/1024/1024); } } return new HServerLoad.RegionLoad(name, stores, storefiles, storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB); } - + /** * @param regionName * @return An instance of RegionLoad. @@ -911,12 +911,12 @@ public class HRegionServer implements HConstants, HRegionInterface, } return stop; } - - + + /** * Checks to see if the file system is still accessible. * If not, sets abortRequested and stopRequested - * + * * @return false if file system is not available */ protected boolean checkFileSystem() { @@ -940,7 +940,7 @@ public class HRegionServer implements HConstants, HRegionInterface, private static class ShutdownThread extends Thread { private final HRegionServer instance; private final Thread mainThread; - + /** * @param instance * @param mainThread @@ -953,7 +953,7 @@ public class HRegionServer implements HConstants, HRegionInterface, @Override public void run() { LOG.info("Starting shutdown thread."); - + // tell the region server to stop instance.stop(); @@ -961,7 +961,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Threads.shutdown(mainThread); LOG.info("Shutdown thread complete"); - } + } } // We need to call HDFS shutdown when we are done shutting down @@ -998,7 +998,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } } } - + /** * So, HDFS caches FileSystems so when you call FileSystem.get it's fast. In * order to make sure things are cleaned up, it also creates a shutdown hook @@ -1026,7 +1026,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } Runtime.getRuntime().removeShutdownHook(hdfsClientFinalizer); return hdfsClientFinalizer; - + } catch (NoSuchFieldException nsfe) { LOG.fatal("Couldn't find field 'clientFinalizer' in FileSystem!", nsfe); throw new RuntimeException("Failed to suppress HDFS shutdown hook"); @@ -1037,7 +1037,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } /** - * Report the status of the server. A server is online once all the startup + * Report the status of the server. A server is online once all the startup * is completed (setting up filesystem, starting service threads, etc.). This * method is designed mostly to be useful in tests. * @return true if online, false if not. 
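The load report assembled above simply walks the online regions and sums per-store counters, converting byte sizes to MB. A stripped-down version with hypothetical types but the same arithmetic:

import java.util.List;

// Hypothetical per-store counters; only the aggregation arithmetic mirrors the text.
public class RegionLoadSketch {
  static class StoreStats {
    int storefilesCount;
    long storefilesSizeBytes;
    long storefilesIndexSizeBytes;
  }

  static String summarize(String regionName, List<StoreStats> stores) {
    int storefiles = 0;
    int storefileSizeMB = 0;
    int storefileIndexSizeMB = 0;
    for (StoreStats s : stores) {
      storefiles += s.storefilesCount;
      storefileSizeMB += (int) (s.storefilesSizeBytes / 1024 / 1024);
      storefileIndexSizeMB += (int) (s.storefilesIndexSizeBytes / 1024 / 1024);
    }
    return regionName + ": stores=" + stores.size()
        + ", storefiles=" + storefiles
        + ", storefileSizeMB=" + storefileSizeMB
        + ", storefileIndexSizeMB=" + storefileIndexSizeMB;
  }
}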
@@ -1045,10 +1045,10 @@ public class HRegionServer implements HConstants, HRegionInterface, public boolean isOnline() { return isOnline; } - + private HLog setupHLog() throws RegionServerRunningException, IOException { - + Path logdir = new Path(rootDir, HLog.getHLogDirectoryName(this.serverInfo)); if (LOG.isDebugEnabled()) { LOG.debug("Log dir " + logdir); @@ -1062,17 +1062,17 @@ public class HRegionServer implements HConstants, HRegionInterface, return newlog; } - // instantiate + // instantiate protected HLog instantiateHLog(Path logdir) throws IOException { HLog newlog = new HLog(fs, logdir, conf, hlogRoller); return newlog; } - + protected LogRoller getLogRoller() { return hlogRoller; - } - + } + /* * @param interval Interval since last time metrics were called. */ @@ -1101,7 +1101,7 @@ public class HRegionServer implements HConstants, HRegionInterface, synchronized (r.stores) { stores += r.stores.size(); for(Map.Entry ee: r.stores.entrySet()) { - Store store = ee.getValue(); + Store store = ee.getValue(); storefiles += store.getStorefilesCount(); storefileIndexSize += store.getStorefilesIndexSize(); } @@ -1160,7 +1160,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Threads.setDaemonThreadRunning(this.workerThread, n + ".worker", handler); Threads.setDaemonThreadRunning(this.majorCompactionChecker, n + ".majorCompactionChecker", handler); - + // Leases is not a Thread. Internally it runs a daemon thread. If it gets // an unhandled exception, it will just exit. this.leases.setName(n + ".leaseChecker"); @@ -1190,7 +1190,7 @@ public class HRegionServer implements HConstants, HRegionInterface, // update HRS server info this.serverInfo.setInfoPort(port); } - } + } } // Start Server. This service is like leases in that it internally runs @@ -1276,7 +1276,7 @@ public class HRegionServer implements HConstants, HRegionInterface, stop(); } - /** + /** * Wait on all threads to finish. * Presumption is that all closes and stops have already been called. */ @@ -1344,7 +1344,7 @@ public class HRegionServer implements HConstants, HRegionInterface, if (LOG.isDebugEnabled()) LOG.debug("sending initial server load: " + hsl); lastMsg = System.currentTimeMillis(); - boolean startCodeOk = false; + boolean startCodeOk = false; while(!startCodeOk) { serverInfo.setStartCode(System.currentTimeMillis()); startCodeOk = zooKeeperWrapper.writeRSLocation(this.serverInfo); @@ -1383,13 +1383,13 @@ public class HRegionServer implements HConstants, HRegionInterface, private void reportClose(final HRegionInfo region, final byte[] message) { this.outboundMsgs.add(new HMsg(HMsg.Type.MSG_REPORT_CLOSE, region, message)); } - + /** * Add to the outbound message buffer - * - * When a region splits, we need to tell the master that there are two new + * + * When a region splits, we need to tell the master that there are two new * regions that need to be assigned. - * + * * We do not need to inform the master about the old region, because we've * updated the meta or root regions, and the master will pick that up on its * next rescan of the root or meta tables. 
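The split report described here is only queued on the outbound message buffer; the main loop polls that queue with a timeout derived from the heartbeat interval and ships whatever it finds to the master. A minimal sketch of that producer/consumer shape (the message type and interval are invented):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

// Sketch of the outbound-report queue: worker threads enqueue messages (e.g. a
// split report), the heartbeat loop drains them and sends them with the report.
public class OutboundMsgSketch {
  static class Msg {                       // invented stand-in for HMsg
    final String type;
    Msg(String type) { this.type = type; }
  }

  private final LinkedBlockingQueue<Msg> outboundMsgs = new LinkedBlockingQueue<Msg>();
  private final long msgInterval = 3000;   // heartbeat period in ms (illustrative)

  void reportSplit(String oldRegion, String newA, String newB) {
    outboundMsgs.add(new Msg("REPORT_SPLIT:" + oldRegion + "->" + newA + "," + newB));
  }

  // One iteration of the heartbeat loop: wait up to the remaining interval for
  // queued messages, then send everything collected to the master.
  void heartbeatOnce(long lastMsgTime) throws InterruptedException {
    List<Msg> toSend = new ArrayList<Msg>();
    long now = System.currentTimeMillis();
    Msg m = outboundMsgs.poll(Math.max(0, msgInterval - (now - lastMsgTime)),
        TimeUnit.MILLISECONDS);
    if (m != null) toSend.add(m);
    outboundMsgs.drainTo(toSend);          // pick up anything else that is ready
    // the real server would call the master's report RPC here
    System.out.println("sending " + toSend.size() + " message(s) to master");
  }
}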
@@ -1422,7 +1422,7 @@ public class HRegionServer implements HConstants, HRegionInterface, final BlockingQueue toDo = new LinkedBlockingQueue(); private Worker worker; private Thread workerThread; - + /** Thread that performs long running requests from the master */ class Worker implements Runnable { void stop() { @@ -1430,7 +1430,7 @@ public class HRegionServer implements HConstants, HRegionInterface, toDo.notifyAll(); } } - + public void run() { try { while(!stopRequested.get()) { @@ -1492,12 +1492,12 @@ public class HRegionServer implements HConstants, HRegionInterface, e.msg.isType(Type.MSG_REGION_MAJOR_COMPACT), e.msg.getType().name()); break; - + case MSG_REGION_FLUSH: region = getRegion(info.getRegionName()); region.flushcache(); break; - + case TESTING_MSG_BLOCK_RS: while (!stopRequested.get()) { Threads.sleep(1000); @@ -1575,9 +1575,9 @@ public class HRegionServer implements HConstants, HRegionInterface, this.lock.writeLock().unlock(); } } - reportOpen(regionInfo); + reportOpen(regionInfo); } - + protected HRegion instantiateRegion(final HRegionInfo regionInfo) throws IOException { HRegion r = HRegion.newHRegion(HTableDescriptor.getTableDir(rootDir, regionInfo @@ -1588,9 +1588,9 @@ public class HRegionServer implements HConstants, HRegionInterface, addProcessingMessage(regionInfo); } }); - return r; + return r; } - + /** * Add a MSG_REPORT_PROCESS_OPEN to the outbound queue. * This method is called while region is in the queue of regions to process @@ -1644,7 +1644,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } return regionsToClose; } - + /* * Thread to run close of a region. */ @@ -1655,7 +1655,7 @@ public class HRegionServer implements HConstants, HRegionInterface, super(Thread.currentThread().getName() + ".regionCloser." 
+ r.toString()); this.r = r; } - + @Override public void run() { try { @@ -1727,7 +1727,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } - public Result getClosestRowBefore(final byte [] regionName, + public Result getClosestRowBefore(final byte [] regionName, final byte [] row, final byte [] family) throws IOException { checkOpen(); @@ -1735,8 +1735,8 @@ public class HRegionServer implements HConstants, HRegionInterface, try { // locate the region we're operating on HRegion region = getRegion(regionName); - // ask the region for all the data - + // ask the region for all the data + Result r = region.getClosestRowBefore(row, family); return r; } catch (Throwable t) { @@ -1772,7 +1772,7 @@ public class HRegionServer implements HConstants, HRegionInterface, throws IOException { if (put.getRow() == null) throw new IllegalArgumentException("update has null row"); - + checkOpen(); this.requestCount.incrementAndGet(); HRegion region = getRegion(regionName); @@ -1793,7 +1793,7 @@ public class HRegionServer implements HConstants, HRegionInterface, checkOpen(); try { HRegion region = getRegion(regionName); - + if (!region.getRegionInfo().isMetaTable()) { this.cacheFlusher.reclaimMemStoreMemory(); } @@ -1821,7 +1821,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } /** - * + * * @param regionName * @param row * @param family @@ -1832,12 +1832,12 @@ public class HRegionServer implements HConstants, HRegionInterface, * @return true if the new put was execute, false otherwise */ public boolean checkAndPut(final byte[] regionName, final byte [] row, - final byte [] family, final byte [] qualifier, final byte [] value, + final byte [] family, final byte [] qualifier, final byte [] value, final Put put) throws IOException{ //Getting actual value Get get = new Get(row); get.addColumn(family, qualifier); - + checkOpen(); this.requestCount.incrementAndGet(); HRegion region = getRegion(regionName); @@ -1853,7 +1853,7 @@ public class HRegionServer implements HConstants, HRegionInterface, throw convertThrowableToIOE(cleanup(t)); } } - + // // remote scanner interface // @@ -1878,7 +1878,7 @@ public class HRegionServer implements HConstants, HRegionInterface, throw convertThrowableToIOE(cleanup(t, "Failed openScanner")); } } - + protected long addScanner(InternalScanner s) throws LeaseStillHeldException { long scannerId = -1L; scannerId = rand.nextLong(); @@ -1909,7 +1909,7 @@ public class HRegionServer implements HConstants, HRegionInterface, try { checkOpen(); } catch (IOException e) { - // If checkOpen failed, server not running or filesystem gone, + // If checkOpen failed, server not running or filesystem gone, // cancel this lease; filesystem is gone or we're closing or something. this.leases.cancelLease(scannerName); throw e; @@ -1948,7 +1948,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } throw convertThrowableToIOE(cleanup(t)); } - } + } public void close(final long scannerId) throws IOException { try { @@ -1968,17 +1968,17 @@ public class HRegionServer implements HConstants, HRegionInterface, } } - /** + /** * Instantiated as a scanner lease. 
* If the lease times out, the scanner is closed */ private class ScannerListener implements LeaseListener { private final String scannerName; - + ScannerListener(final String n) { this.scannerName = n; } - + public void leaseExpired() { LOG.info("Scanner " + this.scannerName + " lease expired"); InternalScanner s = null; @@ -1994,7 +1994,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } } } - + // // Methods that do the actual work for the remote API // @@ -2040,7 +2040,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } catch (Throwable t) { throw convertThrowableToIOE(cleanup(t)); } - + // All have been processed successfully. return -1; } @@ -2173,7 +2173,7 @@ public class HRegionServer implements HConstants, HRegionInterface, public InfoServer getInfoServer() { return infoServer; } - + /** * @return true if a stop has been requested. */ @@ -2189,7 +2189,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } /** - * + * * @return the configuration */ public HBaseConfiguration getConfiguration() { @@ -2211,7 +2211,7 @@ public class HRegionServer implements HConstants, HRegionInterface, public HRegion [] getOnlineRegionsAsArray() { return getOnlineRegions().toArray(new HRegion[0]); } - + /** * @return The HRegionInfos from online regions sorted */ @@ -2224,10 +2224,10 @@ public class HRegionServer implements HConstants, HRegionInterface, } return result; } - + /** - * This method removes HRegion corresponding to hri from the Map of onlineRegions. - * + * This method removes HRegion corresponding to hri from the Map of onlineRegions. + * * @param hri the HRegionInfo corresponding to the HRegion to-be-removed. * @return the removed HRegion, or null if the HRegion was not in onlineRegions. */ @@ -2262,7 +2262,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } return sortedRegions; } - + /** * @param regionName * @return HRegion for the passed regionName or null if named @@ -2281,8 +2281,8 @@ public class HRegionServer implements HConstants, HRegionInterface, public FlushRequester getFlushRequester() { return this.cacheFlusher; } - - /** + + /** * Protected utility method for safely obtaining an HRegion handle. * @param regionName Name of online {@link HRegion} to return * @return {@link HRegion} for regionName @@ -2325,10 +2325,10 @@ public class HRegionServer implements HConstants, HRegionInterface, } return regions.toArray(new HRegionInfo[regions.size()]); } - - /** + + /** * Called to verify that this server is up and running. - * + * * @throws IOException */ protected void checkOpen() throws IOException { @@ -2340,14 +2340,14 @@ public class HRegionServer implements HConstants, HRegionInterface, throw new IOException("File system not available"); } } - + /** * @return Returns list of non-closed regions hosted on this server. If no * regions to check, returns an empty list. */ protected Set getRegionsToCheck() { HashSet regionsToCheck = new HashSet(); - //TODO: is this locking necessary? + //TODO: is this locking necessary? 
lock.readLock().lock(); try { regionsToCheck.addAll(this.onlineRegions.values()); @@ -2364,9 +2364,9 @@ public class HRegionServer implements HConstants, HRegionInterface, return regionsToCheck; } - public long getProtocolVersion(final String protocol, + public long getProtocolVersion(final String protocol, final long clientVersion) - throws IOException { + throws IOException { if (protocol.equals(HRegionInterface.class.getName())) { return HBaseRPCProtocolVersion.versionID; } @@ -2421,21 +2421,21 @@ public class HRegionServer implements HConstants, HRegionInterface, public HServerInfo getServerInfo() { return this.serverInfo; } /** {@inheritDoc} */ - public long incrementColumnValue(byte [] regionName, byte [] row, + public long incrementColumnValue(byte [] regionName, byte [] row, byte [] family, byte [] qualifier, long amount, boolean writeToWAL) throws IOException { checkOpen(); if (regionName == null) { - throw new IOException("Invalid arguments to incrementColumnValue " + + throw new IOException("Invalid arguments to incrementColumnValue " + "regionName is null"); } requestCount.incrementAndGet(); try { HRegion region = getRegion(regionName); - long retval = region.incrementColumnValue(row, family, qualifier, amount, + long retval = region.incrementColumnValue(row, family, qualifier, amount, writeToWAL); - + return retval; } catch (IOException e) { checkFileSystem(); @@ -2452,7 +2452,7 @@ public class HRegionServer implements HConstants, HRegionInterface, } return regions; } - + /** {@inheritDoc} */ public HServerInfo getHServerInfo() throws IOException { return serverInfo; @@ -2480,7 +2480,7 @@ public class HRegionServer implements HConstants, HRegionInterface, // // Main program and support routines // - + /** * @param hrs * @return Thread the RegionServer is running in correctly named. @@ -2506,7 +2506,7 @@ public class HRegionServer implements HConstants, HRegionInterface, private static void printUsageAndExit() { printUsageAndExit(null); } - + private static void printUsageAndExit(final String message) { if (message != null) { System.err.println(message); @@ -2544,7 +2544,7 @@ public class HRegionServer implements HConstants, HRegionInterface, printUsageAndExit(); } Configuration conf = new HBaseConfiguration(); - + // Process command-line args. TODO: Better cmd-line processing // (but hopefully something not as painful as cli options). for (String cmd: args) { @@ -2569,13 +2569,13 @@ public class HRegionServer implements HConstants, HRegionInterface, } break; } - + if (cmd.equals("stop")) { printUsageAndExit("To shutdown the regionserver run " + "bin/hbase-daemon.sh stop regionserver or send a kill signal to" + "the regionserver pid"); } - + // Print out usage if we get to here. printUsageAndExit(); } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java b/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java index d58a8a3..ded46e8 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java @@ -27,10 +27,10 @@ import org.apache.hadoop.hbase.KeyValue; /** * Internal scanners differ from client-side scanners in that they operate on - * HStoreKeys and byte[] instead of RowResults. This is because they are + * HStoreKeys and byte[] instead of RowResults. This is because they are * actually close to how the data is physically stored, and therefore it is more - * convenient to interact with them that way. 
It is also much easier to merge - * the results across SortedMaps than RowResults. + * convenient to interact with them that way. It is also much easier to merge + * the results across SortedMaps than RowResults. * *

          Additionally, we need to be able to determine if the scanner is doing * wildcard column matches (when only a column family is specified or if a @@ -50,7 +50,7 @@ public interface InternalScanner extends Closeable { /** * Grab the next row's worth of values with a limit on the number of values - * to return. + * to return. * @param result * @param limit * @return true if more rows exist after this one, false if scanner is done diff --git a/src/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/src/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java index 63f94af..b1ff038 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java @@ -46,14 +46,14 @@ public class KeyValueHeap implements KeyValueScanner, InternalScanner { private KVScannerComparator comparator; /** - * Constructor. This KeyValueHeap will handle closing of passed in + * Constructor. This KeyValueHeap will handle closing of passed in * KeyValueScanners. * @param scanners * @param comparator */ public KeyValueHeap(KeyValueScanner [] scanners, KVComparator comparator) { this.comparator = new KVScannerComparator(comparator); - this.heap = new PriorityQueue(scanners.length, + this.heap = new PriorityQueue(scanners.length, this.comparator); for (KeyValueScanner scanner : scanners) { if (scanner.peek() != null) { @@ -64,14 +64,14 @@ public class KeyValueHeap implements KeyValueScanner, InternalScanner { } this.current = heap.poll(); } - + public KeyValue peek() { if(this.current == null) { return null; } return this.current.peek(); } - + public KeyValue next() { if(this.current == null) { return null; @@ -101,7 +101,7 @@ public class KeyValueHeap implements KeyValueScanner, InternalScanner { * InternalScanner as well as KeyValueScanner (a {@link StoreScanner}). * @param result * @param limit - * @return true if there are more keys, false if all scanners are done + * @return true if there are more keys, false if all scanners are done */ public boolean next(List result, int limit) throws IOException { InternalScanner currentAsInternal = (InternalScanner)this.current; @@ -124,7 +124,7 @@ public class KeyValueHeap implements KeyValueScanner, InternalScanner { * This can ONLY be called when you are using Scanners that implement * InternalScanner as well as KeyValueScanner (a {@link StoreScanner}). * @param result - * @return true if there are more keys, false if all scanners are done + * @return true if there are more keys, false if all scanners are done */ public boolean next(List result) throws IOException { return next(result, -1); @@ -168,9 +168,9 @@ public class KeyValueHeap implements KeyValueScanner, InternalScanner { scanner.close(); } } - + /** - * Seeks all scanners at or below the specified seek key. If we earlied-out + * Seeks all scanners at or below the specified seek key. If we earlied-out * of a row, we may end up skipping values that were never reached yet. * Rather than iterating down, we want to give the opportunity to re-seek. *
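KeyValueHeap, as described in this hunk, is essentially a k-way merge: every underlying scanner sits in a priority queue ordered by its next key, peek() looks at the head scanner, and next() advances it and re-seats it if it still has data. A generic sketch of that idea over plain sorted iterators (strings instead of KeyValues):

import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.PriorityQueue;

// K-way merge over several sorted iterators, in the spirit of KeyValueHeap.
public class MergeHeapSketch {
  static class Source {
    final Iterator<String> it;
    String head;                     // the "peeked" next value of this source
    Source(Iterator<String> it) { this.it = it; head = it.hasNext() ? it.next() : null; }
  }

  private final PriorityQueue<Source> heap;

  MergeHeapSketch(Iterator<String>... sources) {
    heap = new PriorityQueue<Source>(Math.max(1, sources.length), new Comparator<Source>() {
      public int compare(Source a, Source b) { return a.head.compareTo(b.head); }
    });
    for (Iterator<String> it : sources) {
      Source s = new Source(it);
      if (s.head != null) heap.add(s);   // only sources that have data participate
    }
  }

  String peek() { return heap.isEmpty() ? null : heap.peek().head; }

  String next() {
    Source top = heap.poll();
    if (top == null) return null;
    String result = top.head;
    top.head = top.it.hasNext() ? top.it.next() : null;
    if (top.head != null) heap.add(top);   // re-seat the scanner at its new head
    return result;
  }

  public static void main(String[] args) {
    MergeHeapSketch h = new MergeHeapSketch(
        Arrays.asList("a", "d").iterator(),
        Arrays.asList("b", "c").iterator());
    for (String s = h.next(); s != null; s = h.next()) System.out.print(s + " ");  // a b c d
  }
}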

          diff --git a/src/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java b/src/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java index 4c5b844..69c19ea 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java @@ -30,20 +30,20 @@ public interface KeyValueScanner { * @return the next KeyValue */ public KeyValue peek(); - + /** - * Return the next KeyValue in this scanner, iterating the scanner + * Return the next KeyValue in this scanner, iterating the scanner * @return the next KeyValue */ public KeyValue next(); - + /** * Seek the scanner at or after the specified KeyValue. * @param key * @return true if scanner has values left, false if end of scanner */ public boolean seek(KeyValue key); - + /** * Close the KeyValue scanner. */ diff --git a/src/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java b/src/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java index 440f5a7..7ca94f1 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java @@ -61,7 +61,7 @@ class KeyValueSkipListSet implements NavigableSet { */ static class MapEntryIterator implements Iterator { private final Iterator> iterator; - + MapEntryIterator(final Iterator> i) { this.iterator = i; } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java b/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java index f5659cc..06b2efd 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java @@ -30,13 +30,13 @@ import org.apache.hadoop.hbase.util.Bytes; /** * Runs periodically to determine if the HLog should be rolled. - * + * * NOTE: This class extends Thread rather than Chore because the sleep time * can be interrupted when there is something to do, rather than the Chore * sleep time which is invariant. */ class LogRoller extends Thread implements LogRollListener { - static final Log LOG = LogFactory.getLog(LogRoller.class); + static final Log LOG = LogFactory.getLog(LogRoller.class); private final ReentrantLock rollLock = new ReentrantLock(); private final AtomicBoolean rollLog = new AtomicBoolean(false); private final HRegionServer server; diff --git a/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java b/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java index 745a16e..2ed6e75 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java @@ -37,8 +37,8 @@ import org.apache.commons.logging.LogFactory; * The LruHashMap is a memory-aware HashMap with a configurable maximum * memory footprint. *

          - * It maintains an ordered list of all entries in the map ordered by - * access time. When space needs to be freed becase the maximum has been + * It maintains an ordered list of all entries in the map ordered by + * access time. When space needs to be freed becase the maximum has been * reached, or the application has asked to free memory, entries will be * evicted according to an LRU (least-recently-used) algorithm. That is, * those entries which have not been accessed the longest will be evicted @@ -53,8 +53,8 @@ public class LruHashMap implements HeapSize, Map { static final Log LOG = LogFactory.getLog(LruHashMap.class); - - /** The default size (in bytes) of the LRU */ + + /** The default size (in bytes) of the LRU */ private static final long DEFAULT_MAX_MEM_USAGE = 50000; /** The default capacity of the hash table */ private static final int DEFAULT_INITIAL_CAPACITY = 16; @@ -62,12 +62,12 @@ implements HeapSize, Map { private static final int MAXIMUM_CAPACITY = 1 << 30; /** The default load factor to use */ private static final float DEFAULT_LOAD_FACTOR = 0.75f; - + /** Memory overhead of this Object (for HeapSize) */ - private static final int OVERHEAD = 5 * Bytes.SIZEOF_LONG + - 2 * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_FLOAT + 3 * ClassSize.REFERENCE + + private static final int OVERHEAD = 5 * Bytes.SIZEOF_LONG + + 2 * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_FLOAT + 3 * ClassSize.REFERENCE + 1 * ClassSize.ARRAY; - + /** Load factor allowed (usually 75%) */ private final float loadFactor; /** Number of key/vals in the map */ @@ -86,7 +86,7 @@ implements HeapSize, Map { private long memTotal = 0; /** Amount of available memory */ private long memFree = 0; - + /** Number of successful (found) get() calls */ private long hitCount = 0; /** Number of unsuccessful (not found) get() calls */ @@ -121,7 +121,7 @@ implements HeapSize, Map { throw new IllegalArgumentException("Max memory usage too small to " + "support base overhead"); } - + /** Find a power of 2 >= initialCapacity */ int capacity = calculateCapacity(initialCapacity); this.loadFactor = loadFactor; @@ -146,7 +146,7 @@ implements HeapSize, Map { public LruHashMap(int initialCapacity, float loadFactor) { this(initialCapacity, loadFactor, DEFAULT_MAX_MEM_USAGE); } - + /** * Constructs a new, empty map with the specified initial capacity and * with the default load factor and maximum memory usage. @@ -174,14 +174,14 @@ implements HeapSize, Map { } /** - * Constructs a new, empty map with the default initial capacity, + * Constructs a new, empty map with the default initial capacity, * load factor and maximum memory usage. */ public LruHashMap() { this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_MAX_MEM_USAGE); } - + //-------------------------------------------------------------------------- /** * Get the currently available memory for this LRU in bytes. @@ -192,7 +192,7 @@ implements HeapSize, Map { public long getMemFree() { return memFree; } - + /** * Get the maximum memory allowed for this LRU in bytes. * @@ -201,7 +201,7 @@ implements HeapSize, Map { public long getMemMax() { return memTotal; } - + /** * Get the currently used memory for this LRU in bytes. * @@ -210,7 +210,7 @@ implements HeapSize, Map { public long getMemUsed() { return (memTotal - memFree); } - + /** * Get the number of hits to the map. This is the number of times * a call to get() returns a matched key. 
@@ -220,7 +220,7 @@ implements HeapSize, Map { public long getHitCount() { return hitCount; } - + /** * Get the number of misses to the map. This is the number of times * a call to get() returns null. @@ -230,7 +230,7 @@ implements HeapSize, Map { public long getMissCount() { return missCount; } - + /** * Get the hit ratio. This is the number of hits divided by the * total number of requests. @@ -241,7 +241,7 @@ implements HeapSize, Map { return (double)((double)hitCount/ ((double)(hitCount+missCount))); } - + /** * Free the requested amount of memory from the LRU map. * @@ -263,7 +263,7 @@ implements HeapSize, Map { } return freedMemory; } - + /** * The total memory usage of this map * @@ -272,7 +272,7 @@ implements HeapSize, Map { public long heapSize() { return (memTotal - memFree); } - + //-------------------------------------------------------------------------- /** * Retrieves the value associated with the specified key. @@ -288,7 +288,7 @@ implements HeapSize, Map { checkKey((K)key); int hash = hash(key); int i = hashIndex(hash, entries.length); - Entry e = entries[i]; + Entry e = entries[i]; while (true) { if (e == null) { missCount++; @@ -315,7 +315,7 @@ implements HeapSize, Map { * @param key the key * @param value the value * @return the value that was previously mapped to this key, null if none - * @throws UnsupportedOperationException if either objects do not + * @throws UnsupportedOperationException if either objects do not * implement HeapSize * @throws NullPointerException if the key or value is null */ @@ -324,7 +324,7 @@ implements HeapSize, Map { checkValue(value); int hash = hash(key); int i = hashIndex(hash, entries.length); - + // For old values for (Entry e = entries[i]; e != null; e = e.next) { if (e.hash == hash && isEqual(key, e.key)) { @@ -340,7 +340,7 @@ implements HeapSize, Map { checkAndFreeMemory(memChange); return null; } - + /** * Deletes the mapping for the specified key if it exists. * @@ -383,7 +383,7 @@ implements HeapSize, Map { public synchronized void clear() { memFree += clearAll(); } - + //-------------------------------------------------------------------------- /** * Checks whether there is a value in the map for the specified key. @@ -398,9 +398,9 @@ implements HeapSize, Map { checkKey((K)key); int hash = hash(key); int i = hashIndex(hash, entries.length); - Entry e = entries[i]; + Entry e = entries[i]; while (e != null) { - if (e.hash == hash && isEqual(key, e.key)) + if (e.hash == hash && isEqual(key, e.key)) return true; e = e.next; } @@ -409,7 +409,7 @@ implements HeapSize, Map { /** * Checks whether this is a mapping which contains the specified value. - * + * * Does not affect the LRU. This is an inefficient operation. * * @param value the value to check @@ -445,7 +445,7 @@ implements HeapSize, Map { throw new NullPointerException("null keys are not allowed"); } } - + /** * Enforces value constraints. Null values are not permitted and value must * implement HeapSize. It should not be necessary to verify the second @@ -463,7 +463,7 @@ implements HeapSize, Map { throw new NullPointerException("null values are not allowed"); } } - + /** * Returns the minimum memory usage of the base map structure. 
* @@ -472,7 +472,7 @@ implements HeapSize, Map { private long getMinimumUsage() { return OVERHEAD + (entries.length * ClassSize.REFERENCE); } - + //-------------------------------------------------------------------------- /** * Evicts and frees based on LRU until at least as much memory as requested @@ -499,7 +499,7 @@ implements HeapSize, Map { removeEntry(headPtr); return freed; } - + /** * Moves the specified entry to the most recently used slot of the * LRU. This is called whenever an entry is fetched. @@ -545,10 +545,10 @@ implements HeapSize, Map { } else { prev.next = next; } - + Entry prevPtr = e.getPrevPtr(); Entry nextPtr = e.getNextPtr(); - + if(prevPtr != null && nextPtr != null) { prevPtr.setNextPtr(nextPtr); nextPtr.setPrevPtr(prevPtr); @@ -559,7 +559,7 @@ implements HeapSize, Map { headPtr = nextPtr; nextPtr.setPrevPtr(null); } - + return; } prev = e; @@ -589,7 +589,7 @@ implements HeapSize, Map { } else { prev.next = next; } - + // Updating LRU Entry prevPtr = e.getPrevPtr(); Entry nextPtr = e.getNextPtr(); @@ -603,7 +603,7 @@ implements HeapSize, Map { headPtr = nextPtr; nextPtr.setPrevPtr(null); } - + return e; } prev = e; @@ -671,7 +671,7 @@ implements HeapSize, Map { size = 0; return freedMemory; } - + //-------------------------------------------------------------------------- /** * Recreates the entire contents of the hashmap into a new array @@ -683,7 +683,7 @@ implements HeapSize, Map { private void growTable(int newCapacity) { Entry [] oldTable = entries; int oldCapacity = oldTable.length; - + // Do not allow growing the table beyond the max capacity if (oldCapacity == MAXIMUM_CAPACITY) { threshold = Integer.MAX_VALUE; @@ -692,12 +692,12 @@ implements HeapSize, Map { // Determine how much additional space will be required to grow the array long requiredSpace = (newCapacity - oldCapacity) * ClassSize.REFERENCE; - + // Verify/enforce we have sufficient memory to grow checkAndFreeMemory(requiredSpace); Entry [] newTable = new Entry[newCapacity]; - + // Transfer existing entries to new hash table for(int i=0; i < oldCapacity; i++) { Entry entry = oldTable[i]; @@ -734,7 +734,7 @@ implements HeapSize, Map { h ^= (h >>> 10); return h; } - + /** * Compares two objects for equality. Method uses equals method and * assumes neither value is null. @@ -746,7 +746,7 @@ implements HeapSize, Map { private boolean isEqual(Object x, Object y) { return (x == y || x.equals(y)); } - + /** * Determines the index into the current hash table for the specified * hashValue. @@ -781,7 +781,7 @@ implements HeapSize, Map { } return newCapacity; } - + /** * Calculates the threshold of the map given the capacity and load * factor. Once the number of entries in the map grows to the @@ -802,7 +802,7 @@ implements HeapSize, Map { memFree -= OVERHEAD; memFree -= (entries.length * ClassSize.REFERENCE); } - + //-------------------------------------------------------------------------- /** * Debugging function that returns a List sorted by access time. @@ -836,7 +836,7 @@ implements HeapSize, Map { } return entrySet; } - + /** * Get the head of the linked list (least recently used). * @@ -845,16 +845,16 @@ implements HeapSize, Map { public Entry getHeadPtr() { return headPtr; } - + /** * Get the tail of the linked list (most recently used). 
- * + * * @return tail of linked list */ public Entry getTailPtr() { return tailPtr; } - + //-------------------------------------------------------------------------- /** * To best optimize this class, some of the methods that are part of a @@ -863,7 +863,7 @@ implements HeapSize, Map { * significant overhead and code complexity to support and are * unnecessary for the requirements of this class. */ - + /** * Intentionally unimplemented. */ @@ -887,7 +887,7 @@ implements HeapSize, Map { throw new UnsupportedOperationException( "hashCode(Object) is intentionally unimplemented"); } - + /** * Intentionally unimplemented. */ @@ -895,7 +895,7 @@ implements HeapSize, Map { throw new UnsupportedOperationException( "keySet() is intentionally unimplemented"); } - + /** * Intentionally unimplemented. */ @@ -903,7 +903,7 @@ implements HeapSize, Map { throw new UnsupportedOperationException( "putAll() is intentionally unimplemented"); } - + /** * Intentionally unimplemented. */ @@ -925,9 +925,9 @@ implements HeapSize, Map { protected static class Entry implements Map.Entry, HeapSize { /** The baseline overhead memory usage of this class */ - static final int OVERHEAD = 1 * Bytes.SIZEOF_LONG + + static final int OVERHEAD = 1 * Bytes.SIZEOF_LONG + 5 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT; - + /** The key */ protected final K key; /** The value */ @@ -936,12 +936,12 @@ implements HeapSize, Map { protected final int hash; /** The next entry in the hash chain (for collisions) */ protected Entry next; - + /** The previous entry in the LRU list (towards LRU) */ protected Entry prevPtr; /** The next entry in the LRU list (towards MRU) */ protected Entry nextPtr; - + /** The precomputed heap size of this entry */ protected long heapSize; @@ -982,7 +982,7 @@ implements HeapSize, Map { public V getValue() { return value; } - + /** * Set the value of this entry. * @@ -998,7 +998,7 @@ implements HeapSize, Map { value = newValue; return oldValue; } - + /** * Replace the value of this entry. * @@ -1014,7 +1014,7 @@ implements HeapSize, Map { heapSize += sizeDiff; return sizeDiff; } - + /** * Returns true is the specified entry has the same key and the * same value as this entry. @@ -1031,13 +1031,13 @@ implements HeapSize, Map { if (k1 == k2 || (k1 != null && k1.equals(k2))) { Object v1 = getValue(); Object v2 = e.getValue(); - if (v1 == v2 || (v1 != null && v1.equals(v2))) + if (v1 == v2 || (v1 != null && v1.equals(v2))) return true; } return false; } - - /** + + /** * Returns the hash code of the entry by xor'ing the hash values * of the key and value of this entry. * @@ -1046,7 +1046,7 @@ implements HeapSize, Map { public int hashCode() { return (key.hashCode() ^ value.hashCode()); } - + /** * Returns String representation of the entry in form "key=value" * @@ -1064,15 +1064,15 @@ implements HeapSize, Map { protected void setPrevPtr(Entry prevPtr){ this.prevPtr = prevPtr; } - + /** * Returns the previous pointer for the entry in the LRU. * @return previous entry */ protected Entry getPrevPtr(){ return prevPtr; - } - + } + /** * Sets the next pointer for the entry in the LRU. * @param nextPtr next entry @@ -1080,7 +1080,7 @@ implements HeapSize, Map { protected void setNextPtr(Entry nextPtr){ this.nextPtr = nextPtr; } - + /** * Returns the next pointer for the entry in teh LRU. 
* @return next entry @@ -1088,7 +1088,7 @@ implements HeapSize, Map { protected Entry getNextPtr(){ return nextPtr; } - + /** * Returns the pre-computed and "deep" size of the Entry * @return size of the entry in bytes diff --git a/src/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/src/java/org/apache/hadoop/hbase/regionserver/MemStore.java index dcb9e55..669098b 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -189,7 +189,7 @@ public class MemStore implements HeapSize { return s; } - /** + /** * Write a delete * @param delete * @return approximate size of the passed key and value. @@ -206,7 +206,7 @@ public class MemStore implements HeapSize { this.size.addAndGet(s); return s; } - + /** * @param kv Find the row that comes after this one. If null, we return the * first. @@ -455,7 +455,7 @@ public class MemStore implements HeapSize { void readLockUnlock() { this.lock.readLock().unlock(); } - + /** * * @param set memstore or snapshot @@ -486,7 +486,7 @@ public class MemStore implements HeapSize { } return false; } - + /* * MemStoreScanner implements the KeyValueScanner. @@ -520,7 +520,7 @@ public class MemStore implements HeapSize { StoreScanner level with coordination with MemStoreScanner. */ - + MemStoreScanner() { super(); @@ -531,7 +531,7 @@ public class MemStore implements HeapSize { KeyValue ret = null; long readPoint = ReadWriteConsistencyControl.getThreadReadPoint(); //DebugPrint.println( " MS@" + hashCode() + ": threadpoint = " + readPoint); - + while (ret == null && it.hasNext()) { KeyValue v = it.next(); if (v.getMemstoreTS() <= readPoint) { @@ -566,7 +566,7 @@ public class MemStore implements HeapSize { //DebugPrint.println( " MS@" + hashCode() + " snapshot seek: " + snapshotNextRow + " with size = " + // snapshot.size() + " threadread = " + readPoint); - + KeyValue lowest = getLowest(); // has data := (lowest != null) @@ -631,7 +631,7 @@ public class MemStore implements HeapSize { public final static long FIXED_OVERHEAD = ClassSize.align( ClassSize.OBJECT + (7 * ClassSize.REFERENCE)); - + public final static long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD + ClassSize.REENTRANT_LOCK + ClassSize.ATOMIC_LONG + ClassSize.COPYONWRITE_ARRAYSET + ClassSize.COPYONWRITE_ARRAYLIST + @@ -645,11 +645,11 @@ public class MemStore implements HeapSize { * @return Size */ long heapSizeChange(final KeyValue kv, final boolean notpresent) { - return notpresent ? + return notpresent ? ClassSize.align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + kv.heapSize()): 0; } - + /** * Get the entire heap usage for this MemStore not including keys in the * snapshot. @@ -658,7 +658,7 @@ public class MemStore implements HeapSize { public long heapSize() { return size.get(); } - + /** * Get the heap usage of KVs in this MemStore. */ diff --git a/src/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/src/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index 167707f..adefd07 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -45,14 +45,14 @@ import org.apache.hadoop.util.StringUtils; * NOTE: This class extends Thread rather than Chore because the sleep time * can be interrupted when there is something to do, rather than the Chore * sleep time which is invariant. 
- * + * * @see FlushRequester */ class MemStoreFlusher extends Thread implements FlushRequester { static final Log LOG = LogFactory.getLog(MemStoreFlusher.class); private final BlockingQueue flushQueue = new LinkedBlockingQueue(); - + private final HashSet regionsInQueue = new HashSet(); private final long threadWakeFrequency; @@ -61,7 +61,7 @@ class MemStoreFlusher extends Thread implements FlushRequester { protected final long globalMemStoreLimit; protected final long globalMemStoreLimitLowMark; - + private static final float DEFAULT_UPPER = 0.4f; private static final float DEFAULT_LOWER = 0.25f; private static final String UPPER_KEY = @@ -91,7 +91,7 @@ class MemStoreFlusher extends Thread implements FlushRequester { "because supplied " + LOWER_KEY + " was > " + UPPER_KEY); } this.globalMemStoreLimitLowMark = lower; - this.blockingStoreFilesNumber = + this.blockingStoreFilesNumber = conf.getInt("hbase.hstore.blockingStoreFiles", -1); if (this.blockingStoreFilesNumber == -1) { this.blockingStoreFilesNumber = 1 + @@ -120,7 +120,7 @@ class MemStoreFlusher extends Thread implements FlushRequester { float limit = c.getFloat(key, defaultLimit); return getMemStoreLimit(max, limit, defaultLimit); } - + static long getMemStoreLimit(final long max, final float limit, final float defaultLimit) { if (limit >= 0.9f || limit < 0.1f) { @@ -129,7 +129,7 @@ class MemStoreFlusher extends Thread implements FlushRequester { } return (long)(max * limit); } - + @Override public void run() { while (!this.server.isStopRequested() && this.server.isInSafeMode()) { @@ -166,7 +166,7 @@ class MemStoreFlusher extends Thread implements FlushRequester { flushQueue.clear(); LOG.info(getName() + " exiting"); } - + public void request(HRegion r) { synchronized (regionsInQueue) { if (!regionsInQueue.contains(r)) { @@ -175,10 +175,10 @@ class MemStoreFlusher extends Thread implements FlushRequester { } } } - + /** * Only interrupt once it's done with a run through the work loop. - */ + */ void interruptIfNecessary() { lock.lock(); try { @@ -187,10 +187,10 @@ class MemStoreFlusher extends Thread implements FlushRequester { lock.unlock(); } } - + /* * Flush a region. - * + * * @param region the region to be flushed * @param removeFromQueue True if the region needs to be removed from the * flush queue. False if called from the main flusher run loop and true if @@ -203,21 +203,21 @@ class MemStoreFlusher extends Thread implements FlushRequester { * That compactions do not run when called out of flushSomeRegions means that * compactions can be reported by the historian without danger of deadlock * (HBASE-670). - * + * *

          In the main run loop, regions have already been removed from the flush * queue, and if this method is called for the relief of memory pressure, - * this may not be necessarily true. We want to avoid trying to remove + * this may not be necessarily true. We want to avoid trying to remove * region from the queue because if it has already been removed, it requires a * sequential scan of the queue to determine that it is not in the queue. - * + * *

          If called from flushSomeRegions, the region may be in the queue but - * it may have been determined that the region had a significant amount of + * it may have been determined that the region had a significant amount of * memory in use and needed to be flushed to relieve memory pressure. In this * case, its flush may preempt the pending request in the queue, and if so, * it needs to be removed from the queue to avoid flushing the region * multiple times. - * - * @return true if the region was successfully flushed, false otherwise. If + * + * @return true if the region was successfully flushed, false otherwise. If * false, there will be accompanying log messages explaining why the log was * not flushed. */ @@ -341,7 +341,7 @@ class MemStoreFlusher extends Thread implements FlushRequester { } /** - * Check if the regionserver's memstore memory usage is greater than the + * Check if the regionserver's memstore memory usage is greater than the * limit. If so, flush regions with the biggest memstores until we're down * to the lower limit. This method blocks callers until we're down to a safe * amount of memstore consumption. diff --git a/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java b/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java index 66bd6f1..fef0faf 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.util.Bytes; * This is the primary class used to process KeyValues during a Get or Scan * operation. *

          - * It encapsulates the handling of the column and version input parameters to + * It encapsulates the handling of the column and version input parameters to * the query through a {@link ColumnTracker}. *

          * Deletes are handled using the {@link DeleteTracker}. @@ -41,10 +41,10 @@ import org.apache.hadoop.hbase.util.Bytes; * All other query parameters are accessed from the client-specified Get. *

          * The primary method used is {@link #match} with the current KeyValue. It will - * return a {@link QueryMatcher.MatchCode} - * + * return a {@link QueryMatcher.MatchCode} + * * , deletes, - * versions, + * versions, */ public class QueryMatcher { /** @@ -59,17 +59,17 @@ public class QueryMatcher { * Include KeyValue in the returned result */ INCLUDE, - + /** * Do not include KeyValue in the returned result */ SKIP, - + /** * Do not include, jump to next StoreFile or memstore (in time order) */ NEXT, - + /** * Do not include, return current result */ @@ -93,25 +93,25 @@ public class QueryMatcher { */ DONE_SCAN, } - + /** Keeps track of deletes */ protected DeleteTracker deletes; - + /** Keeps track of columns and versions */ protected ColumnTracker columns; - + /** Key to seek to in memstore and StoreFiles */ protected KeyValue startKey; - + /** Row comparator for the region this query is for */ KeyComparator rowComparator; - + /** Row the query is on */ protected byte [] row; - + /** TimeRange the query is for */ protected TimeRange tr; - + /** Oldest allowed version stamp for TTL enforcement */ protected long oldestStamp; @@ -125,7 +125,7 @@ public class QueryMatcher { * @param ttl * @param rowComparator */ - public QueryMatcher(Get get, byte [] family, + public QueryMatcher(Get get, byte [] family, NavigableSet columns, long ttl, KeyComparator rowComparator, int maxVersions) { this.row = get.getRow(); @@ -164,7 +164,7 @@ public class QueryMatcher { this.startKey = matcher.getStartKey(); reset(); } - + /** * Main method for ColumnMatcher. *
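The match() hunk that follows walks the raw KeyValue buffer and answers with one of the MatchCode values enumerated above, and callers branch on that answer. A compressed, hypothetical illustration of such a dispatch loop (the enum and interfaces are stand-ins, not the actual QueryMatcher or scanner types):

import java.util.ArrayList;
import java.util.List;

class MatchLoopSketch {
  // Stand-in for the subset of match codes a simple scan loop acts on.
  enum Code { INCLUDE, SKIP, NEXT, DONE }

  interface Matcher { Code match(String kv); }
  interface CellSource { String peek(); String next(); }

  static List<String> scanRow(Matcher matcher, CellSource source) {
    List<String> results = new ArrayList<>();
    String kv;
    while ((kv = source.peek()) != null) {
      switch (matcher.match(kv)) {
        case INCLUDE:
          results.add(source.next());  // keep the cell and advance
          break;
        case SKIP:
          source.next();               // drop the cell, keep scanning
          break;
        case NEXT:                     // nothing more useful in this source
        case DONE:                     // the row is complete
          return results;
        default:
          throw new IllegalStateException("unexpected match code");
      }
    }
    return results;
  }
}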

          @@ -195,10 +195,10 @@ public class QueryMatcher { // Directly act on KV buffer byte [] bytes = kv.getBuffer(); int offset = kv.getOffset(); - + int keyLength = Bytes.toInt(bytes, offset); offset += KeyValue.ROW_OFFSET; - + short rowLength = Bytes.toShort(bytes, offset); offset += Bytes.SIZEOF_SHORT; @@ -207,7 +207,7 @@ public class QueryMatcher { /* Check ROW * If past query's row, go to next StoreFile * If not reached query's row, go to next KeyValue - */ + */ int ret = this.rowComparator.compareRows(row, 0, row.length, bytes, offset, rowLength); if (ret <= -1) { @@ -220,7 +220,7 @@ public class QueryMatcher { offset += rowLength; byte familyLength = bytes[offset]; offset += Bytes.SIZEOF_BYTE + familyLength; - + int columnLength = keyLength + KeyValue.ROW_OFFSET - (offset - kv.getOffset()) - KeyValue.TIMESTAMP_TYPE_SIZE; int columnOffset = offset; @@ -244,14 +244,14 @@ public class QueryMatcher { */ byte type = bytes[offset]; // if delete type == delete family, return done_row - + if (isDelete(type)) { if (tr.withinOrAfterTimeRange(timestamp)) { this.deletes.add(bytes, columnOffset, columnLength, timestamp, type); } return MatchCode.SKIP; // skip the delete cell. } - + /* Check TimeRange * If outside of range, move to next KeyValue */ @@ -274,8 +274,8 @@ public class QueryMatcher { * Returns a MatchCode directly, identical language * If matched column without enough versions, include * If enough versions of this column or does not match, skip - * If have moved past - * If enough versions of everything, + * If have moved past + * If enough versions of everything, * TODO: No mapping from Filter.ReturnCode to MatchCode. */ MatchCode mc = columns.checkColumn(bytes, columnOffset, columnLength); @@ -293,7 +293,7 @@ public class QueryMatcher { protected boolean isDelete(byte type) { return (type != KeyValue.Type.Put.getCode()); } - + protected boolean isExpired(long timestamp) { return (timestamp < oldestStamp); } @@ -309,18 +309,18 @@ public class QueryMatcher { public ColumnCount getSeekColumn() { return this.columns.getColumnHint(); } - + /** * Called after reading each section (memstore, snapshot, storefiles). *

          * This method will update the internal structures to be accurate for - * the next section. + * the next section. */ public void update() { this.deletes.update(); this.columns.update(); } - + /** * Resets the current columns and deletes */ @@ -336,52 +336,52 @@ public class QueryMatcher { public void setRow(byte [] row) { this.row = row; } - + /** - * + * * @return the start key */ public KeyValue getStartKey() { return this.startKey; } - + /** * @return the TimeRange */ public TimeRange getTimeRange() { return this.tr; } - + /** * @return the oldest stamp */ public long getOldestStamp() { return this.oldestStamp; } - + /** * @return current KeyComparator */ public KeyComparator getRowComparator() { return this.rowComparator; } - + /** * @return ColumnTracker */ public ColumnTracker getColumnTracker() { return this.columns; } - + /** * @return DeleteTracker */ public DeleteTracker getDeleteTracker() { return this.deletes; } - + /** - * + * * @return true when done. */ public boolean isDone() { diff --git a/src/java/org/apache/hadoop/hbase/regionserver/ReadWriteConsistencyControl.java b/src/java/org/apache/hadoop/hbase/regionserver/ReadWriteConsistencyControl.java index b1f1368..c9e1236 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/ReadWriteConsistencyControl.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/ReadWriteConsistencyControl.java @@ -22,7 +22,7 @@ public class ReadWriteConsistencyControl { public static long getThreadReadPoint() { return perThreadReadPoint.get(); } - + public static long resetThreadReadPoint(ReadWriteConsistencyControl rwcc) { perThreadReadPoint.set(rwcc.memstoreReadPoint()); return getThreadReadPoint(); diff --git a/src/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java b/src/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java index 8bdfedb..cbefe1e 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java @@ -27,7 +27,7 @@ import java.io.IOException; */ public class RegionServerRunningException extends IOException { private static final long serialVersionUID = 1L << 31 - 1L; - + /** Default Constructor */ public RegionServerRunningException() { super(); diff --git a/src/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java b/src/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java index f4c7dc9..0626417 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.util.Bytes; *

* {@link #isDeleted} when checking if a Put KeyValue has been deleted
* {@link #update} when reaching the end of a StoreFile or row for scans
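Those three calls form the tracker's whole life cycle: deletes seen while scanning a row are recorded, later Puts are checked against them, and the state is dropped at the end of the row or StoreFile. A toy sketch of that life cycle, assuming plain qualifier strings instead of the byte[] offsets and timestamps the real class compares:

import java.util.HashSet;
import java.util.Set;

class ToyDeleteTracker {
  private final Set<String> deletedQualifiers = new HashSet<>();

  /** Record a delete marker encountered while scanning the current row. */
  void add(String qualifier) {
    deletedQualifiers.add(qualifier);
  }

  /** Ask whether a Put for this qualifier is masked by an earlier delete. */
  boolean isDeleted(String qualifier) {
    return deletedQualifiers.contains(qualifier);
  }

  /** Called at the end of a StoreFile or row; no state carries over. */
  void update() {
    deletedQualifiers.clear();
  }
}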

          - * This class is NOT thread-safe as queries are never multi-threaded + * This class is NOT thread-safe as queries are never multi-threaded */ public class ScanDeleteTracker implements DeleteTracker { @@ -53,7 +53,7 @@ public class ScanDeleteTracker implements DeleteTracker { public ScanDeleteTracker() { super(); } - + /** * Add the specified KeyValue to the list of deletes to check against for * this row operation. @@ -91,7 +91,7 @@ public class ScanDeleteTracker implements DeleteTracker { // missing else is never called. } - /** + /** * Check if the specified KeyValue buffer has been deleted by a previously * seen delete. * @@ -107,7 +107,7 @@ public class ScanDeleteTracker implements DeleteTracker { if (timestamp <= familyStamp) { return true; } - + if (deleteBuffer != null) { int ret = Bytes.compareTo(deleteBuffer, deleteOffset, deleteLength, buffer, qualifierOffset, qualifierLength); @@ -150,7 +150,7 @@ public class ScanDeleteTracker implements DeleteTracker { } @Override - // should not be called at all even (!) + // should not be called at all even (!) public void update() { this.reset(); } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java index 4a71876..2dd1aeb 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java @@ -48,7 +48,7 @@ public class ScanQueryMatcher extends QueryMatcher { * @param rowComparator */ public ScanQueryMatcher(Scan scan, byte [] family, - NavigableSet columns, long ttl, + NavigableSet columns, long ttl, KeyValue.KeyComparator rowComparator, int maxVersions) { this.tr = scan.getTimeRange(); this.oldestStamp = System.currentTimeMillis() - ttl; @@ -62,7 +62,7 @@ public class ScanQueryMatcher extends QueryMatcher { } this.filter = scan.getFilter(); this.oldFilter = scan.getOldFilter(); - + // Single branch to deal with two types of reads (columns vs all in family) if (columns == null || columns.size() == 0) { // use a specialized scan for wildcard column tracker. @@ -81,7 +81,7 @@ public class ScanQueryMatcher extends QueryMatcher { * - include the current KeyValue (MatchCode.INCLUDE) * - ignore the current KeyValue (MatchCode.SKIP) * - got to the next row (MatchCode.DONE) - * + * * @param kv KeyValue to check * @return The match code instance. */ @@ -95,14 +95,14 @@ public class ScanQueryMatcher extends QueryMatcher { byte [] bytes = kv.getBuffer(); int offset = kv.getOffset(); - int initialOffset = offset; + int initialOffset = offset; int keyLength = Bytes.toInt(bytes, offset, Bytes.SIZEOF_INT); offset += KeyValue.ROW_OFFSET; - + short rowLength = Bytes.toShort(bytes, offset, Bytes.SIZEOF_SHORT); offset += Bytes.SIZEOF_SHORT; - + int ret = this.rowComparator.compareRows(row, 0, row.length, bytes, offset, rowLength); if (ret <= -1) { @@ -122,17 +122,17 @@ public class ScanQueryMatcher extends QueryMatcher { stickyNextRow = true; return MatchCode.SEEK_NEXT_ROW; } - + //Passing rowLength offset += rowLength; //Skipping family byte familyLength = bytes [offset]; offset += familyLength + 1; - + int qualLength = keyLength + KeyValue.ROW_OFFSET - (offset - initialOffset) - KeyValue.TIMESTAMP_TYPE_SIZE; - + long timestamp = kv.getTimestamp(); if (isExpired(timestamp)) { // done, the rest of this column will also be expired as well. 
@@ -145,7 +145,7 @@ public class ScanQueryMatcher extends QueryMatcher { this.deletes.add(bytes, offset, qualLength, timestamp, type); // Can't early out now, because DelFam come before any other keys } - // May be able to optimize the SKIP here, if we matched + // May be able to optimize the SKIP here, if we matched // due to a DelFam, we can skip to next row // due to a DelCol, we can skip to next col // But it requires more info out of isDelete(). diff --git a/src/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java b/src/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java index 435d512..8326e62 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.util.Bytes; * Keeps track of the columns for a scan if they are not explicitly specified */ public class ScanWildcardColumnTracker implements ColumnTracker { - private static final Log LOG = + private static final Log LOG = LogFactory.getLog(ScanWildcardColumnTracker.class); private byte [] columnBuffer = null; private int columnOffset = 0; @@ -103,7 +103,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { currentCount = 0; if (++currentCount > maxVersions) return MatchCode.SKIP; - return MatchCode.INCLUDE; + return MatchCode.INCLUDE; } @Override @@ -122,7 +122,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { * Used by matcher and scan/get to get a hint of the next column * to seek to after checkColumn() returns SKIP. Returns the next interesting * column we want, or NULL there is none (wildcard scanner). - * + * * @return The column count. */ public ColumnCount getColumnHint() { @@ -131,7 +131,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { /** - * We can never know a-priori if we are done, so always return false. + * We can never know a-priori if we are done, so always return false. * @return false */ @Override diff --git a/src/java/org/apache/hadoop/hbase/regionserver/Store.java b/src/java/org/apache/hadoop/hbase/regionserver/Store.java index 9006099..a16492d 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -66,7 +66,7 @@ import org.apache.hadoop.util.StringUtils; * A Store holds a column family in a Region. Its a memstore and a set of zero * or more StoreFiles, which stretch backwards over time. * - *

          There's no reason to consider append-logging at this level; all logging + *

          There's no reason to consider append-logging at this level; all logging * and locking is handled at the HRegion level. Store just provides * services to manage sets of StoreFiles. One of the most important of those * services is compaction services where files are aggregated once they pass @@ -139,7 +139,7 @@ public class Store implements HConstants, HeapSize { private final int blocksize; private final boolean blockcache; private final Compression.Algorithm compression; - + // Comparing KeyValues final KeyValue.KVComparator comparator; final KeyValue.KVComparator comparatorIgnoringType; @@ -186,7 +186,7 @@ public class Store implements HConstants, HeapSize { this.ttl *= 1000; } this.memstore = new MemStore(this.comparator); - this.regionCompactionDir = new Path(HRegion.getCompactionDir(basedir), + this.regionCompactionDir = new Path(HRegion.getCompactionDir(basedir), Integer.toString(info.getEncodedName())); this.storeName = this.family.getName(); this.storeNameStr = Bytes.toString(this.storeName); @@ -195,10 +195,10 @@ public class Store implements HConstants, HeapSize { // MIN_COMMITS_FOR_COMPACTION map files this.compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3); - + // Check if this is in-memory store this.inMemory = family.isInMemory(); - + // By default we split region if a file > DEFAULT_MAX_FILE_SIZE. long maxFileSize = info.getTableDesc().getMaxFileSize(); if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) { @@ -228,7 +228,7 @@ public class Store implements HConstants, HeapSize { this.maxSeqId = newId; // start with the log id we just recovered. } } - + HColumnDescriptor getFamily() { return this.family; } @@ -236,7 +236,7 @@ public class Store implements HConstants, HeapSize { long getMaxSequenceId() { return this.maxSeqId; } - + long getMaxSeqIdBeforeLogRecovery() { return maxSeqIdBeforeLogRecovery; } @@ -286,11 +286,11 @@ public class Store implements HConstants, HeapSize { } /* - * Read the reconstructionLog to see whether we need to build a brand-new - * file out of non-flushed log entries. + * Read the reconstructionLog to see whether we need to build a brand-new + * file out of non-flushed log entries. * - * We can ignore any log message that has a sequence ID that's equal to or - * lower than maxSeqID. (Because we know such log messages are already + * We can ignore any log message that has a sequence ID that's equal to or + * lower than maxSeqID. (Because we know such log messages are already * reflected in the HFiles.) * * @return the new max sequence id as per the log, or -1 if no log recovered @@ -447,7 +447,7 @@ public class Store implements HConstants, HeapSize { /** * Adds a value to the memstore - * + * * @param kv * @return memstore size delta */ @@ -462,7 +462,7 @@ public class Store implements HConstants, HeapSize { /** * Adds a value to the memstore - * + * * @param kv * @return memstore size delta */ @@ -484,10 +484,10 @@ public class Store implements HConstants, HeapSize { /** * Close all the readers - * + * * We don't need to worry about subsequent requests because the HRegion holds * a write lock that will prevent any more reads or writes. 
- * + * * @throws IOException */ List close() throws IOException { @@ -568,7 +568,7 @@ public class Store implements HConstants, HeapSize { writer.close(); } } - StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache, + StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache, this.conf, this.inMemory); Reader r = sf.getReader(); this.storeSize += r.length(); @@ -657,21 +657,21 @@ public class Store implements HConstants, HeapSize { ////////////////////////////////////////////////////////////////////////////// /** - * Compact the StoreFiles. This method may take some time, so the calling + * Compact the StoreFiles. This method may take some time, so the calling * thread must be able to block for long periods. - * + * *

          During this time, the Store can work as usual, getting values from * StoreFiles and writing new StoreFiles from the memstore. - * - * Existing StoreFiles are not destroyed until the new compacted StoreFile is + * + * Existing StoreFiles are not destroyed until the new compacted StoreFile is * completely written-out to disk. * *

          The compactLock prevents multiple simultaneous compactions. * The structureLock prevents us from interfering with other write operations. - * - *

          We don't want to hold the structureLock for the whole time, as a compact() + * + *

          We don't want to hold the structureLock for the whole time, as a compact() * can be lengthy and we want to allow cache-flushes during this period. - * + * * @param mc True to force a major compaction regardless of thresholds * @return row to split around if a split is needed, null otherwise * @throws IOException @@ -699,7 +699,7 @@ public class Store implements HConstants, HeapSize { } boolean references = hasReferences(filesToCompact); - if (!majorcompaction && !references && + if (!majorcompaction && !references && (forceSplit || (filesToCompact.size() < compactionThreshold))) { return checkSplit(forceSplit); } @@ -733,10 +733,10 @@ public class Store implements HConstants, HeapSize { fileSizes[i] = len; totalSize += len; } - + if (!majorcompaction && !references) { - // Here we select files for incremental compaction. - // The rule is: if the largest(oldest) one is more than twice the + // Here we select files for incremental compaction. + // The rule is: if the largest(oldest) one is more than twice the // size of the second, skip the largest, and continue to next..., // until we meet the compactionThreshold limit. @@ -770,7 +770,7 @@ public class Store implements HConstants, HeapSize { " file(s), size: " + skipped); } } - + // Ready to go. Have list of files to compact. LOG.debug("Started compaction of " + filesToCompact.size() + " file(s)" + (references? ", hasReferences=true,": " ") + " into " + @@ -805,7 +805,7 @@ public class Store implements HConstants, HeapSize { /* * Gets lowest timestamp from files in a dir - * + * * @param fs * @param dir * @throws IOException @@ -873,7 +873,7 @@ public class Store implements HConstants, HeapSize { /** * Do a minor/major compaction. Uses the scan infrastructure to make it easy. - * + * * @param filesToCompact which files to compact * @param majorCompaction true to major compact (prune all deletes, max versions, etc) * @param maxId Readers maximum sequence id. @@ -949,14 +949,14 @@ public class Store implements HConstants, HeapSize { } /* - * It's assumed that the compactLock will be acquired prior to calling this + * It's assumed that the compactLock will be acquired prior to calling this * method! Otherwise, it is not thread-safe! * *
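The compact() hunk above selects files for an incremental compaction by skipping the oldest file while it is more than twice the size of the next one, until the remaining candidates fall within the compaction threshold. A rough sketch of that selection rule, with a hypothetical helper name and illustrative sizes:

// Rough sketch of the incremental-compaction file selection described in the
// compact() hunk above; not the actual Store.compact() code.
class CompactionSelectionSketch {
  static int firstFileToCompact(long[] fileSizes, int compactionThreshold) {
    int point = 0;
    // fileSizes is ordered oldest (typically largest) to newest
    while (point < fileSizes.length - 1
        && fileSizes.length - point > compactionThreshold
        && fileSizes[point] > 2 * fileSizes[point + 1]) {
      point++;   // the oldest file dwarfs the next one, leave it alone
    }
    return point; // compact files from this index to the end
  }

  public static void main(String[] args) {
    long[] sizes = {900, 100, 80, 60, 40};
    System.out.println(firstFileToCompact(sizes, 3)); // skips the 900-byte file -> 1
  }
}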

          It works by processing a compaction that's been written to disk. - * + * *

          It is usually invoked at the end of a compaction, but might also be * invoked at HStore startup, if the prior execution died midway through. - * + * *

          Moving the compacted TreeMap into place means: *

              * 1) Moving the new compacted StoreFile into place
          @@ -964,7 +964,7 @@ public class Store implements HConstants, HeapSize {
              * 3) Loading the new TreeMap.
              * 4) Compute new store size
              * 
          - * + * * @param compactedFiles list of files that were compacted * @param compactedFile StoreFile that is the result of the compaction * @return StoreFile created. May be null. @@ -1009,7 +1009,7 @@ public class Store implements HConstants, HeapSize { // WARN ugly hack here, but necessary sadly. ReadWriteConsistencyControl.resetThreadReadPoint(region.getRWCC()); - + // Tell observers that list of StoreFiles has changed. notifyChangedReadersObservers(); // Finally, delete old store files. @@ -1049,7 +1049,7 @@ public class Store implements HConstants, HeapSize { public int getNumberOfstorefiles() { return this.storefiles.size(); } - + /* * @param wantedVersions How many versions were asked for. @@ -1077,8 +1077,8 @@ public class Store implements HConstants, HeapSize { /** * Find the key that matches row exactly, or the one that immediately - * preceeds it. WARNING: Only use this method on a table where writes occur - * with strictly increasing timestamps. This method assumes this pattern of + * preceeds it. WARNING: Only use this method on a table where writes occur + * with strictly increasing timestamps. This method assumes this pattern of * writes in order to make it reasonably performant. Also our search is * dependent on the axiom that deletes are for cells that are in the container * that follows whether a memstore snapshot or a storefile, not for the @@ -1280,8 +1280,8 @@ public class Store implements HConstants, HeapSize { byte [] lk = r.getLastKey(); KeyValue lastKey = KeyValue.createKeyValueFromKey(lk, 0, lk.length); // if the midkey is the same as the first and last keys, then we cannot - // (ever) split this region. - if (this.comparator.compareRows(mk, firstKey) == 0 && + // (ever) split this region. + if (this.comparator.compareRows(mk, firstKey) == 0 && this.comparator.compareRows(mk, lastKey) == 0) { if (LOG.isDebugEnabled()) { LOG.debug("cannot split because midkey is the same as first or " + @@ -1298,12 +1298,12 @@ public class Store implements HConstants, HeapSize { } return null; } - + /** @return aggregate size of HStore */ public long getSize() { return storeSize; } - + ////////////////////////////////////////////////////////////////////////////// // File administration ////////////////////////////////////////////////////////////////////////////// @@ -1403,7 +1403,7 @@ public class Store implements HConstants, HeapSize { * @param kv Key to find. * @return True if we were able to seek the scanner to b or to * the key just after. - * @throws IOException + * @throws IOException */ static boolean getClosest(final HFileScanner s, final KeyValue kv) throws IOException { @@ -1445,19 +1445,19 @@ public class Store implements HConstants, HeapSize { } return false; } - + // // HBASE-880/1249/1304 // - + /** * Retrieve results from this store given the specified Get parameters. 
* @param get Get operation * @param columns List of columns to match, can be empty (not null) - * @param result List to add results to + * @param result List to add results to * @throws IOException */ - public void get(Get get, NavigableSet columns, List result) + public void get(Get get, NavigableSet columns, List result) throws IOException { KeyComparator keyComparator = this.comparator.getRawComparator(); @@ -1471,12 +1471,12 @@ public class Store implements HConstants, HeapSize { // Received early-out from memstore return; } - + // Check if we even have storefiles if (this.storefiles.isEmpty()) { return; } - + // Get storefiles for this store List storefileScanners = new ArrayList(); for (StoreFile sf : this.storefiles.descendingMap().values()) { @@ -1488,11 +1488,11 @@ public class Store implements HConstants, HeapSize { // Get a scanner that caches the block and uses pread storefileScanners.add(r.getScanner(true, true)); } - + // StoreFileGetScan will handle reading this store's storefiles StoreFileGetScan scanner = new StoreFileGetScan(storefileScanners, matcher); - - // Run a GET scan and put results into the specified list + + // Run a GET scan and put results into the specified list scanner.get(result); } finally { this.lock.readLock().unlock(); @@ -1505,7 +1505,7 @@ public class Store implements HConstants, HeapSize { * This function will always be seen as atomic by other readers * because it only puts a single KV to memstore. Thus no * read/write control necessary. - * + * * @param row * @param f * @param qualifier @@ -1571,17 +1571,17 @@ public class Store implements HConstants, HeapSize { public boolean hasTooManyStoreFiles() { return this.storefiles.size() > this.compactionThreshold; } - + public static final long FIXED_OVERHEAD = ClassSize.align( ClassSize.OBJECT + (17 * ClassSize.REFERENCE) + (6 * Bytes.SIZEOF_LONG) + (3 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN + ClassSize.align(ClassSize.ARRAY)); - + public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD + - ClassSize.OBJECT + ClassSize.REENTRANT_LOCK + - ClassSize.CONCURRENT_SKIPLISTMAP + + ClassSize.OBJECT + ClassSize.REENTRANT_LOCK + + ClassSize.CONCURRENT_SKIPLISTMAP + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + ClassSize.OBJECT); - + @Override public long heapSize() { return DEEP_OVERHEAD + this.memstore.heapSize(); diff --git a/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 3241da4..0ff82f1 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -78,7 +78,7 @@ public class StoreFile implements HConstants { private boolean blockcache; // Is this from an in-memory store private boolean inMemory; - + // Keys for metadata stored in backing HFile. private static final byte [] MAX_SEQ_ID_KEY = Bytes.toBytes("MAX_SEQ_ID_KEY"); // Set when we obtain a Reader. @@ -89,7 +89,7 @@ public class StoreFile implements HConstants { // If true, this file was product of a major compaction. Its then set // whenever you get a Reader. private AtomicBoolean majorCompaction = null; - + /* * Regex that will work for straight filenames and for reference names. * If reference, then the regex has more than just one group. Group 1 is @@ -105,17 +105,17 @@ public class StoreFile implements HConstants { private final HBaseConfiguration conf; /** - * Constructor, loads a reader and it's indices, etc. 
May allocate a + * Constructor, loads a reader and it's indices, etc. May allocate a * substantial amount of ram depending on the underlying files (10-20MB?). - * + * * @param fs The current file system to use. * @param p The path of the file. * @param blockcache true if the block cache is enabled. * @param conf The current configuration. * @throws IOException When opening the reader fails. */ - StoreFile(final FileSystem fs, final Path p, final boolean blockcache, - final HBaseConfiguration conf, final boolean inMemory) + StoreFile(final FileSystem fs, final Path p, final boolean blockcache, + final HBaseConfiguration conf, final boolean inMemory) throws IOException { this.conf = conf; this.fs = fs; @@ -220,7 +220,7 @@ public class StoreFile implements HConstants { /** * Returns the block cache or null in case none should be used. - * + * * @param conf The current configuration. * @return The block cache or null. */ @@ -263,7 +263,7 @@ public class StoreFile implements HConstants { throw new IllegalAccessError("Already open"); } if (isReference()) { - this.reader = new HalfHFileReader(this.fs, this.referencePath, + this.reader = new HalfHFileReader(this.fs, this.referencePath, getBlockCache(), this.reference); } else { this.reader = new Reader(this.fs, this.path, getBlockCache(), @@ -285,7 +285,7 @@ public class StoreFile implements HConstants { this.sequenceid += 1; } } - + } b = map.get(MAJOR_COMPACTION_KEY); if (b != null) { @@ -327,7 +327,7 @@ public class StoreFile implements HConstants { /** * Delete this file - * @throws IOException + * @throws IOException */ public void delete() throws IOException { close(); @@ -446,7 +446,7 @@ public class StoreFile implements HConstants { * Write file metadata. * Call before you call close on the passed w since its written * as metadata to that file. - * + * * @param w hfile writer * @param maxSequenceId Maximum sequence id. * @throws IOException @@ -487,7 +487,7 @@ public class StoreFile implements HConstants { throws IOException { // A reference to the bottom half of the hsf store file. Reference r = new Reference(splitRow, range); - // Add the referred-to regions name as a dot separated suffix. + // Add the referred-to regions name as a dot separated suffix. // See REF_NAME_PARSER regex above. The referred-to regions name is // up in the path of the passed in f -- parentdir is family, // then the directory above is the region name. diff --git a/src/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java b/src/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java index 6be4a8b..da8e2a8 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java @@ -35,7 +35,7 @@ public class StoreFileGetScan { private QueryMatcher matcher; private KeyValue startKey; - + /** * Constructor * @param scanners @@ -55,7 +55,7 @@ public class StoreFileGetScan { * proceeding to the next StoreFile. *

          * This strategy allows for optimal, stateless (no persisted Scanners) - * early-out scenarios. + * early-out scenarios. * @param result List to add results to * @throws IOException */ @@ -67,15 +67,15 @@ public class StoreFileGetScan { } } } - + /** * Performs a GET operation on a single StoreFile. * @param scanner * @param result * @return true if done with this store, false if must continue to next - * @throws IOException + * @throws IOException */ - public boolean getStoreFile(HFileScanner scanner, List result) + public boolean getStoreFile(HFileScanner scanner, List result) throws IOException { if (scanner.seekTo(startKey.getBuffer(), startKey.getKeyOffset(), startKey.getKeyLength()) == -1) { @@ -108,5 +108,5 @@ public class StoreFileGetScan { } while(scanner.next()); return false; } - + } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index cd441b4..5ada644 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -29,10 +29,10 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner; * A KeyValue scanner that iterates over a single HFile */ class StoreFileScanner implements KeyValueScanner { - + private HFileScanner hfs; private KeyValue cur = null; - + /** * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner} * @param hfs HFile scanner @@ -44,11 +44,11 @@ class StoreFileScanner implements KeyValueScanner { public String toString() { return "StoreFileScanner[" + hfs.toString() + ", cur=" + cur + "]"; } - + public KeyValue peek() { return cur; } - + public KeyValue next() { KeyValue retKey = cur; cur = hfs.getKeyValue(); @@ -62,7 +62,7 @@ class StoreFileScanner implements KeyValueScanner { } return retKey; } - + public boolean seek(KeyValue key) { try { if(!seekAtOrAfter(hfs, key)) { @@ -77,14 +77,14 @@ class StoreFileScanner implements KeyValueScanner { return false; } } - + public void close() { // Nothing to close on HFileScanner? cur = null; } - + /** - * + * * @param s * @param k * @return diff --git a/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 09e24e9..12d3529 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -80,7 +80,7 @@ class StoreScanner implements KeyValueScanner, InternalScanner, ChangedReadersOb /** * Used for major compactions.

          - * + * * Opens a scanner across specified StoreFiles. * @param store who we scan * @param scan the spec @@ -111,7 +111,7 @@ class StoreScanner implements KeyValueScanner, InternalScanner, ChangedReadersOb this.store = null; this.isGet = false; this.cacheBlocks = scan.getCacheBlocks(); - this.matcher = new ScanQueryMatcher(scan, colFamily, columns, ttl, + this.matcher = new ScanQueryMatcher(scan, colFamily, columns, ttl, comparator.getRawComparator(), scan.getMaxVersions()); // Seek all scanners to the initial key @@ -210,12 +210,12 @@ class StoreScanner implements KeyValueScanner, InternalScanner, ChangedReadersOb case SKIP: this.heap.next(); break; - + default: throw new RuntimeException("UNEXPECTED"); } } - + if (!results.isEmpty()) { // copy jazz outResult.addAll(results); diff --git a/src/java/org/apache/hadoop/hbase/regionserver/WALEdit.java b/src/java/org/apache/hadoop/hbase/regionserver/WALEdit.java index cfc995d..fdd277a 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/WALEdit.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/WALEdit.java @@ -31,47 +31,47 @@ import org.apache.hadoop.io.Writable; /** * WALEdit: Used in HBase's transaction log (WAL) to represent - * the collection of edits (KeyValue objects) corresponding to a + * the collection of edits (KeyValue objects) corresponding to a * single transaction. The class implements "Writable" interface * for serializing/deserializing a set of KeyValue items. - * + * * Previously, if a transaction contains 3 edits to c1, c2, c3 for a row R, * the HLog would have three log entries as follows: - * + * * : * : * : - * + * * This presents problems because row level atomicity of transactions * was not guaranteed. If we crash after few of the above appends make * it, then recovery will restore a partial transaction. * * In the new world, all the edits for a given transaction are written * out as a single record, for example: - * + * * : - * + * * where, the WALEdit is serialized as: * <-1, # of edits, , , ... > * For example: * <-1, 3, , , > - * - * The -1 marker is just a special way of being backward compatible with + * + * The -1 marker is just a special way of being backward compatible with * an old HLog which would have contained a single . - * + * * The deserializer for WALEdit backward compatibly detects if the record * is an old style KeyValue or the new style WALEdit. * */ public class WALEdit implements Writable { - + private final int VERSION_2 = -1; - + private List kvs = new ArrayList(); - + public WALEdit() { } - + public void add(KeyValue kv) { this.kvs.add(kv); } @@ -83,13 +83,13 @@ public class WALEdit implements Writable { public int size() { return kvs.size(); } - + public List getKeyValues() { return kvs; } public void readFields(DataInput in) throws IOException { - + // ignore any old state in case caller is recycling an instance of this object. 
kvs = new ArrayList(); @@ -132,5 +132,5 @@ public class WALEdit implements Writable { sb.append(">]"); return sb.toString(); } - + } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java b/src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java index 0f997cc..35be2f3 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode; import org.apache.hadoop.hbase.util.Bytes; /** - * This class is used for the tracking and enforcement of columns and numbers + * This class is used for the tracking and enforcement of columns and numbers * of versions during the course of a Get or Scan operation, when all available * column qualifiers have been asked for in the query. *

          @@ -36,20 +36,20 @@ import org.apache.hadoop.hbase.util.Bytes; * what action should be taken. *

        • {@link #update} is called at the end of every StoreFile or memstore. *

          - * This class is NOT thread-safe as queries are never multi-threaded + * This class is NOT thread-safe as queries are never multi-threaded */ public class WildcardColumnTracker implements ColumnTracker { - + private int maxVersions; - + protected List columns; private int index; private ColumnCount column; - - private List newColumns; + + private List newColumns; private int newIndex; private ColumnCount newColumn; - + /** * Default constructor. * @param maxVersions maximum versions to return per columns @@ -58,7 +58,7 @@ public class WildcardColumnTracker implements ColumnTracker { this.maxVersions = maxVersions; reset(); } - + public void reset() { this.index = 0; this.column = null; @@ -67,7 +67,7 @@ public class WildcardColumnTracker implements ColumnTracker { this.newIndex = 0; this.newColumn = null; } - + /** * Can never early-out from reading more storefiles in Wildcard case. */ @@ -241,7 +241,7 @@ public class WildcardColumnTracker implements ColumnTracker { } } while(true); } - + /** * Called at the end of every StoreFile or memstore. */ @@ -253,14 +253,14 @@ public class WildcardColumnTracker implements ColumnTracker { } return; } - + // If no new columns, retain previous columns and return if(this.newColumns.size() == 0) { this.index = 0; this.column = this.columns.get(index); return; } - + // Merge previous columns with new columns // There will be no overlapping List mergeColumns = new ArrayList( @@ -271,14 +271,14 @@ public class WildcardColumnTracker implements ColumnTracker { newColumn = newColumns.get(0); while(true) { int ret = Bytes.compareTo( - column.getBuffer(), column.getOffset(),column.getLength(), + column.getBuffer(), column.getOffset(),column.getLength(), newColumn.getBuffer(), newColumn.getOffset(), newColumn.getLength()); - + // Existing is smaller than new, add existing and iterate it if(ret <= -1) { mergeColumns.add(column); if(++index == columns.size()) { - // No more existing left, merge down rest of new and return + // No more existing left, merge down rest of new and return mergeDown(mergeColumns, newColumns, newIndex); finish(mergeColumns); return; @@ -286,7 +286,7 @@ public class WildcardColumnTracker implements ColumnTracker { column = columns.get(index); continue; } - + // New is smaller than existing, add new and iterate it mergeColumns.add(newColumn); if(++newIndex == newColumns.size()) { @@ -299,23 +299,23 @@ public class WildcardColumnTracker implements ColumnTracker { continue; } } - - private void mergeDown(List mergeColumns, + + private void mergeDown(List mergeColumns, List srcColumns, int srcIndex) { int index = srcIndex; while(index < srcColumns.size()) { mergeColumns.add(srcColumns.get(index++)); } } - + private void finish(List mergeColumns) { this.columns = mergeColumns; this.index = 0; this.column = this.columns.size() > 0? 
columns.get(index) : null; - + this.newColumns = new ArrayList(); this.newIndex = 0; this.newColumn = null; } - + } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java b/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java index fc280d5..55aff8e 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java @@ -36,7 +36,7 @@ import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; import org.apache.hadoop.metrics.util.MetricsLongValue; import org.apache.hadoop.metrics.util.MetricsRegistry; -/** +/** * This class is for maintaining the various regionserver statistics * and publishing them through the metrics interfaces. *
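For illustration, the compare-and-advance merge performed by WildcardColumnTracker.update in the hunk above can be sketched on plain sorted lists. String qualifiers stand in for the ColumnCount byte ranges, so this is a sketch of the algorithm only, not the patched code.

```java
// Sketch of the two-list merge above: columns already seen and columns from
// the latest StoreFile are both sorted and disjoint, so one pass that always
// takes the smaller head produces the combined, still-sorted list.
import java.util.ArrayList;
import java.util.List;

class ColumnMergeSketch {
    static List<String> merge(List<String> existing, List<String> fresh) {
        List<String> merged = new ArrayList<>(existing.size() + fresh.size());
        int i = 0, j = 0;
        while (i < existing.size() && j < fresh.size()) {
            // The lists never share a qualifier, so compareTo never returns 0.
            if (existing.get(i).compareTo(fresh.get(j)) < 0) {
                merged.add(existing.get(i++));
            } else {
                merged.add(fresh.get(j++));
            }
        }
        // "mergeDown": copy whatever remains of the list that did not run out.
        while (i < existing.size()) merged.add(existing.get(i++));
        while (j < fresh.size())    merged.add(fresh.get(j++));
        return merged;
    }
}
```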

          @@ -50,10 +50,10 @@ public class RegionServerMetrics implements Updater { private static final int MB = 1024*1024; private MetricsRegistry registry = new MetricsRegistry(); private final RegionServerStatistics statistics; - + public final MetricsTimeVaryingRate atomicIncrementTime = new MetricsTimeVaryingRate("atomicIncrementTime", registry); - + /** * Count of regions carried by this regionserver */ @@ -110,19 +110,19 @@ public class RegionServerMetrics implements Updater { /** * filesystem read latency */ - public final MetricsTimeVaryingRate fsReadLatency = + public final MetricsTimeVaryingRate fsReadLatency = new MetricsTimeVaryingRate("fsReadLatency", registry); /** * filesystem write latency */ - public final MetricsTimeVaryingRate fsWriteLatency = + public final MetricsTimeVaryingRate fsWriteLatency = new MetricsTimeVaryingRate("fsWriteLatency", registry); /** * filesystem sync latency */ - public final MetricsTimeVaryingRate fsSyncLatency = + public final MetricsTimeVaryingRate fsSyncLatency = new MetricsTimeVaryingRate("fsSyncLatency", registry); public RegionServerMetrics() { @@ -148,7 +148,7 @@ public class RegionServerMetrics implements Updater { /** * Since this object is a registered updater, this method will be called * periodically, e.g. every 5 seconds. - * @param unused + * @param unused */ public void doUpdates(MetricsContext unused) { synchronized (this) { @@ -163,7 +163,7 @@ public class RegionServerMetrics implements Updater { this.blockCacheFree.pushMetric(this.metricsRecord); this.blockCacheCount.pushMetric(this.metricsRecord); this.blockCacheHitRatio.pushMetric(this.metricsRecord); - + // Mix in HFile and HLog metrics // Be careful. Here is code for MTVR from up in hadoop: // public synchronized void inc(final int numOps, final long time) { @@ -204,14 +204,14 @@ public class RegionServerMetrics implements Updater { public float getRequests() { return this.requests.getPreviousIntervalValue(); } - + /** * @param inc How much to add to requests. 
*/ public void incrementRequests(final int inc) { this.requests.inc(inc); } - + @Override public String toString() { StringBuilder sb = new StringBuilder(); diff --git a/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java b/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java index 94f0ec3..2e06563 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java @@ -34,8 +34,8 @@ public class RegionServerStatistics extends MetricsMBeanBase { public RegionServerStatistics(MetricsRegistry registry, String rsName) { super(registry, "RegionServerStatistics"); - mbeanName = MBeanUtil.registerMBean("RegionServer", - "RegionServerStatistics", this); + mbeanName = MBeanUtil.registerMBean("RegionServer", + "RegionServerStatistics", this); } public void shutdown() { diff --git a/src/java/org/apache/hadoop/hbase/rest/AbstractController.java b/src/java/org/apache/hadoop/hbase/rest/AbstractController.java index 689f284..db56123 100644 --- a/src/java/org/apache/hadoop/hbase/rest/AbstractController.java +++ b/src/java/org/apache/hadoop/hbase/rest/AbstractController.java @@ -53,7 +53,7 @@ public abstract class AbstractController implements RESTConstants { protected abstract AbstractModel generateModel(HBaseConfiguration conf, HBaseAdmin a); - + protected byte[][] getColumnsFromQueryMap(Map queryMap) { byte[][] columns = null; String[] columnArray = queryMap.get(RESTConstants.COLUMN); diff --git a/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java b/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java index c1f3409..a9aace2 100644 --- a/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java +++ b/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java @@ -51,7 +51,7 @@ import java.util.Map; *

          * This servlet has explicit dependency on Jetty server; it uses the jetty * implementation of MultipartResponse. - * + * *

          * TODO: *

            @@ -65,7 +65,7 @@ import java.util.Map; *
• Minor items: we are decoding URLs in places where that has probably already * been done, and we still need a way to time out scanners left in the scanner list.
          • *
          - * + * * @see Hbase * REST Specification * @deprecated Use the {@link org.apache.hadoop.hbase.stargate} hbase contrib instead. @@ -73,7 +73,7 @@ import java.util.Map; public class Dispatcher extends javax.servlet.http.HttpServlet { /** - * + * */ private static final long serialVersionUID = -8075335435797071569L; private static final Log LOG = LogFactory.getLog(Dispatcher.class); @@ -101,7 +101,7 @@ public class Dispatcher extends javax.servlet.http.HttpServlet { /** * Utility method used looking at Accept header content. - * + * * @param t * The content type to examine. * @return The enum that matches the prefix of t or the default @@ -294,7 +294,7 @@ public class Dispatcher extends javax.servlet.http.HttpServlet { Status s = createStatus(request, response); byte[][] pathSegments = getPathSegments(request); Map queryMap = request.getParameterMap(); - + if(pathSegments.length == 0) { throw new HBaseRestException("method not supported"); } else if (pathSegments.length == 1 && pathSegments[0].length > 0) { @@ -332,17 +332,17 @@ public class Dispatcher extends javax.servlet.http.HttpServlet { /** * This method will get the path segments from the HttpServletRequest. Please - * note that if the first segment of the path is /api this is removed from the + * note that if the first segment of the path is /api this is removed from the * returning byte array. - * + * * @param request - * + * * @return request pathinfo split on the '/' ignoring the first '/' so first * element in pathSegment is not the empty string. */ protected byte[][] getPathSegments(final HttpServletRequest request) { int context_len = request.getContextPath().length() + 1; - + byte[][] pathSegments = Bytes.toByteArrays(request.getRequestURI().substring(context_len) .split("/")); byte[] apiAsBytes = "api".getBytes(); @@ -377,7 +377,7 @@ public class Dispatcher extends javax.servlet.http.HttpServlet { } char [] c;// 40 characters * sizeof(UTF16) while (true) { - c = new char[bufferLength]; + c = new char[bufferLength]; int n = r.read(c, 0, bufferLength); if (n == -1) break; resultant += new String(c, 0, n); @@ -431,7 +431,7 @@ public class Dispatcher extends javax.servlet.http.HttpServlet { /* * Start up the REST servlet in standalone mode. 
- * + * * @param args */ protected static void doMain(final String[] args) throws Exception { diff --git a/src/java/org/apache/hadoop/hbase/rest/RowController.java b/src/java/org/apache/hadoop/hbase/rest/RowController.java index 668319d..a01dcbf 100644 --- a/src/java/org/apache/hadoop/hbase/rest/RowController.java +++ b/src/java/org/apache/hadoop/hbase/rest/RowController.java @@ -61,7 +61,7 @@ public class RowController extends AbstractController { tableName = pathSegments[0]; rowName = pathSegments[2]; RowResult row = null; - + if (queryMap.size() == 0 && pathSegments.length <= 3) { row = innerModel.get(tableName, rowName); } else if (pathSegments.length == 4 diff --git a/src/java/org/apache/hadoop/hbase/rest/RowModel.java b/src/java/org/apache/hadoop/hbase/rest/RowModel.java index f5543af..a0c5b0a 100644 --- a/src/java/org/apache/hadoop/hbase/rest/RowModel.java +++ b/src/java/org/apache/hadoop/hbase/rest/RowModel.java @@ -61,7 +61,7 @@ public class RowModel extends AbstractModel { throw new HBaseRestException(e); } } - + @Deprecated public RowResult get(byte[] tableName, byte[] rowName, byte[][] columns) throws HBaseRestException { @@ -84,7 +84,7 @@ public class RowModel extends AbstractModel { get.setTimeStamp(timestamp); return get(tableName, get).getRowResult(); } - + @Deprecated public RowResult get(byte[] tableName, byte[] rowName, long timestamp) throws HBaseRestException { @@ -128,7 +128,7 @@ public class RowModel extends AbstractModel { throw new HBaseRestException(e); } } - + @Deprecated public void delete(byte[] tableName, byte[] rowName) throws HBaseRestException { @@ -146,7 +146,7 @@ public class RowModel extends AbstractModel { } delete(tableName, delete); } - + public void delete(byte[] tableName, Delete delete) throws HBaseRestException { try { diff --git a/src/java/org/apache/hadoop/hbase/rest/ScannerController.java b/src/java/org/apache/hadoop/hbase/rest/ScannerController.java index d8f17fc..1dbff59 100644 --- a/src/java/org/apache/hadoop/hbase/rest/ScannerController.java +++ b/src/java/org/apache/hadoop/hbase/rest/ScannerController.java @@ -38,13 +38,13 @@ import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; import org.apache.hadoop.hbase.util.Bytes; /** - * + * */ public class ScannerController extends AbstractController { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.AbstractController#delete(org.apache.hadoop * .hbase.rest.Status, byte[][], java.util.Map) @@ -79,7 +79,7 @@ public class ScannerController extends AbstractController { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.AbstractController#generateModel(org.apache * .hadoop.hbase.HBaseConfiguration, @@ -96,7 +96,7 @@ public class ScannerController extends AbstractController { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.AbstractController#get(org.apache.hadoop.hbase * .rest.Status, byte[][], java.util.Map) @@ -112,7 +112,7 @@ public class ScannerController extends AbstractController { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.AbstractController#post(org.apache.hadoop. 
* hbase.rest.Status, byte[][], java.util.Map, byte[], @@ -163,7 +163,7 @@ public class ScannerController extends AbstractController { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.AbstractController#put(org.apache.hadoop.hbase * .rest.Status, byte[][], java.util.Map, byte[], @@ -343,7 +343,7 @@ public class ScannerController extends AbstractController { * Given a list of filters in JSON string form, returns a RowSetFilter that * returns true if all input filters return true on a Row (aka an AND * statement). - * + * * @param filters * array of input filters in a JSON String * @return RowSetFilter with all input filters in an AND Statement diff --git a/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java b/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java index bc24b08..fc1c9e7 100644 --- a/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java +++ b/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java @@ -37,14 +37,14 @@ import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier; import org.apache.hadoop.hbase.rest.exception.HBaseRestException; /** - * + * */ public class ScannerModel extends AbstractModel { public ScannerModel(HBaseConfiguration config, HBaseAdmin admin) { super.initialize(config, admin); } - + // // Normal Scanner // @@ -83,7 +83,7 @@ public class ScannerModel extends AbstractModel { * returns the next numResults Results from the Scaner mapped to Integer * id. If the end of the table is reached, the scanner is closed and all * succesfully retrieved rows are returned. - * + * * @param id * id target scanner is mapped to. * @param numRows @@ -124,7 +124,7 @@ public class ScannerModel extends AbstractModel { /** * Returns all rows inbetween the scanners current position and the end of the * table. 
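The scannerGet contract described just above (fetch up to numResults rows for a scanner id, closing the scanner once the end of the table is reached) can be sketched against the client API already used throughout this patch. This is an illustrative helper, not the patched ScannerModel code.

```java
// Sketch of the behaviour described above: pull up to numRows results from an
// open ResultScanner and close it as soon as the table end is reached,
// returning whatever was successfully fetched before that point.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;

class ScannerGetSketch {
    static List<Result> nextRows(ResultScanner scanner, int numRows) throws IOException {
        List<Result> rows = new ArrayList<Result>(numRows);
        for (int i = 0; i < numRows; i++) {
            Result r = scanner.next();
            if (r == null) {            // end of table: close and return what we have
                scanner.close();
                break;
            }
            rows.add(r);
        }
        return rows;
    }
}
```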
- * + * * @param id * id of scanner to use * @return all rows till end of table @@ -288,5 +288,5 @@ public class ScannerModel extends AbstractModel { return scannerOpen(tableName, columns, startRow, HConstants.LATEST_TIMESTAMP, filter); } - + } diff --git a/src/java/org/apache/hadoop/hbase/rest/Status.java b/src/java/org/apache/hadoop/hbase/rest/Status.java index 15ea8ce..f7e2112 100644 --- a/src/java/org/apache/hadoop/hbase/rest/Status.java +++ b/src/java/org/apache/hadoop/hbase/rest/Status.java @@ -100,7 +100,7 @@ public class Status { protected HttpServletResponse response; protected Object message; protected IRestSerializer serializer; - protected byte[][] pathSegments; + protected byte[][] pathSegments; public int getStatusCode() { return statusCode; @@ -135,7 +135,7 @@ public class Status { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML(org.apache.hadoop.hbase * .rest.serializer.IRestSerializer) @@ -163,7 +163,7 @@ public class Status { this.statusCode = HttpServletResponse.SC_OK; this.message = message; } - + public void setAccepted() { this.statusCode = HttpServletResponse.SC_ACCEPTED; this.message = new StatusMessage(HttpServletResponse.SC_ACCEPTED, false, "success"); @@ -177,7 +177,7 @@ public class Status { public void setCreated() { this.statusCode = HttpServletResponse.SC_CREATED; this.setOK(); - } + } public void setScannerCreated(ScannerIdentifier scannerIdentifier) { this.statusCode = HttpServletResponse.SC_OK; @@ -229,17 +229,17 @@ public class Status { this.statusCode = HttpServletResponse.SC_BAD_REQUEST; this.message = new StatusMessage(statusCode, true, message); } - + public void setUnsupportedMediaType(Object message) { this.statusCode = HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE; this.message = new StatusMessage(statusCode, true, message); } - + public void setGone() { this.statusCode = HttpServletResponse.SC_GONE; this.message = new StatusMessage(statusCode, true, "item no longer available"); } - + // Utility public void respond() throws HBaseRestException { diff --git a/src/java/org/apache/hadoop/hbase/rest/TableController.java b/src/java/org/apache/hadoop/hbase/rest/TableController.java index a022041..77b7fb8 100644 --- a/src/java/org/apache/hadoop/hbase/rest/TableController.java +++ b/src/java/org/apache/hadoop/hbase/rest/TableController.java @@ -69,7 +69,7 @@ public class TableController extends AbstractController { /* * (non-Javadoc) - * + * * @param input column descriptor JSON. Should be of the form:
              * {"column_families":[ { "name":STRING, "bloomfilter":BOOLEAN,
              * "max_versions":INTEGER, "compression_type":STRING, "in_memory":BOOLEAN,
          @@ -79,7 +79,7 @@ public class TableController extends AbstractController {
              * default values are: 
* bloomfilter => false max_versions => 3
              * compression_type => NONE in_memory => false block_cache_enabled => false
              * max_value_length => 2147483647 time_to_live => Integer.MAX_VALUE 
          - * + * * @see * org.apache.hadoop.hbase.rest.AbstractController#post(org.apache.hadoop. * hbase.rest.Status, byte[][], java.util.Map, byte[], diff --git a/src/java/org/apache/hadoop/hbase/rest/TableModel.java b/src/java/org/apache/hadoop/hbase/rest/TableModel.java index fa19a58..c7680a3 100644 --- a/src/java/org/apache/hadoop/hbase/rest/TableModel.java +++ b/src/java/org/apache/hadoop/hbase/rest/TableModel.java @@ -54,7 +54,7 @@ public class TableModel extends AbstractModel { /** * Returns all cells from all rows from the given table in the given columns. * The output is in the order that the columns are given. - * + * * @param tableName * table name * @param columnNames @@ -90,7 +90,7 @@ public class TableModel extends AbstractModel { throw new HBaseRestException(e); } } - + protected void disableTable(byte [] tableName) throws HBaseRestException { try { this.admin.disableTable(tableName); @@ -98,7 +98,7 @@ public class TableModel extends AbstractModel { throw new HBaseRestException("IOException disabling table", e); } } - + protected void enableTable(byte [] tableName) throws HBaseRestException { try { this.admin.enableTable(tableName); @@ -173,9 +173,9 @@ public class TableModel extends AbstractModel { /** * Return region offsets. - * @param tableName + * @param tableName * @return Regions - * @throws HBaseRestException + * @throws HBaseRestException */ public Regions getTableRegions(final String tableName) throws HBaseRestException { @@ -195,12 +195,12 @@ public class TableModel extends AbstractModel { // Post Methods /** * Creates table tableName described by the json in input. - * + * * @param tableName * table name * @param htd * HBaseTableDescriptor for the table to be created - * + * * @return true if operation does not fail due to a table with the given * tableName not existing. * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException @@ -220,7 +220,7 @@ public class TableModel extends AbstractModel { /** * Deletes table tableName - * + * * @param tableName * name of the table. * @return true if table exists and deleted, false if table does not exist. 
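For concreteness, the column-descriptor JSON that the REST table-creation path above expects might look like the following. The family name "info" is made up for the example; the other keys simply spell out the documented defaults. Holding it in a Java string is purely for illustration.

```java
// Example payload matching the documented column_families form; any omitted
// key falls back to the defaults listed in the Javadoc above.
class CreateTablePayloadSketch {
    static final String CREATE_TABLE_JSON =
        "{ \"column_families\": [ {"
      + "    \"name\": \"info\","
      + "    \"bloomfilter\": false,"
      + "    \"max_versions\": 3,"
      + "    \"compression_type\": \"NONE\","
      + "    \"in_memory\": false,"
      + "    \"block_cache_enabled\": false,"
      + "    \"max_value_length\": 2147483647,"
      + "    \"time_to_live\": 2147483647"
      + "} ] }";
}
```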
@@ -268,7 +268,7 @@ public class TableModel extends AbstractModel { /* * (non-Javadoc) - * + * * @see org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML() */ public void restSerialize(IRestSerializer serializer) diff --git a/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java b/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java index b4bc2e7..be0d0a8 100644 --- a/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java +++ b/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java @@ -56,14 +56,14 @@ public class TimestampModel extends AbstractModel { throw new HBaseRestException(e); } } - + @Deprecated public void delete(byte[] tableName, byte[] rowName, long timestamp) throws HBaseRestException { Delete delete = new Delete(rowName, timestamp, null); delete(tableName, delete); } - + @Deprecated public void delete(byte[] tableName, byte[] rowName, byte[][] columns, long timestamp) throws HBaseRestException { @@ -84,12 +84,12 @@ public class TimestampModel extends AbstractModel { throw new HBaseRestException(e); } } - + @Deprecated public Cell get(byte[] tableName, byte[] rowName, byte[] columnName, long timestamp) throws HBaseRestException { Get get = new Get(rowName); - byte [][] famAndQf = KeyValue.parseColumn(columnName); + byte [][] famAndQf = KeyValue.parseColumn(columnName); get.addColumn(famAndQf[0], famAndQf[1]); get.setTimeStamp(timestamp); return get(tableName, get).getCellValue(famAndQf[0], famAndQf[1]); @@ -99,7 +99,7 @@ public class TimestampModel extends AbstractModel { public Cell[] get(byte[] tableName, byte[] rowName, byte[] columnName, long timestamp, int numVersions) throws IOException, HBaseRestException { Get get = new Get(rowName); - byte [][] famAndQf = KeyValue.parseColumn(columnName); + byte [][] famAndQf = KeyValue.parseColumn(columnName); get.addColumn(famAndQf[0], famAndQf[1]); get.setTimeStamp(timestamp); get.setMaxVersions(numVersions); diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/RestCell.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/RestCell.java index 76e526d..def1222 100644 --- a/src/java/org/apache/hadoop/hbase/rest/descriptors/RestCell.java +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/RestCell.java @@ -22,24 +22,24 @@ package org.apache.hadoop.hbase.rest.descriptors; import org.apache.hadoop.hbase.io.Cell; /** - * + * */ public class RestCell extends Cell { byte[] name; - - + + /** - * + * */ public RestCell() { super(); // TODO Auto-generated constructor stub } - + /** - * @param name + * @param name * @param cell */ public RestCell(byte[] name, Cell cell) { @@ -96,6 +96,6 @@ public class RestCell extends Cell { public void setName(byte[] name) { this.name = name; } - - + + } diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/RowUpdateDescriptor.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/RowUpdateDescriptor.java index 4401055..eeb2e3f 100644 --- a/src/java/org/apache/hadoop/hbase/rest/descriptors/RowUpdateDescriptor.java +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/RowUpdateDescriptor.java @@ -29,12 +29,12 @@ public class RowUpdateDescriptor { private String tableName; private String rowName; private Map colvals = new HashMap(); - + public RowUpdateDescriptor(String tableName, String rowName) { this.tableName = tableName; this.rowName = rowName; } - + public RowUpdateDescriptor() {} /** @@ -70,5 +70,5 @@ public class RowUpdateDescriptor { */ public Map getColVals() { return colvals; - } + } } diff --git 
a/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerDescriptor.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerDescriptor.java index 2cddabe..8f9c184 100644 --- a/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerDescriptor.java +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerDescriptor.java @@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.rest.descriptors; /** - * + * */ public class ScannerDescriptor { byte[][] columns; @@ -44,7 +44,7 @@ public class ScannerDescriptor { this.startRow = startRow; this.stopRow = stopRow; this.filters = filters; - + if(this.startRow == null) { this.startRow = new byte[0]; } diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerIdentifier.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerIdentifier.java index 1f10dc3..cc54582 100644 --- a/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerIdentifier.java +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerIdentifier.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; import org.apache.hadoop.hbase.rest.serializer.ISerializable; /** - * + * */ public class ScannerIdentifier implements ISerializable { Integer id; @@ -80,7 +80,7 @@ public class ScannerIdentifier implements ISerializable { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML(org.apache.hadoop.hbase * .rest.serializer.IRestSerializer) diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/TimestampsDescriptor.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/TimestampsDescriptor.java index 9125c80..2b0692a 100644 --- a/src/java/org/apache/hadoop/hbase/rest/descriptors/TimestampsDescriptor.java +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/TimestampsDescriptor.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.rest.serializer.ISerializable; import org.apache.hadoop.hbase.util.Bytes; /** - * + * */ public class TimestampsDescriptor implements ISerializable { Map timestamps = new HashMap(); @@ -54,7 +54,7 @@ public class TimestampsDescriptor implements ISerializable { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.ISerializable#restSerialize(org * .apache.hadoop.hbase.rest.serializer.IRestSerializer) diff --git a/src/java/org/apache/hadoop/hbase/rest/exception/HBaseRestException.java b/src/java/org/apache/hadoop/hbase/rest/exception/HBaseRestException.java index 1db0e76..e554467 100644 --- a/src/java/org/apache/hadoop/hbase/rest/exception/HBaseRestException.java +++ b/src/java/org/apache/hadoop/hbase/rest/exception/HBaseRestException.java @@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.rest.exception; public class HBaseRestException extends Exception { /** - * + * */ private static final long serialVersionUID = 8481585437124298646L; private Exception innerException; diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/ColumnValueFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/ColumnValueFilterFactory.java index 47e6940..9106b33 100644 --- a/src/java/org/apache/hadoop/hbase/rest/filter/ColumnValueFilterFactory.java +++ b/src/java/org/apache/hadoop/hbase/rest/filter/ColumnValueFilterFactory.java @@ -26,10 +26,10 @@ import org.apache.hadoop.hbase.rest.exception.HBaseRestException; /** * FilterFactory that constructs a ColumnValueFilter from a JSON arg String. 
* Expects a Stringified JSON argument with the following form: - * + * * { "column_name" : "MY_COLUMN_NAME", "compare_op" : "INSERT_COMPARE_OP_HERE", * "value" : "MY_COMPARE_VALUE" } - * + * * The current valid compare ops are: equal, greater, greater_or_equal, less, * less_or_equal, not_equal */ diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactory.java index 00803c1..5d1c80b 100644 --- a/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactory.java +++ b/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactory.java @@ -29,12 +29,12 @@ import org.apache.hadoop.hbase.rest.exception.HBaseRestException; * "type" : "FILTER_CLASS_NAME", * "args" : "FILTER_ARGUMENTS" * } - * + * * For Filters like WhileMatchRowFilter, * nested Filters are supported. Just serialize a different * filter in the form (for instance if you wanted to use WhileMatchRowFilter * with a StopRowFilter: - * + * * { * "type" : "WhileMatchRowFilter", * "args" : { @@ -42,12 +42,12 @@ import org.apache.hadoop.hbase.rest.exception.HBaseRestException; * "args" : "ROW_KEY_TO_STOP_ON" * } * } - * + * * For filters like RowSetFilter, nested Filters AND Filter arrays * are supported. So for instance If one wanted to do a RegExp * RowFilter UNIONed with a WhileMatchRowFilter(StopRowFilter), * you would look like this: - * + * * { * "type" : "RowFilterSet", * "args" : [ diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/InclusiveStopRowFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/InclusiveStopRowFilterFactory.java index 6539213..3e6bcda 100644 --- a/src/java/org/apache/hadoop/hbase/rest/filter/InclusiveStopRowFilterFactory.java +++ b/src/java/org/apache/hadoop/hbase/rest/filter/InclusiveStopRowFilterFactory.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** * FilterFactory that construct a InclusiveStopRowFilter * from a JSON argument String. - * + * * It expects that the whole input string consists of only * the rowKey that you wish to stop on. */ diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/RowFilterSetFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/RowFilterSetFactory.java index ab3721c..b80054e 100644 --- a/src/java/org/apache/hadoop/hbase/rest/filter/RowFilterSetFactory.java +++ b/src/java/org/apache/hadoop/hbase/rest/filter/RowFilterSetFactory.java @@ -28,14 +28,14 @@ import org.apache.hadoop.hbase.rest.exception.HBaseRestException; /** * Constructs a RowFilterSet from a JSON argument String. - * + * * Assumes that the input is a JSONArray consisting of JSON Object version of * the filters that you wish to mash together in an AND statement. - * + * * The Syntax for the individual inner filters are defined by their respective * FilterFactory. If a filter factory for said Factory does not exist, a * MalformedFilterJSONException will be thrown. - * + * * Currently OR Statements are not supported even though at a later iteration * they could be supported easily. */ diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/StopRowFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/StopRowFilterFactory.java index 28caaf6..f42ea1c 100644 --- a/src/java/org/apache/hadoop/hbase/rest/filter/StopRowFilterFactory.java +++ b/src/java/org/apache/hadoop/hbase/rest/filter/StopRowFilterFactory.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** * FilterFactory that construct a StopRowFilter * from an Argument String. 
- * + * * It expects that the whole input string consists of only * the rowKey that you wish to stop on. */ diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/WhileMatchRowFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/WhileMatchRowFilterFactory.java index dab605b..8c49732 100644 --- a/src/java/org/apache/hadoop/hbase/rest/filter/WhileMatchRowFilterFactory.java +++ b/src/java/org/apache/hadoop/hbase/rest/filter/WhileMatchRowFilterFactory.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.rest.exception.HBaseRestException; /** * Factory to produce WhileMatchRowFilters from JSON - * Expects as an arguement a valid JSON Object in + * Expects as an arguement a valid JSON Object in * String form of another RowFilterInterface. */ public class WhileMatchRowFilterFactory implements FilterFactory { diff --git a/src/java/org/apache/hadoop/hbase/rest/parser/HBaseRestParserFactory.java b/src/java/org/apache/hadoop/hbase/rest/parser/HBaseRestParserFactory.java index f28c862..7eebecb 100644 --- a/src/java/org/apache/hadoop/hbase/rest/parser/HBaseRestParserFactory.java +++ b/src/java/org/apache/hadoop/hbase/rest/parser/HBaseRestParserFactory.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.rest.Dispatcher.ContentType; public class HBaseRestParserFactory { - private static final Map> parserMap = + private static final Map> parserMap = new HashMap>(); static { diff --git a/src/java/org/apache/hadoop/hbase/rest/parser/IHBaseRestParser.java b/src/java/org/apache/hadoop/hbase/rest/parser/IHBaseRestParser.java index b87313c..b644842 100644 --- a/src/java/org/apache/hadoop/hbase/rest/parser/IHBaseRestParser.java +++ b/src/java/org/apache/hadoop/hbase/rest/parser/IHBaseRestParser.java @@ -28,12 +28,12 @@ import org.apache.hadoop.hbase.rest.descriptors.ScannerDescriptor; import org.apache.hadoop.hbase.rest.exception.HBaseRestException; /** - * + * */ public interface IHBaseRestParser { /** * Parses a HTableDescriptor given the input array. 
- * + * * @param input * @return HTableDescriptor * @throws HBaseRestException diff --git a/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java b/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java index 7d6cf5d..d51380d 100644 --- a/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java +++ b/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java @@ -38,13 +38,13 @@ import org.w3c.dom.Node; import org.w3c.dom.NodeList; /** - * + * */ public class XMLRestParser implements IHBaseRestParser { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getTableDescriptor * (byte[]) @@ -178,7 +178,7 @@ public class XMLRestParser implements IHBaseRestParser { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getColumnDescriptors * (byte[]) @@ -212,7 +212,7 @@ public class XMLRestParser implements IHBaseRestParser { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getScannerDescriptor * (byte[]) @@ -225,7 +225,7 @@ public class XMLRestParser implements IHBaseRestParser { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getRowUpdateDescriptor * (byte[], byte[][]) diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/AbstractRestSerializer.java b/src/java/org/apache/hadoop/hbase/rest/serializer/AbstractRestSerializer.java index bcbe1c7..d37f6be 100644 --- a/src/java/org/apache/hadoop/hbase/rest/serializer/AbstractRestSerializer.java +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/AbstractRestSerializer.java @@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.rest.serializer; import javax.servlet.http.HttpServletResponse; /** - * + * * Abstract object that is used as the base of all serializers in the * REST based interface. */ @@ -45,9 +45,9 @@ public abstract class AbstractRestSerializer implements IRestSerializer { /** * Public constructor for AbstractRestSerializer. This is the constructor that * should be called whenever creating a RestSerializer object. - * + * * @param response - * @param prettyPrint + * @param prettyPrint */ public AbstractRestSerializer(HttpServletResponse response, boolean prettyPrint) { diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/IRestSerializer.java b/src/java/org/apache/hadoop/hbase/rest/serializer/IRestSerializer.java index e91db35..c6f67ac 100644 --- a/src/java/org/apache/hadoop/hbase/rest/serializer/IRestSerializer.java +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/IRestSerializer.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.rest.descriptors.TimestampsDescriptor; import org.apache.hadoop.hbase.rest.exception.HBaseRestException; /** - * + * * Interface that is implemented to return serialized objects back to * the output stream. */ @@ -39,10 +39,10 @@ public interface IRestSerializer { /** * Serializes an object into the appropriate format and writes it to the * output stream. - * + * * This is the main point of entry when for an object to be serialized to the * output stream. 
- * + * * @param o * @throws HBaseRestException */ @@ -50,10 +50,10 @@ public interface IRestSerializer { /** * serialize the database metadata - * + * * Implementation of this method is optional, IF all the work is done in the * writeOutput(Object o) method - * + * * @param databaseMetadata * @throws HBaseRestException */ @@ -62,10 +62,10 @@ public interface IRestSerializer { /** * serialize the HTableDescriptor object - * + * * Implementation of this method is optional, IF all the work is done in the * writeOutput(Object o) method - * + * * @param tableDescriptor * @throws HBaseRestException */ @@ -74,10 +74,10 @@ public interface IRestSerializer { /** * serialize an HColumnDescriptor to the output stream. - * + * * Implementation of this method is optional, IF all the work is done in the * writeOutput(Object o) method - * + * * @param column * @throws HBaseRestException */ @@ -86,10 +86,10 @@ public interface IRestSerializer { /** * serialize the region data for a table to the output stream - * + * * Implementation of this method is optional, IF all the work is done in the * writeOutput(Object o) method - * + * * @param regions * @throws HBaseRestException */ @@ -97,10 +97,10 @@ public interface IRestSerializer { /** * serialize the status message object to the output stream - * + * * Implementation of this method is optional, IF all the work is done in the * writeOutput(Object o) method - * + * * @param message * @throws HBaseRestException */ @@ -109,10 +109,10 @@ public interface IRestSerializer { /** * serialize the ScannerIdentifier object to the output stream - * + * * Implementation of this method is optional, IF all the work is done in the * writeOutput(Object o) method - * + * * @param scannerIdentifier * @throws HBaseRestException */ @@ -121,10 +121,10 @@ public interface IRestSerializer { /** * serialize a RowResult object to the output stream - * + * * Implementation of this method is optional, IF all the work is done in the * writeOutput(Object o) method - * + * * @param rowResult * @throws HBaseRestException */ @@ -132,10 +132,10 @@ public interface IRestSerializer { /** * serialize a RowResult array to the output stream - * + * * Implementation of this method is optional, IF all the work is done in the * writeOutput(Object o) method - * + * * @param rows * @throws HBaseRestException */ @@ -144,28 +144,28 @@ public interface IRestSerializer { /** * serialize a cell object to the output stream - * + * * Implementation of this method is optional, IF all the work is done in the * writeOutput(Object o) method - * + * * @param cell * @throws HBaseRestException */ public void serializeCell(Cell cell) throws HBaseRestException; - + /** * serialize a Cell array to the output stream - * + * * @param cells * @throws HBaseRestException */ public void serializeCellArray(Cell[] cells) throws HBaseRestException; - - + + /** - * serialize a description of the timestamps available for a row + * serialize a description of the timestamps available for a row * to the output stream. 
- * + * * @param timestampsDescriptor * @throws HBaseRestException */ diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/ISerializable.java b/src/java/org/apache/hadoop/hbase/rest/serializer/ISerializable.java index d482854..9aae113 100644 --- a/src/java/org/apache/hadoop/hbase/rest/serializer/ISerializable.java +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/ISerializable.java @@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.rest.serializer; import org.apache.hadoop.hbase.rest.exception.HBaseRestException; /** - * + * * Interface for objects that wish to write back to the REST based * interface output stream. Objects should implement this interface, * then use the IRestSerializer passed to it to call the appropriate @@ -33,7 +33,7 @@ public interface ISerializable { * visitor pattern method where the object implementing this interface will * call back on the IRestSerializer with the correct method to run to * serialize the output of the object to the stream. - * + * * @param serializer * @throws HBaseRestException */ diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/RestSerializerFactory.java b/src/java/org/apache/hadoop/hbase/rest/serializer/RestSerializerFactory.java index 5aeaf27..a470c3f 100644 --- a/src/java/org/apache/hadoop/hbase/rest/serializer/RestSerializerFactory.java +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/RestSerializerFactory.java @@ -26,10 +26,10 @@ import org.apache.hadoop.hbase.rest.Dispatcher.ContentType; import org.apache.hadoop.hbase.rest.exception.HBaseRestException; /** - * + * * Factory used to return a Rest Serializer tailored to the HTTP * Requesters accept type in the header. - * + * */ public class RestSerializerFactory { diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java b/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java index 2c6cd86..d81686b 100644 --- a/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java @@ -39,10 +39,10 @@ import org.apache.hadoop.hbase.rest.exception.HBaseRestException; import org.apache.hadoop.hbase.util.Bytes; /** - * + * * Basic first pass at implementing an XML serializer for the REST interface. * This should probably be refactored into something better. 
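The callback arrangement that ISerializable describes above (the serialized object calls back into the serializer with the method matching its own type) can be sketched with trimmed-down stand-in interfaces. The names below are illustrative only and are not the real REST interfaces.

```java
// Minimal visitor sketch: the visited object picks the serializer method
// appropriate for its type, so the serializer never needs instanceof checks.
interface RestSerializerSketch {
    void serializeStatusMessage(StatusMessageSketch message);
}

interface SerializableSketch {
    void restSerialize(RestSerializerSketch serializer);
}

class StatusMessageSketch implements SerializableSketch {
    final int code;
    StatusMessageSketch(int code) { this.code = code; }

    public void restSerialize(RestSerializerSketch serializer) {
        serializer.serializeStatusMessage(this);   // callback with the matching method
    }
}

class XmlSerializerSketch implements RestSerializerSketch {
    public void serializeStatusMessage(StatusMessageSketch message) {
        System.out.println("<status code=\"" + message.code + "\"/>");
    }
}
```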
- * + * */ public class SimpleXMLSerializer extends AbstractRestSerializer { @@ -72,7 +72,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#writeOutput(java * .lang.Object, java.io.OutputStream) @@ -98,7 +98,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# * serializeDatabaseMetadata * (org.apache.hadoop.hbase.rest.DatabaseModel.DatabaseMetadata) @@ -115,7 +115,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# * serializeTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) */ @@ -139,7 +139,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# * serializeColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) */ @@ -169,7 +169,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRegionData * (org.apache.hadoop.hbase.rest.TableModel.Regions) @@ -188,7 +188,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeStatusMessage * (org.apache.hadoop.hbase.rest.Status.StatusMessage) @@ -213,7 +213,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# * serializeScannerIdentifier(org.apache.hadoop.hbase.rest.ScannerIdentifier) */ @@ -230,7 +230,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRowResult * (org.apache.hadoop.hbase.io.RowResult) @@ -266,7 +266,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRowResultArray * (org.apache.hadoop.hbase.io.RowResult[]) @@ -282,7 +282,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeCell(org * .apache.hadoop.hbase.io.Cell) @@ -301,7 +301,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeCellArray * (org.apache.hadoop.hbase.io.Cell[]) @@ -316,7 +316,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeTimestamps * (org.apache.hadoop.hbase.rest.RowModel.TimestampsDescriptor) @@ -335,8 +335,8 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { public void print(int output); public void print(long output); - - public void print(boolean output); + + public void print(boolean output); public void flush(); } @@ -371,7 +371,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.Printer#print * (java.io.PrintWriter, java.lang.String) @@ -382,7 +382,7 @@ public 
class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter# * print(int) @@ -393,7 +393,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter# * print(long) @@ -406,7 +406,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { * @see org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter#print(boolean) */ public void print(boolean output) { - writer.print(output); + writer.print(output); } } @@ -418,7 +418,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.Printer#print * (java.io.PrintWriter, java.lang.String) @@ -429,7 +429,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter# * print(int) @@ -441,7 +441,7 @@ public class SimpleXMLSerializer extends AbstractRestSerializer { /* * (non-Javadoc) - * + * * @see * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter# * print(long) diff --git a/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java b/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java index cc46b0e..2d29b3c 100644 --- a/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java +++ b/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java @@ -73,7 +73,7 @@ import org.apache.thrift.transport.TServerTransport; * Hbase API specified in the Hbase.thrift IDL file. */ public class ThriftServer { - + /** * The HBaseHandler is a glue object that connects Thrift RPC calls to the * HBase client API primarily defined in the HBaseAdmin and HTable objects. @@ -86,7 +86,7 @@ public class ThriftServer { // nextScannerId and scannerMap are used to manage scanner state protected int nextScannerId = 0; protected HashMap scannerMap = null; - + private static ThreadLocal> threadLocalTables = new ThreadLocal>() { @Override protected Map initialValue() { @@ -94,10 +94,10 @@ public class ThriftServer { } }; - + /** * Returns a list of all the column families for a given htable. - * + * * @param table * @return * @throws IOException @@ -110,10 +110,10 @@ public class ThriftServer { } return columns; } - + /** * Creates and returns an HTable instance from a given table name. - * + * * @param tableName * name of table * @return HTable object @@ -129,11 +129,11 @@ public class ThriftServer { } return tables.get(table); } - + /** * Assigns a unique ID to the scanner and adds the mapping to an internal * hash-map. - * + * * @param scanner * @return integer scanner id */ @@ -142,31 +142,31 @@ public class ThriftServer { scannerMap.put(id, scanner); return id; } - + /** * Returns the scanner associated with the specified ID. - * + * * @param id * @return a Scanner, or null if ID was invalid. */ protected synchronized ResultScanner getScanner(int id) { return scannerMap.get(id); } - + /** * Removes the scanner associated with the specified ID from the internal * id->scanner hash-map. - * + * * @param id * @return a Scanner, or null if ID was invalid. */ protected synchronized ResultScanner removeScanner(int id) { return scannerMap.remove(id); } - + /** * Constructs an HBaseHandler object. 
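A minimal sketch of the scanner bookkeeping that addScanner, getScanner and removeScanner describe above, assuming the same HashMap-backed registry keyed by an increasing integer id. It mirrors the intent of the methods shown in the hunk rather than reproducing the patched class.

```java
// Sketch of the Thrift scanner registry described above: each opened
// ResultScanner gets a unique integer id, and later calls look the scanner up
// (or remove it) by that id. Methods are synchronized, as in the original.
import java.util.HashMap;
import org.apache.hadoop.hbase.client.ResultScanner;

class ScannerRegistrySketch {
    private int nextScannerId = 0;
    private final HashMap<Integer, ResultScanner> scannerMap =
        new HashMap<Integer, ResultScanner>();

    synchronized int addScanner(ResultScanner scanner) {
        int id = nextScannerId++;
        scannerMap.put(id, scanner);
        return id;
    }

    synchronized ResultScanner getScanner(int id) {
        return scannerMap.get(id);      // null if the id is unknown or already removed
    }

    synchronized ResultScanner removeScanner(int id) {
        return scannerMap.remove(id);
    }
}
```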
- * + * * @throws MasterNotRunningException */ HBaseHandler() throws MasterNotRunningException { @@ -174,7 +174,7 @@ public class ThriftServer { admin = new HBaseAdmin(conf); scannerMap = new HashMap(); } - + public void enableTable(final byte[] tableName) throws IOError { try{ admin.enableTable(tableName); @@ -182,7 +182,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public void disableTable(final byte[] tableName) throws IOError{ try{ admin.disableTable(tableName); @@ -190,7 +190,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public boolean isTableEnabled(final byte[] tableName) throws IOError { try { return HTable.isTableEnabled(tableName); @@ -198,7 +198,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public void compact(byte[] tableNameOrRegionName) throws IOError { try{ admin.compact(tableNameOrRegionName); @@ -212,9 +212,9 @@ public class ThriftServer { admin.majorCompact(tableNameOrRegionName); } catch (IOException e) { throw new IOError(e.getMessage()); - } + } } - + public List getTableNames() throws IOError { try { HTableDescriptor[] tables = this.admin.listTables(); @@ -227,7 +227,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public List getTableRegions(byte[] tableName) throws IOError { try{ @@ -249,7 +249,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + @Deprecated public List get(byte[] tableName, byte[] row, byte[] column) throws IOError { @@ -274,7 +274,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + @Deprecated public List getVer(byte[] tableName, byte[] row, byte[] column, int numVersions) throws IOError { @@ -282,7 +282,7 @@ public class ThriftServer { return getVer(tableName, row, famAndQf[0], famAndQf[1], numVersions); } - public List getVer(byte [] tableName, byte [] row, byte [] family, + public List getVer(byte [] tableName, byte [] row, byte [] family, byte [] qualifier, int numVersions) throws IOError { try { HTable table = getTable(tableName); @@ -301,12 +301,12 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + @Deprecated public List getVerTs(byte[] tableName, byte[] row, byte[] column, long timestamp, int numVersions) throws IOError { byte [][] famAndQf = KeyValue.parseColumn(column); - return getVerTs(tableName, row, famAndQf[0], famAndQf[1], timestamp, + return getVerTs(tableName, row, famAndQf[0], famAndQf[1], timestamp, numVersions); } @@ -333,25 +333,25 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public List getRow(byte[] tableName, byte[] row) throws IOError { return getRowWithColumnsTs(tableName, row, null, HConstants.LATEST_TIMESTAMP); } - + public List getRowWithColumns(byte[] tableName, byte[] row, List columns) throws IOError { return getRowWithColumnsTs(tableName, row, columns, HConstants.LATEST_TIMESTAMP); } - + public List getRowTs(byte[] tableName, byte[] row, long timestamp) throws IOError { return getRowWithColumnsTs(tableName, row, null, timestamp); } - + public List getRowWithColumnsTs(byte[] tableName, byte[] row, List columns, long timestamp) throws IOError { try { @@ -379,12 +379,12 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public void deleteAll(byte[] tableName, byte[] row, byte[] column) throws IOError { deleteAllTs(tableName, row, column, HConstants.LATEST_TIMESTAMP); } - + public void deleteAllTs(byte[] tableName, byte[] row, byte[] column, long timestamp) throws IOError { try { @@ 
-397,16 +397,16 @@ public class ThriftServer { delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp); } table.delete(delete); - + } catch (IOException e) { throw new IOError(e.getMessage()); } } - + public void deleteAllRow(byte[] tableName, byte[] row) throws IOError { deleteAllRowTs(tableName, row, HConstants.LATEST_TIMESTAMP); } - + public void deleteAllRowTs(byte[] tableName, byte[] row, long timestamp) throws IOError { try { @@ -417,7 +417,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public void createTable(byte[] tableName, List columnFamilies) throws IOError, IllegalArgument, AlreadyExists { @@ -437,7 +437,7 @@ public class ThriftServer { throw new IllegalArgument(e.getMessage()); } } - + public void deleteTable(byte[] tableName) throws IOError { if (LOG.isDebugEnabled()) { LOG.debug("deleteTable: table=" + new String(tableName)); @@ -451,13 +451,13 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public void mutateRow(byte[] tableName, byte[] row, List mutations) throws IOError, IllegalArgument { mutateRowTs(tableName, row, mutations, HConstants.LATEST_TIMESTAMP); } - - public void mutateRowTs(byte[] tableName, byte[] row, + + public void mutateRowTs(byte[] tableName, byte[] row, List mutations, long timestamp) throws IOError, IllegalArgument { HTable table = null; try { @@ -488,8 +488,8 @@ public class ThriftServer { throw new IllegalArgument(e.getMessage()); } } - - public void mutateRows(byte[] tableName, List rowBatches) + + public void mutateRows(byte[] tableName, List rowBatches) throws IOError, IllegalArgument, TException { mutateRowsTs(tableName, rowBatches, HConstants.LATEST_TIMESTAMP); } @@ -539,14 +539,14 @@ public class ThriftServer { } @Deprecated - public long atomicIncrement(byte[] tableName, byte[] row, byte[] column, + public long atomicIncrement(byte[] tableName, byte[] row, byte[] column, long amount) throws IOError, IllegalArgument, TException { byte [][] famAndQf = KeyValue.parseColumn(column); return atomicIncrement(tableName, row, famAndQf[0], famAndQf[1], amount); } public long atomicIncrement(byte [] tableName, byte [] row, byte [] family, - byte [] qualifier, long amount) + byte [] qualifier, long amount) throws IOError, IllegalArgument, TException { HTable table; try { @@ -556,7 +556,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public void scannerClose(int id) throws IOError, IllegalArgument { LOG.debug("scannerClose: id=" + id); ResultScanner scanner = getScanner(id); @@ -566,7 +566,7 @@ public class ThriftServer { scanner.close(); removeScanner(id); } - + public List scannerGetList(int id,int nbRows) throws IllegalArgument, IOError { LOG.debug("scannerGetList: id=" + id); ResultScanner scanner = getScanner(id); @@ -605,7 +605,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public int scannerOpenWithStop(byte[] tableName, byte[] startRow, byte[] stopRow, List columns) throws IOError, TException { try { @@ -659,7 +659,7 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public int scannerOpenWithStopTs(byte[] tableName, byte[] startRow, byte[] stopRow, List columns, long timestamp) throws IOError, TException { @@ -679,16 +679,16 @@ public class ThriftServer { throw new IOError(e.getMessage()); } } - + public Map getColumnDescriptors( byte[] tableName) throws IOError, TException { try { TreeMap columns = new TreeMap(Bytes.BYTES_COMPARATOR); - + HTable table = getTable(tableName); HTableDescriptor desc = 
table.getTableDescriptor(); - + for (HColumnDescriptor e : desc.getFamilies()) { ColumnDescriptor col = ThriftUtilities.colDescFromHbase(e); columns.put(col.name, col); @@ -697,17 +697,17 @@ public class ThriftServer { } catch (IOException e) { throw new IOError(e.getMessage()); } - } + } } - + // // Main program and support routines // - + private static void printUsageAndExit() { printUsageAndExit(null); } - + private static void printUsageAndExit(final String message) { if (message != null) { System.err.println(message); @@ -744,7 +744,7 @@ public class ThriftServer { // if (cmd.startsWith(addressArgKey)) { // bindAddress = cmd.substring(addressArgKey.length()); // continue; -// } else +// } else if (cmd.startsWith(portArgKey)) { port = Integer.parseInt(cmd.substring(portArgKey.length())); continue; @@ -757,7 +757,7 @@ public class ThriftServer { "bin/hbase-daemon.sh stop thrift or send a kill signal to " + "the thrift server pid"); } - + // Print out usage if we get to here. printUsageAndExit(); } @@ -772,10 +772,10 @@ public class ThriftServer { protFactory); server.serve(); } - + /** * @param args - * @throws Exception + * @throws Exception */ public static void main(String [] args) throws Exception { doMain(args); diff --git a/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java b/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java index 13cbd3a..647885b 100644 --- a/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java +++ b/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java @@ -35,11 +35,11 @@ import org.apache.hadoop.hbase.thrift.generated.TRowResult; import org.apache.hadoop.hbase.util.Bytes; public class ThriftUtilities { - + /** * This utility method creates a new Hbase HColumnDescriptor object based on a * Thrift ColumnDescriptor "struct". - * + * * @param in * Thrift ColumnDescriptor object * @return HColumnDescriptor @@ -53,7 +53,7 @@ public class ThriftUtilities { if (in.bloomFilterType.compareTo("NONE") != 0) { bloom = true; } - + if (in.name == null || in.name.length <= 0) { throw new IllegalArgument("column name is empty"); } @@ -62,11 +62,11 @@ public class ThriftUtilities { in.timeToLive, bloom); return col; } - + /** * This utility method creates a new Thrift ColumnDescriptor "struct" based on * an Hbase HColumnDescriptor object. - * + * * @param in * Hbase HColumnDescriptor object * @return Thrift ColumnDescriptor @@ -81,11 +81,11 @@ public class ThriftUtilities { col.bloomFilterType = Boolean.toString(in.isBloomfilter()); return col; } - + /** * This utility method creates a list of Thrift TCell "struct" based on * an Hbase Cell object. The empty list is returned if the input is null. - * + * * @param in * Hbase Cell object * @return Thrift TCell array @@ -121,7 +121,7 @@ public class ThriftUtilities { * This utility method creates a list of Thrift TRowResult "struct" based on * an Hbase RowResult object. The empty list is returned if the input is * null. - * + * * @param in * Hbase RowResult object * @return Thrift TRowResult array @@ -154,7 +154,7 @@ public class ThriftUtilities { * This utility method creates a list of Thrift TRowResult "struct" based on * an Hbase RowResult object. The empty list is returned if the input is * null. 
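The null-tolerant convention noted above ("the empty list is returned if the input is null") can be sketched with simplified stand-in types; the Cell and TCell stand-ins below are hypothetical placeholders, used only so the sketch compiles on its own.

```java
// Sketch of the conversion convention described above: a null source yields an
// empty list rather than null, so Thrift callers never see a null collection.
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class CellConversionSketch {
    static class CellStandIn  { byte[] value; long timestamp; }
    static class TCellStandIn {
        final byte[] value; final long timestamp;
        TCellStandIn(byte[] value, long timestamp) { this.value = value; this.timestamp = timestamp; }
    }

    static List<TCellStandIn> cellFromHBase(CellStandIn in) {
        if (in == null) {
            return Collections.emptyList();   // empty list, never null
        }
        List<TCellStandIn> out = new ArrayList<TCellStandIn>(1);
        out.add(new TCellStandIn(in.value, in.timestamp));
        return out;
    }
}
```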
- * + * * @param in * Hbase RowResult object * @return Thrift TRowResult array diff --git a/src/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java b/src/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java index 278020f..44d4fef 100644 --- a/src/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java +++ b/src/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java @@ -48,7 +48,7 @@ public class AlreadyExists extends Exception implements TBase, java.io.Serializa } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT, + put(MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -174,7 +174,7 @@ public class AlreadyExists extends Exception implements TBase, java.io.Serializa while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -182,7 +182,7 @@ public class AlreadyExists extends Exception implements TBase, java.io.Serializa case MESSAGE: if (field.type == TType.STRING) { this.message = iprot.readString(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/src/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java b/src/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java index 62ff754..f10ef19 100644 --- a/src/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java +++ b/src/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java @@ -49,10 +49,10 @@ public class BatchMutation implements TBase, java.io.Serializable, Cloneable { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(MUTATIONS, new FieldMetaData("mutations", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(MUTATIONS, new FieldMetaData("mutations", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, Mutation.class)))); }}); @@ -247,7 +247,7 @@ public class BatchMutation implements TBase, java.io.Serializable, Cloneable { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -255,7 +255,7 @@ public class BatchMutation implements TBase, java.io.Serializable, Cloneable { case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -273,7 +273,7 @@ public class BatchMutation implements TBase, java.io.Serializable, Cloneable { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/src/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java b/src/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java index 53ba940..97fe9a1 100644 --- a/src/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java +++ b/src/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java @@ -78,23 +78,23 @@ public class ColumnDescriptor implements TBase, java.io.Serializable, Cloneable } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, + put(NAME, new 
FieldMetaData("name", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(MAXVERSIONS, new FieldMetaData("maxVersions", TFieldRequirementType.DEFAULT, + put(MAXVERSIONS, new FieldMetaData("maxVersions", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(COMPRESSION, new FieldMetaData("compression", TFieldRequirementType.DEFAULT, + put(COMPRESSION, new FieldMetaData("compression", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(INMEMORY, new FieldMetaData("inMemory", TFieldRequirementType.DEFAULT, + put(INMEMORY, new FieldMetaData("inMemory", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.BOOL))); - put(BLOOMFILTERTYPE, new FieldMetaData("bloomFilterType", TFieldRequirementType.DEFAULT, + put(BLOOMFILTERTYPE, new FieldMetaData("bloomFilterType", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(BLOOMFILTERVECTORSIZE, new FieldMetaData("bloomFilterVectorSize", TFieldRequirementType.DEFAULT, + put(BLOOMFILTERVECTORSIZE, new FieldMetaData("bloomFilterVectorSize", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(BLOOMFILTERNBHASHES, new FieldMetaData("bloomFilterNbHashes", TFieldRequirementType.DEFAULT, + put(BLOOMFILTERNBHASHES, new FieldMetaData("bloomFilterNbHashes", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(BLOCKCACHEENABLED, new FieldMetaData("blockCacheEnabled", TFieldRequirementType.DEFAULT, + put(BLOCKCACHEENABLED, new FieldMetaData("blockCacheEnabled", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.BOOL))); - put(TIMETOLIVE, new FieldMetaData("timeToLive", TFieldRequirementType.DEFAULT, + put(TIMETOLIVE, new FieldMetaData("timeToLive", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); }}); @@ -630,7 +630,7 @@ public class ColumnDescriptor implements TBase, java.io.Serializable, Cloneable while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -638,7 +638,7 @@ public class ColumnDescriptor implements TBase, java.io.Serializable, Cloneable case NAME: if (field.type == TType.STRING) { this.name = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -646,14 +646,14 @@ public class ColumnDescriptor implements TBase, java.io.Serializable, Cloneable if (field.type == TType.I32) { this.maxVersions = iprot.readI32(); this.__isset.maxVersions = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COMPRESSION: if (field.type == TType.STRING) { this.compression = iprot.readString(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -661,14 +661,14 @@ public class ColumnDescriptor implements TBase, java.io.Serializable, Cloneable if (field.type == TType.BOOL) { this.inMemory = iprot.readBool(); this.__isset.inMemory = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case BLOOMFILTERTYPE: if (field.type == TType.STRING) { this.bloomFilterType = iprot.readString(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -676,7 +676,7 @@ public class ColumnDescriptor implements TBase, java.io.Serializable, Cloneable if (field.type == TType.I32) { this.bloomFilterVectorSize = iprot.readI32(); this.__isset.bloomFilterVectorSize = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -684,7 +684,7 @@ public class ColumnDescriptor implements TBase, 
java.io.Serializable, Cloneable if (field.type == TType.I32) { this.bloomFilterNbHashes = iprot.readI32(); this.__isset.bloomFilterNbHashes = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -692,7 +692,7 @@ public class ColumnDescriptor implements TBase, java.io.Serializable, Cloneable if (field.type == TType.BOOL) { this.blockCacheEnabled = iprot.readBool(); this.__isset.blockCacheEnabled = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -700,7 +700,7 @@ public class ColumnDescriptor implements TBase, java.io.Serializable, Cloneable if (field.type == TType.I32) { this.timeToLive = iprot.readI32(); this.__isset.timeToLive = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/src/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java b/src/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java index f2e3bf5..3e6e6ce 100644 --- a/src/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java +++ b/src/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java @@ -38,7 +38,7 @@ public class Hbase { /** * Brings a table on-line (enables it) * @param tableName name of the table - * + * * @param tableName */ public void enableTable(byte[] tableName) throws IOError, TException; @@ -47,7 +47,7 @@ public class Hbase { * Disables a table (takes it off-line) If it is being served, the master * will tell the servers to stop serving it. * @param tableName name of the table - * + * * @param tableName */ public void disableTable(byte[] tableName) throws IOError, TException; @@ -55,7 +55,7 @@ public class Hbase { /** * @param tableName name of table to check * @return true if table is on-line - * + * * @param tableName */ public boolean isTableEnabled(byte[] tableName) throws IOError, TException; @@ -74,7 +74,7 @@ public class Hbase { * List all the column families assoicated with a table. * @param tableName table name * @return list of column family descriptors - * + * * @param tableName */ public Map getColumnDescriptors(byte[] tableName) throws IOError, TException; @@ -83,7 +83,7 @@ public class Hbase { * List the regions associated with a table. * @param tableName table name * @return list of region descriptors - * + * * @param tableName */ public List getTableRegions(byte[] tableName) throws IOError, TException; @@ -93,13 +93,13 @@ public class Hbase { * field for each ColumnDescriptor must be set and must end in a * colon (:). All other fields are optional and will get default * values if not explicitly specified. - * + * * @param tableName name of table to create * @param columnFamilies list of column family descriptors - * + * * @throws IllegalArgument if an input parameter is invalid * @throws AlreadyExists if the table name already exists - * + * * @param tableName * @param columnFamilies */ @@ -110,7 +110,7 @@ public class Hbase { * @param tableName name of table to delete * @throws IOError if table doesn't exist on server or there was some other * problem - * + * * @param tableName */ public void deleteTable(byte[] tableName) throws IOError, TException; @@ -118,12 +118,12 @@ public class Hbase { /** * Get a single TCell for the specified table, row, and column at the * latest timestamp. Returns an empty list if no such value exists. 
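[Editor's illustrative sketch, not part of this whitespace-only patch.] The table-admin methods of the Hbase interface above (createTable, isTableEnabled, disableTable, deleteTable) are normally driven from a generated Hbase.Client. The connection details below are assumptions, not something this patch specifies: localhost, port 9090, and the org.apache.thrift TSocket/TBinaryProtocol classes; adjust the host and port to match however ThriftServer was started.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class ThriftAdminExample {
  public static void main(String[] args) throws Exception {
    // Host and port are assumptions; use whatever ThriftServer was started with.
    TTransport transport = new TSocket("localhost", 9090);
    transport.open();
    Hbase.Client client = new Hbase.Client(new TBinaryProtocol(transport));
    try {
      // Per the createTable javadoc above, the family name must be set and end in a colon.
      ColumnDescriptor family = new ColumnDescriptor();
      family.name = Bytes.toBytes("info:");
      List<ColumnDescriptor> families = new ArrayList<ColumnDescriptor>();
      families.add(family);
      client.createTable(Bytes.toBytes("demo"), families);

      System.out.println("enabled: " + client.isTableEnabled(Bytes.toBytes("demo")));

      // A table must be disabled before it can be deleted.
      client.disableTable(Bytes.toBytes("demo"));
      client.deleteTable(Bytes.toBytes("demo"));
    } finally {
      transport.close();
    }
  }
}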
- * + * * @param tableName name of table * @param row row key * @param column column name * @return value for specified row/column - * + * * @param tableName * @param row * @param column @@ -133,13 +133,13 @@ public class Hbase { /** * Get the specified number of versions for the specified table, * row, and column. - * + * * @param tableName name of table * @param row row key * @param column column name * @param numVersions number of versions to retrieve * @return list of cells for specified row/column - * + * * @param tableName * @param row * @param column @@ -151,14 +151,14 @@ public class Hbase { * Get the specified number of versions for the specified table, * row, and column. Only versions less than or equal to the specified * timestamp will be returned. - * + * * @param tableName name of table * @param row row key * @param column column name * @param timestamp timestamp * @param numVersions number of versions to retrieve * @return list of cells for specified row/column - * + * * @param tableName * @param row * @param column @@ -170,11 +170,11 @@ public class Hbase { /** * Get all the data for the specified table and row at the latest * timestamp. Returns an empty list if the row does not exist. - * + * * @param tableName name of table * @param row row key * @return TRowResult containing the row and map of columns to TCells - * + * * @param tableName * @param row */ @@ -183,12 +183,12 @@ public class Hbase { /** * Get the specified columns for the specified table and row at the latest * timestamp. Returns an empty list if the row does not exist. - * + * * @param tableName name of table * @param row row key * @param columns List of columns to return, null for all columns * @return TRowResult containing the row and map of columns to TCells - * + * * @param tableName * @param row * @param columns @@ -198,12 +198,12 @@ public class Hbase { /** * Get all the data for the specified table and row at the specified * timestamp. Returns an empty list if the row does not exist. - * + * * @param tableName of table * @param row row key * @param timestamp timestamp * @return TRowResult containing the row and map of columns to TCells - * + * * @param tableName * @param row * @param timestamp @@ -213,12 +213,12 @@ public class Hbase { /** * Get the specified columns for the specified table and row at the specified * timestamp. Returns an empty list if the row does not exist. - * + * * @param tableName name of table * @param row row key * @param columns List of columns to return, null for all columns * @return TRowResult containing the row and map of columns to TCells - * + * * @param tableName * @param row * @param columns @@ -231,11 +231,11 @@ public class Hbase { * single transaction. If an exception is thrown, then the * transaction is aborted. Default current timestamp is used, and * all entries will have an identical timestamp. - * + * * @param tableName name of table * @param row row key * @param mutations list of mutation commands - * + * * @param tableName * @param row * @param mutations @@ -247,12 +247,12 @@ public class Hbase { * single transaction. If an exception is thrown, then the * transaction is aborted. The specified timestamp is used, and * all entries will have an identical timestamp. - * + * * @param tableName name of table * @param row row key * @param mutations list of mutation commands * @param timestamp timestamp - * + * * @param tableName * @param row * @param mutations @@ -265,10 +265,10 @@ public class Hbase { * in a single transaction. 
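[Editor's illustrative sketch, not part of this whitespace-only patch.] The mutateRow/getRow javadoc above describes applying a batch of Mutations to one row and reading it back. The sketch below assumes a connected Hbase.Client (as in the previous sketch), a "demo" table with an "info:" family, and the public isDelete/column/value and row/columns fields that the generated Mutation and TRowResult structs expose.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.hadoop.hbase.thrift.generated.Mutation;
import org.apache.hadoop.hbase.thrift.generated.TRowResult;
import org.apache.hadoop.hbase.util.Bytes;

public class MutateRowExample {
  static void putThenRead(Hbase.Client client) throws Exception {
    Mutation m = new Mutation();
    m.isDelete = false;                          // a put, not a delete
    m.column = Bytes.toBytes("info:greeting");   // family:qualifier
    m.value = Bytes.toBytes("hello");
    List<Mutation> mutations = new ArrayList<Mutation>();
    mutations.add(m);

    // All mutations in one call are applied to the row with a single timestamp.
    client.mutateRow(Bytes.toBytes("demo"), Bytes.toBytes("row1"), mutations);

    // Per the javadoc above, getRow returns an empty list if the row does not exist.
    for (TRowResult result : client.getRow(Bytes.toBytes("demo"), Bytes.toBytes("row1"))) {
      System.out.println(Bytes.toString(result.row) + ": " + result.columns.size() + " column(s)");
    }
  }
}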
If an exception is thrown, then the * transaction is aborted. Default current timestamp is used, and * all entries will have an identical timestamp. - * + * * @param tableName name of table * @param rowBatches list of row batches - * + * * @param tableName * @param rowBatches */ @@ -279,11 +279,11 @@ public class Hbase { * in a single transaction. If an exception is thrown, then the * transaction is aborted. The specified timestamp is used, and * all entries will have an identical timestamp. - * + * * @param tableName name of table * @param rowBatches list of row batches * @param timestamp timestamp - * + * * @param tableName * @param rowBatches * @param timestamp @@ -296,7 +296,7 @@ public class Hbase { * @param row row to increment * @param column name of column * @param value amount to increment by - * + * * @param tableName * @param row * @param column @@ -306,11 +306,11 @@ public class Hbase { /** * Delete all cells that match the passed row and column. - * + * * @param tableName name of table * @param row Row to update * @param column name of column whose value is to be deleted - * + * * @param tableName * @param row * @param column @@ -320,12 +320,12 @@ public class Hbase { /** * Delete all cells that match the passed row and column and whose * timestamp is equal-to or older than the passed timestamp. - * + * * @param tableName name of table * @param row Row to update * @param column name of column whose value is to be deleted * @param timestamp timestamp - * + * * @param tableName * @param row * @param column @@ -335,10 +335,10 @@ public class Hbase { /** * Completely delete the row's cells. - * + * * @param tableName name of table * @param row key of the row to be completely deleted. - * + * * @param tableName * @param row */ @@ -347,11 +347,11 @@ public class Hbase { /** * Completely delete the row's cells marked with a timestamp * equal-to or older than the passed timestamp. - * + * * @param tableName name of table * @param row key of the row to be completely deleted. * @param timestamp timestamp - * + * * @param tableName * @param row * @param timestamp @@ -361,16 +361,16 @@ public class Hbase { /** * Get a scanner on the current table starting at the specified row and * ending at the last row in the table. Return the specified columns. - * + * * @param columns columns to scan. If column name is a column family, all * columns of the specified column family are returned. Its also possible * to pass a regex in the column qualifier. * @param tableName name of table * @param startRow starting row in table to scan. send "" (empty string) to * start at the first row. - * + * * @return scanner id to be used with other scanner procedures - * + * * @param tableName * @param startRow * @param columns @@ -381,7 +381,7 @@ public class Hbase { * Get a scanner on the current table starting and stopping at the * specified rows. ending at the last row in the table. Return the * specified columns. - * + * * @param columns columns to scan. If column name is a column family, all * columns of the specified column family are returned. Its also possible * to pass a regex in the column qualifier. @@ -390,9 +390,9 @@ public class Hbase { * start at the first row. * @param stopRow row to stop scanning on. This row is *not* included in the * scanner's results - * + * * @return scanner id to be used with other scanner procedures - * + * * @param tableName * @param startRow * @param stopRow @@ -403,12 +403,12 @@ public class Hbase { /** * Open a scanner for a given prefix. 
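[Editor's illustrative sketch, not part of this whitespace-only patch.] The atomicIncrement and deleteAll/deleteAllRow calls documented above pair naturally: bump a counter cell, then clean it (or the whole row) up. The signatures match the ThriftServer handler earlier in this patch; the "demo" table, "row1" row, and "info:hits" column are assumed example names.

import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.hadoop.hbase.util.Bytes;

public class CounterAndDeleteExample {
  // Assumes 'client' was connected as in the earlier sketch and that a 'demo'
  // table with an 'info:' family already exists.
  static void incrementThenClean(Hbase.Client client) throws Exception {
    // atomicIncrement returns the new value of the counter cell.
    long hits = client.atomicIncrement(Bytes.toBytes("demo"), Bytes.toBytes("row1"),
        Bytes.toBytes("info:hits"), 1);
    System.out.println("hits=" + hits);

    // Delete just that cell, then the whole row.
    client.deleteAll(Bytes.toBytes("demo"), Bytes.toBytes("row1"), Bytes.toBytes("info:hits"));
    client.deleteAllRow(Bytes.toBytes("demo"), Bytes.toBytes("row1"));
  }
}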
That is all rows will have the specified * prefix. No other rows will be returned. - * + * * @param tableName name of table * @param startAndPrefix the prefix (and thus start row) of the keys you want * @param columns the columns you want returned * @return scanner id to use with other scanner calls - * + * * @param tableName * @param startAndPrefix * @param columns @@ -419,7 +419,7 @@ public class Hbase { * Get a scanner on the current table starting at the specified row and * ending at the last row in the table. Return the specified columns. * Only values with the specified timestamp are returned. - * + * * @param columns columns to scan. If column name is a column family, all * columns of the specified column family are returned. Its also possible * to pass a regex in the column qualifier. @@ -427,9 +427,9 @@ public class Hbase { * @param startRow starting row in table to scan. send "" (empty string) to * start at the first row. * @param timestamp timestamp - * + * * @return scanner id to be used with other scanner procedures - * + * * @param tableName * @param startRow * @param columns @@ -442,7 +442,7 @@ public class Hbase { * specified rows. ending at the last row in the table. Return the * specified columns. Only values with the specified timestamp are * returned. - * + * * @param columns columns to scan. If column name is a column family, all * columns of the specified column family are returned. Its also possible * to pass a regex in the column qualifier. @@ -452,9 +452,9 @@ public class Hbase { * @param stopRow row to stop scanning on. This row is *not* included * in the scanner's results * @param timestamp timestamp - * + * * @return scanner id to be used with other scanner procedures - * + * * @param tableName * @param startRow * @param stopRow @@ -468,12 +468,12 @@ public class Hbase { * row in the table. When there are no more rows in the table, or a key * greater-than-or-equal-to the scanner's specified stopRow is reached, * an empty list is returned. - * + * * @param id id of a scanner returned by scannerOpen * @return a TRowResult containing the current row and a map of the columns to TCells. * @throws IllegalArgument if ScannerID is invalid * @throws NotFound when the scanner reaches the end - * + * * @param id */ public List scannerGet(int id) throws IOError, IllegalArgument, TException; @@ -483,13 +483,13 @@ public class Hbase { * rows and advances to the next row in the table. When there are no more * rows in the table, or a key greater-than-or-equal-to the scanner's * specified stopRow is reached, an empty list is returned. - * + * * @param id id of a scanner returned by scannerOpen * @param nbRows number of results to regturn * @return a TRowResult containing the current row and a map of the columns to TCells. * @throws IllegalArgument if ScannerID is invalid * @throws NotFound when the scanner reaches the end - * + * * @param id * @param nbRows */ @@ -497,10 +497,10 @@ public class Hbase { /** * Closes the server-state associated with an open scanner. 
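[Editor's illustrative sketch, not part of this whitespace-only patch.] The scanner javadoc above describes the open -> get-in-batches -> close lifecycle; scannerGetList's signature is also visible in the ThriftServer hunk earlier in this patch. The sketch below assumes a connected Hbase.Client and the same "demo"/"info:" example names as before.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.hadoop.hbase.thrift.generated.TRowResult;
import org.apache.hadoop.hbase.util.Bytes;

public class ScannerExample {
  static void scanAll(Hbase.Client client) throws Exception {
    List<byte[]> columns = new ArrayList<byte[]>();
    columns.add(Bytes.toBytes("info:"));   // a bare family name returns all of its columns
    // An empty startRow means "start at the first row", per the scannerOpen javadoc above.
    int scannerId = client.scannerOpen(Bytes.toBytes("demo"), Bytes.toBytes(""), columns);
    try {
      List<TRowResult> batch;
      // An empty list signals that the scanner is exhausted.
      while (!(batch = client.scannerGetList(scannerId, 100)).isEmpty()) {
        for (TRowResult row : batch) {
          System.out.println(Bytes.toString(row.row));
        }
      }
    } finally {
      client.scannerClose(scannerId);      // always release the server-side scanner state
    }
  }
}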
- * + * * @param id id of a scanner returned by scannerOpen * @throws IllegalArgument if ScannerID is invalid - * + * * @param id */ public void scannerClose(int id) throws IOError, IllegalArgument, TException; @@ -2581,7 +2581,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -2707,7 +2707,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -2715,7 +2715,7 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -2780,7 +2780,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -2906,7 +2906,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -2915,7 +2915,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -2979,7 +2979,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -3105,7 +3105,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -3113,7 +3113,7 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -3178,7 +3178,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -3304,7 +3304,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -3313,7 +3313,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -3377,7 +3377,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -3503,7 +3503,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -3511,7 +3511,7 @@ public 
class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -3580,9 +3580,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.BOOL))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -3757,7 +3757,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -3766,7 +3766,7 @@ public class Hbase { if (field.type == TType.BOOL) { this.success = iprot.readBool(); this.__isset.success = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -3774,7 +3774,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -3846,7 +3846,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAMEORREGIONNAME, new FieldMetaData("tableNameOrRegionName", TFieldRequirementType.DEFAULT, + put(TABLENAMEORREGIONNAME, new FieldMetaData("tableNameOrRegionName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -3972,7 +3972,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -3980,7 +3980,7 @@ public class Hbase { case TABLENAMEORREGIONNAME: if (field.type == TType.STRING) { this.tableNameOrRegionName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -4045,7 +4045,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -4171,7 +4171,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -4180,7 +4180,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -4244,7 +4244,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAMEORREGIONNAME, new FieldMetaData("tableNameOrRegionName", TFieldRequirementType.DEFAULT, + put(TABLENAMEORREGIONNAME, new FieldMetaData("tableNameOrRegionName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -4370,7 +4370,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -4378,7 +4378,7 @@ public class Hbase { case TABLENAMEORREGIONNAME: if (field.type == TType.STRING) { this.tableNameOrRegionName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -4443,7 +4443,7 @@ public class Hbase { } 
public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -4569,7 +4569,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -4578,7 +4578,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -4703,7 +4703,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -4760,10 +4760,10 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -4958,7 +4958,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -4976,7 +4976,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -4984,7 +4984,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -5066,7 +5066,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -5192,7 +5192,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -5200,7 +5200,7 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -5268,11 +5268,11 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new MapMetaData(TType.MAP, - new FieldValueMetaData(TType.STRING), + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new MapMetaData(TType.MAP, + new FieldValueMetaData(TType.STRING), new StructMetaData(TType.STRUCT, ColumnDescriptor.class)))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -5471,7 +5471,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -5492,7 +5492,7 @@ public class Hbase { } iprot.readMapEnd(); } - } else { + } else { 
TProtocolUtil.skip(iprot, field.type); } break; @@ -5500,7 +5500,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -5583,7 +5583,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -5709,7 +5709,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -5717,7 +5717,7 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -5785,10 +5785,10 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRegionInfo.class)))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -5983,7 +5983,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -6002,7 +6002,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -6010,7 +6010,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -6095,10 +6095,10 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMNFAMILIES, new FieldMetaData("columnFamilies", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(COLUMNFAMILIES, new FieldMetaData("columnFamilies", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, ColumnDescriptor.class)))); }}); @@ -6293,7 +6293,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -6301,7 +6301,7 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -6319,7 +6319,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -6409,11 +6409,11 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(IA, new FieldMetaData("ia", 
TFieldRequirementType.DEFAULT, + put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(EXIST, new FieldMetaData("exist", TFieldRequirementType.DEFAULT, + put(EXIST, new FieldMetaData("exist", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -6639,7 +6639,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -6648,7 +6648,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -6656,7 +6656,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -6664,7 +6664,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.exist = new AlreadyExists(); this.exist.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -6752,7 +6752,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -6878,7 +6878,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -6886,7 +6886,7 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -6951,7 +6951,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -7077,7 +7077,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -7086,7 +7086,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -7156,11 +7156,11 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -7386,7 +7386,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -7394,21 +7394,21 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { 
TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -7502,10 +7502,10 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TCell.class)))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -7700,7 +7700,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -7719,7 +7719,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -7727,7 +7727,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -7819,13 +7819,13 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(NUMVERSIONS, new FieldMetaData("numVersions", TFieldRequirementType.DEFAULT, + put(NUMVERSIONS, new FieldMetaData("numVersions", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); }}); @@ -8100,7 +8100,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -8108,21 +8108,21 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -8130,7 +8130,7 @@ public class Hbase { if (field.type == TType.I32) { this.numVersions = iprot.readI32(); this.__isset.numVersions = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -8231,10 +8231,10 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new 
ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TCell.class)))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -8429,7 +8429,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -8448,7 +8448,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -8456,7 +8456,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -8552,15 +8552,15 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); - put(NUMVERSIONS, new FieldMetaData("numVersions", TFieldRequirementType.DEFAULT, + put(NUMVERSIONS, new FieldMetaData("numVersions", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); }}); @@ -8884,7 +8884,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -8892,21 +8892,21 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -8914,7 +8914,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); this.__isset.timestamp = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -8922,7 +8922,7 @@ public class Hbase { if (field.type == TType.I32) { this.numVersions = iprot.readI32(); this.__isset.numVersions = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -9030,10 +9030,10 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TCell.class)))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -9228,7 +9228,7 @@ 
public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -9247,7 +9247,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -9255,7 +9255,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -9340,9 +9340,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -9518,7 +9518,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -9526,14 +9526,14 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -9614,10 +9614,10 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRowResult.class)))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -9812,7 +9812,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -9831,7 +9831,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -9839,7 +9839,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -9927,12 +9927,12 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); }}); @@ -10177,7 +10177,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch 
(field.id) @@ -10185,14 +10185,14 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -10209,7 +10209,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -10309,10 +10309,10 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRowResult.class)))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -10507,7 +10507,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -10526,7 +10526,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -10534,7 +10534,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -10623,11 +10623,11 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -10852,7 +10852,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -10860,14 +10860,14 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -10875,7 +10875,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); this.__isset.timestamp = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -10963,10 +10963,10 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRowResult.class)))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", 
TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -11161,7 +11161,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -11180,7 +11180,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -11188,7 +11188,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -11280,14 +11280,14 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); - put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -11581,7 +11581,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -11589,14 +11589,14 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -11613,7 +11613,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -11621,7 +11621,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); this.__isset.timestamp = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -11728,10 +11728,10 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRowResult.class)))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -11926,7 +11926,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -11945,7 +11945,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -11953,7 +11953,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -12041,12 
+12041,12 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(MUTATIONS, new FieldMetaData("mutations", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(MUTATIONS, new FieldMetaData("mutations", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, Mutation.class)))); }}); @@ -12291,7 +12291,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -12299,14 +12299,14 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -12324,7 +12324,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -12424,9 +12424,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -12602,7 +12602,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -12611,7 +12611,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -12619,7 +12619,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -12705,14 +12705,14 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(MUTATIONS, new FieldMetaData("mutations", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(MUTATIONS, new FieldMetaData("mutations", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, Mutation.class)))); - put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -13006,7 +13006,7 @@ public class Hbase { while (true) { field = 
iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -13014,14 +13014,14 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -13039,7 +13039,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -13047,7 +13047,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); this.__isset.timestamp = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -13154,9 +13154,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -13332,7 +13332,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -13341,7 +13341,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -13349,7 +13349,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -13428,10 +13428,10 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROWBATCHES, new FieldMetaData("rowBatches", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(ROWBATCHES, new FieldMetaData("rowBatches", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, BatchMutation.class)))); }}); @@ -13626,7 +13626,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -13634,7 +13634,7 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -13652,7 +13652,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -13739,9 +13739,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -13917,7 +13917,7 @@ public class Hbase { while 
(true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -13926,7 +13926,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -13934,7 +13934,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -14017,12 +14017,12 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROWBATCHES, new FieldMetaData("rowBatches", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(ROWBATCHES, new FieldMetaData("rowBatches", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, BatchMutation.class)))); - put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -14266,7 +14266,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -14274,7 +14274,7 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -14292,7 +14292,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -14300,7 +14300,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); this.__isset.timestamp = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -14394,9 +14394,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -14572,7 +14572,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -14581,7 +14581,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -14589,7 +14589,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -14675,13 +14675,13 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, 
new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT, + put(VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -14956,7 +14956,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -14964,21 +14964,21 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -14986,7 +14986,7 @@ public class Hbase { if (field.type == TType.I64) { this.value = iprot.readI64(); this.__isset.value = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -15091,11 +15091,11 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -15320,7 +15320,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -15329,7 +15329,7 @@ public class Hbase { if (field.type == TType.I64) { this.success = iprot.readI64(); this.__isset.success = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -15337,7 +15337,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -15345,7 +15345,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -15435,11 +15435,11 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -15665,7 +15665,7 @@ public class Hbase { 
while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -15673,21 +15673,21 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -15778,7 +15778,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -15904,7 +15904,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -15913,7 +15913,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -15987,13 +15987,13 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -16268,7 +16268,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -16276,21 +16276,21 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -16298,7 +16298,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); this.__isset.timestamp = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -16396,7 +16396,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -16522,7 +16522,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if 
(field.type == TType.STOP) { break; } switch (field.id) @@ -16531,7 +16531,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -16598,9 +16598,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -16776,7 +16776,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -16784,14 +16784,14 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -16869,7 +16869,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -16995,7 +16995,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -17004,7 +17004,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -17075,11 +17075,11 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -17304,7 +17304,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -17312,14 +17312,14 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -17327,7 +17327,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); this.__isset.timestamp = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -17412,7 +17412,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new 
HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -17538,7 +17538,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -17547,7 +17547,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -17617,12 +17617,12 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(STARTROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, + put(STARTROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); }}); @@ -17867,7 +17867,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -17875,14 +17875,14 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case STARTROW: if (field.type == TType.STRING) { this.startRow = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -17899,7 +17899,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -18000,9 +18000,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -18177,7 +18177,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -18186,7 +18186,7 @@ public class Hbase { if (field.type == TType.I32) { this.success = iprot.readI32(); this.__isset.success = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -18194,7 +18194,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -18275,14 +18275,14 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(STARTROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, + put(STARTROW, new 
FieldMetaData("startRow", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(STOPROW, new FieldMetaData("stopRow", TFieldRequirementType.DEFAULT, + put(STOPROW, new FieldMetaData("stopRow", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); }}); @@ -18577,7 +18577,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -18585,21 +18585,21 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case STARTROW: if (field.type == TType.STRING) { this.startRow = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case STOPROW: if (field.type == TType.STRING) { this.stopRow = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -18616,7 +18616,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -18730,9 +18730,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -18907,7 +18907,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -18916,7 +18916,7 @@ public class Hbase { if (field.type == TType.I32) { this.success = iprot.readI32(); this.__isset.success = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -18924,7 +18924,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -19002,12 +19002,12 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(STARTANDPREFIX, new FieldMetaData("startAndPrefix", TFieldRequirementType.DEFAULT, + put(STARTANDPREFIX, new FieldMetaData("startAndPrefix", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); }}); @@ -19252,7 +19252,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -19260,14 +19260,14 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { 
this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case STARTANDPREFIX: if (field.type == TType.STRING) { this.startAndPrefix = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -19284,7 +19284,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -19385,9 +19385,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -19562,7 +19562,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -19571,7 +19571,7 @@ public class Hbase { if (field.type == TType.I32) { this.success = iprot.readI32(); this.__isset.success = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -19579,7 +19579,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -19661,14 +19661,14 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(STARTROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, + put(STARTROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); - put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -19962,7 +19962,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -19970,14 +19970,14 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case STARTROW: if (field.type == TType.STRING) { this.startRow = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -19994,7 +19994,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -20002,7 +20002,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); this.__isset.timestamp = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -20110,9 +20110,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", 
TFieldRequirementType.DEFAULT, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -20287,7 +20287,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -20296,7 +20296,7 @@ public class Hbase { if (field.type == TType.I32) { this.success = iprot.readI32(); this.__isset.success = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -20304,7 +20304,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -20389,16 +20389,16 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, + put(TABLENAME, new FieldMetaData("tableName", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(STARTROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, + put(STARTROW, new FieldMetaData("startRow", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(STOPROW, new FieldMetaData("stopRow", TFieldRequirementType.DEFAULT, + put(STOPROW, new FieldMetaData("stopRow", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new FieldValueMetaData(TType.STRING)))); - put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -20742,7 +20742,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -20750,21 +20750,21 @@ public class Hbase { case TABLENAME: if (field.type == TType.STRING) { this.tableName = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case STARTROW: if (field.type == TType.STRING) { this.startRow = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case STOPROW: if (field.type == TType.STRING) { this.stopRow = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -20781,7 +20781,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -20789,7 +20789,7 @@ public class Hbase { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); this.__isset.timestamp = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -20910,9 +20910,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", 
TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -21087,7 +21087,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -21096,7 +21096,7 @@ public class Hbase { if (field.type == TType.I32) { this.success = iprot.readI32(); this.__isset.success = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -21104,7 +21104,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -21177,7 +21177,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, + put(ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); }}); @@ -21302,7 +21302,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -21311,7 +21311,7 @@ public class Hbase { if (field.type == TType.I32) { this.id = iprot.readI32(); this.__isset.id = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -21376,12 +21376,12 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRowResult.class)))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -21626,7 +21626,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -21645,7 +21645,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -21653,7 +21653,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -21661,7 +21661,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -21760,9 +21760,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, + put(ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); - put(NBROWS, new FieldMetaData("nbRows", TFieldRequirementType.DEFAULT, + put(NBROWS, new FieldMetaData("nbRows", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); }}); @@ -21936,7 +21936,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -21945,7 +21945,7 @@ public class Hbase { if (field.type == 
TType.I32) { this.id = iprot.readI32(); this.__isset.id = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -21953,7 +21953,7 @@ public class Hbase { if (field.type == TType.I32) { this.nbRows = iprot.readI32(); this.__isset.nbRows = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -22025,12 +22025,12 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, - new ListMetaData(TType.LIST, + put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT, + new ListMetaData(TType.LIST, new StructMetaData(TType.STRUCT, TRowResult.class)))); - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -22275,7 +22275,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -22294,7 +22294,7 @@ public class Hbase { } iprot.readListEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -22302,7 +22302,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -22310,7 +22310,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -22405,7 +22405,7 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, + put(ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I32))); }}); @@ -22530,7 +22530,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -22539,7 +22539,7 @@ public class Hbase { if (field.type == TType.I32) { this.id = iprot.readI32(); this.__isset.id = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -22601,9 +22601,9 @@ public class Hbase { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, + put(IO, new FieldMetaData("io", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); - put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, + put(IA, new FieldMetaData("ia", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRUCT))); }}); @@ -22779,7 +22779,7 @@ public class Hbase { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -22788,7 +22788,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.io = new IOError(); this.io.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -22796,7 +22796,7 @@ public class Hbase { if (field.type == TType.STRUCT) { this.ia = new IllegalArgument(); this.ia.read(iprot); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; 
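Every hunk in the generated Hbase.java above touches the same two idioms: a static metaDataMap built in a double-brace initializer, and a read() loop that walks readFieldBegin() until TType.STOP, switches on field.id, and falls back to TProtocolUtil.skip() whenever the wire type does not match. A minimal sketch of that read pattern follows, using a hypothetical two-field struct; the class name, field names, and exact libthrift package layout are illustrative assumptions, not code taken from this patch.

    // Illustrative sketch only: a stripped-down version of the generated read() idiom.
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TField;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.thrift.protocol.TProtocolUtil;
    import org.apache.thrift.protocol.TType;

    public class MyArgs {                       // hypothetical struct
      public static final short TABLENAME = 1;
      public static final short TIMESTAMP = 2;

      public byte[] tableName;
      public long timestamp;

      public void read(TProtocol iprot) throws TException {
        iprot.readStructBegin();
        while (true) {
          TField field = iprot.readFieldBegin();
          if (field.type == TType.STOP) {       // end of struct on the wire
            break;
          }
          switch (field.id) {
          case TABLENAME:
            if (field.type == TType.STRING) {
              this.tableName = iprot.readBinary();          // raw bytes, as in the generated code above
            } else {
              TProtocolUtil.skip(iprot, field.type);        // wrong wire type: skip the value
            }
            break;
          case TIMESTAMP:
            if (field.type == TType.I64) {
              this.timestamp = iprot.readI64();
            } else {
              TProtocolUtil.skip(iprot, field.type);
            }
            break;
          default:
            TProtocolUtil.skip(iprot, field.type);          // unknown field id: skip it
            break;
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
      }
    }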
diff --git a/src/java/org/apache/hadoop/hbase/thrift/generated/IOError.java b/src/java/org/apache/hadoop/hbase/thrift/generated/IOError.java index 5220b0a..842ec07 100644 --- a/src/java/org/apache/hadoop/hbase/thrift/generated/IOError.java +++ b/src/java/org/apache/hadoop/hbase/thrift/generated/IOError.java @@ -48,7 +48,7 @@ public class IOError extends Exception implements TBase, java.io.Serializable, C } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT, + put(MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -174,7 +174,7 @@ public class IOError extends Exception implements TBase, java.io.Serializable, C while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -182,7 +182,7 @@ public class IOError extends Exception implements TBase, java.io.Serializable, C case MESSAGE: if (field.type == TType.STRING) { this.message = iprot.readString(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/src/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java b/src/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java index d7870df..c6d0852 100644 --- a/src/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java +++ b/src/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java @@ -47,7 +47,7 @@ public class IllegalArgument extends Exception implements TBase, java.io.Seriali } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT, + put(MESSAGE, new FieldMetaData("message", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -173,7 +173,7 @@ public class IllegalArgument extends Exception implements TBase, java.io.Seriali while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -181,7 +181,7 @@ public class IllegalArgument extends Exception implements TBase, java.io.Seriali case MESSAGE: if (field.type == TType.STRING) { this.message = iprot.readString(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/src/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java b/src/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java index a961325..6f78e23 100644 --- a/src/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java +++ b/src/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java @@ -53,11 +53,11 @@ public class Mutation implements TBase, java.io.Serializable, Cloneable { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(ISDELETE, new FieldMetaData("isDelete", TFieldRequirementType.DEFAULT, + put(ISDELETE, new FieldMetaData("isDelete", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.BOOL))); - put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, + put(COLUMN, new FieldMetaData("column", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT, + put(VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); }}); @@ -284,7 +284,7 @@ public class Mutation implements TBase, java.io.Serializable, Cloneable { 
while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -293,21 +293,21 @@ public class Mutation implements TBase, java.io.Serializable, Cloneable { if (field.type == TType.BOOL) { this.isDelete = iprot.readBool(); this.__isset.isDelete = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case COLUMN: if (field.type == TType.STRING) { this.column = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case VALUE: if (field.type == TType.STRING) { this.value = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/src/java/org/apache/hadoop/hbase/thrift/generated/TCell.java b/src/java/org/apache/hadoop/hbase/thrift/generated/TCell.java index 5c1d57a..f9db35a 100644 --- a/src/java/org/apache/hadoop/hbase/thrift/generated/TCell.java +++ b/src/java/org/apache/hadoop/hbase/thrift/generated/TCell.java @@ -53,9 +53,9 @@ public class TCell implements TBase, java.io.Serializable, Cloneable { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT, + put(VALUE, new FieldMetaData("value", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, + put(TIMESTAMP, new FieldMetaData("timestamp", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); }}); @@ -230,7 +230,7 @@ public class TCell implements TBase, java.io.Serializable, Cloneable { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -238,7 +238,7 @@ public class TCell implements TBase, java.io.Serializable, Cloneable { case VALUE: if (field.type == TType.STRING) { this.value = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -246,7 +246,7 @@ public class TCell implements TBase, java.io.Serializable, Cloneable { if (field.type == TType.I64) { this.timestamp = iprot.readI64(); this.__isset.timestamp = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/src/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java b/src/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java index e09d9dc..95e85c2 100644 --- a/src/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java +++ b/src/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java @@ -60,15 +60,15 @@ public class TRegionInfo implements TBase, java.io.Serializable, Cloneable { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(STARTKEY, new FieldMetaData("startKey", TFieldRequirementType.DEFAULT, + put(STARTKEY, new FieldMetaData("startKey", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ENDKEY, new FieldMetaData("endKey", TFieldRequirementType.DEFAULT, + put(ENDKEY, new FieldMetaData("endKey", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, + put(ID, new FieldMetaData("id", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.I64))); - put(NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, + put(NAME, new FieldMetaData("name", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(VERSION, new 
FieldMetaData("version", TFieldRequirementType.DEFAULT, + put(VERSION, new FieldMetaData("version", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.BYTE))); }}); @@ -392,7 +392,7 @@ public class TRegionInfo implements TBase, java.io.Serializable, Cloneable { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -400,14 +400,14 @@ public class TRegionInfo implements TBase, java.io.Serializable, Cloneable { case STARTKEY: if (field.type == TType.STRING) { this.startKey = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case ENDKEY: if (field.type == TType.STRING) { this.endKey = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -415,14 +415,14 @@ public class TRegionInfo implements TBase, java.io.Serializable, Cloneable { if (field.type == TType.I64) { this.id = iprot.readI64(); this.__isset.id = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; case NAME: if (field.type == TType.STRING) { this.name = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -430,7 +430,7 @@ public class TRegionInfo implements TBase, java.io.Serializable, Cloneable { if (field.type == TType.BYTE) { this.version = iprot.readByte(); this.__isset.version = true; - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/src/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java b/src/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java index dc0572c..0488a67 100644 --- a/src/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java +++ b/src/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java @@ -49,11 +49,11 @@ public class TRowResult implements TBase, java.io.Serializable, Cloneable { } public static final Map metaDataMap = Collections.unmodifiableMap(new HashMap() {{ - put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, + put(ROW, new FieldMetaData("row", TFieldRequirementType.DEFAULT, new FieldValueMetaData(TType.STRING))); - put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, - new MapMetaData(TType.MAP, - new FieldValueMetaData(TType.STRING), + put(COLUMNS, new FieldMetaData("columns", TFieldRequirementType.DEFAULT, + new MapMetaData(TType.MAP, + new FieldValueMetaData(TType.STRING), new StructMetaData(TType.STRUCT, TCell.class)))); }}); @@ -252,7 +252,7 @@ public class TRowResult implements TBase, java.io.Serializable, Cloneable { while (true) { field = iprot.readFieldBegin(); - if (field.type == TType.STOP) { + if (field.type == TType.STOP) { break; } switch (field.id) @@ -260,7 +260,7 @@ public class TRowResult implements TBase, java.io.Serializable, Cloneable { case ROW: if (field.type == TType.STRING) { this.row = iprot.readBinary(); - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; @@ -280,7 +280,7 @@ public class TRowResult implements TBase, java.io.Serializable, Cloneable { } iprot.readMapEnd(); } - } else { + } else { TProtocolUtil.skip(iprot, field.type); } break; diff --git a/src/java/org/apache/hadoop/hbase/util/Base64.java b/src/java/org/apache/hadoop/hbase/util/Base64.java index 867af77..9dcf497 100644 --- a/src/java/org/apache/hadoop/hbase/util/Base64.java +++ b/src/java/org/apache/hadoop/hbase/util/Base64.java @@ -1,6 +1,6 @@ /** * Encodes and decodes to and from Base64 notation. - * + * *
* Homepage: http://iharder.net/base64. *
          @@ -28,7 +28,7 @@ * Special thanks to Jim Kellerman at * http://www.powerset.com/ for contributing the new Base64 dialects. *
- * + * *
* v2.1 - Cleaned up javadoc comments and unused variables and methods. * Added some convenience methods for reading and writing to and from files. *
@@ -46,7 +46,7 @@ * decode( String s, boolean gzipCompressed ). Added the ability to * "suspend" encoding in the Output Stream so you can turn on and off the * encoding if you need to embed base64 data in an otherwise "normal" stream - * (like an XML file). + * (like an XML file). *
* v1.5 - Output stream pases on flush() command but doesn't do anything * itself. This helps when using GZIP streams. Added the ability to * GZip-compress objects before encoding them.
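The hunks that follow all fall inside org.apache.hadoop.hbase.util.Base64, the helper whose version history is quoted above; they change only trailing whitespace, so the API the javadoc describes (encodeBytes, decode, encodeObject, the GZIP and DONT_BREAK_LINES option bits, and the file convenience methods) is untouched. A small usage sketch, assuming only the calls the quoted javadoc itself shows:

    import org.apache.hadoop.hbase.util.Base64;

    public class Base64Demo {
      public static void main(String[] args) {
        byte[] raw = "hello hbase".getBytes();

        // Plain encoding; DONT_BREAK_LINES suppresses the 76-character line wrap.
        String encoded = Base64.encodeBytes(raw, Base64.DONT_BREAK_LINES);

        // GZip-compress before encoding, as in the javadoc example above.
        String gzipped = Base64.encodeBytes(raw, Base64.GZIP | Base64.DONT_BREAK_LINES);

        // decode() detects gzip-compressed payloads and decompresses them automatically.
        byte[] roundTrip = Base64.decode(gzipped);

        System.out.println(encoded);
        System.out.println(new String(roundTrip));
      }
    }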
        • @@ -141,7 +141,7 @@ public class Base64 { /** * Encode using Base64-like encoding that is URL and Filename safe as - * described in Section 4 of RFC3548: + * described in Section 4 of RFC3548: * * http://www.faqs.org/rfcs/rfc3548.html. * It is important to note that data encoded this way is not @@ -159,7 +159,7 @@ public class Base64 { public final static int ORDERED = 32; /* ******** P R I V A T E F I E L D S ******** */ - + private static final Log LOG = LogFactory.getLog(Base64.class); /** Maximum line length (76) of Base64 output. */ @@ -290,8 +290,8 @@ public class Base64 { * exactly the same as the input value. It is described in the RFC change * request: * http://www.faqs.org/qa/rfcc-1940.html. - * - * It replaces "plus" and "slash" with "hyphen" and "underscore" and + * + * It replaces "plus" and "slash" with "hyphen" and "underscore" and * rearranges the alphabet so that the characters are in their natural sort * order. */ @@ -354,7 +354,7 @@ public class Base64 { } else if ((options & ORDERED) == ORDERED) { return _ORDERED_ALPHABET; - + } else { return _STANDARD_ALPHABET; } @@ -369,10 +369,10 @@ public class Base64 { protected final static byte[] getDecodabet(int options) { if ((options & URL_SAFE) == URL_SAFE) { return _URL_SAFE_DECODABET; - + } else if ((options & ORDERED) == ORDERED) { return _ORDERED_DECODABET; - + } else { return _STANDARD_DECODABET; } @@ -383,9 +383,9 @@ public class Base64 { /** * Main program. Used for testing. - * + * * Encodes or decodes two files from the command line - * + * * @param args command arguments */ public final static void main(String[] args) { @@ -410,7 +410,7 @@ public class Base64 { /** * Prints command line usage. - * + * * @param msg A message to include with usage info. */ private final static void usage(String msg) { @@ -426,7 +426,7 @@ public class Base64 { * significant bytes in your array is given by numSigBytes. The * array threeBytes needs only be as big as numSigBytes. * Code can reuse a byte array by passing a four-byte array as b4. - * + * * @param b4 A reusable byte array to reduce array instantiation * @param threeBytes the array to convert * @param numSigBytes the number of significant bytes in your array @@ -452,7 +452,7 @@ public class Base64 { *

          * This is the lowest level of the encoding methods with all possible * parameters. - * + * * @param source the array to convert * @param srcOffset the index where conversion begins * @param numSigBytes the number of significant bytes in your array @@ -465,7 +465,7 @@ public class Base64 { int numSigBytes, byte[] destination, int destOffset, int options) { byte[] ALPHABET = getAlphabet(options); - // 1 2 3 + // 1 2 3 // 01234567890123456789012345678901 Bit position // --------000000001111111122222222 Array position from threeBytes // --------| || || || | Six bit groups to index ALPHABET @@ -513,7 +513,7 @@ public class Base64 { * serialized object. If the object cannot be serialized or there is another * error, the method will return null. The object is not * GZip-compressed before being encoded. - * + * * @param serializableObject The object to encode * @return The Base64-encoded object * @since 1.4 @@ -538,7 +538,7 @@ public class Base64 { *

          * Example: * encodeObject( myObj, Base64.GZIP | Base64.DONT_BREAK_LINES ) - * + * * @param serializableObject The object to encode * @param options Specified options * @see Base64#GZIP @@ -565,11 +565,11 @@ public class Base64 { } catch (UnsupportedEncodingException uue) { return new String(baos.toByteArray()); - + } catch (IOException e) { LOG.error("error encoding object", e); return null; - + } finally { if (oos != null) { try { @@ -595,7 +595,7 @@ public class Base64 { /** * Encodes a byte array into Base64 notation. Does not GZip-compress data. - * + * * @param source The data to convert * @return encoded byte array * @since 1.4 @@ -613,13 +613,13 @@ public class Base64 { *

* DONT_BREAK_LINES: don't break lines at 76 characters. Note: * Technically, this makes your encoding non-compliant.
*
        - * + * *

        * Example: encodeBytes( myData, Base64.GZIP ) or *

        * Example: * encodeBytes( myData, Base64.GZIP | Base64.DONT_BREAK_LINES ) - * + * * @param source The data to convert * @param options Specified options * @see Base64#GZIP @@ -635,7 +635,7 @@ public class Base64 { /** * Encodes a byte array into Base64 notation. Does not GZip-compress data. - * + * * @param source The data to convert * @param off Offset in array where conversion should begin * @param len Length of data to convert @@ -655,13 +655,13 @@ public class Base64 { *

* DONT_BREAK_LINES: don't break lines at 76 characters. Note: * Technically, this makes your encoding non-compliant.
*
      - * + * *

      * Example: encodeBytes( myData, Base64.GZIP ) or *

      * Example: * encodeBytes( myData, Base64.GZIP | Base64.DONT_BREAK_LINES ) - * + * * @param source The data to convert * @param off Offset in array where conversion should begin * @param len Length of data to convert @@ -680,7 +680,7 @@ public class Base64 { GZIPOutputStream gzos = null; try { - gzos = + gzos = new GZIPOutputStream(new Base64OutputStream(baos, ENCODE | options)); gzos.write(source, off, len); @@ -694,7 +694,7 @@ public class Base64 { } catch (IOException e) { LOG.error("error encoding byte array", e); return null; - + } finally { if (gzos != null) { try { @@ -711,7 +711,7 @@ public class Base64 { } // end finally } // end Compress - + // Don't compress. Better not to use streams at all then. boolean breakLines = ((options & DONT_BREAK_LINES) == 0); @@ -766,7 +766,7 @@ public class Base64 { * This is the lowest level of the decoding methods with all possible * parameters. *

      - * + * * @param source the array to convert * @param srcOffset the index where conversion begins * @param destination the array to hold the conversion @@ -791,7 +791,7 @@ public class Base64 { destination[destOffset] = (byte) (outBuff >>> 16); return 1; - + } else if (source[srcOffset + 3] == EQUALS_SIGN) { // Example: DkL= // Two ways to do the same thing. Don't know which way I like best. // int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 ) @@ -824,9 +824,9 @@ public class Base64 { destination[destOffset + 2] = (byte) (outBuff); return 3; - + } catch (Exception e) { - LOG.error("error decoding bytes at " + source[srcOffset] + ": " + + LOG.error("error decoding bytes at " + source[srcOffset] + ": " + (DECODABET[source[srcOffset]]) + ", " + source[srcOffset + 1] + ": " + (DECODABET[source[srcOffset + 1]]) + ", " + source[srcOffset + 2] + ": " + (DECODABET[source[srcOffset + 2]]) + @@ -841,11 +841,11 @@ public class Base64 { * Very low-level access to decoding ASCII characters in the form of a byte * array. Does not support automatically gunzipping or any other "fancy" * features. - * + * * @param source The Base64 encoded data * @param off The offset of where to begin decoding * @param len The length of characters to decode - * @param options + * @param options * @see Base64#URL_SAFE * @see Base64#ORDERED * @return decoded data @@ -894,7 +894,7 @@ public class Base64 { /** * Decodes data from Base64 notation, automatically detecting gzip-compressed * data and decompressing it. - * + * * @param s the string to decode * @return the decoded data * @since 1.4 @@ -906,7 +906,7 @@ public class Base64 { /** * Decodes data from Base64 notation, automatically detecting gzip-compressed * data and decompressing it. - * + * * @param s the string to decode * @param options * @see Base64#URL_SAFE @@ -972,7 +972,7 @@ public class Base64 { /** * Attempts to decode Base64 data and deserialize a Java Object within. * Returns null if there was an error. - * + * * @param encodedObject The Base64 data to decode * @return The decoded and deserialized object * @since 1.5 @@ -1008,11 +1008,11 @@ public class Base64 { /** * Convenience method for encoding data to a file. - * + * * @param dataToEncode byte array of data to encode in base64 form * @param filename Filename for saving encoded data * @return true if successful, false otherwise - * + * * @since 2.1 */ public static boolean encodeToFile(byte[] dataToEncode, String filename) { @@ -1026,7 +1026,7 @@ public class Base64 { } catch (IOException e) { LOG.error("error encoding file: " + filename, e); success = false; - + } finally { if (bos != null) { try { @@ -1042,11 +1042,11 @@ public class Base64 { /** * Convenience method for decoding data to a file. - * + * * @param dataToDecode Base64-encoded data as a string * @param filename Filename for saving decoded data * @return true if successful, false otherwise - * + * * @since 2.1 */ public static boolean decodeToFile(String dataToDecode, String filename) { @@ -1056,7 +1056,7 @@ public class Base64 { bos = new Base64OutputStream(new FileOutputStream(filename), DECODE); bos.write(dataToDecode.getBytes(PREFERRED_ENCODING)); success = true; - + } catch (IOException e) { LOG.error("error decoding to file: " + filename, e); success = false; @@ -1076,10 +1076,10 @@ public class Base64 { /** * Convenience method for reading a base64-encoded file and decoding it. 
- * + * * @param filename Filename for reading encoded data * @return decoded byte array or null if unsuccessful - * + * * @since 2.1 */ public static byte[] decodeFromFile(String filename) { @@ -1091,33 +1091,33 @@ public class Base64 { // Check the size of file if (file.length() > Integer.MAX_VALUE) { - LOG.fatal("File is too big for this convenience method (" + + LOG.fatal("File is too big for this convenience method (" + file.length() + " bytes)."); return null; } // end if: file too big for int index - + buffer = new byte[(int) file.length()]; // Open a stream - + bis = new Base64InputStream(new BufferedInputStream( new FileInputStream(file)), DECODE); // Read until done - + int length = 0; for (int numBytes = 0; (numBytes = bis.read(buffer, length, 4096)) >= 0; ) { length += numBytes; } - + // Save in a variable to return - + decodedData = new byte[length]; System.arraycopy(buffer, 0, decodedData, 0, length); } catch (IOException e) { LOG.error("Error decoding from file " + filename, e); - + } finally { if (bis != null) { try { @@ -1133,10 +1133,10 @@ public class Base64 { /** * Convenience method for reading a binary file and base64-encoding it. - * + * * @param filename Filename for reading binary data * @return base64-encoded string or null if unsuccessful - * + * * @since 2.1 */ public static String encodeFromFile(String filename) { @@ -1144,9 +1144,9 @@ public class Base64 { Base64InputStream bis = null; try { File file = new File(filename); - + // Need max() for math on small files (v2.2.1) - + byte[] buffer = new byte[Math.max((int) (file.length() * 1.4), 40)]; // Open a stream @@ -1161,12 +1161,12 @@ public class Base64 { } // Save in a variable to return - + encodedData = new String(buffer, 0, length, PREFERRED_ENCODING); } catch (IOException e) { LOG.error("Error encoding from file " + filename, e); - + } finally { if (bis != null) { try { @@ -1182,7 +1182,7 @@ public class Base64 { /** * Reads infile and encodes it to outfile. - * + * * @param infile Input file * @param outfile Output file * @since 2.2 @@ -1193,7 +1193,7 @@ public class Base64 { try { out = new BufferedOutputStream(new FileOutputStream(outfile)); out.write(encoded.getBytes("US-ASCII")); // Strict, 7-bit output. - + } catch (IOException e) { LOG.error("error encoding from file " + infile + " to " + outfile, e); @@ -1210,7 +1210,7 @@ public class Base64 { /** * Reads infile and decodes it to outfile. - * + * * @param infile Input file * @param outfile Output file * @since 2.2 @@ -1221,7 +1221,7 @@ public class Base64 { try { out = new BufferedOutputStream(new FileOutputStream(outfile)); out.write(decoded); - + } catch (IOException e) { LOG.error("error decoding from file " + infile + " to " + outfile, e); @@ -1242,7 +1242,7 @@ public class Base64 { * A {@link Base64.Base64InputStream} will read data from another * InputStream, given in the constructor, and * encode/decode to/from Base64 notation on the fly. - * + * * @see Base64 * @since 1.3 */ @@ -1259,7 +1259,7 @@ public class Base64 { /** * Constructs a {@link Base64InputStream} in DECODE mode. - * + * * @param in the InputStream from which to read data. * @since 1.3 */ @@ -1271,18 +1271,18 @@ public class Base64 { * Constructs a {@link Base64.Base64InputStream} in either ENCODE or DECODE mode. *

      * Valid options: - * + * *

            *   ENCODE or DECODE: Encode or Decode as data is read.
            *   DONT_BREAK_LINES: don't break lines at 76 characters
            *     (only meaningful when encoding)
            *     <i>Note: Technically, this makes your encoding non-compliant.</i>
            * 
      - * + * *
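[Editor's note: a sketch of the DECODE-mode stream these options describe, mirroring the Example line that follows; the class name and file name are illustrative.]

    import java.io.BufferedInputStream;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import org.apache.hadoop.hbase.util.Base64;

    public class Base64StreamReadSketch {
      public static void main(String[] args) throws IOException {
        // Bytes read from 'encoded.b64' are decoded from Base64 on the fly.
        InputStream in = new Base64.Base64InputStream(
            new BufferedInputStream(new FileInputStream("encoded.b64")), Base64.DECODE);
        try {
          int b;
          while ((b = in.read()) != -1) {
            System.out.write(b);
          }
          System.out.flush();
        } finally {
          in.close();
        }
      }
    }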

      * Example: new Base64.Base64InputStream( in, Base64.DECODE ) - * - * + * + * * @param in the InputStream from which to read data. * @param options Specified options * @see Base64#ENCODE @@ -1306,7 +1306,7 @@ public class Base64 { /** * Reads enough of the input stream to convert to/from Base64 and returns * the next byte. - * + * * @return next byte * @since 1.3 */ @@ -1364,10 +1364,10 @@ public class Base64 { if (i == 4) { numSigBytes = decode4to3(b4, 0, buffer, 0, options); position = 0; - + } else if (i == 0) { return -1; - + } else { // Must have broken out from above. throw new IOException("Improperly padded Base64 input."); @@ -1410,7 +1410,7 @@ public class Base64 { * Calls {@link #read()} repeatedly until the end of stream is reached or * len bytes are read. Returns number of bytes read into array * or -1 if end of stream is encountered. - * + * * @param dest array to hold values * @param off offset for array * @param len max number of bytes to read into array @@ -1442,7 +1442,7 @@ public class Base64 { * A {@link Base64.Base64OutputStream} will write data to another * OutputStream, given in the constructor, and * encode/decode to/from Base64 notation on the fly. - * + * * @see Base64 * @since 1.3 */ @@ -1460,7 +1460,7 @@ public class Base64 { /** * Constructs a {@link Base64OutputStream} in ENCODE mode. - * + * * @param out the OutputStream to which data will be written. * @since 1.3 */ @@ -1472,17 +1472,17 @@ public class Base64 { * Constructs a {@link Base64OutputStream} in either ENCODE or DECODE mode. *

      * Valid options: - * + * *

            *   ENCODE or DECODE: Encode or Decode as data is read.
            *   DONT_BREAK_LINES: don't break lines at 76 characters
            *     (only meaningful when encoding)
            *     Note: Technically, this makes your encoding non-compliant.
            * 
      - * + * *
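[Editor's note: the complementary ENCODE-mode sketch for the output stream this option list describes, mirroring the Example line just below; class and file names are illustrative.]

    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;
    import org.apache.hadoop.hbase.util.Base64;

    public class Base64StreamWriteSketch {
      public static void main(String[] args) throws IOException {
        // Bytes written here are encoded to Base64 before reaching the file.
        OutputStream out = new Base64.Base64OutputStream(
            new FileOutputStream("encoded.b64"), Base64.ENCODE);
        try {
          out.write("raw bytes to encode".getBytes());
        } finally {
          out.close();  // close() flushes the final, padded Base64 quantum
        }
      }
    }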

      * Example: new Base64.Base64OutputStream( out, Base64.ENCODE ) - * + * * @param out the OutputStream to which data will be written. * @param options Specified options. * @see Base64#ENCODE @@ -1509,7 +1509,7 @@ public class Base64 { * notation. When encoding, bytes are buffered three at a time before the * output stream actually gets a write() call. When decoding, bytes are * buffered four at a time. - * + * * @param theByte the byte to write * @since 1.3 */ @@ -1534,7 +1534,7 @@ public class Base64 { position = 0; } // end if: enough to output - + } else { // Meaningful Base64 character? if (decodabet[theByte & 0x7f] > WHITE_SPACE_ENC) { @@ -1544,7 +1544,7 @@ public class Base64 { out.write(b4, 0, len); position = 0; } // end if: enough to output - + } else if (decodabet[theByte & 0x7f] != WHITE_SPACE_ENC) { throw new IOException("Invalid character in Base64 data."); } // end else: not white space either @@ -1554,7 +1554,7 @@ public class Base64 { /** * Calls {@link #write(int)} repeatedly until len bytes are * written. - * + * * @param theBytes array from which to read bytes * @param off offset for array * @param len max number of bytes to read into array @@ -1577,7 +1577,7 @@ public class Base64 { /** * Method added by PHIL. [Thanks, PHIL. -Rob] This pads the buffer without * closing the stream. - * + * * @throws IOException */ public void flushBase64() throws IOException { @@ -1595,7 +1595,7 @@ public class Base64 { /** * Flushes and closes (I think, in the superclass) the stream. - * + * * @since 1.3 */ @Override @@ -1626,7 +1626,7 @@ public class Base64 { /** * Resumes encoding of the stream. May be helpful if you need to embed a * piece of base640-encoded data in a stream. - * + * * @since 1.5.1 */ public void resumeEncoding() { diff --git a/src/java/org/apache/hadoop/hbase/util/Bytes.java b/src/java/org/apache/hadoop/hbase/util/Bytes.java index 3b8d460..8e3c823 100644 --- a/src/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/src/java/org/apache/hadoop/hbase/util/Bytes.java @@ -65,7 +65,7 @@ public class Bytes { * Size of float in bytes */ public static final int SIZEOF_FLOAT = Float.SIZE/Byte.SIZE; - + /** * Size of int in bytes */ diff --git a/src/java/org/apache/hadoop/hbase/util/ClassSize.java b/src/java/org/apache/hadoop/hbase/util/ClassSize.java index 81f4bff..30b22b9 100755 --- a/src/java/org/apache/hadoop/hbase/util/ClassSize.java +++ b/src/java/org/apache/hadoop/hbase/util/ClassSize.java @@ -30,12 +30,12 @@ import org.apache.commons.logging.LogFactory; /** * Class for determining the "size" of a class, an attempt to calculate the * actual bytes that an object of this class will occupy in memory - * + * * The core of this class is taken from the Derby project */ public class ClassSize { static final Log LOG = LogFactory.getLog(ClassSize.class); - + private static int nrOfRefsPerObj = 2; /** Array overhead */ @@ -43,61 +43,61 @@ public class ClassSize { /** Overhead for ArrayList(0) */ public static int ARRAYLIST = 0; - + /** Overhead for ByteBuffer */ public static int BYTE_BUFFER = 0; /** Overhead for an Integer */ public static int INTEGER = 0; - + /** Overhead for entry in map */ public static int MAP_ENTRY = 0; - + /** Object overhead is minimum 2 * reference size (8 bytes on 64-bit) */ public static int OBJECT = 0; - + /** Reference size is 8 bytes on 64-bit, 4 bytes on 32-bit */ public static int REFERENCE = 0; - + /** String overhead */ public static int STRING = 0; /** Overhead for TreeMap */ public static int TREEMAP = 0; - + /** Overhead for 
ConcurrentHashMap */ public static int CONCURRENT_HASHMAP = 0; - + /** Overhead for ConcurrentHashMap.Entry */ public static int CONCURRENT_HASHMAP_ENTRY = 0; - + /** Overhead for ConcurrentHashMap.Segment */ public static int CONCURRENT_HASHMAP_SEGMENT = 0; - + /** Overhead for ConcurrentSkipListMap */ public static int CONCURRENT_SKIPLISTMAP = 0; - + /** Overhead for ConcurrentSkipListMap Entry */ public static int CONCURRENT_SKIPLISTMAP_ENTRY = 0; - + /** Overhead for ReentrantReadWriteLock */ public static int REENTRANT_LOCK = 0; - + /** Overhead for AtomicLong */ public static int ATOMIC_LONG = 0; - + /** Overhead for AtomicInteger */ public static int ATOMIC_INTEGER = 0; - + /** Overhead for AtomicBoolean */ public static int ATOMIC_BOOLEAN = 0; - + /** Overhead for CopyOnWriteArraySet */ public static int COPYONWRITE_ARRAYSET = 0; - + /** Overhead for CopyOnWriteArrayList */ public static int COPYONWRITE_ARRAYLIST = 0; - + private static final String THIRTY_TWO = "32"; /** @@ -108,7 +108,7 @@ public class ClassSize { // Figure out whether this is a 32 or 64 bit machine. Properties sysProps = System.getProperties(); String arcModel = sysProps.getProperty("sun.arch.data.model"); - + //Default value is set to 8, covering the case when arcModel is unknown REFERENCE = 8; if (arcModel.equals(THIRTY_TWO)) { @@ -116,55 +116,55 @@ public class ClassSize { } OBJECT = 2 * REFERENCE; - + ARRAY = 3 * REFERENCE; ARRAYLIST = align(OBJECT + align(REFERENCE) + align(ARRAY) + (2 * Bytes.SIZEOF_INT)); - - BYTE_BUFFER = align(OBJECT + align(REFERENCE) + align(ARRAY) + - (5 * Bytes.SIZEOF_INT) + - (3 * Bytes.SIZEOF_BOOLEAN) + Bytes.SIZEOF_LONG); - + + BYTE_BUFFER = align(OBJECT + align(REFERENCE) + align(ARRAY) + + (5 * Bytes.SIZEOF_INT) + + (3 * Bytes.SIZEOF_BOOLEAN) + Bytes.SIZEOF_LONG); + INTEGER = align(OBJECT + Bytes.SIZEOF_INT); - + MAP_ENTRY = align(OBJECT + 5 * REFERENCE + Bytes.SIZEOF_BOOLEAN); - + TREEMAP = align(OBJECT + (2 * Bytes.SIZEOF_INT) + align(7 * REFERENCE)); - + STRING = align(OBJECT + ARRAY + REFERENCE + 3 * Bytes.SIZEOF_INT); - - CONCURRENT_HASHMAP = align((2 * Bytes.SIZEOF_INT) + ARRAY + + + CONCURRENT_HASHMAP = align((2 * Bytes.SIZEOF_INT) + ARRAY + (6 * REFERENCE) + OBJECT); - + CONCURRENT_HASHMAP_ENTRY = align(REFERENCE + OBJECT + (3 * REFERENCE) + (2 * Bytes.SIZEOF_INT)); - - CONCURRENT_HASHMAP_SEGMENT = align(REFERENCE + OBJECT + + + CONCURRENT_HASHMAP_SEGMENT = align(REFERENCE + OBJECT + (3 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_FLOAT + ARRAY); - + CONCURRENT_SKIPLISTMAP = align(Bytes.SIZEOF_INT + OBJECT + (8 * REFERENCE)); - + CONCURRENT_SKIPLISTMAP_ENTRY = align( - align(OBJECT + (3 * REFERENCE)) + /* one node per entry */ + align(OBJECT + (3 * REFERENCE)) + /* one node per entry */ align((OBJECT + (3 * REFERENCE))/2)); /* one index per two entries */ - + REENTRANT_LOCK = align(OBJECT + (3 * REFERENCE)); - + ATOMIC_LONG = align(OBJECT + Bytes.SIZEOF_LONG); - + ATOMIC_INTEGER = align(OBJECT + Bytes.SIZEOF_INT); - + ATOMIC_BOOLEAN = align(OBJECT + Bytes.SIZEOF_BOOLEAN); - + COPYONWRITE_ARRAYSET = align(OBJECT + REFERENCE); - + COPYONWRITE_ARRAYLIST = align(OBJECT + (2 * REFERENCE) + ARRAY); } - + /** - * The estimate of the size of a class instance depends on whether the JVM - * uses 32 or 64 bit addresses, that is it depends on the size of an object + * The estimate of the size of a class instance depends on whether the JVM + * uses 32 or 64 bit addresses, that is it depends on the size of an object * reference. It is a linear function of the size of a reference, e.g. 
* 24 + 5*r where r is the size of a reference (usually 4 or 8 bytes). * @@ -172,7 +172,7 @@ public class ClassSize { * in the above example. * * @param cl A class whose instance size is to be estimated - * @return an array of 3 integers. The first integer is the size of the + * @return an array of 3 integers. The first integer is the size of the * primitives, the second the number of arrays and the third the number of * references. */ @@ -181,7 +181,7 @@ public class ClassSize { int primitives = 0; int arrays = 0; //The number of references that a new object takes - int references = nrOfRefsPerObj; + int references = nrOfRefsPerObj; for( ; null != cl; cl = cl.getSuperclass()) { Field[] field = cl.getDeclaredFields(); @@ -230,7 +230,7 @@ public class ClassSize { } /** - * Estimate the static space taken up by a class instance given the + * Estimate the static space taken up by a class instance given the * coefficients returned by getSizeCoefficients. * * @param coeff the coefficients @@ -246,19 +246,19 @@ public class ClassSize { if (LOG.isDebugEnabled()) { // Write out region name as string and its encoded name. LOG.debug("Primitives " + coeff[0] + ", arrays " + coeff[1] + - ", references(includes " + nrOfRefsPerObj + - " for object overhead) " + coeff[2] + ", refSize " + REFERENCE + + ", references(includes " + nrOfRefsPerObj + + " for object overhead) " + coeff[2] + ", refSize " + REFERENCE + ", size " + size); } } return size; - } + } /** - * Estimate the static space taken up by the fields of a class. This includes - * the space taken up by by references (the pointer) but not by the referenced - * object. So the estimated size of an array field does not depend on the size - * of the array. Similarly the size of an object (reference) field does not + * Estimate the static space taken up by the fields of a class. This includes + * the space taken up by by references (the pointer) but not by the referenced + * object. So the estimated size of an array field does not depend on the size + * of the array. Similarly the size of an object (reference) field does not * depend on the object. * * @return the size estimate in bytes. @@ -266,7 +266,7 @@ public class ClassSize { @SuppressWarnings("unchecked") public static long estimateBase(Class cl, boolean debug) { return estimateBaseFromCoefficients( getSizeCoefficients(cl, debug), debug); - } + } /** * Aligns a number to 8. @@ -276,7 +276,7 @@ public class ClassSize { public static int align(int num) { return (int)(align((long)num)); } - + /** * Aligns a number to 8. * @param num number to align to 8 @@ -287,6 +287,6 @@ public class ClassSize { //stored and sent together return ((num + 7) >> 3) << 3; } - + } diff --git a/src/java/org/apache/hadoop/hbase/util/FSUtils.java b/src/java/org/apache/hadoop/hbase/util/FSUtils.java index cedbe41..5d3f09a 100644 --- a/src/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/src/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -68,7 +68,7 @@ public class FSUtils { /** * Check if directory exists. If it does not, create it. 
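[Editor's note: stepping back to the ClassSize hunks a little above, a sketch of how the align()/estimateBase() pair is meant to be used; TreeMap is just an arbitrary class to size and the class name is illustrative.]

    import org.apache.hadoop.hbase.util.ClassSize;

    public class ClassSizeSketch {
      public static void main(String[] args) {
        // align() rounds up to the JVM's 8-byte allocation granularity:
        // ((num + 7) >> 3) << 3, so 13 becomes 16.
        System.out.println(ClassSize.align(13));
        // estimateBase() sums primitive sizes plus per-array and per-reference
        // overhead gathered reflectively over the class hierarchy; the boolean
        // toggles debug output of the coefficients.
        System.out.println(ClassSize.estimateBase(java.util.TreeMap.class, false));
      }
    }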
- * @param fs + * @param fs * @param dir * @return Path * @throws IOException @@ -100,11 +100,11 @@ public class FSUtils { /** * Checks to see if the specified file system is available - * + * * @param fs * @throws IOException */ - public static void checkFileSystemAvailable(final FileSystem fs) + public static void checkFileSystemAvailable(final FileSystem fs) throws IOException { if (!(fs instanceof DistributedFileSystem)) { return; @@ -127,7 +127,7 @@ public class FSUtils { io.initCause(exception); throw io; } - + /** * If DFS, check safe mode and if so, wait until we clear it. * @param conf @@ -169,7 +169,7 @@ public class FSUtils { /** * Verifies current version of file system - * + * * @param fs * @param rootdir * @return null if no version file exists, version string otherwise. @@ -190,20 +190,20 @@ public class FSUtils { } return version; } - + /** * Verifies current version of file system - * + * * @param fs file system * @param rootdir root directory of HBase installation - * @param message if true, issues a message on System.out - * + * @param message if true, issues a message on System.out + * * @throws IOException */ - public static void checkVersion(FileSystem fs, Path rootdir, + public static void checkVersion(FileSystem fs, Path rootdir, boolean message) throws IOException { String version = getVersion(fs, rootdir); - + if (version == null) { if (!rootRegionExists(fs, rootdir)) { // rootDir is empty (no version file and no root region) @@ -213,7 +213,7 @@ public class FSUtils { } } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return; - + // version is deprecated require migration // Output on stdout so user sees it in terminal. String msg = "File system needs to be upgraded." @@ -225,28 +225,28 @@ public class FSUtils { } throw new FileSystemVersionException(msg); } - + /** * Sets version of file system - * + * * @param fs * @param rootdir * @throws IOException */ - public static void setVersion(FileSystem fs, Path rootdir) + public static void setVersion(FileSystem fs, Path rootdir) throws IOException { setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION); } /** * Sets version of file system - * + * * @param fs * @param rootdir * @param version * @throws IOException */ - public static void setVersion(FileSystem fs, Path rootdir, String version) + public static void setVersion(FileSystem fs, Path rootdir, String version) throws IOException { FSDataOutputStream s = fs.create(new Path(rootdir, HConstants.VERSION_FILE_NAME)); @@ -257,7 +257,7 @@ public class FSUtils { /** * Verifies root directory path is a valid URI with a scheme - * + * * @param root root directory path * @throws IOException if not a valid URI with a scheme */ @@ -274,7 +274,7 @@ public class FSUtils { throw io; } } - + /** * Return the 'path' component of a Path. In Hadoop, Path is an URI. This * method returns the 'path' component of a Path's URI: e.g. If a Path is @@ -283,7 +283,7 @@ public class FSUtils { * This method is useful if you want to print out a Path without qualifying * Filesystem instance. * @param p Filesystem Path whose 'path' component we are to return. - * @return Path portion of the Filesystem + * @return Path portion of the Filesystem */ public static String getPath(Path p) { return p.toUri().getPath(); @@ -293,7 +293,7 @@ public class FSUtils { * @param c * @return Path to hbase root directory: i.e. hbase.rootdir as a * Path. 
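[Editor's note: the FSUtils version-checking utilities touched above are typically driven together; a minimal sketch, assuming hbase.rootdir is configured in the environment. The class name is illustrative.]

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class FsVersionCheckSketch {
      public static void main(String[] args) throws Exception {
        HBaseConfiguration conf = new HBaseConfiguration();
        FileSystem fs = FileSystem.get(conf);
        Path rootdir = FSUtils.getRootDir(conf);  // resolves hbase.rootdir to a Path
        FSUtils.checkFileSystemAvailable(fs);     // no-op unless fs is a DistributedFileSystem
        // Throws FileSystemVersionException when the on-disk layout needs migration;
        // 'true' also prints the message on stdout.
        FSUtils.checkVersion(fs, rootdir, true);
      }
    }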
- * @throws IOException + * @throws IOException */ public static Path getRootDir(final HBaseConfiguration c) throws IOException { FileSystem fs = FileSystem.get(c); @@ -307,10 +307,10 @@ public class FSUtils { } return rootdir; } - + /** * Checks if root region exists - * + * * @param fs file system * @param rootdir root directory of HBase installation * @return true if exists diff --git a/src/java/org/apache/hadoop/hbase/util/Hash.java b/src/java/org/apache/hadoop/hbase/util/Hash.java index d5a5e8a..6e91d8f 100644 --- a/src/java/org/apache/hadoop/hbase/util/Hash.java +++ b/src/java/org/apache/hadoop/hbase/util/Hash.java @@ -30,7 +30,7 @@ public abstract class Hash { public static final int JENKINS_HASH = 0; /** Constant to denote {@link MurmurHash}. */ public static final int MURMUR_HASH = 1; - + /** * This utility method converts String representation of hash function name * to a symbolic constant. Currently two function types are supported, @@ -47,7 +47,7 @@ public abstract class Hash { return INVALID_HASH; } } - + /** * This utility method converts the name of the configured * hash type to a symbolic constant. @@ -58,7 +58,7 @@ public abstract class Hash { String name = conf.get("hbase.hash.type", "murmur"); return parseHashType(name); } - + /** * Get a singleton instance of hash function of a given type. * @param type predefined hash type @@ -74,7 +74,7 @@ public abstract class Hash { return null; } } - + /** * Get a singleton instance of hash function of a type * defined in the configuration. @@ -85,7 +85,7 @@ public abstract class Hash { int type = getHashType(conf); return getInstance(type); } - + /** * Calculate a hash using all bytes from the input argument, and * a seed of -1. @@ -95,7 +95,7 @@ public abstract class Hash { public int hash(byte[] bytes) { return hash(bytes, bytes.length, -1); } - + /** * Calculate a hash using all bytes from the input argument, * and a provided seed value. @@ -106,7 +106,7 @@ public abstract class Hash { public int hash(byte[] bytes, int initval) { return hash(bytes, bytes.length, initval); } - + /** * Calculate a hash using bytes from 0 to length, and * the provided seed value diff --git a/src/java/org/apache/hadoop/hbase/util/InfoServer.java b/src/java/org/apache/hadoop/hbase/util/InfoServer.java index 02da4f3..8afc220 100644 --- a/src/java/org/apache/hadoop/hbase/util/InfoServer.java +++ b/src/java/org/apache/hadoop/hbase/util/InfoServer.java @@ -36,14 +36,14 @@ import org.mortbay.jetty.webapp.WebAppContext; * "/static/" -> points to common static files (src/webapps/static) * "/" -> the jsp server code from (src/webapps/) */ -public class InfoServer extends HttpServer { +public class InfoServer extends HttpServer { /** * Create a status server on the given port. * The jsp scripts are taken from src/webapps/name. * @param name The name of the server * @param bindAddress * @param port The port to use on the server - * @param findPort whether the server should start at the given port and + * @param findPort whether the server should start at the given port and * increment by 1 until it finds a free port. * @throws IOException */ @@ -84,7 +84,7 @@ public class InfoServer extends HttpServer { } // Now do my logs. - // set up the context for "/logs/" if "hadoop.log.dir" property is defined. + // set up the context for "/logs/" if "hadoop.log.dir" property is defined. 
String logDir = System.getProperty("hbase.log.dir"); if (logDir != null) { Context logContext = new Context(parent, "/logs"); @@ -115,8 +115,8 @@ public class InfoServer extends HttpServer { private static String getWebAppsPath(final String path) throws IOException { URL url = InfoServer.class.getClassLoader().getResource(path); - if (url == null) - throw new IOException("webapps not found in CLASSPATH: " + path); + if (url == null) + throw new IOException("webapps not found in CLASSPATH: " + path); return url.toString(); } diff --git a/src/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/src/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java index 40e9937..b225bdf 100644 --- a/src/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java +++ b/src/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java @@ -1,6 +1,6 @@ /** * Copyright 2010 The Apache Software Foundation - * + * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class JVMClusterUtil { */ public static class RegionServerThread extends Thread { private final HRegionServer regionServer; - + public RegionServerThread(final HRegionServer r, final int index) { super(r, "RegionServer:" + index); this.regionServer = r; @@ -49,7 +49,7 @@ public class JVMClusterUtil { public HRegionServer getRegionServer() { return this.regionServer; } - + /** * Block until the region server has come online, indicating it is ready * to be used. @@ -77,7 +77,7 @@ public class JVMClusterUtil { public static JVMClusterUtil.RegionServerThread createRegionServerThread(final HBaseConfiguration c, final Class hrsc, final int index) throws IOException { - HRegionServer server; + HRegionServer server; try { server = hrsc.getConstructor(HBaseConfiguration.class).newInstance(c); } catch (Exception e) { diff --git a/src/java/org/apache/hadoop/hbase/util/JenkinsHash.java b/src/java/org/apache/hadoop/hbase/util/JenkinsHash.java index a34a9e8..f592ab0 100644 --- a/src/java/org/apache/hadoop/hbase/util/JenkinsHash.java +++ b/src/java/org/apache/hadoop/hbase/util/JenkinsHash.java @@ -25,13 +25,13 @@ import java.io.IOException; /** * Produces 32-bit hash for hash table lookup. - * + * *

      lookup3.c, by Bob Jenkins, May 2006, Public Domain.
        *
        * You can use this free for any purpose.  It's in the public domain.
        * It has no warranty.
        * 
      - * + * * @see lookup3.c * @see Hash Functions (and how this * function compares to others such as CRC, MD?, etc @@ -41,9 +41,9 @@ import java.io.IOException; public class JenkinsHash extends Hash { private static long INT_MASK = 0x00000000ffffffffL; private static long BYTE_MASK = 0x00000000000000ffL; - + private static JenkinsHash _instance = new JenkinsHash(); - + public static Hash getInstance() { return _instance; } @@ -55,26 +55,26 @@ public class JenkinsHash extends Hash { /** * taken from hashlittle() -- hash a variable-length key into a 32-bit value - * + * * @param key the key (the unaligned variable-length array of bytes) * @param nbytes number of bytes to include in hash * @param initval can be any integer value * @return a 32-bit value. Every bit of the key affects every bit of the * return value. Two keys differing by one or two bits will have totally * different hash values. - * + * *

      The best hash table sizes are powers of 2. There is no need to do mod * a prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask. * For example, if you need only 10 bits, do * h = (h & hashmask(10)); * In which case, the hash table should have hashsize(10) elements. - * + * *

      If you are hashing n strings byte[][] k, do it like this: * for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h); - * + * *
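[Editor's note: the chained-seed idiom spelled out just above translates directly into a small sketch using the JenkinsHash singleton; the byte arrays and class name are illustrative.]

    import org.apache.hadoop.hbase.util.Hash;
    import org.apache.hadoop.hbase.util.JenkinsHash;

    public class ChainedHashSketch {
      public static void main(String[] args) {
        byte[][] k = { "row".getBytes(), "family".getBytes(), "qualifier".getBytes() };
        Hash hasher = JenkinsHash.getInstance();
        // Feed each previous hash back in as the seed for the next key,
        // exactly as the comment above suggests.
        int h = 0;
        for (int i = 0; i < k.length; ++i) {
          h = hasher.hash(k[i], h);
        }
        System.out.println("combined hash = " + h);
      }
    }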

      By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this * code any way you wish, private, educational, or commercial. It's free. - * + * *

      Use for hash table lookup, or anything where one collision in 2^^32 is * acceptable. Do NOT use for cryptographic purposes. */ @@ -98,16 +98,16 @@ public class JenkinsHash extends Hash { c = (c + (((key[offset + 9] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; c = (c + (((key[offset + 10] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; c = (c + (((key[offset + 11] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; - + /* * mix -- mix 3 32-bit values reversibly. * This is reversible, so any information in (a,b,c) before mix() is * still in (a,b,c) after mix(). - * + * * If four pairs of (a,b,c) inputs are run through mix(), or through * mix() in reverse, there are at least 32 bits of the output that * are sometimes the same for one pair and different for another pair. - * + * * This was tested for: * - pairs that differed by one bit, by two bits, in any combination * of top bits of (a,b,c), or in any combination of bottom bits of @@ -118,22 +118,22 @@ public class JenkinsHash extends Hash { * difference. * - the base values were pseudorandom, all zero but one bit set, or * all zero plus a counter that starts at zero. - * + * * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that * satisfy this are * 4 6 8 16 19 4 * 9 15 3 18 27 15 * 14 9 3 7 17 3 - * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for + * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for * "differ" defined as + with a one-bit base and a two-bit delta. I * used http://burtleburtle.net/bob/hash/avalanche.html to choose * the operations, constants, and arrangements of the variables. - * + * * This does not achieve avalanche. There are input bits of (a,b,c) * that fail to affect some output bits of (a,b,c), especially of a. * The most thoroughly mixed value is c, but it doesn't really even * achieve avalanche in c. - * + * * This allows some parallelism. Read-after-writes are good at doubling * the number of bits affected, so the goal of mixing pulls in the * opposite direction as the goal of parallelism. I did what I could. @@ -150,7 +150,7 @@ public class JenkinsHash extends Hash { * b -= a; b ^= rot(a,19); a += c; \ * c -= b; c ^= rot(b, 4); b += a; \ * } - * + * * mix(a,b,c); */ a = (a - c) & INT_MASK; a ^= rot(c, 4); c = (c + b) & INT_MASK; @@ -193,21 +193,21 @@ public class JenkinsHash extends Hash { } /* * final -- final mixing of 3 32-bit values (a,b,c) into c - * + * * Pairs of (a,b,c) values differing in only a few bits will usually * produce values of c that look totally different. This was tested for * - pairs that differed by one bit, by two bits, in any combination * of top bits of (a,b,c), or in any combination of bottom bits of * (a,b,c). - * + * * - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as * is commonly produced by subtraction) look like a single 1-bit * difference. - * + * * - the base values were pseudorandom, all zero but one bit set, or * all zero plus a counter that starts at zero. 
- * + * * These constants passed: * 14 11 25 16 4 14 24 * 12 14 25 16 4 14 24 @@ -215,9 +215,9 @@ public class JenkinsHash extends Hash { * 4 8 15 26 3 22 24 * 10 8 15 26 3 22 24 * 11 8 15 26 3 22 24 - * + * * #define final(a,b,c) \ - * { + * { * c ^= b; c -= rot(b,14); \ * a ^= c; a -= rot(c,11); \ * b ^= a; b -= rot(a,25); \ @@ -226,7 +226,7 @@ public class JenkinsHash extends Hash { * b ^= a; b -= rot(a,14); \ * c ^= b; c -= rot(b,24); \ * } - * + * */ c ^= b; c = (c - rot(b,14)) & INT_MASK; a ^= c; a = (a - rot(c,11)) & INT_MASK; @@ -238,7 +238,7 @@ public class JenkinsHash extends Hash { return (int)(c & INT_MASK); } - + /** * Compute the hash of the specified file * @param args name of file to compute hash of. diff --git a/src/java/org/apache/hadoop/hbase/util/Keying.java b/src/java/org/apache/hadoop/hbase/util/Keying.java index 49ed739..77195c9 100644 --- a/src/java/org/apache/hadoop/hbase/util/Keying.java +++ b/src/java/org/apache/hadoop/hbase/util/Keying.java @@ -37,7 +37,7 @@ public class Keying { /** * Makes a key out of passed URI for use as row name or column qualifier. - * + * * This method runs transforms on the passed URI so it sits better * as a key (or portion-of-a-key) in hbase. The host portion of * the URI authority is reversed so subdomains sort under their parent @@ -49,10 +49,10 @@ public class Keying { * r:http://org.apache.lucene/index.html?query=something#middle * The transforms are reversible. No transform is done if passed URI is * not hierarchical. - * + * *

      If authority userinfo is present, will mess up the sort * (until we do more work).
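[Editor's note: a sketch of the createKey() transform this Javadoc describes; the input/output pair is the one quoted in the hunk above, and the class name is illustrative.]

    import org.apache.hadoop.hbase.util.Keying;

    public class KeyingSketch {
      public static void main(String[] args) {
        // Host labels are reversed so subdomains sort under their parent domain.
        String key = Keying.createKey(
            "http://lucene.apache.org/index.html?query=something#middle");
        // -> r:http://org.apache.lucene/index.html?query=something#middle
        System.out.println(key);
      }
    }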

      - * + * * @param u URL to transform. * @return An opaque URI of artificial 'r' scheme with host portion of URI * authority reversed (if present). @@ -70,10 +70,10 @@ public class Keying { } return SCHEME + m.group(1) + reverseHostname(m.group(2)) + m.group(3); } - + /** * Reverse the {@link #createKey(String)} transform. - * + * * @param s URI made by {@link #createKey(String)}. * @return 'Restored' URI made by reversing the {@link #createKey(String)} * transform. @@ -89,14 +89,14 @@ public class Keying { } return m.group(1) + reverseHostname(m.group(2)) + m.group(3); } - + private static Matcher getMatcher(final String u) { if (u == null || u.length() <= 0) { return null; } return URI_RE_PARSER.matcher(u); } - + private static String reverseHostname(final String hostname) { if (hostname == null) { return ""; diff --git a/src/java/org/apache/hadoop/hbase/util/Merge.java b/src/java/org/apache/hadoop/hbase/util/Merge.java index a199bd4..710cc37 100644 --- a/src/java/org/apache/hadoop/hbase/util/Merge.java +++ b/src/java/org/apache/hadoop/hbase/util/Merge.java @@ -71,7 +71,7 @@ public class Merge extends Configured implements Tool { this.conf = conf; this.mergeInfo = null; } - + public int run(String[] args) throws Exception { if (parseArgs(args) != 0) { return -1; @@ -86,7 +86,7 @@ public class Merge extends Configured implements Tool { LOG.fatal("File system is not available", e); return -1; } - + // Verify HBase is down LOG.info("Verifying that HBase is not running..."); try { @@ -96,9 +96,9 @@ public class Merge extends Configured implements Tool { } catch (MasterNotRunningException e) { // Expected. Ignore. } - + // Initialize MetaUtils and and get the root of the HBase installation - + this.utils = new MetaUtils(conf); this.rootdir = FSUtils.getRootDir(this.conf); try { @@ -120,14 +120,14 @@ public class Merge extends Configured implements Tool { ); return -1; - + } finally { if (this.utils != null) { this.utils.shutdown(); } } } - + /** @return HRegionInfo for merge result */ HRegionInfo getMergedHRegionInfo() { return this.mergeInfo; @@ -151,25 +151,25 @@ public class Merge extends Configured implements Tool { get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); List cells2 = rootRegion.get(get, null).list(); HRegionInfo info2 = Writables.getHRegionInfo((cells2 == null)? null: cells2.get(0).getValue()); - HRegion merged = merge(info1, rootRegion, info2, rootRegion); + HRegion merged = merge(info1, rootRegion, info2, rootRegion); LOG.info("Adding " + merged.getRegionInfo() + " to " + rootRegion.getRegionInfo()); HRegion.addRegionToMETA(rootRegion, merged); merged.close(); } - + private static class MetaScannerListener implements MetaUtils.ScannerListener { private final byte [] region1; private final byte [] region2; private HRegionInfo meta1 = null; private HRegionInfo meta2 = null; - + MetaScannerListener(final byte [] region1, final byte [] region2) { this.region1 = region1; this.region2 = region2; } - + public boolean processRow(HRegionInfo info) { if (meta1 == null && HRegion.rowIsInRange(info, region1)) { meta1 = info; @@ -180,16 +180,16 @@ public class Merge extends Configured implements Tool { } return meta1 == null || (region2 != null && meta2 == null); } - + HRegionInfo getMeta1() { return meta1; } - + HRegionInfo getMeta2() { return meta2; } } - + /* * Merges two regions from a user table. 
*/ @@ -258,7 +258,7 @@ public class Merge extends Configured implements Tool { HRegion.addRegionToMETA(mergeMeta, merged); merged.close(); } - + /* * Actually merge two regions and update their info in the meta region(s) * If the meta is split, meta1 may be different from meta2. (and we may have @@ -293,21 +293,21 @@ public class Merge extends Configured implements Tool { r1.close(); } } - + // Remove the old regions from meta. // HRegion.merge has already deleted their files - + removeRegionFromMeta(meta1, info1); removeRegionFromMeta(meta2, info2); this.mergeInfo = merged.getRegionInfo(); return merged; } - + /* * Removes a region's meta information from the passed meta * region. - * + * * @param meta META HRegion to be updated * @param regioninfo HRegionInfo of region to remove from meta * @@ -318,8 +318,8 @@ public class Merge extends Configured implements Tool { if (LOG.isDebugEnabled()) { LOG.debug("Removing region: " + regioninfo + " from " + meta); } - - Delete delete = new Delete(regioninfo.getRegionName(), + + Delete delete = new Delete(regioninfo.getRegionName(), System.currentTimeMillis(), null); meta.delete(delete, null, true); } @@ -327,7 +327,7 @@ public class Merge extends Configured implements Tool { /* * Adds a region's meta information from the passed meta * region. - * + * * @param metainfo META HRegionInfo to be updated * @param region HRegion to add to meta * @@ -336,7 +336,7 @@ public class Merge extends Configured implements Tool { private int parseArgs(String[] args) { GenericOptionsParser parser = new GenericOptionsParser(this.getConf(), args); - + String[] remainingArgs = parser.getRemainingArgs(); if (remainingArgs.length != 3) { usage(); @@ -344,7 +344,7 @@ public class Merge extends Configured implements Tool { } tableName = Bytes.toBytes(remainingArgs[0]); isMetaTable = Bytes.compareTo(tableName, HConstants.META_TABLE_NAME) == 0; - + region1 = Bytes.toBytesBinary(remainingArgs[1]); region2 = Bytes.toBytesBinary(remainingArgs[2]); int status = 0; @@ -356,7 +356,7 @@ public class Merge extends Configured implements Tool { } return status; } - + private boolean notInTable(final byte [] tn, final byte [] rn) { if (WritableComparator.compareBytes(tn, 0, tn.length, rn, 0, tn.length) != 0) { LOG.error("Region " + Bytes.toString(rn) + " does not belong to table " + @@ -365,15 +365,15 @@ public class Merge extends Configured implements Tool { } return false; } - + private void usage() { System.err.println( "Usage: bin/hbase merge \n"); } - + /** * Main program - * + * * @param args */ public static void main(String[] args) { diff --git a/src/java/org/apache/hadoop/hbase/util/MetaUtils.java b/src/java/org/apache/hadoop/hbase/util/MetaUtils.java index 39ad39d..5e9bfb4 100644 --- a/src/java/org/apache/hadoop/hbase/util/MetaUtils.java +++ b/src/java/org/apache/hadoop/hbase/util/MetaUtils.java @@ -63,14 +63,14 @@ public class MetaUtils { private HRegion rootRegion; private Map metaRegions = Collections.synchronizedSortedMap( new TreeMap(Bytes.BYTES_COMPARATOR)); - - /** Default constructor + + /** Default constructor * @throws IOException */ public MetaUtils() throws IOException { this(new HBaseConfiguration()); } - - /** @param conf HBaseConfiguration + + /** @param conf HBaseConfiguration * @throws IOException */ public MetaUtils(HBaseConfiguration conf) throws IOException { this.conf = conf; @@ -89,7 +89,7 @@ public class MetaUtils { this.rootdir = FSUtils.getRootDir(this.conf); } - /** @return the HLog + /** @return the HLog * @throws IOException */ public synchronized 
HLog getLog() throws IOException { if (this.log == null) { @@ -99,7 +99,7 @@ public class MetaUtils { } return this.log; } - + /** * @return HRegion for root region * @throws IOException @@ -110,10 +110,10 @@ public class MetaUtils { } return this.rootRegion; } - + /** * Open or return cached opened meta region - * + * * @param metaInfo HRegionInfo for meta region * @return meta HRegion * @throws IOException @@ -127,7 +127,7 @@ public class MetaUtils { } return meta; } - + /** * Closes catalog regions if open. Also closes and deletes the HLog. You * must call this method if you want to persist changes made during a @@ -172,18 +172,18 @@ public class MetaUtils { public interface ScannerListener { /** * Callback so client of scanner can process row contents - * + * * @param info HRegionInfo for row * @return false to terminate the scan * @throws IOException */ public boolean processRow(HRegionInfo info) throws IOException; } - + /** * Scans the root region. For every meta region found, calls the listener with * the HRegionInfo of the meta region. - * + * * @param listener method to be called for each meta region found * @throws IOException */ @@ -241,7 +241,7 @@ public class MetaUtils { *

      Use for reading meta only. Does not close region when done. * Use {@link #getMetaRegion(HRegionInfo)} instead if writing. Adds * meta region to list that will get a close on {@link #shutdown()}. - * + * * @param metaRegionInfo HRegionInfo for meta region * @param listener method to be called for each meta region found * @throws IOException @@ -270,7 +270,7 @@ public class MetaUtils { meta.compactStores(); return meta; } - + /** * Set a single region on/offline. * This is a tool to repair tables that have offlined tables in their midst. @@ -302,24 +302,24 @@ public class MetaUtils { HRegionInfo info = Writables.getHRegionInfo(value); Put put = new Put(row); info.setOffline(onlineOffline); - put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, + put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(info)); t.put(put); - + Delete delete = new Delete(row); delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER); - + t.delete(delete); } - + /** * Offline version of the online TableOperation, * org.apache.hadoop.hbase.master.AddColumn. * @param tableName * @param hcd Add this column to tableName - * @throws IOException + * @throws IOException */ public void addColumn(final byte [] tableName, final HColumnDescriptor hcd) @@ -329,7 +329,7 @@ public class MetaUtils { final HRegion m = getMetaRegion(hri); scanMetaRegion(m, new ScannerListener() { private boolean inTable = true; - + @SuppressWarnings("synthetic-access") public boolean processRow(HRegionInfo info) throws IOException { LOG.debug("Testing " + Bytes.toString(tableName) + " against " + @@ -347,7 +347,7 @@ public class MetaUtils { }}); } } - + /** * Offline version of the online TableOperation, * org.apache.hadoop.hbase.master.DeleteColumn. 
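[Editor's note: looping back to the ScannerListener callback defined earlier in this file, a sketch of driving a root-region scan offline; shutdown() must run to close the catalog regions and the temporary HLog. The class name is illustrative.]

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.MetaUtils;

    public class RootScanSketch {
      public static void main(String[] args) throws Exception {
        MetaUtils utils = new MetaUtils();
        try {
          utils.scanRootRegion(new MetaUtils.ScannerListener() {
            public boolean processRow(HRegionInfo info) {
              System.out.println(info.getRegionNameAsString());
              return true;   // returning false terminates the scan
            }
          });
        } finally {
          utils.shutdown();  // closes catalog regions and deletes the HLog
        }
      }
    }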
@@ -362,7 +362,7 @@ public class MetaUtils { final HRegion m = getMetaRegion(hri); scanMetaRegion(m, new ScannerListener() { private boolean inTable = true; - + @SuppressWarnings("synthetic-access") public boolean processRow(HRegionInfo info) throws IOException { if (Bytes.equals(info.getTableDesc().getName(), tableName)) { @@ -385,15 +385,15 @@ public class MetaUtils { }}); } } - + /** * Update COL_REGIONINFO in meta region r with HRegionInfo hri - * + * * @param r * @param hri * @throws IOException */ - public void updateMETARegionInfo(HRegion r, final HRegionInfo hri) + public void updateMETARegionInfo(HRegion r, final HRegionInfo hri) throws IOException { if (LOG.isDebugEnabled()) { Get get = new Get(hri.getRegionName()); @@ -408,14 +408,14 @@ public class MetaUtils { return; } HRegionInfo h = Writables.getHRegionInfoOrNull(value); - - LOG.debug("Old " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" + + + LOG.debug("Old " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " for " + hri.toString() + " in " + r.toString() + " is: " + h.toString()); } - + Put put = new Put(hri.getRegionName()); - put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, + put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(hri)); r.put(put); @@ -432,8 +432,8 @@ public class MetaUtils { return; } HRegionInfo h = Writables.getHRegionInfoOrNull(value); - LOG.debug("New " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" + - Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " for " + + LOG.debug("New " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" + + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " for " + hri.toString() + " in " + r.toString() + " is: " + h.toString()); } } @@ -456,7 +456,7 @@ public class MetaUtils { // Return all meta regions that contain the passed tablename. scanRootRegion(new ScannerListener() { private final Log SL_LOG = LogFactory.getLog(this.getClass()); - + public boolean processRow(HRegionInfo info) throws IOException { SL_LOG.debug("Testing " + info); if (Bytes.equals(info.getTableDesc().getName(), @@ -468,7 +468,7 @@ public class MetaUtils { }}); return result; } - + /** * @param n Table name. * @return True if a catalog table, -ROOT- or .META. diff --git a/src/java/org/apache/hadoop/hbase/util/Migrate.java b/src/java/org/apache/hadoop/hbase/util/Migrate.java index acdf8d0..25de9da 100644 --- a/src/java/org/apache/hadoop/hbase/util/Migrate.java +++ b/src/java/org/apache/hadoop/hbase/util/Migrate.java @@ -64,13 +64,13 @@ import org.apache.hadoop.util.ToolRunner; * the filesystem across versions until there's a match with current software's * version number. This script will only cross a particular version divide. You may * need to install earlier or later hbase to migrate earlier (or older) versions. - * + * *

      This wrapper script comprises a set of migration steps. Which steps * are run depends on the span between the version of the hbase data in the * Filesystem and the version of the current softare. - * + * *

      A migration script must accompany any patch that changes data formats. - * + * *

      This script has a 'check' and 'execute' mode. Adding migration steps, * its important to keep this in mind. Testing if migration needs to be run, * be careful not to make presumptions about the current state of the data in @@ -80,9 +80,9 @@ import org.apache.hadoop.util.ToolRunner; * old formats -- or, worse, fail in ways that are hard to figure (One such is * edits made by previous migration steps not being apparent in later migration * steps). The upshot is always verify presumptions migrating. - * + * *

      This script will migrate an hbase 0.18.x only. - * + * * @see How To Migration */ public class Migrate extends Configured implements Tool { @@ -93,11 +93,11 @@ public class Migrate extends Configured implements Tool { // Filesystem version of hbase 0.1.x. private static final float HBASE_0_1_VERSION = 0.1f; - + // Filesystem version we can migrate from private static final int PREVIOUS_VERSION = 6; - - private static final String MIGRATION_LINK = + + private static final String MIGRATION_LINK = " See http://wiki.apache.org/hadoop/Hbase/HowToMigrate for more information."; /** @@ -149,7 +149,7 @@ public class Migrate extends Configured implements Tool { return false; } } - + private boolean notRunning() { // Verify HBase is down LOG.info("Verifying that HBase is not running...." + @@ -162,7 +162,7 @@ public class Migrate extends Configured implements Tool { return true; } } - + public int run(String[] args) { if (parseArgs(args) != 0) { return -1; @@ -215,7 +215,7 @@ public class Migrate extends Configured implements Tool { return -1; } } - + // Move the fileystem version from 6 to 7. private void migrate6to7() throws IOException { if (this.check && this.migrationNeeded) { @@ -390,14 +390,14 @@ public class Migrate extends Configured implements Tool { } byte [][] parts = KeyValue.parseColumn(key.getColumn()); KeyValue kv = deleteBytes.equals(value)? - new KeyValue(key.getRow(), parts[0], parts[1], + new KeyValue(key.getRow(), parts[0], parts[1], key.getTimestamp(), KeyValue.Type.Delete): - new KeyValue(key.getRow(), parts[0], parts[1], + new KeyValue(key.getRow(), parts[0], parts[1], key.getTimestamp(), value.get()); tgt.append(kv); } long seqid = hsf.loadInfo(fs); - StoreFile.appendMetadata(tgt, seqid, + StoreFile.appendMetadata(tgt, seqid, hsf.isMajorCompaction()); // Success, delete src. src.close(); @@ -433,7 +433,7 @@ public class Migrate extends Configured implements Tool { return; } Put put = new Put(oldHri.getRegionName()); - put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, + put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(oldHri)); mr.put(put); } @@ -478,7 +478,7 @@ public class Migrate extends Configured implements Tool { return; } Put put = new Put(oldHri.getRegionName()); - put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, + put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(oldHri)); mr.put(put); LOG.info("Upped versions on " + oldHri.getRegionNameAsString()); @@ -528,7 +528,7 @@ public class Migrate extends Configured implements Tool { } return 0; } - + private void usage() { System.err.println("Usage: bin/hbase migrate {check | upgrade} [options]"); System.err.println(); @@ -543,7 +543,7 @@ public class Migrate extends Configured implements Tool { /** * Main program - * + * * @param args command line arguments */ public static void main(String[] args) { diff --git a/src/java/org/apache/hadoop/hbase/util/MurmurHash.java b/src/java/org/apache/hadoop/hbase/util/MurmurHash.java index f31c285..3823871 100644 --- a/src/java/org/apache/hadoop/hbase/util/MurmurHash.java +++ b/src/java/org/apache/hadoop/hbase/util/MurmurHash.java @@ -21,17 +21,17 @@ package org.apache.hadoop.hbase.util; /** * This is a very fast, non-cryptographic hash suitable for general hash-based * lookup. See http://murmurhash.googlepages.com/ for more details. - * + * *

      The C version of MurmurHash 2.0 found at that site was ported * to Java by Andrzej Bialecki (ab at getopt org).
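[Editor's note: since both implementations sit behind the same Hash abstraction, the singleton and the type-based factory lookup are interchangeable; a sketch, with an illustrative class name and key.]

    import org.apache.hadoop.hbase.util.Hash;
    import org.apache.hadoop.hbase.util.MurmurHash;

    public class MurmurSketch {
      public static void main(String[] args) {
        byte[] data = "row-key".getBytes();
        int direct  = MurmurHash.getInstance().hash(data, -1);
        int viaType = Hash.getInstance(Hash.MURMUR_HASH).hash(data, -1);
        System.out.println(direct + " == " + viaType);  // same value either way
      }
    }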

      */ public class MurmurHash extends Hash { private static MurmurHash _instance = new MurmurHash(); - + public static Hash getInstance() { return _instance; } - + @Override public int hash(byte[] data, int length, int seed) { int m = 0x5bd1e995; diff --git a/src/java/org/apache/hadoop/hbase/util/Sleeper.java b/src/java/org/apache/hadoop/hbase/util/Sleeper.java index 0e817ff..c97b9cd 100644 --- a/src/java/org/apache/hadoop/hbase/util/Sleeper.java +++ b/src/java/org/apache/hadoop/hbase/util/Sleeper.java @@ -27,7 +27,7 @@ import org.apache.commons.logging.LogFactory; /** * Sleeper for current thread. * Sleeps for passed period. Also checks passed boolean and if interrupted, - * will return if the flag is set (rather than go back to sleep until its + * will return if the flag is set (rather than go back to sleep until its * sleep time is up). */ public class Sleeper { @@ -35,7 +35,7 @@ public class Sleeper { private final int period; private AtomicBoolean stop; private static final long MINIMAL_DELTA_FOR_LOGGING = 10000; - + private final Object sleepLock = new Object(); private boolean triggerWake = false; @@ -47,7 +47,7 @@ public class Sleeper { this.period = sleep; this.stop = stop; } - + /** * Sleep for period. */ @@ -65,7 +65,7 @@ public class Sleeper { sleepLock.notify(); } } - + /** * Sleep for period adjusted by passed startTime * @param startTime Time some task started previous to now. Time to sleep diff --git a/src/java/org/apache/hadoop/hbase/util/SoftValueMap.java b/src/java/org/apache/hadoop/hbase/util/SoftValueMap.java index 80a81b5..cdd0d7b 100644 --- a/src/java/org/apache/hadoop/hbase/util/SoftValueMap.java +++ b/src/java/org/apache/hadoop/hbase/util/SoftValueMap.java @@ -29,7 +29,7 @@ import java.util.Set; /** * A Map that uses Soft Reference values internally. Use as a simple cache. - * + * * @param key class * @param value class */ @@ -37,11 +37,11 @@ public class SoftValueMap implements Map { private final Map> internalMap = new HashMap>(); private final ReferenceQueue rq; - + public SoftValueMap() { this(new ReferenceQueue()); } - + public SoftValueMap(final ReferenceQueue rq) { this.rq = rq; } @@ -66,12 +66,12 @@ public class SoftValueMap implements Map { new SoftValue(key, value, this.rq)); return oldValue == null ? null : oldValue.get(); } - + @SuppressWarnings("unchecked") public void putAll(Map map) { throw new RuntimeException("Not implemented"); } - + public V get(Object key) { checkReferences(); SoftValue value = this.internalMap.get(key); @@ -92,16 +92,16 @@ public class SoftValueMap implements Map { } public boolean containsKey(Object key) { - checkReferences(); + checkReferences(); return this.internalMap.containsKey(key); } - + public boolean containsValue(Object value) { /* checkReferences(); return internalMap.containsValue(value);*/ throw new UnsupportedOperationException("Don't support containsValue!"); } - + public boolean isEmpty() { checkReferences(); return this.internalMap.isEmpty(); diff --git a/src/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java b/src/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java index 7c4c49c..f440245 100644 --- a/src/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java +++ b/src/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java @@ -33,19 +33,19 @@ import java.util.TreeSet; * A SortedMap implementation that uses Soft Reference values * internally to make it play well with the GC when in a low-memory * situation. Use as a cache where you also need SortedMap functionality. 
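[Editor's note: a sketch of the soft-reference cache idiom SoftValueSortedMap is meant for; because values live behind SoftReferences, a get() after GC pressure may legitimately return null. Key and value types, and the class name, are arbitrary.]

    import org.apache.hadoop.hbase.util.SoftValueSortedMap;

    public class SoftCacheSketch {
      public static void main(String[] args) {
        SoftValueSortedMap<String, byte[]> cache =
            new SoftValueSortedMap<String, byte[]>();
        cache.put("row-000", new byte[1024]);
        byte[] hit = cache.get("row-000");  // may be null if the GC reclaimed it
        System.out.println(hit == null ? "evicted" : "cached " + hit.length + " bytes");
      }
    }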
- * + * * @param key class * @param value class */ public class SoftValueSortedMap implements SortedMap { private final SortedMap> internalMap; private final ReferenceQueue rq = new ReferenceQueue(); - + /** Constructor */ public SoftValueSortedMap() { this(new TreeMap>()); } - + /** * Constructor * @param c @@ -53,7 +53,7 @@ public class SoftValueSortedMap implements SortedMap { public SoftValueSortedMap(final Comparator c) { this(new TreeMap>(c)); } - + /** For headMap and tailMap support */ private SoftValueSortedMap(SortedMap> original) { this.internalMap = original; @@ -80,12 +80,12 @@ public class SoftValueSortedMap implements SortedMap { new SoftValue(key, value, this.rq)); return oldValue == null ? null : oldValue.get(); } - + @SuppressWarnings("unchecked") public synchronized void putAll(Map map) { throw new RuntimeException("Not implemented"); } - + public synchronized V get(Object key) { checkReferences(); SoftValue value = this.internalMap.get(key); @@ -106,10 +106,10 @@ public class SoftValueSortedMap implements SortedMap { } public synchronized boolean containsKey(Object key) { - checkReferences(); + checkReferences(); return this.internalMap.containsKey(key); } - + public synchronized boolean containsValue(Object value) { /* checkReferences(); return internalMap.containsValue(value);*/ @@ -125,22 +125,22 @@ public class SoftValueSortedMap implements SortedMap { checkReferences(); return internalMap.lastKey(); } - + public synchronized SoftValueSortedMap headMap(K key) { checkReferences(); return new SoftValueSortedMap(this.internalMap.headMap(key)); } - + public synchronized SoftValueSortedMap tailMap(K key) { checkReferences(); return new SoftValueSortedMap(this.internalMap.tailMap(key)); } - + public synchronized SoftValueSortedMap subMap(K fromKey, K toKey) { checkReferences(); return new SoftValueSortedMap(this.internalMap.subMap(fromKey, toKey)); } - + public synchronized boolean isEmpty() { checkReferences(); return this.internalMap.isEmpty(); diff --git a/src/java/org/apache/hadoop/hbase/util/Strings.java b/src/java/org/apache/hadoop/hbase/util/Strings.java index 117312a..f66e1d8 100644 --- a/src/java/org/apache/hadoop/hbase/util/Strings.java +++ b/src/java/org/apache/hadoop/hbase/util/Strings.java @@ -25,7 +25,7 @@ package org.apache.hadoop.hbase.util; public class Strings { public final static String DEFAULT_SEPARATOR = "="; public final static String DEFAULT_KEYVALUE_SEPARATOR = ", "; - + /** * Append to a StringBuilder a key/value. * Uses default separators. diff --git a/src/java/org/apache/hadoop/hbase/util/Threads.java b/src/java/org/apache/hadoop/hbase/util/Threads.java index e102c22..4a1a52a 100644 --- a/src/java/org/apache/hadoop/hbase/util/Threads.java +++ b/src/java/org/apache/hadoop/hbase/util/Threads.java @@ -29,7 +29,7 @@ import org.apache.hadoop.util.ReflectionUtils; */ public class Threads { protected static final Log LOG = LogFactory.getLog(Threads.class); - + /** * Utility method that sets name, daemon status and starts passed thread. * @param t @@ -40,7 +40,7 @@ public class Threads { final String name) { return setDaemonThreadRunning(t, name, null); } - + /** * Utility method that sets name, daemon status and starts passed thread. 
* @param t diff --git a/src/java/org/apache/hadoop/hbase/util/VersionInfo.java b/src/java/org/apache/hadoop/hbase/util/VersionInfo.java index 63f75f0..68009ef 100644 --- a/src/java/org/apache/hadoop/hbase/util/VersionInfo.java +++ b/src/java/org/apache/hadoop/hbase/util/VersionInfo.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.VersionAnnotation; public class VersionInfo { private static Package myPackage; private static VersionAnnotation version; - + static { myPackage = VersionAnnotation.class.getPackage(); version = myPackage.getAnnotation(VersionAnnotation.class); @@ -40,7 +40,7 @@ public class VersionInfo { static Package getPackage() { return myPackage; } - + /** * Get the hbase version. * @return the hbase version string, eg. "0.6.3-dev" @@ -48,7 +48,7 @@ public class VersionInfo { public static String getVersion() { return version != null ? version.version() : "Unknown"; } - + /** * Get the subversion revision number for the root directory * @return the revision number, eg. "451451" @@ -56,7 +56,7 @@ public class VersionInfo { public static String getRevision() { return version != null ? version.revision() : "Unknown"; } - + /** * The date that hbase was compiled. * @return the compilation date in unix date format @@ -64,7 +64,7 @@ public class VersionInfo { public static String getDate() { return version != null ? version.date() : "Unknown"; } - + /** * The user that compiled hbase. * @return the username of the user @@ -72,7 +72,7 @@ public class VersionInfo { public static String getUser() { return version != null ? version.user() : "Unknown"; } - + /** * Get the subversion URL for the root hbase directory. * @return the url @@ -80,7 +80,7 @@ public class VersionInfo { public static String getUrl() { return version != null ? version.url() : "Unknown"; } - + /** * @param args */ diff --git a/src/java/org/apache/hadoop/hbase/util/Writables.java b/src/java/org/apache/hadoop/hbase/util/Writables.java index 93423b1..34bca93 100644 --- a/src/java/org/apache/hadoop/hbase/util/Writables.java +++ b/src/java/org/apache/hadoop/hbase/util/Writables.java @@ -36,7 +36,7 @@ import org.apache.hadoop.io.WritableUtils; public class Writables { /** * @param w - * @return The bytes of w gotten by running its + * @return The bytes of w gotten by running its * {@link Writable#write(java.io.DataOutput)} method. * @throws IOException * @see #getWritable(byte[], Writable) @@ -119,7 +119,7 @@ public class Writables { throws IOException { return (HRegionInfo)getWritable(bytes, new HRegionInfo()); } - + /** * @param bytes * @return A HRegionInfo instance built out of passed bytes @@ -184,7 +184,7 @@ public class Writables { } return Bytes.toString(c.getValue()); } - + /** * @param c * @return Cell as a long. 
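[Editor's note: the VersionInfo getters above are read-only accessors over the package's VersionAnnotation; a short sketch with an illustrative class name.]

    import org.apache.hadoop.hbase.util.VersionInfo;

    public class PrintVersionSketch {
      public static void main(String[] args) {
        // Every getter falls back to "Unknown" when the annotation is missing.
        System.out.println("HBase " + VersionInfo.getVersion()
            + ", r" + VersionInfo.getRevision()
            + ", compiled by " + VersionInfo.getUser()
            + " on " + VersionInfo.getDate());
      }
    }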
diff --git a/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java b/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java
index 0bee09c..0e08c29 100644
--- a/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java
+++ b/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java
@@ -105,7 +105,7 @@ public class ZooKeeperWrapper implements HConstants {
         "master");
     String stateZNodeName = conf.get("zookeeper.znode.state",
         "shutdown");
-    
+
     rootRegionZNode = getZNode(parentZNode, rootServerZNodeName);
     outOfSafeModeZNode = getZNode(parentZNode, outOfSafeModeZNodeName);
     rsZNode = getZNode(parentZNode, rsZNodeName);
@@ -195,31 +195,31 @@ public class ZooKeeperWrapper implements HConstants {
     }
     return sb.toString();
   }
-  
+
   /**
    * Gets the statistics from the given server. Uses a 1 minute timeout.
-   * 
+   *
    * @param server The server to get the statistics from.
    * @return The array of response strings.
    * @throws IOException When the socket communication fails.
    */
-  public String[] getServerStats(String server) 
+  public String[] getServerStats(String server)
   throws IOException {
     return getServerStats(server, 1 * 60 * 1000);
   }
-  
+
   /**
    * Gets the statistics from the given server.
-   * 
+   *
    * @param server The server to get the statistics from.
    * @param timeout The socket timeout to use.
    * @return The array of response strings.
    * @throws IOException When the socket communication fails.
    */
-  public String[] getServerStats(String server, int timeout) 
+  public String[] getServerStats(String server, int timeout)
   throws IOException {
     String[] sp = server.split(":");
-    Socket socket = new Socket(sp[0], 
+    Socket socket = new Socket(sp[0],
       sp.length > 1 ? Integer.parseInt(sp[1]) : 2181);
     socket.setSoTimeout(timeout);
     PrintWriter out = new PrintWriter(socket.getOutputStream(), true);
@@ -307,7 +307,7 @@ public class ZooKeeperWrapper implements HConstants {
   public HServerAddress readMasterAddress(Watcher watcher) {
     return readAddress(masterElectionZNode, watcher);
   }
-  
+
   /**
    * Watch the state of the cluster, up or down
    * @param watcher Watcher to set on cluster state node
@@ -321,7 +321,7 @@
       LOG.warn("Failed to check on ZNode " + clusterStateZNode, e);
     }
   }
-  
+
   /**
    * Set the cluster state, up or down
    * @param up True to write the node, false to delete it
@@ -334,7 +334,7 @@ public class ZooKeeperWrapper implements HConstants {
     try {
       if(up) {
         byte[] data = Bytes.toBytes("up");
-        zooKeeper.create(clusterStateZNode, data, 
+        zooKeeper.create(clusterStateZNode, data,
             Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
         LOG.debug("State node wrote in ZooKeeper");
       } else {
@@ -575,7 +575,7 @@ public class ZooKeeperWrapper implements HConstants {
 
     return false;
   }
-  
+
   /**
    * Write in ZK this RS startCode and address.
    * Ensures that the full path exists.
@@ -622,7 +622,7 @@ public class ZooKeeperWrapper implements HConstants {
 
     return false;
   }
-  
+
   /**
    * Scans the regions servers directory
    * @return A list of server addresses
@@ -641,7 +641,7 @@ public class ZooKeeperWrapper implements HConstants {
     }
     return addresses;
   }
-  
+
   /**
    * Method used to make sure the region server directory is empty.
    *
@@ -659,7 +659,7 @@ public class ZooKeeperWrapper implements HConstants {
       LOG.warn("Failed to delete " + rsZNode + " znodes in ZooKeeper: " + e);
     }
   }
-  
+
   private boolean checkExistenceOf(String path) {
     Stat stat = null;
     try {
@@ -684,7 +684,7 @@ public class ZooKeeperWrapper implements HConstants {
       LOG.warn("Failed to close connection with ZooKeeper");
     }
   }
-  
+
   private String getZNode(String parentZNode, String znodeName) {
     return znodeName.charAt(0) == ZNODE_PATH_SEPARATOR ? znodeName :
         joinPath(parentZNode, znodeName);
@@ -701,6 +701,6 @@ public class ZooKeeperWrapper implements HConstants {
   public String getMasterElectionZNode() {
     return masterElectionZNode;
   }
-  
-  
+
+
 }
-- 
1.7.1.4.g83535
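
Every hunk above makes the same mechanical edit: trailing spaces and tabs are removed while the code itself is left alone. For anyone who wants to apply the same cleanup to another tree rather than by hand, the sketch below shows one way it could be scripted. It is an illustration only, not part of the patch: it assumes Java 8+ NIO, and the class name and the default path "src/java" are simply taken from the file names in the hunks, not from anything in the HBase build.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class StripTrailingWhitespace {
  public static void main(String[] args) throws IOException {
    // Root of the source tree to clean; "src/java" mirrors the paths above.
    Path root = Paths.get(args.length > 0 ? args[0] : "src/java");
    try (Stream<Path> walk = Files.walk(root)) {
      List<Path> javaFiles = walk
          .filter(p -> p.toString().endsWith(".java"))
          .collect(Collectors.toList());
      for (Path file : javaFiles) {
        List<String> lines = Files.readAllLines(file, StandardCharsets.UTF_8);
        // Drop spaces and tabs at the end of every line.
        List<String> cleaned = lines.stream()
            .map(line -> line.replaceAll("[ \\t]+$", ""))
            .collect(Collectors.toList());
        // Only rewrite files that actually changed.
        if (!cleaned.equals(lines)) {
          Files.write(file, cleaned, StandardCharsets.UTF_8);
        }
      }
    }
  }
}

Run from the repository root (java StripTrailingWhitespace src/java) and review the resulting diff before committing, since the sketch rewrites files in place and normalizes line endings to the platform default.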