From f2cdd818265f41738052689cf8e5a4015fe72037 Mon Sep 17 00:00:00 2001
From: Elliott Clark
Date: Wed, 26 Dec 2012 13:49:09 -0800
Subject: [PATCH] Make the client a separate module.

---
 .../java/org/apache/hadoop/hbase/ClusterId.java | 103 + .../org/apache/hadoop/hbase/ClusterStatus.java | 350 +++ .../java/org/apache/hadoop/hbase/Coprocessor.java | 56 + .../hadoop/hbase/CoprocessorEnvironment.java | 55 + .../org/apache/hadoop/hbase/HColumnDescriptor.java | 1142 ++++++++++ .../java/org/apache/hadoop/hbase/HRegionInfo.java | 1094 +++++++++ .../org/apache/hadoop/hbase/HRegionLocation.java | 128 ++ .../org/apache/hadoop/hbase/HTableDescriptor.java | 1301 +++++++++++ .../apache/hadoop/hbase/MasterAdminProtocol.java | 349 +++ .../apache/hadoop/hbase/MasterMonitorProtocol.java | 99 + .../org/apache/hadoop/hbase/MasterProtocol.java | 44 + .../java/org/apache/hadoop/hbase/RegionLoad.java | 154 ++ .../hadoop/hbase/RegionServerStatusProtocol.java | 39 + .../main/java/org/apache/hadoop/hbase/Server.java | 51 + .../java/org/apache/hadoop/hbase/ServerLoad.java | 305 +++ .../java/org/apache/hadoop/hbase/ServerName.java | 355 +++ .../hadoop/hbase/catalog/CatalogTracker.java | 703 ++++++ .../apache/hadoop/hbase/catalog/MetaReader.java | 643 ++++++ .../hadoop/hbase/client/AbstractClientScanner.java | 77 + .../org/apache/hadoop/hbase/client/Action.java | 80 + .../apache/hadoop/hbase/client/AdminProtocol.java | 37 + .../org/apache/hadoop/hbase/client/Append.java | 90 + .../org/apache/hadoop/hbase/client/Attributes.java | 51 + .../apache/hadoop/hbase/client/ClientProtocol.java | 39 + .../apache/hadoop/hbase/client/ClientScanner.java | 405 ++++ .../hadoop/hbase/client/ConnectionUtils.java | 52 + .../org/apache/hadoop/hbase/client/Delete.java | 256 +++ .../java/org/apache/hadoop/hbase/client/Get.java | 425 ++++ .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 2171 ++++++++++++++++++ .../apache/hadoop/hbase/client/HConnection.java | 364 +++ .../hadoop/hbase/client/HConnectionManager.java | 2378 ++++++++++++++++++++ .../org/apache/hadoop/hbase/client/HTable.java | 1299 +++++++++++ .../apache/hadoop/hbase/client/HTableFactory.java | 49 + .../hadoop/hbase/client/HTableInterface.java | 557 +++++ .../hbase/client/HTableInterfaceFactory.java | 52 + .../org/apache/hadoop/hbase/client/HTablePool.java | 552 +++++ .../org/apache/hadoop/hbase/client/HTableUtil.java | 141 ++ .../org/apache/hadoop/hbase/client/Increment.java | 277 +++ .../apache/hadoop/hbase/client/IsolationLevel.java | 59 + .../client/MasterAdminKeepAliveConnection.java | 44 + .../client/MasterMonitorKeepAliveConnection.java | 44 + .../apache/hadoop/hbase/client/MetaScanner.java | 489 ++++ .../apache/hadoop/hbase/client/MultiAction.java | 90 + .../apache/hadoop/hbase/client/MultiResponse.java | 85 + .../org/apache/hadoop/hbase/client/Mutation.java | 232 ++ .../hbase/client/NoServerForRegionException.java | 45 + .../org/apache/hadoop/hbase/client/Operation.java | 113 + .../hbase/client/OperationWithAttributes.java | 108 + .../java/org/apache/hadoop/hbase/client/Put.java | 357 +++ .../hbase/client/RegionOfflineException.java | 39 + .../org/apache/hadoop/hbase/client/Result.java | 709 ++++++ .../apache/hadoop/hbase/client/ResultScanner.java | 54 + .../hbase/client/RetriesExhaustedException.java | 109 + .../RetriesExhaustedWithDetailsException.java | 152 ++ .../java/org/apache/hadoop/hbase/client/Row.java | 34 + .../org/apache/hadoop/hbase/client/RowLock.java | 66 + .../apache/hadoop/hbase/client/RowMutations.java | 103 + 
.../java/org/apache/hadoop/hbase/client/Scan.java | 652 ++++++ .../hadoop/hbase/client/ScannerCallable.java | 322 +++ .../hbase/client/ScannerTimeoutException.java | 44 + .../apache/hadoop/hbase/client/ServerCallable.java | 243 ++ .../client/UnmodifyableHColumnDescriptor.java | 96 + .../hbase/client/UnmodifyableHRegionInfo.java | 53 + .../hbase/client/UnmodifyableHTableDescriptor.java | 127 ++ .../hbase/client/ZooKeeperKeepAliveConnection.java | 54 + .../client/coprocessor/AggregationClient.java | 701 ++++++ .../hadoop/hbase/client/coprocessor/Batch.java | 74 + .../client/coprocessor/LongColumnInterpreter.java | 141 ++ .../hbase/client/coprocessor/package-info.java | 226 ++ .../hadoop/hbase/client/metrics/ScanMetrics.java | 142 ++ .../apache/hadoop/hbase/client/package-info.java | 185 ++ .../hbase/client/replication/ReplicationAdmin.java | 209 ++ .../hbase/coprocessor/ColumnInterpreter.java | 164 ++ .../hadoop/hbase/filter/BinaryComparator.java | 89 + .../hbase/filter/BinaryPrefixComparator.java | 91 + .../apache/hadoop/hbase/filter/BitComparator.java | 133 ++ .../hadoop/hbase/filter/ByteArrayComparable.java | 100 + .../hadoop/hbase/filter/ColumnCountGetFilter.java | 121 + .../hbase/filter/ColumnPaginationFilter.java | 142 ++ .../hadoop/hbase/filter/ColumnPrefixFilter.java | 141 ++ .../hadoop/hbase/filter/ColumnRangeFilter.java | 229 ++ .../apache/hadoop/hbase/filter/CompareFilter.java | 180 ++ .../hadoop/hbase/filter/DependentColumnFilter.java | 289 +++ .../apache/hadoop/hbase/filter/FamilyFilter.java | 130 ++ .../org/apache/hadoop/hbase/filter/Filter.java | 196 ++ .../org/apache/hadoop/hbase/filter/FilterBase.java | 170 ++ .../org/apache/hadoop/hbase/filter/FilterList.java | 378 ++++ .../apache/hadoop/hbase/filter/FilterWrapper.java | 151 ++ .../hadoop/hbase/filter/FirstKeyOnlyFilter.java | 114 + .../FirstKeyValueMatchingQualifiersFilter.java | 124 + .../apache/hadoop/hbase/filter/FuzzyRowFilter.java | 333 +++ .../hadoop/hbase/filter/InclusiveStopFilter.java | 128 ++ .../hbase/filter/IncompatibleFilterException.java | 44 + .../hbase/filter/InvalidRowFilterException.java | 45 + .../apache/hadoop/hbase/filter/KeyOnlyFilter.java | 102 + .../hbase/filter/MultipleColumnPrefixFilter.java | 199 ++ .../apache/hadoop/hbase/filter/NullComparator.java | 88 + .../org/apache/hadoop/hbase/filter/PageFilter.java | 126 ++ .../apache/hadoop/hbase/filter/ParseConstants.java | 263 +++ .../apache/hadoop/hbase/filter/ParseFilter.java | 859 +++++++ .../apache/hadoop/hbase/filter/PrefixFilter.java | 123 + .../hadoop/hbase/filter/QualifierFilter.java | 129 ++ .../hadoop/hbase/filter/RandomRowFilter.java | 150 ++ .../hadoop/hbase/filter/RegexStringComparator.java | 174 ++ .../org/apache/hadoop/hbase/filter/RowFilter.java | 144 ++ .../filter/SingleColumnValueExcludeFilter.java | 178 ++ .../hbase/filter/SingleColumnValueFilter.java | 389 ++++ .../org/apache/hadoop/hbase/filter/SkipFilter.java | 145 ++ .../hadoop/hbase/filter/SubstringComparator.java | 112 + .../hadoop/hbase/filter/TimestampsFilter.java | 175 ++ .../apache/hadoop/hbase/filter/ValueFilter.java | 125 + .../hadoop/hbase/filter/WhileMatchFilter.java | 145 ++ .../apache/hadoop/hbase/filter/package-info.java | 34 + .../hadoop/hbase/io/HbaseObjectWritable.java | 803 +++++++ .../apache/hadoop/hbase/io/WritableWithSize.java | 38 + .../hadoop/hbase/ipc/BlockingRpcCallback.java | 73 + .../org/apache/hadoop/hbase/ipc/ClientCache.java | 102 + .../hadoop/hbase/ipc/CoprocessorRpcChannel.java | 68 + .../org/apache/hadoop/hbase/ipc/HBaseClient.java | 1510 
+++++++++++++ .../apache/hadoop/hbase/ipc/HBaseClientRPC.java | 294 +++ .../org/apache/hadoop/hbase/ipc/Invocation.java | 219 ++ .../hbase/ipc/MasterCoprocessorRpcChannel.java | 88 + .../hadoop/hbase/ipc/ProtobufRpcClientEngine.java | 194 ++ .../apache/hadoop/hbase/ipc/ProtocolSignature.java | 243 ++ .../hbase/ipc/RegionCoprocessorRpcChannel.java | 103 + .../apache/hadoop/hbase/ipc/RpcClientEngine.java | 42 + .../hbase/ipc/ServerNotRunningYetException.java | 32 + .../hadoop/hbase/ipc/ServerRpcController.java | 133 ++ .../apache/hadoop/hbase/ipc/VersionedProtocol.java | 59 + .../apache/hadoop/hbase/master/RegionState.java | 275 +++ .../apache/hadoop/hbase/protobuf/ProtobufUtil.java | 1854 +++++++++++++++ .../hadoop/hbase/protobuf/RequestConverter.java | 1156 ++++++++++ .../hadoop/hbase/protobuf/ResponseConverter.java | 282 +++ .../hbase/regionserver/RegionOpeningState.java | 31 + .../hadoop/hbase/replication/ReplicationPeer.java | 206 ++ .../hbase/replication/ReplicationZookeeper.java | 1103 +++++++++ .../hadoop/hbase/security/HBaseSaslRpcClient.java | 277 +++ .../hadoop/hbase/security/access/Permission.java | 199 ++ .../hbase/security/access/TablePermission.java | 307 +++ .../hbase/security/access/UserPermission.java | 167 ++ .../token/AuthenticationTokenIdentifier.java | 184 ++ .../token/AuthenticationTokenSelector.java | 54 + .../hadoop/hbase/zookeeper/EmptyWatcher.java | 34 + .../apache/hadoop/hbase/zookeeper/HQuorumPeer.java | 160 ++ .../hbase/zookeeper/MasterAddressTracker.java | 181 ++ .../hadoop/hbase/zookeeper/MetaNodeTracker.java | 48 + .../hbase/zookeeper/RecoverableZooKeeper.java | 598 +++++ .../hadoop/hbase/zookeeper/RootRegionTracker.java | 184 ++ .../apache/hadoop/hbase/zookeeper/ZKClusterId.java | 80 + .../apache/hadoop/hbase/zookeeper/ZKConfig.java | 273 +++ .../org/apache/hadoop/hbase/zookeeper/ZKTable.java | 367 +++ .../hadoop/hbase/zookeeper/ZKTableReadOnly.java | 160 ++ .../org/apache/hadoop/hbase/zookeeper/ZKUtil.java | 1417 ++++++++++++ .../hadoop/hbase/zookeeper/ZooKeeperListener.java | 82 + .../hbase/zookeeper/ZooKeeperNodeTracker.java | 255 +++ .../hadoop/hbase/zookeeper/ZooKeeperWatcher.java | 474 ++++ .../apache/hadoop/hbase/client/TestAttributes.java | 165 ++ .../org/apache/hadoop/hbase/client/TestGet.java | 107 + .../apache/hadoop/hbase/client/TestOperation.java | 369 +++ .../apache/hadoop/hbase/client/TestPutDotHas.java | 73 + .../org/apache/hadoop/hbase/client/TestScan.java | 108 + .../java/org/apache/hadoop/hbase/Abortable.java | 45 + .../main/java/org/apache/hadoop/hbase/Chore.java | 122 + .../hadoop/hbase/ClockOutOfSyncException.java | 37 + .../hadoop/hbase/DeserializationException.java | 43 + .../apache/hadoop/hbase/DoNotRetryIOException.java | 55 + .../hadoop/hbase/DroppedSnapshotException.java | 45 + .../hadoop/hbase/FailedSanityCheckException.java | 49 + .../org/apache/hadoop/hbase/HBaseException.java | 44 + .../org/apache/hadoop/hbase/HBaseIOException.java | 48 + .../java/org/apache/hadoop/hbase/HConstants.java | 5 + .../hbase/InvalidFamilyOperationException.java | 54 + .../hadoop/hbase/MasterNotRunningException.java | 57 + .../hbase/NotAllMetaRegionsOnlineException.java | 46 + .../hadoop/hbase/NotServingRegionException.java | 56 + .../hbase/OutOfOrderScannerNextException.java | 38 + .../apache/hadoop/hbase/PleaseHoldException.java | 38 + .../org/apache/hadoop/hbase/RegionException.java | 47 + .../apache/hadoop/hbase/RegionMovedException.java | 126 ++ .../hadoop/hbase/RegionTooBusyException.java | 47 + .../hadoop/hbase/RemoteExceptionHandler.java | 
121 + .../java/org/apache/hadoop/hbase/Stoppable.java | 38 + .../apache/hadoop/hbase/TableExistsException.java | 42 + .../hadoop/hbase/TableInfoMissingException.java | 47 + .../hadoop/hbase/TableNotDisabledException.java | 53 + .../hadoop/hbase/TableNotEnabledException.java | 53 + .../hadoop/hbase/TableNotFoundException.java | 39 + .../hadoop/hbase/UnknownRegionException.java | 35 + .../hadoop/hbase/UnknownRowLockException.java | 45 + .../hadoop/hbase/UnknownScannerException.java | 48 + .../apache/hadoop/hbase/YouAreDeadException.java | 38 + .../hadoop/hbase/ZooKeeperConnectionException.java | 53 + .../hadoop/hbase/io/DataInputInputStream.java | 60 + .../hadoop/hbase/io/DataOutputOutputStream.java | 67 + .../java/org/apache/hadoop/hbase/io/TimeRange.java | 200 ++ .../hadoop/hbase/regionserver/BloomType.java | 35 + .../regionserver/NoSuchColumnFamilyException.java | 42 + .../regionserver/RegionServerRunningException.java | 46 + .../regionserver/RegionServerStoppedException.java | 35 + .../hbase/regionserver/WrongRegionException.java | 46 + .../regionserver/wal/FailedLogCloseException.java | 46 + .../wal/OrphanHLogAfterSplitException.java | 42 + .../hbase/security/AccessDeniedException.java | 39 + .../apache/hadoop/hbase/security/AuthMethod.java | 68 + .../apache/hadoop/hbase/security/KerberosInfo.java | 39 + .../hadoop/hbase/security/QualityOfProtection.java | 37 + .../apache/hadoop/hbase/security/SaslStatus.java | 31 + .../apache/hadoop/hbase/security/SaslUtils.java | 87 + .../apache/hadoop/hbase/security/TokenInfo.java | 38 + .../org/apache/hadoop/hbase/security/User.java | 407 ++++ .../org/apache/hadoop/hbase/util/Addressing.java | 80 + .../java/org/apache/hadoop/hbase/util/Classes.java | 83 + .../org/apache/hadoop/hbase/util/HasThread.java | 100 + .../java/org/apache/hadoop/hbase/util/Hash.java | 137 ++ .../org/apache/hadoop/hbase/util/HashedBytes.java | 65 + .../org/apache/hadoop/hbase/util/JenkinsHash.java | 261 +++ .../java/org/apache/hadoop/hbase/util/Methods.java | 69 + .../org/apache/hadoop/hbase/util/MurmurHash.java | 92 + .../java/org/apache/hadoop/hbase/util/Pair.java | 135 ++ .../apache/hadoop/hbase/util/PairOfSameType.java | 115 + .../java/org/apache/hadoop/hbase/util/PoolMap.java | 451 ++++ .../org/apache/hadoop/hbase/util/ProtoUtil.java | 69 + .../org/apache/hadoop/hbase/util/RetryCounter.java | 69 + .../hadoop/hbase/util/RetryCounterFactory.java | 40 + .../java/org/apache/hadoop/hbase/util/Sleeper.java | 117 + .../hadoop/hbase/util/SoftValueSortedMap.java | 289 +++ .../java/org/apache/hadoop/hbase/util/Triple.java | 90 + .../org/apache/hadoop/hbase/util/Writables.java | 167 ++ .../java/org/apache/hadoop/hbase/Abortable.java | 45 - .../main/java/org/apache/hadoop/hbase/Chore.java | 122 - .../hadoop/hbase/ClockOutOfSyncException.java | 37 - .../java/org/apache/hadoop/hbase/ClusterId.java | 103 - .../org/apache/hadoop/hbase/ClusterStatus.java | 350 --- .../java/org/apache/hadoop/hbase/Coprocessor.java | 56 - .../hadoop/hbase/CoprocessorEnvironment.java | 55 - .../hadoop/hbase/DeserializationException.java | 43 - .../apache/hadoop/hbase/DoNotRetryIOException.java | 55 - .../hadoop/hbase/DroppedSnapshotException.java | 45 - .../hadoop/hbase/FailedSanityCheckException.java | 49 - .../org/apache/hadoop/hbase/HBaseException.java | 44 - .../org/apache/hadoop/hbase/HBaseIOException.java | 48 - .../org/apache/hadoop/hbase/HColumnDescriptor.java | 1143 ---------- .../java/org/apache/hadoop/hbase/HRegionInfo.java | 1094 --------- .../org/apache/hadoop/hbase/HRegionLocation.java | 128 
-- .../org/apache/hadoop/hbase/HTableDescriptor.java | 1301 ----------- .../hbase/InvalidFamilyOperationException.java | 54 - .../apache/hadoop/hbase/MasterAdminProtocol.java | 349 --- .../apache/hadoop/hbase/MasterMonitorProtocol.java | 99 - .../hadoop/hbase/MasterNotRunningException.java | 57 - .../org/apache/hadoop/hbase/MasterProtocol.java | 44 - .../hbase/NotAllMetaRegionsOnlineException.java | 46 - .../hadoop/hbase/NotServingRegionException.java | 56 - .../hbase/OutOfOrderScannerNextException.java | 38 - .../apache/hadoop/hbase/PleaseHoldException.java | 38 - .../org/apache/hadoop/hbase/RegionException.java | 47 - .../java/org/apache/hadoop/hbase/RegionLoad.java | 154 -- .../apache/hadoop/hbase/RegionMovedException.java | 126 -- .../hadoop/hbase/RegionServerStatusProtocol.java | 39 - .../hadoop/hbase/RegionTooBusyException.java | 47 - .../hadoop/hbase/RemoteExceptionHandler.java | 121 - .../main/java/org/apache/hadoop/hbase/Server.java | 51 - .../java/org/apache/hadoop/hbase/ServerLoad.java | 305 --- .../java/org/apache/hadoop/hbase/ServerName.java | 355 --- .../java/org/apache/hadoop/hbase/Stoppable.java | 38 - .../apache/hadoop/hbase/TableExistsException.java | 42 - .../hadoop/hbase/TableInfoMissingException.java | 47 - .../hadoop/hbase/TableNotDisabledException.java | 53 - .../hadoop/hbase/TableNotEnabledException.java | 53 - .../hadoop/hbase/TableNotFoundException.java | 39 - .../hadoop/hbase/UnknownRegionException.java | 35 - .../hadoop/hbase/UnknownRowLockException.java | 45 - .../hadoop/hbase/UnknownScannerException.java | 48 - .../apache/hadoop/hbase/YouAreDeadException.java | 38 - .../hadoop/hbase/ZooKeeperConnectionException.java | 53 - .../hadoop/hbase/catalog/CatalogTracker.java | 703 ------ .../apache/hadoop/hbase/catalog/MetaReader.java | 643 ------ .../hadoop/hbase/client/AbstractClientScanner.java | 77 - .../org/apache/hadoop/hbase/client/Action.java | 80 - .../apache/hadoop/hbase/client/AdminProtocol.java | 37 - .../org/apache/hadoop/hbase/client/Append.java | 90 - .../org/apache/hadoop/hbase/client/Attributes.java | 51 - .../apache/hadoop/hbase/client/ClientProtocol.java | 39 - .../apache/hadoop/hbase/client/ClientScanner.java | 405 ---- .../hadoop/hbase/client/ConnectionUtils.java | 52 - .../org/apache/hadoop/hbase/client/Delete.java | 256 --- .../java/org/apache/hadoop/hbase/client/Get.java | 425 ---- .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 2171 ------------------ .../apache/hadoop/hbase/client/HConnection.java | 364 --- .../hadoop/hbase/client/HConnectionManager.java | 2378 -------------------- .../org/apache/hadoop/hbase/client/HTable.java | 1300 ----------- .../apache/hadoop/hbase/client/HTableFactory.java | 49 - .../hadoop/hbase/client/HTableInterface.java | 557 ----- .../hbase/client/HTableInterfaceFactory.java | 52 - .../org/apache/hadoop/hbase/client/HTablePool.java | 552 ----- .../org/apache/hadoop/hbase/client/HTableUtil.java | 141 -- .../org/apache/hadoop/hbase/client/Increment.java | 277 --- .../apache/hadoop/hbase/client/IsolationLevel.java | 59 - .../client/MasterAdminKeepAliveConnection.java | 44 - .../client/MasterMonitorKeepAliveConnection.java | 44 - .../apache/hadoop/hbase/client/MetaScanner.java | 489 ---- .../apache/hadoop/hbase/client/MultiAction.java | 90 - .../apache/hadoop/hbase/client/MultiResponse.java | 85 - .../org/apache/hadoop/hbase/client/Mutation.java | 232 -- .../hbase/client/NoServerForRegionException.java | 45 - .../org/apache/hadoop/hbase/client/Operation.java | 113 - .../hbase/client/OperationWithAttributes.java | 
108 - .../java/org/apache/hadoop/hbase/client/Put.java | 357 --- .../hbase/client/RegionOfflineException.java | 39 - .../org/apache/hadoop/hbase/client/Result.java | 709 ------ .../apache/hadoop/hbase/client/ResultScanner.java | 54 - .../hbase/client/RetriesExhaustedException.java | 109 - .../RetriesExhaustedWithDetailsException.java | 152 -- .../java/org/apache/hadoop/hbase/client/Row.java | 34 - .../org/apache/hadoop/hbase/client/RowLock.java | 66 - .../apache/hadoop/hbase/client/RowMutations.java | 103 - .../java/org/apache/hadoop/hbase/client/Scan.java | 652 ------ .../hadoop/hbase/client/ScannerCallable.java | 322 --- .../hbase/client/ScannerTimeoutException.java | 44 - .../apache/hadoop/hbase/client/ServerCallable.java | 243 -- .../client/UnmodifyableHColumnDescriptor.java | 96 - .../hbase/client/UnmodifyableHRegionInfo.java | 53 - .../hbase/client/UnmodifyableHTableDescriptor.java | 127 -- .../hbase/client/ZooKeeperKeepAliveConnection.java | 54 - .../client/coprocessor/AggregationClient.java | 701 ------ .../hadoop/hbase/client/coprocessor/Batch.java | 74 - .../client/coprocessor/LongColumnInterpreter.java | 141 -- .../hbase/client/coprocessor/package-info.java | 226 -- .../hadoop/hbase/client/metrics/ScanMetrics.java | 142 -- .../apache/hadoop/hbase/client/package-info.java | 185 -- .../hbase/client/replication/ReplicationAdmin.java | 209 -- .../hbase/coprocessor/ColumnInterpreter.java | 164 -- .../hadoop/hbase/filter/BinaryComparator.java | 89 - .../hbase/filter/BinaryPrefixComparator.java | 91 - .../apache/hadoop/hbase/filter/BitComparator.java | 133 -- .../hadoop/hbase/filter/ByteArrayComparable.java | 100 - .../hadoop/hbase/filter/ColumnCountGetFilter.java | 121 - .../hbase/filter/ColumnPaginationFilter.java | 142 -- .../hadoop/hbase/filter/ColumnPrefixFilter.java | 141 -- .../hadoop/hbase/filter/ColumnRangeFilter.java | 229 -- .../apache/hadoop/hbase/filter/CompareFilter.java | 180 -- .../hadoop/hbase/filter/DependentColumnFilter.java | 289 --- .../apache/hadoop/hbase/filter/FamilyFilter.java | 130 -- .../org/apache/hadoop/hbase/filter/Filter.java | 196 -- .../org/apache/hadoop/hbase/filter/FilterBase.java | 170 -- .../org/apache/hadoop/hbase/filter/FilterList.java | 378 ---- .../apache/hadoop/hbase/filter/FilterWrapper.java | 151 -- .../hadoop/hbase/filter/FirstKeyOnlyFilter.java | 114 - .../FirstKeyValueMatchingQualifiersFilter.java | 124 - .../apache/hadoop/hbase/filter/FuzzyRowFilter.java | 333 --- .../hadoop/hbase/filter/InclusiveStopFilter.java | 128 -- .../hbase/filter/IncompatibleFilterException.java | 44 - .../hbase/filter/InvalidRowFilterException.java | 45 - .../apache/hadoop/hbase/filter/KeyOnlyFilter.java | 102 - .../hbase/filter/MultipleColumnPrefixFilter.java | 199 -- .../apache/hadoop/hbase/filter/NullComparator.java | 88 - .../org/apache/hadoop/hbase/filter/PageFilter.java | 126 -- .../apache/hadoop/hbase/filter/ParseConstants.java | 263 --- .../apache/hadoop/hbase/filter/ParseFilter.java | 859 ------- .../apache/hadoop/hbase/filter/PrefixFilter.java | 123 - .../hadoop/hbase/filter/QualifierFilter.java | 129 -- .../hadoop/hbase/filter/RandomRowFilter.java | 150 -- .../hadoop/hbase/filter/RegexStringComparator.java | 174 -- .../org/apache/hadoop/hbase/filter/RowFilter.java | 144 -- .../filter/SingleColumnValueExcludeFilter.java | 178 -- .../hbase/filter/SingleColumnValueFilter.java | 389 ---- .../org/apache/hadoop/hbase/filter/SkipFilter.java | 145 -- .../hadoop/hbase/filter/SubstringComparator.java | 112 - .../hadoop/hbase/filter/TimestampsFilter.java | 175 -- 
.../apache/hadoop/hbase/filter/ValueFilter.java | 125 - .../hadoop/hbase/filter/WhileMatchFilter.java | 145 -- .../apache/hadoop/hbase/filter/package-info.java | 34 - .../hadoop/hbase/io/DataInputInputStream.java | 60 - .../hadoop/hbase/io/DataOutputOutputStream.java | 67 - .../hadoop/hbase/io/HbaseObjectWritable.java | 806 ------- .../java/org/apache/hadoop/hbase/io/TimeRange.java | 200 -- .../apache/hadoop/hbase/io/WritableWithSize.java | 38 - .../apache/hadoop/hbase/io/hfile/CacheConfig.java | 2 +- .../org/apache/hadoop/hbase/io/hfile/HFile.java | 5 - .../hadoop/hbase/ipc/BlockingRpcCallback.java | 73 - .../org/apache/hadoop/hbase/ipc/ClientCache.java | 104 - .../hadoop/hbase/ipc/CoprocessorRpcChannel.java | 68 - .../org/apache/hadoop/hbase/ipc/HBaseClient.java | 1504 ------------- .../apache/hadoop/hbase/ipc/HBaseClientRPC.java | 294 --- .../org/apache/hadoop/hbase/ipc/HBaseServer.java | 31 +- .../org/apache/hadoop/hbase/ipc/Invocation.java | 219 -- .../hbase/ipc/MasterCoprocessorRpcChannel.java | 88 - .../hadoop/hbase/ipc/ProtobufRpcClientEngine.java | 194 -- .../hadoop/hbase/ipc/ProtobufRpcServerEngine.java | 3 +- .../apache/hadoop/hbase/ipc/ProtocolSignature.java | 243 -- .../hbase/ipc/RegionCoprocessorRpcChannel.java | 103 - .../apache/hadoop/hbase/ipc/RpcClientEngine.java | 42 - .../hbase/ipc/ServerNotRunningYetException.java | 32 - .../hadoop/hbase/ipc/ServerRpcController.java | 133 -- .../apache/hadoop/hbase/ipc/VersionedProtocol.java | 59 - .../hadoop/hbase/mapreduce/HFileOutputFormat.java | 2 +- .../org/apache/hadoop/hbase/master/HMaster.java | 1 - .../apache/hadoop/hbase/master/RegionState.java | 275 --- .../hbase/protobuf/ProtobufReplicationUtil.java | 154 ++ .../apache/hadoop/hbase/protobuf/ProtobufUtil.java | 1920 ---------------- .../hadoop/hbase/protobuf/RequestConverter.java | 1216 ---------- .../hadoop/hbase/protobuf/ResponseConverter.java | 282 --- .../hadoop/hbase/regionserver/BloomType.java | 35 - .../hadoop/hbase/regionserver/HRegionServer.java | 5 +- .../regionserver/NoSuchColumnFamilyException.java | 42 - .../hbase/regionserver/RegionOpeningState.java | 31 - .../regionserver/RegionServerRunningException.java | 46 - .../regionserver/RegionServerStoppedException.java | 35 - .../hbase/regionserver/WrongRegionException.java | 46 - .../regionserver/wal/FailedLogCloseException.java | 46 - .../wal/OrphanHLogAfterSplitException.java | 42 - .../hadoop/hbase/replication/ReplicationPeer.java | 206 -- .../hbase/replication/ReplicationZookeeper.java | 1104 --------- .../regionserver/ReplicationSource.java | 6 +- .../hbase/security/AccessDeniedException.java | 39 - .../hadoop/hbase/security/HBaseSaslRpcClient.java | 279 --- .../hadoop/hbase/security/HBaseSaslRpcServer.java | 139 +- .../apache/hadoop/hbase/security/KerberosInfo.java | 39 - .../apache/hadoop/hbase/security/TokenInfo.java | 38 - .../org/apache/hadoop/hbase/security/User.java | 407 ---- .../hadoop/hbase/security/access/Permission.java | 199 -- .../hbase/security/access/TablePermission.java | 307 --- .../hbase/security/access/UserPermission.java | 167 -- .../token/AuthenticationTokenIdentifier.java | 185 -- .../token/AuthenticationTokenSelector.java | 54 - .../org/apache/hadoop/hbase/util/Addressing.java | 80 - .../java/org/apache/hadoop/hbase/util/Classes.java | 83 - .../org/apache/hadoop/hbase/util/HasThread.java | 100 - .../java/org/apache/hadoop/hbase/util/Hash.java | 137 -- .../org/apache/hadoop/hbase/util/HashedBytes.java | 65 - .../org/apache/hadoop/hbase/util/JenkinsHash.java | 261 --- 
.../java/org/apache/hadoop/hbase/util/Methods.java | 69 - .../org/apache/hadoop/hbase/util/MurmurHash.java | 92 - .../java/org/apache/hadoop/hbase/util/Pair.java | 135 -- .../apache/hadoop/hbase/util/PairOfSameType.java | 115 - .../java/org/apache/hadoop/hbase/util/PoolMap.java | 451 ---- .../org/apache/hadoop/hbase/util/ProtoUtil.java | 69 - .../org/apache/hadoop/hbase/util/RetryCounter.java | 69 - .../hadoop/hbase/util/RetryCounterFactory.java | 40 - .../java/org/apache/hadoop/hbase/util/Sleeper.java | 117 - .../hadoop/hbase/util/SoftValueSortedMap.java | 289 --- .../java/org/apache/hadoop/hbase/util/Triple.java | 90 - .../org/apache/hadoop/hbase/util/Writables.java | 167 -- .../hadoop/hbase/zookeeper/EmptyWatcher.java | 34 - .../apache/hadoop/hbase/zookeeper/HQuorumPeer.java | 160 -- .../hbase/zookeeper/MasterAddressTracker.java | 181 -- .../hadoop/hbase/zookeeper/MetaNodeTracker.java | 48 - .../hbase/zookeeper/RecoverableZooKeeper.java | 598 ----- .../hadoop/hbase/zookeeper/RootRegionTracker.java | 184 -- .../apache/hadoop/hbase/zookeeper/ZKClusterId.java | 80 - .../apache/hadoop/hbase/zookeeper/ZKConfig.java | 273 --- .../org/apache/hadoop/hbase/zookeeper/ZKTable.java | 368 --- .../hadoop/hbase/zookeeper/ZKTableReadOnly.java | 161 -- .../org/apache/hadoop/hbase/zookeeper/ZKUtil.java | 1425 ------------ .../hadoop/hbase/zookeeper/ZooKeeperListener.java | 82 - .../hbase/zookeeper/ZooKeeperNodeTracker.java | 255 --- .../hadoop/hbase/zookeeper/ZooKeeperWatcher.java | 474 ---- .../apache/hadoop/hbase/client/TestAttributes.java | 168 -- .../org/apache/hadoop/hbase/client/TestGet.java | 110 - .../apache/hadoop/hbase/client/TestOperation.java | 372 --- .../apache/hadoop/hbase/client/TestPutDotHas.java | 76 - .../org/apache/hadoop/hbase/client/TestScan.java | 111 - .../hbase/regionserver/CreateRandomStoreFile.java | 4 +- .../hadoop/hbase/regionserver/TestStoreFile.java | 4 +- 463 files changed, 50498 insertions(+), 50416 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java create mode 100644 
hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableUtil.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterMonitorKeepAliveConnection.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java create mode 100644 
hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowLock.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ByteArrayComparable.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java create mode 100644 
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/filter/package-info.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ClientCache.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClientRPC.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcClientEngine.java create mode 100644 
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ProtocolSignature.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientEngine.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/VersionedProtocol.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/RegionOpeningState.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableReadOnly.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperListener.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java create mode 
100644 hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/Abortable.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/Chore.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/DeserializationException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/FailedSanityCheckException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseIOException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/OutOfOrderScannerNextException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/RegionException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/Stoppable.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/TableExistsException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/io/DataInputInputStream.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/io/DataOutputOutputStream.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java create mode 100644 
hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerStoppedException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/wal/OrphanHLogAfterSplitException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/security/KerberosInfo.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/security/QualityOfProtection.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/security/SaslUtils.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/security/TokenInfo.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/HasThread.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/Hash.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/Methods.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/ProtoUtil.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounterFactory.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/Writables.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/Abortable.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ClusterId.java delete mode 100644 
hbase-server/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/Coprocessor.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/DeserializationException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/FailedSanityCheckException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseIOException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/OutOfOrderScannerNextException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/RegionException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/RegionLoad.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ServerLoad.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ServerName.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/Stoppable.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/TableExistsException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java delete mode 100644 
hbase-server/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/Action.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/Append.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/Attributes.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/Get.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnection.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTable.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableUtil.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/Increment.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/MasterMonitorKeepAliveConnection.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/Mutation.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/Operation.java delete mode 100644 
hbase-server/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/Put.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/Result.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/Row.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/RowLock.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/Scan.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/package-info.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ByteArrayComparable.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java delete 
mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/Filter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/filter/package-info.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/DataInputInputStream.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/DataOutputOutputStream.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java delete mode 100644 
hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ClientCache.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClientRPC.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcClientEngine.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtocolSignature.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientEngine.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/VersionedProtocol.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufReplicationUtil.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionOpeningState.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerStoppedException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/OrphanHLogAfterSplitException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/KerberosInfo.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/TokenInfo.java delete mode 100644 
hbase-server/src/main/java/org/apache/hadoop/hbase/security/User.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/Addressing.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/Classes.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/HasThread.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/Hash.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/Methods.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/Pair.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/ProtoUtil.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/RetryCounterFactory.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/Triple.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/Writables.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableReadOnly.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperListener.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java 
delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGet.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScan.java diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java new file mode 100644 index 0000000..a8e8560 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import java.util.UUID; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * The identifier for this cluster. + * It is serialized to the filesystem and up into zookeeper. This is a container for the id. + * Also knows how to serialize and deserialize the cluster id. + */ +@InterfaceAudience.Private +public class ClusterId { + private final String id; + + /** + * New ClusterID. Generates a uniqueid. + */ + public ClusterId() { + this(UUID.randomUUID().toString()); + } + + ClusterId(final String uuid) { + this.id = uuid; + } + + /** + * @return The clusterid serialized using pb w/ pb magic prefix + */ + public byte [] toByteArray() { + return ProtobufUtil.prependPBMagic(convert().toByteArray()); + } + + /** + * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix + * @return An instance of {@link ClusterId} made from bytes + * @throws DeserializationException + * @see #toByteArray() + */ + public static ClusterId parseFrom(final byte [] bytes) throws DeserializationException { + if (ProtobufUtil.isPBMagicPrefix(bytes)) { + int pblen = ProtobufUtil.lengthOfPBMagic(); + ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder(); + ClusterIdProtos.ClusterId cid = null; + try { + cid = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return convert(cid); + } else { + // Presume it was written out this way, the old way. 
+ return new ClusterId(Bytes.toString(bytes)); + } + } + + /** + * @return A pb instance to represent this instance. + */ + ClusterIdProtos.ClusterId convert() { + ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder(); + return builder.setClusterId(this.id).build(); + } + + /** + * @param cid + * @return A {@link ClusterId} made from the passed in cid + */ + static ClusterId convert(final ClusterIdProtos.ClusterId cid) { + return new ClusterId(cid.getClusterId()); + } + + /** + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + return this.id; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java new file mode 100644 index 0000000..d9dfac0 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -0,0 +1,350 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.Map; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition; +import org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.VersionedWritable; + +import com.google.protobuf.ByteString; + +/** + * Status information on the HBase cluster. + *
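The ClusterId class added above round-trips through protobuf: toByteArray() prepends the pb magic prefix, while parseFrom() accepts either that format or the older raw-string encoding. A minimal usage sketch, not part of the patch itself; the wrapper class is illustrative, and it assumes DeserializationException lives in org.apache.hadoop.hbase, as its unqualified use in parseFrom suggests:

import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.DeserializationException;

public class ClusterIdRoundTrip {
  public static void main(String[] args) throws DeserializationException {
    ClusterId original = new ClusterId();      // generates a fresh UUID-based id
    byte[] bytes = original.toByteArray();     // pb-serialized, with the pb magic prefix
    ClusterId restored = ClusterId.parseFrom(bytes);
    System.out.println(original + " -> " + restored);
  }
}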

+ * ClusterStatus provides clients with information such as: + *

+ */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ClusterStatus extends VersionedWritable { + /** + * Version for object serialization. Incremented for changes in serialized + * representation. + *
+ * <dl>
+ *   <dt>0</dt> <dd>Initial version</dd>
+ *   <dt>1</dt> <dd>Added cluster ID</dd>
+ *   <dt>2</dt> <dd>Added Map of ServerName to ServerLoad</dd>
+ *   <dt>3</dt> <dd>Added master and backupMasters</dd>
+ * </dl>
+ */ + private static final byte VERSION = 2; + + private String hbaseVersion; + private Map liveServers; + private Collection deadServers; + private ServerName master; + private Collection backupMasters; + private Map intransition; + private String clusterId; + private String[] masterCoprocessors; + private boolean balancerOn; + + /** + * Constructor, for Writable + * @deprecated Used by Writables and Writables are going away. + */ + @Deprecated + public ClusterStatus() { + super(); + } + + public ClusterStatus(final String hbaseVersion, final String clusterid, + final Map servers, + final Collection deadServers, + final ServerName master, + final Collection backupMasters, + final Map rit, + final String[] masterCoprocessors, + final boolean balancerOn) { + this.hbaseVersion = hbaseVersion; + + this.liveServers = servers; + this.deadServers = deadServers; + this.master = master; + this.backupMasters = backupMasters; + this.intransition = rit; + this.clusterId = clusterid; + this.masterCoprocessors = masterCoprocessors; + this.balancerOn = balancerOn; + } + + /** + * @return the names of region servers on the dead list + */ + public Collection getDeadServerNames() { + return Collections.unmodifiableCollection(deadServers); + } + + /** + * @return the number of region servers in the cluster + */ + public int getServersSize() { + return liveServers.size(); + } + + /** + * @return the number of dead region servers in the cluster + */ + public int getDeadServers() { + return deadServers.size(); + } + + /** + * @return the average cluster load + */ + public double getAverageLoad() { + int load = getRegionsCount(); + return (double)load / (double)getServersSize(); + } + + /** + * @return the number of regions deployed on the cluster + */ + public int getRegionsCount() { + int count = 0; + for (Map.Entry e: this.liveServers.entrySet()) { + count += e.getValue().getNumberOfRegions(); + } + return count; + } + + /** + * @return the number of requests since last report + */ + public int getRequestsCount() { + int count = 0; + for (Map.Entry e: this.liveServers.entrySet()) { + count += e.getValue().getTotalNumberOfRequests(); + } + return count; + } + + /** + * @return the HBase version string as reported by the HMaster + */ + public String getHBaseVersion() { + return hbaseVersion; + } + + /** + * @see java.lang.Object#equals(java.lang.Object) + */ + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ClusterStatus)) { + return false; + } + return (getVersion() == ((ClusterStatus)o).getVersion()) && + getHBaseVersion().equals(((ClusterStatus)o).getHBaseVersion()) && + this.liveServers.equals(((ClusterStatus)o).liveServers) && + this.deadServers.containsAll(((ClusterStatus)o).deadServers) && + Arrays.equals(this.masterCoprocessors, + ((ClusterStatus)o).masterCoprocessors) && + this.master.equals(((ClusterStatus)o).master) && + this.backupMasters.containsAll(((ClusterStatus)o).backupMasters); + } + + /** + * @see java.lang.Object#hashCode() + */ + public int hashCode() { + return VERSION + hbaseVersion.hashCode() + this.liveServers.hashCode() + + this.deadServers.hashCode() + this.master.hashCode() + + this.backupMasters.hashCode(); + } + + /** @return the object version number */ + public byte getVersion() { + return VERSION; + } + + // + // Getters + // + + /** + * Returns detailed region server information: A list of + * {@link ServerName}. 
+ * @return region server information + * @deprecated Use {@link #getServers()} + */ + public Collection getServerInfo() { + return getServers(); + } + + public Collection getServers() { + return Collections.unmodifiableCollection(this.liveServers.keySet()); + } + + /** + * Returns detailed information about the current master {@link ServerName}. + * @return current master information if it exists + */ + public ServerName getMaster() { + return this.master; + } + + /** + * @return the number of backup masters in the cluster + */ + public int getBackupMastersSize() { + return this.backupMasters.size(); + } + + /** + * @return the names of backup masters + */ + public Collection getBackupMasters() { + return Collections.unmodifiableCollection(this.backupMasters); + } + + /** + * @param sn + * @return Server's load or null if not found. + */ + public ServerLoad getLoad(final ServerName sn) { + return this.liveServers.get(sn); + } + + public Map getRegionsInTransition() { + return this.intransition; + } + + public String getClusterId() { + return clusterId; + } + + public String[] getMasterCoprocessors() { + return masterCoprocessors; + } + + + public boolean isBalancerOn() { + return balancerOn; + } + + /** + * Convert a ClutserStatus to a protobuf ClusterStatus + * + * @return the protobuf ClusterStatus + */ + public ClusterStatusProtos.ClusterStatus convert() { + ClusterStatusProtos.ClusterStatus.Builder builder = ClusterStatusProtos.ClusterStatus.newBuilder(); + builder.setHbaseVersion(HBaseVersionFileContent.newBuilder().setVersion(getHBaseVersion())); + + for (Map.Entry entry : liveServers.entrySet()) { + LiveServerInfo.Builder lsi = + LiveServerInfo.newBuilder().setServer(ProtobufUtil.toServerName(entry.getKey())); + lsi.setServerLoad(entry.getValue().obtainServerLoadPB()); + builder.addLiveServers(lsi.build()); + } + for (ServerName deadServer : getDeadServerNames()) { + builder.addDeadServers(ProtobufUtil.toServerName(deadServer)); + } + for (Map.Entry rit : getRegionsInTransition().entrySet()) { + ClusterStatusProtos.RegionState rs = rit.getValue().convert(); + RegionSpecifier.Builder spec = + RegionSpecifier.newBuilder().setType(RegionSpecifierType.REGION_NAME); + spec.setValue(ByteString.copyFrom(Bytes.toBytes(rit.getKey()))); + + RegionInTransition pbRIT = + RegionInTransition.newBuilder().setSpec(spec.build()).setRegionState(rs).build(); + builder.addRegionsInTransition(pbRIT); + } + builder.setClusterId(new ClusterId(getClusterId()).convert()); + for (String coprocessor : getMasterCoprocessors()) { + builder.addMasterCoprocessors(HBaseProtos.Coprocessor.newBuilder().setName(coprocessor)); + } + builder.setMaster( + ProtobufUtil.toServerName(getMaster())); + for (ServerName backup : getBackupMasters()) { + builder.addBackupMasters(ProtobufUtil.toServerName(backup)); + } + builder.setBalancerOn(balancerOn); + return builder.build(); + } + + /** + * Convert a protobuf ClusterStatus to a ClusterStatus + * + * @param proto the protobuf ClusterStatus + * @return the converted ClusterStatus + */ + public static ClusterStatus convert(ClusterStatusProtos.ClusterStatus proto) { + Map servers = new HashMap(); + for (LiveServerInfo lsi : proto.getLiveServersList()) { + servers.put(ProtobufUtil.toServerName(lsi.getServer()), new ServerLoad(lsi.getServerLoad())); + } + Collection deadServers = new LinkedList(); + for (HBaseProtos.ServerName sn : proto.getDeadServersList()) { + deadServers.add(ProtobufUtil.toServerName(sn)); + } + Collection backupMasters = new LinkedList(); + for 
(HBaseProtos.ServerName sn : proto.getBackupMastersList()) { + backupMasters.add(ProtobufUtil.toServerName(sn)); + } + final Map rit = new HashMap(); + for (RegionInTransition region : proto.getRegionsInTransitionList()) { + String key = new String(region.getSpec().getValue().toByteArray()); + RegionState value = RegionState.convert(region.getRegionState()); + rit.put(key,value); + } + final int numMasterCoprocessors = proto.getMasterCoprocessorsCount(); + final String[] masterCoprocessors = new String[numMasterCoprocessors]; + for (int i = 0; i < numMasterCoprocessors; i++) { + masterCoprocessors[i] = proto.getMasterCoprocessors(i).getName(); + } + return new ClusterStatus(proto.getHbaseVersion().getVersion(), + ClusterId.convert(proto.getClusterId()).toString(),servers,deadServers, + ProtobufUtil.toServerName(proto.getMaster()),backupMasters,rit,masterCoprocessors, + proto.getBalancerOn()); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java new file mode 100644 index 0000000..88ecd2f --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java @@ -0,0 +1,56 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Coprocess interface. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface Coprocessor { + static final int VERSION = 1; + + /** Highest installation priority */ + static final int PRIORITY_HIGHEST = 0; + /** High (system) installation priority */ + static final int PRIORITY_SYSTEM = Integer.MAX_VALUE / 4; + /** Default installation priority for user coprocessors */ + static final int PRIORITY_USER = Integer.MAX_VALUE / 2; + /** Lowest installation priority */ + static final int PRIORITY_LOWEST = Integer.MAX_VALUE; + + /** + * Lifecycle state of a given coprocessor instance. + */ + public enum State { + UNINSTALLED, + INSTALLED, + STARTING, + ACTIVE, + STOPPING, + STOPPED + } + + // Interface + void start(CoprocessorEnvironment env) throws IOException; + + void stop(CoprocessorEnvironment env) throws IOException; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java new file mode 100644 index 0000000..3806426 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java @@ -0,0 +1,55 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.client.HTableInterface; + +/** + * Coprocessor environment state. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface CoprocessorEnvironment { + + /** @return the Coprocessor interface version */ + public int getVersion(); + + /** @return the HBase version as a string (e.g. "0.21.0") */ + public String getHBaseVersion(); + + /** @return the loaded coprocessor instance */ + public Coprocessor getInstance(); + + /** @return the priority assigned to the loaded coprocessor */ + public int getPriority(); + + /** @return the load sequence number */ + public int getLoadSequence(); + + /** @return the configuration */ + public Configuration getConfiguration(); + + /** + * @return an interface for accessing the given table + * @throws IOException + */ + public HTableInterface getTable(byte[] tableName) throws IOException; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java new file mode 100644 index 0000000..d4a4566 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -0,0 +1,1142 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
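The Coprocessor and CoprocessorEnvironment interfaces moved above define only a start/stop lifecycle plus read-only accessors on the environment. A minimal implementor might look like the following sketch; it is not part of the patch, and the class name and logging are illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;

public class NoOpCoprocessor implements Coprocessor {
  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    // The environment exposes read-only state such as the HBase version
    // and the priority assigned to this coprocessor instance.
    System.out.println("Started on HBase " + env.getHBaseVersion()
        + " with priority " + env.getPriority());
  }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException {
    // Nothing to release in this sketch.
  }
}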
+ */ +package org.apache.hadoop.hbase; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema; +import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.WritableComparable; + +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * An HColumnDescriptor contains information about a column family such as the + * number of versions, compression settings, etc. + * + * It is used as input when creating a table or adding a column. Once set, the + * parameters that specify a column cannot be changed without deleting the + * column and recreating it. If there is data stored in the column, it will be + * deleted when the column is deleted. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class HColumnDescriptor implements WritableComparable { + // For future backward compatibility + + // Version 3 was when column names become byte arrays and when we picked up + // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82. + // Version 5 was when bloom filter descriptors were removed. + // Version 6 adds metadata as a map where keys and values are byte[]. + // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217) + // Version 8 -- reintroduction of bloom filters, changed from boolean to enum + // Version 9 -- add data block encoding + private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 9; + + // These constants are used as FileInfo keys + public static final String COMPRESSION = "COMPRESSION"; + public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT"; + public static final String ENCODE_ON_DISK = + "ENCODE_ON_DISK"; + public static final String DATA_BLOCK_ENCODING = + "DATA_BLOCK_ENCODING"; + public static final String BLOCKCACHE = "BLOCKCACHE"; + public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE"; + public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE"; + public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE"; + public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE"; + + /** + * Size of storefile/hfile 'blocks'. Default is {@link #DEFAULT_BLOCKSIZE}. + * Use smaller block sizes for faster random-access at expense of larger + * indices (more memory consumption). 
+ */ + public static final String BLOCKSIZE = "BLOCKSIZE"; + + public static final String LENGTH = "LENGTH"; + public static final String TTL = "TTL"; + public static final String BLOOMFILTER = "BLOOMFILTER"; + public static final String FOREVER = "FOREVER"; + public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE"; + public static final String MIN_VERSIONS = "MIN_VERSIONS"; + public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS"; + + /** + * Default compression type. + */ + public static final String DEFAULT_COMPRESSION = + Compression.Algorithm.NONE.getName(); + + /** + * Default value of the flag that enables data block encoding on disk, as + * opposed to encoding in cache only. We encode blocks everywhere by default, + * as long as {@link #DATA_BLOCK_ENCODING} is not NONE. + */ + public static final boolean DEFAULT_ENCODE_ON_DISK = true; + + /** Default data block encoding algorithm. */ + public static final String DEFAULT_DATA_BLOCK_ENCODING = + DataBlockEncoding.NONE.toString(); + + /** + * Default number of versions of a record to keep. + */ + public static final int DEFAULT_VERSIONS = 3; + + /** + * Default is not to keep a minimum of versions. + */ + public static final int DEFAULT_MIN_VERSIONS = 0; + + /* + * Cache here the HCD value. + * Question: its OK to cache since when we're reenable, we create a new HCD? + */ + private volatile Integer blocksize = null; + + /** + * Default setting for whether to serve from memory or not. + */ + public static final boolean DEFAULT_IN_MEMORY = false; + + /** + * Default setting for preventing deleted from being collected immediately. + */ + public static final boolean DEFAULT_KEEP_DELETED = false; + + /** + * Default setting for whether to use a block cache or not. + */ + public static final boolean DEFAULT_BLOCKCACHE = true; + + /** + * Default setting for whether to cache data blocks on write if block caching + * is enabled. + */ + public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false; + + /** + * Default setting for whether to cache index blocks on write if block + * caching is enabled. + */ + public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false; + + /** + * Default size of blocks in files stored to the filesytem (hfiles). + */ + public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE; + + /** + * Default setting for whether or not to use bloomfilters. + */ + public static final String DEFAULT_BLOOMFILTER = BloomType.NONE.toString(); + + /** + * Default setting for whether to cache bloom filter blocks on write if block + * caching is enabled. + */ + public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false; + + /** + * Default time to live of cell contents. + */ + public static final int DEFAULT_TTL = HConstants.FOREVER; + + /** + * Default scope. + */ + public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL; + + /** + * Default setting for whether to evict cached blocks from the blockcache on + * close. 
+ */ + public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false; + + private final static Map DEFAULT_VALUES + = new HashMap(); + private final static Set RESERVED_KEYWORDS + = new HashSet(); + static { + DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER); + DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE)); + DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS)); + DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS)); + DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION); + DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL)); + DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE)); + DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY)); + DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE)); + DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED)); + DEFAULT_VALUES.put(ENCODE_ON_DISK, String.valueOf(DEFAULT_ENCODE_ON_DISK)); + DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING)); + DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE)); + DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE)); + DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE)); + DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE)); + for (String s : DEFAULT_VALUES.keySet()) { + RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s))); + } + } + + private static final int UNINITIALIZED = -1; + + // Column family name + private byte [] name; + + // Column metadata + protected final Map values = + new HashMap(); + + /* + * Cache the max versions rather than calculate it every time. + */ + private int cachedMaxVersions = UNINITIALIZED; + + /** + * Default constructor. Must be present for Writable. + * @deprecated Used by Writables and Writables are going away. + */ + @Deprecated + // Make this private rather than remove after deprecation period elapses. Its needed by pb + // deserializations. + public HColumnDescriptor() { + this.name = null; + } + + /** + * Construct a column descriptor specifying only the family name + * The other attributes are defaulted. + * + * @param familyName Column family name. Must be 'printable' -- digit or + * letter -- and may not contain a : + */ + public HColumnDescriptor(final String familyName) { + this(Bytes.toBytes(familyName)); + } + + /** + * Construct a column descriptor specifying only the family name + * The other attributes are defaulted. + * + * @param familyName Column family name. Must be 'printable' -- digit or + * letter -- and may not contain a : + */ + public HColumnDescriptor(final byte [] familyName) { + this (familyName == null || familyName.length <= 0? + HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS, + DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE, + DEFAULT_TTL, DEFAULT_BLOOMFILTER); + } + + /** + * Constructor. + * Makes a deep copy of the supplied descriptor. + * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor. + * @param desc The descriptor. + */ + public HColumnDescriptor(HColumnDescriptor desc) { + super(); + this.name = desc.name.clone(); + for (Map.Entry e: + desc.values.entrySet()) { + this.values.put(e.getKey(), e.getValue()); + } + setMaxVersions(desc.getMaxVersions()); + } + + /** + * Constructor + * @param familyName Column family name. 
Must be 'printable' -- digit or + * letter -- and may not contain a : + * @param maxVersions Maximum number of versions to keep + * @param compression Compression type + * @param inMemory If true, column data should be kept in an HRegionServer's + * cache + * @param blockCacheEnabled If true, MapFile blocks should be cached + * @param timeToLive Time-to-live of cell contents, in seconds + * (use HConstants.FOREVER for unlimited TTL) + * @param bloomFilter Bloom filter type for this column + * + * @throws IllegalArgumentException if passed a family name that is made of + * other than 'word' characters: i.e. [a-zA-Z_0-9] or contains + * a : + * @throws IllegalArgumentException if the number of versions is <= 0 + * @deprecated use {@link #HColumnDescriptor(String)} and setters + */ + @Deprecated + public HColumnDescriptor(final byte [] familyName, final int maxVersions, + final String compression, final boolean inMemory, + final boolean blockCacheEnabled, + final int timeToLive, final String bloomFilter) { + this(familyName, maxVersions, compression, inMemory, blockCacheEnabled, + DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE); + } + + /** + * Constructor + * @param familyName Column family name. Must be 'printable' -- digit or + * letter -- and may not contain a : + * @param maxVersions Maximum number of versions to keep + * @param compression Compression type + * @param inMemory If true, column data should be kept in an HRegionServer's + * cache + * @param blockCacheEnabled If true, MapFile blocks should be cached + * @param blocksize Block size to use when writing out storefiles. Use + * smaller block sizes for faster random-access at expense of larger indices + * (more memory consumption). Default is usually 64k. + * @param timeToLive Time-to-live of cell contents, in seconds + * (use HConstants.FOREVER for unlimited TTL) + * @param bloomFilter Bloom filter type for this column + * @param scope The scope tag for this column + * + * @throws IllegalArgumentException if passed a family name that is made of + * other than 'word' characters: i.e. [a-zA-Z_0-9] or contains + * a : + * @throws IllegalArgumentException if the number of versions is <= 0 + * @deprecated use {@link #HColumnDescriptor(String)} and setters + */ + @Deprecated + public HColumnDescriptor(final byte [] familyName, final int maxVersions, + final String compression, final boolean inMemory, + final boolean blockCacheEnabled, final int blocksize, + final int timeToLive, final String bloomFilter, final int scope) { + this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED, + compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING, + inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter, + scope); + } + + /** + * Constructor + * @param familyName Column family name. Must be 'printable' -- digit or + * letter -- and may not contain a : + * @param minVersions Minimum number of versions to keep + * @param maxVersions Maximum number of versions to keep + * @param keepDeletedCells Whether to retain deleted cells until they expire + * up to maxVersions versions. + * @param compression Compression type + * @param encodeOnDisk whether to use the specified data block encoding + * on disk. If false, the encoding will be used in cache only. 
+ * @param dataBlockEncoding data block encoding + * @param inMemory If true, column data should be kept in an HRegionServer's + * cache + * @param blockCacheEnabled If true, MapFile blocks should be cached + * @param blocksize Block size to use when writing out storefiles. Use + * smaller blocksizes for faster random-access at expense of larger indices + * (more memory consumption). Default is usually 64k. + * @param timeToLive Time-to-live of cell contents, in seconds + * (use HConstants.FOREVER for unlimited TTL) + * @param bloomFilter Bloom filter type for this column + * @param scope The scope tag for this column + * + * @throws IllegalArgumentException if passed a family name that is made of + * other than 'word' characters: i.e. [a-zA-Z_0-9] or contains + * a : + * @throws IllegalArgumentException if the number of versions is <= 0 + * @deprecated use {@link #HColumnDescriptor(String)} and setters + */ + @Deprecated + public HColumnDescriptor(final byte[] familyName, final int minVersions, + final int maxVersions, final boolean keepDeletedCells, + final String compression, final boolean encodeOnDisk, + final String dataBlockEncoding, final boolean inMemory, + final boolean blockCacheEnabled, final int blocksize, + final int timeToLive, final String bloomFilter, final int scope) { + isLegalFamilyName(familyName); + this.name = familyName; + + if (maxVersions <= 0) { + // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions". + // Until there is support, consider 0 or < 0 -- a configuration error. + throw new IllegalArgumentException("Maximum versions must be positive"); + } + + if (minVersions > 0) { + if (timeToLive == HConstants.FOREVER) { + throw new IllegalArgumentException("Minimum versions requires TTL."); + } + if (minVersions >= maxVersions) { + throw new IllegalArgumentException("Minimum versions must be < " + + "maximum versions."); + } + } + + setMaxVersions(maxVersions); + setMinVersions(minVersions); + setKeepDeletedCells(keepDeletedCells); + setInMemory(inMemory); + setBlockCacheEnabled(blockCacheEnabled); + setTimeToLive(timeToLive); + setCompressionType(Compression.Algorithm. + valueOf(compression.toUpperCase())); + setEncodeOnDisk(encodeOnDisk); + setDataBlockEncoding(DataBlockEncoding. + valueOf(dataBlockEncoding.toUpperCase())); + setBloomFilterType(BloomType. + valueOf(bloomFilter.toUpperCase())); + setBlocksize(blocksize); + setScope(scope); + } + + /** + * @param b Family name. + * @return b + * @throws IllegalArgumentException If not null and not a legitimate family + * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because + * b can be null when deserializing). Cannot start with a '.' + * either. Also Family can not be an empty value. + */ + public static byte [] isLegalFamilyName(final byte [] b) { + if (b == null) { + return b; + } + Preconditions.checkArgument(b.length != 0, "Family name can not be empty"); + if (b[0] == '.') { + throw new IllegalArgumentException("Family names cannot start with a " + + "period: " + Bytes.toString(b)); + } + for (int i = 0; i < b.length; i++) { + if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') { + throw new IllegalArgumentException("Illegal character <" + b[i] + + ">. 
Family names cannot contain control characters or colons: " + + Bytes.toString(b)); + } + } + return b; + } + + /** + * @return Name of this column family + */ + public byte [] getName() { + return name; + } + + /** + * @return Name of this column family + */ + public String getNameAsString() { + return Bytes.toString(this.name); + } + + /** + * @param key The key. + * @return The value. + */ + public byte[] getValue(byte[] key) { + ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key)); + if (ibw == null) + return null; + return ibw.get(); + } + + /** + * @param key The key. + * @return The value as a string. + */ + public String getValue(String key) { + byte[] value = getValue(Bytes.toBytes(key)); + if (value == null) + return null; + return Bytes.toString(value); + } + + /** + * @return All values. + */ + public Map getValues() { + // shallow pointer copy + return Collections.unmodifiableMap(values); + } + + /** + * @param key The key. + * @param value The value. + * @return this (for chained invocation) + */ + public HColumnDescriptor setValue(byte[] key, byte[] value) { + values.put(new ImmutableBytesWritable(key), + new ImmutableBytesWritable(value)); + return this; + } + + /** + * @param key Key whose key and value we're to remove from HCD parameters. + */ + public void remove(final byte [] key) { + values.remove(new ImmutableBytesWritable(key)); + } + + /** + * @param key The key. + * @param value The value. + * @return this (for chained invocation) + */ + public HColumnDescriptor setValue(String key, String value) { + if (value == null) { + remove(Bytes.toBytes(key)); + } else { + setValue(Bytes.toBytes(key), Bytes.toBytes(value)); + } + return this; + } + + /** @return compression type being used for the column family */ + public Compression.Algorithm getCompression() { + String n = getValue(COMPRESSION); + if (n == null) { + return Compression.Algorithm.NONE; + } + return Compression.Algorithm.valueOf(n.toUpperCase()); + } + + /** @return compression type being used for the column family for major + compression */ + public Compression.Algorithm getCompactionCompression() { + String n = getValue(COMPRESSION_COMPACT); + if (n == null) { + return getCompression(); + } + return Compression.Algorithm.valueOf(n.toUpperCase()); + } + + /** @return maximum number of versions */ + public int getMaxVersions() { + if (this.cachedMaxVersions == UNINITIALIZED) { + String v = getValue(HConstants.VERSIONS); + this.cachedMaxVersions = Integer.parseInt(v); + } + return this.cachedMaxVersions; + } + + /** + * @param maxVersions maximum number of versions + * @return this (for chained invocation) + */ + public HColumnDescriptor setMaxVersions(int maxVersions) { + setValue(HConstants.VERSIONS, Integer.toString(maxVersions)); + cachedMaxVersions = maxVersions; + return this; + } + + /** + * @return The storefile/hfile blocksize for this column family. + */ + public synchronized int getBlocksize() { + if (this.blocksize == null) { + String value = getValue(BLOCKSIZE); + this.blocksize = (value != null)? + Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE); + } + return this.blocksize.intValue(); + } + + /** + * @param s Blocksize to use when writing out storefiles/hfiles on this + * column family. + * @return this (for chained invocation) + */ + public HColumnDescriptor setBlocksize(int s) { + setValue(BLOCKSIZE, Integer.toString(s)); + this.blocksize = null; + return this; + } + + /** + * @return Compression type setting. 
+ */ + public Compression.Algorithm getCompressionType() { + return getCompression(); + } + + /** + * Compression types supported in hbase. + * LZO is not bundled as part of the hbase distribution. + * See LZO Compression + * for how to enable it. + * @param type Compression type setting. + * @return this (for chained invocation) + */ + public HColumnDescriptor setCompressionType(Compression.Algorithm type) { + return setValue(COMPRESSION, type.getName().toUpperCase()); + } + + /** @return data block encoding algorithm used on disk */ + public DataBlockEncoding getDataBlockEncodingOnDisk() { + String encodeOnDiskStr = getValue(ENCODE_ON_DISK); + boolean encodeOnDisk; + if (encodeOnDiskStr == null) { + encodeOnDisk = DEFAULT_ENCODE_ON_DISK; + } else { + encodeOnDisk = Boolean.valueOf(encodeOnDiskStr); + } + + if (!encodeOnDisk) { + // No encoding on disk. + return DataBlockEncoding.NONE; + } + return getDataBlockEncoding(); + } + + /** + * Set the flag indicating that we only want to encode data block in cache + * but not on disk. + * @return this (for chained invocation) + */ + public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) { + return setValue(ENCODE_ON_DISK, String.valueOf(encodeOnDisk)); + } + + /** + * @return the data block encoding algorithm used in block cache and + * optionally on disk + */ + public DataBlockEncoding getDataBlockEncoding() { + String type = getValue(DATA_BLOCK_ENCODING); + if (type == null) { + type = DEFAULT_DATA_BLOCK_ENCODING; + } + return DataBlockEncoding.valueOf(type); + } + + /** + * Set data block encoding algorithm used in block cache. + * @param type What kind of data block encoding will be used. + * @return this (for chained invocation) + */ + public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) { + String name; + if (type != null) { + name = type.toString(); + } else { + name = DataBlockEncoding.NONE.toString(); + } + return setValue(DATA_BLOCK_ENCODING, name); + } + + /** + * @return Compression type setting. + */ + public Compression.Algorithm getCompactionCompressionType() { + return getCompactionCompression(); + } + + /** + * Compression types supported in hbase. + * LZO is not bundled as part of the hbase distribution. + * See LZO Compression + * for how to enable it. + * @param type Compression type setting. + * @return this (for chained invocation) + */ + public HColumnDescriptor setCompactionCompressionType( + Compression.Algorithm type) { + return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase()); + } + + /** + * @return True if we are to keep all in use HRegionServer cache. + */ + public boolean isInMemory() { + String value = getValue(HConstants.IN_MEMORY); + if (value != null) + return Boolean.valueOf(value).booleanValue(); + return DEFAULT_IN_MEMORY; + } + + /** + * @param inMemory True if we are to keep all values in the HRegionServer + * cache + * @return this (for chained invocation) + */ + public HColumnDescriptor setInMemory(boolean inMemory) { + return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory)); + } + + public boolean getKeepDeletedCells() { + String value = getValue(KEEP_DELETED_CELLS); + if (value != null) { + return Boolean.valueOf(value).booleanValue(); + } + return DEFAULT_KEEP_DELETED; + } + + /** + * @param keepDeletedCells True if deleted rows should not be collected + * immediately. 
+ * @return this (for chained invocation) + */ + public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) { + return setValue(KEEP_DELETED_CELLS, Boolean.toString(keepDeletedCells)); + } + + /** + * @return Time-to-live of cell contents, in seconds. + */ + public int getTimeToLive() { + String value = getValue(TTL); + return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL; + } + + /** + * @param timeToLive Time-to-live of cell contents, in seconds. + * @return this (for chained invocation) + */ + public HColumnDescriptor setTimeToLive(int timeToLive) { + return setValue(TTL, Integer.toString(timeToLive)); + } + + /** + * @return The minimum number of versions to keep. + */ + public int getMinVersions() { + String value = getValue(MIN_VERSIONS); + return (value != null)? Integer.valueOf(value).intValue(): 0; + } + + /** + * @param minVersions The minimum number of versions to keep. + * (used when timeToLive is set) + * @return this (for chained invocation) + */ + public HColumnDescriptor setMinVersions(int minVersions) { + return setValue(MIN_VERSIONS, Integer.toString(minVersions)); + } + + /** + * @return True if MapFile blocks should be cached. + */ + public boolean isBlockCacheEnabled() { + String value = getValue(BLOCKCACHE); + if (value != null) + return Boolean.valueOf(value).booleanValue(); + return DEFAULT_BLOCKCACHE; + } + + /** + * @param blockCacheEnabled True if MapFile blocks should be cached. + * @return this (for chained invocation) + */ + public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { + return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled)); + } + + /** + * @return bloom filter type used for new StoreFiles in ColumnFamily + */ + public BloomType getBloomFilterType() { + String n = getValue(BLOOMFILTER); + if (n == null) { + n = DEFAULT_BLOOMFILTER; + } + return BloomType.valueOf(n.toUpperCase()); + } + + /** + * @param bt bloom filter type + * @return this (for chained invocation) + */ + public HColumnDescriptor setBloomFilterType(final BloomType bt) { + return setValue(BLOOMFILTER, bt.toString()); + } + + /** + * @return the scope tag + */ + public int getScope() { + String value = getValue(REPLICATION_SCOPE); + if (value != null) { + return Integer.valueOf(value).intValue(); + } + return DEFAULT_REPLICATION_SCOPE; + } + + /** + * @param scope the scope tag + * @return this (for chained invocation) + */ + public HColumnDescriptor setScope(int scope) { + return setValue(REPLICATION_SCOPE, Integer.toString(scope)); + } + + /** + * @return true if we should cache data blocks on write + */ + public boolean shouldCacheDataOnWrite() { + String value = getValue(CACHE_DATA_ON_WRITE); + if (value != null) { + return Boolean.valueOf(value).booleanValue(); + } + return DEFAULT_CACHE_DATA_ON_WRITE; + } + + /** + * @param value true if we should cache data blocks on write + * @return this (for chained invocation) + */ + public HColumnDescriptor setCacheDataOnWrite(boolean value) { + return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value)); + } + + /** + * @return true if we should cache index blocks on write + */ + public boolean shouldCacheIndexesOnWrite() { + String value = getValue(CACHE_INDEX_ON_WRITE); + if (value != null) { + return Boolean.valueOf(value).booleanValue(); + } + return DEFAULT_CACHE_INDEX_ON_WRITE; + } + + /** + * @param value true if we should cache index blocks on write + * @return this (for chained invocation) + */ + public HColumnDescriptor setCacheIndexesOnWrite(boolean value) { + return 
setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value)); + } + + /** + * @return true if we should cache bloomfilter blocks on write + */ + public boolean shouldCacheBloomsOnWrite() { + String value = getValue(CACHE_BLOOMS_ON_WRITE); + if (value != null) { + return Boolean.valueOf(value).booleanValue(); + } + return DEFAULT_CACHE_BLOOMS_ON_WRITE; + } + + /** + * @param value true if we should cache bloomfilter blocks on write + * @return this (for chained invocation) + */ + public HColumnDescriptor setCacheBloomsOnWrite(boolean value) { + return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value)); + } + + /** + * @return true if we should evict cached blocks from the blockcache on + * close + */ + public boolean shouldEvictBlocksOnClose() { + String value = getValue(EVICT_BLOCKS_ON_CLOSE); + if (value != null) { + return Boolean.valueOf(value).booleanValue(); + } + return DEFAULT_EVICT_BLOCKS_ON_CLOSE; + } + + /** + * @param value true if we should evict cached blocks from the blockcache on + * close + * @return this (for chained invocation) + */ + public HColumnDescriptor setEvictBlocksOnClose(boolean value) { + return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value)); + } + + /** + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + StringBuilder s = new StringBuilder(); + s.append('{'); + s.append(HConstants.NAME); + s.append(" => '"); + s.append(Bytes.toString(name)); + s.append("'"); + s.append(getValues(true)); + s.append('}'); + return s.toString(); + } + + /** + * @return Column family descriptor with only the customized attributes. + */ + public String toStringCustomizedValues() { + StringBuilder s = new StringBuilder(); + s.append('{'); + s.append(HConstants.NAME); + s.append(" => '"); + s.append(Bytes.toString(name)); + s.append("'"); + s.append(getValues(false)); + s.append('}'); + return s.toString(); + } + + private StringBuilder getValues(boolean printDefaults) { + StringBuilder s = new StringBuilder(); + + boolean hasConfigKeys = false; + + // print all reserved keys first + for (ImmutableBytesWritable k : values.keySet()) { + if (!RESERVED_KEYWORDS.contains(k)) { + hasConfigKeys = true; + continue; + } + String key = Bytes.toString(k.get()); + String value = Bytes.toString(values.get(k).get()); + if (printDefaults + || !DEFAULT_VALUES.containsKey(key) + || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { + s.append(", "); + s.append(key); + s.append(" => "); + s.append('\'').append(value).append('\''); + } + } + + // print all non-reserved, advanced config keys as a separate subset + if (hasConfigKeys) { + s.append(", "); + s.append(HConstants.CONFIG).append(" => "); + s.append('{'); + boolean printComma = false; + for (ImmutableBytesWritable k : values.keySet()) { + if (RESERVED_KEYWORDS.contains(k)) { + continue; + } + String key = Bytes.toString(k.get()); + String value = Bytes.toString(values.get(k).get()); + if (printComma) { + s.append(", "); + } + printComma = true; + s.append('\'').append(key).append('\''); + s.append(" => "); + s.append('\'').append(value).append('\''); + } + s.append('}'); + } + return s; + } + + public static Map getDefaultValues() { + return Collections.unmodifiableMap(DEFAULT_VALUES); + } + + /** + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof HColumnDescriptor)) { + return false; + } + return compareTo((HColumnDescriptor)obj) == 0; + } + 
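Because each setter above returns this, a column family can be configured in one chained expression, and toStringCustomizedValues() then lists only the attributes that differ from DEFAULT_VALUES. A minimal sketch with an invented family name and settings (assumes the Bytes and BloomType imports already used by this class):

    // Illustrative sketch only; values are hypothetical.
    HColumnDescriptor cf = new HColumnDescriptor("d")
        .setTimeToLive(604800)             // TTL => one week, in seconds
        .setMinVersions(1)                 // keep one version even past the TTL
        .setBloomFilterType(BloomType.ROW)
        .setScope(1)                       // REPLICATION_SCOPE => 1
        .setKeepDeletedCells(true);
    // Prints something like:
    // {NAME => 'd', TTL => '604800', MIN_VERSIONS => '1',
    //  BLOOMFILTER => 'ROW', REPLICATION_SCOPE => '1', KEEP_DELETED_CELLS => 'true'}
    System.out.println(cf.toStringCustomizedValues());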
+ /** + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + int result = Bytes.hashCode(this.name); + result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode(); + result ^= values.hashCode(); + return result; + } + + /** + * @deprecated Writables are going away. Use pb {@link #parseFrom(byte[])} instead. + */ + @Deprecated + public void readFields(DataInput in) throws IOException { + int version = in.readByte(); + if (version < 6) { + if (version <= 2) { + Text t = new Text(); + t.readFields(in); + this.name = t.getBytes(); +// if(KeyValue.getFamilyDelimiterIndex(this.name, 0, this.name.length) +// > 0) { +// this.name = stripColon(this.name); +// } + } else { + this.name = Bytes.readByteArray(in); + } + this.values.clear(); + setMaxVersions(in.readInt()); + int ordinal = in.readInt(); + setCompressionType(Compression.Algorithm.values()[ordinal]); + setInMemory(in.readBoolean()); + setBloomFilterType(in.readBoolean() ? BloomType.ROW : BloomType.NONE); + if (getBloomFilterType() != BloomType.NONE && version < 5) { + // If a bloomFilter is enabled and the column descriptor is less than + // version 5, we need to skip over it to read the rest of the column + // descriptor. There are no BloomFilterDescriptors written to disk for + // column descriptors with a version number >= 5 + throw new UnsupportedClassVersionError(this.getClass().getName() + + " does not support backward compatibility with versions older " + + "than version 5"); + } + if (version > 1) { + setBlockCacheEnabled(in.readBoolean()); + } + if (version > 2) { + setTimeToLive(in.readInt()); + } + } else { + // version 6+ + this.name = Bytes.readByteArray(in); + this.values.clear(); + int numValues = in.readInt(); + for (int i = 0; i < numValues; i++) { + ImmutableBytesWritable key = new ImmutableBytesWritable(); + ImmutableBytesWritable value = new ImmutableBytesWritable(); + key.readFields(in); + value.readFields(in); + + // in version 8, the BloomFilter setting changed from bool to enum + if (version < 8 && Bytes.toString(key.get()).equals(BLOOMFILTER)) { + value.set(Bytes.toBytes( + Boolean.getBoolean(Bytes.toString(value.get())) + ? BloomType.ROW.toString() + : BloomType.NONE.toString())); + } + + values.put(key, value); + } + if (version == 6) { + // Convert old values. + setValue(COMPRESSION, Compression.Algorithm.NONE.getName()); + } + String value = getValue(HConstants.VERSIONS); + this.cachedMaxVersions = (value != null)? + Integer.valueOf(value).intValue(): DEFAULT_VERSIONS; + } + } + + /** + * @deprecated Writables are going away. Use {@link #toByteArray()} instead. 
+ */ + @Deprecated + public void write(DataOutput out) throws IOException { + out.writeByte(COLUMN_DESCRIPTOR_VERSION); + Bytes.writeByteArray(out, this.name); + out.writeInt(values.size()); + for (Map.Entry e: + values.entrySet()) { + e.getKey().write(out); + e.getValue().write(out); + } + } + + // Comparable + + public int compareTo(HColumnDescriptor o) { + int result = Bytes.compareTo(this.name, o.getName()); + if (result == 0) { + // punt on comparison for ordering, just calculate difference + result = this.values.hashCode() - o.values.hashCode(); + if (result < 0) + result = -1; + else if (result > 0) + result = 1; + } + return result; + } + + /** + * @return This instance serialized with pb with pb magic prefix + * @see #parseFrom(byte[]) + */ + public byte [] toByteArray() { + return ProtobufUtil.prependPBMagic(convert().toByteArray()); + } + + /** + * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix + * @return An instance of {@link HColumnDescriptor} made from bytes + * @throws DeserializationException + * @see #toByteArray() + */ + public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException { + if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic"); + int pblen = ProtobufUtil.lengthOfPBMagic(); + ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder(); + ColumnFamilySchema cfs = null; + try { + cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return convert(cfs); + } + + /** + * @param cfs + * @return An {@link HColumnDescriptor} made from the passed in cfs + */ + public static HColumnDescriptor convert(final ColumnFamilySchema cfs) { + // Use the empty constructor so we preserve the initial values set on construction for things + // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for + // unrelated-looking test failures that are hard to trace back to here. + HColumnDescriptor hcd = new HColumnDescriptor(); + hcd.name = cfs.getName().toByteArray(); + for (ColumnFamilySchema.Attribute a: cfs.getAttributesList()) { + hcd.setValue(a.getName().toByteArray(), a.getValue().toByteArray()); + } + return hcd; + } + + /** + * @return Convert this instance to a the pb column family type + */ + public ColumnFamilySchema convert() { + ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder(); + builder.setName(ByteString.copyFrom(getName())); + for (Map.Entry e: this.values.entrySet()) { + ColumnFamilySchema.Attribute.Builder aBuilder = ColumnFamilySchema.Attribute.newBuilder(); + aBuilder.setName(ByteString.copyFrom(e.getKey().get())); + aBuilder.setValue(ByteString.copyFrom(e.getValue().get())); + builder.addAttributes(aBuilder.build()); + } + return builder.build(); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java new file mode 100644 index 0000000..8bdb603 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -0,0 +1,1094 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.ByteArrayInputStream; +import java.io.DataInput; +import java.io.DataInputStream; +import java.io.DataOutput; +import java.io.EOFException; +import java.io.IOException; +import java.io.SequenceInputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.JenkinsHash; +import org.apache.hadoop.hbase.util.MD5Hash; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.PairOfSameType; +import org.apache.hadoop.io.DataInputBuffer; + +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * HRegion information. + * Contains HRegion id, start and end keys, a reference to this HRegions' table descriptor, etc. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class HRegionInfo implements Comparable { + /* + * There are two versions associated with HRegionInfo: HRegionInfo.VERSION and + * HConstants.META_VERSION. HRegionInfo.VERSION indicates the data structure's versioning + * while HConstants.META_VERSION indicates the versioning of the serialized HRIs stored in + * the META table. + * + * Pre-0.92: + * HRI.VERSION == 0 and HConstants.META_VERSION does not exist (is not stored at META table) + * HRegionInfo had an HTableDescriptor reference inside it. + * HRegionInfo is serialized as Writable to META table. + * For 0.92.x and 0.94.x: + * HRI.VERSION == 1 and HConstants.META_VERSION == 0 + * HRI no longer has HTableDescriptor in it. + * HRI is serialized as Writable to META table. + * For 0.96.x: + * HRI.VERSION == 1 and HConstants.META_VERSION == 1 + * HRI data structure is the same as 0.92 and 0.94 + * HRI is serialized as PB to META table. + * + * Versioning of HRegionInfo is deprecated. HRegionInfo does protobuf + * serialization using RegionInfo class, which has it's own versioning. + */ + @Deprecated + public static final byte VERSION = 1; + private static final Log LOG = LogFactory.getLog(HRegionInfo.class); + + /** + * The new format for a region name contains its encodedName at the end. + * The encoded name also serves as the directory name for the region + * in the filesystem. + * + * New region name format: + * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. 
+ * where, + * <encodedName> is a hex version of the MD5 hash of + * <tablename>,<startkey>,<regionIdTimestamp> + * + * The old region name format: + * <tablename>,<startkey>,<regionIdTimestamp> + * For region names in the old format, the encoded name is a 32-bit + * JenkinsHash integer value (in its decimal notation, string form). + *
+ * **NOTE** + * + * ROOT, the first META region, and regions created by an older + * version of HBase (0.20 or prior) will continue to use the + * old region name format. + */ + + /** Separator used to demarcate the encodedName in a region name + * in the new format. See description on new format above. + */ + private static final int ENC_SEPARATOR = '.'; + public static final int MD5_HEX_LENGTH = 32; + + /** + * Does region name contain its encoded name? + * @param regionName region name + * @return boolean indicating if this a new format region + * name which contains its encoded name. + */ + private static boolean hasEncodedName(final byte[] regionName) { + // check if region name ends in ENC_SEPARATOR + if ((regionName.length >= 1) + && (regionName[regionName.length - 1] == ENC_SEPARATOR)) { + // region name is new format. it contains the encoded name. + return true; + } + return false; + } + + /** + * @param regionName + * @return the encodedName + */ + public static String encodeRegionName(final byte [] regionName) { + String encodedName; + if (hasEncodedName(regionName)) { + // region is in new format: + // ,,/encodedName/ + encodedName = Bytes.toString(regionName, + regionName.length - MD5_HEX_LENGTH - 1, + MD5_HEX_LENGTH); + } else { + // old format region name. ROOT and first META region also + // use this format.EncodedName is the JenkinsHash value. + int hashVal = Math.abs(JenkinsHash.getInstance().hash(regionName, + regionName.length, 0)); + encodedName = String.valueOf(hashVal); + } + return encodedName; + } + + /** + * Use logging. + * @param encodedRegionName The encoded regionname. + * @return -ROOT- if passed 70236052 or + * .META. if passed 1028785192 else returns + * encodedRegionName + */ + public static String prettyPrint(final String encodedRegionName) { + if (encodedRegionName.equals("70236052")) { + return encodedRegionName + "/-ROOT-"; + } else if (encodedRegionName.equals("1028785192")) { + return encodedRegionName + "/.META."; + } + return encodedRegionName; + } + + /** HRegionInfo for root region */ + public static final HRegionInfo ROOT_REGIONINFO = + new HRegionInfo(0L, Bytes.toBytes("-ROOT-")); + + /** HRegionInfo for first meta region */ + public static final HRegionInfo FIRST_META_REGIONINFO = + new HRegionInfo(1L, Bytes.toBytes(".META.")); + + private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY; + // This flag is in the parent of a split while the parent is still referenced + // by daughter regions. We USED to set this flag when we disabled a table + // but now table state is kept up in zookeeper as of 0.90.0 HBase. + private boolean offLine = false; + private long regionId = -1; + private transient byte [] regionName = HConstants.EMPTY_BYTE_ARRAY; + private String regionNameStr = ""; + private boolean split = false; + private byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; + private int hashCode = -1; + //TODO: Move NO_HASH to HStoreFile which is really the only place it is used. 
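A minimal sketch of the naming scheme described above, with an invented table name, start key and region id; createRegionName and parseRegionName are defined further down in this class:

    // Illustrative sketch only; values are hypothetical.
    byte[] name = HRegionInfo.createRegionName(
        Bytes.toBytes("t1"), Bytes.toBytes("row-0"), 1356560000000L, true);
    // Bytes.toStringBinary(name) -> "t1,row-0,1356560000000.<32-char md5 hex>."
    String encoded = HRegionInfo.encodeRegionName(name);  // the md5 hex suffix
    byte[][] parts = HRegionInfo.parseRegionName(name);   // {tableName, startKey, id}; throws IOException if malformed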
+ public static final String NO_HASH = null; + private volatile String encodedName = NO_HASH; + private byte [] encodedNameAsBytes = null; + + // Current TableName + private byte[] tableName = null; + + private void setHashCode() { + int result = Arrays.hashCode(this.regionName); + result ^= this.regionId; + result ^= Arrays.hashCode(this.startKey); + result ^= Arrays.hashCode(this.endKey); + result ^= Boolean.valueOf(this.offLine).hashCode(); + result ^= Arrays.hashCode(this.tableName); + this.hashCode = result; + } + + + /** + * Private constructor used constructing HRegionInfo for the catalog root and + * first meta regions + */ + private HRegionInfo(long regionId, byte[] tableName) { + super(); + this.regionId = regionId; + this.tableName = tableName.clone(); + // Note: Root & First Meta regions names are still in old format + this.regionName = createRegionName(tableName, null, + regionId, false); + this.regionNameStr = Bytes.toStringBinary(this.regionName); + setHashCode(); + } + + /** Default constructor - creates empty object + * @deprecated Used by Writables and Writables are going away. + */ + @Deprecated + public HRegionInfo() { + super(); + } + + public HRegionInfo(final byte[] tableName) { + this(tableName, null, null); + } + + /** + * Construct HRegionInfo with explicit parameters + * + * @param tableName the table name + * @param startKey first key in region + * @param endKey end of key range + * @throws IllegalArgumentException + */ + public HRegionInfo(final byte[] tableName, final byte[] startKey, final byte[] endKey) + throws IllegalArgumentException { + this(tableName, startKey, endKey, false); + } + + + /** + * Construct HRegionInfo with explicit parameters + * + * @param tableName the table descriptor + * @param startKey first key in region + * @param endKey end of key range + * @param split true if this region has split and we have daughter regions + * regions that may or may not hold references to this region. + * @throws IllegalArgumentException + */ + public HRegionInfo(final byte[] tableName, final byte[] startKey, final byte[] endKey, + final boolean split) + throws IllegalArgumentException { + this(tableName, startKey, endKey, split, System.currentTimeMillis()); + } + + + /** + * Construct HRegionInfo with explicit parameters + * + * @param tableName the table descriptor + * @param startKey first key in region + * @param endKey end of key range + * @param split true if this region has split and we have daughter regions + * regions that may or may not hold references to this region. + * @param regionid Region id to use. + * @throws IllegalArgumentException + */ + public HRegionInfo(final byte[] tableName, final byte[] startKey, + final byte[] endKey, final boolean split, final long regionid) + throws IllegalArgumentException { + + super(); + if (tableName == null) { + throw new IllegalArgumentException("tableName cannot be null"); + } + this.tableName = tableName.clone(); + this.offLine = false; + this.regionId = regionid; + + this.regionName = createRegionName(this.tableName, startKey, regionId, true); + + this.regionNameStr = Bytes.toStringBinary(this.regionName); + this.split = split; + this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone(); + this.startKey = startKey == null? 
+ HConstants.EMPTY_START_ROW: startKey.clone(); + this.tableName = tableName.clone(); + setHashCode(); + } + + /** + * Costruct a copy of another HRegionInfo + * + * @param other + */ + public HRegionInfo(HRegionInfo other) { + super(); + this.endKey = other.getEndKey(); + this.offLine = other.isOffline(); + this.regionId = other.getRegionId(); + this.regionName = other.getRegionName(); + this.regionNameStr = Bytes.toStringBinary(this.regionName); + this.split = other.isSplit(); + this.startKey = other.getStartKey(); + this.hashCode = other.hashCode(); + this.encodedName = other.getEncodedName(); + this.tableName = other.tableName; + } + + + /** + * Make a region name of passed parameters. + * @param tableName + * @param startKey Can be null + * @param regionid Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format + * (such that it contains its encoded name?). + * @return Region name made of passed tableName, startKey and id + */ + public static byte [] createRegionName(final byte [] tableName, + final byte [] startKey, final long regionid, boolean newFormat) { + return createRegionName(tableName, startKey, Long.toString(regionid), newFormat); + } + + /** + * Make a region name of passed parameters. + * @param tableName + * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format + * (such that it contains its encoded name?). + * @return Region name made of passed tableName, startKey and id + */ + public static byte [] createRegionName(final byte [] tableName, + final byte [] startKey, final String id, boolean newFormat) { + return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat); + } + + /** + * Make a region name of passed parameters. + * @param tableName + * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format + * (such that it contains its encoded name?). + * @return Region name made of passed tableName, startKey and id + */ + public static byte [] createRegionName(final byte [] tableName, + final byte [] startKey, final byte [] id, boolean newFormat) { + byte [] b = new byte [tableName.length + 2 + id.length + + (startKey == null? 0: startKey.length) + + (newFormat ? (MD5_HEX_LENGTH + 2) : 0)]; + + int offset = tableName.length; + System.arraycopy(tableName, 0, b, 0, offset); + b[offset++] = HConstants.DELIMITER; + if (startKey != null && startKey.length > 0) { + System.arraycopy(startKey, 0, b, offset, startKey.length); + offset += startKey.length; + } + b[offset++] = HConstants.DELIMITER; + System.arraycopy(id, 0, b, offset, id.length); + offset += id.length; + + if (newFormat) { + // + // Encoded name should be built into the region name. + // + // Use the region name thus far (namely, ,,) + // to compute a MD5 hash to be used as the encoded name, and append + // it to the byte buffer. + // + String md5Hash = MD5Hash.getMD5AsHex(b, 0, offset); + byte [] md5HashBytes = Bytes.toBytes(md5Hash); + + if (md5HashBytes.length != MD5_HEX_LENGTH) { + LOG.error("MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH + + "; Got=" + md5HashBytes.length); + } + + // now append the bytes '..' 
to the end + b[offset++] = ENC_SEPARATOR; + System.arraycopy(md5HashBytes, 0, b, offset, MD5_HEX_LENGTH); + offset += MD5_HEX_LENGTH; + b[offset++] = ENC_SEPARATOR; + } + + return b; + } + + /** + * Gets the table name from the specified region name. + * @param regionName + * @return Table name. + */ + public static byte [] getTableName(byte [] regionName) { + int offset = -1; + for (int i = 0; i < regionName.length; i++) { + if (regionName[i] == HConstants.DELIMITER) { + offset = i; + break; + } + } + byte [] tableName = new byte[offset]; + System.arraycopy(regionName, 0, tableName, 0, offset); + return tableName; + } + + /** + * Separate elements of a regionName. + * @param regionName + * @return Array of byte[] containing tableName, startKey and id + * @throws IOException + */ + public static byte [][] parseRegionName(final byte [] regionName) + throws IOException { + int offset = -1; + for (int i = 0; i < regionName.length; i++) { + if (regionName[i] == HConstants.DELIMITER) { + offset = i; + break; + } + } + if(offset == -1) throw new IOException("Invalid regionName format"); + byte [] tableName = new byte[offset]; + System.arraycopy(regionName, 0, tableName, 0, offset); + offset = -1; + for (int i = regionName.length - 1; i > 0; i--) { + if(regionName[i] == HConstants.DELIMITER) { + offset = i; + break; + } + } + if(offset == -1) throw new IOException("Invalid regionName format"); + byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; + if(offset != tableName.length + 1) { + startKey = new byte[offset - tableName.length - 1]; + System.arraycopy(regionName, tableName.length + 1, startKey, 0, + offset - tableName.length - 1); + } + byte [] id = new byte[regionName.length - offset - 1]; + System.arraycopy(regionName, offset + 1, id, 0, + regionName.length - offset - 1); + byte [][] elements = new byte[3][]; + elements[0] = tableName; + elements[1] = startKey; + elements[2] = id; + return elements; + } + + /** @return the regionId */ + public long getRegionId(){ + return regionId; + } + + /** + * @return the regionName as an array of bytes. + * @see #getRegionNameAsString() + */ + public byte [] getRegionName(){ + return regionName; + } + + /** + * @return Region name as a String for use in logging, etc. + */ + public String getRegionNameAsString() { + if (hasEncodedName(this.regionName)) { + // new format region names already have their encoded name. + return this.regionNameStr; + } + + // old format. regionNameStr doesn't have the region name. + // + // + return this.regionNameStr + "." 
+ this.getEncodedName(); + } + + /** @return the encoded region name */ + public synchronized String getEncodedName() { + if (this.encodedName == NO_HASH) { + this.encodedName = encodeRegionName(this.regionName); + } + return this.encodedName; + } + + public synchronized byte [] getEncodedNameAsBytes() { + if (this.encodedNameAsBytes == null) { + this.encodedNameAsBytes = Bytes.toBytes(getEncodedName()); + } + return this.encodedNameAsBytes; + } + + /** @return the startKey */ + public byte [] getStartKey(){ + return startKey; + } + + /** @return the endKey */ + public byte [] getEndKey(){ + return endKey; + } + + /** + * Get current table name of the region + * @return byte array of table name + */ + public byte[] getTableName() { + if (tableName == null || tableName.length == 0) { + tableName = getTableName(getRegionName()); + } + return tableName; + } + + /** + * Get current table name as string + * @return string representation of current table + */ + public String getTableNameAsString() { + return Bytes.toString(tableName); + } + + /** + * Returns true if the given inclusive range of rows is fully contained + * by this region. For example, if the region is foo,a,g and this is + * passed ["b","c"] or ["a","c"] it will return true, but if this is passed + * ["b","z"] it will return false. + * @throws IllegalArgumentException if the range passed is invalid (ie end < start) + */ + public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { + if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) { + throw new IllegalArgumentException( + "Invalid range: " + Bytes.toStringBinary(rangeStartKey) + + " > " + Bytes.toStringBinary(rangeEndKey)); + } + + boolean firstKeyInRange = Bytes.compareTo(rangeStartKey, startKey) >= 0; + boolean lastKeyInRange = + Bytes.compareTo(rangeEndKey, endKey) < 0 || + Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); + return firstKeyInRange && lastKeyInRange; + } + + /** + * Return true if the given row falls in this region. + */ + public boolean containsRow(byte[] row) { + return Bytes.compareTo(row, startKey) >= 0 && + (Bytes.compareTo(row, endKey) < 0 || + Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); + } + + /** @return true if this is the root region */ + public boolean isRootRegion() { + return Bytes.equals(tableName, HRegionInfo.ROOT_REGIONINFO.getTableName()); + } + + /** @return true if this region is from a table that is a meta table, + * either .META. or -ROOT- + */ + public boolean isMetaTable() { + return isRootRegion() || isMetaRegion(); + } + + /** @return true if this region is a meta region */ + public boolean isMetaRegion() { + return Bytes.equals(tableName, HRegionInfo.FIRST_META_REGIONINFO.getTableName()); + } + + /** + * @return True if has been split and has daughters. + */ + public boolean isSplit() { + return this.split; + } + + /** + * @param split set split status + */ + public void setSplit(boolean split) { + this.split = split; + } + + /** + * @return True if this region is offline. + */ + public boolean isOffline() { + return this.offLine; + } + + /** + * The parent of a region split is offline while split daughters hold + * references to the parent. Offlined regions are closed. + * @param offLine Set online/offline status. + */ + public void setOffline(boolean offLine) { + this.offLine = offLine; + } + + + /** + * @return True if this is a split parent region. 
+ */ + public boolean isSplitParent() { + if (!isSplit()) return false; + if (!isOffline()) { + LOG.warn("Region is split but NOT offline: " + getRegionNameAsString()); + } + return true; + } + + /** + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + return "{" + HConstants.NAME + " => '" + + this.regionNameStr + + "', STARTKEY => '" + + Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + + Bytes.toStringBinary(this.endKey) + + "', ENCODED => " + getEncodedName() + "," + + (isOffline()? " OFFLINE => true,": "") + + (isSplit()? " SPLIT => true,": "") + "}"; + } + + /** + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null) { + return false; + } + if (!(o instanceof HRegionInfo)) { + return false; + } + return this.compareTo((HRegionInfo)o) == 0; + } + + /** + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + return this.hashCode; + } + + /** @return the object version number + * @deprecated HRI is no longer a VersionedWritable */ + @Deprecated + public byte getVersion() { + return VERSION; + } + + /** + * @deprecated Use protobuf serialization instead. See {@link #toByteArray()} and + * {@link #toDelimitedByteArray()} + */ + @Deprecated + public void write(DataOutput out) throws IOException { + out.writeByte(getVersion()); + Bytes.writeByteArray(out, endKey); + out.writeBoolean(offLine); + out.writeLong(regionId); + Bytes.writeByteArray(out, regionName); + out.writeBoolean(split); + Bytes.writeByteArray(out, startKey); + Bytes.writeByteArray(out, tableName); + out.writeInt(hashCode); + } + + /** + * @deprecated Use protobuf deserialization instead. + * @see #parseFrom(byte[]) + */ + @Deprecated + public void readFields(DataInput in) throws IOException { + // Read the single version byte. We don't ask the super class do it + // because freaks out if its not the current classes' version. This method + // can deserialize version 0 and version 1 of HRI. + byte version = in.readByte(); + if (version == 0) { + // This is the old HRI that carried an HTD. Migrate it. The below + // was copied from the old 0.90 HRI readFields. 
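+      // Version 0 wire layout: endKey, offLine flag, regionId, regionName,
+      // split flag, startKey, then a full HTableDescriptor (only its table
+      // name is kept here), and finally the cached hashCode.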
+ this.endKey = Bytes.readByteArray(in); + this.offLine = in.readBoolean(); + this.regionId = in.readLong(); + this.regionName = Bytes.readByteArray(in); + this.regionNameStr = Bytes.toStringBinary(this.regionName); + this.split = in.readBoolean(); + this.startKey = Bytes.readByteArray(in); + try { + HTableDescriptor htd = new HTableDescriptor(); + htd.readFields(in); + this.tableName = htd.getName(); + } catch(EOFException eofe) { + throw new IOException("HTD not found in input buffer", eofe); + } + this.hashCode = in.readInt(); + } else if (getVersion() == version) { + this.endKey = Bytes.readByteArray(in); + this.offLine = in.readBoolean(); + this.regionId = in.readLong(); + this.regionName = Bytes.readByteArray(in); + this.regionNameStr = Bytes.toStringBinary(this.regionName); + this.split = in.readBoolean(); + this.startKey = Bytes.readByteArray(in); + this.tableName = Bytes.readByteArray(in); + this.hashCode = in.readInt(); + } else { + throw new IOException("Non-migratable/unknown version=" + getVersion()); + } + } + + @Deprecated + private void readFields(byte[] bytes) throws IOException { + if (bytes == null || bytes.length <= 0) { + throw new IllegalArgumentException("Can't build a writable with empty " + + "bytes array"); + } + DataInputBuffer in = new DataInputBuffer(); + try { + in.reset(bytes, 0, bytes.length); + this.readFields(in); + } finally { + in.close(); + } + } + + // + // Comparable + // + + public int compareTo(HRegionInfo o) { + if (o == null) { + return 1; + } + + // Are regions of same table? + int result = Bytes.compareTo(this.tableName, o.tableName); + if (result != 0) { + return result; + } + + // Compare start keys. + result = Bytes.compareTo(this.startKey, o.startKey); + if (result != 0) { + return result; + } + + // Compare end keys. + result = Bytes.compareTo(this.endKey, o.endKey); + + if (result != 0) { + if (this.getStartKey().length != 0 + && this.getEndKey().length == 0) { + return 1; // this is last region + } + if (o.getStartKey().length != 0 + && o.getEndKey().length == 0) { + return -1; // o is the last region + } + return result; + } + + // regionId is usually milli timestamp -- this defines older stamps + // to be "smaller" than newer stamps in sort order. + if (this.regionId > o.regionId) { + return 1; + } else if (this.regionId < o.regionId) { + return -1; + } + + if (this.offLine == o.offLine) + return 0; + if (this.offLine == true) return -1; + + return 1; + } + + /** + * @return Comparator to use comparing {@link KeyValue}s. + */ + public KVComparator getComparator() { + return isRootRegion()? KeyValue.ROOT_COMPARATOR: isMetaRegion()? 
+ KeyValue.META_COMPARATOR: KeyValue.COMPARATOR; + } + + /** + * Convert a HRegionInfo to a RegionInfo + * + * @return the converted RegionInfo + */ + RegionInfo convert() { + return convert(this); + } + + /** + * Convert a HRegionInfo to a RegionInfo + * + * @param info the HRegionInfo to convert + * @return the converted RegionInfo + */ + public static RegionInfo convert(final HRegionInfo info) { + if (info == null) return null; + RegionInfo.Builder builder = RegionInfo.newBuilder(); + builder.setTableName(ByteString.copyFrom(info.getTableName())); + builder.setRegionId(info.getRegionId()); + if (info.getStartKey() != null) { + builder.setStartKey(ByteString.copyFrom(info.getStartKey())); + } + if (info.getEndKey() != null) { + builder.setEndKey(ByteString.copyFrom(info.getEndKey())); + } + builder.setOffline(info.isOffline()); + builder.setSplit(info.isSplit()); + return builder.build(); + } + + /** + * Convert a RegionInfo to a HRegionInfo + * + * @param proto the RegionInfo to convert + * @return the converted HRegionInfo + */ + public static HRegionInfo convert(final RegionInfo proto) { + if (proto == null) return null; + byte [] tableName = proto.getTableName().toByteArray(); + if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { + return ROOT_REGIONINFO; + } else if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { + return FIRST_META_REGIONINFO; + } + long regionId = proto.getRegionId(); + byte[] startKey = null; + byte[] endKey = null; + if (proto.hasStartKey()) { + startKey = proto.getStartKey().toByteArray(); + } + if (proto.hasEndKey()) { + endKey = proto.getEndKey().toByteArray(); + } + boolean split = false; + if (proto.hasSplit()) { + split = proto.getSplit(); + } + HRegionInfo hri = new HRegionInfo(tableName, startKey, endKey, split, regionId); + if (proto.hasOffline()) { + hri.setOffline(proto.getOffline()); + } + return hri; + } + + /** + * @return This instance serialized as protobuf w/ a magic pb prefix. + * @see #parseFrom(byte[]) + */ + public byte [] toByteArray() { + byte [] bytes = convert().toByteArray(); + return ProtobufUtil.prependPBMagic(bytes); + } + + /** + * @param bytes + * @return A deserialized {@link HRegionInfo} or null if we failed deserialize or passed bytes null + * @see #toByteArray() + */ + public static HRegionInfo parseFromOrNull(final byte [] bytes) { + if (bytes == null || bytes.length <= 0) return null; + try { + return parseFrom(bytes); + } catch (DeserializationException e) { + return null; + } + } + + /** + * @param bytes A pb RegionInfo serialized with a pb magic prefix. + * @return A deserialized {@link HRegionInfo} + * @throws DeserializationException + * @see #toByteArray() + */ + public static HRegionInfo parseFrom(final byte [] bytes) throws DeserializationException { + if (ProtobufUtil.isPBMagicPrefix(bytes)) { + int pblen = ProtobufUtil.lengthOfPBMagic(); + try { + HBaseProtos.RegionInfo ri = + HBaseProtos.RegionInfo.newBuilder().mergeFrom(bytes, pblen, bytes.length - pblen).build(); + return convert(ri); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + } else { + try { + HRegionInfo hri = new HRegionInfo(); + hri.readFields(bytes); + return hri; + } catch (IOException e) { + throw new DeserializationException(e); + } + } + } + + /** + * Use this instead of {@link #toByteArray()} when writing to a stream and you want to use + * the pb mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want). 
+ * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. + * @throws IOException + * @see #toByteArray() + */ + public byte [] toDelimitedByteArray() throws IOException { + return ProtobufUtil.toDelimitedByteArray(convert()); + } + + /** + * Extract a HRegionInfo and ServerName from catalog table {@link Result}. + * @param r Result to pull from + * @return A pair of the {@link HRegionInfo} and the {@link ServerName} + * (or null for server address if no address set in .META.). + * @throws IOException + */ + public static Pair getHRegionInfoAndServerName(final Result r) { + HRegionInfo info = + getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER); + ServerName sn = getServerName(r); + return new Pair(info, sn); + } + + /** + * Returns HRegionInfo object from the column + * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog + * table Result. + * @param data a Result object from the catalog table scan + * @return HRegionInfo or null + */ + public static HRegionInfo getHRegionInfo(Result data) { + byte [] bytes = + data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + if (bytes == null) return null; + HRegionInfo info = parseFromOrNull(bytes); + if (LOG.isDebugEnabled()) { + LOG.debug("Current INFO from scan results = " + info); + } + return info; + } + + /** + * Returns the daughter regions by reading the corresponding columns of the catalog table + * Result. + * @param data a Result object from the catalog table scan + * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split + * parent + */ + public static PairOfSameType getDaughterRegions(Result data) throws IOException { + HRegionInfo splitA = getHRegionInfo(data, HConstants.SPLITA_QUALIFIER); + HRegionInfo splitB = getHRegionInfo(data, HConstants.SPLITB_QUALIFIER); + + return new PairOfSameType(splitA, splitB); + } + + /** + * Returns the HRegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and + * qualifier of the catalog table result. + * @param r a Result object from the catalog table scan + * @param qualifier Column family qualifier -- either + * {@link HConstants#SPLITA_QUALIFIER}, {@link HConstants#SPLITB_QUALIFIER} or + * {@link HConstants#REGIONINFO_QUALIFIER}. + * @return An HRegionInfo instance or null. + * @throws IOException + */ + public static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier) { + byte [] bytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier); + if (bytes == null || bytes.length <= 0) return null; + return parseFromOrNull(bytes); + } + + /** + * Returns a {@link ServerName} from catalog table {@link Result}. + * @param r Result to pull from + * @return A ServerName instance or null if necessary fields not found or empty. + */ + public static ServerName getServerName(final Result r) { + byte[] value = r.getValue(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER); + if (value == null || value.length == 0) return null; + String hostAndPort = Bytes.toString(value); + value = r.getValue(HConstants.CATALOG_FAMILY, + HConstants.STARTCODE_QUALIFIER); + if (value == null || value.length == 0) return null; + return new ServerName(hostAndPort, Bytes.toLong(value)); + } + + /** + * Parses an HRegionInfo instance from the passed in stream. Presumes the HRegionInfo was + * serialized to the stream with {@link #toDelimitedByteArray()} + * @param in + * @return An instance of HRegionInfo. 
+ * @throws IOException + */ + public static HRegionInfo parseFrom(final DataInputStream in) throws IOException { + // I need to be able to move back in the stream if this is not a pb serialization so I can + // do the Writable decoding instead. + int pblen = ProtobufUtil.lengthOfPBMagic(); + byte [] pbuf = new byte[pblen]; + if (in.markSupported()) { //read it with mark() + in.mark(pblen); + } + int read = in.read(pbuf); //assumption: if Writable serialization, it should be longer than pblen. + if (read != pblen) throw new IOException("read=" + read + ", wanted=" + pblen); + if (ProtobufUtil.isPBMagicPrefix(pbuf)) { + return convert(HBaseProtos.RegionInfo.parseDelimitedFrom(in)); + } else { + // Presume Writables. Need to reset the stream since it didn't start w/ pb. + if (in.markSupported()) { + in.reset(); + HRegionInfo hri = new HRegionInfo(); + hri.readFields(in); + return hri; + } else { + //we cannot use BufferedInputStream, it consumes more than we read from the underlying IS + ByteArrayInputStream bais = new ByteArrayInputStream(pbuf); + SequenceInputStream sis = new SequenceInputStream(bais, in); //concatenate input streams + HRegionInfo hri = new HRegionInfo(); + hri.readFields(new DataInputStream(sis)); + return hri; + } + } + } + + /** + * Serializes given HRegionInfo's as a byte array. Use this instead of {@link #toByteArray()} when + * writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the delimiter, pb reads + * to EOF which may not be what you want). {@link #parseDelimitedFrom(byte[], int, int)} can + * be used to read back the instances. + * @param infos HRegionInfo objects to serialize + * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. + * @throws IOException + * @see #toByteArray() + */ + public static byte[] toDelimitedByteArray(HRegionInfo... infos) throws IOException { + byte[][] bytes = new byte[infos.length][]; + int size = 0; + for (int i = 0; i < infos.length; i++) { + bytes[i] = infos[i].toDelimitedByteArray(); + size += bytes[i].length; + } + + byte[] result = new byte[size]; + int offset = 0; + for (byte[] b : bytes) { + System.arraycopy(b, 0, result, offset, b.length); + offset += b.length; + } + return result; + } + + /** + * Parses all the HRegionInfo instances from the passed in stream until EOF. Presumes the + * HRegionInfo's were serialized to the stream with {@link #toDelimitedByteArray()} + * @param bytes serialized bytes + * @param offset the start offset into the byte[] buffer + * @param length how far we should read into the byte[] buffer + * @return All the hregioninfos that are in the byte array. Keeps reading till we hit the end. 
+ */ + public static List parseDelimitedFrom(final byte[] bytes, final int offset, + final int length) throws IOException { + if (bytes == null) { + throw new IllegalArgumentException("Can't build an object with empty bytes array"); + } + DataInputBuffer in = new DataInputBuffer(); + List hris = new ArrayList(); + try { + in.reset(bytes, offset, length); + while (in.available() > 0) { + HRegionInfo hri = parseFrom(in); + hris.add(hri); + } + } finally { + in.close(); + } + return hris; + } + +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java new file mode 100644 index 0000000..7c8e9aa --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java @@ -0,0 +1,128 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Addressing; + +/** + * Data structure to hold HRegionInfo and the address for the hosting + * HRegionServer. Immutable. Comparable, but we compare the 'location' only: + * i.e. the hostname and port, and *not* the regioninfo. This means two + * instances are the same if they refer to the same 'location' (the same + * hostname and port), though they may be carrying different regions. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class HRegionLocation implements Comparable { + private final HRegionInfo regionInfo; + private final String hostname; + private final int port; + // Cache of the 'toString' result. 
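A minimal sketch of the location-only comparison described in the class comment above, with an invented host and table names:

    // Illustrative sketch only; values are hypothetical.
    HRegionLocation a = new HRegionLocation(
        new HRegionInfo(Bytes.toBytes("t1")), "rs1.example.org", 60020);
    HRegionLocation b = new HRegionLocation(
        new HRegionInfo(Bytes.toBytes("t2")), "rs1.example.org", 60020);
    // a.equals(b) is true: same hostname and port, even though the regions differ.
    // a.getHostnamePort() -> "rs1.example.org:60020"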
+ private String cachedString = null; + // Cache of the hostname + port + private String cachedHostnamePort; + + /** + * Constructor + * @param regionInfo the HRegionInfo for the region + * @param hostname Hostname + * @param port port + */ + public HRegionLocation(HRegionInfo regionInfo, final String hostname, + final int port) { + this.regionInfo = regionInfo; + this.hostname = hostname; + this.port = port; + } + + /** + * @see java.lang.Object#toString() + */ + @Override + public synchronized String toString() { + if (this.cachedString == null) { + this.cachedString = "region=" + this.regionInfo.getRegionNameAsString() + + ", hostname=" + this.hostname + ", port=" + this.port; + } + return this.cachedString; + } + + /** + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null) { + return false; + } + if (!(o instanceof HRegionLocation)) { + return false; + } + return this.compareTo((HRegionLocation)o) == 0; + } + + /** + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + int result = this.hostname.hashCode(); + result ^= this.port; + return result; + } + + /** @return HRegionInfo */ + public HRegionInfo getRegionInfo(){ + return regionInfo; + } + + public String getHostname() { + return this.hostname; + } + + public int getPort() { + return this.port; + } + + /** + * @return String made of hostname and port formatted as per {@link Addressing#createHostAndPortStr(String, int)} + */ + public synchronized String getHostnamePort() { + if (this.cachedHostnamePort == null) { + this.cachedHostnamePort = + Addressing.createHostAndPortStr(this.hostname, this.port); + } + return this.cachedHostnamePort; + } + + // + // Comparable + // + + public int compareTo(HRegionLocation o) { + int result = this.hostname.compareTo(o.getHostname()); + if (result != 0) return result; + return this.port - o.getPort(); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java new file mode 100644 index 0000000..b697e26 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -0,0 +1,1301 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.regex.Matcher; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Writables; +import org.apache.hadoop.io.WritableComparable; + +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * HTableDescriptor contains the details about an HBase table such as the descriptors of + * all the column families, is the table a catalog table, -ROOT- or + * .META. , is the table is read only, the maximum size of the memstore, + * when the region split should occur, coprocessors associated with it etc... + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class HTableDescriptor implements WritableComparable { + + /** + * Changes prior to version 3 were not recorded here. + * Version 3 adds metadata as a map where keys and values are byte[]. + * Version 4 adds indexes + * Version 5 removed transactional pollution -- e.g. indexes + */ + private static final byte TABLE_DESCRIPTOR_VERSION = 5; + + private byte [] name = HConstants.EMPTY_BYTE_ARRAY; + + private String nameAsString = ""; + + /** + * A map which holds the metadata information of the table. This metadata + * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY, + * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc... 
+ */ + protected final Map values = + new HashMap(); + + public static final String SPLIT_POLICY = "SPLIT_POLICY"; + + /** + * INTERNAL Used by HBase Shell interface to access this metadata + * attribute which denotes the maximum size of the store file after which + * a region split occurs + * + * @see #getMaxFileSize() + */ + public static final String MAX_FILESIZE = "MAX_FILESIZE"; + private static final ImmutableBytesWritable MAX_FILESIZE_KEY = + new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE)); + + public static final String OWNER = "OWNER"; + public static final ImmutableBytesWritable OWNER_KEY = + new ImmutableBytesWritable(Bytes.toBytes(OWNER)); + + /** + * INTERNAL Used by rest interface to access this metadata + * attribute which denotes if the table is Read Only + * + * @see #isReadOnly() + */ + public static final String READONLY = "READONLY"; + private static final ImmutableBytesWritable READONLY_KEY = + new ImmutableBytesWritable(Bytes.toBytes(READONLY)); + + /** + * INTERNAL Used by HBase Shell interface to access this metadata + * attribute which represents the maximum size of the memstore after which + * its contents are flushed onto the disk + * + * @see #getMemStoreFlushSize() + */ + public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE"; + private static final ImmutableBytesWritable MEMSTORE_FLUSHSIZE_KEY = + new ImmutableBytesWritable(Bytes.toBytes(MEMSTORE_FLUSHSIZE)); + + /** + * INTERNAL Used by rest interface to access this metadata + * attribute which denotes if the table is a -ROOT- region or not + * + * @see #isRootRegion() + */ + public static final String IS_ROOT = "IS_ROOT"; + private static final ImmutableBytesWritable IS_ROOT_KEY = + new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT)); + + /** + * INTERNAL Used by rest interface to access this metadata + * attribute which denotes if it is a catalog table, either + * .META. or -ROOT- + * + * @see #isMetaRegion() + */ + public static final String IS_META = "IS_META"; + private static final ImmutableBytesWritable IS_META_KEY = + new ImmutableBytesWritable(Bytes.toBytes(IS_META)); + + /** + * INTERNAL Used by HBase Shell interface to access this metadata + * attribute which denotes if the deferred log flush option is enabled + */ + public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH"; + private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY = + new ImmutableBytesWritable(Bytes.toBytes(DEFERRED_LOG_FLUSH)); + + /* + * The below are ugly but better than creating them each time till we + * replace booleans being saved as Strings with plain booleans. Need a + * migration script to do this. TODO. 
+ */ + private static final ImmutableBytesWritable FALSE = + new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString())); + + private static final ImmutableBytesWritable TRUE = + new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString())); + + private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false; + + /** + * Constant that denotes whether the table is READONLY by default and is false + */ + public static final boolean DEFAULT_READONLY = false; + + /** + * Constant that denotes the maximum default size of the memstore after which + * the contents are flushed to the store files + */ + public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L; + + private final static Map DEFAULT_VALUES + = new HashMap(); + private final static Set RESERVED_KEYWORDS + = new HashSet(); + static { + DEFAULT_VALUES.put(MAX_FILESIZE, + String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE)); + DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY)); + DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE, + String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE)); + DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH, + String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH)); + for (String s : DEFAULT_VALUES.keySet()) { + RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s))); + } + RESERVED_KEYWORDS.add(IS_ROOT_KEY); + RESERVED_KEYWORDS.add(IS_META_KEY); + } + + /** + * Cache of whether this is a meta table or not. + */ + private volatile Boolean meta = null; + /** + * Cache of whether this is root table or not. + */ + private volatile Boolean root = null; + /** + * Cache of whether deferred logging set. + */ + private Boolean deferredLog = null; + + /** + * Maps column family name to the respective HColumnDescriptors + */ + private final Map families = + new TreeMap(Bytes.BYTES_RAWCOMPARATOR); + + /** + * INTERNAL Private constructor used internally creating table descriptors for + * catalog tables, .META. and -ROOT-. + */ + protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families) { + this.name = name.clone(); + this.nameAsString = Bytes.toString(this.name); + setMetaFlags(name); + for(HColumnDescriptor descriptor : families) { + this.families.put(descriptor.getName(), descriptor); + } + } + + /** + * INTERNAL Private constructor used internally creating table descriptors for + * catalog tables, .META. and -ROOT-. + */ + protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families, + Map values) { + this.name = name.clone(); + this.nameAsString = Bytes.toString(this.name); + setMetaFlags(name); + for(HColumnDescriptor descriptor : families) { + this.families.put(descriptor.getName(), descriptor); + } + for (Map.Entry entry: + values.entrySet()) { + this.values.put(entry.getKey(), entry.getValue()); + } + } + + /** + * Default constructor which constructs an empty object. + * For deserializing an HTableDescriptor instance only. + * @see #HTableDescriptor(byte[]) + * @deprecated Used by Writables and Writables are going away. + */ + @Deprecated + public HTableDescriptor() { + super(); + } + + /** + * Construct a table descriptor specifying table name. + * @param name Table name. + * @throws IllegalArgumentException if passed a table name + * that is made of other than 'word' characters, underscore or period: i.e. + * [a-zA-Z_0-9.]. 
+ * @see HADOOP-1581 HBASE: Un-openable tablename bug + */ + public HTableDescriptor(final String name) { + this(Bytes.toBytes(name)); + } + + /** + * Construct a table descriptor specifying a byte array table name + * @param name - Table name as a byte array. + * @throws IllegalArgumentException if passed a table name + * that is made of other than 'word' characters, underscore or period: i.e. + * [a-zA-Z_0-9-.]. + * @see HADOOP-1581 HBASE: Un-openable tablename bug + */ + public HTableDescriptor(final byte [] name) { + super(); + setMetaFlags(this.name); + this.name = this.isMetaRegion()? name: isLegalTableName(name); + this.nameAsString = Bytes.toString(this.name); + } + + /** + * Construct a table descriptor by cloning the descriptor passed as a parameter. + *
+ * Makes a deep copy of the supplied descriptor. + * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor. + * @param desc The descriptor. + */ + public HTableDescriptor(final HTableDescriptor desc) { + super(); + this.name = desc.name.clone(); + this.nameAsString = Bytes.toString(this.name); + setMetaFlags(this.name); + for (HColumnDescriptor c: desc.families.values()) { + this.families.put(c.getName(), new HColumnDescriptor(c)); + } + for (Map.Entry e: + desc.values.entrySet()) { + this.values.put(e.getKey(), e.getValue()); + } + } + + /* + * Set meta flags on this table. + * IS_ROOT_KEY is set if its a -ROOT- table + * IS_META_KEY is set either if its a -ROOT- or a .META. table + * Called by constructors. + * @param name + */ + private void setMetaFlags(final byte [] name) { + setRootRegion(Bytes.equals(name, HConstants.ROOT_TABLE_NAME)); + setMetaRegion(isRootRegion() || + Bytes.equals(name, HConstants.META_TABLE_NAME)); + } + + /** + * Check if the descriptor represents a -ROOT- region. + * + * @return true if this is a -ROOT- region + */ + public boolean isRootRegion() { + if (this.root == null) { + this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE; + } + return this.root.booleanValue(); + } + + /** + * INTERNAL Used to denote if the current table represents + * -ROOT- region. This is used internally by the + * HTableDescriptor constructors + * + * @param isRoot true if this is the -ROOT- region + */ + protected void setRootRegion(boolean isRoot) { + // TODO: Make the value a boolean rather than String of boolean. + values.put(IS_ROOT_KEY, isRoot? TRUE: FALSE); + } + + /** + * Checks if this table is either -ROOT- or .META. + * region. + * + * @return true if this is either a -ROOT- or .META. + * region + */ + public boolean isMetaRegion() { + if (this.meta == null) { + this.meta = calculateIsMetaRegion(); + } + return this.meta.booleanValue(); + } + + private synchronized Boolean calculateIsMetaRegion() { + byte [] value = getValue(IS_META_KEY); + return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE; + } + + private boolean isSomething(final ImmutableBytesWritable key, + final boolean valueIfNull) { + byte [] value = getValue(key); + if (value != null) { + // TODO: Make value be a boolean rather than String of boolean. + return Boolean.valueOf(Bytes.toString(value)).booleanValue(); + } + return valueIfNull; + } + + /** + * INTERNAL Used to denote if the current table represents + * -ROOT- or .META. region. This is used + * internally by the HTableDescriptor constructors + * + * @param isMeta true if its either -ROOT- or + * .META. region + */ + protected void setMetaRegion(boolean isMeta) { + values.put(IS_META_KEY, isMeta? TRUE: FALSE); + } + + /** + * Checks if the table is a .META. table + * + * @return true if table is .META. region. + */ + public boolean isMetaTable() { + return isMetaRegion() && !isRootRegion(); + } + + /** + * Checks of the tableName being passed represents either + * -ROOT- or .META. + * + * @return true if a tablesName is either -ROOT- + * or .META. + */ + public static boolean isMetaTable(final byte [] tableName) { + return Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME) || + Bytes.equals(tableName, HConstants.META_TABLE_NAME); + } + + /** + * Check passed byte buffer, "tableName", is legal user-space table name. 
+ * @return Returns passed tableName param + * @throws NullPointerException If passed tableName is null + * @throws IllegalArgumentException if passed a tableName + * that is made of other than 'word' characters or underscores: i.e. + * [a-zA-Z_0-9]. + */ + public static byte [] isLegalTableName(final byte [] tableName) { + if (tableName == null || tableName.length <= 0) { + throw new IllegalArgumentException("Name is null or empty"); + } + if (tableName[0] == '.' || tableName[0] == '-') { + throw new IllegalArgumentException("Illegal first character <" + tableName[0] + + "> at 0. User-space table names can only start with 'word " + + "characters': i.e. [a-zA-Z_0-9]: " + Bytes.toString(tableName)); + } + if (HConstants.CLUSTER_ID_FILE_NAME.equalsIgnoreCase(Bytes + .toString(tableName)) + || HConstants.SPLIT_LOGDIR_NAME.equalsIgnoreCase(Bytes + .toString(tableName)) + || HConstants.VERSION_FILE_NAME.equalsIgnoreCase(Bytes + .toString(tableName))) { + throw new IllegalArgumentException(Bytes.toString(tableName) + + " conflicted with system reserved words"); + } + for (int i = 0; i < tableName.length; i++) { + if (Character.isLetterOrDigit(tableName[i]) || tableName[i] == '_' || + tableName[i] == '-' || tableName[i] == '.') { + continue; + } + throw new IllegalArgumentException("Illegal character <" + tableName[i] + + "> at " + i + ". User-space table names can only contain " + + "'word characters': i.e. [a-zA-Z_0-9-.]: " + Bytes.toString(tableName)); + } + return tableName; + } + + /** + * Getter for accessing the metadata associated with the key + * + * @param key The key. + * @return The value. + * @see #values + */ + public byte[] getValue(byte[] key) { + return getValue(new ImmutableBytesWritable(key)); + } + + private byte[] getValue(final ImmutableBytesWritable key) { + ImmutableBytesWritable ibw = values.get(key); + if (ibw == null) + return null; + return ibw.get(); + } + + /** + * Getter for accessing the metadata associated with the key + * + * @param key The key. + * @return The value. + * @see #values + */ + public String getValue(String key) { + byte[] value = getValue(Bytes.toBytes(key)); + if (value == null) + return null; + return Bytes.toString(value); + } + + /** + * Getter for fetching an unmodifiable {@link #values} map. + * + * @return unmodifiable map {@link #values}. + * @see #values + */ + public Map getValues() { + // shallow pointer copy + return Collections.unmodifiableMap(values); + } + + /** + * Setter for storing metadata as a (key, value) pair in {@link #values} map + * + * @param key The key. + * @param value The value. + * @see #values + */ + public void setValue(byte[] key, byte[] value) { + setValue(new ImmutableBytesWritable(key), value); + } + + /* + * @param key The key. + * @param value The value. + */ + private void setValue(final ImmutableBytesWritable key, + final byte[] value) { + values.put(key, new ImmutableBytesWritable(value)); + } + + /* + * @param key The key. + * @param value The value. + */ + private void setValue(final ImmutableBytesWritable key, + final ImmutableBytesWritable value) { + values.put(key, value); + } + + /** + * Setter for storing metadata as a (key, value) pair in {@link #values} map + * + * @param key The key. + * @param value The value. 
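A minimal usage sketch of the table-name check and the string metadata accessors above (illustrative only, not part of the patch; the table name "usertable" and key "MY_ATTR" are made up, and the hbase-client jar is assumed on the classpath):
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class TableMetadataSketch {
  public static void main(String[] args) {
    // The String constructor routes the name through isLegalTableName().
    HTableDescriptor htd = new HTableDescriptor("usertable");

    // Arbitrary (key, value) metadata lands in the values map.
    htd.setValue("MY_ATTR", "42");
    System.out.println(htd.getValue("MY_ATTR"));  // 42

    // Passing a null value removes the key again.
    htd.setValue("MY_ATTR", null);

    // Names may not start with '.' or '-'; those prefixes are reserved.
    try {
      HTableDescriptor.isLegalTableName(Bytes.toBytes("-bad"));
    } catch (IllegalArgumentException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}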
+ * @see #values + */ + public void setValue(String key, String value) { + if (value == null) { + remove(Bytes.toBytes(key)); + } else { + setValue(Bytes.toBytes(key), Bytes.toBytes(value)); + } + } + + /** + * Remove metadata represented by the key from the {@link #values} map + * + * @param key Key whose key and value we're to remove from HTableDescriptor + * parameters. + */ + public void remove(final byte [] key) { + values.remove(new ImmutableBytesWritable(key)); + } + + /** + * Remove metadata represented by the key from the {@link #values} map + * + * @param key Key whose key and value we're to remove from HTableDescriptor + * parameters. + */ + public void remove(final String key) { + remove(Bytes.toBytes(key)); + } + + /** + * Check if the readOnly flag of the table is set. If the readOnly flag is + * set then the contents of the table can only be read from but not modified. + * + * @return true if all columns in the table should be read only + */ + public boolean isReadOnly() { + return isSomething(READONLY_KEY, DEFAULT_READONLY); + } + + /** + * Setting the table as read only sets all the columns in the table as read + * only. By default all tables are modifiable, but if the readOnly flag is + * set to true then the contents of the table can only be read but not modified. + * + * @param readOnly True if all of the columns in the table should be read + * only. + */ + public void setReadOnly(final boolean readOnly) { + setValue(READONLY_KEY, readOnly? TRUE: FALSE); + } + + /** + * Check if deferred log edits are enabled on the table. + * + * @return true if that deferred log flush is enabled on the table + * + * @see #setDeferredLogFlush(boolean) + */ + public synchronized boolean isDeferredLogFlush() { + if(this.deferredLog == null) { + this.deferredLog = + isSomething(DEFERRED_LOG_FLUSH_KEY, DEFAULT_DEFERRED_LOG_FLUSH); + } + return this.deferredLog; + } + + /** + * This is used to defer the log edits syncing to the file system. Everytime + * an edit is sent to the server it is first sync'd to the file system by the + * log writer. This sync is an expensive operation and thus can be deferred so + * that the edits are kept in memory for a specified period of time as represented + * by hbase.regionserver.optionallogflushinterval and not flushed + * for every edit. + *
+ * NOTE: This option might result in data loss if the region server crashes + * before these deferred edits in memory are flushed onto the filesystem. + *
+ * + * @param isDeferredLogFlush + */ + public void setDeferredLogFlush(final boolean isDeferredLogFlush) { + setValue(DEFERRED_LOG_FLUSH_KEY, isDeferredLogFlush? TRUE: FALSE); + this.deferredLog = isDeferredLogFlush; + } + + /** + * Get the name of the table as a byte array. + * + * @return name of table + */ + public byte [] getName() { + return name; + } + + /** + * Get the name of the table as a String + * + * @return name of table as a String + */ + public String getNameAsString() { + return this.nameAsString; + } + + /** + * This get the class associated with the region split policy which + * determines when a region split should occur. The class used by + * default is {@link org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy} + * which split the region base on a constant {@link #getMaxFileSize()} + * + * @return the class name of the region split policy for this table. + * If this returns null, the default constant size based split policy + * is used. + */ + public String getRegionSplitPolicyClassName() { + return getValue(SPLIT_POLICY); + } + + /** + * Set the name of the table. + * + * @param name name of table + */ + public void setName(byte[] name) { + this.name = name; + this.nameAsString = Bytes.toString(this.name); + setMetaFlags(this.name); + } + + /** + * Returns the maximum size upto which a region can grow to after which a region + * split is triggered. The region size is represented by the size of the biggest + * store file in that region. + * + * @return max hregion size for table + * + * @see #setMaxFileSize(long) + */ + public long getMaxFileSize() { + byte [] value = getValue(MAX_FILESIZE_KEY); + if (value != null) + return Long.valueOf(Bytes.toString(value)).longValue(); + return HConstants.DEFAULT_MAX_FILE_SIZE; + } + + /** + * Sets the maximum size upto which a region can grow to after which a region + * split is triggered. The region size is represented by the size of the biggest + * store file in that region, i.e. If the biggest store file grows beyond the + * maxFileSize, then the region split is triggered. This defaults to a value of + * 256 MB. + *
+ * This is not an absolute value and might vary. For example, if a single row exceeds + * maxFileSize, the resulting store file will be larger than maxFileSize, since + * a single row cannot be split across multiple regions. + *
+ * + * @param maxFileSize The maximum file size that a store file can grow to + * before a split is triggered. + */ + public void setMaxFileSize(long maxFileSize) { + setValue(MAX_FILESIZE_KEY, Bytes.toBytes(Long.toString(maxFileSize))); + } + + /** + * Returns the size of the memstore after which a flush to filesystem is triggered. + * + * @return memory cache flush size for each hregion + * + * @see #setMemStoreFlushSize(long) + */ + public long getMemStoreFlushSize() { + byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY); + if (value != null) + return Long.valueOf(Bytes.toString(value)).longValue(); + return DEFAULT_MEMSTORE_FLUSH_SIZE; + } + + /** + * Represents the maximum size of the memstore after which the contents of the + * memstore are flushed to the filesystem. This defaults to a size of 64 MB. + * + * @param memstoreFlushSize memory cache flush size for each hregion + */ + public void setMemStoreFlushSize(long memstoreFlushSize) { + setValue(MEMSTORE_FLUSHSIZE_KEY, + Bytes.toBytes(Long.toString(memstoreFlushSize))); + } + + /** + * Adds a column family. + * @param family HColumnDescriptor of family to add. + */ + public void addFamily(final HColumnDescriptor family) { + if (family.getName() == null || family.getName().length <= 0) { + throw new NullPointerException("Family name cannot be null or empty"); + } + this.families.put(family.getName(), family); + } + + /** + * Checks to see if this table contains the given column family + * @param familyName Family name or column name. + * @return true if the table contains the specified family name + */ + public boolean hasFamily(final byte [] familyName) { + return families.containsKey(familyName); + } + + /** + * @return Name of this table and then a map of all of the column family + * descriptors. + * @see #getNameAsString() + */ + @Override + public String toString() { + StringBuilder s = new StringBuilder(); + s.append('\'').append(Bytes.toString(name)).append('\''); + s.append(getValues(true)); + for (HColumnDescriptor f : families.values()) { + s.append(", ").append(f); + } + return s.toString(); + } + + /** + * @return Name of this table and then a map of all of the column family + * descriptors (with only the non-default column family attributes) + */ + public String toStringCustomizedValues() { + StringBuilder s = new StringBuilder(); + s.append('\'').append(Bytes.toString(name)).append('\''); + s.append(getValues(false)); + for(HColumnDescriptor hcd : families.values()) { + s.append(", ").append(hcd.toStringCustomizedValues()); + } + return s.toString(); + } + + private StringBuilder getValues(boolean printDefaults) { + StringBuilder s = new StringBuilder(); + + // step 1: set partitioning and pruning + Set reservedKeys = new TreeSet(); + Set configKeys = new TreeSet(); + for (ImmutableBytesWritable k : values.keySet()) { + if (k == null || k.get() == null) continue; + String key = Bytes.toString(k.get()); + // in this section, print out reserved keywords + coprocessor info + if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) { + configKeys.add(k); + continue; + } + // only print out IS_ROOT/IS_META if true + String value = Bytes.toString(values.get(k).get()); + if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) { + if (Boolean.valueOf(value) == false) continue; + } + // see if a reserved key is a default value. 
may not want to print it out + if (printDefaults + || !DEFAULT_VALUES.containsKey(key) + || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { + reservedKeys.add(k); + } + } + + // early exit optimization + if (reservedKeys.isEmpty() && configKeys.isEmpty()) return s; + + // step 2: printing + s.append(", {TABLE_ATTRIBUTES => {"); + + // print all reserved keys first + boolean printCommaForAttr = false; + for (ImmutableBytesWritable k : reservedKeys) { + String key = Bytes.toString(k.get()); + String value = Bytes.toString(values.get(k).get()); + if (printCommaForAttr) s.append(", "); + printCommaForAttr = true; + s.append(key); + s.append(" => "); + s.append('\'').append(value).append('\''); + } + + if (!configKeys.isEmpty()) { + // print all non-reserved, advanced config keys as a separate subset + if (printCommaForAttr) s.append(", "); + printCommaForAttr = true; + s.append(HConstants.CONFIG).append(" => "); + s.append("{"); + boolean printCommaForCfg = false; + for (ImmutableBytesWritable k : configKeys) { + String key = Bytes.toString(k.get()); + String value = Bytes.toString(values.get(k).get()); + if (printCommaForCfg) s.append(", "); + printCommaForCfg = true; + s.append('\'').append(key).append('\''); + s.append(" => "); + s.append('\'').append(value).append('\''); + } + s.append("}"); + } + + s.append("}}"); // end METHOD + return s; + } + + public static Map getDefaultValues() { + return Collections.unmodifiableMap(DEFAULT_VALUES); + } + + /** + * Compare the contents of the descriptor with another one passed as a parameter. + * Checks if the obj passed is an instance of HTableDescriptor, if yes then the + * contents of the descriptors are compared. + * + * @return true if the contents of the the two descriptors exactly match + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof HTableDescriptor)) { + return false; + } + return compareTo((HTableDescriptor)obj) == 0; + } + + /** + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + int result = Bytes.hashCode(this.name); + result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode(); + if (this.families != null && this.families.size() > 0) { + for (HColumnDescriptor e: this.families.values()) { + result ^= e.hashCode(); + } + } + result ^= values.hashCode(); + return result; + } + + /** + * INTERNAL This method is a part of {@link WritableComparable} interface + * and is used for de-serialization of the HTableDescriptor over RPC + * @deprecated Writables are going away. Use pb {@link #parseFrom(byte[])} instead. 
+ */ + @Deprecated + @Override + public void readFields(DataInput in) throws IOException { + int version = in.readInt(); + if (version < 3) + throw new IOException("versions < 3 are not supported (and never existed!?)"); + // version 3+ + name = Bytes.readByteArray(in); + nameAsString = Bytes.toString(this.name); + setRootRegion(in.readBoolean()); + setMetaRegion(in.readBoolean()); + values.clear(); + int numVals = in.readInt(); + for (int i = 0; i < numVals; i++) { + ImmutableBytesWritable key = new ImmutableBytesWritable(); + ImmutableBytesWritable value = new ImmutableBytesWritable(); + key.readFields(in); + value.readFields(in); + values.put(key, value); + } + families.clear(); + int numFamilies = in.readInt(); + for (int i = 0; i < numFamilies; i++) { + HColumnDescriptor c = new HColumnDescriptor(); + c.readFields(in); + families.put(c.getName(), c); + } + if (version < 4) { + return; + } + } + + /** + * INTERNAL This method is a part of {@link WritableComparable} interface + * and is used for serialization of the HTableDescriptor over RPC + * @deprecated Writables are going away. + * Use {@link com.google.protobuf.MessageLite#toByteArray} instead. + */ + @Deprecated + @Override + public void write(DataOutput out) throws IOException { + out.writeInt(TABLE_DESCRIPTOR_VERSION); + Bytes.writeByteArray(out, name); + out.writeBoolean(isRootRegion()); + out.writeBoolean(isMetaRegion()); + out.writeInt(values.size()); + for (Map.Entry e: + values.entrySet()) { + e.getKey().write(out); + e.getValue().write(out); + } + out.writeInt(families.size()); + for(Iterator it = families.values().iterator(); + it.hasNext(); ) { + HColumnDescriptor family = it.next(); + family.write(out); + } + } + + // Comparable + + /** + * Compares the descriptor with another descriptor which is passed as a parameter. + * This compares the content of the two descriptors and not the reference. + * + * @return 0 if the contents of the descriptors are exactly matching, + * 1 if there is a mismatch in the contents + */ + @Override + public int compareTo(final HTableDescriptor other) { + int result = Bytes.compareTo(this.name, other.name); + if (result == 0) { + result = families.size() - other.families.size(); + } + if (result == 0 && families.size() != other.families.size()) { + result = Integer.valueOf(families.size()).compareTo( + Integer.valueOf(other.families.size())); + } + if (result == 0) { + for (Iterator it = families.values().iterator(), + it2 = other.families.values().iterator(); it.hasNext(); ) { + result = it.next().compareTo(it2.next()); + if (result != 0) { + break; + } + } + } + if (result == 0) { + // punt on comparison for ordering, just calculate difference + result = this.values.hashCode() - other.values.hashCode(); + if (result < 0) + result = -1; + else if (result > 0) + result = 1; + } + return result; + } + + /** + * Returns an unmodifiable collection of all the {@link HColumnDescriptor} + * of all the column families of the table. + * + * @return Immutable collection of {@link HColumnDescriptor} of all the + * column families. + */ + public Collection getFamilies() { + return Collections.unmodifiableCollection(this.families.values()); + } + + /** + * Returns all the column family names of the current table. The map of + * HTableDescriptor contains mapping of family name to HColumnDescriptors. + * This returns all the keys of the family map which represents the column + * family names of the table. + * + * @return Immutable sorted set of the keys of the families. 
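A sketch of how the family map and the size-related setters above are typically combined when building a descriptor (illustrative only, not part of the patch; table and family names are made up):
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyConfigSketch {
  public static void main(String[] args) {
    HTableDescriptor htd = new HTableDescriptor("metrics");

    // Families are kept in a name-keyed map; adding the same name again replaces the entry.
    htd.addFamily(new HColumnDescriptor(Bytes.toBytes("d")));
    htd.addFamily(new HColumnDescriptor(Bytes.toBytes("meta")));

    // Table-level tuning is stored alongside the reserved keys in the values map.
    htd.setMaxFileSize(512L * 1024 * 1024);       // region split threshold
    htd.setMemStoreFlushSize(64L * 1024 * 1024);  // memstore flush threshold

    System.out.println(htd.hasFamily(Bytes.toBytes("d")));  // true
    for (HColumnDescriptor hcd : htd.getFamilies()) {
      System.out.println(hcd.getNameAsString());
    }
  }
}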
+ */ + public Set getFamiliesKeys() { + return Collections.unmodifiableSet(this.families.keySet()); + } + + /** + * Returns an array all the {@link HColumnDescriptor} of the column families + * of the table. + * + * @return Array of all the HColumnDescriptors of the current table + * + * @see #getFamilies() + */ + public HColumnDescriptor[] getColumnFamilies() { + return getFamilies().toArray(new HColumnDescriptor[0]); + } + + + /** + * Returns the HColumnDescriptor for a specific column family with name as + * specified by the parameter column. + * + * @param column Column family name + * @return Column descriptor for the passed family name or the family on + * passed in column. + */ + public HColumnDescriptor getFamily(final byte [] column) { + return this.families.get(column); + } + + + /** + * Removes the HColumnDescriptor with name specified by the parameter column + * from the table descriptor + * + * @param column Name of the column family to be removed. + * @return Column descriptor for the passed family name or the family on + * passed in column. + */ + public HColumnDescriptor removeFamily(final byte [] column) { + return this.families.remove(column); + } + + + /** + * Add a table coprocessor to this table. The coprocessor + * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} + * or Endpoint. + * It won't check if the class can be loaded or not. + * Whether a coprocessor is loadable or not will be determined when + * a region is opened. + * @param className Full class name. + * @throws IOException + */ + public void addCoprocessor(String className) throws IOException { + addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null); + } + + + /** + * Add a table coprocessor to this table. The coprocessor + * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} + * or Endpoint. + * It won't check if the class can be loaded or not. + * Whether a coprocessor is loadable or not will be determined when + * a region is opened. + * @param jarFilePath Path of the jar file. If it's null, the class will be + * loaded from default classloader. + * @param className Full class name. + * @param priority Priority + * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor. 
+ * @throws IOException + */ + public void addCoprocessor(String className, Path jarFilePath, + int priority, final Map kvs) + throws IOException { + if (hasCoprocessor(className)) { + throw new IOException("Coprocessor " + className + " already exists."); + } + // validate parameter kvs + StringBuilder kvString = new StringBuilder(); + if (kvs != null) { + for (Map.Entry e: kvs.entrySet()) { + if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) { + throw new IOException("Illegal parameter key = " + e.getKey()); + } + if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) { + throw new IOException("Illegal parameter (" + e.getKey() + + ") value = " + e.getValue()); + } + if (kvString.length() != 0) { + kvString.append(','); + } + kvString.append(e.getKey()); + kvString.append('='); + kvString.append(e.getValue()); + } + } + + // generate a coprocessor key + int maxCoprocessorNumber = 0; + Matcher keyMatcher; + for (Map.Entry e: + this.values.entrySet()) { + keyMatcher = + HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher( + Bytes.toString(e.getKey().get())); + if (!keyMatcher.matches()) { + continue; + } + maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), + maxCoprocessorNumber); + } + maxCoprocessorNumber++; + + String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber); + String value = ((jarFilePath == null)? "" : jarFilePath.toString()) + + "|" + className + "|" + Integer.toString(priority) + "|" + + kvString.toString(); + setValue(key, value); + } + + + /** + * Check if the table has an attached co-processor represented by the name className + * + * @param className - Class name of the co-processor + * @return true of the table has a co-processor className + */ + public boolean hasCoprocessor(String className) { + Matcher keyMatcher; + Matcher valueMatcher; + for (Map.Entry e: + this.values.entrySet()) { + keyMatcher = + HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher( + Bytes.toString(e.getKey().get())); + if (!keyMatcher.matches()) { + continue; + } + valueMatcher = + HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher( + Bytes.toString(e.getValue().get())); + if (!valueMatcher.matches()) { + continue; + } + // get className and compare + String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field + if (clazz.equals(className.trim())) { + return true; + } + } + return false; + } + + /** + * Remove a coprocessor from those set on the table + * @param className Class name of the co-processor + */ + public void removeCoprocessor(String className) { + ImmutableBytesWritable match = null; + Matcher keyMatcher; + Matcher valueMatcher; + for (Map.Entry e : this.values + .entrySet()) { + keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e + .getKey().get())); + if (!keyMatcher.matches()) { + continue; + } + valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes + .toString(e.getValue().get())); + if (!valueMatcher.matches()) { + continue; + } + // get className and compare + String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field + // remove the CP if it is present + if (clazz.equals(className.trim())) { + match = e.getKey(); + break; + } + } + // if we found a match, remove it + if (match != null) + this.values.remove(match); + } + + /** + * Returns the {@link Path} object representing the table directory under + * path rootdir + * + * @param rootdir qualified path of HBase root directory + * @param tableName name of table + * @return {@link Path} for table + */ + public 
static Path getTableDir(Path rootdir, final byte [] tableName) { + return new Path(rootdir, Bytes.toString(tableName)); + } + + /** Table descriptor for -ROOT-
catalog table */ + public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor( + HConstants.ROOT_TABLE_NAME, + new HColumnDescriptor[] { + new HColumnDescriptor(HConstants.CATALOG_FAMILY) + // Ten is arbitrary number. Keep versions to help debugging. + .setMaxVersions(10) + .setInMemory(true) + .setBlocksize(8 * 1024) + .setTimeToLive(HConstants.FOREVER) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + }); + + /** Table descriptor for .META. catalog table */ + public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor( + HConstants.META_TABLE_NAME, new HColumnDescriptor[] { + new HColumnDescriptor(HConstants.CATALOG_FAMILY) + // Ten is arbitrary number. Keep versions to help debugging. + .setMaxVersions(10) + .setInMemory(true) + .setBlocksize(8 * 1024) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + }); + + @Deprecated + public void setOwner(User owner) { + setOwnerString(owner != null ? owner.getShortName() : null); + } + + // used by admin.rb:alter(table_name,*args) to update owner. + @Deprecated + public void setOwnerString(String ownerString) { + if (ownerString != null) { + setValue(OWNER_KEY, Bytes.toBytes(ownerString)); + } else { + values.remove(OWNER_KEY); + } + } + + @Deprecated + public String getOwnerString() { + if (getValue(OWNER_KEY) != null) { + return Bytes.toString(getValue(OWNER_KEY)); + } + // Note that every table should have an owner (i.e. should have OWNER_KEY set). + // .META. and -ROOT- should return system user as owner, not null (see + // MasterFileSystem.java:bootstrap()). + return null; + } + + /** + * @return This instance serialized with pb with pb magic prefix + * @see #parseFrom(byte[]) + */ + public byte [] toByteArray() { + return ProtobufUtil.prependPBMagic(convert().toByteArray()); + } + + /** + * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix + * @return An instance of {@link HTableDescriptor} made from bytes + * @throws DeserializationException + * @throws IOException + * @see #toByteArray() + */ + public static HTableDescriptor parseFrom(final byte [] bytes) + throws DeserializationException, IOException { + if (!ProtobufUtil.isPBMagicPrefix(bytes)) { + return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor()); + } + int pblen = ProtobufUtil.lengthOfPBMagic(); + TableSchema.Builder builder = TableSchema.newBuilder(); + TableSchema ts = null; + try { + ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return convert(ts); + } + + /** + * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance. + */ + public TableSchema convert() { + TableSchema.Builder builder = TableSchema.newBuilder(); + builder.setName(ByteString.copyFrom(getName())); + for (Map.Entry e: this.values.entrySet()) { + TableSchema.Attribute.Builder aBuilder = TableSchema.Attribute.newBuilder(); + aBuilder.setName(ByteString.copyFrom(e.getKey().get())); + aBuilder.setValue(ByteString.copyFrom(e.getValue().get())); + builder.addAttributes(aBuilder.build()); + } + for (HColumnDescriptor hcd: getColumnFamilies()) { + builder.addColumnFamilies(hcd.convert()); + } + return builder.build(); + } + + /** + * @param ts A pb TableSchema instance. + * @return An {@link HTableDescriptor} made from the passed in pb ts. 
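A sketch of the protobuf round trip provided by the methods above, assuming they behave as documented here (toByteArray/parseFrom); the table and family names are made up and checked exceptions are simply declared:
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSerializationSketch {
  public static void main(String[] args) throws Exception {
    HTableDescriptor htd = new HTableDescriptor("t1");
    htd.addFamily(new HColumnDescriptor(Bytes.toBytes("cf")));

    // toByteArray() prepends the pb magic, so parseFrom() can distinguish a
    // protobuf image from a legacy Writable image and decode accordingly.
    byte[] image = htd.toByteArray();
    HTableDescriptor copy = HTableDescriptor.parseFrom(image);

    System.out.println(copy.getNameAsString());    // t1
    System.out.println(htd.compareTo(copy) == 0);  // true: contents match
  }
}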
+ */ + public static HTableDescriptor convert(final TableSchema ts) { + List list = ts.getColumnFamiliesList(); + HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()]; + int index = 0; + for (ColumnFamilySchema cfs: list) { + hcds[index++] = HColumnDescriptor.convert(cfs); + } + HTableDescriptor htd = new HTableDescriptor(ts.getName().toByteArray(), hcds); + for (TableSchema.Attribute a: ts.getAttributesList()) { + htd.setValue(a.getName().toByteArray(), a.getValue().toByteArray()); + } + return htd; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java hbase-client/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java new file mode 100644 index 0000000..57d83ef --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java @@ -0,0 +1,349 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService; +import org.apache.hadoop.hbase.security.TokenInfo; +import org.apache.hadoop.hbase.security.KerberosInfo; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest; 
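These blocking protobuf interfaces are normally not called directly; a typical client goes through the higher-level admin API in the client package, which builds the request messages internally. A rough sketch under that assumption (illustrative only, not part of the patch; the table and family names are made up and error handling is elided):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class AdminSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      HTableDescriptor htd = new HTableDescriptor("demo");
      htd.addFamily(new HColumnDescriptor(Bytes.toBytes("cf")));
      admin.createTable(htd);      // sends a CreateTableRequest to the master
      admin.disableTable("demo");  // tables must be disabled before deletion
      admin.deleteTable("demo");
    } finally {
      admin.close();
    }
  }
}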
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse; + + +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; + +/** + * Protocol that a client uses to communicate with the Master (for admin purposes). + */ +@KerberosInfo( + serverPrincipal = "hbase.master.kerberos.principal") +@TokenInfo("HBASE_AUTH_TOKEN") +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface MasterAdminProtocol extends + MasterAdminService.BlockingInterface, MasterProtocol { + public static final long VERSION = 1L; + + /* Column-level */ + + /** + * Adds a column to the specified table + * @param controller Unused (set to null). + * @param req AddColumnRequest that contains:
+ * - tableName: table to modify
+ * - column: column descriptor + * @throws ServiceException + */ + @Override + public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req) + throws ServiceException; + + /** + * Deletes a column from the specified table. Table must be disabled. + * @param controller Unused (set to null). + * @param req DeleteColumnRequest that contains:
+ * - tableName: table to alter
+ * - columnName: column family to remove + * @throws ServiceException + */ + @Override + public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req) + throws ServiceException; + + /** + * Modifies an existing column on the specified table + * @param controller Unused (set to null). + * @param req ModifyColumnRequest that contains:
+ * - tableName: table name
+ * - descriptor: new column descriptor + * @throws ServiceException e + */ + @Override + public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req) + throws ServiceException; + + /* Region-level */ + + /** + * Move a region to a specified destination server. + * @param controller Unused (set to null). + * @param req The request that contains:
+ * - region: The encoded region name; i.e. the hash that makes + * up the region name suffix: e.g. if regionname is + * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., + * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396.
+ * - destServerName: The servername of the destination regionserver. If + * passed the empty byte array we'll assign to a random server. A server name + * is made of host, port and startcode. Here is an example: + * host187.example.com,60020,1289493121758. + * @throws ServiceException that wraps a UnknownRegionException if we can't find a + * region named encodedRegionName + */ + @Override + public MoveRegionResponse moveRegion(RpcController controller, MoveRegionRequest req) + throws ServiceException; + + /** + * Assign a region to a server chosen at random. + * @param controller Unused (set to null). + * @param req contains the region to assign. Will use existing RegionPlan if one + * found. + * @throws ServiceException + */ + @Override + public AssignRegionResponse assignRegion(RpcController controller, AssignRegionRequest req) + throws ServiceException; + + /** + * Unassign a region from current hosting regionserver. Region will then be + * assigned to a regionserver chosen at random. Region could be reassigned + * back to the same server. Use {@link #moveRegion} if you want to + * control the region movement. + * @param controller Unused (set to null). + * @param req The request that contains:
+ * - region: Region to unassign. Will clear any existing RegionPlan + * if one found.
+ * - force: If true, force the unassign (this will remove the region from + * regions-in-transition, if present, as well as from the assigned regions list -- + * radical!). If this results in a double assignment, use hbck -fix to resolve. + * @throws ServiceException + */ + @Override + public UnassignRegionResponse unassignRegion(RpcController controller, UnassignRegionRequest req) + throws ServiceException; + + /** + * Offline a region from the assignment manager's in-memory state. The + * region should be in a closed state and there will be no attempt to + * automatically reassign the region as in unassign. This is a special + * method, and should only be used by experts or hbck. + * @param controller Unused (set to null). + * @param request OfflineRegionRequest that contains:
+ * - region: Region to offline. Will clear any existing RegionPlan + * if one found. + * @throws ServiceException + */ + @Override + public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request) + throws ServiceException; + + /* Table-level */ + + /** + * Creates a new table asynchronously. If splitKeys are specified, then the + * table will be created with an initial set of multiple regions. + * If splitKeys is null, the table will be created with a single region. + * @param controller Unused (set to null). + * @param req CreateTableRequest that contains:
+ * - tableSchema: table descriptor
+ * - splitKeys + * @throws ServiceException + */ + @Override + public CreateTableResponse createTable(RpcController controller, CreateTableRequest req) + throws ServiceException; + + /** + * Deletes a table + * @param controller Unused (set to null). + * @param req DeleteTableRequest that contains:
+ * - tableName: table to delete + * @throws ServiceException + */ + @Override + public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest req) + throws ServiceException; + + /** + * Puts the table on-line (only needed if table has been previously taken offline) + * @param controller Unused (set to null). + * @param req EnableTableRequest that contains:
+ * - tableName: table to enable + * @throws ServiceException + */ + @Override + public EnableTableResponse enableTable(RpcController controller, EnableTableRequest req) + throws ServiceException; + + /** + * Take table offline + * + * @param controller Unused (set to null). + * @param req DisableTableRequest that contains:
+ * - tableName: table to take offline + * @throws ServiceException + */ + @Override + public DisableTableResponse disableTable(RpcController controller, DisableTableRequest req) + throws ServiceException; + + /** + * Modify a table's metadata + * + * @param controller Unused (set to null). + * @param req ModifyTableRequest that contains:
+ * - tableName: table to modify
+ * - tableSchema: new descriptor for table + * @throws ServiceException + */ + @Override + public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req) + throws ServiceException; + + /* Cluster-level */ + + /** + * Shutdown an HBase cluster. + * @param controller Unused (set to null). + * @param request ShutdownRequest + * @return ShutdownResponse + * @throws ServiceException + */ + @Override + public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request) + throws ServiceException; + + /** + * Stop HBase Master only. + * Does not shutdown the cluster. + * @param controller Unused (set to null). + * @param request StopMasterRequest + * @return StopMasterResponse + * @throws ServiceException + */ + @Override + public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request) + throws ServiceException; + + /** + * Run the balancer. Will run the balancer and if regions to move, it will + * go ahead and do the reassignments. Can NOT run for various reasons. Check + * logs. + * @param c Unused (set to null). + * @param request BalanceRequest + * @return BalanceResponse that contains:
+ * - balancerRan: True if balancer ran and was able to tell the region servers to + * unassign all the regions to balance (the re-assignment itself is async), + * false otherwise. + */ + @Override + public BalanceResponse balance(RpcController c, BalanceRequest request) throws ServiceException; + + /** + * Turn the load balancer on or off. + * @param controller Unused (set to null). + * @param req SetBalancerRunningRequest that contains:
+ * - on: If true, enable balancer. If false, disable balancer.
+ * - synchronous: if true, wait until current balance() call, if outstanding, to return. + * @return SetBalancerRunningResponse that contains:
+ * - prevBalanceValue: Previous balancer value + * @throws ServiceException + */ + @Override + public SetBalancerRunningResponse setBalancerRunning( + RpcController controller, SetBalancerRunningRequest req) throws ServiceException; + + /** + * @param c Unused (set to null). + * @param req IsMasterRunningRequest + * @return IsMasterRunningResponse that contains:
+ * isMasterRunning: true if master is available + * @throws ServiceException + */ + @Override + public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req) + throws ServiceException; + + /** + * Run a scan of the catalog table + * @param c Unused (set to null). + * @param req CatalogScanRequest + * @return CatalogScanResponse that contains the int return code corresponding + * to the number of entries cleaned + * @throws ServiceException + */ + @Override + public CatalogScanResponse runCatalogScan(RpcController c, + CatalogScanRequest req) throws ServiceException; + + /** + * Enable/Disable the catalog janitor + * @param c Unused (set to null). + * @param req EnableCatalogJanitorRequest that contains:
+ * - enable: If true, enable catalog janitor. If false, disable janitor.
+ * @return EnableCatalogJanitorResponse that contains:
+ * - prevValue: true, if it was enabled previously; false, otherwise + * @throws ServiceException + */ + @Override + public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c, + EnableCatalogJanitorRequest req) throws ServiceException; + + /** + * Query whether the catalog janitor is enabled + * @param c Unused (set to null). + * @param req IsCatalogJanitorEnabledRequest + * @return IsCatalogJanitorEnabledResponse that contains:
+ * - value: true, if it is enabled; false, otherwise + * @throws ServiceException + */ + @Override + public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c, + IsCatalogJanitorEnabledRequest req) throws ServiceException; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java hbase-client/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java new file mode 100644 index 0000000..d8cff7d --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java @@ -0,0 +1,99 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService; +import org.apache.hadoop.hbase.security.TokenInfo; +import org.apache.hadoop.hbase.security.KerberosInfo; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse; + +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; + +/** + * Protocol that a client uses to communicate with the Master (for monitoring purposes). + */ +@KerberosInfo( + serverPrincipal = "hbase.master.kerberos.principal") +@TokenInfo("HBASE_AUTH_TOKEN") +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface MasterMonitorProtocol extends + MasterMonitorService.BlockingInterface, MasterProtocol { + public static final long VERSION = 1L; + + /** + * Used by the client to get the number of regions that have received the + * updated schema + * + * @param controller Unused (set to null). + * @param req GetSchemaAlterStatusRequest that contains:
+ * - tableName + * @return GetSchemaAlterStatusResponse indicating the number of regions updated. + * yetToUpdateRegions is the regions that are yet to be updated totalRegions + * is the total number of regions of the table + * @throws ServiceException + */ + @Override + public GetSchemaAlterStatusResponse getSchemaAlterStatus( + RpcController controller, GetSchemaAlterStatusRequest req) throws ServiceException; + + /** + * Get list of TableDescriptors for requested tables. + * @param controller Unused (set to null). + * @param req GetTableDescriptorsRequest that contains:
+ * - tableNames: requested tables, or if empty, all are requested + * @return GetTableDescriptorsResponse + * @throws ServiceException + */ + @Override + public GetTableDescriptorsResponse getTableDescriptors( + RpcController controller, GetTableDescriptorsRequest req) throws ServiceException; + + /** + * Return cluster status. + * @param controller Unused (set to null). + * @param req GetClusterStatusRequest + * @return status object + * @throws ServiceException + */ + @Override + public GetClusterStatusResponse getClusterStatus(RpcController controller, GetClusterStatusRequest req) + throws ServiceException; + + /** + * @param c Unused (set to null). + * @param req IsMasterRunningRequest + * @return IsMasterRunningRequest that contains:
+ * isMasterRunning: true if master is available + * @throws ServiceException + */ + @Override + public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req) + throws ServiceException; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java hbase-client/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java new file mode 100644 index 0000000..62b3b84 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Functions implemented by all the master protocols (e.g. MasterAdminProtocol, +// MasterMonitorProtocol). Currently, this is only isMasterRunning, which is used, +// on proxy creation, to check if the master has been stopped. If it has, +// a MasterNotRunningException is thrown back to the client, and the client retries. + +package org.apache.hadoop.hbase; + +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; +import org.apache.hadoop.hbase.ipc.VersionedProtocol; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; + +public interface MasterProtocol extends VersionedProtocol, MasterService.BlockingInterface { + + /** + * @param c Unused (set to null). + * @param req IsMasterRunningRequest + * @return IsMasterRunningRequest that contains:
+ * isMasterRunning: true if master is available + * @throws ServiceException + */ + public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req) + throws ServiceException; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java new file mode 100644 index 0000000..cdb3fba --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java @@ -0,0 +1,154 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.Bytes; +import java.util.TreeSet; +import java.util.Set; + +/** + * Encapsulates per-region load metrics. + */ +@InterfaceAudience.Private +public class RegionLoad { + + protected HBaseProtos.RegionLoad regionLoadPB; + + public RegionLoad(HBaseProtos.RegionLoad regionLoadPB) { + this.regionLoadPB = regionLoadPB; + } + + /** + * @return the region name + */ + public byte[] getName() { + return regionLoadPB.getRegionSpecifier().getValue().toByteArray(); + } + + /** + * @return the region name as a string + */ + public String getNameAsString() { + return Bytes.toString(getName()); + } + + /** + * @return the number of stores + */ + public int getStores() { + return regionLoadPB.getStores(); + } + + /** + * @return the number of storefiles + */ + public int getStorefiles() { + return regionLoadPB.getStorefiles(); + } + + /** + * @return the total size of the storefiles, in MB + */ + public int getStorefileSizeMB() { + return regionLoadPB.getStorefileSizeMB(); + } + + /** + * @return the memstore size, in MB + */ + public int getMemStoreSizeMB() { + return regionLoadPB.getMemstoreSizeMB(); + } + + /** + * @return the approximate size of storefile indexes on the heap, in MB + */ + public int getStorefileIndexSizeMB() { + return regionLoadPB.getStorefileIndexSizeMB(); + } + + /** + * @return the number of requests made to region + */ + public long getRequestsCount() { + return getReadRequestsCount() + getWriteRequestsCount(); + } + + /** + * @return the number of read requests made to region + */ + public long getReadRequestsCount() { + return regionLoadPB.getReadRequestsCount(); + } + + /** + * @return the number of write requests made to region + */ + public long getWriteRequestsCount() { + return regionLoadPB.getWriteRequestsCount(); + } + + /** + * @return The current total size of root-level indexes for the region, in KB. 
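A small helper sketch showing how the read-only getters above might be consumed (illustrative only, not part of the patch; RegionLoad instances are normally obtained from cluster status rather than constructed by hand):
import org.apache.hadoop.hbase.RegionLoad;

public class RegionLoadSketch {
  // Formats the per-region metrics exposed by the getters above.
  static String summarize(RegionLoad rl) {
    return rl.getNameAsString()
        + ": " + rl.getRequestsCount() + " requests, "
        + rl.getStorefiles() + " storefiles (" + rl.getStorefileSizeMB() + " MB), "
        + rl.getMemStoreSizeMB() + " MB memstore";
  }
}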
+ */ + public int getRootIndexSizeKB() { + return regionLoadPB.getRootIndexSizeKB(); + } + + /** + * @return The total size of all index blocks, not just the root level, in KB. + */ + public int getTotalStaticIndexSizeKB() { + return regionLoadPB.getTotalStaticIndexSizeKB(); + } + + /** + * @return The total size of all Bloom filter blocks, not just loaded into the + * block cache, in KB. + */ + public int getTotalStaticBloomSizeKB() { + return regionLoadPB.getTotalStaticBloomSizeKB(); + } + + /** + * @return the total number of kvs in current compaction + */ + public long getTotalCompactingKVs() { + return regionLoadPB.getTotalCompactingKVs(); + } + + /** + * @return the number of already compacted kvs in current compaction + */ + public long getCurrentCompactedKVs() { + return regionLoadPB.getCurrentCompactedKVs(); + } + + /** + * This does not really belong inside RegionLoad but its being done in the name of expediency. + * @return the completed sequence Id for the region + */ + public long getCompleteSequenceId() { + return regionLoadPB.getCompleteSequenceId(); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java hbase-client/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java new file mode 100644 index 0000000..069cea3 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.ipc.VersionedProtocol; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; +import org.apache.hadoop.hbase.security.TokenInfo; +import org.apache.hadoop.hbase.security.KerberosInfo; + +/** + * Protocol that a RegionServer uses to communicate its status to the Master. + */ +@KerberosInfo( + serverPrincipal = "hbase.master.kerberos.principal") +@TokenInfo("HBASE_AUTH_TOKEN") +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface RegionServerStatusProtocol extends + RegionServerStatusService.BlockingInterface, VersionedProtocol { + public static final long VERSION = 1L; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java new file mode 100644 index 0000000..2106710 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java @@ -0,0 +1,51 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; + +/** + * Defines the set of shared functions implemented by HBase servers (Masters + * and RegionServers). + */ +@InterfaceAudience.Private +public interface Server extends Abortable, Stoppable { + /** + * Gets the configuration object for this server. + */ + public Configuration getConfiguration(); + + /** + * Gets the ZooKeeper instance for this server. + */ + public ZooKeeperWatcher getZooKeeper(); + + /** + * @return Master's instance of {@link CatalogTracker} + */ + public CatalogTracker getCatalogTracker(); + + /** + * @return The unique server name for this server. + */ + public ServerName getServerName(); +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java new file mode 100644 index 0000000..bd88b6a --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java @@ -0,0 +1,305 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.TreeSet; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor; +import org.apache.hadoop.hbase.RegionLoad; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Strings; + +/** + * This class is used for exporting current state of load on a RegionServer. 
+ */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ServerLoad { + private int stores = 0; + private int storefiles = 0; + private int storeUncompressedSizeMB = 0; + private int storefileSizeMB = 0; + private int memstoreSizeMB = 0; + private int storefileIndexSizeMB = 0; + private int readRequestsCount = 0; + private int writeRequestsCount = 0; + private int rootIndexSizeKB = 0; + private int totalStaticIndexSizeKB = 0; + private int totalStaticBloomSizeKB = 0; + private long totalCompactingKVs = 0; + private long currentCompactedKVs = 0; + + public ServerLoad(HBaseProtos.ServerLoad serverLoad) { + this.serverLoad = serverLoad; + for (HBaseProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) { + stores += rl.getStores(); + storefiles += rl.getStorefiles(); + storeUncompressedSizeMB += rl.getStoreUncompressedSizeMB(); + storefileSizeMB += rl.getStorefileSizeMB(); + memstoreSizeMB += rl.getMemstoreSizeMB(); + storefileIndexSizeMB += rl.getStorefileIndexSizeMB(); + readRequestsCount += rl.getReadRequestsCount(); + writeRequestsCount += rl.getWriteRequestsCount(); + rootIndexSizeKB += rl.getRootIndexSizeKB(); + totalStaticIndexSizeKB += rl.getTotalStaticIndexSizeKB(); + totalStaticBloomSizeKB += rl.getTotalStaticBloomSizeKB(); + totalCompactingKVs += rl.getTotalCompactingKVs(); + currentCompactedKVs += rl.getCurrentCompactedKVs(); + } + + } + + // NOTE: Function name cannot start with "get" because then an OpenDataException is thrown because + // HBaseProtos.ServerLoad cannot be converted to an open data type(see HBASE-5967). + /* @return the underlying ServerLoad protobuf object */ + public HBaseProtos.ServerLoad obtainServerLoadPB() { + return serverLoad; + } + + protected HBaseProtos.ServerLoad serverLoad; + + /* @return number of requests since last report. */ + public int getNumberOfRequests() { + return serverLoad.getNumberOfRequests(); + } + public boolean hasNumberOfRequests() { + return serverLoad.hasNumberOfRequests(); + } + + /* @return total Number of requests from the start of the region server. */ + public int getTotalNumberOfRequests() { + return serverLoad.getTotalNumberOfRequests(); + } + public boolean hasTotalNumberOfRequests() { + return serverLoad.hasTotalNumberOfRequests(); + } + + /* @return the amount of used heap, in MB. */ + public int getUsedHeapMB() { + return serverLoad.getUsedHeapMB(); + } + public boolean hasUsedHeapMB() { + return serverLoad.hasUsedHeapMB(); + } + + /* @return the maximum allowable size of the heap, in MB. 
*/ + public int getMaxHeapMB() { + return serverLoad.getMaxHeapMB(); + } + public boolean hasMaxHeapMB() { + return serverLoad.hasMaxHeapMB(); + } + + public int getStores() { + return stores; + } + + public int getStorefiles() { + return storefiles; + } + + public int getStoreUncompressedSizeMB() { + return storeUncompressedSizeMB; + } + + public int getStorefileSizeInMB() { + return storefileSizeMB; + } + + public int getMemstoreSizeInMB() { + return memstoreSizeMB; + } + + public int getStorefileIndexSizeInMB() { + return storefileIndexSizeMB; + } + + public int getReadRequestsCount() { + return readRequestsCount; + } + + public int getWriteRequestsCount() { + return writeRequestsCount; + } + + public int getRootIndexSizeKB() { + return rootIndexSizeKB; + } + + public int getTotalStaticIndexSizeKB() { + return totalStaticIndexSizeKB; + } + + public int getTotalStaticBloomSizeKB() { + return totalStaticBloomSizeKB; + } + + public long getTotalCompactingKVs() { + return totalCompactingKVs; + } + + public long getCurrentCompactedKVs() { + return currentCompactedKVs; + } + + /** + * @return the number of regions + */ + public int getNumberOfRegions() { + return serverLoad.getRegionLoadsCount(); + } + + public int getInfoServerPort() { + return serverLoad.getInfoServerPort(); + } + + /** + * Originally, this method factored in the effect of requests going to the + * server as well. However, this does not interact very well with the current + * region rebalancing code, which only factors number of regions. For the + * interim, until we can figure out how to make rebalancing use all the info + * available, we're just going to make load purely the number of regions. + * + * @return load factor for this server + */ + public int getLoad() { + // See above comment + // int load = numberOfRequests == 0 ? 1 : numberOfRequests; + // load *= numberOfRegions == 0 ? 
1 : numberOfRegions; + // return load; + return getNumberOfRegions(); + } + + /** + * @return region load metrics + */ + public Map getRegionsLoad() { + Map regionLoads = + new TreeMap(Bytes.BYTES_COMPARATOR); + for (HBaseProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) { + RegionLoad regionLoad = new RegionLoad(rl); + regionLoads.put(regionLoad.getName(), regionLoad); + } + return regionLoads; + } + + /** + * Return the RegionServer-level coprocessors + * @return string array of loaded RegionServer-level coprocessors + */ + public String[] getRegionServerCoprocessors() { + List list = obtainServerLoadPB().getCoprocessorsList(); + String [] ret = new String[list.size()]; + int i = 0; + for (Coprocessor elem : list) { + ret[i++] = elem.getName(); + } + + return ret; + } + + /** + * Return the RegionServer-level and Region-level coprocessors + * @return string array of loaded RegionServer-level and + * Region-level coprocessors + */ + public String[] getRsCoprocessors() { + // Need a set to remove duplicates, but since generated Coprocessor class + // is not Comparable, make it a Set instead of Set + TreeSet coprocessSet = new TreeSet(); + for (Coprocessor coprocessor : obtainServerLoadPB().getCoprocessorsList()) { + coprocessSet.add(coprocessor.getName()); + } + return coprocessSet.toArray(new String[0]); + } + + /** + * @return number of requests per second received since the last report + */ + public double getRequestsPerSecond() { + return getNumberOfRequests(); + } + + /** + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + StringBuilder sb = + Strings.appendKeyValue(new StringBuilder(), "requestsPerSecond", + Double.valueOf(getRequestsPerSecond())); + Strings.appendKeyValue(sb, "numberOfOnlineRegions", Integer.valueOf(getNumberOfRegions())); + sb = Strings.appendKeyValue(sb, "usedHeapMB", Integer.valueOf(this.getUsedHeapMB())); + sb = Strings.appendKeyValue(sb, "maxHeapMB", Integer.valueOf(getMaxHeapMB())); + sb = Strings.appendKeyValue(sb, "numberOfStores", Integer.valueOf(this.stores)); + sb = Strings.appendKeyValue(sb, "numberOfStorefiles", Integer.valueOf(this.storefiles)); + sb = + Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", + Integer.valueOf(this.storeUncompressedSizeMB)); + sb = Strings.appendKeyValue(sb, "storefileSizeMB", Integer.valueOf(this.storefileSizeMB)); + if (this.storeUncompressedSizeMB != 0) { + sb = + Strings.appendKeyValue( + sb, + "compressionRatio", + String.format("%.4f", (float) this.storefileSizeMB + / (float) this.storeUncompressedSizeMB)); + } + sb = Strings.appendKeyValue(sb, "memstoreSizeMB", Integer.valueOf(this.memstoreSizeMB)); + sb = + Strings.appendKeyValue(sb, "storefileIndexSizeMB", + Integer.valueOf(this.storefileIndexSizeMB)); + sb = Strings.appendKeyValue(sb, "readRequestsCount", Long.valueOf(this.readRequestsCount)); + sb = Strings.appendKeyValue(sb, "writeRequestsCount", Long.valueOf(this.writeRequestsCount)); + sb = Strings.appendKeyValue(sb, "rootIndexSizeKB", Integer.valueOf(this.rootIndexSizeKB)); + sb = + Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", + Integer.valueOf(this.totalStaticIndexSizeKB)); + sb = + Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", + Integer.valueOf(this.totalStaticBloomSizeKB)); + sb = Strings.appendKeyValue(sb, "totalCompactingKVs", Long.valueOf(this.totalCompactingKVs)); + sb = Strings.appendKeyValue(sb, "currentCompactedKVs", Long.valueOf(this.currentCompactedKVs)); + float compactionProgressPct = Float.NaN; + if (this.totalCompactingKVs > 0) { + 
compactionProgressPct = + Float.valueOf((float) this.currentCompactedKVs / this.totalCompactingKVs); + } + sb = Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); + + String[] coprocessorStrings = getRsCoprocessors(); + if (coprocessorStrings != null) { + sb = Strings.appendKeyValue(sb, "coprocessors", Arrays.toString(coprocessorStrings)); + } + return sb.toString(); + } + + public static final ServerLoad EMPTY_SERVERLOAD = + new ServerLoad(HBaseProtos.ServerLoad.newBuilder().build()); +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java new file mode 100644 index 0000000..348ca2f --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java @@ -0,0 +1,355 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.util.Collection; +import java.util.regex.Pattern; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer; +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.hadoop.hbase.util.Bytes; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * Instance of an HBase ServerName. + * A server name uniquely identifies a server instance and is made up of the + * combination of hostname, port, and startcode. The startcode distinguishes + * restarted servers on the same hostname and port (the startcode is usually the + * timestamp of server startup). The {@link #toString()} format of + * ServerName is safe to use in the filesystem and as a znode name in + * ZooKeeper. Its format is: + * <hostname> '{@link #SERVERNAME_SEPARATOR}' <port> '{@link #SERVERNAME_SEPARATOR}' <startcode>. + * For example, if the hostname is example.org, the port is 1234, + * and the startcode for the regionserver is 1212121212, then + * the {@link #toString()} would be example.org,1234,1212121212. + * + *
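To illustrate how the ServerLoad and RegionLoad accessors above fit together, a hedged sketch of a reporting helper follows; dumpLoad is a hypothetical name, and it assumes getRegionsLoad() is keyed by region name bytes and valued by RegionLoad, as its implementation above indicates.

  // Hypothetical helper that prints server-level totals and then per-region metrics.
  static void dumpLoad(ServerLoad load) {
    System.out.println("regions=" + load.getNumberOfRegions()
        + " usedHeapMB=" + load.getUsedHeapMB()
        + " maxHeapMB=" + load.getMaxHeapMB());
    for (RegionLoad rl : load.getRegionsLoad().values()) {
      System.out.println(rl.getNameAsString()
          + " storefileSizeMB=" + rl.getStorefileSizeMB()
          + " readRequests=" + rl.getReadRequestsCount()
          + " writeRequests=" + rl.getWriteRequestsCount());
    }
  }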

You can obtain a versioned serialized form of this class by calling + * {@link #getVersionedBytes()}. To deserialize, call {@link #parseVersionedServerName(byte[])} + * + *

Immutable. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ServerName implements Comparable { + /** + * Version for this class. + * Its a short rather than a byte so I can for sure distinguish between this + * version of this class and the version previous to this which did not have + * a version. + */ + private static final short VERSION = 0; + static final byte [] VERSION_BYTES = Bytes.toBytes(VERSION); + + /** + * What to use if no startcode supplied. + */ + public static final int NON_STARTCODE = -1; + + /** + * This character is used as separator between server hostname, port and + * startcode. + */ + public static final String SERVERNAME_SEPARATOR = ","; + + public static Pattern SERVERNAME_PATTERN = + Pattern.compile("[^" + SERVERNAME_SEPARATOR + "]+" + + SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX + + SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX + "$"); + + /** + * What to use if server name is unknown. + */ + public static final String UNKNOWN_SERVERNAME = "#unknown#"; + + private final String servername; + private final String hostname; + private final int port; + private final long startcode; + + /** + * Cached versioned bytes of this ServerName instance. + * @see #getVersionedBytes() + */ + private byte [] bytes; + + public ServerName(final String hostname, final int port, final long startcode) { + this.hostname = hostname; + this.port = port; + this.startcode = startcode; + this.servername = getServerName(hostname, port, startcode); + } + + public ServerName(final String serverName) { + this(parseHostname(serverName), parsePort(serverName), + parseStartcode(serverName)); + } + + public ServerName(final String hostAndPort, final long startCode) { + this(Addressing.parseHostname(hostAndPort), + Addressing.parsePort(hostAndPort), startCode); + } + + public static String parseHostname(final String serverName) { + if (serverName == null || serverName.length() <= 0) { + throw new IllegalArgumentException("Passed hostname is null or empty"); + } + int index = serverName.indexOf(SERVERNAME_SEPARATOR); + return serverName.substring(0, index); + } + + public static int parsePort(final String serverName) { + String [] split = serverName.split(SERVERNAME_SEPARATOR); + return Integer.parseInt(split[1]); + } + + public static long parseStartcode(final String serverName) { + int index = serverName.lastIndexOf(SERVERNAME_SEPARATOR); + return Long.parseLong(serverName.substring(index + 1)); + } + + @Override + public String toString() { + return getServerName(); + } + + /** + * @return {@link #getServerName()} as bytes with a short-sized prefix with + * the ServerName#VERSION of this class. 
+ */ + public synchronized byte [] getVersionedBytes() { + if (this.bytes == null) { + this.bytes = Bytes.add(VERSION_BYTES, Bytes.toBytes(getServerName())); + } + return this.bytes; + } + + public String getServerName() { + return servername; + } + + public String getHostname() { + return hostname; + } + + public int getPort() { + return port; + } + + public long getStartcode() { + return startcode; + } + + /** + * @param hostName + * @param port + * @param startcode + * @return Server name made of the concatenation of hostname, port and + * startcode formatted as <hostname> ',' <port> ',' <startcode> + */ + public static String getServerName(String hostName, int port, long startcode) { + final StringBuilder name = new StringBuilder(hostName.length() + 1 + 5 + 1 + 13); + name.append(hostName); + name.append(SERVERNAME_SEPARATOR); + name.append(port); + name.append(SERVERNAME_SEPARATOR); + name.append(startcode); + return name.toString(); + } + + /** + * @param hostAndPort String in form of <hostname> ':' <port> + * @param startcode + * @return Server name made of the concatenation of hostname, port and + * startcode formatted as <hostname> ',' <port> ',' <startcode> + */ + public static String getServerName(final String hostAndPort, + final long startcode) { + int index = hostAndPort.indexOf(":"); + if (index <= 0) throw new IllegalArgumentException("Expected ':' "); + return getServerName(hostAndPort.substring(0, index), + Integer.parseInt(hostAndPort.substring(index + 1)), startcode); + } + + /** + * @return Hostname and port formatted as described at + * {@link Addressing#createHostAndPortStr(String, int)} + */ + public String getHostAndPort() { + return Addressing.createHostAndPortStr(this.hostname, this.port); + } + + /** + * @param serverName ServerName in form specified by {@link #getServerName()} + * @return The server start code parsed from servername + */ + public static long getServerStartcodeFromServerName(final String serverName) { + int index = serverName.lastIndexOf(SERVERNAME_SEPARATOR); + return Long.parseLong(serverName.substring(index + 1)); + } + + /** + * Utility method to excise the start code from a server name + * @param inServerName full server name + * @return server name less its start code + */ + public static String getServerNameLessStartCode(String inServerName) { + if (inServerName != null && inServerName.length() > 0) { + int index = inServerName.lastIndexOf(SERVERNAME_SEPARATOR); + if (index > 0) { + return inServerName.substring(0, index); + } + } + return inServerName; + } + + @Override + public int compareTo(ServerName other) { + int compare = this.getHostname().toLowerCase(). + compareTo(other.getHostname().toLowerCase()); + if (compare != 0) return compare; + compare = this.getPort() - other.getPort(); + if (compare != 0) return compare; + return (int)(this.getStartcode() - other.getStartcode()); + } + + @Override + public int hashCode() { + return getServerName().hashCode(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null) return false; + if (!(o instanceof ServerName)) return false; + return this.compareTo((ServerName)o) == 0; + } + + + /** + * @return ServerName with matching hostname and port. 
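A small sketch of the intended round trip through the string and versioned-bytes forms, using the constructors and accessors above together with parseVersionedServerName defined a little further down; the host, port, and startcode values are arbitrary.

  // Illustrative only: both parse paths should yield an equal ServerName.
  static void serverNameRoundTrip() {
    ServerName sn = new ServerName("example.org", 1234, 1212121212L);
    ServerName fromString = new ServerName(sn.getServerName());        // "example.org,1234,1212121212"
    ServerName fromBytes =
        ServerName.parseVersionedServerName(sn.getVersionedBytes());   // short VERSION prefix + same string
    assert sn.equals(fromString) && sn.equals(fromBytes);
  }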
+ */ + public static ServerName findServerWithSameHostnamePort(final Collection names, + final ServerName serverName) { + for (ServerName sn: names) { + if (isSameHostnameAndPort(serverName, sn)) return sn; + } + return null; + } + + /** + * @param left + * @param right + * @return True if other has same hostname and port. + */ + public static boolean isSameHostnameAndPort(final ServerName left, + final ServerName right) { + if (left == null) return false; + if (right == null) return false; + return left.getHostname().equals(right.getHostname()) && + left.getPort() == right.getPort(); + } + + /** + * Use this method instantiating a {@link ServerName} from bytes + * gotten from a call to {@link #getVersionedBytes()}. Will take care of the + * case where bytes were written by an earlier version of hbase. + * @param versionedBytes Pass bytes gotten from a call to {@link #getVersionedBytes()} + * @return A ServerName instance. + * @see #getVersionedBytes() + */ + public static ServerName parseVersionedServerName(final byte [] versionedBytes) { + // Version is a short. + short version = Bytes.toShort(versionedBytes); + if (version == VERSION) { + int length = versionedBytes.length - Bytes.SIZEOF_SHORT; + return new ServerName(Bytes.toString(versionedBytes, Bytes.SIZEOF_SHORT, length)); + } + // Presume the bytes were written with an old version of hbase and that the + // bytes are actually a String of the form "'' ':' ''". + return new ServerName(Bytes.toString(versionedBytes), NON_STARTCODE); + } + + /** + * @param str Either an instance of {@link ServerName#toString()} or a + * "'' ':' ''". + * @return A ServerName instance. + */ + public static ServerName parseServerName(final String str) { + return SERVERNAME_PATTERN.matcher(str).matches()? new ServerName(str): + new ServerName(str, NON_STARTCODE); + } + + + /** + * @return true if the String follows the pattern of {@link ServerName#toString()}, false + * otherwise. + */ + public static boolean isFullServerName(final String str){ + if (str == null ||str.isEmpty()) return false; + return SERVERNAME_PATTERN.matcher(str).matches(); + } + + /** + * Get a ServerName from the passed in data bytes. + * @param data Data with a serialize server name in it; can handle the old style + * servername where servername was host and port. Works too with data that + * begins w/ the pb 'PBUF' magic and that is then followed by a protobuf that + * has a serialized {@link ServerName} in it. + * @return Returns null if data is null else converts passed data + * to a ServerName instance. + * @throws DeserializationException + */ + public static ServerName parseFrom(final byte [] data) throws DeserializationException { + if (data == null || data.length <= 0) return null; + if (ProtobufUtil.isPBMagicPrefix(data)) { + int prefixLen = ProtobufUtil.lengthOfPBMagic(); + try { + RootRegionServer rss = + RootRegionServer.newBuilder().mergeFrom(data, prefixLen, data.length - prefixLen).build(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn = rss.getServer(); + return new ServerName(sn.getHostName(), sn.getPort(), sn.getStartCode()); + } catch (InvalidProtocolBufferException e) { + // A failed parse of the znode is pretty catastrophic. Rather than loop + // retrying hoping the bad bytes will changes, and rather than change + // the signature on this method to add an IOE which will send ripples all + // over the code base, throw a RuntimeException. This should "never" happen. + // Fail fast if it does. 
+ throw new DeserializationException(e); + } + } + // The str returned could be old style -- pre hbase-1502 -- which was + // hostname and port seperated by a colon rather than hostname, port and + // startcode delimited by a ','. + String str = Bytes.toString(data); + int index = str.indexOf(ServerName.SERVERNAME_SEPARATOR); + if (index != -1) { + // Presume its ServerName serialized with versioned bytes. + return ServerName.parseVersionedServerName(data); + } + // Presume it a hostname:port format. + String hostname = Addressing.parseHostname(str); + int port = Addressing.parsePort(str); + return new ServerName(hostname, port, -1L); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java new file mode 100644 index 0000000..8a383e4 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java @@ -0,0 +1,703 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.catalog; + +import java.io.EOFException; +import java.io.IOException; +import java.net.ConnectException; +import java.net.NoRouteToHostException; +import java.net.SocketException; +import java.net.SocketTimeoutException; +import java.net.UnknownHostException; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.AdminProtocol; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HConnectionManager; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.RetriesExhaustedException; +import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.MetaNodeTracker; +import org.apache.hadoop.hbase.zookeeper.RootRegionTracker; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.ipc.RemoteException; + +/** + * Tracks the availability of the catalog tables -ROOT- and + * .META.. + * + * This class is "read-only" in that the locations of the catalog tables cannot + * be explicitly set. Instead, ZooKeeper is used to learn of the availability + * and location of -ROOT-. -ROOT- is used to learn of + * the location of .META. 
If not available in -ROOT-, + * ZooKeeper is used to monitor for a new location of .META.. + * + *

Call {@link #start()} to start up operation. Call {@link #stop()}} to + * interrupt waits and close up shop. + */ +@InterfaceAudience.Private +public class CatalogTracker { + // TODO: This class needs a rethink. The original intent was that it would be + // the one-stop-shop for root and meta locations and that it would get this + // info from reading and watching zk state. The class was to be used by + // servers when they needed to know of root and meta movement but also by + // client-side (inside in HTable) so rather than figure root and meta + // locations on fault, the client would instead get notifications out of zk. + // + // But this original intent is frustrated by the fact that this class has to + // read an hbase table, the -ROOT- table, to figure out the .META. region + // location which means we depend on an HConnection. HConnection will do + // retrying but also, it has its own mechanism for finding root and meta + // locations (and for 'verifying'; it tries the location and if it fails, does + // new lookup, etc.). So, at least for now, HConnection (or HTable) can't + // have a CT since CT needs a HConnection (Even then, do want HT to have a CT? + // For HT keep up a session with ZK? Rather, shouldn't we do like asynchbase + // where we'd open a connection to zk, read what we need then let the + // connection go?). The 'fix' is make it so both root and meta addresses + // are wholey up in zk -- not in zk (root) -- and in an hbase table (meta). + // + // But even then, this class does 'verification' of the location and it does + // this by making a call over an HConnection (which will do its own root + // and meta lookups). Isn't this verification 'useless' since when we + // return, whatever is dependent on the result of this call then needs to + // use HConnection; what we have verified may change in meantime (HConnection + // uses the CT primitives, the root and meta trackers finding root locations). + // + // When meta is moved to zk, this class may make more sense. In the + // meantime, it does not cohere. It should just watch meta and root and not + // NOT do verification -- let that be out in HConnection since its going to + // be done there ultimately anyways. + // + // This class has spread throughout the codebase. It needs to be reigned in. + // This class should be used server-side only, even if we move meta location + // up into zk. Currently its used over in the client package. Its used in + // MetaReader and MetaEditor classes usually just to get the Configuration + // its using (It does this indirectly by asking its HConnection for its + // Configuration and even then this is just used to get an HConnection out on + // the other end). I made https://issues.apache.org/jira/browse/HBASE-4495 for + // doing CT fixup. St.Ack 09/30/2011. + // + + // TODO: Timeouts have never been as advertised in here and its worse now + // with retries; i.e. the HConnection retries and pause goes ahead whatever + // the passed timeout is. Fix. + private static final Log LOG = LogFactory.getLog(CatalogTracker.class); + private final HConnection connection; + private final ZooKeeperWatcher zookeeper; + private final RootRegionTracker rootRegionTracker; + private final MetaNodeTracker metaNodeTracker; + private final AtomicBoolean metaAvailable = new AtomicBoolean(false); + private boolean instantiatedzkw = false; + private Abortable abortable; + + /* + * Do not clear this address once set. Its needed when we do + * server shutdown processing -- we need to know who had .META. last. 
If you + * want to know if the address is good, rely on {@link #metaAvailable} value. + */ + private ServerName metaLocation; + + /* + * Timeout waiting on root or meta to be set. + */ + private final int defaultTimeout; + + private boolean stopped = false; + + static final byte [] ROOT_REGION_NAME = + HRegionInfo.ROOT_REGIONINFO.getRegionName(); + static final byte [] META_REGION_NAME = + HRegionInfo.FIRST_META_REGIONINFO.getRegionName(); + + /** + * Constructs a catalog tracker. Find current state of catalog tables. + * Begin active tracking by executing {@link #start()} post construction. Does + * not timeout. + * + * @param conf + * the {@link Configuration} from which a {@link HConnection} will be + * obtained; if problem, this connections + * {@link HConnection#abort(String, Throwable)} will be called. + * @throws IOException + */ + public CatalogTracker(final Configuration conf) throws IOException { + this(null, conf, null); + } + + /** + * Constructs the catalog tracker. Find current state of catalog tables. + * Begin active tracking by executing {@link #start()} post construction. + * Does not timeout. + * @param zk If zk is null, we'll create an instance (and shut it down + * when {@link #stop()} is called) else we'll use what is passed. + * @param conf + * @param abortable If fatal exception we'll call abort on this. May be null. + * If it is we'll use the Connection associated with the passed + * {@link Configuration} as our Abortable. + * @throws IOException + */ + public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf, + final Abortable abortable) + throws IOException { + this(zk, conf, abortable, + conf.getInt("hbase.catalogtracker.default.timeout", 1000)); + } + + /** + * Constructs the catalog tracker. Find current state of catalog tables. + * Begin active tracking by executing {@link #start()} post construction. + * @param zk If zk is null, we'll create an instance (and shut it down + * when {@link #stop()} is called) else we'll use what is passed. + * @param conf + * @param abortable If fatal exception we'll call abort on this. May be null. + * If it is we'll use the Connection associated with the passed + * {@link Configuration} as our Abortable. + * @param defaultTimeout Timeout to use. Pass zero for no timeout + * ({@link Object#wait(long)} when passed a 0 waits for ever). + * @throws IOException + */ + public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf, + Abortable abortable, final int defaultTimeout) + throws IOException { + this(zk, conf, HConnectionManager.getConnection(conf), abortable, defaultTimeout); + } + + public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf, + HConnection connection, Abortable abortable, final int defaultTimeout) + throws IOException { + this.connection = connection; + if (abortable == null) { + // A connection is abortable. + this.abortable = this.connection; + } + Abortable throwableAborter = new Abortable() { + + @Override + public void abort(String why, Throwable e) { + throw new RuntimeException(why, e); + } + + @Override + public boolean isAborted() { + return true; + } + + }; + if (zk == null) { + // Create our own. Set flag so we tear it down on stop. 
+ this.zookeeper = + new ZooKeeperWatcher(conf, "catalogtracker-on-" + connection.toString(), + abortable); + instantiatedzkw = true; + } else { + this.zookeeper = zk; + } + this.rootRegionTracker = new RootRegionTracker(zookeeper, throwableAborter); + final CatalogTracker ct = this; + // Override nodeDeleted so we get notified when meta node deleted + this.metaNodeTracker = new MetaNodeTracker(zookeeper, throwableAborter) { + public void nodeDeleted(String path) { + if (!path.equals(node)) return; + ct.resetMetaLocation(); + } + }; + this.defaultTimeout = defaultTimeout; + } + + /** + * Starts the catalog tracker. + * Determines current availability of catalog tables and ensures all further + * transitions of either region are tracked. + * @throws IOException + * @throws InterruptedException + */ + public void start() throws IOException, InterruptedException { + LOG.debug("Starting catalog tracker " + this); + try { + this.rootRegionTracker.start(); + this.metaNodeTracker.start(); + } catch (RuntimeException e) { + Throwable t = e.getCause(); + this.abortable.abort(e.getMessage(), t); + throw new IOException("Attempt to start root/meta tracker failed.", t); + } + } + + /** + * Stop working. + * Interrupts any ongoing waits. + */ + public void stop() { + if (!this.stopped) { + LOG.debug("Stopping catalog tracker " + this); + this.stopped = true; + this.rootRegionTracker.stop(); + this.metaNodeTracker.stop(); + try { + if (this.connection != null) { + this.connection.close(); + } + } catch (IOException e) { + // Although the {@link Closeable} interface throws an {@link + // IOException}, in reality, the implementation would never do that. + LOG.error("Attempt to close catalog tracker's connection failed.", e); + } + if (this.instantiatedzkw) { + this.zookeeper.close(); + } + // Call this and it will interrupt any ongoing waits on meta. + synchronized (this.metaAvailable) { + this.metaAvailable.notifyAll(); + } + } + } + + /** + * Gets the current location for -ROOT- or null if location is + * not currently available. + * @return {@link ServerName} for server hosting -ROOT- or null + * if none available + * @throws InterruptedException + */ + public ServerName getRootLocation() throws InterruptedException { + return this.rootRegionTracker.getRootRegionLocation(); + } + + /** + * @return {@link ServerName} for server hosting .META. or null + * if none available + */ + public ServerName getMetaLocation() { + return this.metaLocation; + } + + /** + * Method used by master on startup trying to figure state of cluster. + * Returns the current meta location unless its null. In this latter case, + * it has not yet been set so go check whats up in -ROOT- and + * return that. + * @return {@link ServerName} for server hosting .META. or if null, + * we'll read the location that is up in -ROOT- table (which + * could be null or just plain stale). + * @throws IOException + */ + public ServerName getMetaLocationOrReadLocationFromRoot() throws IOException { + ServerName sn = getMetaLocation(); + return sn != null? sn: MetaReader.getMetaRegionLocation(this); + } + + /** + * Gets the current location for -ROOT- if available and waits + * for up to the specified timeout if not immediately available. Returns null + * if the timeout elapses before root is available. 
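As a hedged sketch of the intended lifecycle (construct, start(), read locations, stop()), using only the constructors and methods defined above; lookupMeta is a hypothetical helper and conf an ordinary HBase Configuration.

  // Hypothetical caller-side sequence, not part of this class.
  static ServerName lookupMeta(Configuration conf)
      throws IOException, InterruptedException {
    CatalogTracker ct = new CatalogTracker(conf); // obtains an HConnection and creates its own ZooKeeperWatcher
    ct.start();                                   // begin tracking -ROOT- and .META.
    try {
      // Prefer the cached .META. location; otherwise read it out of -ROOT-.
      return ct.getMetaLocationOrReadLocationFromRoot();
    } finally {
      ct.stop();                                  // interrupts waits and closes what it opened
    }
  }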
+ * @param timeout maximum time to wait for root availability, in milliseconds + * @return {@link ServerName} for server hosting -ROOT- or null + * if none available + * @throws InterruptedException if interrupted while waiting + * @throws NotAllMetaRegionsOnlineException if root not available before + * timeout + */ + public ServerName waitForRoot(final long timeout) + throws InterruptedException, NotAllMetaRegionsOnlineException { + ServerName sn = rootRegionTracker.waitRootRegionLocation(timeout); + if (sn == null) { + throw new NotAllMetaRegionsOnlineException("Timed out; " + timeout + "ms"); + } + return sn; + } + + /** + * Gets a connection to the server hosting root, as reported by ZooKeeper, + * waiting up to the specified timeout for availability. + * @param timeout How long to wait on root location + * @see #waitForRoot(long) for additional information + * @return connection to server hosting root + * @throws InterruptedException + * @throws NotAllMetaRegionsOnlineException if timed out waiting + * @throws IOException + * @deprecated Use #getRootServerConnection(long) + */ + public AdminProtocol waitForRootServerConnection(long timeout) + throws InterruptedException, NotAllMetaRegionsOnlineException, IOException { + return getRootServerConnection(timeout); + } + + /** + * Gets a connection to the server hosting root, as reported by ZooKeeper, + * waiting up to the specified timeout for availability. + *

WARNING: Does not retry. Use an {@link HTable} instead. + * @param timeout How long to wait on root location + * @see #waitForRoot(long) for additional information + * @return connection to server hosting root + * @throws InterruptedException + * @throws NotAllMetaRegionsOnlineException if timed out waiting + * @throws IOException + */ + AdminProtocol getRootServerConnection(long timeout) + throws InterruptedException, NotAllMetaRegionsOnlineException, IOException { + return getCachedConnection(waitForRoot(timeout)); + } + + /** + * Gets a connection to the server hosting root, as reported by ZooKeeper, + * waiting for the default timeout specified on instantiation. + * @see #waitForRoot(long) for additional information + * @return connection to server hosting root + * @throws NotAllMetaRegionsOnlineException if timed out waiting + * @throws IOException + * @deprecated Use #getRootServerConnection(long) + */ + public AdminProtocol waitForRootServerConnectionDefault() + throws NotAllMetaRegionsOnlineException, IOException { + try { + return getRootServerConnection(this.defaultTimeout); + } catch (InterruptedException e) { + throw new NotAllMetaRegionsOnlineException("Interrupted"); + } + } + + /** + * Gets a connection to the server currently hosting .META. or + * null if location is not currently available. + *

+ * If a location is known, a connection to the cached location is returned. + * If refresh is true, the cached connection is verified first before + * returning. If the connection is not valid, it is reset and rechecked. + *

+ * If no location for meta is currently known, method checks ROOT for a new + * location, verifies META is currently there, and returns a cached connection + * to the server hosting META. + * + * @return connection to server hosting meta, null if location not available + * @throws IOException + * @throws InterruptedException + */ + private AdminProtocol getMetaServerConnection() + throws IOException, InterruptedException { + synchronized (metaAvailable) { + if (metaAvailable.get()) { + AdminProtocol current = getCachedConnection(this.metaLocation); + // If we are to refresh, verify we have a good connection by making + // an invocation on it. + if (verifyRegionLocation(current, this.metaLocation, META_REGION_NAME)) { + return current; + } + resetMetaLocation(); + } + // We got here because there is no meta available or because whats + // available is bad. + + // Now read the current .META. content from -ROOT-. Note: This goes via + // an HConnection. It has its own way of figuring root and meta locations + // which we have to wait on. + ServerName newLocation = MetaReader.getMetaRegionLocation(this); + if (newLocation == null) return null; + + AdminProtocol newConnection = getCachedConnection(newLocation); + if (verifyRegionLocation(newConnection, newLocation, META_REGION_NAME)) { + setMetaLocation(newLocation); + return newConnection; + } else { + if (LOG.isTraceEnabled()) { + LOG.trace("New .META. server: " + newLocation + " isn't valid." + + " Cached .META. server: " + this.metaLocation); + } + } + return null; + } + } + + /** + * Waits indefinitely for availability of .META.. Used during + * cluster startup. Does not verify meta, just that something has been + * set up in zk. + * @see #waitForMeta(long) + * @throws InterruptedException if interrupted while waiting + */ + public void waitForMeta() throws InterruptedException { + while (!this.stopped) { + try { + if (waitForMeta(100) != null) break; + } catch (NotAllMetaRegionsOnlineException e) { + if (LOG.isTraceEnabled()) { + LOG.info(".META. still not available, sleeping and retrying." + + " Reason: " + e.getMessage()); + } + } catch (IOException e) { + LOG.info("Retrying", e); + } + } + } + + /** + * Gets the current location for .META. if available and waits + * for up to the specified timeout if not immediately available. Throws an + * exception if timed out waiting. This method differs from {@link #waitForMeta()} + * in that it will go ahead and verify the location gotten from ZooKeeper and + * -ROOT- region by trying to use returned connection. + * @param timeout maximum time to wait for meta availability, in milliseconds + * @return {@link ServerName} for server hosting .META. or null + * if none available + * @throws InterruptedException if interrupted while waiting + * @throws IOException unexpected exception connecting to meta server + * @throws NotAllMetaRegionsOnlineException if meta not available before + * timeout + */ + public ServerName waitForMeta(long timeout) + throws InterruptedException, IOException, NotAllMetaRegionsOnlineException { + long stop = System.currentTimeMillis() + timeout; + long waitTime = Math.min(50, timeout); + synchronized (metaAvailable) { + while(!stopped && (timeout == 0 || System.currentTimeMillis() < stop)) { + if (getMetaServerConnection() != null) { + return metaLocation; + } + // perhaps -ROOT- region isn't available, let us wait a bit and retry. 
+ metaAvailable.wait(waitTime); + } + if (getMetaServerConnection() == null) { + throw new NotAllMetaRegionsOnlineException("Timed out (" + timeout + "ms)"); + } + return metaLocation; + } + } + + /** + * Gets a connection to the server hosting meta, as reported by ZooKeeper, + * waiting up to the specified timeout for availability. + * @see #waitForMeta(long) for additional information + * @return connection to server hosting meta + * @throws InterruptedException + * @throws NotAllMetaRegionsOnlineException if timed out waiting + * @throws IOException + * @deprecated Does not retry; use an HTable instance instead. + */ + public AdminProtocol waitForMetaServerConnection(long timeout) + throws InterruptedException, NotAllMetaRegionsOnlineException, IOException { + return getCachedConnection(waitForMeta(timeout)); + } + + /** + * Gets a connection to the server hosting meta, as reported by ZooKeeper, + * waiting up to the specified timeout for availability. + * Used in tests. + * @see #waitForMeta(long) for additional information + * @return connection to server hosting meta + * @throws NotAllMetaRegionsOnlineException if timed out or interrupted + * @throws IOException + * @deprecated Does not retry; use an HTable instance instead. + */ + public AdminProtocol waitForMetaServerConnectionDefault() + throws NotAllMetaRegionsOnlineException, IOException { + try { + return getCachedConnection(waitForMeta(defaultTimeout)); + } catch (InterruptedException e) { + throw new NotAllMetaRegionsOnlineException("Interrupted"); + } + } + + /** + * Called when we figure current meta is off (called from zk callback). + */ + public void resetMetaLocation() { + LOG.debug("Current cached META location, " + metaLocation + + ", is not valid, resetting"); + synchronized(this.metaAvailable) { + this.metaAvailable.set(false); + this.metaAvailable.notifyAll(); + } + } + + /** + * @param metaLocation + */ + void setMetaLocation(final ServerName metaLocation) { + LOG.debug("Set new cached META location: " + metaLocation); + synchronized (this.metaAvailable) { + this.metaLocation = metaLocation; + this.metaAvailable.set(true); + // no synchronization because these are private and already under lock + this.metaAvailable.notifyAll(); + } + } + + /** + * @param sn ServerName to get a connection against. + * @return The AdminProtocol we got when we connected to sn + * May have come from cache, may not be good, may have been setup by this + * invocation, or may be null. + * @throws IOException + */ + private AdminProtocol getCachedConnection(ServerName sn) + throws IOException { + if (sn == null) { + return null; + } + AdminProtocol protocol = null; + try { + protocol = connection.getAdmin(sn.getHostname(), sn.getPort()); + } catch (RetriesExhaustedException e) { + if (e.getCause() != null && e.getCause() instanceof ConnectException) { + // Catch this; presume it means the cached connection has gone bad. + } else { + throw e; + } + } catch (SocketTimeoutException e) { + LOG.debug("Timed out connecting to " + sn); + } catch (NoRouteToHostException e) { + LOG.debug("Connecting to " + sn, e); + } catch (SocketException e) { + LOG.debug("Exception connecting to " + sn); + } catch (UnknownHostException e) { + LOG.debug("Unknown host exception connecting to " + sn); + } catch (IOException ioe) { + Throwable cause = ioe.getCause(); + if (ioe instanceof ConnectException) { + // Catch. Connect refused. + } else if (cause != null && cause instanceof EOFException) { + // Catch. Other end disconnected us. 
+ } else if (cause != null && cause.getMessage() != null && + cause.getMessage().toLowerCase().contains("connection reset")) { + // Catch. Connection reset. + } else { + throw ioe; + } + + } + return protocol; + } + + /** + * Verify we can connect to hostingServer and that its carrying + * regionName. + * @param hostingServer Interface to the server hosting regionName + * @param serverName The servername that goes with the metaServer + * Interface. Used logging. + * @param regionName The regionname we are interested in. + * @return True if we were able to verify the region located at other side of + * the Interface. + * @throws IOException + */ + // TODO: We should be able to get the ServerName from the AdminProtocol + // rather than have to pass it in. Its made awkward by the fact that the + // HRI is likely a proxy against remote server so the getServerName needs + // to be fixed to go to a local method or to a cache before we can do this. + private boolean verifyRegionLocation(AdminProtocol hostingServer, + final ServerName address, final byte [] regionName) + throws IOException { + if (hostingServer == null) { + LOG.info("Passed hostingServer is null"); + return false; + } + Throwable t = null; + try { + // Try and get regioninfo from the hosting server. + return ProtobufUtil.getRegionInfo(hostingServer, regionName) != null; + } catch (ConnectException e) { + t = e; + } catch (RetriesExhaustedException e) { + t = e; + } catch (RemoteException e) { + IOException ioe = e.unwrapRemoteException(); + t = ioe; + } catch (IOException e) { + Throwable cause = e.getCause(); + if (cause != null && cause instanceof EOFException) { + t = cause; + } else if (cause != null && cause.getMessage() != null + && cause.getMessage().contains("Connection reset")) { + t = cause; + } else { + t = e; + } + } + LOG.info("Failed verification of " + Bytes.toStringBinary(regionName) + + " at address=" + address + "; " + t); + return false; + } + + /** + * Verify -ROOT- is deployed and accessible. + * @param timeout How long to wait on zk for root address (passed through to + * the internal call to {@link #waitForRootServerConnection(long)}. + * @return True if the -ROOT- location is healthy. + * @throws IOException + * @throws InterruptedException + */ + public boolean verifyRootRegionLocation(final long timeout) + throws InterruptedException, IOException { + AdminProtocol connection = null; + try { + connection = waitForRootServerConnection(timeout); + } catch (NotAllMetaRegionsOnlineException e) { + // Pass + } catch (ServerNotRunningYetException e) { + // Pass -- remote server is not up so can't be carrying root + } catch (UnknownHostException e) { + // Pass -- server name doesn't resolve so it can't be assigned anything. + } + return (connection == null)? false: + verifyRegionLocation(connection, + this.rootRegionTracker.getRootRegionLocation(), ROOT_REGION_NAME); + } + + /** + * Verify .META. is deployed and accessible. + * @param timeout How long to wait on zk for .META. address + * (passed through to the internal call to {@link #waitForMetaServerConnection(long)}. + * @return True if the .META. location is healthy. + * @throws IOException Some unexpected IOE. 
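A hedged sketch of how a caller, for example master startup code, might poll verifyRootRegionLocation above and verifyMetaRegionLocation just below; the helper name and the retry sleep are illustrative only.

  // Hypothetical readiness loop: block until both catalog regions verify.
  static void waitForCatalogDeployed(CatalogTracker ct, long timeoutMs)
      throws IOException, InterruptedException {
    while (!ct.verifyRootRegionLocation(timeoutMs)) {
      Thread.sleep(100); // -ROOT- not yet deployed at the address ZooKeeper advertises
    }
    while (!ct.verifyMetaRegionLocation(timeoutMs)) {
      Thread.sleep(100); // .META. not yet deployed at the address recorded in -ROOT-
    }
  }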
+ * @throws InterruptedException + */ + public boolean verifyMetaRegionLocation(final long timeout) + throws InterruptedException, IOException { + AdminProtocol connection = null; + try { + connection = waitForMetaServerConnection(timeout); + } catch (NotAllMetaRegionsOnlineException e) { + // Pass + } catch (ServerNotRunningYetException e) { + // Pass -- remote server is not up so can't be carrying .META. + } catch (UnknownHostException e) { + // Pass -- server name doesn't resolve so it can't be assigned anything. + } catch (RetriesExhaustedException e) { + // Pass -- failed after bunch of retries. + LOG.debug("Failed verify meta region location after retries", e); + } + return connection != null; + } + + // Used by tests. + MetaNodeTracker getMetaNodeTracker() { + return this.metaNodeTracker; + } + + public HConnection getConnection() { + return this.connection; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java new file mode 100644 index 0000000..ea9da0c --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java @@ -0,0 +1,643 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.catalog; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Set; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; + +/** + * Reads region and assignment information from .META.. + */ +@InterfaceAudience.Private +public class MetaReader { + // TODO: Strip CatalogTracker from this class. Its all over and in the end + // its only used to get its Configuration so we can get associated + // Connection. + private static final Log LOG = LogFactory.getLog(MetaReader.class); + + static final byte [] META_REGION_PREFIX; + static { + // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX. + // FIRST_META_REGIONINFO == '.META.,,1'. 
META_REGION_PREFIX == '.META.,' + int len = HRegionInfo.FIRST_META_REGIONINFO.getRegionName().length - 2; + META_REGION_PREFIX = new byte [len]; + System.arraycopy(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), 0, + META_REGION_PREFIX, 0, len); + } + + /** + * @param row + * @return True if row is row of -ROOT- table. + */ + private static boolean isRootTableRow(final byte [] row) { + if (row.length < META_REGION_PREFIX.length + 2 /* ',', + '1' */) { + // Can't be meta table region. + return false; + } + // Compare the prefix of row. If it matches META_REGION_PREFIX prefix, + // then this is row from -ROOT_ table. + return Bytes.equals(row, 0, META_REGION_PREFIX.length, + META_REGION_PREFIX, 0, META_REGION_PREFIX.length); + } + + /** + * Performs a full scan of .META., skipping regions from any + * tables in the specified set of disabled tables. + * @param catalogTracker + * @param disabledTables set of disabled tables that will not be returned + * @return Returns a map of every region to it's currently assigned server, + * according to META. If the region does not have an assignment it will have + * a null value in the map. + * @throws IOException + */ + public static Map fullScan( + CatalogTracker catalogTracker, final Set disabledTables) + throws IOException { + return fullScan(catalogTracker, disabledTables, false); + } + + /** + * Performs a full scan of .META., skipping regions from any + * tables in the specified set of disabled tables. + * @param catalogTracker + * @param disabledTables set of disabled tables that will not be returned + * @param excludeOfflinedSplitParents If true, do not include offlined split + * parents in the return. + * @return Returns a map of every region to it's currently assigned server, + * according to META. If the region does not have an assignment it will have + * a null value in the map. + * @throws IOException + */ + public static Map fullScan( + CatalogTracker catalogTracker, final Set disabledTables, + final boolean excludeOfflinedSplitParents) + throws IOException { + final Map regions = + new TreeMap(); + Visitor v = new Visitor() { + @Override + public boolean visit(Result r) throws IOException { + if (r == null || r.isEmpty()) return true; + Pair region = HRegionInfo.getHRegionInfoAndServerName(r); + if (region == null) return true; + HRegionInfo hri = region.getFirst(); + if (hri == null) return true; + if (hri.getTableNameAsString() == null) return true; + if (disabledTables.contains( + hri.getTableNameAsString())) return true; + // Are we to include split parents in the list? + if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; + regions.put(hri, region.getSecond()); + return true; + } + }; + fullScan(catalogTracker, v); + return regions; + } + + /** + * Performs a full scan of .META.. + * @return List of {@link Result} + * @throws IOException + */ + public static List fullScan(CatalogTracker catalogTracker) + throws IOException { + CollectAllVisitor v = new CollectAllVisitor(); + fullScan(catalogTracker, v, null); + return v.getResults(); + } + + /** + * Performs a full scan of a -ROOT- table. + * @return List of {@link Result} + * @throws IOException + */ + public static List fullScanOfRoot(CatalogTracker catalogTracker) + throws IOException { + CollectAllVisitor v = new CollectAllVisitor(); + fullScan(catalogTracker, v, null, true); + return v.getResults(); + } + + /** + * Performs a full scan of .META.. + * @param catalogTracker + * @param visitor Visitor invoked against each row. 
+ * @throws IOException + */ + public static void fullScan(CatalogTracker catalogTracker, + final Visitor visitor) + throws IOException { + fullScan(catalogTracker, visitor, null); + } + + /** + * Performs a full scan of .META.. + * @param catalogTracker + * @param visitor Visitor invoked against each row. + * @param startrow Where to start the scan. Pass null if want to begin scan + * at first row (The visitor will stop the Scan when its done so no need to + * pass a stoprow). + * @throws IOException + */ + public static void fullScan(CatalogTracker catalogTracker, + final Visitor visitor, final byte [] startrow) + throws IOException { + fullScan(catalogTracker, visitor, startrow, false); + } + + /** + * Callers should call close on the returned {@link HTable} instance. + * @param catalogTracker We'll use this catalogtracker's connection + * @param tableName Table to get an {@link HTable} against. + * @return An {@link HTable} for tableName + * @throws IOException + */ + private static HTable getHTable(final CatalogTracker catalogTracker, + final byte [] tableName) + throws IOException { + // Passing the CatalogTracker's connection configuration ensures this + // HTable instance uses the CatalogTracker's connection. + org.apache.hadoop.hbase.client.HConnection c = catalogTracker.getConnection(); + if (c == null) throw new NullPointerException("No connection"); + return new HTable(catalogTracker.getConnection().getConfiguration(), tableName); + } + + /** + * Callers should call close on the returned {@link HTable} instance. + * @param catalogTracker + * @param row Row we are putting + * @return + * @throws IOException + */ + static HTable getCatalogHTable(final CatalogTracker catalogTracker, + final byte [] row) + throws IOException { + return isRootTableRow(row)? + getRootHTable(catalogTracker): + getMetaHTable(catalogTracker); + } + + /** + * Callers should call close on the returned {@link HTable} instance. + * @param ct + * @return An {@link HTable} for .META. + * @throws IOException + */ + static HTable getMetaHTable(final CatalogTracker ct) + throws IOException { + return getHTable(ct, HConstants.META_TABLE_NAME); + } + + /** + * Callers should call close on the returned {@link HTable} instance. + * @param ct + * @return An {@link HTable} for -ROOT- + * @throws IOException + */ + static HTable getRootHTable(final CatalogTracker ct) + throws IOException { + return getHTable(ct, HConstants.ROOT_TABLE_NAME); + } + + /** + * @param t Table to use (will be closed when done). + * @param g Get to run + * @throws IOException + */ + private static Result get(final HTable t, final Get g) throws IOException { + try { + return t.get(g); + } finally { + t.close(); + } + } + + /** + * Gets the location of .META. region by reading content of + * -ROOT-. + * @param ct + * @return location of .META. 
region as a {@link ServerName} or + * null if not found + * @throws IOException + */ + static ServerName getMetaRegionLocation(final CatalogTracker ct) + throws IOException { + return MetaReader.readRegionLocation(ct, CatalogTracker.META_REGION_NAME); + } + + /** + * Reads the location of the specified region + * @param catalogTracker + * @param regionName region whose location we are after + * @return location of region as a {@link ServerName} or null if not found + * @throws IOException + */ + static ServerName readRegionLocation(CatalogTracker catalogTracker, + byte [] regionName) + throws IOException { + Pair pair = getRegion(catalogTracker, regionName); + return (pair == null || pair.getSecond() == null)? null: pair.getSecond(); + } + + /** + * Gets the region info and assignment for the specified region. + * @param catalogTracker + * @param regionName Region to lookup. + * @return Location and HRegionInfo for regionName + * @throws IOException + */ + public static Pair getRegion( + CatalogTracker catalogTracker, byte [] regionName) + throws IOException { + Get get = new Get(regionName); + get.addFamily(HConstants.CATALOG_FAMILY); + Result r = get(getCatalogHTable(catalogTracker, regionName), get); + return (r == null || r.isEmpty())? null: HRegionInfo.getHRegionInfoAndServerName(r); + } + + /** + * Checks if the specified table exists. Looks at the META table hosted on + * the specified server. + * @param catalogTracker + * @param tableName table to check + * @return true if the table exists in meta, false if not + * @throws IOException + */ + public static boolean tableExists(CatalogTracker catalogTracker, + String tableName) + throws IOException { + if (tableName.equals(HTableDescriptor.ROOT_TABLEDESC.getNameAsString()) || + tableName.equals(HTableDescriptor.META_TABLEDESC.getNameAsString())) { + // Catalog tables always exist. + return true; + } + final byte [] tableNameBytes = Bytes.toBytes(tableName); + // Make a version of ResultCollectingVisitor that only collects the first + CollectingVisitor visitor = new CollectingVisitor() { + private HRegionInfo current = null; + + @Override + public boolean visit(Result r) throws IOException { + this.current = + HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER); + if (this.current == null) { + LOG.warn("No serialized HRegionInfo in " + r); + return true; + } + if (!isInsideTable(this.current, tableNameBytes)) return false; + // Else call super and add this Result to the collection. + super.visit(r); + // Stop collecting regions from table after we get one. + return false; + } + + @Override + void add(Result r) { + // Add the current HRI. + this.results.add(this.current); + } + }; + fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableNameBytes)); + // If visitor has results >= 1 then table exists. + return visitor.getResults().size() >= 1; + } + + /** + * Gets all of the regions of the specified table. + * @param catalogTracker + * @param tableName + * @return Ordered list of {@link HRegionInfo}. + * @throws IOException + */ + public static List getTableRegions(CatalogTracker catalogTracker, + byte [] tableName) + throws IOException { + return getTableRegions(catalogTracker, tableName, false); + } + + /** + * Gets all of the regions of the specified table. + * @param catalogTracker + * @param tableName + * @param excludeOfflinedSplitParents If true, do not include offlined split + * parents in the return. + * @return Ordered list of {@link HRegionInfo}. 
+ * @throws IOException + */ + public static List getTableRegions(CatalogTracker catalogTracker, + byte [] tableName, final boolean excludeOfflinedSplitParents) + throws IOException { + List> result = null; + try { + result = getTableRegionsAndLocations(catalogTracker, tableName, + excludeOfflinedSplitParents); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + return getListOfHRegionInfos(result); + } + + static List getListOfHRegionInfos(final List> pairs) { + if (pairs == null || pairs.isEmpty()) return null; + List result = new ArrayList(pairs.size()); + for (Pair pair: pairs) { + result.add(pair.getFirst()); + } + return result; + } + + /** + * @param current + * @param tableName + * @return True if current tablename is equal to + * tableName + */ + static boolean isInsideTable(final HRegionInfo current, final byte [] tableName) { + return Bytes.equals(tableName, current.getTableName()); + } + + /** + * @param tableName + * @return Place to start Scan in .META. when passed a + * tableName; returns <tableName&rt; <,&rt; <,&rt; + */ + static byte [] getTableStartRowForMeta(final byte [] tableName) { + byte [] startRow = new byte[tableName.length + 2]; + System.arraycopy(tableName, 0, startRow, 0, tableName.length); + startRow[startRow.length - 2] = HConstants.DELIMITER; + startRow[startRow.length - 1] = HConstants.DELIMITER; + return startRow; + } + + /** + * This method creates a Scan object that will only scan catalog rows that + * belong to the specified table. It doesn't specify any columns. + * This is a better alternative to just using a start row and scan until + * it hits a new table since that requires parsing the HRI to get the table + * name. + * @param tableName bytes of table's name + * @return configured Scan object + */ + public static Scan getScanForTableName(byte[] tableName) { + String strName = Bytes.toString(tableName); + // Start key is just the table name with delimiters + byte[] startKey = Bytes.toBytes(strName + ",,"); + // Stop key appends the smallest possible char to the table name + byte[] stopKey = Bytes.toBytes(strName + " ,,"); + + Scan scan = new Scan(startKey); + scan.setStopRow(stopKey); + return scan; + } + + /** + * @param catalogTracker + * @param tableName + * @return Return list of regioninfos and server. + * @throws IOException + * @throws InterruptedException + */ + public static List> + getTableRegionsAndLocations(CatalogTracker catalogTracker, String tableName) + throws IOException, InterruptedException { + return getTableRegionsAndLocations(catalogTracker, Bytes.toBytes(tableName), + true); + } + + /** + * @param catalogTracker + * @param tableName + * @return Return list of regioninfos and server addresses. + * @throws IOException + * @throws InterruptedException + */ + public static List> + getTableRegionsAndLocations(final CatalogTracker catalogTracker, + final byte [] tableName, final boolean excludeOfflinedSplitParents) + throws IOException, InterruptedException { + if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { + // If root, do a bit of special handling. 
+ ServerName serverName = catalogTracker.getRootLocation(); + List> list = + new ArrayList>(); + list.add(new Pair(HRegionInfo.ROOT_REGIONINFO, + serverName)); + return list; + } + // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress + CollectingVisitor> visitor = + new CollectingVisitor>() { + private Pair current = null; + + @Override + public boolean visit(Result r) throws IOException { + HRegionInfo hri = + HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER); + if (hri == null) { + LOG.warn("No serialized HRegionInfo in " + r); + return true; + } + if (!isInsideTable(hri, tableName)) return false; + if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; + ServerName sn = HRegionInfo.getServerName(r); + // Populate this.current so available when we call #add + this.current = new Pair(hri, sn); + // Else call super and add this Result to the collection. + return super.visit(r); + } + + @Override + void add(Result r) { + this.results.add(this.current); + } + }; + fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableName), + Bytes.equals(tableName, HConstants.META_TABLE_NAME)); + return visitor.getResults(); + } + + /** + * @param catalogTracker + * @param serverName + * @return List of user regions installed on this server (does not include + * catalog regions). + * @throws IOException + */ + public static NavigableMap + getServerUserRegions(CatalogTracker catalogTracker, final ServerName serverName) + throws IOException { + final NavigableMap hris = new TreeMap(); + // Fill the above hris map with entries from .META. that have the passed + // servername. + CollectingVisitor v = new CollectingVisitor() { + @Override + void add(Result r) { + if (r == null || r.isEmpty()) return; + ServerName sn = HRegionInfo.getServerName(r); + if (sn != null && sn.equals(serverName)) this.results.add(r); + } + }; + fullScan(catalogTracker, v); + List results = v.getResults(); + if (results != null && !results.isEmpty()) { + // Convert results to Map keyed by HRI + for (Result r: results) { + Pair p = HRegionInfo.getHRegionInfoAndServerName(r); + if (p != null && p.getFirst() != null) hris.put(p.getFirst(), r); + } + } + return hris; + } + + public static void fullScanMetaAndPrint(final CatalogTracker catalogTracker) + throws IOException { + Visitor v = new Visitor() { + @Override + public boolean visit(Result r) throws IOException { + if (r == null || r.isEmpty()) return true; + LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r); + HRegionInfo hrim = HRegionInfo.getHRegionInfo(r); + LOG.info("fullScanMetaAndPrint.HRI Print= " + hrim); + return true; + } + }; + fullScan(catalogTracker, v); + } + + /** + * Performs a full scan of a catalog table. + * @param catalogTracker + * @param visitor Visitor invoked against each row. + * @param startrow Where to start the scan. Pass null if want to begin scan + * at first row. + * @param scanRoot True if we are to scan -ROOT- rather than + * .META., the default (pass false to scan .META.) 
+ * @throws IOException + */ + static void fullScan(CatalogTracker catalogTracker, + final Visitor visitor, final byte [] startrow, final boolean scanRoot) + throws IOException { + Scan scan = new Scan(); + if (startrow != null) scan.setStartRow(startrow); + if (startrow == null && !scanRoot) { + int caching = catalogTracker.getConnection().getConfiguration() + .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100); + scan.setCaching(caching); + } + scan.addFamily(HConstants.CATALOG_FAMILY); + HTable metaTable = scanRoot? + getRootHTable(catalogTracker): getMetaHTable(catalogTracker); + ResultScanner scanner = metaTable.getScanner(scan); + try { + Result data; + while((data = scanner.next()) != null) { + if (data.isEmpty()) continue; + // Break if visit returns false. + if (!visitor.visit(data)) break; + } + } finally { + scanner.close(); + metaTable.close(); + } + return; + } + + /** + * Implementations 'visit' a catalog table row. + */ + public interface Visitor { + /** + * Visit the catalog table row. + * @param r A row from catalog table + * @return True if we are to proceed scanning the table, else false if + * we are to stop now. + */ + public boolean visit(final Result r) throws IOException; + } + + /** + * A {@link Visitor} that collects content out of passed {@link Result}. + */ + static abstract class CollectingVisitor implements Visitor { + final List results = new ArrayList(); + @Override + public boolean visit(Result r) throws IOException { + if (r == null || r.isEmpty()) return true; + add(r); + return true; + } + + abstract void add(Result r); + + /** + * @return Collected results; wait till visits complete to collect all + * possible results + */ + List getResults() { + return this.results; + } + } + + /** + * Collects all returned. + */ + static class CollectAllVisitor extends CollectingVisitor { + @Override + void add(Result r) { + this.results.add(r); + } + } + + /** + * Count regions in .META. for passed table. + * @param c + * @param tableName + * @return Count or regions in table tableName + * @throws IOException + */ + public static int getRegionCount(final Configuration c, final String tableName) throws IOException { + HTable t = new HTable(c, tableName); + try { + return t.getRegionLocations().size(); + } finally { + t.close(); + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java new file mode 100644 index 0000000..2bb0687 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
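// Editor's note: an illustrative usage sketch for the CatalogTracker and MetaReader
// APIs introduced above; it is not part of this patch. The configuration handling,
// the table name "mytable" and the timeout value are assumptions for the example,
// and the usual org.apache.hadoop.hbase imports are taken as given.
CatalogTracker tracker = new CatalogTracker(HBaseConfiguration.create());
tracker.start();
try {
  // Check that .META. is deployed and reachable before reading it.
  if (tracker.verifyMetaRegionLocation(30 * 1000L)) {
    List<HRegionInfo> regions =
        MetaReader.getTableRegions(tracker, Bytes.toBytes("mytable"));
    for (HRegionInfo hri : regions) {
      System.out.println(hri.getRegionNameAsString());
    }
  }
} finally {
  tracker.stop();
}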
+ */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.Iterator; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Helper class for custom client scanners. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public abstract class AbstractClientScanner implements ResultScanner { + + @Override + public Iterator iterator() { + return new Iterator() { + // The next RowResult, possibly pre-read + Result next = null; + + // return true if there is another item pending, false if there isn't. + // this method is where the actual advancing takes place, but you need + // to call next() to consume it. hasNext() will only advance if there + // isn't a pending next(). + public boolean hasNext() { + if (next == null) { + try { + next = AbstractClientScanner.this.next(); + return next != null; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + return true; + } + + // get the pending next item and advance the iterator. returns null if + // there is no next item. + public Result next() { + // since hasNext() does the real advancing, we call this to determine + // if there is a next before proceeding. + if (!hasNext()) { + return null; + } + + // if we get to here, then hasNext() has given us an item to return. + // we want to return the item and then null out the next pointer, so + // we use a temporary variable. + Result temp = next; + next = null; + return temp; + } + + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java new file mode 100644 index 0000000..06475d0 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java @@ -0,0 +1,80 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A Get, Put or Delete associated with it's region. Used internally by + * {@link HTable#batch} to associate the action with it's region and maintain + * the index from the original request. 
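// Editor's note: an illustrative sketch, not part of this patch. Action is internal
// bookkeeping for HTable#batch; from the caller's side the batch API looks roughly
// like this. The table name, column names and surrounding exception handling are
// assumptions for the example.
HTable table = new HTable(conf, "mytable");
try {
  List<Row> actions = new ArrayList<Row>();
  Put put = new Put(Bytes.toBytes("row1"));
  put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  actions.add(put);
  actions.add(new Delete(Bytes.toBytes("row2")));
  Object[] results = new Object[actions.size()];
  table.batch(actions, results);  // results[i] lines up with the original index
} finally {
  table.close();
}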
+ */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class Action implements Comparable { + + private Row action; + private int originalIndex; + private R result; + + /** + * This constructor is replaced by {@link #Action(Row, int)} + */ + @Deprecated + public Action(byte[] regionName, Row action, int originalIndex) { + this(action, originalIndex); + } + + public Action(Row action, int originalIndex) { + super(); + this.action = action; + this.originalIndex = originalIndex; + } + + @Deprecated + public byte[] getRegionName() { + return null; + } + + @Deprecated + public void setRegionName(byte[] regionName) { + } + + public R getResult() { + return result; + } + + public void setResult(R result) { + this.result = result; + } + + public Row getAction() { + return action; + } + + public int getOriginalIndex() { + return originalIndex; + } + + @Override + public int compareTo(Object o) { + return action.compareTo(((Action) o).getAction()); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java new file mode 100644 index 0000000..50f8b2a --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.ipc.VersionedProtocol; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.security.TokenInfo; +import org.apache.hadoop.hbase.security.KerberosInfo; + +/** + * Protocol that a HBase client uses to communicate with a region server. + */ +@KerberosInfo( + serverPrincipal = "hbase.regionserver.kerberos.principal") +@TokenInfo("HBASE_AUTH_TOKEN") +@InterfaceAudience.Private +public interface AdminProtocol extends + AdminService.BlockingInterface, VersionedProtocol { + public static final long VERSION = 1L; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java new file mode 100644 index 0000000..ba1e085 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Performs Append operations on a single row. + *

+ * Note that this operation does not appear atomic to readers. Appends are done + * under a single row lock, so write operations to a row are synchronized, but + * readers do not take row locks so get and scan operations can see this + * operation partially completed. + *

+ * To append to a set of columns of a row, instantiate an Append object with the + * row to append to. At least one column to append must be specified using the + * {@link #add(byte[], byte[], byte[])} method. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class Append extends Mutation { + private static final String RETURN_RESULTS = "_rr_"; + /** + * @param returnResults + * True (default) if the append operation should return the results. + * A client that is not interested in the result can save network + * bandwidth by setting this to false. + */ + public void setReturnResults(boolean returnResults) { + setAttribute(RETURN_RESULTS, Bytes.toBytes(returnResults)); + } + + /** + * @return current setting for returnResults + */ + public boolean isReturnResults() { + byte[] v = getAttribute(RETURN_RESULTS); + return v == null ? true : Bytes.toBoolean(v); + } + + /** + * Create an Append operation for the specified row. + *
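// Editor's note: an illustrative sketch, not part of this patch. A typical append
// goes through HTable#append, with results suppressed as described above. The table
// and column names are assumptions for the example.
Append append = new Append(Bytes.toBytes("row1"));
append.add(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes(",next-event"));
append.setReturnResults(false);   // caller does not need the post-append value
Result r = table.append(append);  // may be null/empty when results are suppressed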

+ * At least one column must be appended to. + * @param row row key + */ + public Append(byte[] row) { + this.row = Arrays.copyOf(row, row.length); + } + + /** + * Add the specified column and value to this Append operation. + * @param family family name + * @param qualifier column qualifier + * @param value value to append to specified column + * @return this + */ + public Append add(byte [] family, byte [] qualifier, byte [] value) { + List list = familyMap.get(family); + if(list == null) { + list = new ArrayList(); + } + list.add(new KeyValue( + this.row, family, qualifier, this.ts, KeyValue.Type.Put, value)); + familyMap.put(family, list); + return this; + } +} \ No newline at end of file diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java new file mode 100644 index 0000000..f916ea6 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java @@ -0,0 +1,51 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import java.util.Map; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public interface Attributes { + /** + * Sets an attribute. + * In case value = null attribute is removed from the attributes map. + * Attribute names starting with _ indicate system attributes. + * @param name attribute name + * @param value attribute value + */ + public void setAttribute(String name, byte[] value); + + /** + * Gets an attribute + * @param name attribute name + * @return attribute value if attribute is set, null otherwise + */ + public byte[] getAttribute(String name); + + /** + * Gets all attributes + * @return unmodifiable map of all attributes + */ + public Map getAttributesMap(); +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java new file mode 100644 index 0000000..57d84e5 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.ipc.VersionedProtocol; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; +import org.apache.hadoop.hbase.security.TokenInfo; +import org.apache.hadoop.hbase.security.KerberosInfo; + +/** + * Protocol that a HBase client uses to communicate with a region server. + */ +@KerberosInfo( + serverPrincipal = "hbase.regionserver.kerberos.principal") +@TokenInfo("HBASE_AUTH_TOKEN") +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface ClientProtocol extends + ClientService.BlockingInterface, VersionedProtocol { + public static final long VERSION = 1L; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java new file mode 100644 index 0000000..553346b --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -0,0 +1,405 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedList; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.OutOfOrderScannerNextException; +import org.apache.hadoop.hbase.UnknownScannerException; +import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos; +import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.DataOutputBuffer; + +/** + * Implements the scanner interface for the HBase client. 
+ * If there are multiple regions in a table, this scanner will iterate + * through them all. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ClientScanner extends AbstractClientScanner { + private final Log LOG = LogFactory.getLog(this.getClass()); + private Scan scan; + private boolean closed = false; + // Current region scanner is against. Gets cleared if current region goes + // wonky: e.g. if it splits on us. + private HRegionInfo currentRegion = null; + private ScannerCallable callable = null; + private final LinkedList cache = new LinkedList(); + private final int caching; + private long lastNext; + // Keep lastResult returned successfully in case we have to reset scanner. + private Result lastResult = null; + private ScanMetrics scanMetrics = null; + private final long maxScannerResultSize; + private final HConnection connection; + private final byte[] tableName; + private final int scannerTimeout; + + /** + * Create a new ClientScanner for the specified table. An HConnection will be + * retrieved using the passed Configuration. + * Note that the passed {@link Scan}'s start row maybe changed changed. + * + * @param conf The {@link Configuration} to use. + * @param scan {@link Scan} to use in this scanner + * @param tableName The table that we wish to scan + * @throws IOException + */ + public ClientScanner(final Configuration conf, final Scan scan, + final byte[] tableName) throws IOException { + this(conf, scan, tableName, HConnectionManager.getConnection(conf)); + } + + /** + * Create a new ClientScanner for the specified table + * Note that the passed {@link Scan}'s start row maybe changed changed. + * + * @param conf The {@link Configuration} to use. + * @param scan {@link Scan} to use in this scanner + * @param tableName The table that we wish to scan + * @param connection Connection identifying the cluster + * @throws IOException + */ + public ClientScanner(final Configuration conf, final Scan scan, + final byte[] tableName, HConnection connection) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Creating scanner over " + + Bytes.toString(tableName) + + " starting at key '" + Bytes.toStringBinary(scan.getStartRow()) + "'"); + } + this.scan = scan; + this.tableName = tableName; + this.lastNext = System.currentTimeMillis(); + this.connection = connection; + if (scan.getMaxResultSize() > 0) { + this.maxScannerResultSize = scan.getMaxResultSize(); + } else { + this.maxScannerResultSize = conf.getLong( + HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); + } + this.scannerTimeout = conf.getInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD); + + // check if application wants to collect scan metrics + byte[] enableMetrics = scan.getAttribute( + Scan.SCAN_ATTRIBUTES_METRICS_ENABLE); + if (enableMetrics != null && Bytes.toBoolean(enableMetrics)) { + scanMetrics = new ScanMetrics(); + } + + // Use the caching from the Scan. If not set, use the default cache setting for this table. 
+ if (this.scan.getCaching() > 0) { + this.caching = this.scan.getCaching(); + } else { + this.caching = conf.getInt( + HConstants.HBASE_CLIENT_SCANNER_CACHING, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); + } + + // initialize the scanner + nextScanner(this.caching, false); + } + + protected HConnection getConnection() { + return this.connection; + } + + protected byte[] getTableName() { + return this.tableName; + } + + protected Scan getScan() { + return scan; + } + + protected long getTimestamp() { + return lastNext; + } + + // returns true if the passed region endKey + private boolean checkScanStopRow(final byte [] endKey) { + if (this.scan.getStopRow().length > 0) { + // there is a stop row, check to see if we are past it. + byte [] stopRow = scan.getStopRow(); + int cmp = Bytes.compareTo(stopRow, 0, stopRow.length, + endKey, 0, endKey.length); + if (cmp <= 0) { + // stopRow <= endKey (endKey is equals to or larger than stopRow) + // This is a stop. + return true; + } + } + return false; //unlikely. + } + + /* + * Gets a scanner for the next region. If this.currentRegion != null, then + * we will move to the endrow of this.currentRegion. Else we will get + * scanner at the scan.getStartRow(). We will go no further, just tidy + * up outstanding scanners, if currentRegion != null and + * done is true. + * @param nbRows + * @param done Server-side says we're done scanning. + */ + private boolean nextScanner(int nbRows, final boolean done) + throws IOException { + // Close the previous scanner if it's open + if (this.callable != null) { + this.callable.setClose(); + callable.withRetries(); + this.callable = null; + } + + // Where to start the next scanner + byte [] localStartKey; + + // if we're at end of table, close and return false to stop iterating + if (this.currentRegion != null) { + byte [] endKey = this.currentRegion.getEndKey(); + if (endKey == null || + Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY) || + checkScanStopRow(endKey) || + done) { + close(); + if (LOG.isDebugEnabled()) { + LOG.debug("Finished with scanning at " + this.currentRegion); + } + return false; + } + localStartKey = endKey; + if (LOG.isDebugEnabled()) { + LOG.debug("Finished with region " + this.currentRegion); + } + } else { + localStartKey = this.scan.getStartRow(); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Advancing internal scanner to startKey at '" + + Bytes.toStringBinary(localStartKey) + "'"); + } + try { + callable = getScannerCallable(localStartKey, nbRows); + // Open a scanner on the region server starting at the + // beginning of the region + callable.withRetries(); + this.currentRegion = callable.getHRegionInfo(); + if (this.scanMetrics != null) { + this.scanMetrics.countOfRegions.incrementAndGet(); + } + } catch (IOException e) { + close(); + throw e; + } + return true; + } + + protected ScannerCallable getScannerCallable(byte [] localStartKey, + int nbRows) { + scan.setStartRow(localStartKey); + ScannerCallable s = new ScannerCallable(getConnection(), + getTableName(), scan, this.scanMetrics); + s.setCaching(nbRows); + return s; + } + + /** + * Publish the scan metrics. For now, we use scan.setAttribute to pass the metrics back to the + * application or TableInputFormat.Later, we could push it to other systems. We don't use metrics + * framework because it doesn't support multi-instances of the same metrics on the same machine; + * for scan/map reduce scenarios, we will have multiple scans running at the same time. 
+ * + * By default, scan metrics are disabled; if the application wants to collect them, this behavior + * can be turned on by calling calling: + * + * scan.setAttribute(SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE)) + */ + private void writeScanMetrics() throws IOException { + if (this.scanMetrics == null) { + return; + } + final DataOutputBuffer d = new DataOutputBuffer(); + MapReduceProtos.ScanMetrics pScanMetrics = ProtobufUtil.toScanMetrics(scanMetrics); + scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA, pScanMetrics.toByteArray()); + } + + public Result next() throws IOException { + // If the scanner is closed and there's nothing left in the cache, next is a no-op. + if (cache.size() == 0 && this.closed) { + return null; + } + if (cache.size() == 0) { + Result [] values = null; + long remainingResultSize = maxScannerResultSize; + int countdown = this.caching; + // We need to reset it if it's a new callable that was created + // with a countdown in nextScanner + callable.setCaching(this.caching); + // This flag is set when we want to skip the result returned. We do + // this when we reset scanner because it split under us. + boolean skipFirst = false; + boolean retryAfterOutOfOrderException = true; + do { + try { + if (skipFirst) { + // Skip only the first row (which was the last row of the last + // already-processed batch). + callable.setCaching(1); + values = callable.withRetries(); + callable.setCaching(this.caching); + skipFirst = false; + } + // Server returns a null values if scanning is to stop. Else, + // returns an empty array if scanning is to go on and we've just + // exhausted current region. + values = callable.withRetries(); + retryAfterOutOfOrderException = true; + } catch (DoNotRetryIOException e) { + if (e instanceof UnknownScannerException) { + long timeout = lastNext + scannerTimeout; + // If we are over the timeout, throw this exception to the client + // Else, it's because the region moved and we used the old id + // against the new region server; reset the scanner. + if (timeout < System.currentTimeMillis()) { + long elapsed = System.currentTimeMillis() - lastNext; + ScannerTimeoutException ex = new ScannerTimeoutException( + elapsed + "ms passed since the last invocation, " + + "timeout is currently set to " + scannerTimeout); + ex.initCause(e); + throw ex; + } + } else { + Throwable cause = e.getCause(); + if ((cause == null || (!(cause instanceof NotServingRegionException) + && !(cause instanceof RegionServerStoppedException))) + && !(e instanceof OutOfOrderScannerNextException)) { + throw e; + } + } + // Else, its signal from depths of ScannerCallable that we got an + // NSRE on a next and that we need to reset the scanner. + if (this.lastResult != null) { + this.scan.setStartRow(this.lastResult.getRow()); + // Skip first row returned. We already let it out on previous + // invocation. 
+ skipFirst = true; + } + if (e instanceof OutOfOrderScannerNextException) { + if (retryAfterOutOfOrderException) { + retryAfterOutOfOrderException = false; + } else { + throw new DoNotRetryIOException("Failed after retry" + + ", it could be cause by rpc timeout", e); + } + } + // Clear region + this.currentRegion = null; + callable = null; + continue; + } + long currentTime = System.currentTimeMillis(); + if (this.scanMetrics != null ) { + this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime-lastNext); + } + lastNext = currentTime; + if (values != null && values.length > 0) { + for (Result rs : values) { + cache.add(rs); + for (KeyValue kv : rs.raw()) { + remainingResultSize -= kv.heapSize(); + } + countdown--; + this.lastResult = rs; + } + } + // Values == null means server-side filter has determined we must STOP + } while (remainingResultSize > 0 && countdown > 0 && nextScanner(countdown, values == null)); + } + + if (cache.size() > 0) { + return cache.poll(); + } + + // if we exhausted this scanner before calling close, write out the scan metrics + writeScanMetrics(); + return null; + } + + /** + * Get nbRows rows. + * How many RPCs are made is determined by the {@link Scan#setCaching(int)} + * setting (or hbase.client.scanner.caching in hbase-site.xml). + * @param nbRows number of rows to return + * @return Between zero and nbRows RowResults. Scan is done + * if returned array is of zero-length (We never return null). + * @throws IOException + */ + public Result [] next(int nbRows) throws IOException { + // Collect values to be returned here + ArrayList resultSets = new ArrayList(nbRows); + for(int i = 0; i < nbRows; i++) { + Result next = next(); + if (next != null) { + resultSets.add(next); + } else { + break; + } + } + return resultSets.toArray(new Result[resultSets.size()]); + } + + public void close() { + if (callable != null) { + callable.setClose(); + try { + callable.withRetries(); + } catch (IOException e) { + // We used to catch this error, interpret, and rethrow. However, we + // have since decided that it's not nice for a scanner's close to + // throw exceptions. Chances are it was just an UnknownScanner + // exception due to lease time out. + } finally { + // we want to output the scan metrics even if an error occurred on close + try { + writeScanMetrics(); + } catch (IOException e) { + // As above, we still don't want the scanner close() method to throw. + } + } + callable = null; + } + closed = true; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java new file mode 100644 index 0000000..d368b24 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
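// Editor's note: an illustrative sketch, not part of this patch. Applications reach
// ClientScanner through HTable#getScanner; the scan-metrics attribute referenced in
// writeScanMetrics() above is switched on via Scan#setAttribute. The table, family
// and start row are assumptions for the example.
Scan scan = new Scan(Bytes.toBytes("startRow"));
scan.addFamily(Bytes.toBytes("cf"));
scan.setCaching(100);  // rows fetched per RPC; see the caching selection above
scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
ResultScanner scanner = table.getScanner(scan);
try {
  for (Result result : scanner) {
    // process each row
  }
} finally {
  scanner.close();  // close() also publishes the collected scan metrics
}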
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HConstants; + +import java.util.Random; + +/** + * Utility used by client connections such as {@link HConnection} and + * {@link ServerCallable} + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ConnectionUtils { + + private static final Random RANDOM = new Random(); + /** + * Calculate pause time. + * Built on {@link HConstants#RETRY_BACKOFF}. + * @param pause + * @param tries + * @return How long to wait after tries retries + */ + public static long getPauseTime(final long pause, final int tries) { + int ntries = tries; + if (ntries >= HConstants.RETRY_BACKOFF.length) { + ntries = HConstants.RETRY_BACKOFF.length - 1; + } + + long normalPause = pause * HConstants.RETRY_BACKOFF[ntries]; + long jitter = (long)(normalPause * RANDOM.nextFloat() * 0.01f); // 1% possible jitter + return normalPause + jitter; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java new file mode 100644 index 0000000..9a75546 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java @@ -0,0 +1,256 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * Used to perform Delete operations on a single row. + *
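// Editor's note: an illustrative aside, not part of this patch, on the
// ConnectionUtils.getPauseTime() helper shown a little above. Assuming the default
// HConstants.RETRY_BACKOFF multipliers begin 1, 1, 1, 2, 2, 4, ..., a 1000 ms base
// pause grows roughly as 1s, 1s, 1s, 2s, 2s, 4s plus up to 1% random jitter.
for (int tries = 0; tries < 6; tries++) {
  System.out.println(ConnectionUtils.getPauseTime(1000, tries));
}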

+ * To delete an entire row, instantiate a Delete object with the row + * to delete. To further define the scope of what to delete, call the + * additional methods outlined below. + *

+ * To delete specific families, execute {@link #deleteFamily(byte[]) deleteFamily} + * for each family to delete. + *

+ * To delete multiple versions of specific columns, execute + * {@link #deleteColumns(byte[], byte[]) deleteColumns} + * for each column to delete. + *

+ * To delete specific versions of specific columns, execute + * {@link #deleteColumn(byte[], byte[], long) deleteColumn} + * for each column version to delete. + *

+ * When a timestamp is specified, deleteFamily and deleteColumns delete all + * versions with a timestamp less than or equal to the one passed. If no + * timestamp is specified, an entry is added with a timestamp of 'now', + * where 'now' is the server's System.currentTimeMillis(). + * Specifying a timestamp to the deleteColumn method deletes + * versions only with a timestamp equal to the one specified. + * If no timestamp is passed to deleteColumn, it internally looks up the + * most recent cell's timestamp and adds a delete at that timestamp; i.e. + * it deletes the most recently added cell. + *

The timestamp passed to the constructor is used ONLY for delete of + * rows. For anything less -- a deleteColumn, deleteColumns or + * deleteFamily -- you need to use the method overrides that take a + * timestamp. The constructor timestamp is not referenced. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class Delete extends Mutation implements Comparable<Row> { + /** + * Create a Delete operation for the specified row. + *
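// Editor's note: an illustrative sketch, not part of this patch, combining the
// delete scopes described above in one Delete. The table and column names are
// assumptions for the example.
Delete delete = new Delete(Bytes.toBytes("row1"));
delete.deleteFamily(Bytes.toBytes("cf_old"));                        // whole family
delete.deleteColumns(Bytes.toBytes("cf"), Bytes.toBytes("q"));       // all versions of cf:q
delete.deleteColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2"), 42L);  // one exact version
table.delete(delete);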

+ * If no further operations are done, this will delete everything + * associated with the specified row (all versions of all columns in all + * families). + * @param row row key + */ + public Delete(byte [] row) { + this(row, HConstants.LATEST_TIMESTAMP, null); + } + + /** + * Create a Delete operation for the specified row and timestamp, using + * an optional row lock.

+ * + * If no further operations are done, this will delete all columns in all + * families of the specified row with a timestamp less than or equal to the + * specified timestamp.

+ * + * This timestamp is ONLY used for a delete row operation. If specifying + * families or columns, you must specify each timestamp individually. + * @param row row key + * @param timestamp maximum version timestamp (only for delete row) + * @param rowLock previously acquired row lock, or null + */ + public Delete(byte [] row, long timestamp, RowLock rowLock) { + this.row = row; + this.ts = timestamp; + if (rowLock != null) { + this.lockId = rowLock.getLockId(); + } + } + + /** + * @param d Delete to clone. + */ + public Delete(final Delete d) { + this.row = d.getRow(); + this.ts = d.getTimeStamp(); + this.lockId = d.getLockId(); + this.familyMap.putAll(d.getFamilyMap()); + this.writeToWAL = d.writeToWAL; + } + + /** + * Advanced use only. + * Add an existing delete marker to this Delete object. + * @param kv An existing KeyValue of type "delete". + * @return this for invocation chaining + * @throws IOException + */ + public Delete addDeleteMarker(KeyValue kv) throws IOException { + if (!kv.isDelete()) { + throw new IOException("The recently added KeyValue is not of type " + + "delete. Rowkey: " + Bytes.toStringBinary(this.row)); + } + if (Bytes.compareTo(this.row, 0, row.length, kv.getBuffer(), + kv.getRowOffset(), kv.getRowLength()) != 0) { + throw new IOException("The row in the recently added KeyValue " + + Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(), + kv.getRowLength()) + " doesn't match the original one " + + Bytes.toStringBinary(this.row)); + } + byte [] family = kv.getFamily(); + List list = familyMap.get(family); + if (list == null) { + list = new ArrayList(); + } + list.add(kv); + familyMap.put(family, list); + return this; + } + + /** + * Delete all versions of all columns of the specified family. + *

+ * Overrides previous calls to deleteColumn and deleteColumns for the + * specified family. + * @param family family name + * @return this for invocation chaining + */ + public Delete deleteFamily(byte [] family) { + this.deleteFamily(family, HConstants.LATEST_TIMESTAMP); + return this; + } + + /** + * Delete all columns of the specified family with a timestamp less than + * or equal to the specified timestamp. + *

+ * Overrides previous calls to deleteColumn and deleteColumns for the + * specified family. + * @param family family name + * @param timestamp maximum version timestamp + * @return this for invocation chaining + */ + public Delete deleteFamily(byte [] family, long timestamp) { + List list = familyMap.get(family); + if(list == null) { + list = new ArrayList(); + } else if(!list.isEmpty()) { + list.clear(); + } + list.add(new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamily)); + familyMap.put(family, list); + return this; + } + + /** + * Delete all versions of the specified column. + * @param family family name + * @param qualifier column qualifier + * @return this for invocation chaining + */ + public Delete deleteColumns(byte [] family, byte [] qualifier) { + this.deleteColumns(family, qualifier, HConstants.LATEST_TIMESTAMP); + return this; + } + + /** + * Delete all versions of the specified column with a timestamp less than + * or equal to the specified timestamp. + * @param family family name + * @param qualifier column qualifier + * @param timestamp maximum version timestamp + * @return this for invocation chaining + */ + public Delete deleteColumns(byte [] family, byte [] qualifier, long timestamp) { + List list = familyMap.get(family); + if (list == null) { + list = new ArrayList(); + } + list.add(new KeyValue(this.row, family, qualifier, timestamp, + KeyValue.Type.DeleteColumn)); + familyMap.put(family, list); + return this; + } + + /** + * Delete the latest version of the specified column. + * This is an expensive call in that on the server-side, it first does a + * get to find the latest versions timestamp. Then it adds a delete using + * the fetched cells timestamp. + * @param family family name + * @param qualifier column qualifier + * @return this for invocation chaining + */ + public Delete deleteColumn(byte [] family, byte [] qualifier) { + this.deleteColumn(family, qualifier, HConstants.LATEST_TIMESTAMP); + return this; + } + + /** + * Delete the specified version of the specified column. + * @param family family name + * @param qualifier column qualifier + * @param timestamp version timestamp + * @return this for invocation chaining + */ + public Delete deleteColumn(byte [] family, byte [] qualifier, long timestamp) { + List list = familyMap.get(family); + if(list == null) { + list = new ArrayList(); + } + list.add(new KeyValue( + this.row, family, qualifier, timestamp, KeyValue.Type.Delete)); + familyMap.put(family, list); + return this; + } + + /** + * Set the timestamp of the delete. + * + * @param timestamp + */ + public void setTimestamp(long timestamp) { + this.ts = timestamp; + } + + @Override + public Map toMap(int maxCols) { + // we start with the fingerprint map and build on top of it. + Map map = super.toMap(maxCols); + // why is put not doing this? + map.put("ts", this.ts); + return map; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java new file mode 100644 index 0000000..0ade4e9 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -0,0 +1,425 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +/** + * Used to perform Get operations on a single row. + *
+ * To get everything for a row, instantiate a Get object with the row to get. + * To further define the scope of what to get, perform additional methods as + * outlined below. + *
+ * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} + * for each family to retrieve. + *
+ * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn} + * for each column to retrieve. + *
+ * To only retrieve columns within a specific range of version timestamps, + * execute {@link #setTimeRange(long, long) setTimeRange}. + *
+ * To only retrieve columns with a specific timestamp, execute + * {@link #setTimeStamp(long) setTimestamp}. + *
+ * To limit the number of versions of each column to be returned, execute + * {@link #setMaxVersions(int) setMaxVersions}. + *
+ * To add a filter, execute {@link #setFilter(Filter) setFilter}. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class Get extends OperationWithAttributes + implements Row, Comparable<Row> { + + private byte [] row = null; + private long lockId = -1L; + private int maxVersions = 1; + private boolean cacheBlocks = true; + private int storeLimit = -1; + private int storeOffset = 0; + private Filter filter = null; + private TimeRange tr = new TimeRange(); + private Map<byte [], NavigableSet<byte []>> familyMap = + new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR); + + /** + * Create a Get operation for the specified row. + *
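The class comment above describes Get as a narrowing builder: start with the whole row, then restrict by family, column, time range and version count. A hedged usage sketch under that reading; table, family and qualifier names are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class GetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "t1");                  // hypothetical table
    Get g = new Get(Bytes.toBytes("row-1"));
    g.addFamily(Bytes.toBytes("cf1"));                      // everything in cf1
    g.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q"));  // plus just cf2:q
    g.setTimeRange(0L, Long.MAX_VALUE);                     // versions within [minStamp, maxStamp)
    g.setMaxVersions(3);                                    // at most 3 versions per column
    Result r = table.get(g);
    byte[] latest = r.getValue(Bytes.toBytes("cf2"), Bytes.toBytes("q"));
    if (latest != null) {
      System.out.println(Bytes.toStringBinary(latest));
    }
    table.close();
  }
}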
+ * If no further operations are done, this will get the latest version of + * all columns in all families of the specified row. + * @param row row key + */ + public Get(byte [] row) { + this(row, null); + } + + /** + * Create a Get operation for the specified row, using an existing row lock. + *
+ * If no further operations are done, this will get the latest version of + * all columns in all families of the specified row. + * @param row row key + * @param rowLock previously acquired row lock, or null + */ + public Get(byte [] row, RowLock rowLock) { + this.row = row; + if(rowLock != null) { + this.lockId = rowLock.getLockId(); + } + } + + /** + * Get all columns from the specified family. + *
+ * Overrides previous calls to addColumn for this family. + * @param family family name + * @return the Get object + */ + public Get addFamily(byte [] family) { + familyMap.remove(family); + familyMap.put(family, null); + return this; + } + + /** + * Get the column from the specific family with the specified qualifier. + *
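Note that addFamily and addColumn override one another for the same family, as the javadoc here spells out; a tiny hedged fragment (it assumes the Get and Bytes imports already present in this file, and illustrative names):

Get g = new Get(Bytes.toBytes("row-1"));
g.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"));  // narrow the scope to cf:q1
g.addFamily(Bytes.toBytes("cf"));                       // widen back to all of cf; the earlier
                                                        // addColumn entry for cf is discarded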
+ * Overrides previous calls to addFamily for this family. + * @param family family name + * @param qualifier column qualifier + * @return the Get objec + */ + public Get addColumn(byte [] family, byte [] qualifier) { + NavigableSet set = familyMap.get(family); + if(set == null) { + set = new TreeSet(Bytes.BYTES_COMPARATOR); + } + if (qualifier == null) { + qualifier = HConstants.EMPTY_BYTE_ARRAY; + } + set.add(qualifier); + familyMap.put(family, set); + return this; + } + + /** + * Get versions of columns only within the specified timestamp range, + * [minStamp, maxStamp). + * @param minStamp minimum timestamp value, inclusive + * @param maxStamp maximum timestamp value, exclusive + * @throws IOException if invalid time range + * @return this for invocation chaining + */ + public Get setTimeRange(long minStamp, long maxStamp) + throws IOException { + tr = new TimeRange(minStamp, maxStamp); + return this; + } + + /** + * Get versions of columns with the specified timestamp. + * @param timestamp version timestamp + * @return this for invocation chaining + */ + public Get setTimeStamp(long timestamp) { + try { + tr = new TimeRange(timestamp, timestamp+1); + } catch(IOException e) { + // Will never happen + } + return this; + } + + /** + * Get all available versions. + * @return this for invocation chaining + */ + public Get setMaxVersions() { + this.maxVersions = Integer.MAX_VALUE; + return this; + } + + /** + * Get up to the specified number of versions of each column. + * @param maxVersions maximum versions for each column + * @throws IOException if invalid number of versions + * @return this for invocation chaining + */ + public Get setMaxVersions(int maxVersions) throws IOException { + if(maxVersions <= 0) { + throw new IOException("maxVersions must be positive"); + } + this.maxVersions = maxVersions; + return this; + } + + /** + * Set the maximum number of values to return per row per Column Family + * @param limit the maximum number of values returned / row / CF + * @return this for invocation chaining + */ + public Get setMaxResultsPerColumnFamily(int limit) { + this.storeLimit = limit; + return this; + } + + /** + * Set offset for the row per Column Family. This offset is only within a particular row/CF + * combination. It gets reset back to zero when we move to the next row or CF. + * @param offset is the number of kvs that will be skipped. + * @return this for invocation chaining + */ + public Get setRowOffsetPerColumnFamily(int offset) { + this.storeOffset = offset; + return this; + } + + /** + * Apply the specified server-side filter when performing the Get. + * Only {@link Filter#filterKeyValue(KeyValue)} is called AFTER all tests + * for ttl, column match, deletes and max versions have been run. + * @param filter filter to run on the server + * @return this for invocation chaining + */ + public Get setFilter(Filter filter) { + this.filter = filter; + return this; + } + + /* Accessors */ + + /** + * @return Filter + */ + public Filter getFilter() { + return this.filter; + } + + /** + * Set whether blocks should be cached for this Get. + *
+ * This is true by default. When true, default settings of the table and + * family are used (this will never override caching blocks if the block + * cache is disabled for that family or entirely). + * + * @param cacheBlocks if false, default settings are overridden and blocks + * will not be cached + */ + public void setCacheBlocks(boolean cacheBlocks) { + this.cacheBlocks = cacheBlocks; + } + + /** + * Get whether blocks should be cached for this Get. + * @return true if default caching should be used, false if blocks should not + * be cached + */ + public boolean getCacheBlocks() { + return cacheBlocks; + } + + /** + * Method for retrieving the get's row + * @return row + */ + public byte [] getRow() { + return this.row; + } + + /** + * Method for retrieving the get's RowLock + * @return RowLock + */ + public RowLock getRowLock() { + return new RowLock(this.row, this.lockId); + } + + /** + * Method for retrieving the get's lockId + * @return lockId + */ + public long getLockId() { + return this.lockId; + } + + /** + * Method for retrieving the get's maximum number of version + * @return the maximum number of version to fetch for this get + */ + public int getMaxVersions() { + return this.maxVersions; + } + + /** + * Method for retrieving the get's maximum number of values + * to return per Column Family + * @return the maximum number of values to fetch per CF + */ + public int getMaxResultsPerColumnFamily() { + return this.storeLimit; + } + + /** + * Method for retrieving the get's offset per row per column + * family (#kvs to be skipped) + * @return the row offset + */ + public int getRowOffsetPerColumnFamily() { + return this.storeOffset; + } + + /** + * Method for retrieving the get's TimeRange + * @return timeRange + */ + public TimeRange getTimeRange() { + return this.tr; + } + + /** + * Method for retrieving the keys in the familyMap + * @return keys in the current familyMap + */ + public Set familySet() { + return this.familyMap.keySet(); + } + + /** + * Method for retrieving the number of families to get from + * @return number of families + */ + public int numFamilies() { + return this.familyMap.size(); + } + + /** + * Method for checking if any families have been inserted into this Get + * @return true if familyMap is non empty false otherwise + */ + public boolean hasFamilies() { + return !this.familyMap.isEmpty(); + } + + /** + * Method for retrieving the get's familyMap + * @return familyMap + */ + public Map> getFamilyMap() { + return this.familyMap; + } + + /** + * Compile the table and column family (i.e. schema) information + * into a String. Useful for parsing and aggregation by debugging, + * logging, and administration tools. + * @return Map + */ + @Override + public Map getFingerprint() { + Map map = new HashMap(); + List families = new ArrayList(); + map.put("families", families); + for (Map.Entry> entry : + this.familyMap.entrySet()) { + families.add(Bytes.toStringBinary(entry.getKey())); + } + return map; + } + + /** + * Compile the details beyond the scope of getFingerprint (row, columns, + * timestamps, etc.) into a Map along with the fingerprinted information. + * Useful for debugging, logging, and administration tools. + * @param maxCols a limit on the number of columns output prior to truncation + * @return Map + */ + @Override + public Map toMap(int maxCols) { + // we start with the fingerprint map and build on top of it. 
+ Map map = getFingerprint(); + // replace the fingerprint's simple list of families with a + // map from column families to lists of qualifiers and kv details + Map> columns = new HashMap>(); + map.put("families", columns); + // add scalar information first + map.put("row", Bytes.toStringBinary(this.row)); + map.put("maxVersions", this.maxVersions); + map.put("cacheBlocks", this.cacheBlocks); + List timeRange = new ArrayList(); + timeRange.add(this.tr.getMin()); + timeRange.add(this.tr.getMax()); + map.put("timeRange", timeRange); + int colCount = 0; + // iterate through affected families and add details + for (Map.Entry> entry : + this.familyMap.entrySet()) { + List familyList = new ArrayList(); + columns.put(Bytes.toStringBinary(entry.getKey()), familyList); + if(entry.getValue() == null) { + colCount++; + --maxCols; + familyList.add("ALL"); + } else { + colCount += entry.getValue().size(); + if (maxCols <= 0) { + continue; + } + for (byte [] column : entry.getValue()) { + if (--maxCols <= 0) { + continue; + } + familyList.add(Bytes.toStringBinary(column)); + } + } + } + map.put("totalColumns", colCount); + if (this.filter != null) { + map.put("filter", this.filter.toString()); + } + // add the id if set + if (getId() != null) { + map.put("id", getId()); + } + return map; + } + + //Row + public int compareTo(Row other) { + return Bytes.compareTo(this.getRow(), other.getRow()); + } + +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java new file mode 100644 index 0000000..6f3ce22 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -0,0 +1,2171 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import java.io.Closeable; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.net.SocketTimeoutException; +import java.util.Arrays; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.regex.Pattern; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MasterNotRunningException; +import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.RegionException; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.TableNotEnabledException; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.UnknownRegionException; +import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.catalog.MetaReader; +import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; +import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; +import 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse; +import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.util.StringUtils; +import org.apache.zookeeper.KeeperException; + +import com.google.protobuf.ByteString; +import com.google.protobuf.ServiceException; + +/** + * Provides an interface to manage HBase database table metadata + general + * administrative functions. Use HBaseAdmin to create, drop, list, enable and + * disable tables. Use it also to add and drop table column families. + * + *
See {@link HTable} to add, update, and delete data from an individual table. + *
Currently HBaseAdmin instances are not expected to be long-lived. For + * example, an HBaseAdmin instance will not ride over a Master restart. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class HBaseAdmin implements Abortable, Closeable { + private static final Log LOG = LogFactory.getLog(HBaseAdmin.class); + + // We use the implementation class rather then the interface because we + // need the package protected functions to get the connection to master + private HConnection connection; + + private volatile Configuration conf; + private final long pause; + private final int numRetries; + // Some operations can take a long time such as disable of big table. + // numRetries is for 'normal' stuff... Multiply by this factor when + // want to wait a long time. + private final int retryLongerMultiplier; + private boolean aborted; + + /** + * Constructor. + * See {@link #HBaseAdmin(HConnection connection)} + * + * @param c Configuration object. Copied internally. + */ + public HBaseAdmin(Configuration c) + throws MasterNotRunningException, ZooKeeperConnectionException { + // Will not leak connections, as the new implementation of the constructor + // does not throw exceptions anymore. + this(HConnectionManager.getConnection(new Configuration(c))); + } + + /** + * Constructor for externally managed HConnections. + * The connection to master will be created when required by admin functions. + * + * @param connection The HConnection instance to use + * @throws MasterNotRunningException, ZooKeeperConnectionException are not + * thrown anymore but kept into the interface for backward api compatibility + */ + public HBaseAdmin(HConnection connection) + throws MasterNotRunningException, ZooKeeperConnectionException { + this.conf = connection.getConfiguration(); + this.connection = connection; + + this.pause = this.conf.getLong("hbase.client.pause", 1000); + this.numRetries = this.conf.getInt("hbase.client.retries.number", 10); + this.retryLongerMultiplier = this.conf.getInt( + "hbase.client.retries.longer.multiplier", 10); + } + + /** + * @return A new CatalogTracker instance; call {@link #cleanupCatalogTracker(CatalogTracker)} + * to cleanup the returned catalog tracker. + * @throws ZooKeeperConnectionException + * @throws IOException + * @see #cleanupCatalogTracker(CatalogTracker) + */ + private synchronized CatalogTracker getCatalogTracker() + throws ZooKeeperConnectionException, IOException { + CatalogTracker ct = null; + try { + ct = new CatalogTracker(this.conf); + ct.start(); + } catch (InterruptedException e) { + // Let it out as an IOE for now until we redo all so tolerate IEs + Thread.currentThread().interrupt(); + throw new IOException("Interrupted", e); + } + return ct; + } + + private void cleanupCatalogTracker(final CatalogTracker ct) { + ct.stop(); + } + + @Override + public void abort(String why, Throwable e) { + // Currently does nothing but throw the passed message and exception + this.aborted = true; + throw new RuntimeException(why, e); + } + + @Override + public boolean isAborted(){ + return this.aborted; + } + + /** @return HConnection used by this object. */ + public HConnection getConnection() { + return connection; + } + + /** @return - true if the master server is running. Throws an exception + * otherwise. 
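Because HBaseAdmin is documented above as short-lived, the usual pattern is create, use, close. A hedged sketch of that lifecycle; the table name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class AdminSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);           // the configuration is copied internally
    try {
      System.out.println("master running: " + admin.isMasterRunning());
      System.out.println("t1 exists: " + admin.tableExists("t1"));
      for (HTableDescriptor htd : admin.listTables()) {
        System.out.println(htd.getNameAsString());
      }
    } finally {
      admin.close();                                   // HBaseAdmin implements Closeable
    }
  }
}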
+ * @throws ZooKeeperConnectionException + * @throws MasterNotRunningException + */ + public boolean isMasterRunning() + throws MasterNotRunningException, ZooKeeperConnectionException { + return connection.isMasterRunning(); + } + + /** + * @param tableName Table to check. + * @return True if table exists already. + * @throws IOException + */ + public boolean tableExists(final String tableName) + throws IOException { + boolean b = false; + CatalogTracker ct = getCatalogTracker(); + try { + b = MetaReader.tableExists(ct, tableName); + } finally { + cleanupCatalogTracker(ct); + } + return b; + } + + /** + * @param tableName Table to check. + * @return True if table exists already. + * @throws IOException + */ + public boolean tableExists(final byte [] tableName) + throws IOException { + return tableExists(Bytes.toString(tableName)); + } + + /** + * List all the userspace tables. In other words, scan the META table. + * + * If we wanted this to be really fast, we could implement a special + * catalog table that just contains table names and their descriptors. + * Right now, it only exists as part of the META table's region info. + * + * @return - returns an array of HTableDescriptors + * @throws IOException if a remote or network exception occurs + */ + public HTableDescriptor[] listTables() throws IOException { + return this.connection.listTables(); + } + + /** + * List all the userspace tables matching the given pattern. + * + * @param pattern The compiled regular expression to match against + * @return - returns an array of HTableDescriptors + * @throws IOException if a remote or network exception occurs + * @see #listTables() + */ + public HTableDescriptor[] listTables(Pattern pattern) throws IOException { + List matched = new LinkedList(); + HTableDescriptor[] tables = listTables(); + for (HTableDescriptor table : tables) { + if (pattern.matcher(table.getNameAsString()).matches()) { + matched.add(table); + } + } + return matched.toArray(new HTableDescriptor[matched.size()]); + } + + /** + * List all the userspace tables matching the given regular expression. + * + * @param regex The regular expression to match against + * @return - returns an array of HTableDescriptors + * @throws IOException if a remote or network exception occurs + * @see #listTables(java.util.regex.Pattern) + */ + public HTableDescriptor[] listTables(String regex) throws IOException { + return listTables(Pattern.compile(regex)); + } + + + /** + * Method for getting the tableDescriptor + * @param tableName as a byte [] + * @return the tableDescriptor + * @throws TableNotFoundException + * @throws IOException if a remote or network exception occurs + */ + public HTableDescriptor getTableDescriptor(final byte [] tableName) + throws TableNotFoundException, IOException { + return this.connection.getHTableDescriptor(tableName); + } + + private long getPauseTime(int tries) { + int triesCount = tries; + if (triesCount >= HConstants.RETRY_BACKOFF.length) { + triesCount = HConstants.RETRY_BACKOFF.length - 1; + } + return this.pause * HConstants.RETRY_BACKOFF[triesCount]; + } + + /** + * Creates a new table. + * Synchronous operation. + * + * @param desc table descriptor for table + * + * @throws IllegalArgumentException if the table name is reserved + * @throws MasterNotRunningException if master is not running + * @throws TableExistsException if table already exists (If concurrent + * threads, the table may have been created between test-for-existence + * and attempt-at-creation). 
+ * @throws IOException if a remote or network exception occurs + */ + public void createTable(HTableDescriptor desc) + throws IOException { + createTable(desc, null); + } + + /** + * Creates a new table with the specified number of regions. The start key + * specified will become the end key of the first region of the table, and + * the end key specified will become the start key of the last region of the + * table (the first region has a null start key and the last region has a + * null end key). + * + * BigInteger math will be used to divide the key range specified into + * enough segments to make the required number of total regions. + * + * Synchronous operation. + * + * @param desc table descriptor for table + * @param startKey beginning of key range + * @param endKey end of key range + * @param numRegions the total number of regions to create + * + * @throws IllegalArgumentException if the table name is reserved + * @throws MasterNotRunningException if master is not running + * @throws TableExistsException if table already exists (If concurrent + * threads, the table may have been created between test-for-existence + * and attempt-at-creation). + * @throws IOException + */ + public void createTable(HTableDescriptor desc, byte [] startKey, + byte [] endKey, int numRegions) + throws IOException { + HTableDescriptor.isLegalTableName(desc.getName()); + if(numRegions < 3) { + throw new IllegalArgumentException("Must create at least three regions"); + } else if(Bytes.compareTo(startKey, endKey) >= 0) { + throw new IllegalArgumentException("Start key must be smaller than end key"); + } + byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3); + if(splitKeys == null || splitKeys.length != numRegions - 1) { + throw new IllegalArgumentException("Unable to split key range into enough regions"); + } + createTable(desc, splitKeys); + } + + /** + * Creates a new table with an initial set of empty regions defined by the + * specified split keys. The total number of regions created will be the + * number of split keys plus one. Synchronous operation. + * Note : Avoid passing empty split key. + * + * @param desc table descriptor for table + * @param splitKeys array of split keys for the initial regions of the table + * + * @throws IllegalArgumentException if the table name is reserved, if the split keys + * are repeated and if the split key has empty byte array. + * @throws MasterNotRunningException if master is not running + * @throws TableExistsException if table already exists (If concurrent + * threads, the table may have been created between test-for-existence + * and attempt-at-creation). + * @throws IOException + */ + public void createTable(final HTableDescriptor desc, byte [][] splitKeys) + throws IOException { + HTableDescriptor.isLegalTableName(desc.getName()); + try { + createTableAsync(desc, splitKeys); + } catch (SocketTimeoutException ste) { + LOG.warn("Creating " + desc.getNameAsString() + " took too long", ste); + } + int numRegs = splitKeys == null ? 
1 : splitKeys.length + 1; + int prevRegCount = 0; + boolean doneWithMetaScan = false; + for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier; + ++tries) { + if (!doneWithMetaScan) { + // Wait for new table to come on-line + final AtomicInteger actualRegCount = new AtomicInteger(0); + MetaScannerVisitor visitor = new MetaScannerVisitorBase() { + @Override + public boolean processRow(Result rowResult) throws IOException { + HRegionInfo info = HRegionInfo.getHRegionInfo(rowResult); + if (info == null) { + LOG.warn("No serialized HRegionInfo in " + rowResult); + return true; + } + if (!(Bytes.equals(info.getTableName(), desc.getName()))) { + return false; + } + ServerName serverName = HRegionInfo.getServerName(rowResult); + // Make sure that regions are assigned to server + if (!(info.isOffline() || info.isSplit()) && serverName != null + && serverName.getHostAndPort() != null) { + actualRegCount.incrementAndGet(); + } + return true; + } + }; + MetaScanner.metaScan(conf, visitor, desc.getName()); + if (actualRegCount.get() != numRegs) { + if (tries == this.numRetries * this.retryLongerMultiplier - 1) { + throw new RegionOfflineException("Only " + actualRegCount.get() + + " of " + numRegs + " regions are online; retries exhausted."); + } + try { // Sleep + Thread.sleep(getPauseTime(tries)); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted when opening" + + " regions; " + actualRegCount.get() + " of " + numRegs + + " regions processed so far"); + } + if (actualRegCount.get() > prevRegCount) { // Making progress + prevRegCount = actualRegCount.get(); + tries = -1; + } + } else { + doneWithMetaScan = true; + tries = -1; + } + } else if (isTableEnabled(desc.getName())) { + return; + } else { + try { // Sleep + Thread.sleep(getPauseTime(tries)); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted when waiting" + + " for table to be enabled; meta scan was done"); + } + } + } + throw new TableNotEnabledException( + "Retries exhausted while still waiting for table: " + + desc.getNameAsString() + " to be enabled"); + } + + /** + * Creates a new table but does not block and wait for it to come online. + * Asynchronous operation. To check if the table exists, use + * {@link #isTableAvailable} -- it is not safe to create an HTable + * instance to this table before it is available. + * Note : Avoid passing empty split key. + * @param desc table descriptor for table + * + * @throws IllegalArgumentException Bad table name, if the split keys + * are repeated and if the split key has empty byte array. + * @throws MasterNotRunningException if master is not running + * @throws TableExistsException if table already exists (If concurrent + * threads, the table may have been created between test-for-existence + * and attempt-at-creation). 
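Both pre-split forms above can be exercised the same way; a hedged sketch with hypothetical table and family names (the key-range form requires at least three regions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTableDescriptor desc = new HTableDescriptor(Bytes.toBytes("t1"));   // hypothetical table
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("cf")));
    // Explicit split keys: three regions covering (-inf,a), [a,b) and [b,+inf).
    byte[][] splits = { Bytes.toBytes("a"), Bytes.toBytes("b") };
    admin.createTable(desc, splits);
    // Or divide a key range into a fixed number of regions (must be >= 3):
    // admin.createTable(desc, Bytes.toBytes("0000"), Bytes.toBytes("ffff"), 10);
    admin.close();
  }
}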
+ * @throws IOException + */ + public void createTableAsync( + final HTableDescriptor desc, final byte [][] splitKeys) + throws IOException { + HTableDescriptor.isLegalTableName(desc.getName()); + if(splitKeys != null && splitKeys.length > 0) { + Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR); + // Verify there are no duplicate split keys + byte [] lastKey = null; + for(byte [] splitKey : splitKeys) { + if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) { + throw new IllegalArgumentException( + "Empty split key must not be passed in the split keys."); + } + if(lastKey != null && Bytes.equals(splitKey, lastKey)) { + throw new IllegalArgumentException("All split keys must be unique, " + + "found duplicate: " + Bytes.toStringBinary(splitKey) + + ", " + Bytes.toStringBinary(lastKey)); + } + lastKey = splitKey; + } + } + + execute(new MasterAdminCallable() { + @Override + public Void call() throws ServiceException { + CreateTableRequest request = RequestConverter.buildCreateTableRequest(desc, splitKeys); + masterAdmin.createTable(null, request); + return null; + } + }); + } + + /** + * Deletes a table. + * Synchronous operation. + * + * @param tableName name of table to delete + * @throws IOException if a remote or network exception occurs + */ + public void deleteTable(final String tableName) throws IOException { + deleteTable(Bytes.toBytes(tableName)); + } + + /** + * Deletes a table. + * Synchronous operation. + * + * @param tableName name of table to delete + * @throws IOException if a remote or network exception occurs + */ + public void deleteTable(final byte [] tableName) throws IOException { + HTableDescriptor.isLegalTableName(tableName); + HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName); + boolean tableExists = true; + + execute(new MasterAdminCallable() { + @Override + public Void call() throws ServiceException { + DeleteTableRequest req = RequestConverter.buildDeleteTableRequest(tableName); + masterAdmin.deleteTable(null,req); + return null; + } + }); + + // Wait until all regions deleted + ClientProtocol server = + connection.getClient(firstMetaServer.getHostname(), firstMetaServer.getPort()); + for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) { + try { + + Scan scan = MetaReader.getScanForTableName(tableName); + scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + ScanRequest request = RequestConverter.buildScanRequest( + firstMetaServer.getRegionInfo().getRegionName(), scan, 1, true); + Result[] values = null; + // Get a batch at a time. + try { + ScanResponse response = server.scan(null, request); + values = ResponseConverter.getResults(response); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + + // let us wait until .META. 
table is updated and + // HMaster removes the table from its HTableDescriptors + if (values == null || values.length == 0) { + tableExists = false; + GetTableDescriptorsResponse htds; + MasterMonitorKeepAliveConnection master = connection.getKeepAliveMasterMonitor(); + try { + GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest(null); + htds = master.getTableDescriptors(null, req); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + master.close(); + } + for (TableSchema ts : htds.getTableSchemaList()) { + if (Bytes.equals(tableName, ts.getName().toByteArray())) { + tableExists = true; + break; + } + } + if (!tableExists) { + break; + } + } + } catch (IOException ex) { + if(tries == numRetries - 1) { // no more tries left + if (ex instanceof RemoteException) { + throw ((RemoteException) ex).unwrapRemoteException(); + }else { + throw ex; + } + } + } + try { + Thread.sleep(getPauseTime(tries)); + } catch (InterruptedException e) { + // continue + } + } + + if (tableExists) { + throw new IOException("Retries exhausted, it took too long to wait"+ + " for the table " + Bytes.toString(tableName) + " to be deleted."); + } + // Delete cached information to prevent clients from using old locations + this.connection.clearRegionCache(tableName); + LOG.info("Deleted " + Bytes.toString(tableName)); + } + + /** + * Deletes tables matching the passed in pattern and wait on completion. + * + * Warning: Use this method carefully, there is no prompting and the effect is + * immediate. Consider using {@link #listTables(java.lang.String)} and + * {@link #deleteTable(byte[])} + * + * @param regex The regular expression to match table names against + * @return Table descriptors for tables that couldn't be deleted + * @throws IOException + * @see #deleteTables(java.util.regex.Pattern) + * @see #deleteTable(java.lang.String) + */ + public HTableDescriptor[] deleteTables(String regex) throws IOException { + return deleteTables(Pattern.compile(regex)); + } + + /** + * Delete tables matching the passed in pattern and wait on completion. + * + * Warning: Use this method carefully, there is no prompting and the effect is + * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and + * {@link #deleteTable(byte[])} + * + * @param pattern The pattern to match table names against + * @return Table descriptors for tables that couldn't be deleted + * @throws IOException + */ + public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException { + List failed = new LinkedList(); + for (HTableDescriptor table : listTables(pattern)) { + try { + deleteTable(table.getName()); + } catch (IOException ex) { + LOG.info("Failed to delete table " + table.getNameAsString(), ex); + failed.add(table); + } + } + return failed.toArray(new HTableDescriptor[failed.size()]); + } + + + public void enableTable(final String tableName) + throws IOException { + enableTable(Bytes.toBytes(tableName)); + } + + /** + * Enable a table. May timeout. Use {@link #enableTableAsync(byte[])} + * and {@link #isTableEnabled(byte[])} instead. + * The table has to be in disabled state for it to be enabled. + * @param tableName name of the table + * @throws IOException if a remote or network exception occurs + * There could be couple types of IOException + * TableNotFoundException means the table doesn't exist. + * TableNotDisabledException means the table isn't in disabled state. 
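The delete path above blocks until .META. no longer lists the table; since a table normally has to be disabled before it can be deleted, a hedged end-to-end sketch looks like this (table name hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    if (admin.tableExists("t1")) {
      if (admin.isTableEnabled("t1")) {
        admin.disableTable("t1");       // waits for every region to go offline
      }
      admin.deleteTable("t1");          // waits for .META. and the descriptors to be cleaned up
    }
    admin.close();
  }
}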
+ * @see #isTableEnabled(byte[]) + * @see #disableTable(byte[]) + * @see #enableTableAsync(byte[]) + */ + public void enableTable(final byte [] tableName) + throws IOException { + enableTableAsync(tableName); + + // Wait until all regions are enabled + boolean enabled = false; + for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) { + enabled = isTableEnabled(tableName); + if (enabled) { + break; + } + long sleep = getPauseTime(tries); + if (LOG.isDebugEnabled()) { + LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " + + "enabled in " + Bytes.toString(tableName)); + } + try { + Thread.sleep(sleep); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // Do this conversion rather than let it out because do not want to + // change the method signature. + throw new IOException("Interrupted", e); + } + } + if (!enabled) { + throw new IOException("Unable to enable table " + + Bytes.toString(tableName)); + } + LOG.info("Enabled table " + Bytes.toString(tableName)); + } + + public void enableTableAsync(final String tableName) + throws IOException { + enableTableAsync(Bytes.toBytes(tableName)); + } + + /** + * Brings a table on-line (enables it). Method returns immediately though + * enable of table may take some time to complete, especially if the table + * is large (All regions are opened as part of enabling process). Check + * {@link #isTableEnabled(byte[])} to learn when table is fully online. If + * table is taking too long to online, check server logs. + * @param tableName + * @throws IOException + * @since 0.90.0 + */ + public void enableTableAsync(final byte [] tableName) + throws IOException { + HTableDescriptor.isLegalTableName(tableName); + execute(new MasterAdminCallable() { + @Override + public Void call() throws ServiceException { + LOG.info("Started enable of " + Bytes.toString(tableName)); + EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName); + masterAdmin.enableTable(null,req); + return null; + } + }); + } + + /** + * Enable tables matching the passed in pattern and wait on completion. + * + * Warning: Use this method carefully, there is no prompting and the effect is + * immediate. Consider using {@link #listTables(java.lang.String)} and + * {@link #enableTable(byte[])} + * + * @param regex The regular expression to match table names against + * @throws IOException + * @see #enableTables(java.util.regex.Pattern) + * @see #enableTable(java.lang.String) + */ + public HTableDescriptor[] enableTables(String regex) throws IOException { + return enableTables(Pattern.compile(regex)); + } + + /** + * Enable tables matching the passed in pattern and wait on completion. + * + * Warning: Use this method carefully, there is no prompting and the effect is + * immediate. 
Consider using {@link #listTables(java.util.regex.Pattern) } and + * {@link #enableTable(byte[])} + * + * @param pattern The pattern to match table names against + * @throws IOException + */ + public HTableDescriptor[] enableTables(Pattern pattern) throws IOException { + List failed = new LinkedList(); + for (HTableDescriptor table : listTables(pattern)) { + if (isTableDisabled(table.getName())) { + try { + enableTable(table.getName()); + } catch (IOException ex) { + LOG.info("Failed to enable table " + table.getNameAsString(), ex); + failed.add(table); + } + } + } + return failed.toArray(new HTableDescriptor[failed.size()]); + } + + public void disableTableAsync(final String tableName) throws IOException { + disableTableAsync(Bytes.toBytes(tableName)); + } + + /** + * Starts the disable of a table. If it is being served, the master + * will tell the servers to stop serving it. This method returns immediately. + * The disable of a table can take some time if the table is large (all + * regions are closed as part of table disable operation). + * Call {@link #isTableDisabled(byte[])} to check for when disable completes. + * If table is taking too long to online, check server logs. + * @param tableName name of table + * @throws IOException if a remote or network exception occurs + * @see #isTableDisabled(byte[]) + * @see #isTableEnabled(byte[]) + * @since 0.90.0 + */ + public void disableTableAsync(final byte [] tableName) throws IOException { + HTableDescriptor.isLegalTableName(tableName); + execute(new MasterAdminCallable() { + @Override + public Void call() throws ServiceException { + LOG.info("Started disable of " + Bytes.toString(tableName)); + DisableTableRequest req = RequestConverter.buildDisableTableRequest(tableName); + masterAdmin.disableTable(null,req); + return null; + } + }); + } + + public void disableTable(final String tableName) + throws IOException { + disableTable(Bytes.toBytes(tableName)); + } + + /** + * Disable table and wait on completion. May timeout eventually. Use + * {@link #disableTableAsync(byte[])} and {@link #isTableDisabled(String)} + * instead. + * The table has to be in enabled state for it to be disabled. + * @param tableName + * @throws IOException + * There could be couple types of IOException + * TableNotFoundException means the table doesn't exist. + * TableNotEnabledException means the table isn't in enabled state. + */ + public void disableTable(final byte [] tableName) + throws IOException { + disableTableAsync(tableName); + // Wait until table is disabled + boolean disabled = false; + for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) { + disabled = isTableDisabled(tableName); + if (disabled) { + break; + } + long sleep = getPauseTime(tries); + if (LOG.isDebugEnabled()) { + LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " + + "disabled in " + Bytes.toString(tableName)); + } + try { + Thread.sleep(sleep); + } catch (InterruptedException e) { + // Do this conversion rather than let it out because do not want to + // change the method signature. + Thread.currentThread().interrupt(); + throw new IOException("Interrupted", e); + } + } + if (!disabled) { + throw new RegionException("Retries exhausted, it took too long to wait"+ + " for the table " + Bytes.toString(tableName) + " to be disabled."); + } + LOG.info("Disabled " + Bytes.toString(tableName)); + } + + /** + * Disable tables matching the passed in pattern and wait on completion. 
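The asynchronous variants return as soon as the master has accepted the request; the synchronous wrappers above simply poll the table state with back-off. A hedged sketch of the same pattern (a real caller would bound the number of retries, as enableTable itself does; the table name is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class EnablePollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.enableTableAsync("t1");              // returns once the master has started the enable
    while (!admin.isTableEnabled("t1")) {      // poll until all regions are online
      Thread.sleep(1000);
    }
    admin.close();
  }
}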
+ * + * Warning: Use this method carefully, there is no prompting and the effect is + * immediate. Consider using {@link #listTables(java.lang.String)} and + * {@link #disableTable(byte[])} + * + * @param regex The regular expression to match table names against + * @return Table descriptors for tables that couldn't be disabled + * @throws IOException + * @see #disableTables(java.util.regex.Pattern) + * @see #disableTable(java.lang.String) + */ + public HTableDescriptor[] disableTables(String regex) throws IOException { + return disableTables(Pattern.compile(regex)); + } + + /** + * Disable tables matching the passed in pattern and wait on completion. + * + * Warning: Use this method carefully, there is no prompting and the effect is + * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and + * {@link #disableTable(byte[])} + * + * @param pattern The pattern to match table names against + * @return Table descriptors for tables that couldn't be disabled + * @throws IOException + */ + public HTableDescriptor[] disableTables(Pattern pattern) throws IOException { + List failed = new LinkedList(); + for (HTableDescriptor table : listTables(pattern)) { + if (isTableEnabled(table.getName())) { + try { + disableTable(table.getName()); + } catch (IOException ex) { + LOG.info("Failed to disable table " + table.getNameAsString(), ex); + failed.add(table); + } + } + } + return failed.toArray(new HTableDescriptor[failed.size()]); + } + + /** + * @param tableName name of table to check + * @return true if table is on-line + * @throws IOException if a remote or network exception occurs + */ + public boolean isTableEnabled(String tableName) throws IOException { + return isTableEnabled(Bytes.toBytes(tableName)); + } + /** + * @param tableName name of table to check + * @return true if table is on-line + * @throws IOException if a remote or network exception occurs + */ + public boolean isTableEnabled(byte[] tableName) throws IOException { + if (!HTableDescriptor.isMetaTable(tableName)) { + HTableDescriptor.isLegalTableName(tableName); + } + return connection.isTableEnabled(tableName); + } + + /** + * @param tableName name of table to check + * @return true if table is off-line + * @throws IOException if a remote or network exception occurs + */ + public boolean isTableDisabled(final String tableName) throws IOException { + return isTableDisabled(Bytes.toBytes(tableName)); + } + + /** + * @param tableName name of table to check + * @return true if table is off-line + * @throws IOException if a remote or network exception occurs + */ + public boolean isTableDisabled(byte[] tableName) throws IOException { + if (!HTableDescriptor.isMetaTable(tableName)) { + HTableDescriptor.isLegalTableName(tableName); + } + return connection.isTableDisabled(tableName); + } + + /** + * @param tableName name of table to check + * @return true if all regions of the table are available + * @throws IOException if a remote or network exception occurs + */ + public boolean isTableAvailable(byte[] tableName) throws IOException { + return connection.isTableAvailable(tableName); + } + + /** + * @param tableName name of table to check + * @return true if all regions of the table are available + * @throws IOException if a remote or network exception occurs + */ + public boolean isTableAvailable(String tableName) throws IOException { + return connection.isTableAvailable(Bytes.toBytes(tableName)); + } + + /** + * Get the status of alter command - indicates how many regions have received + * the updated schema 
Asynchronous operation. + * + * @param tableName + * name of the table to get the status of + * @return Pair indicating the number of regions updated Pair.getFirst() is the + * regions that are yet to be updated Pair.getSecond() is the total number + * of regions of the table + * @throws IOException + * if a remote or network exception occurs + */ + public Pair getAlterStatus(final byte[] tableName) + throws IOException { + HTableDescriptor.isLegalTableName(tableName); + return execute(new MasterMonitorCallable>() { + @Override + public Pair call() throws ServiceException { + GetSchemaAlterStatusRequest req = RequestConverter + .buildGetSchemaAlterStatusRequest(tableName); + GetSchemaAlterStatusResponse ret = masterMonitor.getSchemaAlterStatus(null, req); + Pair pair = new Pair(Integer.valueOf(ret + .getYetToUpdateRegions()), Integer.valueOf(ret.getTotalRegions())); + return pair; + } + }); + } + + /** + * Add a column to an existing table. + * Asynchronous operation. + * + * @param tableName name of the table to add column to + * @param column column descriptor of column to be added + * @throws IOException if a remote or network exception occurs + */ + public void addColumn(final String tableName, HColumnDescriptor column) + throws IOException { + addColumn(Bytes.toBytes(tableName), column); + } + + /** + * Add a column to an existing table. + * Asynchronous operation. + * + * @param tableName name of the table to add column to + * @param column column descriptor of column to be added + * @throws IOException if a remote or network exception occurs + */ + public void addColumn(final byte [] tableName, final HColumnDescriptor column) + throws IOException { + execute(new MasterAdminCallable() { + @Override + public Void call() throws ServiceException { + AddColumnRequest req = RequestConverter.buildAddColumnRequest(tableName, column); + masterAdmin.addColumn(null,req); + return null; + } + }); + } + + /** + * Delete a column from a table. + * Asynchronous operation. + * + * @param tableName name of table + * @param columnName name of column to be deleted + * @throws IOException if a remote or network exception occurs + */ + public void deleteColumn(final String tableName, final String columnName) + throws IOException { + deleteColumn(Bytes.toBytes(tableName), Bytes.toBytes(columnName)); + } + + /** + * Delete a column from a table. + * Asynchronous operation. + * + * @param tableName name of table + * @param columnName name of column to be deleted + * @throws IOException if a remote or network exception occurs + */ + public void deleteColumn(final byte [] tableName, final byte [] columnName) + throws IOException { + execute(new MasterAdminCallable() { + @Override + public Void call() throws ServiceException { + DeleteColumnRequest req = RequestConverter.buildDeleteColumnRequest(tableName, columnName); + masterAdmin.deleteColumn(null,req); + return null; + } + }); + } + + /** + * Modify an existing column family on a table. + * Asynchronous operation. + * + * @param tableName name of table + * @param descriptor new column descriptor to use + * @throws IOException if a remote or network exception occurs + */ + public void modifyColumn(final String tableName, HColumnDescriptor descriptor) + throws IOException { + modifyColumn(Bytes.toBytes(tableName), descriptor); + } + + + + /** + * Modify an existing column family on a table. + * Asynchronous operation. 
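getAlterStatus pairs naturally with the asynchronous column-family operations around it: the first element of the returned pair is the number of regions still to be updated, the second the total. A hedged sketch; the family tweak, the Pair&lt;Integer, Integer&gt; generic signature and the table name are assumptions on top of what is shown here, and depending on configuration the table may need to be disabled before a schema change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class AlterStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    byte[] table = Bytes.toBytes("t1");                        // hypothetical table
    HColumnDescriptor cf = new HColumnDescriptor(Bytes.toBytes("cf"));
    cf.setMaxVersions(5);                                      // assumes an existing family "cf"
    admin.modifyColumn(table, cf);                             // asynchronous schema change
    Pair<Integer, Integer> status = admin.getAlterStatus(table);
    while (status.getFirst() > 0) {                            // regions yet to pick up the change
      Thread.sleep(1000);
      status = admin.getAlterStatus(table);
    }
    admin.close();
  }
}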
+ * + * @param tableName name of table + * @param descriptor new column descriptor to use + * @throws IOException if a remote or network exception occurs + */ + public void modifyColumn(final byte [] tableName, final HColumnDescriptor descriptor) + throws IOException { + execute(new MasterAdminCallable() { + @Override + public Void call() throws ServiceException { + ModifyColumnRequest req = RequestConverter.buildModifyColumnRequest(tableName, descriptor); + masterAdmin.modifyColumn(null,req); + return null; + } + }); + } + + /** + * Close a region. For expert-admins. Runs close on the regionserver. The + * master will not be informed of the close. + * @param regionname region name to close + * @param serverName If supplied, we'll use this location rather than + * the one currently in .META. + * @throws IOException if a remote or network exception occurs + */ + public void closeRegion(final String regionname, final String serverName) + throws IOException { + closeRegion(Bytes.toBytes(regionname), serverName); + } + + /** + * Close a region. For expert-admins Runs close on the regionserver. The + * master will not be informed of the close. + * @param regionname region name to close + * @param serverName The servername of the regionserver. If passed null we + * will use servername found in the .META. table. A server name + * is made of host, port and startcode. Here is an example: + * host187.example.com,60020,1289493121758 + * @throws IOException if a remote or network exception occurs + */ + public void closeRegion(final byte [] regionname, final String serverName) + throws IOException { + CatalogTracker ct = getCatalogTracker(); + try { + if (serverName != null) { + Pair pair = MetaReader.getRegion(ct, regionname); + if (pair == null || pair.getFirst() == null) { + throw new UnknownRegionException(Bytes.toStringBinary(regionname)); + } else { + closeRegion(new ServerName(serverName), pair.getFirst()); + } + } else { + Pair pair = MetaReader.getRegion(ct, regionname); + if (pair == null) { + throw new UnknownRegionException(Bytes.toStringBinary(regionname)); + } else if (pair.getSecond() == null) { + throw new NoServerForRegionException(Bytes.toStringBinary(regionname)); + } else { + closeRegion(pair.getSecond(), pair.getFirst()); + } + } + } finally { + cleanupCatalogTracker(ct); + } + } + + /** + * For expert-admins. Runs close on the regionserver. Closes a region based on + * the encoded region name. The region server name is mandatory. If the + * servername is provided then based on the online regions in the specified + * regionserver the specified region will be closed. The master will not be + * informed of the close. Note that the regionname is the encoded regionname. + * + * @param encodedRegionName + * The encoded region name; i.e. the hash that makes up the region + * name suffix: e.g. if regionname is + * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396. + * , then the encoded region name is: + * 527db22f95c8a9e0116f0cc13c680396. + * @param serverName + * The servername of the regionserver. A server name is made of host, + * port and startcode. This is mandatory. Here is an example: + * host187.example.com,60020,1289493121758 + * @return true if the region was closed, false if not. 
+ * @throws IOException + * if a remote or network exception occurs + */ + public boolean closeRegionWithEncodedRegionName(final String encodedRegionName, + final String serverName) throws IOException { + if (null == serverName || ("").equals(serverName.trim())) { + throw new IllegalArgumentException( + "The servername cannot be null or empty."); + } + ServerName sn = new ServerName(serverName); + AdminProtocol admin = this.connection.getAdmin( + sn.getHostname(), sn.getPort()); + // Close the region without updating zk state. + CloseRegionRequest request = + RequestConverter.buildCloseRegionRequest(encodedRegionName, false); + try { + CloseRegionResponse response = admin.closeRegion(null, request); + boolean isRegionClosed = response.getClosed(); + if (false == isRegionClosed) { + LOG.error("Not able to close the region " + encodedRegionName + "."); + } + return isRegionClosed; + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + /** + * Close a region. For expert-admins Runs close on the regionserver. The + * master will not be informed of the close. + * @param sn + * @param hri + * @throws IOException + */ + public void closeRegion(final ServerName sn, final HRegionInfo hri) + throws IOException { + AdminProtocol admin = + this.connection.getAdmin(sn.getHostname(), sn.getPort()); + // Close the region without updating zk state. + ProtobufUtil.closeRegion(admin, hri.getRegionName(), false); + } + + /** + * Get all the online regions on a region server. + */ + public List getOnlineRegions( + final ServerName sn) throws IOException { + AdminProtocol admin = + this.connection.getAdmin(sn.getHostname(), sn.getPort()); + return ProtobufUtil.getOnlineRegions(admin); + } + + /** + * Flush a table or an individual region. + * Synchronous operation. + * + * @param tableNameOrRegionName table or region to flush + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + */ + public void flush(final String tableNameOrRegionName) + throws IOException, InterruptedException { + flush(Bytes.toBytes(tableNameOrRegionName)); + } + + /** + * Flush a table or an individual region. + * Synchronous operation. 
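A hedged sketch tying together the region-listing and flush calls above; the server name and table name are made up, using the host,port,startcode format documented in this class:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    // List what one region server is currently hosting.
    ServerName sn = new ServerName("host187.example.com,60020,1289493121758");
    for (HRegionInfo hri : admin.getOnlineRegions(sn)) {
      System.out.println(hri.getRegionNameAsString());
    }
    // Flush the memstores of every region of a table (synchronous).
    admin.flush("t1");
    admin.close();
  }
}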
+ * + * @param tableNameOrRegionName table or region to flush + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + */ + public void flush(final byte [] tableNameOrRegionName) + throws IOException, InterruptedException { + CatalogTracker ct = getCatalogTracker(); + try { + Pair regionServerPair + = getRegion(tableNameOrRegionName, ct); + if (regionServerPair != null) { + if (regionServerPair.getSecond() == null) { + throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName)); + } else { + flush(regionServerPair.getSecond(), regionServerPair.getFirst()); + } + } else { + final String tableName = tableNameString(tableNameOrRegionName, ct); + List> pairs = + MetaReader.getTableRegionsAndLocations(ct, + tableName); + for (Pair pair: pairs) { + if (pair.getFirst().isOffline()) continue; + if (pair.getSecond() == null) continue; + try { + flush(pair.getSecond(), pair.getFirst()); + } catch (NotServingRegionException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Trying to flush " + pair.getFirst() + ": " + + StringUtils.stringifyException(e)); + } + } + } + } + } finally { + cleanupCatalogTracker(ct); + } + } + + private void flush(final ServerName sn, final HRegionInfo hri) + throws IOException { + AdminProtocol admin = + this.connection.getAdmin(sn.getHostname(), sn.getPort()); + FlushRegionRequest request = + RequestConverter.buildFlushRegionRequest(hri.getRegionName()); + try { + admin.flushRegion(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + /** + * Compact a table or an individual region. + * Asynchronous operation. + * + * @param tableNameOrRegionName table or region to compact + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + */ + public void compact(final String tableNameOrRegionName) + throws IOException, InterruptedException { + compact(Bytes.toBytes(tableNameOrRegionName)); + } + + /** + * Compact a table or an individual region. + * Asynchronous operation. + * + * @param tableNameOrRegionName table or region to compact + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + */ + public void compact(final byte [] tableNameOrRegionName) + throws IOException, InterruptedException { + compact(tableNameOrRegionName, null, false); + } + + /** + * Compact a column family within a table or region. + * Asynchronous operation. + * + * @param tableOrRegionName table or region to compact + * @param columnFamily column family within a table or region + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + */ + public void compact(String tableOrRegionName, String columnFamily) + throws IOException, InterruptedException { + compact(Bytes.toBytes(tableOrRegionName), Bytes.toBytes(columnFamily)); + } + + /** + * Compact a column family within a table or region. + * Asynchronous operation. + * + * @param tableNameOrRegionName table or region to compact + * @param columnFamily column family within a table or region + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + */ + public void compact(final byte [] tableNameOrRegionName, final byte[] columnFamily) + throws IOException, InterruptedException { + compact(tableNameOrRegionName, columnFamily, false); + } + + /** + * Major compact a table or an individual region. + * Asynchronous operation. 
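Both compaction entry points above only queue work on the region servers; a hedged sketch, with hypothetical table and family names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.compact("t1");               // minor compaction of every region of the table
    admin.majorCompact("t1", "cf");    // major compaction restricted to one column family
    admin.close();                     // both calls return before the compactions finish
  }
}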
+ * + * @param tableNameOrRegionName table or region to major compact + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + */ + public void majorCompact(final String tableNameOrRegionName) + throws IOException, InterruptedException { + majorCompact(Bytes.toBytes(tableNameOrRegionName)); + } + + /** + * Major compact a table or an individual region. + * Asynchronous operation. + * + * @param tableNameOrRegionName table or region to major compact + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + */ + public void majorCompact(final byte [] tableNameOrRegionName) + throws IOException, InterruptedException { + compact(tableNameOrRegionName, null, true); + } + + /** + * Major compact a column family within a table or region. + * Asynchronous operation. + * + * @param tableNameOrRegionName table or region to major compact + * @param columnFamily column family within a table or region + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + */ + public void majorCompact(final String tableNameOrRegionName, + final String columnFamily) throws IOException, InterruptedException { + majorCompact(Bytes.toBytes(tableNameOrRegionName), + Bytes.toBytes(columnFamily)); + } + + /** + * Major compact a column family within a table or region. + * Asynchronous operation. + * + * @param tableNameOrRegionName table or region to major compact + * @param columnFamily column family within a table or region + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + */ + public void majorCompact(final byte [] tableNameOrRegionName, + final byte[] columnFamily) throws IOException, InterruptedException { + compact(tableNameOrRegionName, columnFamily, true); + } + + /** + * Compact a table or an individual region. + * Asynchronous operation. + * + * @param tableNameOrRegionName table or region to compact + * @param columnFamily column family within a table or region + * @param major True if we are to do a major compaction. + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + */ + private void compact(final byte [] tableNameOrRegionName, + final byte[] columnFamily,final boolean major) + throws IOException, InterruptedException { + CatalogTracker ct = getCatalogTracker(); + try { + Pair regionServerPair + = getRegion(tableNameOrRegionName, ct); + if (regionServerPair != null) { + if (regionServerPair.getSecond() == null) { + throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName)); + } else { + compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily); + } + } else { + final String tableName = tableNameString(tableNameOrRegionName, ct); + List> pairs = + MetaReader.getTableRegionsAndLocations(ct, + tableName); + for (Pair pair: pairs) { + if (pair.getFirst().isOffline()) continue; + if (pair.getSecond() == null) continue; + try { + compact(pair.getSecond(), pair.getFirst(), major, columnFamily); + } catch (NotServingRegionException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Trying to" + (major ? 
" major" : "") + " compact " + + pair.getFirst() + ": " + + StringUtils.stringifyException(e)); + } + } + } + } + } finally { + cleanupCatalogTracker(ct); + } + } + + private void compact(final ServerName sn, final HRegionInfo hri, + final boolean major, final byte [] family) + throws IOException { + AdminProtocol admin = + this.connection.getAdmin(sn.getHostname(), sn.getPort()); + CompactRegionRequest request = + RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family); + try { + admin.compactRegion(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + /** + * Move the region r to dest. + * @param encodedRegionName The encoded region name; i.e. the hash that makes + * up the region name suffix: e.g. if regionname is + * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., + * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396. + * @param destServerName The servername of the destination regionserver. If + * passed the empty byte array we'll assign to a random server. A server name + * is made of host, port and startcode. Here is an example: + * host187.example.com,60020,1289493121758 + * @throws UnknownRegionException Thrown if we can't find a region named + * encodedRegionName + * @throws ZooKeeperConnectionException + * @throws MasterNotRunningException + */ + public void move(final byte [] encodedRegionName, final byte [] destServerName) + throws UnknownRegionException, MasterNotRunningException, ZooKeeperConnectionException { + MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + try { + MoveRegionRequest request = RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName); + master.moveRegion(null,request); + } catch (ServiceException se) { + IOException ioe = ProtobufUtil.getRemoteException(se); + if (ioe instanceof UnknownRegionException) { + throw (UnknownRegionException)ioe; + } + LOG.error("Unexpected exception: " + se + " from calling HMaster.moveRegion"); + } catch (DeserializationException de) { + LOG.error("Could not parse destination server name: " + de); + } + finally { + master.close(); + } + } + + /** + * @param regionName + * Region name to assign. + * @throws MasterNotRunningException + * @throws ZooKeeperConnectionException + * @throws IOException + */ + public void assign(final byte[] regionName) throws MasterNotRunningException, + ZooKeeperConnectionException, IOException { + execute(new MasterAdminCallable() { + @Override + public Void call() throws ServiceException { + AssignRegionRequest request = RequestConverter.buildAssignRegionRequest(regionName); + masterAdmin.assignRegion(null,request); + return null; + } + }); + } + + /** + * Unassign a region from current hosting regionserver. Region will then be + * assigned to a regionserver chosen at random. Region could be reassigned + * back to the same server. Use {@link #move(byte[], byte[])} if you want + * to control the region movement. + * @param regionName Region to unassign. Will clear any existing RegionPlan + * if one found. + * @param force If true, force unassign (Will remove region from + * regions-in-transition too if present. If results in double assignment + * use hbck -fix to resolve. To be used by experts). 
+ * @throws MasterNotRunningException + * @throws ZooKeeperConnectionException + * @throws IOException + */ + public void unassign(final byte [] regionName, final boolean force) + throws MasterNotRunningException, ZooKeeperConnectionException, IOException { + execute(new MasterAdminCallable() { + @Override + public Void call() throws ServiceException { + UnassignRegionRequest request = + RequestConverter.buildUnassignRegionRequest(regionName, force); + masterAdmin.unassignRegion(null,request); + return null; + } + }); + } + + /** + * Special method, only used by hbck. + */ + public void offline(final byte [] regionName) + throws IOException { + MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + try { + master.offlineRegion(null,RequestConverter.buildOfflineRegionRequest(regionName)); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + master.close(); + } + } + + /** + * Turn the load balancer on or off. + * @param on If true, enable balancer. If false, disable balancer. + * @param synchronous If true, it waits until current balance() call, if outstanding, to return. + * @return Previous balancer value + */ + public boolean setBalancerRunning(final boolean on, final boolean synchronous) + throws MasterNotRunningException, ZooKeeperConnectionException { + MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + try { + SetBalancerRunningRequest req = + RequestConverter.buildSetBalancerRunningRequest(on, synchronous); + return master.setBalancerRunning(null, req).getPrevBalanceValue(); + } catch (ServiceException se) { + IOException ioe = ProtobufUtil.getRemoteException(se); + if (ioe instanceof MasterNotRunningException) { + throw (MasterNotRunningException)ioe; + } + if (ioe instanceof ZooKeeperConnectionException) { + throw (ZooKeeperConnectionException)ioe; + } + + // Throwing MasterNotRunningException even though not really valid in order to not + // break interface by adding additional exception type. + throw new MasterNotRunningException("Unexpected exception when calling balanceSwitch",se); + } finally { + master.close(); + } + } + + /** + * Invoke the balancer. Will run the balancer and if regions to move, it will + * go ahead and do the reassignments. Can NOT run for various reasons. Check + * logs. + * @return True if balancer ran, false otherwise. 
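The move() call above is usually wrapped by the balancer switch so the balancer does not immediately undo a manual placement. A sketch follows, reusing the encoded region name and the host,port,startcode string from the Javadoc as placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class ManualMoveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Pause the balancer, waiting for any in-flight balance() to finish.
      boolean previous = admin.setBalancerRunning(false, true);
      try {
        // The encoded region name is the hash suffix of the full region name.
        byte[] encodedName = Bytes.toBytes("527db22f95c8a9e0116f0cc13c680396");
        // Destination is host,port,startcode; an empty byte array would let
        // the master pick a server at random.
        admin.move(encodedName,
            Bytes.toBytes("host187.example.com,60020,1289493121758"));
      } finally {
        // Restore whatever state the balancer was in before.
        admin.setBalancerRunning(previous, false);
      }
    } finally {
      admin.close();
    }
  }
}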
+ */ + public boolean balancer() + throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException { + MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + try { + return master.balance(null,RequestConverter.buildBalanceRequest()).getBalancerRan(); + } finally { + master.close(); + } + } + + /** + * Enable/Disable the catalog janitor + * @param enable if true enables the catalog janitor + * @return the previous state + * @throws ServiceException + * @throws MasterNotRunningException + */ + public boolean enableCatalogJanitor(boolean enable) + throws ServiceException, MasterNotRunningException { + MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + try { + return master.enableCatalogJanitor(null, + RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue(); + } finally { + master.close(); + } + } + + /** + * Ask for a scan of the catalog table + * @return the number of entries cleaned + * @throws ServiceException + * @throws MasterNotRunningException + */ + public int runCatalogScan() throws ServiceException, MasterNotRunningException { + MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + try { + return master.runCatalogScan(null, + RequestConverter.buildCatalogScanRequest()).getScanResult(); + } finally { + master.close(); + } + } + + /** + * Query on the catalog janitor state (Enabled/Disabled?) + * @throws ServiceException + * @throws MasterNotRunningException + */ + public boolean isCatalogJanitorEnabled() throws ServiceException, MasterNotRunningException { + MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + try { + return master.isCatalogJanitorEnabled(null, + RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue(); + } finally { + master.close(); + } + } + + /** + * Split a table or an individual region. + * Asynchronous operation. + * + * @param tableNameOrRegionName table or region to split + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + */ + public void split(final String tableNameOrRegionName) + throws IOException, InterruptedException { + split(Bytes.toBytes(tableNameOrRegionName)); + } + + /** + * Split a table or an individual region. Implicitly finds an optimal split + * point. Asynchronous operation. + * + * @param tableNameOrRegionName table to region to split + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + */ + public void split(final byte [] tableNameOrRegionName) + throws IOException, InterruptedException { + split(tableNameOrRegionName, null); + } + + public void split(final String tableNameOrRegionName, + final String splitPoint) throws IOException, InterruptedException { + split(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(splitPoint)); + } + + /** + * Split a table or an individual region. + * Asynchronous operation. 
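A sketch of the catalog janitor controls above, for example while a repair tool needs catalog cleanup paused; no table or server names are assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CatalogJanitorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      boolean wasEnabled = admin.isCatalogJanitorEnabled();
      admin.enableCatalogJanitor(false);        // pause janitor runs
      try {
        // ... do work that must not race with catalog cleanup ...
      } finally {
        admin.enableCatalogJanitor(wasEnabled); // restore the previous state
        int cleaned = admin.runCatalogScan();   // force one scan now
        System.out.println("Catalog scan cleaned " + cleaned + " entries");
      }
    } finally {
      admin.close();
    }
  }
}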
+ * + * @param tableNameOrRegionName table to region to split + * @param splitPoint the explicit position to split on + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException interrupt exception occurred + */ + public void split(final byte [] tableNameOrRegionName, + final byte [] splitPoint) throws IOException, InterruptedException { + CatalogTracker ct = getCatalogTracker(); + try { + Pair regionServerPair + = getRegion(tableNameOrRegionName, ct); + if (regionServerPair != null) { + if (regionServerPair.getSecond() == null) { + throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName)); + } else { + split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint); + } + } else { + final String tableName = tableNameString(tableNameOrRegionName, ct); + List> pairs = + MetaReader.getTableRegionsAndLocations(ct, + tableName); + for (Pair pair: pairs) { + // May not be a server for a particular row + if (pair.getSecond() == null) continue; + HRegionInfo r = pair.getFirst(); + // check for parents + if (r.isSplitParent()) continue; + // if a split point given, only split that particular region + if (splitPoint != null && !r.containsRow(splitPoint)) continue; + // call out to region server to do split now + split(pair.getSecond(), pair.getFirst(), splitPoint); + } + } + } finally { + cleanupCatalogTracker(ct); + } + } + + private void split(final ServerName sn, final HRegionInfo hri, + byte[] splitPoint) throws IOException { + AdminProtocol admin = + this.connection.getAdmin(sn.getHostname(), sn.getPort()); + ProtobufUtil.split(admin, hri, splitPoint); + } + + /** + * Modify an existing table, more IRB friendly version. + * Asynchronous operation. This means that it may be a while before your + * schema change is updated across all of the table. + * + * @param tableName name of table. + * @param htd modified description of the table + * @throws IOException if a remote or network exception occurs + */ + public void modifyTable(final byte [] tableName, final HTableDescriptor htd) + throws IOException { + execute(new MasterAdminCallable() { + @Override + public Void call() throws ServiceException { + ModifyTableRequest request = RequestConverter.buildModifyTableRequest(tableName, htd); + masterAdmin.modifyTable(null, request); + return null; + } + }); + } + + /** + * @param tableNameOrRegionName Name of a table or name of a region. + * @param ct A {@link CatalogTracker} instance (caller of this method usually has one). + * @return a pair of HRegionInfo and ServerName if tableNameOrRegionName is + * a verified region name (we call {@link MetaReader#getRegion( CatalogTracker, byte[])} + * else null. + * Throw an exception if tableNameOrRegionName is null. 
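To illustrate the split and modifyTable calls above, a sketch under the assumption that a table named "usertable" already exists (its first descriptor from getTableDescriptors() is reused directly); the split row and the new MAX_FILESIZE value are placeholders.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitAndModifySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Ask the region holding "row-5000" to split at that row (asynchronous).
      admin.split("usertable", "row-5000");

      // Raise the region max file size; the change rolls out asynchronously.
      HTableDescriptor htd =
          admin.getTableDescriptors(Arrays.asList("usertable"))[0];
      htd.setMaxFileSize(4L * 1024 * 1024 * 1024);
      admin.modifyTable(Bytes.toBytes("usertable"), htd);
    } finally {
      admin.close();
    }
  }
}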
+ * @throws IOException + */ + Pair getRegion(final byte[] tableNameOrRegionName, + final CatalogTracker ct) throws IOException { + if (tableNameOrRegionName == null) { + throw new IllegalArgumentException("Pass a table name or region name"); + } + Pair pair = MetaReader.getRegion(ct, tableNameOrRegionName); + if (pair == null) { + final AtomicReference> result = + new AtomicReference>(null); + final String encodedName = Bytes.toString(tableNameOrRegionName); + MetaScannerVisitor visitor = new MetaScannerVisitorBase() { + @Override + public boolean processRow(Result data) throws IOException { + HRegionInfo info = HRegionInfo.getHRegionInfo(data); + if (info == null) { + LOG.warn("No serialized HRegionInfo in " + data); + return true; + } + if (!encodedName.equals(info.getEncodedName())) return true; + ServerName sn = HRegionInfo.getServerName(data); + result.set(new Pair(info, sn)); + return false; // found the region, stop + } + }; + + MetaScanner.metaScan(conf, visitor); + pair = result.get(); + } + return pair; + } + + /** + * Convert the table name byte array into a table name string and check if table + * exists or not. + * @param tableNameBytes Name of a table. + * @param ct A {@link CatalogTracker} instance (caller of this method usually has one). + * @return tableName in string form. + * @throws IOException if a remote or network exception occurs. + * @throws TableNotFoundException if table does not exist. + */ + private String tableNameString(final byte[] tableNameBytes, CatalogTracker ct) + throws IOException { + String tableNameString = Bytes.toString(tableNameBytes); + if (!MetaReader.tableExists(ct, tableNameString)) { + throw new TableNotFoundException(tableNameString); + } + return tableNameString; + } + + /** + * Shuts down the HBase cluster + * @throws IOException if a remote or network exception occurs + */ + public synchronized void shutdown() throws IOException { + execute(new MasterAdminCallable() { + @Override + public Void call() throws ServiceException { + masterAdmin.shutdown(null,ShutdownRequest.newBuilder().build()); + return null; + } + }); + } + + /** + * Shuts down the current HBase master only. + * Does not shutdown the cluster. 
+ * @see #shutdown() + * @throws IOException if a remote or network exception occurs + */ + public synchronized void stopMaster() throws IOException { + execute(new MasterAdminCallable() { + @Override + public Void call() throws ServiceException { + masterAdmin.stopMaster(null,StopMasterRequest.newBuilder().build()); + return null; + } + }); + } + + /** + * Stop the designated regionserver + * @param hostnamePort Hostname and port delimited by a : as in + * example.org:1234 + * @throws IOException if a remote or network exception occurs + */ + public synchronized void stopRegionServer(final String hostnamePort) + throws IOException { + String hostname = Addressing.parseHostname(hostnamePort); + int port = Addressing.parsePort(hostnamePort); + AdminProtocol admin = + this.connection.getAdmin(hostname, port); + StopServerRequest request = RequestConverter.buildStopServerRequest( + "Called by admin client " + this.connection.toString()); + try { + admin.stopServer(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + /** + * @return cluster status + * @throws IOException if a remote or network exception occurs + */ + public ClusterStatus getClusterStatus() throws IOException { + return execute(new MasterMonitorCallable() { + @Override + public ClusterStatus call() throws ServiceException { + GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest(); + return ClusterStatus.convert(masterMonitor.getClusterStatus(null,req).getClusterStatus()); + } + }); + } + + private HRegionLocation getFirstMetaServerForTable(final byte [] tableName) + throws IOException { + return connection.locateRegion(HConstants.META_TABLE_NAME, + HRegionInfo.createRegionName(tableName, null, HConstants.NINES, false)); + } + + /** + * @return Configuration used by the instance. + */ + public Configuration getConfiguration() { + return this.conf; + } + + /** + * Check to see if HBase is running. Throw an exception if not. + * We consider that HBase is running if ZooKeeper and Master are running. + * + * @param conf system configuration + * @throws MasterNotRunningException if the master is not running + * @throws ZooKeeperConnectionException if unable to connect to zookeeper + */ + public static void checkHBaseAvailable(Configuration conf) + throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException { + Configuration copyOfConf = HBaseConfiguration.create(conf); + + // We set it to make it fail as soon as possible if HBase is not available + copyOfConf.setInt("hbase.client.retries.number", 1); + copyOfConf.setInt("zookeeper.recovery.retry", 0); + + HConnectionManager.HConnectionImplementation connection + = (HConnectionManager.HConnectionImplementation) + HConnectionManager.getConnection(copyOfConf); + + try { + // Check ZK first. 
+ // If the connection exists, we may have a connection to ZK that does + // not work anymore + ZooKeeperKeepAliveConnection zkw = null; + try { + zkw = connection.getKeepAliveZooKeeperWatcher(); + zkw.getRecoverableZooKeeper().getZooKeeper().exists( + zkw.baseZNode, false); + + } catch (IOException e) { + throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); + } catch (KeeperException e) { + throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); + } finally { + if (zkw != null) { + zkw.close(); + } + } + + // Check Master + connection.isMasterRunning(); + + } finally { + connection.close(); + } + } + + /** + * get the regions of a given table. + * + * @param tableName the name of the table + * @return Ordered list of {@link HRegionInfo}. + * @throws IOException + */ + public List getTableRegions(final byte[] tableName) + throws IOException { + CatalogTracker ct = getCatalogTracker(); + List Regions = null; + try { + Regions = MetaReader.getTableRegions(ct, tableName, true); + } finally { + cleanupCatalogTracker(ct); + } + return Regions; + } + + @Override + public void close() throws IOException { + if (this.connection != null) { + this.connection.close(); + } + } + + /** + * Get tableDescriptors + * @param tableNames List of table names + * @return HTD[] the tableDescriptor + * @throws IOException if a remote or network exception occurs + */ + public HTableDescriptor[] getTableDescriptors(List tableNames) + throws IOException { + return this.connection.getHTableDescriptors(tableNames); + } + + /** + * Roll the log writer. That is, start writing log messages to a new file. + * + * @param serverName + * The servername of the regionserver. A server name is made of host, + * port and startcode. This is mandatory. Here is an example: + * host187.example.com,60020,1289493121758 + * @return If lots of logs, flush the returned regions so next time through + * we can clean logs. Returns null if nothing to flush. Names are actual + * region names as returned by {@link HRegionInfo#getEncodedName()} + * @throws IOException if a remote or network exception occurs + * @throws FailedLogCloseException + */ + public synchronized byte[][] rollHLogWriter(String serverName) + throws IOException, FailedLogCloseException { + ServerName sn = new ServerName(serverName); + AdminProtocol admin = this.connection.getAdmin( + sn.getHostname(), sn.getPort()); + RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest();; + try { + RollWALWriterResponse response = admin.rollWALWriter(null, request); + int regionCount = response.getRegionToFlushCount(); + byte[][] regionsToFlush = new byte[regionCount][]; + for (int i = 0; i < regionCount; i++) { + ByteString region = response.getRegionToFlush(i); + regionsToFlush[i] = region.toByteArray(); + } + return regionsToFlush; + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + public String[] getMasterCoprocessors() { + try { + return getClusterStatus().getMasterCoprocessors(); + } catch (IOException e) { + LOG.error("Could not getClusterStatus()",e); + return null; + } + } + + /** + * Get the current compaction state of a table or region. + * It could be in a major compaction, a minor compaction, both, or none. 
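A sketch combining the availability check, cluster status, and region-listing calls above; "usertable" is a placeholder, and the ClusterStatus.getServers() accessor is assumed from the contemporaneous ClusterStatus API rather than from this hunk.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class ClusterInfoSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Fails fast (a single retry) if ZooKeeper or the master cannot be reached.
    HBaseAdmin.checkHBaseAvailable(conf);

    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      ClusterStatus status = admin.getClusterStatus();
      System.out.println("Live region servers: " + status.getServers().size());

      List<HRegionInfo> regions = admin.getTableRegions(Bytes.toBytes("usertable"));
      System.out.println("usertable has " + regions.size() + " regions");
    } finally {
      admin.close();
    }
  }
}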
+ * + * @param tableNameOrRegionName table or region to major compact + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + * @return the current compaction state + */ + public CompactionState getCompactionState(final String tableNameOrRegionName) + throws IOException, InterruptedException { + return getCompactionState(Bytes.toBytes(tableNameOrRegionName)); + } + + /** + * Get the current compaction state of a table or region. + * It could be in a major compaction, a minor compaction, both, or none. + * + * @param tableNameOrRegionName table or region to major compact + * @throws IOException if a remote or network exception occurs + * @throws InterruptedException + * @return the current compaction state + */ + public CompactionState getCompactionState(final byte [] tableNameOrRegionName) + throws IOException, InterruptedException { + CompactionState state = CompactionState.NONE; + CatalogTracker ct = getCatalogTracker(); + try { + Pair regionServerPair + = getRegion(tableNameOrRegionName, ct); + if (regionServerPair != null) { + if (regionServerPair.getSecond() == null) { + throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName)); + } else { + ServerName sn = regionServerPair.getSecond(); + AdminProtocol admin = + this.connection.getAdmin(sn.getHostname(), sn.getPort()); + GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest( + regionServerPair.getFirst().getRegionName(), true); + GetRegionInfoResponse response = admin.getRegionInfo(null, request); + return response.getCompactionState(); + } + } else { + final String tableName = tableNameString(tableNameOrRegionName, ct); + List> pairs = + MetaReader.getTableRegionsAndLocations(ct, tableName); + for (Pair pair: pairs) { + if (pair.getFirst().isOffline()) continue; + if (pair.getSecond() == null) continue; + try { + ServerName sn = pair.getSecond(); + AdminProtocol admin = + this.connection.getAdmin(sn.getHostname(), sn.getPort()); + GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest( + pair.getFirst().getRegionName(), true); + GetRegionInfoResponse response = admin.getRegionInfo(null, request); + switch (response.getCompactionState()) { + case MAJOR_AND_MINOR: + return CompactionState.MAJOR_AND_MINOR; + case MAJOR: + if (state == CompactionState.MINOR) { + return CompactionState.MAJOR_AND_MINOR; + } + state = CompactionState.MAJOR; + break; + case MINOR: + if (state == CompactionState.MAJOR) { + return CompactionState.MAJOR_AND_MINOR; + } + state = CompactionState.MINOR; + break; + case NONE: + default: // nothing, continue + } + } catch (NotServingRegionException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Trying to get compaction state of " + + pair.getFirst() + ": " + + StringUtils.stringifyException(e)); + } + } + } + } + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + cleanupCatalogTracker(ct); + } + return state; + } + + /** + * @see {@link #execute(MasterAdminCallable)} + */ + private abstract static class MasterAdminCallable implements Callable{ + protected MasterAdminKeepAliveConnection masterAdmin; + } + + /** + * @see {@link #execute(MasterMonitorCallable)} + */ + private abstract static class MasterMonitorCallable implements Callable { + protected MasterMonitorKeepAliveConnection masterMonitor; + } + + /** + * This method allows to execute a function requiring a connection to + * master without having to manage the connection creation/close. 
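The compaction-state query above pairs naturally with majorCompact; a rough polling sketch, where "usertable" and the five-second interval are placeholders and the state is compared by name so the example does not depend on the generated CompactionState enum's package.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CompactionStateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Request a major compaction; the call returns immediately.
      admin.majorCompact("usertable");
      String state;
      do {
        Thread.sleep(5000); // placeholder polling interval
        // Enum toString() yields the constant name, e.g. "NONE" or "MAJOR".
        state = String.valueOf(admin.getCompactionState("usertable"));
        System.out.println("Compaction state: " + state);
      } while (!"NONE".equals(state));
    } finally {
      admin.close();
    }
  }
}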
+ * Create a {@link MasterAdminCallable} to use it. + */ + private V execute(MasterAdminCallable function) throws IOException { + function.masterAdmin = connection.getKeepAliveMasterAdmin(); + try { + return executeCallable(function); + } finally { + function.masterAdmin.close(); + } + } + + /** + * This method allows to execute a function requiring a connection to + * master without having to manage the connection creation/close. + * Create a {@link MasterAdminCallable} to use it. + */ + private V execute(MasterMonitorCallable function) throws IOException { + function.masterMonitor = connection.getKeepAliveMasterMonitor(); + try { + return executeCallable(function); + } finally { + function.masterMonitor.close(); + } + } + + /** + * Helper function called by other execute functions. + */ + private V executeCallable(Callable function) throws IOException { + try { + return function.call(); + } catch (RemoteException re) { + throw re.unwrapRemoteException(); + } catch (IOException e) { + throw e; + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } catch (Exception e) { + // This should not happen... + throw new IOException("Unexpected exception when calling master", e); + } + } + + /** + * Creates and returns a {@link com.google.protobuf.RpcChannel} instance + * connected to the active master. + * + *

+ * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published + * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations: + *

+ * + *
+ *
+   * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
+   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
+   * MyCallRequest request = MyCallRequest.newBuilder()
+   *     ...
+   *     .build();
+   * MyCallResponse response = service.myCall(null, request);
+   * 
+ * + * @return A MasterCoprocessorRpcChannel instance + */ + public CoprocessorRpcChannel coprocessorService() { + return new MasterCoprocessorRpcChannel(connection); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java new file mode 100644 index 0000000..9456b9c --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -0,0 +1,364 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.ExecutorService; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MasterAdminProtocol; +import org.apache.hadoop.hbase.MasterMonitorProtocol; +import org.apache.hadoop.hbase.MasterNotRunningException; +import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.client.coprocessor.Batch; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; + +/** + * Cluster connection. Hosts a connection to the ZooKeeper ensemble and + * thereafter into the HBase cluster. Knows how to locate regions out on the cluster, + * keeps a cache of locations and then knows how to recalibrate after they move. + * {@link HConnectionManager} manages instances of this class. + * + *

HConnections are used by {@link HTable} mostly but also by + * {@link HBaseAdmin}, {@link CatalogTracker}, + * and {@link ZooKeeperWatcher}. HConnection instances can be shared. Sharing + * is usually what you want because rather than each HConnection instance + * having to do its own discovery of regions out on the cluster, instead, all + * clients get to share the one cache of locations. Sharing makes cleanup of + * HConnections awkward. See {@link HConnectionManager} for cleanup + * discussion. + * + * @see HConnectionManager + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public interface HConnection extends Abortable, Closeable { + /** + * @return Configuration instance being used by this HConnection instance. + */ + public Configuration getConfiguration(); + + /** + * Retrieve ZooKeeperWatcher used by this connection. + * @return ZooKeeperWatcher handle being used by the connection. + * @throws IOException if a remote or network exception occurs + * @deprecated Removed because it was a mistake exposing zookeeper in this + * interface (ZooKeeper is an implementation detail). + * Deprecated in HBase 0.94 + */ + @Deprecated + public ZooKeeperWatcher getZooKeeperWatcher() throws IOException; + + /** @return - true if the master server is running */ + public boolean isMasterRunning() + throws MasterNotRunningException, ZooKeeperConnectionException; + + /** + * A table that isTableEnabled == false and isTableDisabled == false + * is possible. This happens when a table has a lot of regions + * that must be processed. + * @param tableName table name + * @return true if the table is enabled, false otherwise + * @throws IOException if a remote or network exception occurs + */ + public boolean isTableEnabled(byte[] tableName) throws IOException; + + /** + * @param tableName table name + * @return true if the table is disabled, false otherwise + * @throws IOException if a remote or network exception occurs + */ + public boolean isTableDisabled(byte[] tableName) throws IOException; + + /** + * @param tableName table name + * @return true if all regions of the table are available, false otherwise + * @throws IOException if a remote or network exception occurs + */ + public boolean isTableAvailable(byte[] tableName) throws IOException; + + /** + * List all the userspace tables. In other words, scan the META table. + * + * If we wanted this to be really fast, we could implement a special + * catalog table that just contains table names and their descriptors. + * Right now, it only exists as part of the META table's region info. + * + * @return - returns an array of HTableDescriptors + * @throws IOException if a remote or network exception occurs + */ + public HTableDescriptor[] listTables() throws IOException; + + /** + * @param tableName table name + * @return table metadata + * @throws IOException if a remote or network exception occurs + */ + public HTableDescriptor getHTableDescriptor(byte[] tableName) + throws IOException; + + /** + * Find the location of the region of tableName that row + * lives in. + * @param tableName name of the table row is in + * @param row row key you're trying to find the region of + * @return HRegionLocation that describes where to find the region in + * question + * @throws IOException if a remote or network exception occurs + */ + public HRegionLocation locateRegion(final byte [] tableName, + final byte [] row) + throws IOException; + + /** + * Allows flushing the region cache. 
+ */ + public void clearRegionCache(); + + /** + * Allows flushing the region cache of all locations that pertain to + * tableName + * @param tableName Name of the table whose regions we are to remove from + * cache. + */ + public void clearRegionCache(final byte [] tableName); + + /** + * Find the location of the region of tableName that row + * lives in, ignoring any value that might be in the cache. + * @param tableName name of the table row is in + * @param row row key you're trying to find the region of + * @return HRegionLocation that describes where to find the region in + * question + * @throws IOException if a remote or network exception occurs + */ + public HRegionLocation relocateRegion(final byte [] tableName, + final byte [] row) + throws IOException; + + /** + * Gets the location of the region of regionName. + * @param regionName name of the region to locate + * @return HRegionLocation that describes where to find the region in + * question + * @throws IOException if a remote or network exception occurs + */ + public HRegionLocation locateRegion(final byte [] regionName) + throws IOException; + + /** + * Gets the locations of all regions in the specified table, tableName. + * @param tableName table to get regions of + * @return list of region locations for all regions of table + * @throws IOException + */ + public List locateRegions(byte[] tableName) + throws IOException; + + /** + * Returns a {@link MasterAdminProtocol} to the active master + */ + public MasterAdminProtocol getMasterAdmin() throws IOException; + + /** + * Returns an {@link MasterMonitorProtocol} to the active master + */ + public MasterMonitorProtocol getMasterMonitor() throws IOException; + + + /** + * Establishes a connection to the region server at the specified address. + * @param hostname RegionServer hostname + * @param port RegionServer port + * @return proxy for HRegionServer + * @throws IOException if a remote or network exception occurs + * + */ + public AdminProtocol getAdmin(final String hostname, final int port) + throws IOException; + + /** + * Establishes a connection to the region server at the specified address, and return + * a region client protocol. + * + * @param hostname RegionServer hostname + * @param port RegionServer port + * @return ClientProtocol proxy for RegionServer + * @throws IOException if a remote or network exception occurs + * + */ + public ClientProtocol getClient(final String hostname, final int port) + throws IOException; + + /** + * Establishes a connection to the region server at the specified address. + * @param hostname RegionServer hostname + * @param port RegionServer port + * @param getMaster - do we check if master is alive + * @return proxy for HRegionServer + * @throws IOException if a remote or network exception occurs + */ + public AdminProtocol getAdmin(final String hostname, + final int port, boolean getMaster) + throws IOException; + + /** + * Find region location hosting passed row + * @param tableName table name + * @param row Row to find. + * @param reload If true do not use cache, otherwise bypass. + * @return Location of row. + * @throws IOException if a remote or network exception occurs + */ + HRegionLocation getRegionLocation(byte [] tableName, byte [] row, + boolean reload) + throws IOException; + + /** + * Pass in a ServerCallable with your particular bit of logic defined and + * this method will manage the process of doing retries with timed waits + * and refinds of missing regions. 
+ * + * @param the type of the return value + * @param callable callable to run + * @return an object of type T + * @throws IOException if a remote or network exception occurs + * @throws RuntimeException other unspecified error + */ + @Deprecated + public T getRegionServerWithRetries(ServerCallable callable) + throws IOException, RuntimeException; + + /** + * Pass in a ServerCallable with your particular bit of logic defined and + * this method will pass it to the defined region server. + * @param the type of the return value + * @param callable callable to run + * @return an object of type T + * @throws IOException if a remote or network exception occurs + * @throws RuntimeException other unspecified error + */ + @Deprecated + public T getRegionServerWithoutRetries(ServerCallable callable) + throws IOException, RuntimeException; + + /** + * Process a mixed batch of Get, Put and Delete actions. All actions for a + * RegionServer are forwarded in one RPC call. + * + * + * @param actions The collection of actions. + * @param tableName Name of the hbase table + * @param pool thread pool for parallel execution + * @param results An empty array, same size as list. If an exception is thrown, + * you can test here for partial results, and to determine which actions + * processed successfully. + * @throws IOException if there are problems talking to META. Per-item + * exceptions are stored in the results array. + * @deprecated since 0.96 - Use {@link HTableInterface#batch} instead + */ + @Deprecated + public void processBatch(List actions, final byte[] tableName, + ExecutorService pool, Object[] results) + throws IOException, InterruptedException; + + /** + * Parameterized batch processing, allowing varying return types for different + * {@link Row} implementations. + * @deprecated since 0.96 - Use {@link HTableInterface#batchCallback} instead + */ + @Deprecated + public void processBatchCallback(List list, + byte[] tableName, + ExecutorService pool, + Object[] results, + Batch.Callback callback) throws IOException, InterruptedException; + + /** + * Enable or disable region cache prefetch for the table. It will be + * applied for the given table's all HTable instances within this + * connection. By default, the cache prefetch is enabled. + * @param tableName name of table to configure. + * @param enable Set to true to enable region cache prefetch. + */ + public void setRegionCachePrefetch(final byte[] tableName, + final boolean enable); + + /** + * Check whether region cache prefetch is enabled or not. + * @param tableName name of table to check + * @return true if table's region cache prefetch is enabled. Otherwise + * it is disabled. + */ + public boolean getRegionCachePrefetch(final byte[] tableName); + + /** + * Scan zookeeper to get the number of region servers + * @return the number of region servers that are currently running + * @throws IOException if a remote or network exception occurs + * @deprecated This method will be changed from public to package protected. 
+ */ + @Deprecated + public int getCurrentNrHRS() throws IOException; + + /** + * @param tableNames List of table names + * @return HTD[] table metadata + * @throws IOException if a remote or network exception occurs + */ + public HTableDescriptor[] getHTableDescriptors(List tableNames) + throws IOException; + + /** + * @return true if this connection is closed + */ + public boolean isClosed(); + + /** + * Clear any caches that pertain to server name sn + * @param sn A server name as hostname:port + */ + public void clearCaches(final String sn); + + /** + * This function allows HBaseAdminProtocol and potentially others to get a shared MasterMonitor + * connection. + * @return The shared instance. Never returns null. + * @throws MasterNotRunningException + */ + public MasterMonitorKeepAliveConnection getKeepAliveMasterMonitor() + throws MasterNotRunningException; + + /** + * This function allows HBaseAdmin and potentially others to get a shared MasterAdminProtocol + * connection. + * @return The shared instance. Never returns null. + * @throws MasterNotRunningException + */ + public MasterAdminKeepAliveConnection getKeepAliveMasterAdmin() throws MasterNotRunningException; +} + diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java new file mode 100644 index 0000000..5cfd89f --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -0,0 +1,2378 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import java.io.Closeable; +import java.io.IOException; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.lang.reflect.UndeclaredThrowableException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Chore; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MasterAdminProtocol; +import org.apache.hadoop.hbase.MasterMonitorProtocol; +import org.apache.hadoop.hbase.MasterNotRunningException; +import org.apache.hadoop.hbase.MasterProtocol; +import org.apache.hadoop.hbase.RegionMovedException; +import org.apache.hadoop.hbase.RemoteExceptionHandler; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; +import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; +import org.apache.hadoop.hbase.client.coprocessor.Batch; +import org.apache.hadoop.hbase.ipc.HBaseClientRPC; +import org.apache.hadoop.hbase.ipc.VersionedProtocol; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.SoftValueSortedMap; +import org.apache.hadoop.hbase.util.Triple; +import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; +import org.apache.hadoop.hbase.zookeeper.RootRegionTracker; +import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ZKTableReadOnly; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import 
org.apache.hadoop.ipc.RemoteException; +import org.apache.zookeeper.KeeperException; + +import com.google.protobuf.ServiceException; + +/** + * A non-instantiable class that manages {@link HConnection}s. + * This class has a static Map of {@link HConnection} instances keyed by + * {@link Configuration}; all invocations of {@link #getConnection(Configuration)} + * that pass the same {@link Configuration} instance will be returned the same + * {@link HConnection} instance (Adding properties to a Configuration + * instance does not change its object identity). Sharing {@link HConnection} + * instances is usually what you want; all clients of the {@link HConnection} + * instances share the HConnections' cache of Region locations rather than each + * having to discover for itself the location of meta, root, etc. It makes + * sense for the likes of the pool of HTables class {@link HTablePool}, for + * instance (If concerned that a single {@link HConnection} is insufficient + * for sharing amongst clients in say an heavily-multithreaded environment, + * in practise its not proven to be an issue. Besides, {@link HConnection} is + * implemented atop Hadoop RPC and as of this writing, Hadoop RPC does a + * connection per cluster-member, exclusively). + * + *

But sharing connections + * makes clean up of {@link HConnection} instances a little awkward. Currently, + * clients cleanup by calling + * {@link #deleteConnection(Configuration, boolean)}. This will shutdown the + * zookeeper connection the HConnection was using and clean up all + * HConnection resources as well as stopping proxies to servers out on the + * cluster. Not running the cleanup will not end the world; it'll + * just stall the closeup some and spew some zookeeper connection failed + * messages into the log. Running the cleanup on a {@link HConnection} that is + * subsequently used by another will cause breakage so be careful running + * cleanup. + *

To create a {@link HConnection} that is not shared by others, you can + * create a new {@link Configuration} instance, pass this new instance to + * {@link #getConnection(Configuration)}, and then when done, close it up by + * doing something like the following: + *

+ * {@code
+ * Configuration newConfig = new Configuration(originalConf);
+ * HConnection connection = HConnectionManager.getConnection(newConfig);
+ * // Use the connection to your hearts' delight and then when done...
+ * HConnectionManager.deleteConnection(newConfig, true);
+ * }
+ * 
+ *

Cleanup used to be done inside in a shutdown hook. On startup we'd + * register a shutdown hook that called {@link #deleteAllConnections(boolean)} + * on its way out but the order in which shutdown hooks run is not defined so + * were problematic for clients of HConnection that wanted to register their + * own shutdown hooks so we removed ours though this shifts the onus for + * cleanup to the client. + */ +@SuppressWarnings("serial") +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class HConnectionManager { + // An LRU Map of HConnectionKey -> HConnection (TableServer). All + // access must be synchronized. This map is not private because tests + // need to be able to tinker with it. + static final Map HBASE_INSTANCES; + + public static final int MAX_CACHED_HBASE_INSTANCES; + + /** Parameter name for what client protocol to use. */ + public static final String CLIENT_PROTOCOL_CLASS = "hbase.clientprotocol.class"; + + /** Default client protocol class name. */ + public static final String DEFAULT_CLIENT_PROTOCOL_CLASS = ClientProtocol.class.getName(); + + /** Parameter name for what admin protocol to use. */ + public static final String REGION_PROTOCOL_CLASS = "hbase.adminprotocol.class"; + + /** Default admin protocol class name. */ + public static final String DEFAULT_ADMIN_PROTOCOL_CLASS = AdminProtocol.class.getName(); + + private static final Log LOG = LogFactory.getLog(HConnectionManager.class); + + static { + // We set instances to one more than the value specified for {@link + // HConstants#ZOOKEEPER_MAX_CLIENT_CNXNS}. By default, the zk default max + // connections to the ensemble from the one client is 30, so in that case we + // should run into zk issues before the LRU hit this value of 31. + MAX_CACHED_HBASE_INSTANCES = HBaseConfiguration.create().getInt( + HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, + HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS) + 1; + HBASE_INSTANCES = new LinkedHashMap( + (int) (MAX_CACHED_HBASE_INSTANCES / 0.75F) + 1, 0.75F, true) { + @Override + protected boolean removeEldestEntry( + Map.Entry eldest) { + return size() > MAX_CACHED_HBASE_INSTANCES; + } + }; + } + + /* + * Non-instantiable. + */ + protected HConnectionManager() { + super(); + } + + /** + * Get the connection that goes with the passed conf + * configuration instance. + * If no current connection exists, method creates a new connection for the + * passed conf instance. + * @param conf configuration + * @return HConnection object for conf + * @throws ZooKeeperConnectionException + */ + public static HConnection getConnection(Configuration conf) + throws ZooKeeperConnectionException { + HConnectionKey connectionKey = new HConnectionKey(conf); + synchronized (HBASE_INSTANCES) { + HConnectionImplementation connection = HBASE_INSTANCES.get(connectionKey); + if (connection == null) { + connection = new HConnectionImplementation(conf, true); + HBASE_INSTANCES.put(connectionKey, connection); + } + connection.incCount(); + return connection; + } + } + + /** + * Create a new HConnection instance using the passed conf + * instance. + * Note: This bypasses the usual HConnection life cycle management! + * Use this with caution, the caller is responsible for closing the + * created connection. 
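As the note above says, createConnection() hands lifecycle management to the caller; a minimal sketch of the unshared pattern, with "usertable" and the row key as placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.util.Bytes;

public class UnmanagedConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Not tracked in HConnectionManager's cache; we must close it ourselves.
    HConnection connection = HConnectionManager.createConnection(conf);
    try {
      HRegionLocation loc = connection.getRegionLocation(
          Bytes.toBytes("usertable"), Bytes.toBytes("row-0001"), false);
      System.out.println("row-0001 is served by "
          + loc.getHostname() + ":" + loc.getPort());
    } finally {
      connection.close();
    }
  }
}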
+ * @param conf configuration + * @return HConnection object for conf + * @throws ZooKeeperConnectionException + */ + public static HConnection createConnection(Configuration conf) + throws ZooKeeperConnectionException { + return new HConnectionImplementation(conf, false); + } + + /** + * Delete connection information for the instance specified by configuration. + * If there are no more references to it, this will then close connection to + * the zookeeper ensemble and let go of all resources. + * + * @param conf + * configuration whose identity is used to find {@link HConnection} + * instance. + * @param stopProxy + * Shuts down all the proxy's put up to cluster members including to + * cluster HMaster. Calls + * {@link HBaseClientRPC#stopProxy(org.apache.hadoop.hbase.ipc.VersionedProtocol)} + * . + */ + public static void deleteConnection(Configuration conf, boolean stopProxy) { + deleteConnection(new HConnectionKey(conf), stopProxy, false); + } + + /** + * Delete stale connection information for the instance specified by configuration. + * This will then close connection to + * the zookeeper ensemble and let go of all resources. + * + * @param connection + */ + public static void deleteStaleConnection(HConnection connection) { + deleteConnection(connection, true, true); + } + + /** + * Delete information for all connections. + * @param stopProxy stop the proxy as well + * @throws IOException + */ + public static void deleteAllConnections(boolean stopProxy) { + synchronized (HBASE_INSTANCES) { + Set connectionKeys = new HashSet(); + connectionKeys.addAll(HBASE_INSTANCES.keySet()); + for (HConnectionKey connectionKey : connectionKeys) { + deleteConnection(connectionKey, stopProxy, false); + } + HBASE_INSTANCES.clear(); + } + } + + private static void deleteConnection(HConnection connection, boolean stopProxy, + boolean staleConnection) { + synchronized (HBASE_INSTANCES) { + for (Entry connectionEntry : HBASE_INSTANCES + .entrySet()) { + if (connectionEntry.getValue() == connection) { + deleteConnection(connectionEntry.getKey(), stopProxy, staleConnection); + break; + } + } + } + } + + private static void deleteConnection(HConnectionKey connectionKey, + boolean stopProxy, boolean staleConnection) { + synchronized (HBASE_INSTANCES) { + HConnectionImplementation connection = HBASE_INSTANCES + .get(connectionKey); + if (connection != null) { + connection.decCount(); + if (connection.isZeroReference() || staleConnection) { + HBASE_INSTANCES.remove(connectionKey); + connection.close(stopProxy); + } else if (stopProxy) { + connection.stopProxyOnClose(stopProxy); + } + }else { + LOG.error("Connection not found in the list, can't delete it "+ + "(connection key="+connectionKey+"). May be the key was modified?"); + } + } + } + + /** + * It is provided for unit test cases which verify the behavior of region + * location cache prefetch. + * @return Number of cached regions for the table. + * @throws ZooKeeperConnectionException + */ + static int getCachedRegionCount(Configuration conf, + final byte[] tableName) + throws IOException { + return execute(new HConnectable(conf) { + @Override + public Integer connect(HConnection connection) { + return ((HConnectionImplementation) connection) + .getNumberOfCachedRegionLocations(tableName); + } + }); + } + + /** + * It's provided for unit test cases which verify the behavior of region + * location cache prefetch. + * @return true if the region where the table and row reside is cached. 
+ * @throws ZooKeeperConnectionException + */ + static boolean isRegionCached(Configuration conf, + final byte[] tableName, final byte[] row) throws IOException { + return execute(new HConnectable(conf) { + @Override + public Boolean connect(HConnection connection) { + return ((HConnectionImplementation) connection).isRegionCached(tableName, row); + } + }); + } + + /** + * This class makes it convenient for one to execute a command in the context + * of a {@link HConnection} instance based on the given {@link Configuration}. + * + *

+ * If you find yourself wanting to use a {@link HConnection} for a relatively + * short duration of time, and do not want to deal with the hassle of creating + * and cleaning up that resource, then you should consider using this + * convenience class. + * + * @param + * the return type of the {@link HConnectable#connect(HConnection)} + * method. + */ + public static abstract class HConnectable { + public Configuration conf; + + protected HConnectable(Configuration conf) { + this.conf = conf; + } + + public abstract T connect(HConnection connection) throws IOException; + } + + /** + * This convenience method invokes the given {@link HConnectable#connect} + * implementation using a {@link HConnection} instance that lasts just for the + * duration of that invocation. + * + * @param the return type of the connect method + * @param connectable the {@link HConnectable} instance + * @return the value returned by the connect method + * @throws IOException + */ + public static T execute(HConnectable connectable) throws IOException { + if (connectable == null || connectable.conf == null) { + return null; + } + Configuration conf = connectable.conf; + HConnection connection = HConnectionManager.getConnection(conf); + boolean connectSucceeded = false; + try { + T returnValue = connectable.connect(connection); + connectSucceeded = true; + return returnValue; + } finally { + try { + connection.close(); + } catch (Exception e) { + if (connectSucceeded) { + throw new IOException("The connection to " + connection + + " could not be deleted.", e); + } + } + } + } + + /** + * Denotes a unique key to a {@link HConnection} instance. + * + * In essence, this class captures the properties in {@link Configuration} + * that may be used in the process of establishing a connection. In light of + * that, if any new such properties are introduced into the mix, they must be + * added to the {@link HConnectionKey#properties} list. 
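The HConnectable helper and execute() method above are the short-lived-connection idiom; a sketch using an anonymous HConnectable, with "usertable" as a placeholder table. The connection is fetched, used, and released entirely inside execute().

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
import org.apache.hadoop.hbase.util.Bytes;

public class HConnectableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Boolean enabled = HConnectionManager.execute(new HConnectable<Boolean>(conf) {
      @Override
      public Boolean connect(HConnection connection) throws IOException {
        // Any HConnection call can go here; the connection is shared and
        // cleaned up by execute() when this method returns.
        return connection.isTableEnabled(Bytes.toBytes("usertable"));
      }
    });
    System.out.println("usertable enabled: " + enabled);
  }
}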
+ * + */ + public static class HConnectionKey { + public static String[] CONNECTION_PROPERTIES = new String[] { + HConstants.ZOOKEEPER_QUORUM, HConstants.ZOOKEEPER_ZNODE_PARENT, + HConstants.ZOOKEEPER_CLIENT_PORT, + HConstants.ZOOKEEPER_RECOVERABLE_WAITTIME, + HConstants.HBASE_CLIENT_PAUSE, HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.HBASE_CLIENT_RPC_MAXATTEMPTS, + HConstants.HBASE_RPC_TIMEOUT_KEY, + HConstants.HBASE_CLIENT_PREFETCH_LIMIT, + HConstants.HBASE_META_SCANNER_CACHING, + HConstants.HBASE_CLIENT_INSTANCE_ID }; + + private Map properties; + private String username; + + public HConnectionKey(Configuration conf) { + Map m = new HashMap(); + if (conf != null) { + for (String property : CONNECTION_PROPERTIES) { + String value = conf.get(property); + if (value != null) { + m.put(property, value); + } + } + } + this.properties = Collections.unmodifiableMap(m); + + try { + User currentUser = User.getCurrent(); + if (currentUser != null) { + username = currentUser.getName(); + } + } catch (IOException ioe) { + LOG.warn("Error obtaining current user, skipping username in HConnectionKey", + ioe); + } + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + if (username != null) { + result = username.hashCode(); + } + for (String property : CONNECTION_PROPERTIES) { + String value = properties.get(property); + if (value != null) { + result = prime * result + value.hashCode(); + } + } + + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + HConnectionKey that = (HConnectionKey) obj; + if (this.username != null && !this.username.equals(that.username)) { + return false; + } else if (this.username == null && that.username != null) { + return false; + } + if (this.properties == null) { + if (that.properties != null) { + return false; + } + } else { + if (that.properties == null) { + return false; + } + for (String property : CONNECTION_PROPERTIES) { + String thisValue = this.properties.get(property); + String thatValue = that.properties.get(property); + if (thisValue == thatValue) { + continue; + } + if (thisValue == null || !thisValue.equals(thatValue)) { + return false; + } + } + } + return true; + } + + @Override + public String toString() { + return "HConnectionKey{" + + "properties=" + properties + + ", username='" + username + '\'' + + '}'; + } + } + + /* Encapsulates connection to zookeeper and regionservers.*/ + static class HConnectionImplementation implements HConnection, Closeable { + static final Log LOG = LogFactory.getLog(HConnectionImplementation.class); + private final Class adminClass; + private final Class clientClass; + private final long pause; + private final int numRetries; + private final int maxRPCAttempts; + private final int rpcTimeout; + private final int prefetchRegionLimit; + + private volatile boolean closed; + private volatile boolean aborted; + + private final Object metaRegionLock = new Object(); + private final Object userRegionLock = new Object(); + + // We have a single lock for master & zk to prevent deadlocks. Having + // one lock for ZK and one lock for master is not possible: + // When creating a connection to master, we need a connection to ZK to get + // its address. But another thread could have taken the ZK lock, and could + // be waiting for the master lock => deadlock. 
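+ // The same lock also guards the keep-alive bookkeeping for both
+ // resources (see getKeepAliveZooKeeperWatcher, getKeepAliveMasterAdmin
+ // and the DelayedClosing chore below).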
+ private final Object masterAndZKLock = new Object(); + + private long keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE; + private final DelayedClosing delayedClosing = + DelayedClosing.createAndStart(this); + + + private final Configuration conf; + + // Known region ServerName.toString() -> RegionClient/Admin + private final ConcurrentHashMap> servers = + new ConcurrentHashMap>(); + private final ConcurrentHashMap connectionLock = + new ConcurrentHashMap(); + + /** + * Map of table to table {@link HRegionLocation}s. The table key is made + * by doing a {@link Bytes#mapKey(byte[])} of the table's name. + */ + private final Map> + cachedRegionLocations = + new HashMap>(); + + // The presence of a server in the map implies it's likely that there is an + // entry in cachedRegionLocations that map to this server; but the absence + // of a server in this map guarentees that there is no entry in cache that + // maps to the absent server. + private final Set cachedServers = + new HashSet(); + + // region cache prefetch is enabled by default. this set contains all + // tables whose region cache prefetch are disabled. + private final Set regionCachePrefetchDisabledTables = + new CopyOnWriteArraySet(); + + private boolean stopProxy; + private int refCount; + + // indicates whether this connection's life cycle is managed (by us) + private final boolean managed; + /** + * constructor + * @param conf Configuration object + */ + @SuppressWarnings("unchecked") + public HConnectionImplementation(Configuration conf, boolean managed) + throws ZooKeeperConnectionException { + this.conf = conf; + this.managed = managed; + String adminClassName = conf.get(REGION_PROTOCOL_CLASS, + DEFAULT_ADMIN_PROTOCOL_CLASS); + this.closed = false; + try { + this.adminClass = + (Class) Class.forName(adminClassName); + } catch (ClassNotFoundException e) { + throw new UnsupportedOperationException( + "Unable to find region server interface " + adminClassName, e); + } + String clientClassName = conf.get(CLIENT_PROTOCOL_CLASS, + DEFAULT_CLIENT_PROTOCOL_CLASS); + try { + this.clientClass = + (Class) Class.forName(clientClassName); + } catch (ClassNotFoundException e) { + throw new UnsupportedOperationException( + "Unable to find client protocol " + clientClassName, e); + } + this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, + HConstants.DEFAULT_HBASE_CLIENT_PAUSE); + this.numRetries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + this.maxRPCAttempts = conf.getInt( + HConstants.HBASE_CLIENT_RPC_MAXATTEMPTS, + HConstants.DEFAULT_HBASE_CLIENT_RPC_MAXATTEMPTS); + this.rpcTimeout = conf.getInt( + HConstants.HBASE_RPC_TIMEOUT_KEY, + HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + this.prefetchRegionLimit = conf.getInt( + HConstants.HBASE_CLIENT_PREFETCH_LIMIT, + HConstants.DEFAULT_HBASE_CLIENT_PREFETCH_LIMIT); + + retrieveClusterId(); + } + + /** + * An identifier that will remain the same for a given connection. + * @return + */ + public String toString(){ + return "hconnection 0x" + Integer.toHexString( hashCode() ); + } + + private String clusterId = null; + public final void retrieveClusterId(){ + if (conf.get(HConstants.CLUSTER_ID) != null){ + return; + } + + // No synchronized here, worse case we will retrieve it twice, that's + // not an issue. 
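+ // Resolution order for the block below: re-check the Configuration,
+ // then read the cluster id znode through a keep-alive ZooKeeper
+ // watcher, then fall back to the literal "default"; whatever is found
+ // is written back into the Configuration under HConstants.CLUSTER_ID.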
+ if (this.clusterId == null){ + this.clusterId = conf.get(HConstants.CLUSTER_ID); + if (this.clusterId == null) { + ZooKeeperKeepAliveConnection zkw = null; + try { + zkw = getKeepAliveZooKeeperWatcher(); + this.clusterId = ZKClusterId.readClusterIdZNode(zkw); + if (clusterId == null) { + LOG.info("ClusterId read in ZooKeeper is null"); + } + } catch (KeeperException e) { + LOG.warn("Can't retrieve clusterId from Zookeeper", e); + } catch (IOException e) { + LOG.warn("Can't retrieve clusterId from Zookeeper", e); + } finally { + if (zkw != null) { + zkw.close(); + } + } + if (this.clusterId == null) { + this.clusterId = "default"; + } + + LOG.info("ClusterId is " + clusterId); + } + } + + conf.set(HConstants.CLUSTER_ID, clusterId); + } + + @Override + public Configuration getConfiguration() { + return this.conf; + } + + private static class MasterProtocolState { + public MasterProtocol protocol; + public int userCount; + public long keepAliveUntil = Long.MAX_VALUE; + public final Class protocolClass; + public long version; + + public MasterProtocolState ( + final Class protocolClass, long version) { + this.protocolClass = protocolClass; + this.version = version; + } + } + + /** + * Create a new Master proxy. Try once only. + */ + private MasterProtocol createMasterInterface( + MasterProtocolState masterProtocolState) + throws IOException, KeeperException, ServiceException { + + ZooKeeperKeepAliveConnection zkw; + try { + zkw = getKeepAliveZooKeeperWatcher(); + } catch (IOException e) { + throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); + } + + try { + + checkIfBaseNodeAvailable(zkw); + ServerName sn = MasterAddressTracker.getMasterAddress(zkw); + if (sn == null) { + String msg = + "ZooKeeper available but no active master location found"; + LOG.info(msg); + throw new MasterNotRunningException(msg); + } + + + InetSocketAddress isa = + new InetSocketAddress(sn.getHostname(), sn.getPort()); + MasterProtocol tryMaster = (MasterProtocol) HBaseClientRPC.getProxy( + masterProtocolState.protocolClass, + masterProtocolState.version, + isa, this.conf, this.rpcTimeout); + + if (tryMaster.isMasterRunning( + null, RequestConverter.buildIsMasterRunningRequest()).getIsMasterRunning()) { + return tryMaster; + } else { + HBaseClientRPC.stopProxy(tryMaster); + String msg = "Can create a proxy to master, but it is not running"; + LOG.info(msg); + throw new MasterNotRunningException(msg); + } + } finally { + zkw.close(); + } + } + + /** + * Create a master, retries if necessary. + */ + private MasterProtocol createMasterWithRetries( + MasterProtocolState masterProtocolState) throws MasterNotRunningException { + + // The lock must be at the beginning to prevent multiple master creation + // (and leaks) in a multithread context + synchronized (this.masterAndZKLock) { + Exception exceptionCaught = null; + MasterProtocol master = null; + int tries = 0; + while ( + !this.closed && master == null + ) { + tries++; + try { + master = createMasterInterface(masterProtocolState); + } catch (IOException e) { + exceptionCaught = e; + } catch (KeeperException e) { + exceptionCaught = e; + } catch (ServiceException e) { + exceptionCaught = e; + } + + if (exceptionCaught != null) + // It failed. 
If it's not the last try, we're going to wait a little + if (tries < numRetries) { + long pauseTime = ConnectionUtils.getPauseTime(this.pause, tries); + LOG.info("getMaster attempt " + tries + " of " + numRetries + + " failed; retrying after sleep of " +pauseTime, exceptionCaught); + + try { + Thread.sleep(pauseTime); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException( + "Thread was interrupted while trying to connect to master.", e); + } + + } else { + // Enough tries, we stop now + LOG.info("getMaster attempt " + tries + " of " + numRetries + + " failed; no more retrying.", exceptionCaught); + throw new MasterNotRunningException(exceptionCaught); + } + } + + if (master == null) { + // implies this.closed true + throw new MasterNotRunningException( + "Connection was closed while trying to get master"); + } + + return master; + } + } + + private void checkIfBaseNodeAvailable(ZooKeeperWatcher zkw) + throws MasterNotRunningException { + String errorMsg; + try { + if (ZKUtil.checkExists(zkw, zkw.baseZNode) == -1) { + errorMsg = "The node " + zkw.baseZNode+" is not in ZooKeeper. " + + "It should have been written by the master. " + + "Check the value configured in 'zookeeper.znode.parent'. " + + "There could be a mismatch with the one configured in the master."; + LOG.error(errorMsg); + throw new MasterNotRunningException(errorMsg); + } + } catch (KeeperException e) { + errorMsg = "Can't get connection to ZooKeeper: " + e.getMessage(); + LOG.error(errorMsg); + throw new MasterNotRunningException(errorMsg, e); + } + } + + /** + * @return true if the master is running, throws an exception otherwise + * @throws MasterNotRunningException - if the master is not running + * @throws ZooKeeperConnectionException + */ + @Override + public boolean isMasterRunning() + throws MasterNotRunningException, ZooKeeperConnectionException { + // When getting the master proxy connection, we check it's running, + // so if there is no exception, it means we've been able to get a + // connection on a running master + getKeepAliveMasterMonitor().close(); + return true; + } + + @Override + public HRegionLocation getRegionLocation(final byte [] name, + final byte [] row, boolean reload) + throws IOException { + return reload? 
relocateRegion(name, row): locateRegion(name, row); + } + + @Override + public boolean isTableEnabled(byte[] tableName) throws IOException { + return testTableOnlineState(tableName, true); + } + + @Override + public boolean isTableDisabled(byte[] tableName) throws IOException { + return testTableOnlineState(tableName, false); + } + + @Override + public boolean isTableAvailable(final byte[] tableName) throws IOException { + final AtomicBoolean available = new AtomicBoolean(true); + final AtomicInteger regionCount = new AtomicInteger(0); + MetaScannerVisitor visitor = new MetaScannerVisitorBase() { + @Override + public boolean processRow(Result row) throws IOException { + HRegionInfo info = MetaScanner.getHRegionInfo(row); + if (info != null) { + if (Bytes.equals(tableName, info.getTableName())) { + ServerName server = HRegionInfo.getServerName(row); + if (server == null) { + available.set(false); + return false; + } + regionCount.incrementAndGet(); + } + } + return true; + } + }; + MetaScanner.metaScan(conf, visitor); + return available.get() && (regionCount.get() > 0); + } + + /* + * @param True if table is online + */ + private boolean testTableOnlineState(byte [] tableName, boolean online) + throws IOException { + if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { + // The root region is always enabled + return online; + } + String tableNameStr = Bytes.toString(tableName); + ZooKeeperKeepAliveConnection zkw = getKeepAliveZooKeeperWatcher(); + try { + if (online) { + return ZKTableReadOnly.isEnabledTable(zkw, tableNameStr); + } + return ZKTableReadOnly.isDisabledTable(zkw, tableNameStr); + } catch (KeeperException e) { + throw new IOException("Enable/Disable failed", e); + }finally { + zkw.close(); + } + } + + @Override + public HRegionLocation locateRegion(final byte [] regionName) + throws IOException { + // TODO implement. use old stuff or new stuff? + return null; + } + + @Override + public List locateRegions(final byte [] tableName) + throws IOException { + // TODO implement. use old stuff or new stuff? + return null; + } + + @Override + public HRegionLocation locateRegion(final byte [] tableName, + final byte [] row) + throws IOException{ + return locateRegion(tableName, row, true, true); + } + + @Override + public HRegionLocation relocateRegion(final byte [] tableName, + final byte [] row) + throws IOException{ + + // Since this is an explicit request not to use any caching, finding + // disabled tables should not be desirable. This will ensure that an exception is thrown when + // the first time a disabled table is interacted with. + if (isTableDisabled(tableName)) { + throw new DoNotRetryIOException(Bytes.toString(tableName) + " is disabled."); + } + + return locateRegion(tableName, row, false, true); + } + + private HRegionLocation locateRegion(final byte [] tableName, + final byte [] row, boolean useCache, boolean retry) + throws IOException { + if (this.closed) throw new IOException(toString() + " closed"); + if (tableName == null || tableName.length == 0) { + throw new IllegalArgumentException( + "table name cannot be null or zero length"); + } + + if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { + ZooKeeperKeepAliveConnection zkw = getKeepAliveZooKeeperWatcher(); + try { + LOG.debug("Looking up root region location in ZK," + + " connection=" + this); + ServerName servername = + RootRegionTracker.blockUntilAvailable(zkw, this.rpcTimeout); + + LOG.debug("Looked up root region location, connection=" + this + + "; serverName=" + ((servername == null) ? 
"null" : servername)); + if (servername == null) return null; + return new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, + servername.getHostname(), servername.getPort()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } finally { + zkw.close(); + } + } else if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { + return locateRegionInMeta(HConstants.ROOT_TABLE_NAME, tableName, row, + useCache, metaRegionLock, retry); + } else { + // Region not in the cache - have to go to the meta RS + return locateRegionInMeta(HConstants.META_TABLE_NAME, tableName, row, + useCache, userRegionLock, retry); + } + } + + /* + * Search .META. for the HRegionLocation info that contains the table and + * row we're seeking. It will prefetch certain number of regions info and + * save them to the global region cache. + */ + private void prefetchRegionCache(final byte[] tableName, + final byte[] row) { + // Implement a new visitor for MetaScanner, and use it to walk through + // the .META. + MetaScannerVisitor visitor = new MetaScannerVisitorBase() { + public boolean processRow(Result result) throws IOException { + try { + HRegionInfo regionInfo = MetaScanner.getHRegionInfo(result); + if (regionInfo == null) { + return true; + } + + // possible we got a region of a different table... + if (!Bytes.equals(regionInfo.getTableName(), tableName)) { + return false; // stop scanning + } + if (regionInfo.isOffline()) { + // don't cache offline regions + return true; + } + + ServerName serverName = HRegionInfo.getServerName(result); + if (serverName == null) { + return true; // don't cache it + } + // instantiate the location + HRegionLocation loc = new HRegionLocation(regionInfo, serverName.getHostname(), + serverName.getPort()); + // cache this meta entry + cacheLocation(tableName, loc); + + return true; + } catch (RuntimeException e) { + throw new IOException(e); + } + } + }; + try { + // pre-fetch certain number of regions info at region cache. + MetaScanner.metaScan(conf, visitor, tableName, row, + this.prefetchRegionLimit); + } catch (IOException e) { + LOG.warn("Encountered problems when prefetch META table: ", e); + } + } + + /* + * Search one of the meta tables (-ROOT- or .META.) for the HRegionLocation + * info that contains the table and row we're seeking. + */ + private HRegionLocation locateRegionInMeta(final byte [] parentTable, + final byte [] tableName, final byte [] row, boolean useCache, + Object regionLockObject, boolean retry) + throws IOException { + HRegionLocation location; + // If we are supposed to be using the cache, look in the cache to see if + // we already have the region. + if (useCache) { + location = getCachedLocation(tableName, row); + if (location != null) { + return location; + } + } + int localNumRetries = retry ? numRetries : 1; + // build the key of the meta region we should be looking for. + // the extra 9's on the end are necessary to allow "exact" matches + // without knowing the precise region names. + byte [] metaKey = HRegionInfo.createRegionName(tableName, row, + HConstants.NINES, false); + for (int tries = 0; true; tries++) { + if (tries >= localNumRetries) { + throw new NoServerForRegionException("Unable to find region for " + + Bytes.toStringBinary(row) + " after " + numRetries + " tries."); + } + + HRegionLocation metaLocation = null; + try { + // locate the root or meta region + metaLocation = locateRegion(parentTable, metaKey, true, false); + // If null still, go around again. 
+ if (metaLocation == null) continue; + ClientProtocol server = + getClient(metaLocation.getHostname(), metaLocation.getPort()); + + Result regionInfoRow = null; + // This block guards against two threads trying to load the meta + // region at the same time. The first will load the meta region and + // the second will use the value that the first one found. + synchronized (regionLockObject) { + // If the parent table is META, we may want to pre-fetch some + // region info into the global region cache for this table. + if (Bytes.equals(parentTable, HConstants.META_TABLE_NAME) && + (getRegionCachePrefetch(tableName)) ) { + prefetchRegionCache(tableName, row); + } + + // Check the cache again for a hit in case some other thread made the + // same query while we were waiting on the lock. If not supposed to + // be using the cache, delete any existing cached location so it won't + // interfere. + if (useCache) { + location = getCachedLocation(tableName, row); + if (location != null) { + return location; + } + } else { + deleteCachedLocation(tableName, row); + } + + // Query the root or meta region for the location of the meta region + regionInfoRow = ProtobufUtil.getRowOrBefore(server, + metaLocation.getRegionInfo().getRegionName(), metaKey, + HConstants.CATALOG_FAMILY); + } + if (regionInfoRow == null) { + throw new TableNotFoundException(Bytes.toString(tableName)); + } + + // convert the row result into the HRegionLocation we need! + HRegionInfo regionInfo = MetaScanner.getHRegionInfo(regionInfoRow); + if (regionInfo == null) { + throw new IOException("HRegionInfo was null or empty in " + + Bytes.toString(parentTable) + ", row=" + regionInfoRow); + } + + // possible we got a region of a different table... + if (!Bytes.equals(regionInfo.getTableName(), tableName)) { + throw new TableNotFoundException( + "Table '" + Bytes.toString(tableName) + "' was not found, got: " + + Bytes.toString(regionInfo.getTableName()) + "."); + } + if (regionInfo.isSplit()) { + throw new RegionOfflineException("the only available region for" + + " the required row is a split parent," + + " the daughters should be online soon: " + + regionInfo.getRegionNameAsString()); + } + if (regionInfo.isOffline()) { + throw new RegionOfflineException("the region is offline, could" + + " be caused by a disable table call: " + + regionInfo.getRegionNameAsString()); + } + + ServerName serverName = HRegionInfo.getServerName(regionInfoRow); + if (serverName == null) { + throw new NoServerForRegionException("No server address listed " + + "in " + Bytes.toString(parentTable) + " for region " + + regionInfo.getRegionNameAsString() + " containing row " + + Bytes.toStringBinary(row)); + } + + // Instantiate the location + location = + new HRegionLocation(regionInfo, serverName.getHostname(), serverName.getPort()); + cacheLocation(tableName, location); + return location; + } catch (TableNotFoundException e) { + // if we got this error, probably means the table just plain doesn't + // exist. rethrow the error immediately. this should always be coming + // from the HTable constructor. + throw e; + } catch (IOException e) { + if (e instanceof RemoteException) { + e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e); + } + if (tries < numRetries - 1) { + if (LOG.isDebugEnabled()) { + LOG.debug("locateRegionInMeta parentTable=" + + Bytes.toString(parentTable) + ", metaLocation=" + + ((metaLocation == null)? 
"null": "{" + metaLocation + "}") + + ", attempt=" + tries + " of " + + this.numRetries + " failed; retrying after sleep of " + + ConnectionUtils.getPauseTime(this.pause, tries) + " because: " + e.getMessage()); + } + } else { + throw e; + } + // Only relocate the parent region if necessary + if(!(e instanceof RegionOfflineException || + e instanceof NoServerForRegionException)) { + relocateRegion(parentTable, metaKey); + } + } + try{ + Thread.sleep(ConnectionUtils.getPauseTime(this.pause, tries)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IOException("Giving up trying to location region in " + + "meta: thread is interrupted."); + } + } + } + + /* + * Search the cache for a location that fits our table and row key. + * Return null if no suitable region is located. TODO: synchronization note + * + *
<p>
TODO: This method during writing consumes 15% of CPU doing lookup + * into the Soft Reference SortedMap. Improve. + * + * @param tableName + * @param row + * @return Null or region location found in cache. + */ + HRegionLocation getCachedLocation(final byte [] tableName, + final byte [] row) { + SoftValueSortedMap tableLocations = + getTableLocations(tableName); + + // start to examine the cache. we can only do cache actions + // if there's something in the cache for this table. + if (tableLocations.isEmpty()) { + return null; + } + + HRegionLocation possibleRegion = tableLocations.get(row); + if (possibleRegion != null) { + return possibleRegion; + } + + possibleRegion = tableLocations.lowerValueByKey(row); + if (possibleRegion == null) { + return null; + } + + // make sure that the end key is greater than the row we're looking + // for, otherwise the row actually belongs in the next region, not + // this one. the exception case is when the endkey is + // HConstants.EMPTY_END_ROW, signifying that the region we're + // checking is actually the last region in the table. + byte[] endKey = possibleRegion.getRegionInfo().getEndKey(); + if (Bytes.equals(endKey, HConstants.EMPTY_END_ROW) || + KeyValue.getRowComparator(tableName).compareRows( + endKey, 0, endKey.length, row, 0, row.length) > 0) { + return possibleRegion; + } + + // Passed all the way through, so we got nothin - complete cache miss + return null; + } + + /** + * Delete a cached location + * @param tableName tableName + * @param row + */ + void deleteCachedLocation(final byte [] tableName, final byte [] row) { + synchronized (this.cachedRegionLocations) { + Map tableLocations = + getTableLocations(tableName); + // start to examine the cache. we can only do cache actions + // if there's something in the cache for this table. + if (!tableLocations.isEmpty()) { + HRegionLocation rl = getCachedLocation(tableName, row); + if (rl != null) { + tableLocations.remove(rl.getRegionInfo().getStartKey()); + if (LOG.isDebugEnabled()) { + LOG.debug("Removed " + + rl.getRegionInfo().getRegionNameAsString() + + " for tableName=" + Bytes.toString(tableName) + + " from cache " + "because of " + Bytes.toStringBinary(row)); + } + } + } + } + } + + @Override + public void clearCaches(String sn) { + clearCachedLocationForServer(sn); + } + + /* + * Delete all cached entries of a table that maps to a specific location. 
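+ * Both the cached HRegionLocation entries that point at that host:port
+ * and the marker for it in cachedServers are removed (see
+ * clearCaches(String), which delegates here).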
+ * + * @param tablename + * @param server + */ + private void clearCachedLocationForServer(final String server) { + boolean deletedSomething = false; + synchronized (this.cachedRegionLocations) { + if (!cachedServers.contains(server)) { + return; + } + for (Map tableLocations : + cachedRegionLocations.values()) { + for (Entry e : tableLocations.entrySet()) { + if (e.getValue().getHostnamePort().equals(server)) { + tableLocations.remove(e.getKey()); + deletedSomething = true; + } + } + } + cachedServers.remove(server); + } + if (deletedSomething && LOG.isDebugEnabled()) { + LOG.debug("Removed all cached region locations that map to " + server); + } + } + + /* + * @param tableName + * @return Map of cached locations for passed tableName + */ + private SoftValueSortedMap getTableLocations( + final byte [] tableName) { + // find the map of cached locations for this table + Integer key = Bytes.mapKey(tableName); + SoftValueSortedMap result; + synchronized (this.cachedRegionLocations) { + result = this.cachedRegionLocations.get(key); + // if tableLocations for this table isn't built yet, make one + if (result == null) { + result = new SoftValueSortedMap( + Bytes.BYTES_COMPARATOR); + this.cachedRegionLocations.put(key, result); + } + } + return result; + } + + @Override + public void clearRegionCache() { + synchronized(this.cachedRegionLocations) { + this.cachedRegionLocations.clear(); + this.cachedServers.clear(); + } + } + + @Override + public void clearRegionCache(final byte [] tableName) { + synchronized (this.cachedRegionLocations) { + this.cachedRegionLocations.remove(Bytes.mapKey(tableName)); + } + } + + /* + * Put a newly discovered HRegionLocation into the cache. + */ + private void cacheLocation(final byte [] tableName, + final HRegionLocation location) { + byte [] startKey = location.getRegionInfo().getStartKey(); + Map tableLocations = + getTableLocations(tableName); + boolean hasNewCache = false; + synchronized (this.cachedRegionLocations) { + cachedServers.add(location.getHostnamePort()); + hasNewCache = (tableLocations.put(startKey, location) == null); + } + if (hasNewCache) { + LOG.debug("Cached location for " + + location.getRegionInfo().getRegionNameAsString() + + " is " + location.getHostnamePort()); + } + } + + @Override + public AdminProtocol getAdmin(final String hostname, + final int port) throws IOException { + return getAdmin(hostname, port, false); + } + + @Override + public ClientProtocol getClient( + final String hostname, final int port) throws IOException { + return (ClientProtocol)getProtocol(hostname, port, + clientClass, ClientProtocol.VERSION); + } + + @Override + public AdminProtocol getAdmin(final String hostname, + final int port, final boolean master) throws IOException { + return (AdminProtocol)getProtocol(hostname, port, + adminClass, AdminProtocol.VERSION); + } + + /** + * Either the passed isa is null or hostname + * can be but not both. + * @param hostname + * @param port + * @param protocolClass + * @param version + * @return Proxy. 
+ * @throws IOException + */ + VersionedProtocol getProtocol(final String hostname, + final int port, final Class protocolClass, + final long version) throws IOException { + String rsName = Addressing.createHostAndPortStr(hostname, port); + // See if we already have a connection (common case) + Map protocols = this.servers.get(rsName); + if (protocols == null) { + protocols = new HashMap(); + Map existingProtocols = + this.servers.putIfAbsent(rsName, protocols); + if (existingProtocols != null) { + protocols = existingProtocols; + } + } + String protocol = protocolClass.getName(); + VersionedProtocol server = protocols.get(protocol); + if (server == null) { + // create a unique lock for this RS + protocol (if necessary) + String lockKey = protocol + "@" + rsName; + this.connectionLock.putIfAbsent(lockKey, lockKey); + // get the RS lock + synchronized (this.connectionLock.get(lockKey)) { + // do one more lookup in case we were stalled above + server = protocols.get(protocol); + if (server == null) { + try { + // Only create isa when we need to. + InetSocketAddress address = new InetSocketAddress(hostname, port); + // definitely a cache miss. establish an RPC for this RS + server = HBaseClientRPC.waitForProxy( + protocolClass, version, address, this.conf, + this.maxRPCAttempts, this.rpcTimeout, this.rpcTimeout); + protocols.put(protocol, server); + } catch (RemoteException e) { + LOG.warn("RemoteException connecting to RS", e); + // Throw what the RemoteException was carrying. + throw e.unwrapRemoteException(); + } + } + } + } + return server; + } + + @Override + @Deprecated + public ZooKeeperWatcher getZooKeeperWatcher() + throws ZooKeeperConnectionException { + canCloseZKW = false; + + try { + return getKeepAliveZooKeeperWatcher(); + } catch (ZooKeeperConnectionException e){ + throw e; + }catch (IOException e) { + // Encapsulate exception to keep interface + throw new ZooKeeperConnectionException( + "Can't create a zookeeper connection", e); + } + } + + + private ZooKeeperKeepAliveConnection keepAliveZookeeper; + private int keepAliveZookeeperUserCount; + private boolean canCloseZKW = true; + + // keepAlive time, in ms. No reason to make it configurable. + private static final long keepAlive = 5 * 60 * 1000; + + /** + * Retrieve a shared ZooKeeperWatcher. You must close it it once you've have + * finished with it. + * @return The shared instance. Never returns null. + */ + public ZooKeeperKeepAliveConnection getKeepAliveZooKeeperWatcher() + throws IOException { + synchronized (masterAndZKLock) { + + if (keepAliveZookeeper == null) { + // We don't check that our link to ZooKeeper is still valid + // But there is a retry mechanism in the ZooKeeperWatcher itself + keepAliveZookeeper = new ZooKeeperKeepAliveConnection( + conf, this.toString(), this); + } + keepAliveZookeeperUserCount++; + keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE; + + return keepAliveZookeeper; + } + } + + void releaseZooKeeperWatcher(ZooKeeperWatcher zkw) { + if (zkw == null){ + return; + } + synchronized (masterAndZKLock) { + --keepAliveZookeeperUserCount; + if (keepAliveZookeeperUserCount <=0 ){ + keepZooKeeperWatcherAliveUntil = + System.currentTimeMillis() + keepAlive; + } + } + } + + + /** + * Creates a Chore thread to check the connections to master & zookeeper + * and close them when they reach their closing time ( + * {@link #MasterProtocolState.keepAliveUntil} and + * {@link #keepZooKeeperWatcherAliveUntil}). 
Keep alive time is + * managed by the release functions and the variable {@link #keepAlive} + */ + private static class DelayedClosing extends Chore implements Stoppable { + private HConnectionImplementation hci; + Stoppable stoppable; + + private DelayedClosing( + HConnectionImplementation hci, Stoppable stoppable){ + super( + "ZooKeeperWatcher and Master delayed closing for connection "+hci, + 60*1000, // We check every minutes + stoppable); + this.hci = hci; + this.stoppable = stoppable; + } + + static DelayedClosing createAndStart(HConnectionImplementation hci){ + Stoppable stoppable = new Stoppable() { + private volatile boolean isStopped = false; + @Override public void stop(String why) { isStopped = true;} + @Override public boolean isStopped() {return isStopped;} + }; + + return new DelayedClosing(hci, stoppable); + } + + protected void closeMasterProtocol(MasterProtocolState protocolState) { + if (System.currentTimeMillis() > protocolState.keepAliveUntil) { + hci.closeMasterProtocol(protocolState); + protocolState.keepAliveUntil = Long.MAX_VALUE; + } + } + + @Override + protected void chore() { + synchronized (hci.masterAndZKLock) { + if (hci.canCloseZKW) { + if (System.currentTimeMillis() > + hci.keepZooKeeperWatcherAliveUntil) { + + hci.closeZooKeeperWatcher(); + hci.keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE; + } + } + closeMasterProtocol(hci.masterAdminProtocol); + closeMasterProtocol(hci.masterMonitorProtocol); + } + } + + @Override + public void stop(String why) { + stoppable.stop(why); + } + + @Override + public boolean isStopped() { + return stoppable.isStopped(); + } + } + + private void closeZooKeeperWatcher() { + synchronized (masterAndZKLock) { + if (keepAliveZookeeper != null) { + LOG.info("Closing zookeeper sessionid=0x" + + Long.toHexString( + keepAliveZookeeper.getRecoverableZooKeeper().getSessionId())); + keepAliveZookeeper.internalClose(); + keepAliveZookeeper = null; + } + keepAliveZookeeperUserCount = 0; + } + } + + private static class MasterProtocolHandler implements InvocationHandler { + private HConnectionImplementation connection; + private MasterProtocolState protocolStateTracker; + + protected MasterProtocolHandler(HConnectionImplementation connection, + MasterProtocolState protocolStateTracker) { + this.connection = connection; + this.protocolStateTracker = protocolStateTracker; + } + + @Override + public Object invoke(Object proxy, Method method, Object[] args) + throws Throwable { + if (method.getName().equals("close") && + method.getParameterTypes().length == 0) { + release(connection, protocolStateTracker); + return null; + } else { + try { + return method.invoke(protocolStateTracker.protocol, args); + }catch (InvocationTargetException e){ + // We will have this for all the exception, checked on not, sent + // by any layer, including the functional exception + Throwable cause = e.getCause(); + if (cause == null){ + throw new RuntimeException( + "Proxy invocation failed and getCause is null", e); + } + if (cause instanceof UndeclaredThrowableException) { + cause = cause.getCause(); + } + throw cause; + } + } + } + + private void release( + HConnectionImplementation connection, + MasterProtocolState target) { + connection.releaseMaster(target); + } + } + + MasterProtocolState masterAdminProtocol = + new MasterProtocolState(MasterAdminProtocol.class, MasterAdminProtocol.VERSION); + MasterProtocolState masterMonitorProtocol = + new MasterProtocolState(MasterMonitorProtocol.class, MasterMonitorProtocol.VERSION); + + /** + * This function allows 
HBaseAdmin and potentially others + * to get a shared master connection. + * + * @return The shared instance. Never returns null. + * @throws MasterNotRunningException + */ + private Object getKeepAliveMasterProtocol( + MasterProtocolState protocolState, Class connectionClass) + throws MasterNotRunningException { + synchronized (masterAndZKLock) { + if (!isKeepAliveMasterConnectedAndRunning(protocolState)) { + if (protocolState.protocol != null) { + HBaseClientRPC.stopProxy(protocolState.protocol); + } + protocolState.protocol = null; + protocolState.protocol = createMasterWithRetries(protocolState); + } + protocolState.userCount++; + protocolState.keepAliveUntil = Long.MAX_VALUE; + + return Proxy.newProxyInstance( + connectionClass.getClassLoader(), + new Class[]{connectionClass}, + new MasterProtocolHandler(this, protocolState) + ); + } + } + + @Override + public MasterAdminProtocol getMasterAdmin() throws MasterNotRunningException { + return getKeepAliveMasterAdmin(); + }; + + @Override + public MasterMonitorProtocol getMasterMonitor() throws MasterNotRunningException { + return getKeepAliveMasterMonitor(); + } + + @Override + public MasterAdminKeepAliveConnection getKeepAliveMasterAdmin() + throws MasterNotRunningException { + return (MasterAdminKeepAliveConnection) + getKeepAliveMasterProtocol(masterAdminProtocol, MasterAdminKeepAliveConnection.class); + } + + @Override + public MasterMonitorKeepAliveConnection getKeepAliveMasterMonitor() + throws MasterNotRunningException { + return (MasterMonitorKeepAliveConnection) + getKeepAliveMasterProtocol(masterMonitorProtocol, MasterMonitorKeepAliveConnection.class); + } + + private boolean isKeepAliveMasterConnectedAndRunning(MasterProtocolState protocolState){ + if (protocolState.protocol == null){ + return false; + } + try { + return protocolState.protocol.isMasterRunning( + null, RequestConverter.buildIsMasterRunningRequest()).getIsMasterRunning(); + }catch (UndeclaredThrowableException e){ + // It's somehow messy, but we can receive exceptions such as + // java.net.ConnectException but they're not declared. So we catch + // it... + LOG.info("Master connection is not running anymore", + e.getUndeclaredThrowable()); + return false; + } catch (ServiceException se) { + LOG.warn("Checking master connection", se); + return false; + } + } + + private void releaseMaster(MasterProtocolState protocolState) { + if (protocolState.protocol == null){ + return; + } + synchronized (masterAndZKLock) { + --protocolState.userCount; + if (protocolState.userCount <= 0) { + protocolState.keepAliveUntil = + System.currentTimeMillis() + keepAlive; + } + } + } + + private void closeMasterProtocol(MasterProtocolState protocolState) { + if (protocolState.protocol != null){ + LOG.info("Closing master protocol: " + protocolState.protocolClass.getName()); + HBaseClientRPC.stopProxy(protocolState.protocol); + protocolState.protocol = null; + } + protocolState.userCount = 0; + } + + /** + * Immediate close of the shared master. Can be by the delayed close or + * when closing the connection itself. 
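+ *
+ * <p>For reference, callers obtain and release the shared master through
+ * the keep-alive proxies above; a sketch (error handling elided):
+ * <pre>
+ * MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitor();
+ * try {
+ *   // ... issue monitor RPCs through 'master' ...
+ * } finally {
+ *   master.close(); // decrements userCount; DelayedClosing cleans up later
+ * }
+ * </pre>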
+ */ + private void closeMaster() { + synchronized (masterAndZKLock) { + closeMasterProtocol(masterAdminProtocol); + closeMasterProtocol(masterMonitorProtocol); + } + } + + @Override + public T getRegionServerWithRetries(ServerCallable callable) + throws IOException, RuntimeException { + return callable.withRetries(); + } + + @Override + public T getRegionServerWithoutRetries(ServerCallable callable) + throws IOException, RuntimeException { + return callable.withoutRetries(); + } + + @Deprecated + private Callable createCallable( + final HRegionLocation loc, final MultiAction multi, + final byte [] tableName) { + // TODO: This does not belong in here!!! St.Ack HConnections should + // not be dealing in Callables; Callables have HConnections, not other + // way around. + final HConnection connection = this; + return new Callable() { + public MultiResponse call() throws IOException { + ServerCallable callable = + new ServerCallable(connection, tableName, null) { + public MultiResponse call() throws IOException { + return ProtobufUtil.multi(server, multi); + } + + @Override + public void connect(boolean reload) throws IOException { + server = connection.getClient( + loc.getHostname(), loc.getPort()); + } + }; + return callable.withoutRetries(); + } + }; + } + + + void updateCachedLocation(HRegionLocation hrl, String hostname, int port) { + HRegionLocation newHrl = new HRegionLocation(hrl.getRegionInfo(), hostname, port); + synchronized (this.cachedRegionLocations) { + cacheLocation(hrl.getRegionInfo().getTableName(), newHrl); + } + } + + void deleteCachedLocation(HRegionLocation rl) { + synchronized (this.cachedRegionLocations) { + Map tableLocations = + getTableLocations(rl.getRegionInfo().getTableName()); + tableLocations.remove(rl.getRegionInfo().getStartKey()); + } + } + + private void updateCachedLocations(byte[] tableName, Row row, Object t) { + updateCachedLocations(null, tableName, row, t); + } + + /** + * Update the location with the new value (if the exception is a RegionMovedException) or delete + * it from the cache. + * @param hrl - can be null. If it's the case, tableName and row should not be null + * @param tableName - can be null if hrl is not null. + * @param row - can be null if hrl is not null. + * @param exception - An object (to simplify user code) on which we will try to find a nested + * or wrapped or both RegionMovedException + */ + private void updateCachedLocations(final HRegionLocation hrl, final byte[] tableName, + Row row, final Object exception) { + + if ((row == null || tableName == null) && hrl == null){ + LOG.warn ("Coding error, see method javadoc. row="+row+", tableName="+ + Bytes.toString(tableName)+", hrl="+hrl); + return; + } + + // Is it something we have already updated? + final HRegionLocation myLoc = (hrl != null ? + hrl : getCachedLocation(tableName, row.getRow())); + if (myLoc == null) { + // There is no such location in the cache => it's been removed already => nothing to do + return; + } + + final RegionMovedException rme = RegionMovedException.find(exception); + if (rme != null) { + LOG.info("Region " + myLoc.getRegionInfo().getRegionNameAsString() + " moved from " + + myLoc.getHostnamePort() + ", updating client location cache." 
+ + " New server: " + rme.getHostname() + ":" + rme.getPort()); + updateCachedLocation(myLoc, rme.getHostname(), rme.getPort()); + } else { + deleteCachedLocation(myLoc); + } + } + + @Override + @Deprecated + public void processBatch(List list, + final byte[] tableName, + ExecutorService pool, + Object[] results) throws IOException, InterruptedException { + // This belongs in HTable!!! Not in here. St.Ack + + // results must be the same size as list + if (results.length != list.size()) { + throw new IllegalArgumentException( + "argument results must be the same size as argument list"); + } + processBatchCallback(list, tableName, pool, results, null); + } + + /** + * Send the queries in parallel on the different region servers. Retries on failures. + * If the method returns it means that there is no error, and the 'results' array will + * contain no exception. On error, an exception is thrown, and the 'results' array will + * contain results and exceptions. + * @deprecated since 0.96 - Use {@link HTable#processBatchCallback} instead + */ + @Override + @Deprecated + public void processBatchCallback( + List list, + byte[] tableName, + ExecutorService pool, + Object[] results, + Batch.Callback callback) + throws IOException, InterruptedException { + + Process p = new Process(this, list, tableName, pool, results, callback); + p.processBatchCallback(); + } + + + /** + * Methods and attributes to manage a batch process are grouped into this single class. + * This allows, by creating a Process per batch process to ensure multithread safety. + * + * This code should be move to HTable once processBatchCallback is not supported anymore in + * the HConnection interface. + */ + private static class Process { + // Info on the queries and their context + private final HConnectionImplementation hci; + private final List rows; + private final byte[] tableName; + private final ExecutorService pool; + private final Object[] results; + private final Batch.Callback callback; + + // Error management: these lists are filled by the errors on the final try. Indexes + // are consistent, i.e. exceptions[i] matches failedActions[i] and failedAddresses[i] + private final List exceptions; + private final List failedActions; + private final List failedAddresses; + + // Used during the batch process + private final List> toReplay; + private final LinkedList, HRegionLocation, Future>> + inProgress; + private int curNumRetries; + + // Notified when a tasks is done + private final List> finishedTasks = new ArrayList>(); + + private Process(HConnectionImplementation hci, List list, + byte[] tableName, ExecutorService pool, Object[] results, + Batch.Callback callback){ + this.hci = hci; + this.rows = list; + this.tableName = tableName; + this.pool = pool; + this.results = results; + this.callback = callback; + this.toReplay = new ArrayList>(); + this.inProgress = + new LinkedList, HRegionLocation, Future>>(); + this.exceptions = new ArrayList(); + this.failedActions = new ArrayList(); + this.failedAddresses = new ArrayList(); + this.curNumRetries = 0; + } + + + /** + * Group a list of actions per region servers, and send them. The created MultiActions are + * added to the inProgress list. + * @param actionsList + * @param sleepTime - sleep time before actually executing the actions. Can be zero. + * @throws IOException - if we can't locate a region after multiple retries. 
+ */ + private void submit(List> actionsList, final long sleepTime) throws IOException { + // group per location => regions server + final Map> actionsByServer = + new HashMap>(); + for (Action aAction : actionsList) { + final Row row = aAction.getAction(); + + if (row != null) { + final HRegionLocation loc = hci.locateRegion(this.tableName, row.getRow()); + if (loc == null) { + throw new IOException("No location found, aborting submit."); + } + + final byte[] regionName = loc.getRegionInfo().getRegionName(); + MultiAction actions = actionsByServer.get(loc); + if (actions == null) { + actions = new MultiAction(); + actionsByServer.put(loc, actions); + } + actions.add(regionName, aAction); + } + } + + // Send the queries and add them to the inProgress list + for (Entry> e : actionsByServer.entrySet()) { + Callable callable = + createDelayedCallable(sleepTime, e.getKey(), e.getValue()); + Triple, HRegionLocation, Future> p = + new Triple, HRegionLocation, Future>( + e.getValue(), e.getKey(), this.pool.submit(callable)); + this.inProgress.addLast(p); + } + } + + + private void addToErrorsLists(Exception ex, Row row, Triple, + HRegionLocation, Future> obj) { + this.exceptions.add(ex); + this.failedActions.add(row); + this.failedAddresses.add(obj.getSecond().getHostnamePort()); + } + + /** + * Resubmit the actions which have failed, after a sleep time. + * @throws IOException + */ + private void doRetry() throws IOException{ + final long sleepTime = ConnectionUtils.getPauseTime(hci.pause, this.curNumRetries); + submit(this.toReplay, sleepTime); + this.toReplay.clear(); + } + + /** + * Parameterized batch processing, allowing varying return types for + * different {@link Row} implementations. + * Throws an exception on error. If there are no exceptions, it means that the 'results' + * array is clean. + */ + private void processBatchCallback() throws IOException, InterruptedException { + if (this.results.length != this.rows.size()) { + throw new IllegalArgumentException( + "argument results (size="+results.length+") must be the same size as " + + "argument list (size="+this.rows.size()+")"); + } + if (this.rows.isEmpty()) { + return; + } + + // We keep the number of retry per action. + int[] nbRetries = new int[this.results.length]; + + // Build the action list. This list won't change after being created, hence the + // indexes will remain constant, allowing a direct lookup. + final List> listActions = new ArrayList>(this.rows.size()); + for (int i = 0; i < this.rows.size(); i++) { + Action action = new Action(this.rows.get(i), i); + listActions.add(action); + } + + // execute the actions. We will analyze and resubmit the actions in a 'while' loop. + submit(listActions, 0); + + // LastRetry is true if, either: + // we had an exception 'DoNotRetry' + // we had more than numRetries for any action + // In this case, we will finish the current retries but we won't start new ones. + boolean lastRetry = false; + // despite its name numRetries means number of tries. So if numRetries == 1 it means we + // won't retry. And we compare vs. 2 in case someone set it to zero. 
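+ // For example: numRetries == 3 allows one initial try plus at most two
+ // retries, while numRetries of 0 or 1 disables retrying entirely
+ // (noRetry becomes true below).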
+ boolean noRetry = (hci.numRetries < 2); + + // Analyze and resubmit until all actions are done successfully or failed after numRetries + while (!this.inProgress.isEmpty()) { + + // We need the original multi action to find out what actions to replay if + // we have a 'total' failure of the Future + // We need the HRegionLocation as we give it back if we go out of retries + Triple, HRegionLocation, Future> currentTask = + removeFirstDone(); + + // Get the answer, keep the exception if any as we will use it for the analysis + MultiResponse responses = null; + ExecutionException exception = null; + try { + responses = currentTask.getThird().get(); + } catch (ExecutionException e) { + exception = e; + } + + // Error case: no result at all for this multi action. We need to redo all actions + if (responses == null) { + for (List> actions : currentTask.getFirst().actions.values()) { + for (Action action : actions) { + Row row = action.getAction(); + hci.updateCachedLocations(this.tableName, row, exception); + if (noRetry) { + addToErrorsLists(exception, row, currentTask); + } else { + lastRetry = addToReplay(nbRetries, action); + } + } + } + } else { // Success or partial success + // Analyze detailed results. We can still have individual failures to be redo. + // two specific exceptions are managed: + // - DoNotRetryIOException: we continue to retry for other actions + // - RegionMovedException: we update the cache with the new region location + for (Entry>> resultsForRS : + responses.getResults().entrySet()) { + for (Pair regionResult : resultsForRS.getValue()) { + Action correspondingAction = listActions.get(regionResult.getFirst()); + Object result = regionResult.getSecond(); + this.results[correspondingAction.getOriginalIndex()] = result; + + // Failure: retry if it's make sense else update the errors lists + if (result == null || result instanceof Throwable) { + Row row = correspondingAction.getAction(); + hci.updateCachedLocations(this.tableName, row, result); + if (result instanceof DoNotRetryIOException || noRetry) { + addToErrorsLists((Exception)result, row, currentTask); + } else { + lastRetry = addToReplay(nbRetries, correspondingAction); + } + } else // success + if (callback != null) { + this.callback.update(resultsForRS.getKey(), + this.rows.get(regionResult.getFirst()).getRow(), + (R) result); + } + } + } + } + + // Retry all actions in toReplay then clear it. + if (!noRetry && !toReplay.isEmpty()) { + doRetry(); + if (lastRetry) { + noRetry = true; + } + } + } + + if (!exceptions.isEmpty()) { + throw new RetriesExhaustedWithDetailsException(this.exceptions, + this.failedActions, + this.failedAddresses); + } + } + + /** + * Put the action that has to be retried in the Replay list. + * @return true if we're out of numRetries and it's the last retry. + */ + private boolean addToReplay(int[] nbRetries, Action action) { + this.toReplay.add(action); + nbRetries[action.getOriginalIndex()]++; + if (nbRetries[action.getOriginalIndex()] > this.curNumRetries) { + this.curNumRetries = nbRetries[action.getOriginalIndex()]; + } + // numRetries means number of tries, while curNumRetries means current number of retries. So + // we need to add 1 to make them comparable. And as we look for the last try we compare + // with '>=' and no '>'. And we need curNumRetries to means what it says as we don't want + // to initialize it to 1. + return ( (this.curNumRetries +1) >= hci.numRetries); + } + + /** + * Wait for one of tasks to be done, and remove it from the list. + * @return the tasks done. 
+ */ + private Triple, HRegionLocation, Future> + removeFirstDone() throws InterruptedException { + while (true) { + synchronized (finishedTasks) { + if (!finishedTasks.isEmpty()) { + MultiAction done = finishedTasks.remove(finishedTasks.size() - 1); + + // We now need to remove it from the inProgress part. + Iterator, HRegionLocation, Future>> it = + inProgress.iterator(); + while (it.hasNext()) { + Triple, HRegionLocation, Future> task = it.next(); + if (task.getFirst() == done) { // We have the exact object. No java equals here. + it.remove(); + return task; + } + } + LOG.error("Development error: We didn't see a task in the list. " + + done.getRegions()); + } + finishedTasks.wait(10); + } + } + } + + private Callable createDelayedCallable( + final long delay, final HRegionLocation loc, final MultiAction multi) { + + final Callable delegate = hci.createCallable(loc, multi, tableName); + + return new Callable() { + private final long creationTime = System.currentTimeMillis(); + + @Override + public MultiResponse call() throws Exception { + try { + final long waitingTime = delay + creationTime - System.currentTimeMillis(); + if (waitingTime > 0) { + Thread.sleep(waitingTime); + } + return delegate.call(); + } finally { + synchronized (finishedTasks) { + finishedTasks.add(multi); + finishedTasks.notifyAll(); + } + } + } + }; + } + } + + /* + * Return the number of cached region for a table. It will only be called + * from a unit test. + */ + int getNumberOfCachedRegionLocations(final byte[] tableName) { + Integer key = Bytes.mapKey(tableName); + synchronized (this.cachedRegionLocations) { + Map tableLocs = + this.cachedRegionLocations.get(key); + + if (tableLocs == null) { + return 0; + } + return tableLocs.values().size(); + } + } + + /** + * Check the region cache to see whether a region is cached yet or not. + * Called by unit tests. + * @param tableName tableName + * @param row row + * @return Region cached or not. + */ + boolean isRegionCached(final byte[] tableName, final byte[] row) { + HRegionLocation location = getCachedLocation(tableName, row); + return location != null; + } + + @Override + public void setRegionCachePrefetch(final byte[] tableName, + final boolean enable) { + if (!enable) { + regionCachePrefetchDisabledTables.add(Bytes.mapKey(tableName)); + } + else { + regionCachePrefetchDisabledTables.remove(Bytes.mapKey(tableName)); + } + } + + @Override + public boolean getRegionCachePrefetch(final byte[] tableName) { + return !regionCachePrefetchDisabledTables.contains(Bytes.mapKey(tableName)); + } + + @Override + public void abort(final String msg, Throwable t) { + if (t instanceof KeeperException.SessionExpiredException + && keepAliveZookeeper != null) { + synchronized (masterAndZKLock) { + if (keepAliveZookeeper != null) { + LOG.warn("This client just lost it's session with ZooKeeper," + + " closing it." + + " It will be recreated next time someone needs it", t); + closeZooKeeperWatcher(); + } + } + }else { + if (t != null) { + LOG.fatal(msg, t); + } else { + LOG.fatal(msg); + } + this.aborted = true; + this.closed = true; + } + } + + @Override + public boolean isClosed() { + return this.closed; + } + + @Override + public boolean isAborted(){ + return this.aborted; + } + + @Override + public int getCurrentNrHRS() throws IOException { + ZooKeeperKeepAliveConnection zkw = getKeepAliveZooKeeperWatcher(); + + try { + // We go to zk rather than to master to get count of regions to avoid + // HTable having a Master dependency. 
See HBase-2828 + return ZKUtil.getNumberOfChildren(zkw, zkw.rsZNode); + } catch (KeeperException ke) { + throw new IOException("Unexpected ZooKeeper exception", ke); + } finally { + zkw.close(); + } + } + + public void stopProxyOnClose(boolean stopProxy) { + this.stopProxy = stopProxy; + } + + /** + * Increment this client's reference count. + */ + void incCount() { + ++refCount; + } + + /** + * Decrement this client's reference count. + */ + void decCount() { + if (refCount > 0) { + --refCount; + } + } + + /** + * Return if this client has no reference + * + * @return true if this client has no reference; false otherwise + */ + boolean isZeroReference() { + return refCount == 0; + } + + void close(boolean stopProxy) { + if (this.closed) { + return; + } + delayedClosing.stop("Closing connection"); + if (stopProxy) { + closeMaster(); + for (Map i : servers.values()) { + for (VersionedProtocol server: i.values()) { + HBaseClientRPC.stopProxy(server); + } + } + } + closeZooKeeperWatcher(); + this.servers.clear(); + this.closed = true; + } + + @Override + public void close() { + if (managed) { + HConnectionManager.deleteConnection(this, stopProxy, false); + } else { + close(true); + } + } + + /** + * Close the connection for good, regardless of what the current value of + * {@link #refCount} is. Ideally, {@link #refCount} should be zero at this + * point, which would be the case if all of its consumers close the + * connection. However, on the off chance that someone is unable to close + * the connection, perhaps because it bailed out prematurely, the method + * below will ensure that this {@link HConnection} instance is cleaned up. + * Caveat: The JVM may take an unknown amount of time to call finalize on an + * unreachable object, so our hope is that every consumer cleans up after + * itself, like any good citizen. + */ + @Override + protected void finalize() throws Throwable { + super.finalize(); + // Pretend as if we are about to release the last remaining reference + refCount = 1; + close(); + } + + @Override + public HTableDescriptor[] listTables() throws IOException { + MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitor(); + try { + GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest(null); + return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req)); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + master.close(); + } + } + + @Override + public HTableDescriptor[] getHTableDescriptors(List tableNames) throws IOException { + if (tableNames == null || tableNames.isEmpty()) return new HTableDescriptor[0]; + MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitor(); + try { + GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest(tableNames); + return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req)); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + master.close(); + } + } + + /** + * Connects to the master to get the table descriptor. + * @param tableName table name + * @return + * @throws IOException if the connection to master fails or if the table + * is not found. 
+ */ + @Override + public HTableDescriptor getHTableDescriptor(final byte[] tableName) + throws IOException { + if (tableName == null || tableName.length == 0) return null; + if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { + return new UnmodifyableHTableDescriptor(HTableDescriptor.ROOT_TABLEDESC); + } + if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { + return HTableDescriptor.META_TABLEDESC; + } + MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitor(); + GetTableDescriptorsResponse htds; + try { + GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest(null); + htds = master.getTableDescriptors(null, req); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + master.close(); + } + for (TableSchema ts : htds.getTableSchemaList()) { + if (Bytes.equals(tableName, ts.getName().toByteArray())) { + return HTableDescriptor.convert(ts); + } + } + throw new TableNotFoundException(Bytes.toString(tableName)); + } + } + + /** + * Set the number of retries to use serverside when trying to communicate + * with another server over {@link HConnection}. Used updating catalog + * tables, etc. Call this method before we create any Connections. + * @param c The Configuration instance to set the retries into. + * @param log Used to log what we set in here. + */ + public static void setServerSideHConnectionRetries(final Configuration c, + final Log log) { + int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + // Go big. Multiply by 10. If we can't get to meta after this many retries + // then something seriously wrong. + int serversideMultiplier = + c.getInt("hbase.client.serverside.retries.multiplier", 10); + int retries = hcRetries * serversideMultiplier; + c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries); + log.debug("HConnection retries=" + retries); + } +} + diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java new file mode 100644 index 0000000..46b0720 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -0,0 +1,1299 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import java.io.Closeable; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.TreeMap; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; +import org.apache.hadoop.hbase.client.coprocessor.Batch; +import org.apache.hadoop.hbase.filter.BinaryComparator; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.ipc.RegionCoprocessorRpcChannel; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.LockRowRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.LockRowResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.UnlockRowRequest; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Threads; + +import com.google.protobuf.Service; +import com.google.protobuf.ServiceException; + +/** + *

Used to communicate with a single HBase table. + * + *
This class is not thread safe for reads or writes. + * + *
In case of writes (Put, Delete), the underlying write buffer can + * be corrupted if multiple threads contend over a single HTable instance. + * + *
In case of reads, some fields used by a Scan are shared among all threads; + * the HTable implementation therefore makes no guarantee of safety for + * concurrent Gets either. + * + *
To access a table in a multi-threaded environment, please consider + * using the {@link HTablePool} class to create your HTable instances. + * + *

Instances of HTable passed the same {@link Configuration} instance will + * share connections to servers out on the cluster and to the zookeeper ensemble + * as well as caches of region locations. This is usually a *good* thing and it + * is recommended to reuse the same configuration object for all your tables. + * This happens because they will all share the same underlying + * {@link HConnection} instance. See {@link HConnectionManager} for more on + * how this mechanism works. + * + *

{@link HConnection} will read most of the + * configuration it needs from the passed {@link Configuration} on initial + * construction. Thereafter, for settings such as + * hbase.client.pause, hbase.client.retries.number, + * and hbase.client.rpc.maxattempts updating their values in the + * passed {@link Configuration} subsequent to {@link HConnection} construction + * will go unnoticed. To run with changed values, make a new + * {@link HTable} passing a new {@link Configuration} instance that has the + * new configuration. + * + *
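[Editor's note: an illustrative sketch, not part of the patch, of the configuration-sharing advice above. The table names "metrics" and "events" and the family "cf" are placeholders; both HTable instances reuse the same Configuration and therefore the same underlying HConnection, ZooKeeper session and region cache.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class SharedConnectionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // one Configuration for the whole process
    HTable metrics = new HTable(conf, "metrics");     // placeholder table names; both HTables
    HTable events = new HTable(conf, "events");       // share one underlying HConnection
    try {
      Put p = new Put(Bytes.toBytes("row1"));
      p.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      metrics.put(p);
    } finally {
      metrics.close();
      events.close();
    }
  }
}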

Note that this class implements the {@link Closeable} interface. When a + * HTable instance is no longer required, it *should* be closed in order to ensure + * that the underlying resources are promptly released. Please note that the close + * method can throw java.io.IOException that must be handled. + * + * @see HBaseAdmin for create, drop, list, enable and disable of tables. + * @see HConnection + * @see HConnectionManager + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class HTable implements HTableInterface { + private static final Log LOG = LogFactory.getLog(HTable.class); + private HConnection connection; + private final byte [] tableName; + private volatile Configuration configuration; + private final ArrayList writeBuffer = new ArrayList(); + private long writeBufferSize; + private boolean clearBufferOnFail; + private boolean autoFlush; + private long currentWriteBufferSize; + protected int scannerCaching; + private int maxKeyValueSize; + private ExecutorService pool; // For Multi + private boolean closed; + private int operationTimeout; + private static final int DOPUT_WB_CHECK = 10; // i.e., doPut checks the writebuffer every X Puts. + private final boolean cleanupPoolOnClose; // shutdown the pool in close() + private final boolean cleanupConnectionOnClose; // close the connection in close() + + /** + * Creates an object to access a HBase table. + * Shares zookeeper connection and other resources with other HTable instances + * created with the same conf instance. Uses already-populated + * region cache if one is available, populated by any other HTable instances + * sharing this conf instance. Recommended. + * @param conf Configuration object to use. + * @param tableName Name of the table. + * @throws IOException if a remote or network exception occurs + */ + public HTable(Configuration conf, final String tableName) + throws IOException { + this(conf, Bytes.toBytes(tableName)); + } + + + /** + * Creates an object to access a HBase table. + * Shares zookeeper connection and other resources with other HTable instances + * created with the same conf instance. Uses already-populated + * region cache if one is available, populated by any other HTable instances + * sharing this conf instance. Recommended. + * @param conf Configuration object to use. + * @param tableName Name of the table. + * @throws IOException if a remote or network exception occurs + */ + public HTable(Configuration conf, final byte [] tableName) + throws IOException { + this.tableName = tableName; + this.cleanupPoolOnClose = this.cleanupConnectionOnClose = true; + if (conf == null) { + this.connection = null; + return; + } + this.connection = HConnectionManager.getConnection(conf); + this.configuration = conf; + + int maxThreads = conf.getInt("hbase.htable.threads.max", Integer.MAX_VALUE); + if (maxThreads == 0) { + maxThreads = 1; // is there a better default? + } + long keepAliveTime = conf.getLong("hbase.htable.threads.keepalivetime", 60); + + // Using the "direct handoff" approach, new threads will only be created + // if it is necessary and will grow unbounded. This could be bad but in HCM + // we only create as many Runnables as there are region servers. It means + // it also scales when new region servers are added. 
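[Editor's note: a minimal, self-contained sketch of the "direct handoff" executor described in the comment above; the patch's own pool construction follows right after this note. The core size, keep-alive time and cap shown here are illustrative, not the patch's defaults.]

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class DirectHandoffPool {
  public static ThreadPoolExecutor create(int maxThreads) {
    // A SynchronousQueue holds no tasks: every submit is handed straight to an idle
    // thread, or a new thread is created, up to maxThreads.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1, maxThreads, 60, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());
    pool.allowCoreThreadTimeOut(true); // let idle threads, including the core one, exit
    return pool;
  }
}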
+ this.pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS, + new SynchronousQueue(), Threads.newDaemonThreadFactory("hbase-table")); + ((ThreadPoolExecutor) this.pool).allowCoreThreadTimeOut(true); + + this.finishSetup(); + } + + /** + * Creates an object to access a HBase table. + * Shares zookeeper connection and other resources with other HTable instances + * created with the same conf instance. Uses already-populated + * region cache if one is available, populated by any other HTable instances + * sharing this conf instance. + * Use this constructor when the ExecutorService is externally managed. + * @param conf Configuration object to use. + * @param tableName Name of the table. + * @param pool ExecutorService to be used. + * @throws IOException if a remote or network exception occurs + */ + public HTable(Configuration conf, final byte[] tableName, final ExecutorService pool) + throws IOException { + this.connection = HConnectionManager.getConnection(conf); + this.configuration = conf; + this.pool = pool; + this.tableName = tableName; + this.cleanupPoolOnClose = false; + this.cleanupConnectionOnClose = true; + + this.finishSetup(); + } + + /** + * Creates an object to access a HBase table. + * Shares zookeeper connection and other resources with other HTable instances + * created with the same connection instance. + * Use this constructor when the ExecutorService and HConnection instance are + * externally managed. + * @param tableName Name of the table. + * @param connection HConnection to be used. + * @param pool ExecutorService to be used. + * @throws IOException if a remote or network exception occurs + */ + public HTable(final byte[] tableName, final HConnection connection, + final ExecutorService pool) throws IOException { + if (pool == null || pool.isShutdown()) { + throw new IllegalArgumentException("Pool is null or shut down."); + } + if (connection == null || connection.isClosed()) { + throw new IllegalArgumentException("Connection is null or closed."); + } + this.tableName = tableName; + this.cleanupPoolOnClose = this.cleanupConnectionOnClose = false; + this.connection = connection; + this.configuration = connection.getConfiguration(); + this.pool = pool; + + this.finishSetup(); + } + + /** + * setup this HTable's parameter based on the passed configuration + */ + private void finishSetup() throws IOException { + this.connection.locateRegion(tableName, HConstants.EMPTY_START_ROW); + this.operationTimeout = HTableDescriptor.isMetaTable(tableName) ? HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT + : this.configuration.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + this.writeBufferSize = this.configuration.getLong( + "hbase.client.write.buffer", 2097152); + this.clearBufferOnFail = true; + this.autoFlush = true; + this.currentWriteBufferSize = 0; + this.scannerCaching = this.configuration.getInt( + HConstants.HBASE_CLIENT_SCANNER_CACHING, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); + + this.maxKeyValueSize = this.configuration.getInt( + "hbase.client.keyvalue.maxsize", -1); + this.closed = false; + } + + /** + * {@inheritDoc} + */ + @Override + public Configuration getConfiguration() { + return configuration; + } + + /** + * Tells whether or not a table is enabled or not. This method creates a + * new HBase configuration, so it might make your unit tests fail due to + * incorrect ZK client port. + * @param tableName Name of table to check. 
+ * @return {@code true} if table is online. + * @throws IOException if a remote or network exception occurs + * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])} + */ + @Deprecated + public static boolean isTableEnabled(String tableName) throws IOException { + return isTableEnabled(Bytes.toBytes(tableName)); + } + + /** + * Tells whether or not a table is enabled or not. This method creates a + * new HBase configuration, so it might make your unit tests fail due to + * incorrect ZK client port. + * @param tableName Name of table to check. + * @return {@code true} if table is online. + * @throws IOException if a remote or network exception occurs + * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])} + */ + @Deprecated + public static boolean isTableEnabled(byte[] tableName) throws IOException { + return isTableEnabled(HBaseConfiguration.create(), tableName); + } + + /** + * Tells whether or not a table is enabled or not. + * @param conf The Configuration object to use. + * @param tableName Name of table to check. + * @return {@code true} if table is online. + * @throws IOException if a remote or network exception occurs + * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])} + */ + @Deprecated + public static boolean isTableEnabled(Configuration conf, String tableName) + throws IOException { + return isTableEnabled(conf, Bytes.toBytes(tableName)); + } + + /** + * Tells whether or not a table is enabled or not. + * @param conf The Configuration object to use. + * @param tableName Name of table to check. + * @return {@code true} if table is online. + * @throws IOException if a remote or network exception occurs + * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[] tableName)} + */ + @Deprecated + public static boolean isTableEnabled(Configuration conf, + final byte[] tableName) throws IOException { + return HConnectionManager.execute(new HConnectable(conf) { + @Override + public Boolean connect(HConnection connection) throws IOException { + return connection.isTableEnabled(tableName); + } + }); + } + + /** + * Find region location hosting passed row using cached info + * @param row Row to find. + * @return The location of the given row. + * @throws IOException if a remote or network exception occurs + */ + public HRegionLocation getRegionLocation(final String row) + throws IOException { + return connection.getRegionLocation(tableName, Bytes.toBytes(row), false); + } + + /** + * Finds the region on which the given row is being served. + * @param row Row to find. + * @return Location of the row. + * @throws IOException if a remote or network exception occurs + * @deprecated use {@link #getRegionLocation(byte [], boolean)} instead + */ + public HRegionLocation getRegionLocation(final byte [] row) + throws IOException { + return connection.getRegionLocation(tableName, row, false); + } + + /** + * Finds the region on which the given row is being served. + * @param row Row to find. + * @param reload whether or not to reload information or just use cached + * information + * @return Location of the row. + * @throws IOException if a remote or network exception occurs + */ + public HRegionLocation getRegionLocation(final byte [] row, boolean reload) + throws IOException { + return connection.getRegionLocation(tableName, row, reload); + } + + /** + * {@inheritDoc} + */ + @Override + public byte [] getTableName() { + return this.tableName; + } + + /** + * INTERNAL Used by unit tests and tools to do low-level + * manipulations. + * @return An HConnection instance. 
+ * @deprecated This method will be changed from public to package protected. + */ + // TODO(tsuna): Remove this. Unit tests shouldn't require public helpers. + @Deprecated + public HConnection getConnection() { + return this.connection; + } + + /** + * Gets the number of rows that a scanner will fetch at once. + *

+ * The default value comes from {@code hbase.client.scanner.caching}. + * @deprecated Use {@link Scan#setCaching(int)} and {@link Scan#getCaching()} + */ + @Deprecated + public int getScannerCaching() { + return scannerCaching; + } + + /** + * Sets the number of rows that a scanner will fetch at once. + *

+ * This will override the value specified by + * {@code hbase.client.scanner.caching}. + * Increasing this value will reduce the amount of work needed each time + * {@code next()} is called on a scanner, at the expense of memory use + * (since more rows will need to be maintained in memory by the scanners). + * @param scannerCaching the number of rows a scanner will fetch at once. + * @deprecated Use {@link Scan#setCaching(int)} + */ + @Deprecated + public void setScannerCaching(int scannerCaching) { + this.scannerCaching = scannerCaching; + } + + /** + * {@inheritDoc} + */ + @Override + public HTableDescriptor getTableDescriptor() throws IOException { + return new UnmodifyableHTableDescriptor( + this.connection.getHTableDescriptor(this.tableName)); + } + + /** + * Gets the starting row key for every region in the currently open table. + *
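[Editor's note: an illustrative sketch, not part of the patch. As the deprecation notes above suggest, scanner caching is better set on the Scan itself; the family name and caching value are placeholders.]

import java.io.IOException;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanCachingExample {
  static long countRows(HTable table) throws IOException {
    Scan scan = new Scan();
    scan.setCaching(500);                 // rows fetched per RPC, set on the Scan rather than the deprecated HTable setter
    scan.addFamily(Bytes.toBytes("cf"));
    ResultScanner scanner = table.getScanner(scan);
    long count = 0;
    try {
      for (Result r : scanner) {
        count++;                          // process r as needed
      }
    } finally {
      scanner.close();                    // always release the server-side scanner
    }
    return count;
  }
}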

+ * This is mainly useful for the MapReduce integration. + * @return Array of region starting row keys + * @throws IOException if a remote or network exception occurs + */ + public byte [][] getStartKeys() throws IOException { + return getStartEndKeys().getFirst(); + } + + /** + * Gets the ending row key for every region in the currently open table. + *

+ * This is mainly useful for the MapReduce integration. + * @return Array of region ending row keys + * @throws IOException if a remote or network exception occurs + */ + public byte[][] getEndKeys() throws IOException { + return getStartEndKeys().getSecond(); + } + + /** + * Gets the starting and ending row keys for every region in the currently + * open table. + *

+ * This is mainly useful for the MapReduce integration. + * @return Pair of arrays of region starting and ending row keys + * @throws IOException if a remote or network exception occurs + */ + public Pair<byte[][], byte[][]> getStartEndKeys() throws IOException { + NavigableMap<HRegionInfo, ServerName> regions = getRegionLocations(); + final List<byte[]> startKeyList = new ArrayList<byte[]>(regions.size()); + final List<byte[]> endKeyList = new ArrayList<byte[]>(regions.size()); + + for (HRegionInfo region : regions.keySet()) { + startKeyList.add(region.getStartKey()); + endKeyList.add(region.getEndKey()); + } + + return new Pair<byte[][], byte[][]>( + startKeyList.toArray(new byte[startKeyList.size()][]), + endKeyList.toArray(new byte[endKeyList.size()][])); + } + + /** + * Gets all the regions and their addresses for this table. + *

+ * This is mainly useful for the MapReduce integration. + * @return A map of HRegionInfo with its server address + * @throws IOException if a remote or network exception occurs + */ + public NavigableMap<HRegionInfo, ServerName> getRegionLocations() throws IOException { + // TODO: Odd that this returns a Map of HRI to SN whereas getRegionLocation, singular, returns an HRegionLocation. + return MetaScanner.allTableRegions(getConfiguration(), getTableName(), false); + } + + /** + * Gets the corresponding regions for an arbitrary range of keys. + *
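[Editor's note: an illustrative sketch, not part of the patch, of how a MapReduce-style caller might use the region metadata methods above to inspect per-region boundaries.]

import java.io.IOException;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class RegionBoundaries {
  static void printBoundaries(HTable table) throws IOException {
    Pair<byte[][], byte[][]> keys = table.getStartEndKeys();
    byte[][] startKeys = keys.getFirst();
    byte[][] endKeys = keys.getSecond();
    for (int i = 0; i < startKeys.length; i++) {
      // One (start, end) pair per region; an empty array marks the table boundary.
      System.out.println(Bytes.toStringBinary(startKeys[i]) + " .. "
          + Bytes.toStringBinary(endKeys[i]));
    }
  }
}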

+ * @param startKey Starting row in range, inclusive + * @param endKey Ending row in range, exclusive + * @return A list of HRegionLocations corresponding to the regions that + * contain the specified range + * @throws IOException if a remote or network exception occurs + */ + public List getRegionsInRange(final byte [] startKey, + final byte [] endKey) throws IOException { + final boolean endKeyIsEndOfTable = Bytes.equals(endKey, + HConstants.EMPTY_END_ROW); + if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) { + throw new IllegalArgumentException( + "Invalid range: " + Bytes.toStringBinary(startKey) + + " > " + Bytes.toStringBinary(endKey)); + } + final List regionList = new ArrayList(); + byte [] currentKey = startKey; + do { + HRegionLocation regionLocation = getRegionLocation(currentKey, false); + regionList.add(regionLocation); + currentKey = regionLocation.getRegionInfo().getEndKey(); + } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) && + (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0)); + return regionList; + } + + /** + * {@inheritDoc} + */ + @Override + public Result getRowOrBefore(final byte[] row, final byte[] family) + throws IOException { + return new ServerCallable(connection, tableName, row, operationTimeout) { + public Result call() throws IOException { + return ProtobufUtil.getRowOrBefore(server, + location.getRegionInfo().getRegionName(), row, family); + } + }.withRetries(); + } + + /** + * {@inheritDoc} + */ + @Override + public ResultScanner getScanner(final Scan scan) throws IOException { + if (scan.getCaching() <= 0) { + scan.setCaching(getScannerCaching()); + } + return new ClientScanner(getConfiguration(), scan, getTableName(), + this.connection); + } + + /** + * {@inheritDoc} + */ + @Override + public ResultScanner getScanner(byte [] family) throws IOException { + Scan scan = new Scan(); + scan.addFamily(family); + return getScanner(scan); + } + + /** + * {@inheritDoc} + */ + @Override + public ResultScanner getScanner(byte [] family, byte [] qualifier) + throws IOException { + Scan scan = new Scan(); + scan.addColumn(family, qualifier); + return getScanner(scan); + } + + /** + * {@inheritDoc} + */ + @Override + public Result get(final Get get) throws IOException { + return new ServerCallable(connection, tableName, get.getRow(), operationTimeout) { + public Result call() throws IOException { + return ProtobufUtil.get(server, + location.getRegionInfo().getRegionName(), get); + } + }.withRetries(); + } + + /** + * {@inheritDoc} + */ + @Override + public Result[] get(List gets) throws IOException { + try { + Object [] r1 = batch((List)gets); + + // translate. 
+ Result [] results = new Result[r1.length]; + int i=0; + for (Object o : r1) { + // batch ensures if there is a failure we get an exception instead + results[i++] = (Result) o; + } + + return results; + } catch (InterruptedException e) { + throw new IOException(e); + } + } + + @Override + public void batch(final List actions, final Object[] results) + throws InterruptedException, IOException { + connection.processBatchCallback(actions, tableName, pool, results, null); + } + + @Override + public Object[] batch(final List actions) + throws InterruptedException, IOException { + Object[] results = new Object[actions.size()]; + connection.processBatchCallback(actions, tableName, pool, results, null); + return results; + } + + @Override + public void batchCallback( + final List actions, final Object[] results, final Batch.Callback callback) + throws IOException, InterruptedException { + connection.processBatchCallback(actions, tableName, pool, results, callback); + } + + @Override + public Object[] batchCallback( + final List actions, final Batch.Callback callback) throws IOException, + InterruptedException { + Object[] results = new Object[actions.size()]; + connection.processBatchCallback(actions, tableName, pool, results, callback); + return results; + } + + /** + * {@inheritDoc} + */ + @Override + public void delete(final Delete delete) + throws IOException { + new ServerCallable(connection, tableName, delete.getRow(), operationTimeout) { + public Boolean call() throws IOException { + try { + MutateRequest request = RequestConverter.buildMutateRequest( + location.getRegionInfo().getRegionName(), delete); + MutateResponse response = server.mutate(null, request); + return Boolean.valueOf(response.getProcessed()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + }.withRetries(); + } + + /** + * {@inheritDoc} + */ + @Override + public void delete(final List deletes) + throws IOException { + Object[] results = new Object[deletes.size()]; + try { + connection.processBatch((List) deletes, tableName, pool, results); + } catch (InterruptedException e) { + throw new IOException(e); + } finally { + // mutate list so that it is empty for complete success, or contains only failed records + // results are returned in the same order as the requests in list + // walk the list backwards, so we can remove from list without impacting the indexes of earlier members + for (int i = results.length - 1; i>=0; i--) { + // if result is not null, it succeeded + if (results[i] instanceof Result) { + deletes.remove(i); + } + } + } + } + + /** + * {@inheritDoc} + */ + @Override + public void put(final Put put) throws IOException { + doPut(Arrays.asList(put)); + } + + /** + * {@inheritDoc} + */ + @Override + public void put(final List puts) throws IOException { + doPut(puts); + } + + private void doPut(final List puts) throws IOException { + int n = 0; + for (Put put : puts) { + validatePut(put); + writeBuffer.add(put); + currentWriteBufferSize += put.heapSize(); + + // we need to periodically see if the writebuffer is full instead of waiting until the end of the List + n++; + if (n % DOPUT_WB_CHECK == 0 && currentWriteBufferSize > writeBufferSize) { + flushCommits(); + } + } + if (autoFlush || currentWriteBufferSize > writeBufferSize) { + flushCommits(); + } + } + + /** + * {@inheritDoc} + */ + @Override + public void mutateRow(final RowMutations rm) throws IOException { + new ServerCallable(connection, tableName, rm.getRow(), + operationTimeout) { + public Void call() throws 
IOException { + try { + MultiRequest request = RequestConverter.buildMultiRequest( + location.getRegionInfo().getRegionName(), rm); + server.multi(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + return null; + } + }.withRetries(); + } + + /** + * {@inheritDoc} + */ + @Override + public Result append(final Append append) throws IOException { + if (append.numFamilies() == 0) { + throw new IOException( + "Invalid arguments to append, no columns specified"); + } + return new ServerCallable(connection, tableName, append.getRow(), operationTimeout) { + public Result call() throws IOException { + try { + MutateRequest request = RequestConverter.buildMutateRequest( + location.getRegionInfo().getRegionName(), append); + MutateResponse response = server.mutate(null, request); + if (!response.hasResult()) return null; + return ProtobufUtil.toResult(response.getResult()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + }.withRetries(); + } + + /** + * {@inheritDoc} + */ + @Override + public Result increment(final Increment increment) throws IOException { + if (!increment.hasFamilies()) { + throw new IOException( + "Invalid arguments to increment, no columns specified"); + } + return new ServerCallable(connection, tableName, increment.getRow(), operationTimeout) { + public Result call() throws IOException { + try { + MutateRequest request = RequestConverter.buildMutateRequest( + location.getRegionInfo().getRegionName(), increment); + MutateResponse response = server.mutate(null, request); + return ProtobufUtil.toResult(response.getResult()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + }.withRetries(); + } + + /** + * {@inheritDoc} + */ + @Override + public long incrementColumnValue(final byte [] row, final byte [] family, + final byte [] qualifier, final long amount) + throws IOException { + return incrementColumnValue(row, family, qualifier, amount, true); + } + + /** + * {@inheritDoc} + */ + @Override + public long incrementColumnValue(final byte [] row, final byte [] family, + final byte [] qualifier, final long amount, final boolean writeToWAL) + throws IOException { + NullPointerException npe = null; + if (row == null) { + npe = new NullPointerException("row is null"); + } else if (family == null) { + npe = new NullPointerException("family is null"); + } else if (qualifier == null) { + npe = new NullPointerException("qualifier is null"); + } + if (npe != null) { + throw new IOException( + "Invalid arguments to incrementColumnValue", npe); + } + return new ServerCallable(connection, tableName, row, operationTimeout) { + public Long call() throws IOException { + try { + MutateRequest request = RequestConverter.buildMutateRequest( + location.getRegionInfo().getRegionName(), row, family, + qualifier, amount, writeToWAL); + MutateResponse response = server.mutate(null, request); + Result result = ProtobufUtil.toResult(response.getResult()); + return Long.valueOf(Bytes.toLong(result.getValue(family, qualifier))); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + }.withRetries(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean checkAndPut(final byte [] row, + final byte [] family, final byte [] qualifier, final byte [] value, + final Put put) + throws IOException { + return new ServerCallable(connection, tableName, row, operationTimeout) { + public Boolean call() throws IOException { + try { + MutateRequest 
request = RequestConverter.buildMutateRequest( + location.getRegionInfo().getRegionName(), row, family, qualifier, + new BinaryComparator(value), CompareType.EQUAL, put); + MutateResponse response = server.mutate(null, request); + return Boolean.valueOf(response.getProcessed()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + }.withRetries(); + } + + + /** + * {@inheritDoc} + */ + @Override + public boolean checkAndDelete(final byte [] row, + final byte [] family, final byte [] qualifier, final byte [] value, + final Delete delete) + throws IOException { + return new ServerCallable(connection, tableName, row, operationTimeout) { + public Boolean call() throws IOException { + try { + MutateRequest request = RequestConverter.buildMutateRequest( + location.getRegionInfo().getRegionName(), row, family, qualifier, + new BinaryComparator(value), CompareType.EQUAL, delete); + MutateResponse response = server.mutate(null, request); + return Boolean.valueOf(response.getProcessed()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + }.withRetries(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean exists(final Get get) throws IOException { + return new ServerCallable(connection, tableName, get.getRow(), operationTimeout) { + public Boolean call() throws IOException { + try { + GetRequest request = RequestConverter.buildGetRequest( + location.getRegionInfo().getRegionName(), get, true); + GetResponse response = server.get(null, request); + return response.getExists(); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + }.withRetries(); + } + + /** + * {@inheritDoc} + */ + @Override + public void flushCommits() throws IOException { + Object[] results = new Object[writeBuffer.size()]; + boolean success = false; + try { + this.connection.processBatch(writeBuffer, tableName, pool, results); + success = true; + } catch (InterruptedException e) { + throw new InterruptedIOException(e.getMessage()); + } finally { + // mutate list so that it is empty for complete success, or contains + // only failed records. Results are returned in the same order as the + // requests in list. Walk the list backwards, so we can remove from list + // without impacting the indexes of earlier members + currentWriteBufferSize = 0; + if (success || clearBufferOnFail) { + writeBuffer.clear(); + } else { + for (int i = results.length - 1; i >= 0; i--) { + if (results[i] instanceof Result) { + writeBuffer.remove(i); + } else { + currentWriteBufferSize += writeBuffer.get(i).heapSize(); + } + } + } + } + } + + /** + * Process a mixed batch of Get, Put and Delete actions. All actions for a + * RegionServer are forwarded in one RPC call. Queries are executed in parallel. + * + * @param list The collection of actions. + * @param results An empty array, same size as list. If an exception is thrown, + * you can test here for partial results, and to determine which actions + * processed successfully. + * @throws IOException if there are problems talking to META. Per-item + * exceptions are stored in the results array. + */ + public void processBatchCallback( + final List list, final Object[] results, final Batch.Callback callback) + throws IOException, InterruptedException { + connection.processBatchCallback(list, tableName, pool, results, callback); + } + + + /** + * Parameterized batch processing, allowing varying return types for different + * {@link Row} implementations. 
+ */ + public void processBatch(final List list, final Object[] results) + throws IOException, InterruptedException { + + this.processBatchCallback(list, results, null); + } + + + @Override + public void close() throws IOException { + if (this.closed) { + return; + } + flushCommits(); + if (cleanupPoolOnClose) { + this.pool.shutdown(); + } + if (cleanupConnectionOnClose) { + if (this.connection != null) { + this.connection.close(); + } + } + this.closed = true; + } + + // validate for well-formedness + private void validatePut(final Put put) throws IllegalArgumentException{ + if (put.isEmpty()) { + throw new IllegalArgumentException("No columns to insert"); + } + if (maxKeyValueSize > 0) { + for (List list : put.getFamilyMap().values()) { + for (KeyValue kv : list) { + if (kv.getLength() > maxKeyValueSize) { + throw new IllegalArgumentException("KeyValue size too large"); + } + } + } + } + } + + /** + * {@inheritDoc} + */ + @Override + public RowLock lockRow(final byte [] row) + throws IOException { + return new ServerCallable(connection, tableName, row, operationTimeout) { + public RowLock call() throws IOException { + try { + LockRowRequest request = RequestConverter.buildLockRowRequest( + location.getRegionInfo().getRegionName(), row); + LockRowResponse response = server.lockRow(null, request); + return new RowLock(row, response.getLockId()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + }.withRetries(); + } + + /** + * {@inheritDoc} + */ + @Override + public void unlockRow(final RowLock rl) + throws IOException { + new ServerCallable(connection, tableName, rl.getRow(), operationTimeout) { + public Boolean call() throws IOException { + try { + UnlockRowRequest request = RequestConverter.buildUnlockRowRequest( + location.getRegionInfo().getRegionName(), rl.getLockId()); + server.unlockRow(null, request); + return Boolean.TRUE; + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + }.withRetries(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean isAutoFlush() { + return autoFlush; + } + + /** + * See {@link #setAutoFlush(boolean, boolean)} + * + * @param autoFlush + * Whether or not to enable 'auto-flush'. + */ + public void setAutoFlush(boolean autoFlush) { + setAutoFlush(autoFlush, autoFlush); + } + + /** + * Turns 'auto-flush' on or off. + *

+ * When enabled (default), {@link Put} operations don't get buffered/delayed + * and are immediately executed. Failed operations are not retried. This is + * slower but safer. + *

+ * Turning off {@link #autoFlush} means that multiple {@link Put}s will be + * accepted before any RPC is actually sent to do the write operations. If the + * application dies before pending writes get flushed to HBase, data will be + * lost. + *

+ * When you turn {@link #autoFlush} off, you should also consider the + * {@link #clearBufferOnFail} option. By default, asynchronous {@link Put} + * requests will be retried on failure until successful. However, this can + * pollute the writeBuffer and slow down batching performance. Additionally, + * you may want to issue a number of Put requests and call + * {@link #flushCommits()} as a barrier. In both use cases, consider setting + * clearBufferOnFail to true to erase the buffer after {@link #flushCommits()} + * has been called, regardless of success. + * + * @param autoFlush + * Whether or not to enable 'auto-flush'. + * @param clearBufferOnFail + * Whether to keep Put failures in the writeBuffer + * @see #flushCommits + */ + public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { + this.autoFlush = autoFlush; + this.clearBufferOnFail = autoFlush || clearBufferOnFail; + } + + /** + * Returns the maximum size in bytes of the write buffer for this HTable. + *
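[Editor's note: a sketch of the buffered-write pattern described above, not part of the patch. Table handling and column names are placeholders, and clearBufferOnFail is set to true so the buffer is dropped rather than retried after a failed flush.]

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedWrites {
  static void bulkPut(HTable table, List<byte[]> rows) throws IOException {
    table.setAutoFlush(false, true);  // buffer Puts client-side; clear the buffer even if a flush fails
    try {
      for (byte[] row : rows) {
        Put p = new Put(row);
        p.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        table.put(p);                 // lands in the write buffer; flushed once the buffer exceeds its size
      }
    } finally {
      table.flushCommits();           // push whatever is still buffered
    }
  }
}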

+ * The default value comes from the configuration parameter + * {@code hbase.client.write.buffer}. + * @return The size of the write buffer in bytes. + */ + public long getWriteBufferSize() { + return writeBufferSize; + } + + /** + * Sets the size of the buffer in bytes. + *

+ * If the new size is less than the current amount of data in the + * write buffer, the buffer gets flushed. + * @param writeBufferSize The new write buffer size, in bytes. + * @throws IOException if a remote or network exception occurs. + */ + public void setWriteBufferSize(long writeBufferSize) throws IOException { + this.writeBufferSize = writeBufferSize; + if(currentWriteBufferSize > writeBufferSize) { + flushCommits(); + } + } + + /** + * Returns the write buffer. + * @return The current write buffer. + */ + public ArrayList getWriteBuffer() { + return writeBuffer; + } + + /** + * The pool is used for mutli requests for this HTable + * @return the pool used for mutli + */ + ExecutorService getPool() { + return this.pool; + } + + /** + * Enable or disable region cache prefetch for the table. It will be + * applied for the given table's all HTable instances who share the same + * connection. By default, the cache prefetch is enabled. + * @param tableName name of table to configure. + * @param enable Set to true to enable region cache prefetch. Or set to + * false to disable it. + * @throws IOException + */ + public static void setRegionCachePrefetch(final byte[] tableName, + final boolean enable) throws IOException { + HConnectionManager.execute(new HConnectable(HBaseConfiguration + .create()) { + @Override + public Void connect(HConnection connection) throws IOException { + connection.setRegionCachePrefetch(tableName, enable); + return null; + } + }); + } + + /** + * Enable or disable region cache prefetch for the table. It will be + * applied for the given table's all HTable instances who share the same + * connection. By default, the cache prefetch is enabled. + * @param conf The Configuration object to use. + * @param tableName name of table to configure. + * @param enable Set to true to enable region cache prefetch. Or set to + * false to disable it. + * @throws IOException + */ + public static void setRegionCachePrefetch(final Configuration conf, + final byte[] tableName, final boolean enable) throws IOException { + HConnectionManager.execute(new HConnectable(conf) { + @Override + public Void connect(HConnection connection) throws IOException { + connection.setRegionCachePrefetch(tableName, enable); + return null; + } + }); + } + + /** + * Check whether region cache prefetch is enabled or not for the table. + * @param conf The Configuration object to use. + * @param tableName name of table to check + * @return true if table's region cache prefecth is enabled. Otherwise + * it is disabled. + * @throws IOException + */ + public static boolean getRegionCachePrefetch(final Configuration conf, + final byte[] tableName) throws IOException { + return HConnectionManager.execute(new HConnectable(conf) { + @Override + public Boolean connect(HConnection connection) throws IOException { + return connection.getRegionCachePrefetch(tableName); + } + }); + } + + /** + * Check whether region cache prefetch is enabled or not for the table. + * @param tableName name of table to check + * @return true if table's region cache prefecth is enabled. Otherwise + * it is disabled. 
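[Editor's note: an illustrative use of the static region-cache-prefetch toggles above, not part of the patch; "mytable" is a placeholder.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class PrefetchToggle {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    byte[] table = Bytes.toBytes("mytable");           // placeholder table name
    HTable.setRegionCachePrefetch(conf, table, false); // affects every HTable sharing this connection
    System.out.println("prefetch enabled: " + HTable.getRegionCachePrefetch(conf, table));
  }
}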
+ * @throws IOException + */ + public static boolean getRegionCachePrefetch(final byte[] tableName) throws IOException { + return HConnectionManager.execute(new HConnectable( + HBaseConfiguration.create()) { + @Override + public Boolean connect(HConnection connection) throws IOException { + return connection.getRegionCachePrefetch(tableName); + } + }); + } + + /** + * Explicitly clears the region cache to fetch the latest value from META. + * This is a power user function: avoid unless you know the ramifications. + */ + public void clearRegionCache() { + this.connection.clearRegionCache(); + } + + /** + * {@inheritDoc} + */ + public CoprocessorRpcChannel coprocessorService(byte[] row) { + return new RegionCoprocessorRpcChannel(connection, tableName, row); + } + + /** + * {@inheritDoc} + */ + @Override + public Map coprocessorService(final Class service, + byte[] startKey, byte[] endKey, final Batch.Call callable) + throws ServiceException, Throwable { + final Map results = Collections.synchronizedMap( + new TreeMap(Bytes.BYTES_COMPARATOR)); + coprocessorService(service, startKey, endKey, callable, new Batch.Callback() { + public void update(byte[] region, byte[] row, R value) { + results.put(region, value); + } + }); + return results; + } + + /** + * {@inheritDoc} + */ + @Override + public void coprocessorService(final Class service, + byte[] startKey, byte[] endKey, final Batch.Call callable, + final Batch.Callback callback) throws ServiceException, Throwable { + + // get regions covered by the row range + List keys = getStartKeysInRange(startKey, endKey); + + Map> futures = + new TreeMap>(Bytes.BYTES_COMPARATOR); + for (final byte[] r : keys) { + final RegionCoprocessorRpcChannel channel = + new RegionCoprocessorRpcChannel(connection, tableName, r); + Future future = pool.submit( + new Callable() { + public R call() throws Exception { + T instance = ProtobufUtil.newServiceStub(service, channel); + R result = callable.call(instance); + byte[] region = channel.getLastRegion(); + if (callback != null) { + callback.update(region, r, result); + } + return result; + } + }); + futures.put(r, future); + } + for (Map.Entry> e : futures.entrySet()) { + try { + e.getValue().get(); + } catch (ExecutionException ee) { + LOG.warn("Error calling coprocessor service " + service.getName() + " for row " + + Bytes.toStringBinary(e.getKey()), ee); + throw ee.getCause(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new InterruptedIOException("Interrupted calling coprocessor service " + service.getName() + + " for row " + Bytes.toStringBinary(e.getKey())) + .initCause(ie); + } + } + } + + private List getStartKeysInRange(byte[] start, byte[] end) + throws IOException { + Pair startEndKeys = getStartEndKeys(); + byte[][] startKeys = startEndKeys.getFirst(); + byte[][] endKeys = startEndKeys.getSecond(); + + if (start == null) { + start = HConstants.EMPTY_START_ROW; + } + if (end == null) { + end = HConstants.EMPTY_END_ROW; + } + + List rangeKeys = new ArrayList(); + for (int i=0; i= 0 ) { + if (Bytes.equals(endKeys[i], HConstants.EMPTY_END_ROW) || + Bytes.compareTo(start, endKeys[i]) < 0) { + rangeKeys.add(start); + } + } else if (Bytes.equals(end, HConstants.EMPTY_END_ROW) || + Bytes.compareTo(startKeys[i], end) <= 0) { + rangeKeys.add(startKeys[i]); + } else { + break; // past stop + } + } + + return rangeKeys; + } + + public void setOperationTimeout(int operationTimeout) { + this.operationTimeout = operationTimeout; + } + + public int getOperationTimeout() { + return 
operationTimeout; + } + +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java new file mode 100644 index 0000000..1515b37 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java @@ -0,0 +1,49 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; + +import java.io.IOException; + +/** + * Factory for creating HTable instances. + * + * @since 0.21.0 + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class HTableFactory implements HTableInterfaceFactory { + @Override + public HTableInterface createHTableInterface(Configuration config, + byte[] tableName) { + try { + return new HTable(config, tableName); + } catch (IOException ioe) { + throw new RuntimeException(ioe); + } + } + + @Override + public void releaseHTableInterface(HTableInterface table) throws IOException { + table.close(); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java new file mode 100644 index 0000000..c5fc356 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java @@ -0,0 +1,557 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import com.google.protobuf.Service; +import com.google.protobuf.ServiceException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.coprocessor.Batch; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; + +/** + * Used to communicate with a single HBase table. + * + * @since 0.21.0 + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public interface HTableInterface extends Closeable { + + /** + * Gets the name of this table. + * + * @return the table name. + */ + byte[] getTableName(); + + /** + * Returns the {@link Configuration} object used by this instance. + *

+ * The reference returned is not a copy, so any change made to it will + * affect this instance. + */ + Configuration getConfiguration(); + + /** + * Gets the {@link HTableDescriptor table descriptor} for this table. + * @throws IOException if a remote or network exception occurs. + */ + HTableDescriptor getTableDescriptor() throws IOException; + + /** + * Test for the existence of columns in the table, as specified in the Get. + *

+ * + * This will return true if the Get matches one or more keys, false if not. + *
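[Editor's note: a small sketch, not part of the patch, of the server-side existence check described above; family and qualifier names are placeholders.]

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.util.Bytes;

public class ExistenceCheck {
  static boolean hasCell(HTableInterface table, byte[] row) throws IOException {
    Get get = new Get(row);
    get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    // Only a boolean comes back over the wire; the cell value itself is never transferred.
    return table.exists(get);
  }
}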

+ * + * This is a server-side call so it prevents any data from being transfered to + * the client. + * + * @param get the Get + * @return true if the specified Get matches one or more keys, false if not + * @throws IOException e + */ + boolean exists(Get get) throws IOException; + + /** + * Method that does a batch call on Deletes, Gets and Puts. The ordering of + * execution of the actions is not defined. Meaning if you do a Put and a + * Get in the same {@link #batch} call, you will not necessarily be + * guaranteed that the Get returns what the Put had put. + * + * @param actions list of Get, Put, Delete objects + * @param results Empty Object[], same size as actions. Provides access to partial + * results, in case an exception is thrown. A null in the result array means that + * the call for that action failed, even after retries + * @throws IOException + * @since 0.90.0 + */ + void batch(final List actions, final Object[] results) throws IOException, InterruptedException; + + /** + * Same as {@link #batch(List, Object[])}, but returns an array of + * results instead of using a results parameter reference. + * + * @param actions list of Get, Put, Delete objects + * @return the results from the actions. A null in the return array means that + * the call for that action failed, even after retries + * @throws IOException + * @since 0.90.0 + */ + Object[] batch(final List actions) throws IOException, InterruptedException; + + /** + * Same as {@link #batch(List, Object[])}, but with a callback. + * @since 0.96.0 + */ + public void batchCallback( + final List actions, final Object[] results, final Batch.Callback callback) + throws IOException, InterruptedException; + + + /** + * Same as {@link #batch(List)}, but with a callback. + * @since 0.96.0 + */ + public Object[] batchCallback( + List actions, Batch.Callback callback) throws IOException, + InterruptedException; + + /** + * Extracts certain cells from a given row. + * @param get The object that specifies what data to fetch and from which row. + * @return The data coming from the specified row, if it exists. If the row + * specified doesn't exist, the {@link Result} instance returned won't + * contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}. + * @throws IOException if a remote or network exception occurs. + * @since 0.20.0 + */ + Result get(Get get) throws IOException; + + /** + * Extracts certain cells from the given rows, in batch. + * + * @param gets The objects that specify what data to fetch and from which rows. + * + * @return The data coming from the specified rows, if it exists. If the row + * specified doesn't exist, the {@link Result} instance returned won't + * contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}. + * If there are any failures even after retries, there will be a null in + * the results array for those Gets, AND an exception will be thrown. + * @throws IOException if a remote or network exception occurs. + * + * @since 0.90.0 + */ + Result[] get(List gets) throws IOException; + + /** + * Return the row that matches row exactly, + * or the one that immediately precedes it. + * + * @param row A row key. + * @param family Column family to include in the {@link Result}. + * @throws IOException if a remote or network exception occurs. + * @since 0.20.0 + * + * @deprecated As of version 0.92 this method is deprecated without + * replacement. + * getRowOrBefore is used internally to find entries in .META. 
and makes + * various assumptions about the table (which are true for .META. but not + * in general) to be efficient. + */ + Result getRowOrBefore(byte[] row, byte[] family) throws IOException; + + /** + * Returns a scanner on the current table as specified by the {@link Scan} + * object. + * Note that the passed {@link Scan}'s start row and caching properties + * maybe changed. + * + * @param scan A configured {@link Scan} object. + * @return A scanner. + * @throws IOException if a remote or network exception occurs. + * @since 0.20.0 + */ + ResultScanner getScanner(Scan scan) throws IOException; + + /** + * Gets a scanner on the current table for the given family. + * + * @param family The column family to scan. + * @return A scanner. + * @throws IOException if a remote or network exception occurs. + * @since 0.20.0 + */ + ResultScanner getScanner(byte[] family) throws IOException; + + /** + * Gets a scanner on the current table for the given family and qualifier. + * + * @param family The column family to scan. + * @param qualifier The column qualifier to scan. + * @return A scanner. + * @throws IOException if a remote or network exception occurs. + * @since 0.20.0 + */ + ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException; + + + /** + * Puts some data in the table. + *
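[Editor's note: a hedged sketch, not part of the patch, of the batch() contract documented further above: a null slot in the results array means that action failed even after retries. Row keys and column names are placeholders.]

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.util.Bytes;

public class MixedBatch {
  static void run(HTableInterface table) throws Exception {
    List<Row> actions = new ArrayList<Row>();
    actions.add(new Get(Bytes.toBytes("row1")));
    Put p = new Put(Bytes.toBytes("row2"));
    p.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    actions.add(p);
    Object[] results = new Object[actions.size()];
    try {
      table.batch(actions, results);      // order of execution is not defined
    } finally {
      for (int i = 0; i < results.length; i++) {
        if (results[i] == null) {         // null slot: that action failed even after retries
          System.err.println("action " + i + " failed");
        }
      }
    }
  }
}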

+ * If {@link #isAutoFlush isAutoFlush} is false, the update is buffered + * until the internal buffer is full. + * @param put The data to put. + * @throws IOException if a remote or network exception occurs. + * @since 0.20.0 + */ + void put(Put put) throws IOException; + + /** + * Puts some data in the table, in batch. + *

+ * If {@link #isAutoFlush isAutoFlush} is false, the update is buffered + * until the internal buffer is full. + *

+ * This can be used for group commit, or for submitting user defined + * batches. The writeBuffer will be periodically inspected while the List + * is processed, so depending on the List size the writeBuffer may flush + * not at all, or more than once. + * @param puts The list of mutations to apply. The batch put is done by + * aggregating the iteration of the Puts over the write buffer + * at the client-side for a single RPC call. + * @throws IOException if a remote or network exception occurs. + * @since 0.20.0 + */ + void put(List puts) throws IOException; + + /** + * Atomically checks if a row/family/qualifier value matches the expected + * value. If it does, it adds the put. If the passed value is null, the check + * is for the lack of column (ie: non-existance) + * + * @param row to check + * @param family column family to check + * @param qualifier column qualifier to check + * @param value the expected value + * @param put data to put if check succeeds + * @throws IOException e + * @return true if the new put was executed, false otherwise + */ + boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, + byte[] value, Put put) throws IOException; + + /** + * Deletes the specified cells/row. + * + * @param delete The object that specifies what to delete. + * @throws IOException if a remote or network exception occurs. + * @since 0.20.0 + */ + void delete(Delete delete) throws IOException; + + /** + * Deletes the specified cells/rows in bulk. + * @param deletes List of things to delete. List gets modified by this + * method (in particular it gets re-ordered, so the order in which the elements + * are inserted in the list gives no guarantee as to the order in which the + * {@link Delete}s are executed). + * @throws IOException if a remote or network exception occurs. In that case + * the {@code deletes} argument will contain the {@link Delete} instances + * that have not be successfully applied. + * @since 0.20.1 + */ + void delete(List deletes) throws IOException; + + /** + * Atomically checks if a row/family/qualifier value matches the expected + * value. If it does, it adds the delete. If the passed value is null, the + * check is for the lack of column (ie: non-existance) + * + * @param row to check + * @param family column family to check + * @param qualifier column qualifier to check + * @param value the expected value + * @param delete data to delete if check succeeds + * @throws IOException e + * @return true if the new delete was executed, false otherwise + */ + boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, + byte[] value, Delete delete) throws IOException; + + /** + * Performs multiple mutations atomically on a single row. Currently + * {@link Put} and {@link Delete} are supported. + * + * @param rm object that specifies the set of mutations to perform atomically + * @throws IOException + */ + public void mutateRow(final RowMutations rm) throws IOException; + + /** + * Appends values to one or more columns within a single row. + *
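[Editor's note: an illustrative sketch, not part of the patch, of the checkAndPut and mutateRow calls declared above; column names and values are placeholders.]

import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.util.Bytes;

public class RowLevelAtomicity {
  static void example(HTableInterface table, byte[] row) throws IOException {
    byte[] cf = Bytes.toBytes("cf");

    // Only applied if cf:state still holds "pending" on the server.
    Put done = new Put(row);
    done.add(cf, Bytes.toBytes("state"), Bytes.toBytes("done"));
    boolean applied = table.checkAndPut(row, cf, Bytes.toBytes("state"),
        Bytes.toBytes("pending"), done);
    System.out.println("checkAndPut applied: " + applied);

    // A Put and a Delete applied to the same row as one atomic unit.
    RowMutations rm = new RowMutations(row);
    Delete cleanup = new Delete(row);
    cleanup.deleteColumn(cf, Bytes.toBytes("tmp"));
    rm.add(done);
    rm.add(cleanup);
    table.mutateRow(rm);
  }
}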

+ * This operation does not appear atomic to readers. Appends are done + * under a single row lock, so write operations to a row are synchronized, but + * readers do not take row locks so get and scan operations can see this + * operation partially completed. + * + * @param append object that specifies the columns and values to be used + * for the append operation + * @throws IOException e + * @return values of columns after the append operation (may be null) + */ + public Result append(final Append append) throws IOException; + + /** + * Increments one or more columns within a single row. + *

+ * This operation does not appear atomic to readers. Increments are done + * under a single row lock, so write operations to a row are synchronized, but + * readers do not take row locks so get and scan operations can see this + * operation partially completed. + * + * @param increment object that specifies the columns and amounts to be used + * for the increment operations + * @throws IOException e + * @return values of columns after the increment + */ + public Result increment(final Increment increment) throws IOException; + + /** + * Atomically increments a column value. + *

+ * Equivalent to {@link #incrementColumnValue(byte[], byte[], byte[], + * long, boolean) incrementColumnValue}(row, family, qualifier, amount, + * true)} + * @param row The row that contains the cell to increment. + * @param family The column family of the cell to increment. + * @param qualifier The column qualifier of the cell to increment. + * @param amount The amount to increment the cell with (or decrement, if the + * amount is negative). + * @return The new value, post increment. + * @throws IOException if a remote or network exception occurs. + */ + long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, + long amount) throws IOException; + + /** + * Atomically increments a column value. If the column value already exists + * and is not a big-endian long, this could throw an exception. If the column + * value does not yet exist it is initialized to amount and + * written to the specified column. + * + *
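A small counter sketch for the incrementColumnValue variants above; the family and qualifier names are illustrative only.

import java.io.IOException;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.util.Bytes;

public class CounterSketch {
  // Safe counter bump: goes through the WAL; the column is created on first use.
  static long hit(HTableInterface table, String row) throws IOException {
    return table.incrementColumnValue(Bytes.toBytes(row),
        Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
  }

  // Faster variant that skips the WAL; unflushed increments can be lost on failure.
  static long hitUnsafe(HTableInterface table, String row) throws IOException {
    return table.incrementColumnValue(Bytes.toBytes(row),
        Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L, false);
  }
}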

Setting writeToWAL to false means that in a fail scenario, you will lose + * any increments that have not been flushed. + * @param row The row that contains the cell to increment. + * @param family The column family of the cell to increment. + * @param qualifier The column qualifier of the cell to increment. + * @param amount The amount to increment the cell with (or decrement, if the + * amount is negative). + * @param writeToWAL if {@code true}, the operation will be applied to the + * Write Ahead Log (WAL). This makes the operation slower but safer, as if + * the call returns successfully, it is guaranteed that the increment will + * be safely persisted. When set to {@code false}, the call may return + * successfully before the increment is safely persisted, so it's possible + * that the increment be lost in the event of a failure happening before the + * operation gets persisted. + * @return The new value, post increment. + * @throws IOException if a remote or network exception occurs. + */ + long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, + long amount, boolean writeToWAL) throws IOException; + + /** + * Tells whether or not 'auto-flush' is turned on. + * + * @return {@code true} if 'auto-flush' is enabled (default), meaning + * {@link Put} operations don't get buffered/delayed and are immediately + * executed. + */ + boolean isAutoFlush(); + + /** + * Executes all the buffered {@link Put} operations. + *

+ * This method gets called once automatically for every {@link Put} or batch + * of {@link Put}s (when put(List) is used) when + * {@link #isAutoFlush} is {@code true}. + * @throws IOException if a remote or network exception occurs. + */ + void flushCommits() throws IOException; + + /** + * Releases any resources held or pending changes in internal buffers. + * + * @throws IOException if a remote or network exception occurs. + */ + void close() throws IOException; + + /** + * Obtains a lock on a row. + * + * @param row The row to lock. + * @return A {@link RowLock} containing the row and lock id. + * @throws IOException if a remote or network exception occurs. + * @see RowLock + * @see #unlockRow + */ + RowLock lockRow(byte[] row) throws IOException; + + /** + * Releases a row lock. + * + * @param rl The row lock to release. + * @throws IOException if a remote or network exception occurs. + * @see RowLock + * @see #unlockRow + */ + void unlockRow(RowLock rl) throws IOException; + + /** + * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the + * table region containing the specified row. The row given does not actually have + * to exist. Whichever region would contain the row based on start and end keys will + * be used. Note that the {@code row} parameter is also not passed to the + * coprocessor handler registered for this protocol, unless the {@code row} + * is separately passed as an argument in the service request. The parameter + * here is only used to locate the region used to handle the call. + * + *

+ * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published + * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations: + *

+ * + *
+ *
+   * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
+   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
+   * MyCallRequest request = MyCallRequest.newBuilder()
+   *     ...
+   *     .build();
+   * MyCallResponse response = service.myCall(null, request);
+   * 
+ * + * @param row The row key used to identify the remote region location + * @return A CoprocessorRpcChannel instance + */ + CoprocessorRpcChannel coprocessorService(byte[] row); + + /** + * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table + * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), + * and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} + * method with each {@link Service} + * instance. + * + * @param service the protocol buffer {@code Service} implementation to call + * @param startKey start region selection with region containing this row. If {@code null}, the + * selection will start with the first table region. + * @param endKey select regions up to and including the region containing this row. + * If {@code null}, selection will continue through the last table region. + * @param callable this instance's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} + * method will be invoked once per table region, using the {@link Service} + * instance connected to that region. + * @param the {@link Service} subclass to connect to + * @param Return type for the {@code callable} parameter's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method + * @return a map of result values keyed by region name + */ + Map coprocessorService(final Class service, + byte[] startKey, byte[] endKey, final Batch.Call callable) + throws ServiceException, Throwable; + + /** + * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table + * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), + * and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} + * method with each {@link Service} instance. + * + *
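A hedged sketch of the map-returning coprocessorService form declared above. It reuses the MyService, MyCallRequest and MyCallResponse placeholders from the sample earlier in this Javadoc, assumes the usual imports (Map, Batch, BlockingRpcCallback), and omits handling of the declared ServiceException/Throwable; the bridging of the asynchronous stub through a BlockingRpcCallback is one typical pattern, not the only one.

Map<byte[], MyCallResponse> results = myTable.coprocessorService(
    MyService.class,            // placeholder generated service from the sample above
    null, null,                 // null start/end key selects every region of the table
    new Batch.Call<MyService, MyCallResponse>() {
      public MyCallResponse call(MyService instance) throws IOException {
        BlockingRpcCallback<MyCallResponse> callback =
            new BlockingRpcCallback<MyCallResponse>();
        MyCallRequest request = MyCallRequest.newBuilder().build();
        instance.myCall(null, request, callback);  // async stub + blocking callback
        return callback.get();                     // response from this region
      }
    });
for (Map.Entry<byte[], MyCallResponse> entry : results.entrySet()) {
  // entry.getKey() is the region name, entry.getValue() that region's response
}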

+ * The given + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)} + * method will be called with the return value from each region's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation. + *

+ * + * @param service the protocol buffer {@code Service} implementation to call + * @param startKey start region selection with region containing this row. If {@code null}, the + * selection will start with the first table region. + * @param endKey select regions up to and including the region containing this row. + * If {@code null}, selection will continue through the last table region. + * @param callable this instance's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method + * will be invoked once per table region, using the {@link Service} instance + * connected to that region. + * @param callback + * @param the {@link Service} subclass to connect to + * @param Return type for the {@code callable} parameter's + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method + */ + void coprocessorService(final Class service, + byte[] startKey, byte[] endKey, final Batch.Call callable, + final Batch.Callback callback) throws ServiceException, Throwable; + + /** + * See {@link #setAutoFlush(boolean, boolean)} + * + * @param autoFlush + * Whether or not to enable 'auto-flush'. + */ + public void setAutoFlush(boolean autoFlush); + + /** + * Turns 'auto-flush' on or off. + *

+ * When enabled (default), {@link Put} operations don't get buffered/delayed + * and are immediately executed. Failed operations are not retried. This is + * slower but safer. + *

+ * Turning off {@code autoFlush} means that multiple {@link Put}s will be + * accepted before any RPC is actually sent to do the write operations. If the + * application dies before pending writes get flushed to HBase, data will be + * lost. + *

+ * When you turn {@code #autoFlush} off, you should also consider the + * {@code clearBufferOnFail} option. By default, asynchronous {@link Put} + * requests will be retried on failure until successful. However, this can + * pollute the writeBuffer and slow down batching performance. Additionally, + * you may want to issue a number of Put requests and call + * {@link #flushCommits()} as a barrier. In both use cases, consider setting + * clearBufferOnFail to true to erase the buffer after {@link #flushCommits()} + * has been called, regardless of success. + * + * @param autoFlush + * Whether or not to enable 'auto-flush'. + * @param clearBufferOnFail + * Whether to keep Put failures in the writeBuffer + * @see #flushCommits + */ + public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail); + + /** + * Returns the maximum size in bytes of the write buffer for this HTable. + *
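A sketch of the clearBufferOnFail pattern just described, with flushCommits() as the barrier; the table name is invented, and the write-buffer setter used here is the one declared a little further below.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;

public class ClearBufferOnFailSketch {
  // Buffers the given puts locally and drops failed puts instead of retrying them forever.
  static void writeBatch(Configuration conf, List<Put> puts) throws IOException {
    HTable table = new HTable(conf, "example_table");  // hypothetical table
    try {
      table.setAutoFlush(false, true);                 // autoFlush off, clearBufferOnFail on
      table.setWriteBufferSize(4L * 1024 * 1024);      // 4 MB instead of hbase.client.write.buffer
      for (Put put : puts) {
        table.put(put);
      }
      table.flushCommits();                            // barrier for everything buffered above
    } finally {
      table.close();
    }
  }
}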

+ * The default value comes from the configuration parameter + * {@code hbase.client.write.buffer}. + * @return The size of the write buffer in bytes. + */ + public long getWriteBufferSize(); + + /** + * Sets the size of the buffer in bytes. + *

+ * If the new size is less than the current amount of data in the + * write buffer, the buffer gets flushed. + * @param writeBufferSize The new write buffer size, in bytes. + * @throws IOException if a remote or network exception occurs. + */ + public void setWriteBufferSize(long writeBufferSize) throws IOException; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java new file mode 100644 index 0000000..40f1f47 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java @@ -0,0 +1,52 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; + + +/** + * Defines methods to create new HTableInterface. + * + * @since 0.21.0 + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public interface HTableInterfaceFactory { + + /** + * Creates a new HTableInterface. + * + * @param config HBaseConfiguration instance. + * @param tableName name of the HBase table. + * @return HTableInterface instance. + */ + HTableInterface createHTableInterface(Configuration config, byte[] tableName); + + + /** + * Release the HTable resource represented by the table. + * @param table + */ + void releaseHTableInterface(final HTableInterface table) throws IOException; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java new file mode 100644 index 0000000..ef9516f --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java @@ -0,0 +1,552 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import com.google.protobuf.Service; +import com.google.protobuf.ServiceException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.client.coprocessor.Batch; +import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.PoolMap; +import org.apache.hadoop.hbase.util.PoolMap.PoolType; + +/** + * A simple pool of HTable instances. + * + * Each HTablePool acts as a pool for all tables. To use, instantiate an + * HTablePool and use {@link #getTable(String)} to get an HTable from the pool. + * + * Once you are done with it, close your instance of {@link HTableInterface} + * by calling {@link HTableInterface#close()} rather than returning the tables + * to the pool with (deprecated) {@link #putTable(HTableInterface)}. + * + *

+ * A pool can be created with a maxSize which defines the most HTable + * references that will ever be retained for each table. Otherwise the default + * is {@link Integer#MAX_VALUE}. + * + *

+ * Pool will manage its own connections to the cluster. See + * {@link HConnectionManager}. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class HTablePool implements Closeable { + private final PoolMap tables; + private final int maxSize; + private final PoolType poolType; + private final Configuration config; + private final HTableInterfaceFactory tableFactory; + + /** + * Default Constructor. Default HBaseConfiguration and no limit on pool size. + */ + public HTablePool() { + this(HBaseConfiguration.create(), Integer.MAX_VALUE); + } + + /** + * Constructor to set maximum versions and use the specified configuration. + * + * @param config + * configuration + * @param maxSize + * maximum number of references to keep for each table + */ + public HTablePool(final Configuration config, final int maxSize) { + this(config, maxSize, null, null); + } + + /** + * Constructor to set maximum versions and use the specified configuration and + * table factory. + * + * @param config + * configuration + * @param maxSize + * maximum number of references to keep for each table + * @param tableFactory + * table factory + */ + public HTablePool(final Configuration config, final int maxSize, + final HTableInterfaceFactory tableFactory) { + this(config, maxSize, tableFactory, PoolType.Reusable); + } + + /** + * Constructor to set maximum versions and use the specified configuration and + * pool type. + * + * @param config + * configuration + * @param maxSize + * maximum number of references to keep for each table + * @param poolType + * pool type which is one of {@link PoolType#Reusable} or + * {@link PoolType#ThreadLocal} + */ + public HTablePool(final Configuration config, final int maxSize, + final PoolType poolType) { + this(config, maxSize, null, poolType); + } + + /** + * Constructor to set maximum versions and use the specified configuration, + * table factory and pool type. The HTablePool supports the + * {@link PoolType#Reusable} and {@link PoolType#ThreadLocal}. If the pool + * type is null or not one of those two values, then it will default to + * {@link PoolType#Reusable}. + * + * @param config + * configuration + * @param maxSize + * maximum number of references to keep for each table + * @param tableFactory + * table factory + * @param poolType + * pool type which is one of {@link PoolType#Reusable} or + * {@link PoolType#ThreadLocal} + */ + public HTablePool(final Configuration config, final int maxSize, + final HTableInterfaceFactory tableFactory, PoolType poolType) { + // Make a new configuration instance so I can safely cleanup when + // done with the pool. + this.config = config == null ? HBaseConfiguration.create() : config; + this.maxSize = maxSize; + this.tableFactory = tableFactory == null ? new HTableFactory() + : tableFactory; + if (poolType == null) { + this.poolType = PoolType.Reusable; + } else { + switch (poolType) { + case Reusable: + case ThreadLocal: + this.poolType = poolType; + break; + default: + this.poolType = PoolType.Reusable; + break; + } + } + this.tables = new PoolMap(this.poolType, + this.maxSize); + } + + /** + * Get a reference to the specified table from the pool. + *
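A minimal usage sketch for the pool, using the two-argument constructor shown above; the table, family and qualifier names are hypothetical.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PoolSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HTablePool pool = new HTablePool(conf, 10);   // keep at most 10 references per table name
    try {
      HTableInterface table = pool.getTable("example_table");  // hypothetical table
      try {
        Put put = new Put(Bytes.toBytes("row-1"));
        put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
        table.put(put);
      } finally {
        table.close();   // returns the proxied table to the pool; do not use putTable()
      }
    } finally {
      pool.close();      // shuts down the whole pool
    }
  }
}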

+ *

+ * + * @param tableName + * table name + * @return a reference to the specified table + * @throws RuntimeException + * if there is a problem instantiating the HTable + */ + public HTableInterface getTable(String tableName) { + // call the old getTable implementation renamed to findOrCreateTable + HTableInterface table = findOrCreateTable(tableName); + // return a proxy table so when user closes the proxy, the actual table + // will be returned to the pool + return new PooledHTable(table); + } + + /** + * Get a reference to the specified table from the pool. + *

+ * + * Create a new one if one is not available. + * + * @param tableName + * table name + * @return a reference to the specified table + * @throws RuntimeException + * if there is a problem instantiating the HTable + */ + private HTableInterface findOrCreateTable(String tableName) { + HTableInterface table = tables.get(tableName); + if (table == null) { + table = createHTable(tableName); + } + return table; + } + + /** + * Get a reference to the specified table from the pool. + *

+ * + * Create a new one if one is not available. + * + * @param tableName + * table name + * @return a reference to the specified table + * @throws RuntimeException + * if there is a problem instantiating the HTable + */ + public HTableInterface getTable(byte[] tableName) { + return getTable(Bytes.toString(tableName)); + } + + /** + * This method is not needed anymore, clients should call + * HTableInterface.close() rather than returning the tables to the pool + * + * @param table + * the proxy table user got from pool + * @deprecated + */ + public void putTable(HTableInterface table) throws IOException { + // we need to be sure nobody puts a proxy implementation in the pool + // but if the client code is not updated + // and it will continue to call putTable() instead of calling close() + // then we need to return the wrapped table to the pool instead of the + // proxy + // table + if (table instanceof PooledHTable) { + returnTable(((PooledHTable) table).getWrappedTable()); + } else { + // normally this should not happen if clients pass back the same + // table + // object they got from the pool + // but if it happens then it's better to reject it + throw new IllegalArgumentException("not a pooled table: " + table); + } + } + + /** + * Puts the specified HTable back into the pool. + *

+ * + * If the pool already contains maxSize references to the table, then + * the table instance gets closed after flushing buffered edits. + * + * @param table + * table + */ + private void returnTable(HTableInterface table) throws IOException { + // this is the old putTable method renamed and made private + String tableName = Bytes.toString(table.getTableName()); + if (tables.size(tableName) >= maxSize) { + // release table instance since we're not reusing it + this.tables.remove(tableName, table); + this.tableFactory.releaseHTableInterface(table); + return; + } + tables.put(tableName, table); + } + + protected HTableInterface createHTable(String tableName) { + return this.tableFactory.createHTableInterface(config, + Bytes.toBytes(tableName)); + } + + /** + * Closes all the HTable instances , belonging to the given table, in the + * table pool. + *

+ * Note: this is a 'shutdown' of the given table pool and different from + * {@link #putTable(HTableInterface)}, that is used to return the table + * instance to the pool for future re-use. + * + * @param tableName + */ + public void closeTablePool(final String tableName) throws IOException { + Collection tables = this.tables.values(tableName); + if (tables != null) { + for (HTableInterface table : tables) { + this.tableFactory.releaseHTableInterface(table); + } + } + this.tables.remove(tableName); + } + + /** + * See {@link #closeTablePool(String)}. + * + * @param tableName + */ + public void closeTablePool(final byte[] tableName) throws IOException { + closeTablePool(Bytes.toString(tableName)); + } + + /** + * Closes all the HTable instances , belonging to all tables in the table + * pool. + *

+ * Note: this is a 'shutdown' of all the table pools. + */ + public void close() throws IOException { + for (String tableName : tables.keySet()) { + closeTablePool(tableName); + } + this.tables.clear(); + } + + int getCurrentPoolSize(String tableName) { + return tables.size(tableName); + } + + /** + * A proxy class that implements HTableInterface.close method to return the + * wrapped table back to the table pool + * + */ + class PooledHTable implements HTableInterface { + + private HTableInterface table; // actual table implementation + + public PooledHTable(HTableInterface table) { + this.table = table; + } + + @Override + public byte[] getTableName() { + return table.getTableName(); + } + + @Override + public Configuration getConfiguration() { + return table.getConfiguration(); + } + + @Override + public HTableDescriptor getTableDescriptor() throws IOException { + return table.getTableDescriptor(); + } + + @Override + public boolean exists(Get get) throws IOException { + return table.exists(get); + } + + @Override + public void batch(List actions, Object[] results) throws IOException, + InterruptedException { + table.batch(actions, results); + } + + @Override + public Object[] batch(List actions) throws IOException, + InterruptedException { + return table.batch(actions); + } + + @Override + public Result get(Get get) throws IOException { + return table.get(get); + } + + @Override + public Result[] get(List gets) throws IOException { + return table.get(gets); + } + + @Override + @SuppressWarnings("deprecation") + public Result getRowOrBefore(byte[] row, byte[] family) throws IOException { + return table.getRowOrBefore(row, family); + } + + @Override + public ResultScanner getScanner(Scan scan) throws IOException { + return table.getScanner(scan); + } + + @Override + public ResultScanner getScanner(byte[] family) throws IOException { + return table.getScanner(family); + } + + @Override + public ResultScanner getScanner(byte[] family, byte[] qualifier) + throws IOException { + return table.getScanner(family, qualifier); + } + + @Override + public void put(Put put) throws IOException { + table.put(put); + } + + @Override + public void put(List puts) throws IOException { + table.put(puts); + } + + @Override + public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, + byte[] value, Put put) throws IOException { + return table.checkAndPut(row, family, qualifier, value, put); + } + + @Override + public void delete(Delete delete) throws IOException { + table.delete(delete); + } + + @Override + public void delete(List deletes) throws IOException { + table.delete(deletes); + } + + @Override + public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, + byte[] value, Delete delete) throws IOException { + return table.checkAndDelete(row, family, qualifier, value, delete); + } + + @Override + public Result increment(Increment increment) throws IOException { + return table.increment(increment); + } + + @Override + public long incrementColumnValue(byte[] row, byte[] family, + byte[] qualifier, long amount) throws IOException { + return table.incrementColumnValue(row, family, qualifier, amount); + } + + @Override + public long incrementColumnValue(byte[] row, byte[] family, + byte[] qualifier, long amount, boolean writeToWAL) throws IOException { + return table.incrementColumnValue(row, family, qualifier, amount, + writeToWAL); + } + + @Override + public boolean isAutoFlush() { + return table.isAutoFlush(); + } + + @Override + public void flushCommits() throws IOException 
{ + table.flushCommits(); + } + + /** + * Returns the actual table back to the pool + * + * @throws IOException + */ + public void close() throws IOException { + returnTable(table); + } + + @Override + public RowLock lockRow(byte[] row) throws IOException { + return table.lockRow(row); + } + + @Override + public void unlockRow(RowLock rl) throws IOException { + table.unlockRow(rl); + } + + @Override + public CoprocessorRpcChannel coprocessorService(byte[] row) { + return table.coprocessorService(row); + } + + @Override + public Map coprocessorService(Class service, + byte[] startKey, byte[] endKey, Batch.Call callable) + throws ServiceException, Throwable { + return table.coprocessorService(service, startKey, endKey, callable); + } + + @Override + public void coprocessorService(Class service, + byte[] startKey, byte[] endKey, Batch.Call callable, Callback callback) + throws ServiceException, Throwable { + table.coprocessorService(service, startKey, endKey, callable, callback); + } + + @Override + public String toString() { + return "PooledHTable{" + ", table=" + table + '}'; + } + + /** + * Expose the wrapped HTable to tests in the same package + * + * @return wrapped htable + */ + HTableInterface getWrappedTable() { + return table; + } + + @Override + public void batchCallback(List actions, + Object[] results, Callback callback) throws IOException, + InterruptedException { + table.batchCallback(actions, results, callback); + } + + @Override + public Object[] batchCallback(List actions, + Callback callback) throws IOException, InterruptedException { + return table.batchCallback(actions, callback); + } + + @Override + public void mutateRow(RowMutations rm) throws IOException { + table.mutateRow(rm); + } + + @Override + public Result append(Append append) throws IOException { + return table.append(append); + } + + @Override + public void setAutoFlush(boolean autoFlush) { + table.setAutoFlush(autoFlush); + } + + @Override + public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { + table.setAutoFlush(autoFlush, clearBufferOnFail); + } + + @Override + public long getWriteBufferSize() { + return table.getWriteBufferSize(); + } + + @Override + public void setWriteBufferSize(long writeBufferSize) throws IOException { + table.setWriteBufferSize(writeBufferSize); + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableUtil.java new file mode 100644 index 0000000..7ad6e65 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableUtil.java @@ -0,0 +1,141 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.lang.InterruptedException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Row; + +/** + * Utility class for HTable. + * + * + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class HTableUtil { + + private static final int INITIAL_LIST_SIZE = 250; + + /** + * Processes a List of Puts and writes them to an HTable instance in RegionServer buckets via the htable.put method. + * This will utilize the writeBuffer, thus the writeBuffer flush frequency may be tuned accordingly via htable.setWriteBufferSize. + *

+ * The benefit of submitting Puts in this manner is to minimize the number of RegionServer RPCs in each flush. + *

+ * Assumption #1: Regions have been pre-created for the table. If they haven't, then all of the Puts will go to the same region, + * defeating the purpose of this utility method. See the Apache HBase book for an explanation of how to do this. + *
+ * Assumption #2: Row-keys are not monotonically increasing. See the Apache HBase book for an explanation of this problem. + *
+ * Assumption #3: That the input list of Puts is big enough to be useful (in the thousands or more). The intent of this + * method is to process larger chunks of data. + *
+ * Assumption #4: htable.setAutoFlush(false) has been set. This is a requirement to use the writeBuffer. + *

+ * @param htable HTable instance for target HBase table + * @param puts List of Put instances + * @throws IOException if a remote or network exception occurs + * + */ + public static void bucketRsPut(HTable htable, List puts) throws IOException { + + Map> putMap = createRsPutMap(htable, puts); + for (List rsPuts: putMap.values()) { + htable.put( rsPuts ); + } + htable.flushCommits(); + } + + /** + * Processes a List of Rows (Put, Delete) and writes them to an HTable instance in RegionServer buckets via the htable.batch method. + *
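A short sketch of calling the bucketRsPut helper shown above; the table name is invented and the list of Puts is assumed to have been built elsewhere against a pre-split table.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTableUtil;
import org.apache.hadoop.hbase.client.Put;

public class BucketedPutSketch {
  static void load(Configuration conf, List<Put> puts) throws IOException {
    HTable htable = new HTable(conf, "example_table");  // hypothetical table
    try {
      htable.setAutoFlush(false);                       // Assumption #4 above
      HTableUtil.bucketRsPut(htable, puts);             // one buffered flush per region server
    } finally {
      htable.close();
    }
  }
}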

+ * The benefit of submitting Puts in this manner is to minimize the number of RegionServer RPCs, thus this will + * produce one RPC of Puts per RegionServer. + *

+ * Assumption #1: Regions have been pre-created for the table. If they haven't, then all of the Puts will go to the same region, + * defeating the purpose of this utility method. See the Apache HBase book for an explanation of how to do this. + *
+ * Assumption #2: Row-keys are not monotonically increasing. See the Apache HBase book for an explanation of this problem. + *
+ * Assumption #3: That the input list of Rows is big enough to be useful (in the thousands or more). The intent of this + * method is to process larger chunks of data. + *

+ * This method accepts a list of Row objects because the underlying .batch method accepts a list of Row objects. + *
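And a companion fragment for the batch variant, reusing an open HTable handle like the one in the previous sketch; row keys and columns are again invented.

List<Row> ops = new ArrayList<Row>();
Put put = new Put(Bytes.toBytes("row-1"));
put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
ops.add(put);
ops.add(new Delete(Bytes.toBytes("row-2")));
HTableUtil.bucketRsBatch(htable, ops);   // mixed Puts and Deletes, one batch RPC per region server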

+ * @param htable HTable instance for target HBase table + * @param rows List of Row instances + * @throws IOException if a remote or network exception occurs + */ + public static void bucketRsBatch(HTable htable, List rows) throws IOException { + + try { + Map> rowMap = createRsRowMap(htable, rows); + for (List rsRows: rowMap.values()) { + htable.batch( rsRows ); + } + } catch (InterruptedException e) { + throw new IOException(e); + } + + } + + private static Map> createRsPutMap(HTable htable, List puts) throws IOException { + + Map> putMap = new HashMap>(); + for (Put put: puts) { + HRegionLocation rl = htable.getRegionLocation( put.getRow() ); + String hostname = rl.getHostname(); + List recs = putMap.get( hostname); + if (recs == null) { + recs = new ArrayList(INITIAL_LIST_SIZE); + putMap.put( hostname, recs); + } + recs.add(put); + } + return putMap; + } + + private static Map> createRsRowMap(HTable htable, List rows) throws IOException { + + Map> rowMap = new HashMap>(); + for (Row row: rows) { + HRegionLocation rl = htable.getRegionLocation( row.getRow() ); + String hostname = rl.getHostname(); + List recs = rowMap.get( hostname); + if (recs == null) { + recs = new ArrayList(INITIAL_LIST_SIZE); + rowMap.put( hostname, recs); + } + recs.add(row); + } + return rowMap; + } + +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java new file mode 100644 index 0000000..7df6e4b --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -0,0 +1,277 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Set; +import java.util.TreeMap; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Used to perform Increment operations on a single row. + *

+ * This operation does not appear atomic to readers. Increments are done + * under a single row lock, so write operations to a row are synchronized, but + * readers do not take row locks so get and scan operations can see this + * operation partially completed. + *

+ * To increment columns of a row, instantiate an Increment object with the row + * to increment. At least one column to increment must be specified using the + * {@link #addColumn(byte[], byte[], long)} method. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class Increment implements Row { + private byte [] row = null; + private long lockId = -1L; + private boolean writeToWAL = true; + private TimeRange tr = new TimeRange(); + private Map<byte [], NavigableMap<byte [], Long>> familyMap = + new TreeMap<byte [], NavigableMap<byte [], Long>>(Bytes.BYTES_COMPARATOR); + + /** Constructor for Writable. DO NOT USE */ + public Increment() {} + + /** + * Create an Increment operation for the specified row. + *

+ * At least one column must be incremented. + * @param row row key + */ + public Increment(byte [] row) { + this(row, null); + } + + /** + * Create an Increment operation for the specified row, using an existing row + * lock. + *

+ * At least one column must be incremented. + * @param row row key + * @param rowLock previously acquired row lock, or null + */ + public Increment(byte [] row, RowLock rowLock) { + if (row == null) { + throw new IllegalArgumentException("Cannot increment a null row"); + } + this.row = row; + if(rowLock != null) { + this.lockId = rowLock.getLockId(); + } + } + + /** + * Increment the column from the specific family with the specified qualifier + * by the specified amount. + *

+ * Overrides previous calls to addColumn for this family and qualifier. + * @param family family name + * @param qualifier column qualifier + * @param amount amount to increment by + * @return the Increment object + */ + public Increment addColumn(byte [] family, byte [] qualifier, long amount) { + if (family == null) { + throw new IllegalArgumentException("family cannot be null"); + } + if (qualifier == null) { + throw new IllegalArgumentException("qualifier cannot be null"); + } + NavigableMap set = familyMap.get(family); + if(set == null) { + set = new TreeMap(Bytes.BYTES_COMPARATOR); + } + set.put(qualifier, amount); + familyMap.put(family, set); + return this; + } + + /* Accessors */ + + /** + * Method for retrieving the increment's row + * @return row + */ + public byte [] getRow() { + return this.row; + } + + /** + * Method for retrieving the increment's RowLock + * @return RowLock + */ + public RowLock getRowLock() { + return new RowLock(this.row, this.lockId); + } + + /** + * Method for retrieving the increment's lockId + * @return lockId + */ + public long getLockId() { + return this.lockId; + } + + /** + * Method for retrieving whether WAL will be written to or not + * @return true if WAL should be used, false if not + */ + public boolean getWriteToWAL() { + return this.writeToWAL; + } + + /** + * Sets whether this operation should write to the WAL or not. + * @param writeToWAL true if WAL should be used, false if not + * @return this increment operation + */ + public Increment setWriteToWAL(boolean writeToWAL) { + this.writeToWAL = writeToWAL; + return this; + } + + /** + * Gets the TimeRange used for this increment. + * @return TimeRange + */ + public TimeRange getTimeRange() { + return this.tr; + } + + /** + * Sets the TimeRange to be used on the Get for this increment. + *
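A brief sketch of building and applying an Increment; the family and qualifier names are invented, and 'table' is assumed to be an open HTable or HTableInterface.

Increment inc = new Increment(Bytes.toBytes("user-42"));
inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("logins"), 1L);
inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("page_views"), 5L);
inc.setWriteToWAL(true);               // the default; false trades durability for speed
Result result = table.increment(inc);  // post-increment values come back in the Result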

+ * This is useful when you have counters that only last for specific + * periods of time (i.e. counters that are partitioned by time). By setting + * the range of valid times for this increment, you can potentially gain + * some performance with a more optimal Get operation. + *

+ * This range is used as [minStamp, maxStamp). + * @param minStamp minimum timestamp value, inclusive + * @param maxStamp maximum timestamp value, exclusive + * @throws IOException if invalid time range + * @return this + */ + public Increment setTimeRange(long minStamp, long maxStamp) + throws IOException { + tr = new TimeRange(minStamp, maxStamp); + return this; + } + + /** + * Method for retrieving the keys in the familyMap + * @return keys in the current familyMap + */ + public Set familySet() { + return this.familyMap.keySet(); + } + + /** + * Method for retrieving the number of families to increment from + * @return number of families + */ + public int numFamilies() { + return this.familyMap.size(); + } + + /** + * Method for retrieving the number of columns to increment + * @return number of columns across all families + */ + public int numColumns() { + if (!hasFamilies()) return 0; + int num = 0; + for (NavigableMap family : familyMap.values()) { + num += family.size(); + } + return num; + } + + /** + * Method for checking if any families have been inserted into this Increment + * @return true if familyMap is non empty false otherwise + */ + public boolean hasFamilies() { + return !this.familyMap.isEmpty(); + } + + /** + * Method for retrieving the increment's familyMap + * @return familyMap + */ + public Map> getFamilyMap() { + return this.familyMap; + } + + /** + * @return String + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("row="); + sb.append(Bytes.toStringBinary(this.row)); + if(this.familyMap.size() == 0) { + sb.append(", no columns set to be incremented"); + return sb.toString(); + } + sb.append(", families="); + boolean moreThanOne = false; + for(Map.Entry> entry : + this.familyMap.entrySet()) { + if(moreThanOne) { + sb.append("), "); + } else { + moreThanOne = true; + sb.append("{"); + } + sb.append("(family="); + sb.append(Bytes.toString(entry.getKey())); + sb.append(", columns="); + if(entry.getValue() == null) { + sb.append("NONE"); + } else { + sb.append("{"); + boolean moreThanOneB = false; + for(Map.Entry column : entry.getValue().entrySet()) { + if(moreThanOneB) { + sb.append(", "); + } else { + moreThanOneB = true; + } + sb.append(Bytes.toStringBinary(column.getKey()) + "+=" + column.getValue()); + } + sb.append("}"); + } + } + sb.append("}"); + return sb.toString(); + } + + @Override + public int compareTo(Row i) { + return Bytes.compareTo(this.getRow(), i.getRow()); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java new file mode 100644 index 0000000..fb910d8 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java @@ -0,0 +1,59 @@ +/* + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Specify Isolation levels in Scan operations. + *
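A hedged sketch of how a caller might select one of the two levels described below; it assumes Scan exposes a setIsolationLevel(IsolationLevel) setter, which is not part of this hunk, and the family name is illustrative.

Scan scan = new Scan();
scan.addFamily(Bytes.toBytes("cf"));                      // illustrative family
scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);  // READ_COMMITTED is the stricter choice
ResultScanner scanner = table.getScanner(scan);           // 'table' is an open HTableInterface
try {
  for (Result r : scanner) {
    // r may include data from transactions that have not committed yet
  }
} finally {
  scanner.close();
}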

+ * There are two isolation levels. A READ_COMMITTED isolation level + * indicates that only data that is committed be returned in a scan. + * An isolation level of READ_UNCOMMITTED indicates that a scan + * should return data that is being modified by transactions that might + * not have been committed yet. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public enum IsolationLevel { + + READ_COMMITTED(1), + READ_UNCOMMITTED(2); + + IsolationLevel(int value) {} + + public byte [] toBytes() { + return new byte [] { toByte() }; + } + + public byte toByte() { + return (byte)this.ordinal(); + } + + public static IsolationLevel fromBytes(byte [] bytes) { + return IsolationLevel.fromByte(bytes[0]); + } + + public static IsolationLevel fromByte(byte vbyte) { + return IsolationLevel.values()[vbyte]; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java new file mode 100644 index 0000000..7126073 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java @@ -0,0 +1,44 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + + +import org.apache.hadoop.hbase.MasterAdminProtocol; + +import java.io.Closeable; + +/** + * A KeepAlive connection is not physically closed immediately after the close, + * but rather kept alive for a few minutes. It makes sense only if it's shared. + * + * This interface is used by a dynamic proxy. It allows to have a #close + * function in a master client. + * + * This class is intended to be used internally by HBase classes that need to + * speak the MasterAdminProtocol; but not by * final user code. Hence it's + * package protected. + */ +interface MasterAdminKeepAliveConnection extends MasterAdminProtocol, Closeable { + + @Override + public void close(); +} + diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterMonitorKeepAliveConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterMonitorKeepAliveConnection.java new file mode 100644 index 0000000..a4c7650 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterMonitorKeepAliveConnection.java @@ -0,0 +1,44 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + + +import org.apache.hadoop.hbase.MasterMonitorProtocol; + +import java.io.Closeable; + +/** + * A KeepAlive connection is not physically closed immediately after the close, + * but rather kept alive for a few minutes. It makes sense only if it's shared. + * + * This interface is used by a dynamic proxy. It allows to have a #close + * function in a master client. + * + * This class is intended to be used internally by HBase classes that need to + * speak the MasterMonitorProtocol; but not by final user code. Hence it's + * package protected. + */ +interface MasterMonitorKeepAliveConnection extends MasterMonitorProtocol, Closeable { + + @Override + public void close(); +} + diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java new file mode 100644 index 0000000..eba32a7 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -0,0 +1,489 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.NavigableMap; +import java.util.TreeMap; +import java.util.TreeSet; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.PairOfSameType; + +/** + * Scanner class that contains the .META. table scanning logic + * and uses a Retryable scanner. Provided visitors will be called + * for each row. + * + * Although public visibility, this is not a public-facing API and may evolve in + * minor releases. + * + *

Note that during concurrent region splits, the scanner might not see + * META changes across rows (for parent and daughter entries) consistently. + * see HBASE-5986, and {@link BlockingMetaScannerVisitor} for details.
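For orientation only, a small visitor sketch; MetaScanner is annotated @InterfaceAudience.Private below, so user code should not rely on it, and the usual imports (List, ArrayList, Result, HRegionInfo, HBaseConfiguration) are assumed.

final List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
MetaScanner.metaScan(HBaseConfiguration.create(),
    new MetaScanner.MetaScannerVisitorBase() {
      @Override
      public boolean processRow(Result rowResult) throws IOException {
        HRegionInfo info = MetaScanner.getHRegionInfo(rowResult);
        if (info != null) {
          regions.add(info);
        }
        return true;  // keep scanning .META.
      }
    });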

+ */ +@InterfaceAudience.Private +public class MetaScanner { + private static final Log LOG = LogFactory.getLog(MetaScanner.class); + /** + * Scans the meta table and calls a visitor on each RowResult and uses a empty + * start row value as table name. + * + * @param configuration conf + * @param visitor A custom visitor + * @throws IOException e + */ + public static void metaScan(Configuration configuration, + MetaScannerVisitor visitor) + throws IOException { + metaScan(configuration, visitor, null); + } + + /** + * Scans the meta table and calls a visitor on each RowResult. Uses a table + * name to locate meta regions. + * + * @param configuration config + * @param visitor visitor object + * @param userTableName User table name in meta table to start scan at. Pass + * null if not interested in a particular table. + * @throws IOException e + */ + public static void metaScan(Configuration configuration, + MetaScannerVisitor visitor, byte [] userTableName) + throws IOException { + metaScan(configuration, visitor, userTableName, null, Integer.MAX_VALUE); + } + + /** + * Scans the meta table and calls a visitor on each RowResult. Uses a table + * name and a row name to locate meta regions. And it only scans at most + * rowLimit of rows. + * + * @param configuration HBase configuration. + * @param visitor Visitor object. + * @param userTableName User table name in meta table to start scan at. Pass + * null if not interested in a particular table. + * @param row Name of the row at the user table. The scan will start from + * the region row where the row resides. + * @param rowLimit Max of processed rows. If it is less than 0, it + * will be set to default value Integer.MAX_VALUE. + * @throws IOException e + */ + public static void metaScan(Configuration configuration, + MetaScannerVisitor visitor, byte [] userTableName, byte[] row, + int rowLimit) + throws IOException { + metaScan(configuration, visitor, userTableName, row, rowLimit, + HConstants.META_TABLE_NAME); + } + + /** + * Scans the meta table and calls a visitor on each RowResult. Uses a table + * name and a row name to locate meta regions. And it only scans at most + * rowLimit of rows. + * + * @param configuration HBase configuration. + * @param visitor Visitor object. Closes the visitor before returning. + * @param tableName User table name in meta table to start scan at. Pass + * null if not interested in a particular table. + * @param row Name of the row at the user table. The scan will start from + * the region row where the row resides. + * @param rowLimit Max of processed rows. If it is less than 0, it + * will be set to default value Integer.MAX_VALUE. + * @param metaTableName Meta table to scan, root or meta. + * @throws IOException e + */ + public static void metaScan(Configuration configuration, + final MetaScannerVisitor visitor, final byte[] tableName, + final byte[] row, final int rowLimit, final byte[] metaTableName) + throws IOException { + try { + HConnectionManager.execute(new HConnectable(configuration) { + @Override + public Void connect(HConnection connection) throws IOException { + metaScan(conf, connection, visitor, tableName, row, rowLimit, + metaTableName); + return null; + } + }); + } finally { + visitor.close(); + } + } + + private static void metaScan(Configuration configuration, HConnection connection, + MetaScannerVisitor visitor, byte [] tableName, byte[] row, + int rowLimit, final byte [] metaTableName) + throws IOException { + int rowUpperLimit = rowLimit > 0 ? 
rowLimit: Integer.MAX_VALUE; + + // if row is not null, we want to use the startKey of the row's region as + // the startRow for the meta scan. + byte[] startRow; + if (row != null) { + // Scan starting at a particular row in a particular table + assert tableName != null; + byte[] searchRow = + HRegionInfo.createRegionName(tableName, row, HConstants.NINES, + false); + HTable metaTable = null; + try { + metaTable = new HTable(configuration, HConstants.META_TABLE_NAME); + Result startRowResult = metaTable.getRowOrBefore(searchRow, + HConstants.CATALOG_FAMILY); + if (startRowResult == null) { + throw new TableNotFoundException("Cannot find row in .META. for table: " + + Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow)); + } + HRegionInfo regionInfo = getHRegionInfo(startRowResult); + if (regionInfo == null) { + throw new IOException("HRegionInfo was null or empty in Meta for " + + Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow)); + } + + byte[] rowBefore = regionInfo.getStartKey(); + startRow = HRegionInfo.createRegionName(tableName, rowBefore, + HConstants.ZEROES, false); + } finally { + if (metaTable != null) { + metaTable.close(); + } + } + } else if (tableName == null || tableName.length == 0) { + // Full META scan + startRow = HConstants.EMPTY_START_ROW; + } else { + // Scan META for an entire table + startRow = HRegionInfo.createRegionName( + tableName, HConstants.EMPTY_START_ROW, HConstants.ZEROES, false); + } + + // Scan over each meta region + ScannerCallable callable; + int rows = Math.min(rowLimit, configuration.getInt( + HConstants.HBASE_META_SCANNER_CACHING, + HConstants.DEFAULT_HBASE_META_SCANNER_CACHING)); + do { + final Scan scan = new Scan(startRow).addFamily(HConstants.CATALOG_FAMILY); + if (LOG.isDebugEnabled()) { + LOG.debug("Scanning " + Bytes.toString(metaTableName) + + " starting at row=" + Bytes.toStringBinary(startRow) + " for max=" + + rowUpperLimit + " rows using " + connection.toString()); + } + callable = new ScannerCallable(connection, metaTableName, scan, null); + // Open scanner + callable.withRetries(); + + int processedRows = 0; + try { + callable.setCaching(rows); + done: do { + if (processedRows >= rowUpperLimit) { + break; + } + //we have all the rows here + Result [] rrs = callable.withRetries(); + if (rrs == null || rrs.length == 0 || rrs[0].size() == 0) { + break; //exit completely + } + for (Result rr : rrs) { + if (processedRows >= rowUpperLimit) { + break done; + } + if (!visitor.processRow(rr)) + break done; //exit completely + processedRows++; + } + //here, we didn't break anywhere. Check if we have more rows + } while(true); + // Advance the startRow to the end key of the current region + startRow = callable.getHRegionInfo().getEndKey(); + } finally { + // Close scanner + callable.setClose(); + callable.withRetries(); + } + } while (Bytes.compareTo(startRow, HConstants.LAST_ROW) != 0); + } + + /** + * Returns HRegionInfo object from the column + * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog + * table Result. 
+ * @param data a Result object from the catalog table scan + * @return HRegionInfo or null + */ + public static HRegionInfo getHRegionInfo(Result data) { + byte [] bytes = + data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + if (bytes == null) return null; + HRegionInfo info = HRegionInfo.parseFromOrNull(bytes); + if (LOG.isDebugEnabled()) { + LOG.debug("Current INFO from scan results = " + info); + } + return info; + } + + /** + * Lists all of the regions currently in META. + * @param conf + * @return List of all user-space regions. + * @throws IOException + */ + public static List listAllRegions(Configuration conf) + throws IOException { + return listAllRegions(conf, true); + } + + /** + * Lists all of the regions currently in META. + * @param conf + * @param offlined True if we are to include offlined regions, false and we'll + * leave out offlined regions from returned list. + * @return List of all user-space regions. + * @throws IOException + */ + public static List listAllRegions(Configuration conf, final boolean offlined) + throws IOException { + final List regions = new ArrayList(); + MetaScannerVisitor visitor = new BlockingMetaScannerVisitor(conf) { + @Override + public boolean processRowInternal(Result result) throws IOException { + if (result == null || result.isEmpty()) { + return true; + } + + HRegionInfo regionInfo = getHRegionInfo(result); + if (regionInfo == null) { + LOG.warn("Null REGIONINFO_QUALIFIER: " + result); + return true; + } + + // If region offline AND we are not to include offlined regions, return. + if (regionInfo.isOffline() && !offlined) return true; + regions.add(regionInfo); + return true; + } + }; + metaScan(conf, visitor); + return regions; + } + + /** + * Lists all of the table regions currently in META. + * @param conf + * @param offlined True if we are to include offlined regions, false and we'll + * leave out offlined regions from returned list. + * @return Map of all user-space regions to servers + * @throws IOException + */ + public static NavigableMap allTableRegions(Configuration conf, + final byte [] tablename, final boolean offlined) throws IOException { + final NavigableMap regions = + new TreeMap(); + MetaScannerVisitor visitor = new TableMetaScannerVisitor(conf, tablename) { + @Override + public boolean processRowInternal(Result rowResult) throws IOException { + HRegionInfo info = getHRegionInfo(rowResult); + ServerName serverName = HRegionInfo.getServerName(rowResult); + + if (!(info.isOffline() || info.isSplit())) { + regions.put(new UnmodifyableHRegionInfo(info), serverName); + } + return true; + } + }; + metaScan(conf, visitor, tablename); + return regions; + } + + /** + * Visitor class called to process each row of the .META. table + */ + public interface MetaScannerVisitor extends Closeable { + /** + * Visitor method that accepts a RowResult and the meta region location. + * Implementations can return false to stop the region's loop if it becomes + * unnecessary for some reason. + * + * @param rowResult result + * @return A boolean to know if it should continue to loop in the region + * @throws IOException e + */ + public boolean processRow(Result rowResult) throws IOException; + } + + public static abstract class MetaScannerVisitorBase implements MetaScannerVisitor { + @Override + public void close() throws IOException { + } + } + + /** + * A MetaScannerVisitor that provides a consistent view of the table's + * META entries during concurrent splits (see HBASE-5986 for details). 
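For illustration, a rough usage sketch of the visitor API above, relying only on the metaScan(Configuration, MetaScannerVisitor) overload and the getHRegionInfo() helper defined in this class (the collector list and printing are illustrative only):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.client.Result;

public class MetaScanExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    final List<String> regionNames = new ArrayList<String>();
    // Collect the name of every region found in .META.; returning true keeps scanning.
    MetaScanner.MetaScannerVisitor visitor = new MetaScanner.MetaScannerVisitorBase() {
      @Override
      public boolean processRow(Result rowResult) throws IOException {
        HRegionInfo info = MetaScanner.getHRegionInfo(rowResult);
        if (info != null) {
          regionNames.add(info.getRegionNameAsString());
        }
        return true; // returning false stops the scan early
      }
    };
    // Full .META. scan; the visitor is closed before metaScan returns.
    MetaScanner.metaScan(conf, visitor);
    System.out.println("Found " + regionNames.size() + " regions");
  }
}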
This class + * does not guarantee ordered traversal of meta entries, and can block until the + * META entries for daughters are available during splits. + */ + public static abstract class BlockingMetaScannerVisitor + extends MetaScannerVisitorBase { + + private static final int DEFAULT_BLOCKING_TIMEOUT = 10000; + private Configuration conf; + private TreeSet daughterRegions = new TreeSet(Bytes.BYTES_COMPARATOR); + private int blockingTimeout; + private HTable metaTable; + + public BlockingMetaScannerVisitor(Configuration conf) { + this.conf = conf; + this.blockingTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, + DEFAULT_BLOCKING_TIMEOUT); + } + + public abstract boolean processRowInternal(Result rowResult) throws IOException; + + @Override + public void close() throws IOException { + super.close(); + if (metaTable != null) { + metaTable.close(); + metaTable = null; + } + } + + public HTable getMetaTable() throws IOException { + if (metaTable == null) { + metaTable = new HTable(conf, HConstants.META_TABLE_NAME); + } + return metaTable; + } + + @Override + public boolean processRow(Result rowResult) throws IOException { + HRegionInfo info = getHRegionInfo(rowResult); + if (info == null) { + return true; + } + + if (daughterRegions.remove(info.getRegionName())) { + return true; //we have already processed this row + } + + if (info.isSplitParent()) { + /* we have found a parent region which was split. We have to ensure that it's daughters are + * seen by this scanner as well, so we block until they are added to the META table. Even + * though we are waiting for META entries, ACID semantics in HBase indicates that this + * scanner might not see the new rows. So we manually query the daughter rows */ + PairOfSameType daughters = HRegionInfo.getDaughterRegions(rowResult); + HRegionInfo splitA = daughters.getFirst(); + HRegionInfo splitB = daughters.getSecond(); + + HTable metaTable = getMetaTable(); + long start = System.currentTimeMillis(); + Result resultA = getRegionResultBlocking(metaTable, blockingTimeout, + splitA.getRegionName()); + if (resultA != null) { + processRow(resultA); + daughterRegions.add(splitA.getRegionName()); + } else { + throw new RegionOfflineException("Split daughter region " + + splitA.getRegionNameAsString() + " cannot be found in META."); + } + long rem = blockingTimeout - (System.currentTimeMillis() - start); + + Result resultB = getRegionResultBlocking(metaTable, rem, + splitB.getRegionName()); + if (resultB != null) { + processRow(resultB); + daughterRegions.add(splitB.getRegionName()); + } else { + throw new RegionOfflineException("Split daughter region " + + splitB.getRegionNameAsString() + " cannot be found in META."); + } + } + + return processRowInternal(rowResult); + } + + private Result getRegionResultBlocking(HTable metaTable, long timeout, byte[] regionName) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("blocking until region is in META: " + Bytes.toStringBinary(regionName)); + } + long start = System.currentTimeMillis(); + while (System.currentTimeMillis() - start < timeout) { + Get get = new Get(regionName); + Result result = metaTable.get(get); + HRegionInfo info = getHRegionInfo(result); + if (info != null) { + return result; + } + try { + Thread.sleep(10); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + break; + } + } + return null; + } + } + + /** + * A MetaScannerVisitor for a table. 
Provides a consistent view of the table's + * META entries during concurrent splits (see HBASE-5986 for details). This class + * does not guarantee ordered traversal of meta entries, and can block until the + * META entries for daughters are available during splits. + */ + public static abstract class TableMetaScannerVisitor extends BlockingMetaScannerVisitor { + private byte[] tableName; + + public TableMetaScannerVisitor(Configuration conf, byte[] tableName) { + super(conf); + this.tableName = tableName; + } + + @Override + public final boolean processRow(Result rowResult) throws IOException { + HRegionInfo info = getHRegionInfo(rowResult); + if (info == null) { + return true; + } + if (!(Bytes.equals(info.getTableName(), tableName))) { + return false; + } + return super.processRow(rowResult); + } + + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java new file mode 100644 index 0000000..5605013 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java @@ -0,0 +1,90 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Container for Actions (i.e. Get, Delete, or Put), which are grouped by + * regionName. Intended to be used with HConnectionManager.processBatch() + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class MultiAction { + + // map of regions to lists of puts/gets/deletes for that region. + public Map>> actions = new TreeMap>>(Bytes.BYTES_COMPARATOR); + + public MultiAction() { + super(); + } + + /** + * Get the total number of Actions + * + * @return total number of Actions for all groups in this container. + */ + public int size() { + int size = 0; + for (List l : actions.values()) { + size += l.size(); + } + return size; + } + + /** + * Add an Action to this container based on it's regionName. If the regionName + * is wrong, the initial execution will fail, but will be automatically + * retried after looking up the correct region. 
+ * + * @param regionName + * @param a + */ + public void add(byte[] regionName, Action a) { + List> rsActions = actions.get(regionName); + if (rsActions == null) { + rsActions = new ArrayList>(); + actions.put(regionName, rsActions); + } + rsActions.add(a); + } + + public Set getRegions() { + return actions.keySet(); + } + + /** + * @return All actions from all regions in this container + */ + public List> allActions() { + List> res = new ArrayList>(); + for (List> lst : actions.values()) { + res.addAll(lst); + } + return res; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java new file mode 100644 index 0000000..f83f9ab --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java @@ -0,0 +1,85 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.ArrayList; +import java.util.TreeMap; + +/** + * A container for Result objects, grouped by regionName. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class MultiResponse { + + // map of regionName to list of (Results paired to the original index for that + // Result) + private Map>> results = + new TreeMap>>(Bytes.BYTES_COMPARATOR); + + public MultiResponse() { + super(); + } + + /** + * @return Number of pairs in this container + */ + public int size() { + int size = 0; + for (Collection c : results.values()) { + size += c.size(); + } + return size; + } + + /** + * Add the pair to the container, grouped by the regionName + * + * @param regionName + * @param r + * First item in the pair is the original index of the Action + * (request). Second item is the Result. Result will be empty for + * successful Put and Delete actions. 
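As a hedged illustration of how outcomes grouped by region might be unpacked, a small sketch that uses only the MultiResponse methods added here (the region name is hypothetical; the empty Result stands in for a successful mutation):

import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.client.MultiResponse;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class MultiResponseExample {
  public static void main(String[] args) {
    MultiResponse response = new MultiResponse();
    byte[] regionName = Bytes.toBytes("testtable,,1356552000000.hypothetical.");
    // Record the outcome of the action with original index 0; an empty Result is
    // what a successful Put or Delete would produce, an exception marks a failure.
    response.add(regionName, 0, new Result());
    // Unpack the outcomes region by region; the paired index lets callers line
    // each outcome back up with the request that produced it.
    for (Map.Entry<byte[], List<Pair<Integer, Object>>> entry : response.getResults().entrySet()) {
      for (Pair<Integer, Object> pair : entry.getValue()) {
        System.out.println(Bytes.toString(entry.getKey())
            + ": action #" + pair.getFirst() + " -> " + pair.getSecond());
      }
    }
    System.out.println("total outcomes: " + response.size());
  }
}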
+ */ + public void add(byte[] regionName, Pair r) { + List> rs = results.get(regionName); + if (rs == null) { + rs = new ArrayList>(); + results.put(regionName, rs); + } + rs.add(r); + } + + public void add(byte []regionName, int originalIndex, Object resOrEx) { + add(regionName, new Pair(originalIndex, resOrEx)); + } + + public Map>> getResults() { + return results; + } +} \ No newline at end of file diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java new file mode 100644 index 0000000..9f601e6 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java @@ -0,0 +1,232 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.UUID; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.util.Bytes; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class Mutation extends OperationWithAttributes implements Row { + // Attribute used in Mutations to indicate the originating cluster. + private static final String CLUSTER_ID_ATTR = "_c.id_"; + + protected byte [] row = null; + protected long ts = HConstants.LATEST_TIMESTAMP; + protected long lockId = -1L; + protected boolean writeToWAL = true; + protected Map> familyMap = + new TreeMap>(Bytes.BYTES_COMPARATOR); + + /** + * Compile the column family (i.e. schema) information + * into a Map. Useful for parsing and aggregation by debugging, + * logging, and administration tools. + * @return Map + */ + @Override + public Map getFingerprint() { + Map map = new HashMap(); + List families = new ArrayList(); + // ideally, we would also include table information, but that information + // is not stored in each Operation instance. + map.put("families", families); + for (Map.Entry> entry : this.familyMap.entrySet()) { + families.add(Bytes.toStringBinary(entry.getKey())); + } + return map; + } + + /** + * Compile the details beyond the scope of getFingerprint (row, columns, + * timestamps, etc.) into a Map along with the fingerprinted information. + * Useful for debugging, logging, and administration tools. + * @param maxCols a limit on the number of columns output prior to truncation + * @return Map + */ + @Override + public Map toMap(int maxCols) { + // we start with the fingerprint map and build on top of it. 
+ Map map = getFingerprint(); + // replace the fingerprint's simple list of families with a + // map from column families to lists of qualifiers and kv details + Map>> columns = + new HashMap>>(); + map.put("families", columns); + map.put("row", Bytes.toStringBinary(this.row)); + int colCount = 0; + // iterate through all column families affected + for (Map.Entry> entry : this.familyMap.entrySet()) { + // map from this family to details for each kv affected within the family + List> qualifierDetails = + new ArrayList>(); + columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails); + colCount += entry.getValue().size(); + if (maxCols <= 0) { + continue; + } + // add details for each kv + for (KeyValue kv : entry.getValue()) { + if (--maxCols <= 0 ) { + continue; + } + Map kvMap = kv.toStringMap(); + // row and family information are already available in the bigger map + kvMap.remove("row"); + kvMap.remove("family"); + qualifierDetails.add(kvMap); + } + } + map.put("totalColumns", colCount); + // add the id if set + if (getId() != null) { + map.put("id", getId()); + } + return map; + } + + /** + * @return true if edits should be applied to WAL, false if not + */ + public boolean getWriteToWAL() { + return this.writeToWAL; + } + + /** + * Set whether this Delete should be written to the WAL or not. + * Not writing the WAL means you may lose edits on server crash. + * @param write true if edits should be written to WAL, false if not + */ + public void setWriteToWAL(boolean write) { + this.writeToWAL = write; + } + + /** + * Method for retrieving the put's familyMap + * @return familyMap + */ + public Map> getFamilyMap() { + return this.familyMap; + } + + /** + * Method for setting the put's familyMap + */ + public void setFamilyMap(Map> map) { + this.familyMap = map; + } + + /** + * Method to check if the familyMap is empty + * @return true if empty, false otherwise + */ + public boolean isEmpty() { + return familyMap.isEmpty(); + } + + /** + * Method for retrieving the delete's row + * @return row + */ + @Override + public byte [] getRow() { + return this.row; + } + + public int compareTo(final Row d) { + return Bytes.compareTo(this.getRow(), d.getRow()); + } + + /** + * Method for retrieving the delete's RowLock + * @return RowLock + */ + public RowLock getRowLock() { + return new RowLock(this.row, this.lockId); + } + + /** + * Method for retrieving the delete's lock ID. + * + * @return The lock ID. + */ + public long getLockId() { + return this.lockId; + } + + /** + * Method for retrieving the timestamp + * @return timestamp + */ + public long getTimeStamp() { + return this.ts; + } + + /** + * Set the replication custer id. + * @param clusterId + */ + public void setClusterId(UUID clusterId) { + if (clusterId == null) return; + byte[] val = new byte[2*Bytes.SIZEOF_LONG]; + Bytes.putLong(val, 0, clusterId.getMostSignificantBits()); + Bytes.putLong(val, Bytes.SIZEOF_LONG, clusterId.getLeastSignificantBits()); + setAttribute(CLUSTER_ID_ATTR, val); + } + + /** + * @return The replication cluster id. 
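The cluster id travels with the mutation as a 16-byte attribute: the UUID's most-significant long first, then the least-significant long. A small round-trip sketch, assuming the Put subclass added later in this patch and a hypothetical row key:

import java.util.UUID;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class ClusterIdAttributeExample {
  public static void main(String[] args) {
    Put put = new Put(Bytes.toBytes("row-1"));
    UUID source = UUID.randomUUID();
    // Packs the UUID into the 16-byte _c.id_ attribute as described above.
    put.setClusterId(source);
    // getClusterId() decodes the two longs in the same order.
    System.out.println(source.equals(put.getClusterId())); // expect: true
  }
}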
+ */ + public UUID getClusterId() { + byte[] attr = getAttribute(CLUSTER_ID_ATTR); + if (attr == null) { + return HConstants.DEFAULT_CLUSTER_ID; + } + return new UUID(Bytes.toLong(attr,0), Bytes.toLong(attr, Bytes.SIZEOF_LONG)); + } + + /** + * @return the total number of KeyValues + */ + public int size() { + int size = 0; + for(List kvList : this.familyMap.values()) { + size += kvList.size(); + } + return size; + } + + /** + * @return the number of different families + */ + public int numFamilies() { + return familyMap.size(); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java new file mode 100644 index 0000000..6de1007 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java @@ -0,0 +1,45 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.RegionException; + +/** + * Thrown when no region server can be found for a region + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class NoServerForRegionException extends RegionException { + private static final long serialVersionUID = 1L << 11 - 1L; + + /** default constructor */ + public NoServerForRegionException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public NoServerForRegionException(String s) { + super(s); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java new file mode 100644 index 0000000..07e9c19 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java @@ -0,0 +1,113 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.Map; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.codehaus.jackson.map.ObjectMapper; + +/** + * Superclass for any type that maps to a potentially application-level query. + * (e.g. Put, Get, Delete, Scan, Next, etc.) + * Contains methods for exposure to logging and debugging tools. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class Operation { + // TODO make this configurable + private static final int DEFAULT_MAX_COLS = 5; + + /** + * Produces a Map containing a fingerprint which identifies the type and + * the static schema components of a query (i.e. column families) + * @return a map containing fingerprint information (i.e. column families) + */ + public abstract Map getFingerprint(); + + /** + * Produces a Map containing a summary of the details of a query + * beyond the scope of the fingerprint (i.e. columns, rows...) + * @param maxCols a limit on the number of columns output prior to truncation + * @return a map containing parameters of a query (i.e. rows, columns...) + */ + public abstract Map toMap(int maxCols); + + /** + * Produces a Map containing a full summary of a query. + * @return a map containing parameters of a query (i.e. rows, columns...) + */ + public Map toMap() { + return toMap(DEFAULT_MAX_COLS); + } + + /** + * Produces a JSON object for fingerprint and details exposure in a + * parseable format. + * @param maxCols a limit on the number of columns to include in the JSON + * @return a JSONObject containing this Operation's information, as a string + */ + public String toJSON(int maxCols) throws IOException { + ObjectMapper mapper = new ObjectMapper(); + return mapper.writeValueAsString(toMap(maxCols)); + } + + /** + * Produces a JSON object sufficient for description of a query + * in a debugging or logging context. + * @return the produced JSON object, as a string + */ + public String toJSON() throws IOException { + return toJSON(DEFAULT_MAX_COLS); + } + + /** + * Produces a string representation of this Operation. It defaults to a JSON + * representation, but falls back to a string representation of the + * fingerprint and details in the case of a JSON encoding failure. + * @param maxCols a limit on the number of columns output in the summary + * prior to truncation + * @return a JSON-parseable String + */ + public String toString(int maxCols) { + /* for now this is merely a wrapper from producing a JSON string, but + * toJSON is kept separate in case this is changed to be a less parsable + * pretty printed representation. + */ + try { + return toJSON(maxCols); + } catch (IOException ioe) { + return toMap(maxCols).toString(); + } + } + + /** + * Produces a string representation of this Operation. It defaults to a JSON + * representation, but falls back to a string representation of the + * fingerprint and details in the case of a JSON encoding failure. 
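A brief sketch of how these hooks might be used for logging, exercised through the Put subclass added later in this patch (row, family, and values are hypothetical):

import java.io.IOException;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class OperationLoggingExample {
  public static void main(String[] args) throws IOException {
    Put put = new Put(Bytes.toBytes("row-1"));
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("q2"), Bytes.toBytes("v2"));

    // Fingerprint: only the static schema (column families) touched by the operation.
    System.out.println(put.getFingerprint());
    // Full detail rendered as JSON, truncated to at most one column per family.
    System.out.println(put.toJSON(1));
    // toString() tries JSON first and falls back to toMap().toString() on failure.
    System.out.println(put);
  }
}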
+ * @return String + */ + @Override + public String toString() { + return toString(DEFAULT_MAX_COLS); + } +} + diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java new file mode 100644 index 0000000..52d50aa --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java @@ -0,0 +1,108 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ClassSize; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class OperationWithAttributes extends Operation implements Attributes { + // a opaque blob of attributes + private Map attributes; + + // used for uniquely identifying an operation + static public String ID_ATRIBUTE = "_operation.attributes.id"; + + public void setAttribute(String name, byte[] value) { + if (attributes == null && value == null) { + return; + } + + if (attributes == null) { + attributes = new HashMap(); + } + + if (value == null) { + attributes.remove(name); + if (attributes.isEmpty()) { + this.attributes = null; + } + } else { + attributes.put(name, value); + } + } + + public byte[] getAttribute(String name) { + if (attributes == null) { + return null; + } + + return attributes.get(name); + } + + public Map getAttributesMap() { + if (attributes == null) { + return Collections.emptyMap(); + } + return Collections.unmodifiableMap(attributes); + } + + protected long getAttributeSize() { + long size = 0; + if (attributes != null) { + size += ClassSize.align(this.attributes.size() * ClassSize.MAP_ENTRY); + for(Map.Entry entry : this.attributes.entrySet()) { + size += ClassSize.align(ClassSize.STRING + entry.getKey().length()); + size += ClassSize.align(ClassSize.ARRAY + entry.getValue().length); + } + } + return size; + } + + /** + * This method allows you to set an identifier on an operation. The original + * motivation for this was to allow the identifier to be used in slow query + * logging, but this could obviously be useful in other places. One use of + * this could be to put a class.method identifier in here to see where the + * slow query is coming from. + * @param id + * id to set for the scan + */ + public void setId(String id) { + setAttribute(ID_ATRIBUTE, Bytes.toBytes(id)); + } + + /** + * This method allows you to retrieve the identifier for the operation if one + * was set. 
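A short sketch of attribute tagging, assuming Scan (another OperationWithAttributes subclass in this module) and hypothetical id and attribute values:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class AttributeTaggingExample {
  public static void main(String[] args) {
    Scan scan = new Scan();
    // Tag the operation so it can be identified later, e.g. in slow-query logs.
    scan.setId("ReportJob.buildDailySummary");
    // Arbitrary application metadata rides along as a named attribute.
    scan.setAttribute("requestedBy", Bytes.toBytes("analytics"));

    System.out.println(scan.getId());                                     // ReportJob.buildDailySummary
    System.out.println(Bytes.toString(scan.getAttribute("requestedBy"))); // analytics
    // Setting an attribute to null removes it again; only the id remains.
    scan.setAttribute("requestedBy", null);
    System.out.println(scan.getAttributesMap().size());                   // 1
  }
}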
+ * @return the id or null if not set + */ + public String getId() { + byte[] attr = getAttribute(ID_ATRIBUTE); + return attr == null? null: Bytes.toString(attr); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java new file mode 100644 index 0000000..31b5573 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -0,0 +1,357 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.io.HeapSize; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ClassSize; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +/** + * Used to perform Put operations for a single row. + *

+ * To perform a Put, instantiate a Put object with the row to insert to and + * for each column to be inserted, execute {@link #add(byte[], byte[], byte[]) add} or + * {@link #add(byte[], byte[], long, byte[]) add} if setting the timestamp. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class Put extends Mutation implements HeapSize, Comparable { + private static final long OVERHEAD = ClassSize.align( + ClassSize.OBJECT + 2 * ClassSize.REFERENCE + + 2 * Bytes.SIZEOF_LONG + Bytes.SIZEOF_BOOLEAN + + ClassSize.REFERENCE + ClassSize.TREEMAP); + + /** + * Create a Put operation for the specified row. + * @param row row key + */ + public Put(byte [] row) { + this(row, null); + } + + /** + * Create a Put operation for the specified row, using an existing row lock. + * @param row row key + * @param rowLock previously acquired row lock, or null + */ + public Put(byte [] row, RowLock rowLock) { + this(row, HConstants.LATEST_TIMESTAMP, rowLock); + } + + /** + * Create a Put operation for the specified row, using a given timestamp. + * + * @param row row key + * @param ts timestamp + */ + public Put(byte[] row, long ts) { + this(row, ts, null); + } + + /** + * Create a Put operation for the specified row, using a given timestamp, and an existing row lock. + * @param row row key + * @param ts timestamp + * @param rowLock previously acquired row lock, or null + */ + public Put(byte [] row, long ts, RowLock rowLock) { + if(row == null || row.length > HConstants.MAX_ROW_LENGTH) { + throw new IllegalArgumentException("Row key is invalid"); + } + this.row = Arrays.copyOf(row, row.length); + this.ts = ts; + if(rowLock != null) { + this.lockId = rowLock.getLockId(); + } + } + + /** + * Copy constructor. Creates a Put operation cloned from the specified Put. + * @param putToCopy put to copy + */ + public Put(Put putToCopy) { + this(putToCopy.getRow(), putToCopy.ts, putToCopy.getRowLock()); + this.familyMap = + new TreeMap>(Bytes.BYTES_COMPARATOR); + for(Map.Entry> entry : + putToCopy.getFamilyMap().entrySet()) { + this.familyMap.put(entry.getKey(), entry.getValue()); + } + this.writeToWAL = putToCopy.writeToWAL; + } + + /** + * Add the specified column and value to this Put operation. + * @param family family name + * @param qualifier column qualifier + * @param value column value + * @return this + */ + public Put add(byte [] family, byte [] qualifier, byte [] value) { + return add(family, qualifier, this.ts, value); + } + + /** + * Add the specified column and value, with the specified timestamp as + * its version to this Put operation. + * @param family family name + * @param qualifier column qualifier + * @param ts version timestamp + * @param value column value + * @return this + */ + public Put add(byte [] family, byte [] qualifier, long ts, byte [] value) { + List list = getKeyValueList(family); + KeyValue kv = createPutKeyValue(family, qualifier, ts, value); + list.add(kv); + familyMap.put(kv.getFamily(), list); + return this; + } + + /** + * Add the specified KeyValue to this Put operation. Operation assumes that + * the passed KeyValue is immutable and its backing array will not be modified + * for the duration of this Put. 
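A minimal usage sketch along those lines, assuming a hypothetical table name, row key, and values (HTable is part of this module):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "testtable");
    try {
      Put put = new Put(Bytes.toBytes("row-1"));
      // cf:q1 at the latest (server-assigned) timestamp ...
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
      // ... and cf:q2 with an explicit version timestamp.
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("q2"), 1356552000000L, Bytes.toBytes("v2"));
      table.put(put);
    } finally {
      table.close();
    }
  }
}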
+ * @param kv individual KeyValue + * @return this + * @throws java.io.IOException e + */ + public Put add(KeyValue kv) throws IOException{ + byte [] family = kv.getFamily(); + List list = getKeyValueList(family); + //Checking that the row of the kv is the same as the put + int res = Bytes.compareTo(this.row, 0, row.length, + kv.getBuffer(), kv.getRowOffset(), kv.getRowLength()); + if(res != 0) { + throw new IOException("The row in the recently added KeyValue " + + Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(), + kv.getRowLength()) + " doesn't match the original one " + + Bytes.toStringBinary(this.row)); + } + list.add(kv); + familyMap.put(family, list); + return this; + } + + /* + * Create a KeyValue with this objects row key and the Put identifier. + * + * @return a KeyValue with this objects row key and the Put identifier. + */ + private KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, + byte[] value) { + return new KeyValue(this.row, family, qualifier, ts, KeyValue.Type.Put, + value); + } + + /** + * A convenience method to determine if this object's familyMap contains + * a value assigned to the given family & qualifier. + * Both given arguments must match the KeyValue object to return true. + * + * @param family column family + * @param qualifier column qualifier + * @return returns true if the given family and qualifier already has an + * existing KeyValue object in the family map. + */ + public boolean has(byte [] family, byte [] qualifier) { + return has(family, qualifier, this.ts, new byte[0], true, true); + } + + /** + * A convenience method to determine if this object's familyMap contains + * a value assigned to the given family, qualifier and timestamp. + * All 3 given arguments must match the KeyValue object to return true. + * + * @param family column family + * @param qualifier column qualifier + * @param ts timestamp + * @return returns true if the given family, qualifier and timestamp already has an + * existing KeyValue object in the family map. + */ + public boolean has(byte [] family, byte [] qualifier, long ts) { + return has(family, qualifier, ts, new byte[0], false, true); + } + + /** + * A convenience method to determine if this object's familyMap contains + * a value assigned to the given family, qualifier and timestamp. + * All 3 given arguments must match the KeyValue object to return true. + * + * @param family column family + * @param qualifier column qualifier + * @param value value to check + * @return returns true if the given family, qualifier and value already has an + * existing KeyValue object in the family map. + */ + public boolean has(byte [] family, byte [] qualifier, byte [] value) { + return has(family, qualifier, this.ts, value, true, false); + } + + /** + * A convenience method to determine if this object's familyMap contains + * the given value assigned to the given family, qualifier and timestamp. + * All 4 given arguments must match the KeyValue object to return true. + * + * @param family column family + * @param qualifier column qualifier + * @param ts timestamp + * @param value value to check + * @return returns true if the given family, qualifier timestamp and value + * already has an existing KeyValue object in the family map. 
+ */ + public boolean has(byte [] family, byte [] qualifier, long ts, byte [] value) { + return has(family, qualifier, ts, value, false, false); + } + + /* + * Private method to determine if this object's familyMap contains + * the given value assigned to the given family, qualifier and timestamp + * respecting the 2 boolean arguments + * + * @param family + * @param qualifier + * @param ts + * @param value + * @param ignoreTS + * @param ignoreValue + * @return returns true if the given family, qualifier timestamp and value + * already has an existing KeyValue object in the family map. + */ + private boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, + boolean ignoreTS, boolean ignoreValue) { + List list = getKeyValueList(family); + if (list.size() == 0) { + return false; + } + // Boolean analysis of ignoreTS/ignoreValue. + // T T => 2 + // T F => 3 (first is always true) + // F T => 2 + // F F => 1 + if (!ignoreTS && !ignoreValue) { + for (KeyValue kv : list) { + if (Arrays.equals(kv.getFamily(), family) && + Arrays.equals(kv.getQualifier(), qualifier) && + Arrays.equals(kv.getValue(), value) && + kv.getTimestamp() == ts) { + return true; + } + } + } else if (ignoreValue && !ignoreTS) { + for (KeyValue kv : list) { + if (Arrays.equals(kv.getFamily(), family) && Arrays.equals(kv.getQualifier(), qualifier) + && kv.getTimestamp() == ts) { + return true; + } + } + } else if (!ignoreValue && ignoreTS) { + for (KeyValue kv : list) { + if (Arrays.equals(kv.getFamily(), family) && Arrays.equals(kv.getQualifier(), qualifier) + && Arrays.equals(kv.getValue(), value)) { + return true; + } + } + } else { + for (KeyValue kv : list) { + if (Arrays.equals(kv.getFamily(), family) && + Arrays.equals(kv.getQualifier(), qualifier)) { + return true; + } + } + } + return false; + } + + /** + * Returns a list of all KeyValue objects with matching column family and qualifier. + * + * @param family column family + * @param qualifier column qualifier + * @return a list of KeyValue objects with the matching family and qualifier, + * returns an empty list if one doesnt exist for the given family. + */ + public List get(byte[] family, byte[] qualifier) { + List filteredList = new ArrayList(); + for (KeyValue kv: getKeyValueList(family)) { + if (Arrays.equals(kv.getQualifier(), qualifier)) { + filteredList.add(kv); + } + } + return filteredList; + } + + /** + * Creates an empty list if one doesnt exist for the given column family + * or else it returns the associated list of KeyValue objects. + * + * @param family column family + * @return a list of KeyValue objects, returns an empty list if one doesnt exist. 
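A small sketch of the has()/get() convenience checks on an in-memory Put (all names hypothetical):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutHasExample {
  public static void main(String[] args) {
    byte[] cf = Bytes.toBytes("cf");
    Put put = new Put(Bytes.toBytes("row-1"));
    put.add(cf, Bytes.toBytes("q1"), Bytes.toBytes("v1"));

    // has(family, qualifier) ignores timestamp and value.
    System.out.println(put.has(cf, Bytes.toBytes("q1")));                      // true
    System.out.println(put.has(cf, Bytes.toBytes("missing")));                 // false
    // has(family, qualifier, value) additionally requires a matching value.
    System.out.println(put.has(cf, Bytes.toBytes("q1"), Bytes.toBytes("v1"))); // true

    // get(family, qualifier) returns the KeyValues queued for that column.
    for (KeyValue kv : put.get(cf, Bytes.toBytes("q1"))) {
      System.out.println(Bytes.toString(kv.getValue()));                       // v1
    }
  }
}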
+ */ + private List getKeyValueList(byte[] family) { + List list = familyMap.get(family); + if(list == null) { + list = new ArrayList(0); + } + return list; + } + + //HeapSize + public long heapSize() { + long heapsize = OVERHEAD; + //Adding row + heapsize += ClassSize.align(ClassSize.ARRAY + this.row.length); + + //Adding map overhead + heapsize += + ClassSize.align(this.familyMap.size() * ClassSize.MAP_ENTRY); + for(Map.Entry> entry : this.familyMap.entrySet()) { + //Adding key overhead + heapsize += + ClassSize.align(ClassSize.ARRAY + entry.getKey().length); + + //This part is kinds tricky since the JVM can reuse references if you + //store the same value, but have a good match with SizeOf at the moment + //Adding value overhead + heapsize += ClassSize.align(ClassSize.ARRAYLIST); + int size = entry.getValue().size(); + heapsize += ClassSize.align(ClassSize.ARRAY + + size * ClassSize.REFERENCE); + + for(KeyValue kv : entry.getValue()) { + heapsize += kv.heapSize(); + } + } + heapsize += getAttributeSize(); + + return ClassSize.align((int)heapsize); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java new file mode 100644 index 0000000..65a5088 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java @@ -0,0 +1,39 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.RegionException; + +/** Thrown when a table can not be located */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class RegionOfflineException extends RegionException { + private static final long serialVersionUID = 466008402L; + /** default constructor */ + public RegionOfflineException() { + super(); + } + + /** @param s message */ + public RegionOfflineException(String s) { + super(s); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java new file mode 100644 index 0000000..9e15bbb --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -0,0 +1,709 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import java.nio.BufferOverflowException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.TreeMap; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValue.SplitKeyValue; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Single row result of a {@link Get} or {@link Scan} query.

+ * + * This class is NOT THREAD SAFE.

+ * + * Convenience methods are available that return various {@link Map} + * structures and values directly.

+ * + * To get a complete mapping of all cells in the Result, which can include + * multiple families and multiple versions, use {@link #getMap()}.

+ * + * To get a mapping of each family to its columns (qualifiers and values), + * including only the latest version of each, use {@link #getNoVersionMap()}. + * + * To get a mapping of qualifiers to latest values for an individual family use + * {@link #getFamilyMap(byte[])}.

+ * + * To get the latest value for a specific family and qualifier use {@link #getValue(byte[], byte[])}. + * + * A Result is backed by an array of {@link KeyValue} objects, each representing + * an HBase cell defined by the row, family, qualifier, timestamp, and value.

+ * + * The underlying {@link KeyValue} objects can be accessed through the method {@link #list()}. + * Each KeyValue can then be accessed through + * {@link KeyValue#getRow()}, {@link KeyValue#getFamily()}, {@link KeyValue#getQualifier()}, + * {@link KeyValue#getTimestamp()}, and {@link KeyValue#getValue()}.
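A hedged sketch of typical read access, assuming a hypothetical table, row, and column (HTable and Get are part of this module):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class ResultExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "testtable");
    try {
      Result result = table.get(new Get(Bytes.toBytes("row-1")));
      // Latest value of a single cell, or null if the cell was not returned.
      byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q1"));
      System.out.println(value == null ? "<no cell>" : Bytes.toString(value));
      // Or walk every returned cell through the sorted KeyValue array.
      if (!result.isEmpty()) {
        for (KeyValue kv : result.raw()) {
          System.out.println(Bytes.toString(kv.getQualifier()) + " @ " + kv.getTimestamp());
        }
      }
    } finally {
      table.close();
    }
  }
}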

+ * + * If you need to overwrite a Result with another Result instance -- as in the old 'mapred' RecordReader next + * invocations -- then create an empty Result with the null constructor and then use {@link #copyFrom(Result)}. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class Result { + private KeyValue [] kvs; + // We're not using java serialization. Transient here is just a marker to say + // that this is where we cache row if we're ever asked for it. + private transient byte [] row = null; + // Ditto for familyMap. It can be composed on the fly from the passed-in kvs. + private transient NavigableMap>> familyMap = null; + + // never use directly + private static byte [] buffer = null; + private static final int PAD_WIDTH = 128; + + /** + * Creates an empty Result w/ no KeyValue payload; returns null if you call {@link #raw()}. + * Use this to represent no results if null won't do or in the old 'mapred' (as opposed to 'mapreduce') package + * MapReduce where you need to overwrite a Result + * instance with a {@link #copyFrom(Result)} call. + */ + public Result() { + super(); + } + + /** + * Instantiate a Result with the specified array of KeyValues. + *
Note: You must ensure that the KeyValues are already sorted. + * @param kvs array of KeyValues + */ + public Result(KeyValue [] kvs) { + this.kvs = kvs; + } + + /** + * Instantiate a Result with the specified List of KeyValues. + *
Note: You must ensure that the keyvalues + * are already sorted + * @param kvs List of KeyValues + */ + public Result(List kvs) { + this(kvs.toArray(new KeyValue[kvs.size()])); + } + + /** + * Method for retrieving the row key that corresponds to + * the row from which this Result was created. + * @return row + */ + public byte [] getRow() { + if (this.row == null) { + this.row = this.kvs == null || this.kvs.length == 0? null: this.kvs[0].getRow(); + } + return this.row; + } + + /** + * Return the array of KeyValues backing this Result instance. + * + * The array is sorted from smallest -> largest using the + * {@link KeyValue#COMPARATOR}. + * + * The array only contains what your Get or Scan specifies and no more. + * For example if you request column "A" 1 version you will have at most 1 + * KeyValue in the array. If you request column "A" with 2 version you will + * have at most 2 KeyValues, with the first one being the newer timestamp and + * the second being the older timestamp (this is the sort order defined by + * {@link KeyValue#COMPARATOR}). If columns don't exist, they won't be + * present in the result. Therefore if you ask for 1 version all columns, + * it is safe to iterate over this array and expect to see 1 KeyValue for + * each column and no more. + * + * This API is faster than using getFamilyMap() and getMap() + * + * @return array of KeyValues; can be null if nothing in the result + */ + public KeyValue[] raw() { + return kvs; + } + + /** + * Create a sorted list of the KeyValue's in this result. + * + * Since HBase 0.20.5 this is equivalent to raw(). + * + * @return The sorted list of KeyValue's. + */ + public List list() { + return isEmpty()? null: Arrays.asList(raw()); + } + + /** + * Return the KeyValues for the specific column. The KeyValues are sorted in + * the {@link KeyValue#COMPARATOR} order. That implies the first entry in + * the list is the most recent column. If the query (Scan or Get) only + * requested 1 version the list will contain at most 1 entry. If the column + * did not exist in the result set (either the column does not exist + * or the column was not selected in the query) the list will be empty. + * + * Also see getColumnLatest which returns just a KeyValue + * + * @param family the family + * @param qualifier + * @return a list of KeyValues for this column or empty list if the column + * did not exist in the result set + */ + public List getColumn(byte [] family, byte [] qualifier) { + List result = new ArrayList(); + + KeyValue [] kvs = raw(); + + if (kvs == null || kvs.length == 0) { + return result; + } + int pos = binarySearch(kvs, family, qualifier); + if (pos == -1) { + return result; // cant find it + } + + for (int i = pos ; i < kvs.length ; i++ ) { + KeyValue kv = kvs[i]; + if (kv.matchingColumn(family,qualifier)) { + result.add(kv); + } else { + break; + } + } + + return result; + } + + protected int binarySearch(final KeyValue [] kvs, + final byte [] family, + final byte [] qualifier) { + KeyValue searchTerm = + KeyValue.createFirstOnRow(kvs[0].getRow(), + family, qualifier); + + // pos === ( -(insertion point) - 1) + int pos = Arrays.binarySearch(kvs, searchTerm, KeyValue.COMPARATOR); + // never will exact match + if (pos < 0) { + pos = (pos+1) * -1; + // pos is now insertion point + } + if (pos == kvs.length) { + return -1; // doesn't exist + } + return pos; + } + + /** + * Searches for the latest value for the specified column. 
+ * + * @param kvs the array to search + * @param family family name + * @param foffset family offset + * @param flength family length + * @param qualifier column qualifier + * @param qoffset qualifier offset + * @param qlength qualifier length + * + * @return the index where the value was found, or -1 otherwise + */ + protected int binarySearch(final KeyValue [] kvs, + final byte [] family, final int foffset, final int flength, + final byte [] qualifier, final int qoffset, final int qlength) { + + double keyValueSize = (double) + KeyValue.getKeyValueDataStructureSize(kvs[0].getRowLength(), flength, qlength, 0); + + if (buffer == null || keyValueSize > buffer.length) { + // pad to the smallest multiple of the pad width + buffer = new byte[(int) Math.ceil(keyValueSize / PAD_WIDTH) * PAD_WIDTH]; + } + + KeyValue searchTerm = KeyValue.createFirstOnRow(buffer, 0, + kvs[0].getBuffer(), kvs[0].getRowOffset(), kvs[0].getRowLength(), + family, foffset, flength, + qualifier, qoffset, qlength); + + // pos === ( -(insertion point) - 1) + int pos = Arrays.binarySearch(kvs, searchTerm, KeyValue.COMPARATOR); + // never will exact match + if (pos < 0) { + pos = (pos+1) * -1; + // pos is now insertion point + } + if (pos == kvs.length) { + return -1; // doesn't exist + } + return pos; + } + + /** + * The KeyValue for the most recent timestamp for a given column. + * + * @param family + * @param qualifier + * + * @return the KeyValue for the column, or null if no value exists in the row or none have been + * selected in the query (Get/Scan) + */ + public KeyValue getColumnLatest(byte [] family, byte [] qualifier) { + KeyValue [] kvs = raw(); // side effect possibly. + if (kvs == null || kvs.length == 0) { + return null; + } + int pos = binarySearch(kvs, family, qualifier); + if (pos == -1) { + return null; + } + KeyValue kv = kvs[pos]; + if (kv.matchingColumn(family, qualifier)) { + return kv; + } + return null; + } + + /** + * The KeyValue for the most recent timestamp for a given column. + * + * @param family family name + * @param foffset family offset + * @param flength family length + * @param qualifier column qualifier + * @param qoffset qualifier offset + * @param qlength qualifier length + * + * @return the KeyValue for the column, or null if no value exists in the row or none have been + * selected in the query (Get/Scan) + */ + public KeyValue getColumnLatest(byte [] family, int foffset, int flength, + byte [] qualifier, int qoffset, int qlength) { + + KeyValue [] kvs = raw(); // side effect possibly. + if (kvs == null || kvs.length == 0) { + return null; + } + int pos = binarySearch(kvs, family, foffset, flength, qualifier, qoffset, qlength); + if (pos == -1) { + return null; + } + KeyValue kv = kvs[pos]; + if (kv.matchingColumn(family, foffset, flength, qualifier, qoffset, qlength)) { + return kv; + } + return null; + } + + /** + * Get the latest version of the specified column. + * @param family family name + * @param qualifier column qualifier + * @return value of latest version of column, null if none found + */ + public byte[] getValue(byte [] family, byte [] qualifier) { + KeyValue kv = getColumnLatest(family, qualifier); + if (kv == null) { + return null; + } + return kv.getValue(); + } + + /** + * Returns the value wrapped in a new ByteBuffer. 
+ * + * @param family family name + * @param qualifier column qualifier + * + * @return the latest version of the column, or null if none found + */ + public ByteBuffer getValueAsByteBuffer(byte [] family, byte [] qualifier) { + + KeyValue kv = getColumnLatest(family, 0, family.length, qualifier, 0, qualifier.length); + + if (kv == null) { + return null; + } + return kv.getValueAsByteBuffer(); + } + + /** + * Returns the value wrapped in a new ByteBuffer. + * + * @param family family name + * @param foffset family offset + * @param flength family length + * @param qualifier column qualifier + * @param qoffset qualifier offset + * @param qlength qualifier length + * + * @return the latest version of the column, or null if none found + */ + public ByteBuffer getValueAsByteBuffer(byte [] family, int foffset, int flength, + byte [] qualifier, int qoffset, int qlength) { + + KeyValue kv = getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength); + + if (kv == null) { + return null; + } + return kv.getValueAsByteBuffer(); + } + + /** + * Loads the latest version of the specified column into the provided ByteBuffer. + *

+ * Does not clear or flip the buffer. + * + * @param family family name + * @param qualifier column qualifier + * @param dst the buffer where to write the value + * + * @return true if a value was found, false otherwise + * + * @throws BufferOverflowException there is insufficient space remaining in the buffer + */ + public boolean loadValue(byte [] family, byte [] qualifier, ByteBuffer dst) + throws BufferOverflowException { + return loadValue(family, 0, family.length, qualifier, 0, qualifier.length, dst); + } + + /** + * Loads the latest version of the specified column into the provided ByteBuffer. + *

+ * Does not clear or flip the buffer. + * + * @param family family name + * @param foffset family offset + * @param flength family length + * @param qualifier column qualifier + * @param qoffset qualifier offset + * @param qlength qualifier length + * @param dst the buffer where to write the value + * + * @return true if a value was found, false otherwise + * + * @throws BufferOverflowException there is insufficient space remaining in the buffer + */ + public boolean loadValue(byte [] family, int foffset, int flength, + byte [] qualifier, int qoffset, int qlength, ByteBuffer dst) + throws BufferOverflowException { + KeyValue kv = getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength); + + if (kv == null) { + return false; + } + kv.loadValue(dst); + return true; + } + + /** + * Checks if the specified column contains a non-empty value (not a zero-length byte array). + * + * @param family family name + * @param qualifier column qualifier + * + * @return whether or not a latest value exists and is not empty + */ + public boolean containsNonEmptyColumn(byte [] family, byte [] qualifier) { + + return containsNonEmptyColumn(family, 0, family.length, qualifier, 0, qualifier.length); + } + + /** + * Checks if the specified column contains a non-empty value (not a zero-length byte array). + * + * @param family family name + * @param foffset family offset + * @param flength family length + * @param qualifier column qualifier + * @param qoffset qualifier offset + * @param qlength qualifier length + * + * @return whether or not a latest value exists and is not empty + */ + public boolean containsNonEmptyColumn(byte [] family, int foffset, int flength, + byte [] qualifier, int qoffset, int qlength) { + + KeyValue kv = getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength); + + return (kv != null) && (kv.getValueLength() > 0); + } + + /** + * Checks if the specified column contains an empty value (a zero-length byte array). + * + * @param family family name + * @param qualifier column qualifier + * + * @return whether or not a latest value exists and is empty + */ + public boolean containsEmptyColumn(byte [] family, byte [] qualifier) { + + return containsEmptyColumn(family, 0, family.length, qualifier, 0, qualifier.length); + } + + /** + * Checks if the specified column contains an empty value (a zero-length byte array). + * + * @param family family name + * @param foffset family offset + * @param flength family length + * @param qualifier column qualifier + * @param qoffset qualifier offset + * @param qlength qualifier length + * + * @return whether or not a latest value exists and is empty + */ + public boolean containsEmptyColumn(byte [] family, int foffset, int flength, + byte [] qualifier, int qoffset, int qlength) { + KeyValue kv = getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength); + + return (kv != null) && (kv.getValueLength() == 0); + } + + /** + * Checks for existence of a value for the specified column (empty or not). + * + * @param family family name + * @param qualifier column qualifier + * + * @return true if at least one value exists in the result, false if not + */ + public boolean containsColumn(byte [] family, byte [] qualifier) { + KeyValue kv = getColumnLatest(family, qualifier); + return kv != null; + } + + /** + * Checks for existence of a value for the specified column (empty or not). 
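A small sketch of loadValue() against a hand-built Result (the cell is hypothetical); note the explicit flip(), since the method neither clears nor flips the destination buffer:

import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadValueExample {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("row-1");
    byte[] cf = Bytes.toBytes("cf");
    byte[] q = Bytes.toBytes("q1");
    Result result = new Result(new KeyValue[] {
        new KeyValue(row, cf, q, Bytes.toBytes("some value")) });

    ByteBuffer dst = ByteBuffer.allocate(64);
    if (result.loadValue(cf, q, dst)) {
      dst.flip(); // switch the buffer from writing to reading
      byte[] copy = new byte[dst.remaining()];
      dst.get(copy);
      System.out.println(Bytes.toString(copy)); // some value
    }
  }
}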
+ * + * @param family family name + * @param foffset family offset + * @param flength family length + * @param qualifier column qualifier + * @param qoffset qualifier offset + * @param qlength qualifier length + * + * @return true if at least one value exists in the result, false if not + */ + public boolean containsColumn(byte [] family, int foffset, int flength, + byte [] qualifier, int qoffset, int qlength) { + + return getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength) != null; + } + + /** + * Map of families to all versions of its qualifiers and values. + *
<p>
+ * Returns a three level Map of the form: + * Map&family,Map<qualifier,Map<timestamp,value>>> + *
<p>
+ * Note: All other map returning methods make use of this map internally. + * @return map from families to qualifiers to versions + */ + public NavigableMap>> getMap() { + if (this.familyMap != null) { + return this.familyMap; + } + if(isEmpty()) { + return null; + } + this.familyMap = new TreeMap>>(Bytes.BYTES_COMPARATOR); + for(KeyValue kv : this.kvs) { + SplitKeyValue splitKV = kv.split(); + byte [] family = splitKV.getFamily(); + NavigableMap> columnMap = + familyMap.get(family); + if(columnMap == null) { + columnMap = new TreeMap> + (Bytes.BYTES_COMPARATOR); + familyMap.put(family, columnMap); + } + byte [] qualifier = splitKV.getQualifier(); + NavigableMap versionMap = columnMap.get(qualifier); + if(versionMap == null) { + versionMap = new TreeMap(new Comparator() { + public int compare(Long l1, Long l2) { + return l2.compareTo(l1); + } + }); + columnMap.put(qualifier, versionMap); + } + Long timestamp = Bytes.toLong(splitKV.getTimestamp()); + byte [] value = splitKV.getValue(); + versionMap.put(timestamp, value); + } + return this.familyMap; + } + + /** + * Map of families to their most recent qualifiers and values. + *
<p>
+ * Returns a two level Map of the form: Map&family,Map<qualifier,value>> + *
<p>
+ * The most recent version of each qualifier will be used. + * @return map from families to qualifiers and value + */ + public NavigableMap> getNoVersionMap() { + if(this.familyMap == null) { + getMap(); + } + if(isEmpty()) { + return null; + } + NavigableMap> returnMap = + new TreeMap>(Bytes.BYTES_COMPARATOR); + for(Map.Entry>> + familyEntry : familyMap.entrySet()) { + NavigableMap qualifierMap = + new TreeMap(Bytes.BYTES_COMPARATOR); + for(Map.Entry> qualifierEntry : + familyEntry.getValue().entrySet()) { + byte [] value = + qualifierEntry.getValue().get(qualifierEntry.getValue().firstKey()); + qualifierMap.put(qualifierEntry.getKey(), value); + } + returnMap.put(familyEntry.getKey(), qualifierMap); + } + return returnMap; + } + + /** + * Map of qualifiers to values. + *
<p>
+ * Returns a Map of the form: Map<qualifier,value> + * @param family column family to get + * @return map of qualifiers to values + */ + public NavigableMap getFamilyMap(byte [] family) { + if(this.familyMap == null) { + getMap(); + } + if(isEmpty()) { + return null; + } + NavigableMap returnMap = + new TreeMap(Bytes.BYTES_COMPARATOR); + NavigableMap> qualifierMap = + familyMap.get(family); + if(qualifierMap == null) { + return returnMap; + } + for(Map.Entry> entry : + qualifierMap.entrySet()) { + byte [] value = + entry.getValue().get(entry.getValue().firstKey()); + returnMap.put(entry.getKey(), value); + } + return returnMap; + } + + /** + * Returns the value of the first column in the Result. + * @return value of the first column + */ + public byte [] value() { + if (isEmpty()) { + return null; + } + return kvs[0].getValue(); + } + + /** + * Check if the underlying KeyValue [] is empty or not + * @return true if empty + */ + public boolean isEmpty() { + return this.kvs == null || this.kvs.length == 0; + } + + /** + * @return the size of the underlying KeyValue [] + */ + public int size() { + return this.kvs == null? 0: this.kvs.length; + } + + /** + * @return String + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("keyvalues="); + if(isEmpty()) { + sb.append("NONE"); + return sb.toString(); + } + sb.append("{"); + boolean moreThanOne = false; + for(KeyValue kv : this.kvs) { + if(moreThanOne) { + sb.append(", "); + } else { + moreThanOne = true; + } + sb.append(kv.toString()); + } + sb.append("}"); + return sb.toString(); + } + + /** + * Does a deep comparison of two Results, down to the byte arrays. + * @param res1 first result to compare + * @param res2 second result to compare + * @throws Exception Every difference is throwing an exception + */ + public static void compareResults(Result res1, Result res2) + throws Exception { + if (res2 == null) { + throw new Exception("There wasn't enough rows, we stopped at " + + Bytes.toStringBinary(res1.getRow())); + } + if (res1.size() != res2.size()) { + throw new Exception("This row doesn't have the same number of KVs: " + + res1.toString() + " compared to " + res2.toString()); + } + KeyValue[] ourKVs = res1.raw(); + KeyValue[] replicatedKVs = res2.raw(); + for (int i = 0; i < res1.size(); i++) { + if (!ourKVs[i].equals(replicatedKVs[i]) || + !Bytes.equals(ourKVs[i].getValue(), replicatedKVs[i].getValue())) { + throw new Exception("This result was different: " + + res1.toString() + " compared to " + res2.toString()); + } + } + } + + /** + * Copy another Result into this one. Needed for the old Mapred framework + * @param other + */ + public void copyFrom(Result other) { + this.row = null; + this.familyMap = null; + this.kvs = other.kvs; + } +} \ No newline at end of file diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java new file mode 100644 index 0000000..ef72543 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java @@ -0,0 +1,54 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
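A short sketch of walking the map views that Result exposes above (getNoVersionMap and getFamilyMap); the full generic types of the views are spelled out in the sketch, and the Result argument is assumed to come from a Get or a scan:

    import java.util.Map;
    import java.util.NavigableMap;

    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    void dumpNewestValues(Result r) {
      // Two-level view: family -> (qualifier -> newest value); null when the Result is empty.
      NavigableMap<byte[], NavigableMap<byte[], byte[]>> families = r.getNoVersionMap();
      if (families == null) {
        return;
      }
      for (Map.Entry<byte[], NavigableMap<byte[], byte[]>> family : families.entrySet()) {
        for (Map.Entry<byte[], byte[]> column : family.getValue().entrySet()) {
          System.out.println(Bytes.toString(family.getKey()) + ":"
              + Bytes.toString(column.getKey()) + " = "
              + Bytes.toStringBinary(column.getValue()));
        }
      }
      // Or restrict to a single family; getMap() additionally orders timestamps newest-first.
      NavigableMap<byte[], byte[]> oneFamily = r.getFamilyMap(Bytes.toBytes("f"));
    }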
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.Closeable; +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Interface for client-side scanning. + * Go to {@link HTable} to obtain instances. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public interface ResultScanner extends Closeable, Iterable { + + /** + * Grab the next row's worth of values. The scanner will return a Result. + * @return Result object if there is another row, null if the scanner is + * exhausted. + * @throws IOException e + */ + public Result next() throws IOException; + + /** + * @param nbRows number of rows to return + * @return Between zero and nbRows Results + * @throws IOException e + */ + public Result [] next(int nbRows) throws IOException; + + /** + * Closes the scanner and releases any resources it has allocated + */ + public void close(); +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java new file mode 100644 index 0000000..d0b98f5 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java @@ -0,0 +1,109 @@ +/** + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.Date; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Exception thrown by HTable methods when an attempt to do something (like + * commit changes) fails after a bunch of retries. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class RetriesExhaustedException extends IOException { + private static final long serialVersionUID = 1876775844L; + + public RetriesExhaustedException(final String msg) { + super(msg); + } + + public RetriesExhaustedException(final String msg, final IOException e) { + super(msg, e); + } + + /** + * Datastructure that allows adding more info around Throwable incident. 
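A typical consumption pattern for the ResultScanner interface above; the table handle and the empty Scan are illustrative:

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;

    void scanAll(HTable table) throws IOException {
      ResultScanner scanner = table.getScanner(new Scan());
      try {
        for (Result r : scanner) {   // the iterator keeps calling next() until it returns null
          // process r
        }
      } finally {
        scanner.close();             // always release the server-side scanner
      }
    }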
+ */ + public static class ThrowableWithExtraContext { + private final Throwable t; + private final long when; + private final String extras; + + public ThrowableWithExtraContext(final Throwable t, final long when, + final String extras) { + this.t = t; + this.when = when; + this.extras = extras; + } + + @Override + public String toString() { + return new Date(this.when).toString() + ", " + extras + ", " + t.toString(); + } + } + + /** + * Create a new RetriesExhaustedException from the list of prior failures. + * @param callableVitals Details from the {@link ServerCallable} we were using + * when we got this exception. + * @param numTries The number of tries we made + * @param exceptions List of exceptions that failed before giving up + */ + public RetriesExhaustedException(final String callableVitals, int numTries, + List exceptions) { + super(getMessage(callableVitals, numTries, exceptions)); + } + + /** + * Create a new RetriesExhaustedException from the list of prior failures. + * @param numTries + * @param exceptions List of exceptions that failed before giving up + */ + public RetriesExhaustedException(final int numTries, + final List exceptions) { + super(getMessage(numTries, exceptions)); + } + + private static String getMessage(String callableVitals, int numTries, + List exceptions) { + StringBuilder buffer = new StringBuilder("Failed contacting "); + buffer.append(callableVitals); + buffer.append(" after "); + buffer.append(numTries + 1); + buffer.append(" attempts.\nExceptions:\n"); + for (Throwable t : exceptions) { + buffer.append(t.toString()); + buffer.append("\n"); + } + return buffer.toString(); + } + + private static String getMessage(final int numTries, + final List exceptions) { + StringBuilder buffer = new StringBuilder("Failed after attempts="); + buffer.append(numTries + 1); + buffer.append(", exceptions:\n"); + for (ThrowableWithExtraContext t : exceptions) { + buffer.append(t.toString()); + buffer.append("\n"); + } + return buffer.toString(); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java new file mode 100644 index 0000000..e7f5a4f --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java @@ -0,0 +1,152 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
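A sketch of how a retry loop assembles this exception from accumulated ThrowableWithExtraContext entries, mirroring what ServerCallable.withRetries() does later in this patch; the attempt inside the loop is a placeholder:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.client.RetriesExhaustedException;

    void runWithRetries(int numRetries) throws RetriesExhaustedException {
      List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions =
          new ArrayList<RetriesExhaustedException.ThrowableWithExtraContext>();
      for (int tries = 0; tries < numRetries; tries++) {
        try {
          // attempt the remote call here
          return;
        } catch (Exception t) {
          // Record what failed, when, and in which context before deciding to retry.
          exceptions.add(new RetriesExhaustedException.ThrowableWithExtraContext(
              t, System.currentTimeMillis(), "try=" + tries));
          if (tries == numRetries - 1) {
            throw new RetriesExhaustedException(tries, exceptions);
          }
        }
      }
    }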
+ */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DoNotRetryIOException; + +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * This subclass of {@link org.apache.hadoop.hbase.client.RetriesExhaustedException} + * is thrown when we have more information about which rows were causing which + * exceptions on what servers. You can call {@link #mayHaveClusterIssues()} + * and if the result is false, you have input error problems, otherwise you + * may have cluster issues. You can iterate over the causes, rows and last + * known server addresses via {@link #getNumExceptions()} and + * {@link #getCause(int)}, {@link #getRow(int)} and {@link #getHostnamePort(int)}. + */ +@SuppressWarnings("serial") +@InterfaceAudience.Public +@InterfaceStability.Stable +public class RetriesExhaustedWithDetailsException +extends RetriesExhaustedException { + List exceptions; + List actions; + List hostnameAndPort; + + public RetriesExhaustedWithDetailsException(List exceptions, + List actions, + List hostnameAndPort) { + super("Failed " + exceptions.size() + " action" + + pluralize(exceptions) + ": " + + getDesc(exceptions, actions, hostnameAndPort)); + + this.exceptions = exceptions; + this.actions = actions; + this.hostnameAndPort = hostnameAndPort; + } + + public List getCauses() { + return exceptions; + } + + public int getNumExceptions() { + return exceptions.size(); + } + + public Throwable getCause(int i) { + return exceptions.get(i); + } + + public Row getRow(int i) { + return actions.get(i); + } + + public String getHostnamePort(final int i) { + return this.hostnameAndPort.get(i); + } + + public boolean mayHaveClusterIssues() { + boolean res = false; + + // If all of the exceptions are DNRIOE not exception + for (Throwable t : exceptions) { + if ( !(t instanceof DoNotRetryIOException)) { + res = true; + } + } + return res; + } + + + public static String pluralize(Collection c) { + return pluralize(c.size()); + } + + public static String pluralize(int c) { + return c > 1 ? 
"s" : ""; + } + + public static String getDesc(List exceptions, + List actions, + List hostnamePort) { + String s = getDesc(classifyExs(exceptions)); + StringBuilder addrs = new StringBuilder(s); + addrs.append("servers with issues: "); + Set uniqAddr = new HashSet(); + uniqAddr.addAll(hostnamePort); + + for(String addr : uniqAddr) { + addrs.append(addr).append(", "); + } + return s; + } + + public static Map classifyExs(List ths) { + Map cls = new HashMap(); + for (Throwable t : ths) { + if (t == null) continue; + String name = ""; + if (t instanceof DoNotRetryIOException) { + name = t.getMessage(); + } else { + name = t.getClass().getSimpleName(); + } + Integer i = cls.get(name); + if (i == null) { + i = 0; + } + i += 1; + cls.put(name, i); + } + return cls; + } + + public static String getDesc(Map classificaton) { + StringBuilder classificatons =new StringBuilder(11); + for (Map.Entry e : classificaton.entrySet()) { + classificatons.append(e.getKey()); + classificatons.append(": "); + classificatons.append(e.getValue()); + classificatons.append(" time"); + classificatons.append(pluralize(e.getValue())); + classificatons.append(", "); + } + return classificatons.toString(); + } + +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java new file mode 100644 index 0000000..63dca08 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java @@ -0,0 +1,34 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Has a row. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public interface Row extends Comparable { + /** + * @return The row. + */ + public byte [] getRow(); +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowLock.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowLock.java new file mode 100644 index 0000000..6736877 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowLock.java @@ -0,0 +1,66 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Holds row name and lock id. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class RowLock { + private byte [] row = null; + private long lockId = -1L; + + /** + * Creates a RowLock from a row and lock id + * @param row row to lock on + * @param lockId the lock id + */ + public RowLock(final byte [] row, final long lockId) { + this.row = row; + this.lockId = lockId; + } + + /** + * Creates a RowLock with only a lock id + * @param lockId lock id + */ + public RowLock(final long lockId) { + this.lockId = lockId; + } + + /** + * Get the row for this RowLock + * @return the row + */ + public byte [] getRow() { + return row; + } + + /** + * Get the lock id from this RowLock + * @return the lock id + */ + public long getLockId() { + return lockId; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java new file mode 100644 index 0000000..8a6e5a7 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Performs multiple mutations atomically on a single row. + * Currently {@link Put} and {@link Delete} are supported. + * + * The mutations are performed in the order in which they + * were added. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RowMutations implements Row { + private List mutations = new ArrayList(); + private byte [] row; + private static final byte VERSION = (byte)0; + + /** Constructor for Writable. DO NOT USE */ + public RowMutations() {} + + /** + * Create an atomic mutation for the specified row. 
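A sketch of applying a Put and a Delete to one row atomically with RowMutations; HTable#mutateRow(RowMutations) is assumed to be available on the client side (it is not shown in this hunk), and the column names are illustrative:

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.util.Bytes;

    void swapColumns(HTable table) throws IOException {
      byte[] row = Bytes.toBytes("row1");
      Put put = new Put(row);
      put.add(Bytes.toBytes("f"), Bytes.toBytes("new"), Bytes.toBytes("value"));
      Delete delete = new Delete(row);
      delete.deleteColumns(Bytes.toBytes("f"), Bytes.toBytes("old"));

      RowMutations rm = new RowMutations(row);  // every mutation must share this row key
      rm.add(put);
      rm.add(delete);
      table.mutateRow(rm);                      // applied atomically, in insertion order
    }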
+ * @param row row key + */ + public RowMutations(byte [] row) { + if(row == null || row.length > HConstants.MAX_ROW_LENGTH) { + throw new IllegalArgumentException("Row key is invalid"); + } + this.row = Arrays.copyOf(row, row.length); + } + + /** + * Add a {@link Put} operation to the list of mutations + * @param p The {@link Put} to add + * @throws IOException + */ + public void add(Put p) throws IOException { + internalAdd(p); + } + + /** + * Add a {@link Delete} operation to the list of mutations + * @param d The {@link Delete} to add + * @throws IOException + */ + public void add(Delete d) throws IOException { + internalAdd(d); + } + + private void internalAdd(Mutation m) throws IOException { + int res = Bytes.compareTo(this.row, m.getRow()); + if(res != 0) { + throw new IOException("The row in the recently added Put/Delete " + + Bytes.toStringBinary(m.getRow()) + " doesn't match the original one " + + Bytes.toStringBinary(this.row)); + } + mutations.add(m); + } + + @Override + public int compareTo(Row i) { + return Bytes.compareTo(this.getRow(), i.getRow()); + } + + @Override + public byte[] getRow() { + return row; + } + + /** + * @return An unmodifiable list of the current mutations. + */ + public List getMutations() { + return Collections.unmodifiableList(mutations); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java new file mode 100644 index 0000000..23bbf18 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -0,0 +1,652 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.IncompatibleFilterException; +import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.TreeMap; +import java.util.TreeSet; + +/** + * Used to perform Scan operations. + *
<p>
+ * All operations are identical to {@link Get} with the exception of + * instantiation. Rather than specifying a single row, an optional startRow + * and stopRow may be defined. If rows are not specified, the Scanner will + * iterate over all rows. + *
<p>
+ * To scan everything for each row, instantiate a Scan object. + *
<p>
+ * To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}. + * If caching is NOT set, we will use the caching value of the hosting {@link HTable}. See + * {@link HTable#setScannerCaching(int)}. In addition to row caching, it is possible to specify a + * maximum result size, using {@link #setMaxResultSize(long)}. When both are used, + * single server requests are limited by either number of rows or maximum result size, whichever + * limit comes first. + *
<p>
+ * To further define the scope of what to get when scanning, perform additional + * methods as outlined below. + *
<p>
+ * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} + * for each family to retrieve. + *
<p>
+ * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn} + * for each column to retrieve. + *
<p>
+ * To only retrieve columns within a specific range of version timestamps, + * execute {@link #setTimeRange(long, long) setTimeRange}. + *
<p>
+ * To only retrieve columns with a specific timestamp, execute + * {@link #setTimeStamp(long) setTimestamp}. + *
<p>
+ * To limit the number of versions of each column to be returned, execute + * {@link #setMaxVersions(int) setMaxVersions}. + *
<p>
+ * To limit the maximum number of values returned for each call to next(), + * execute {@link #setBatch(int) setBatch}. + *
<p>
+ * To add a filter, execute {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}. + *
<p>
+ * Expert: To explicitly disable server-side block caching for this scan, + * execute {@link #setCacheBlocks(boolean)}. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class Scan extends OperationWithAttributes { + private static final String RAW_ATTR = "_raw_"; + private static final String ISOLATION_LEVEL = "_isolationlevel_"; + + private byte [] startRow = HConstants.EMPTY_START_ROW; + private byte [] stopRow = HConstants.EMPTY_END_ROW; + private int maxVersions = 1; + private int batch = -1; + + private int storeLimit = -1; + private int storeOffset = 0; + + // If application wants to collect scan metrics, it needs to + // call scan.setAttribute(SCAN_ATTRIBUTES_ENABLE, Bytes.toBytes(Boolean.TRUE)) + static public String SCAN_ATTRIBUTES_METRICS_ENABLE = + "scan.attributes.metrics.enable"; + static public String SCAN_ATTRIBUTES_METRICS_DATA = + "scan.attributes.metrics.data"; + + /* + * -1 means no caching + */ + private int caching = -1; + private long maxResultSize = -1; + private boolean cacheBlocks = true; + private Filter filter = null; + private TimeRange tr = new TimeRange(); + private Map> familyMap = + new TreeMap>(Bytes.BYTES_COMPARATOR); + + /** + * Create a Scan operation across all rows. + */ + public Scan() {} + + public Scan(byte [] startRow, Filter filter) { + this(startRow); + this.filter = filter; + } + + /** + * Create a Scan operation starting at the specified row. + *
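A sketch that ties the class comment above together: a bounded scan over a single family with explicit caching and result-size limits; the row keys, family name and sizes are illustrative:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    Scan buildBoundedScan() {
      Scan scan = new Scan(Bytes.toBytes("row-0000"), Bytes.toBytes("row-9999"));
      scan.addFamily(Bytes.toBytes("f"));        // every column of family "f"
      scan.setCaching(100);                      // rows fetched per RPC, overrides the HTable default
      scan.setMaxResultSize(2L * 1024 * 1024);   // additionally cap one server response at ~2 MB
      scan.setCacheBlocks(false);                // a full sweep should not churn the block cache
      return scan;
    }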
<p>
+ * If the specified row does not exist, the Scanner will start from the + * next closest row after the specified row. + * @param startRow row to start scanner at or after + */ + public Scan(byte [] startRow) { + this.startRow = startRow; + } + + /** + * Create a Scan operation for the range of rows specified. + * @param startRow row to start scanner at or after (inclusive) + * @param stopRow row to stop scanner before (exclusive) + */ + public Scan(byte [] startRow, byte [] stopRow) { + this.startRow = startRow; + this.stopRow = stopRow; + } + + /** + * Creates a new instance of this class while copying all values. + * + * @param scan The scan instance to copy from. + * @throws IOException When copying the values fails. + */ + public Scan(Scan scan) throws IOException { + startRow = scan.getStartRow(); + stopRow = scan.getStopRow(); + maxVersions = scan.getMaxVersions(); + batch = scan.getBatch(); + storeLimit = scan.getMaxResultsPerColumnFamily(); + storeOffset = scan.getRowOffsetPerColumnFamily(); + caching = scan.getCaching(); + maxResultSize = scan.getMaxResultSize(); + cacheBlocks = scan.getCacheBlocks(); + filter = scan.getFilter(); // clone? + TimeRange ctr = scan.getTimeRange(); + tr = new TimeRange(ctr.getMin(), ctr.getMax()); + Map> fams = scan.getFamilyMap(); + for (Map.Entry> entry : fams.entrySet()) { + byte [] fam = entry.getKey(); + NavigableSet cols = entry.getValue(); + if (cols != null && cols.size() > 0) { + for (byte[] col : cols) { + addColumn(fam, col); + } + } else { + addFamily(fam); + } + } + for (Map.Entry attr : scan.getAttributesMap().entrySet()) { + setAttribute(attr.getKey(), attr.getValue()); + } + } + + /** + * Builds a scan object with the same specs as get. + * @param get get to model scan after + */ + public Scan(Get get) { + this.startRow = get.getRow(); + this.stopRow = get.getRow(); + this.filter = get.getFilter(); + this.cacheBlocks = get.getCacheBlocks(); + this.maxVersions = get.getMaxVersions(); + this.storeLimit = get.getMaxResultsPerColumnFamily(); + this.storeOffset = get.getRowOffsetPerColumnFamily(); + this.tr = get.getTimeRange(); + this.familyMap = get.getFamilyMap(); + } + + public boolean isGetScan() { + return this.startRow != null && this.startRow.length > 0 && + Bytes.equals(this.startRow, this.stopRow); + } + + /** + * Get all columns from the specified family. + *
<p>
+ * Overrides previous calls to addColumn for this family. + * @param family family name + * @return this + */ + public Scan addFamily(byte [] family) { + familyMap.remove(family); + familyMap.put(family, null); + return this; + } + + /** + * Get the column from the specified family with the specified qualifier. + *
<p>
+ * Overrides previous calls to addFamily for this family. + * @param family family name + * @param qualifier column qualifier + * @return this + */ + public Scan addColumn(byte [] family, byte [] qualifier) { + NavigableSet set = familyMap.get(family); + if(set == null) { + set = new TreeSet(Bytes.BYTES_COMPARATOR); + } + if (qualifier == null) { + qualifier = HConstants.EMPTY_BYTE_ARRAY; + } + set.add(qualifier); + familyMap.put(family, set); + return this; + } + + /** + * Get versions of columns only within the specified timestamp range, + * [minStamp, maxStamp). Note, default maximum versions to return is 1. If + * your time range spans more than one version and you want all versions + * returned, up the number of versions beyond the defaut. + * @param minStamp minimum timestamp value, inclusive + * @param maxStamp maximum timestamp value, exclusive + * @throws IOException if invalid time range + * @see #setMaxVersions() + * @see #setMaxVersions(int) + * @return this + */ + public Scan setTimeRange(long minStamp, long maxStamp) + throws IOException { + tr = new TimeRange(minStamp, maxStamp); + return this; + } + + /** + * Get versions of columns with the specified timestamp. Note, default maximum + * versions to return is 1. If your time range spans more than one version + * and you want all versions returned, up the number of versions beyond the + * defaut. + * @param timestamp version timestamp + * @see #setMaxVersions() + * @see #setMaxVersions(int) + * @return this + */ + public Scan setTimeStamp(long timestamp) { + try { + tr = new TimeRange(timestamp, timestamp+1); + } catch(IOException e) { + // Will never happen + } + return this; + } + + /** + * Set the start row of the scan. + * @param startRow row to start scan on (inclusive) + * Note: In order to make startRow exclusive add a trailing 0 byte + * @return this + */ + public Scan setStartRow(byte [] startRow) { + this.startRow = startRow; + return this; + } + + /** + * Set the stop row. + * @param stopRow row to end at (exclusive) + * Note: In order to make stopRow inclusive add a trailing 0 byte + * @return this + */ + public Scan setStopRow(byte [] stopRow) { + this.stopRow = stopRow; + return this; + } + + /** + * Get all available versions. + * @return this + */ + public Scan setMaxVersions() { + this.maxVersions = Integer.MAX_VALUE; + return this; + } + + /** + * Get up to the specified number of versions of each column. + * @param maxVersions maximum versions for each column + * @return this + */ + public Scan setMaxVersions(int maxVersions) { + this.maxVersions = maxVersions; + return this; + } + + /** + * Set the maximum number of values to return for each call to next() + * @param batch the maximum number of values + */ + public void setBatch(int batch) { + if (this.hasFilter() && this.filter.hasFilterRow()) { + throw new IncompatibleFilterException( + "Cannot set batch on a scan using a filter" + + " that returns true for filter.hasFilterRow"); + } + this.batch = batch; + } + + /** + * Set the maximum number of values to return per row per Column Family + * @param limit the maximum number of values returned / row / CF + */ + public void setMaxResultsPerColumnFamily(int limit) { + this.storeLimit = limit; + } + + /** + * Set offset for the row per Column Family. + * @param offset is the number of kvs that will be skipped. + */ + public void setRowOffsetPerColumnFamily(int offset) { + this.storeOffset = offset; + } + + /** + * Set the number of rows for caching that will be passed to scanners. 
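As the setTimeRange/setMaxVersions notes above say, only one version per column is returned unless the version count is raised; a short sketch of requesting every version inside a time window:

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Scan;

    Scan buildVersionedScan(long fromTs, long toTs) throws IOException {
      Scan scan = new Scan();
      scan.setTimeRange(fromTs, toTs);  // [fromTs, toTs)
      scan.setMaxVersions();            // otherwise only the newest version inside the range comes back
      scan.setBatch(50);                // at most 50 KeyValues per Result returned by next()
      return scan;
    }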
+ * If not set, the default setting from {@link HTable#getScannerCaching()} will apply. + * Higher caching values will enable faster scanners but will use more memory. + * @param caching the number of rows for caching + */ + public void setCaching(int caching) { + this.caching = caching; + } + + /** + * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)} + */ + public long getMaxResultSize() { + return maxResultSize; + } + + /** + * Set the maximum result size. The default is -1; this means that no specific + * maximum result size will be set for this scan, and the global configured + * value will be used instead. (Defaults to unlimited). + * + * @param maxResultSize The maximum result size in bytes. + */ + public void setMaxResultSize(long maxResultSize) { + this.maxResultSize = maxResultSize; + } + + /** + * Apply the specified server-side filter when performing the Scan. + * @param filter filter to run on the server + * @return this + */ + public Scan setFilter(Filter filter) { + this.filter = filter; + return this; + } + + /** + * Setting the familyMap + * @param familyMap map of family to qualifier + * @return this + */ + public Scan setFamilyMap(Map> familyMap) { + this.familyMap = familyMap; + return this; + } + + /** + * Getting the familyMap + * @return familyMap + */ + public Map> getFamilyMap() { + return this.familyMap; + } + + /** + * @return the number of families in familyMap + */ + public int numFamilies() { + if(hasFamilies()) { + return this.familyMap.size(); + } + return 0; + } + + /** + * @return true if familyMap is non empty, false otherwise + */ + public boolean hasFamilies() { + return !this.familyMap.isEmpty(); + } + + /** + * @return the keys of the familyMap + */ + public byte[][] getFamilies() { + if(hasFamilies()) { + return this.familyMap.keySet().toArray(new byte[0][0]); + } + return null; + } + + /** + * @return the startrow + */ + public byte [] getStartRow() { + return this.startRow; + } + + /** + * @return the stoprow + */ + public byte [] getStopRow() { + return this.stopRow; + } + + /** + * @return the max number of versions to fetch + */ + public int getMaxVersions() { + return this.maxVersions; + } + + /** + * @return maximum number of values to return for a single call to next() + */ + public int getBatch() { + return this.batch; + } + + /** + * @return maximum number of values to return per row per CF + */ + public int getMaxResultsPerColumnFamily() { + return this.storeLimit; + } + + /** + * Method for retrieving the scan's offset per row per column + * family (#kvs to be skipped) + * @return row offset + */ + public int getRowOffsetPerColumnFamily() { + return this.storeOffset; + } + + /** + * @return caching the number of rows fetched when calling next on a scanner + */ + public int getCaching() { + return this.caching; + } + + /** + * @return TimeRange + */ + public TimeRange getTimeRange() { + return this.tr; + } + + /** + * @return RowFilter + */ + public Filter getFilter() { + return filter; + } + + /** + * @return true is a filter has been specified, false if not + */ + public boolean hasFilter() { + return filter != null; + } + + /** + * Set whether blocks should be cached for this Scan. + *
<p>
+ * This is true by default. When true, default settings of the table and + * family are used (this will never override caching blocks if the block + * cache is disabled for that family or entirely). + * + * @param cacheBlocks if false, default settings are overridden and blocks + * will not be cached + */ + public void setCacheBlocks(boolean cacheBlocks) { + this.cacheBlocks = cacheBlocks; + } + + /** + * Get whether blocks should be cached for this Scan. + * @return true if default caching should be used, false if blocks should not + * be cached + */ + public boolean getCacheBlocks() { + return cacheBlocks; + } + + /** + * Compile the table and column family (i.e. schema) information + * into a String. Useful for parsing and aggregation by debugging, + * logging, and administration tools. + * @return Map + */ + @Override + public Map getFingerprint() { + Map map = new HashMap(); + List families = new ArrayList(); + if(this.familyMap.size() == 0) { + map.put("families", "ALL"); + return map; + } else { + map.put("families", families); + } + for (Map.Entry> entry : + this.familyMap.entrySet()) { + families.add(Bytes.toStringBinary(entry.getKey())); + } + return map; + } + + /** + * Compile the details beyond the scope of getFingerprint (row, columns, + * timestamps, etc.) into a Map along with the fingerprinted information. + * Useful for debugging, logging, and administration tools. + * @param maxCols a limit on the number of columns output prior to truncation + * @return Map + */ + @Override + public Map toMap(int maxCols) { + // start with the fingerpring map and build on top of it + Map map = getFingerprint(); + // map from families to column list replaces fingerprint's list of families + Map> familyColumns = + new HashMap>(); + map.put("families", familyColumns); + // add scalar information first + map.put("startRow", Bytes.toStringBinary(this.startRow)); + map.put("stopRow", Bytes.toStringBinary(this.stopRow)); + map.put("maxVersions", this.maxVersions); + map.put("batch", this.batch); + map.put("caching", this.caching); + map.put("maxResultSize", this.maxResultSize); + map.put("cacheBlocks", this.cacheBlocks); + List timeRange = new ArrayList(); + timeRange.add(this.tr.getMin()); + timeRange.add(this.tr.getMax()); + map.put("timeRange", timeRange); + int colCount = 0; + // iterate through affected families and list out up to maxCols columns + for (Map.Entry> entry : + this.familyMap.entrySet()) { + List columns = new ArrayList(); + familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns); + if(entry.getValue() == null) { + colCount++; + --maxCols; + columns.add("ALL"); + } else { + colCount += entry.getValue().size(); + if (maxCols <= 0) { + continue; + } + for (byte [] column : entry.getValue()) { + if (--maxCols <= 0) { + continue; + } + columns.add(Bytes.toStringBinary(column)); + } + } + } + map.put("totalColumns", colCount); + if (this.filter != null) { + map.put("filter", this.filter.toString()); + } + // add the id if set + if (getId() != null) { + map.put("id", getId()); + } + return map; + } + + /** + * Enable/disable "raw" mode for this scan. + * If "raw" is enabled the scan will return all + * delete marker and deleted rows that have not + * been collected, yet. + * This is mostly useful for Scan on column families + * that have KEEP_DELETED_ROWS enabled. + * It is an error to specify any column when "raw" is set. + * @param raw True/False to enable/disable "raw" mode. 
+ */ + public void setRaw(boolean raw) { + setAttribute(RAW_ATTR, Bytes.toBytes(raw)); + } + + /** + * @return True if this Scan is in "raw" mode. + */ + public boolean isRaw() { + byte[] attr = getAttribute(RAW_ATTR); + return attr == null ? false : Bytes.toBoolean(attr); + } + + /* + * Set the isolation level for this scan. If the + * isolation level is set to READ_UNCOMMITTED, then + * this scan will return data from committed and + * uncommitted transactions. If the isolation level + * is set to READ_COMMITTED, then this scan will return + * data from committed transactions only. If a isolation + * level is not explicitly set on a Scan, then it + * is assumed to be READ_COMMITTED. + * @param level IsolationLevel for this scan + */ + public void setIsolationLevel(IsolationLevel level) { + setAttribute(ISOLATION_LEVEL, level.toBytes()); + } + /* + * @return The isolation level of this scan. + * If no isolation level was set for this scan object, + * then it returns READ_COMMITTED. + * @return The IsolationLevel for this scan + */ + public IsolationLevel getIsolationLevel() { + byte[] attr = getAttribute(ISOLATION_LEVEL); + return attr == null ? IsolationLevel.READ_COMMITTED : + IsolationLevel.fromBytes(attr); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java new file mode 100644 index 0000000..634d774 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -0,0 +1,322 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
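A sketch of the two attribute-backed switches defined at the end of Scan above, raw mode and isolation level; the defaults are non-raw and READ_COMMITTED:

    import org.apache.hadoop.hbase.client.IsolationLevel;
    import org.apache.hadoop.hbase.client.Scan;

    Scan buildRawScan() {
      Scan scan = new Scan();
      scan.setRaw(true);   // also return delete markers and not-yet-collected deleted cells
      // note: a raw scan must not name specific columns
      scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);  // default is READ_COMMITTED
      return scan;
    }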
+ */ + +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.net.UnknownHostException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.RemoteExceptionHandler; +import org.apache.hadoop.hbase.UnknownScannerException; +import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; +import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.net.DNS; + +import com.google.protobuf.ServiceException; + +/** + * Retries scanner operations such as create, next, etc. + * Used by {@link ResultScanner}s made by {@link HTable}. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ScannerCallable extends ServerCallable { + public static final String LOG_SCANNER_LATENCY_CUTOFF + = "hbase.client.log.scanner.latency.cutoff"; + public static final String LOG_SCANNER_ACTIVITY = "hbase.client.log.scanner.activity"; + + private static final Log LOG = LogFactory.getLog(ScannerCallable.class); + private long scannerId = -1L; + private boolean instantiated = false; + private boolean closed = false; + private Scan scan; + private int caching = 1; + private ScanMetrics scanMetrics; + private boolean logScannerActivity = false; + private int logCutOffLatency = 1000; + + // indicate if it is a remote server call + private boolean isRegionServerRemote = true; + private long nextCallSeq = 0; + + /** + * @param connection which connection + * @param tableName table callable is on + * @param scan the scan to execute + * @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable + * won't collect metrics + */ + public ScannerCallable (HConnection connection, byte [] tableName, Scan scan, + ScanMetrics scanMetrics) { + super(connection, tableName, scan.getStartRow()); + this.scan = scan; + this.scanMetrics = scanMetrics; + Configuration conf = connection.getConfiguration(); + logScannerActivity = conf.getBoolean(LOG_SCANNER_ACTIVITY, false); + logCutOffLatency = conf.getInt(LOG_SCANNER_LATENCY_CUTOFF, 1000); + } + + /** + * @param reload force reload of server location + * @throws IOException + */ + @Override + public void connect(boolean reload) throws IOException { + if (!instantiated || reload) { + super.connect(reload); + checkIfRegionServerIsRemote(); + instantiated = true; + } + + // check how often we retry. + // HConnectionManager will call instantiateServer with reload==true + // if and only if for retries. 
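The two configuration keys declared in ScannerCallable above can be set by a client that wants slow-scanner logging; HBaseConfiguration.create() is assumed as the usual way to build the Configuration, and the 500 ms cutoff is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.ScannerCallable;

    Configuration buildScannerLoggingConf() {
      Configuration conf = HBaseConfiguration.create();
      conf.setBoolean(ScannerCallable.LOG_SCANNER_ACTIVITY, true);   // off by default
      conf.setInt(ScannerCallable.LOG_SCANNER_LATENCY_CUTOFF, 500);  // log fetches slower than 500 ms (default 1000)
      return conf;
    }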
+ if (reload && this.scanMetrics != null) { + this.scanMetrics.countOfRPCRetries.incrementAndGet(); + if (isRegionServerRemote) { + this.scanMetrics.countOfRemoteRPCRetries.incrementAndGet(); + } + } + } + + /** + * compare the local machine hostname with region server's hostname + * to decide if hbase client connects to a remote region server + * @throws UnknownHostException. + */ + private void checkIfRegionServerIsRemote() throws UnknownHostException { + String myAddress = DNS.getDefaultHost("default", "default"); + if (this.location.getHostname().equalsIgnoreCase(myAddress)) { + isRegionServerRemote = false; + } else { + isRegionServerRemote = true; + } + } + + /** + * @see java.util.concurrent.Callable#call() + */ + public Result [] call() throws IOException { + if (closed) { + if (scannerId != -1) { + close(); + } + } else { + if (scannerId == -1L) { + this.scannerId = openScanner(); + } else { + Result [] rrs = null; + try { + incRPCcallsMetrics(); + ScanRequest request = + RequestConverter.buildScanRequest(scannerId, caching, false, nextCallSeq); + try { + ScanResponse response = server.scan(null, request); + // Client and RS maintain a nextCallSeq number during the scan. Every next() call + // from client to server will increment this number in both sides. Client passes this + // number along with the request and at RS side both the incoming nextCallSeq and its + // nextCallSeq will be matched. In case of a timeout this increment at the client side + // should not happen. If at the server side fetching of next batch of data was over, + // there will be mismatch in the nextCallSeq number. Server will throw + // OutOfOrderScannerNextException and then client will reopen the scanner with startrow + // as the last successfully retrieved row. + // See HBASE-5974 + nextCallSeq++; + long timestamp = System.currentTimeMillis(); + rrs = ResponseConverter.getResults(response); + if (logScannerActivity) { + long now = System.currentTimeMillis(); + if (now - timestamp > logCutOffLatency) { + int rows = rrs == null ? 0 : rrs.length; + LOG.info("Took " + (now-timestamp) + "ms to fetch " + + rows + " rows from scanner=" + scannerId); + } + } + if (response.hasMoreResults() + && !response.getMoreResults()) { + scannerId = -1L; + closed = true; + return null; + } + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + updateResultsMetrics(rrs); + } catch (IOException e) { + if (logScannerActivity) { + LOG.info("Got exception in fetching from scanner=" + + scannerId, e); + } + IOException ioe = e; + if (e instanceof RemoteException) { + ioe = RemoteExceptionHandler.decodeRemoteException((RemoteException)e); + } + if (logScannerActivity && (ioe instanceof UnknownScannerException)) { + try { + HRegionLocation location = + connection.relocateRegion(tableName, scan.getStartRow()); + LOG.info("Scanner=" + scannerId + + " expired, current region location is " + location.toString() + + " ip:" + location.getHostnamePort()); + } catch (Throwable t) { + LOG.info("Failed to relocate region", t); + } + } + if (ioe instanceof NotServingRegionException) { + // Throw a DNRE so that we break out of cycle of calling NSRE + // when what we need is to open scanner against new location. + // Attach NSRE to signal client that it needs to resetup scanner. 
+ if (this.scanMetrics != null) { + this.scanMetrics.countOfNSRE.incrementAndGet(); + } + throw new DoNotRetryIOException("Reset scanner", ioe); + } else if (ioe instanceof RegionServerStoppedException) { + // Throw a DNRE so that we break out of cycle of calling RSSE + // when what we need is to open scanner against new location. + // Attach RSSE to signal client that it needs to resetup scanner. + throw new DoNotRetryIOException("Reset scanner", ioe); + } else { + // The outer layers will retry + throw ioe; + } + } + return rrs; + } + } + return null; + } + + private void incRPCcallsMetrics() { + if (this.scanMetrics == null) { + return; + } + this.scanMetrics.countOfRPCcalls.incrementAndGet(); + if (isRegionServerRemote) { + this.scanMetrics.countOfRemoteRPCcalls.incrementAndGet(); + } + } + + private void updateResultsMetrics(Result[] rrs) { + if (this.scanMetrics == null || rrs == null) { + return; + } + /* + * broken by protobufs + for (Result rr : rrs) { + if (rr.getBytes() != null) { + this.scanMetrics.countOfBytesInResults.inc(rr.getBytes().getLength()); + if (isRegionServerRemote) { + this.scanMetrics.countOfBytesInRemoteResults.inc( + rr.getBytes().getLength()); + } + } + } + */ + } + + private void close() { + if (this.scannerId == -1L) { + return; + } + try { + incRPCcallsMetrics(); + ScanRequest request = + RequestConverter.buildScanRequest(this.scannerId, 0, true); + try { + server.scan(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } catch (IOException e) { + LOG.warn("Ignore, probably already closed", e); + } + this.scannerId = -1L; + } + + protected long openScanner() throws IOException { + incRPCcallsMetrics(); + ScanRequest request = + RequestConverter.buildScanRequest( + this.location.getRegionInfo().getRegionName(), + this.scan, 0, false); + try { + ScanResponse response = server.scan(null, request); + long id = response.getScannerId(); + if (logScannerActivity) { + LOG.info("Open scanner=" + id + " for scan=" + scan.toString() + + " on region " + this.location.toString() + " ip:" + + this.location.getHostnamePort()); + } + return id; + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + protected Scan getScan() { + return scan; + } + + /** + * Call this when the next invocation of call should close the scanner + */ + public void setClose() { + this.closed = true; + } + + /** + * @return the HRegionInfo for the current region + */ + public HRegionInfo getHRegionInfo() { + if (!instantiated) { + return null; + } + return location.getRegionInfo(); + } + + /** + * Get the number of rows that will be fetched on next + * @return the number of rows for caching + */ + public int getCaching() { + return caching; + } + + /** + * Set the number of rows that will be fetched on next + * @param caching the number of rows for caching + */ + public void setCaching(int caching) { + this.caching = caching; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java new file mode 100644 index 0000000..08bc61c --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java @@ -0,0 +1,44 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DoNotRetryIOException; + +/** + * Thrown when a scanner has timed out. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ScannerTimeoutException extends DoNotRetryIOException { + + private static final long serialVersionUID = 8788838690290688313L; + + /** default constructor */ + ScannerTimeoutException() { + super(); + } + + /** @param s */ + ScannerTimeoutException(String s) { + super(s); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java new file mode 100644 index 0000000..7c8418a --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java @@ -0,0 +1,243 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.lang.reflect.UndeclaredThrowableException; +import java.net.ConnectException; +import java.net.SocketTimeoutException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.ipc.HBaseClientRPC; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.ipc.RemoteException; + +import com.google.protobuf.ServiceException; + +/** + * Abstract class that implements {@link Callable}. Implementation stipulates + * return type and method we actually invoke on remote Server. Usually + * used inside a try/catch that fields usual connection failures all wrapped + * up in a retry loop. + *
<p>
Call {@link #connect(boolean)} to connect to server hosting region + * that contains the passed row in the passed table before invoking + * {@link #call()}. + * @see HConnection#getRegionServerWithoutRetries(ServerCallable) + * @param the class that the ServerCallable handles + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public abstract class ServerCallable implements Callable { + protected final HConnection connection; + protected final byte [] tableName; + protected final byte [] row; + protected HRegionLocation location; + protected ClientProtocol server; + protected int callTimeout; + protected long startTime, endTime; + + /** + * @param connection Connection to use. + * @param tableName Table name to which row belongs. + * @param row The row we want in tableName. + */ + public ServerCallable(HConnection connection, byte [] tableName, byte [] row) { + this(connection, tableName, row, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + } + + public ServerCallable(HConnection connection, byte [] tableName, byte [] row, int callTimeout) { + this.connection = connection; + this.tableName = tableName; + this.row = row; + this.callTimeout = callTimeout; + } + + /** + * Connect to the server hosting region with row from tablename. + * @param reload Set this to true if connection should re-find the region + * @throws IOException e + */ + public void connect(final boolean reload) throws IOException { + this.location = connection.getRegionLocation(tableName, row, reload); + this.server = connection.getClient(location.getHostname(), + location.getPort()); + } + + /** @return the server name + * @deprecated Just use {@link #toString()} instead. + */ + public String getServerName() { + if (location == null) return null; + return location.getHostnamePort(); + } + + /** @return the region name + * @deprecated Just use {@link #toString()} instead. + */ + public byte[] getRegionName() { + if (location == null) return null; + return location.getRegionInfo().getRegionName(); + } + + /** @return the row + * @deprecated Just use {@link #toString()} instead. + */ + public byte [] getRow() { + return row; + } + + public void beforeCall() { + HBaseClientRPC.setRpcTimeout(this.callTimeout); + this.startTime = System.currentTimeMillis(); + } + + public void afterCall() { + HBaseClientRPC.resetRpcTimeout(); + this.endTime = System.currentTimeMillis(); + } + + public void shouldRetry(Throwable throwable) throws IOException { + if (this.callTimeout != HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT) + if (throwable instanceof SocketTimeoutException + || (this.endTime - this.startTime > this.callTimeout)) { + throw (SocketTimeoutException) (SocketTimeoutException) new SocketTimeoutException( + "Call to access row '" + Bytes.toString(row) + "' on table '" + + Bytes.toString(tableName) + + "' failed on socket timeout exception: " + throwable) + .initCause(throwable); + } else { + this.callTimeout = ((int) (this.endTime - this.startTime)); + } + } + + /** + * @return {@link HConnection} instance used by this Callable. + */ + HConnection getConnection() { + return this.connection; + } + + /** + * Run this instance with retries, timed waits, + * and refinds of missing regions. 
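A sketch of the call pattern this class is written for: subclass ServerCallable, implement call() against the protected location/server fields, and run it under withRetries(); the body below only returns the located region name so that the sketch stays within the API shown in this hunk:

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.ServerCallable;

    byte[] locateRegion(HConnection connection, byte[] tableName, byte[] row)
        throws IOException {
      return new ServerCallable<byte[]>(connection, tableName, row) {
        @Override
        public byte[] call() {
          // withRetries() has already run connect(reload), so location is populated here.
          return location.getRegionInfo().getRegionName();
        }
      }.withRetries();
    }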
+ * + * @return an object of type T + * @throws IOException if a remote or network exception occurs + * @throws RuntimeException other unspecified error + */ + public T withRetries() + throws IOException, RuntimeException { + Configuration c = getConnection().getConfiguration(); + final long pause = c.getLong(HConstants.HBASE_CLIENT_PAUSE, + HConstants.DEFAULT_HBASE_CLIENT_PAUSE); + final int numRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + List exceptions = + new ArrayList(); + for (int tries = 0; tries < numRetries; tries++) { + try { + beforeCall(); + connect(tries != 0); + return call(); + } catch (Throwable t) { + shouldRetry(t); + t = translateException(t); + if (t instanceof SocketTimeoutException || + t instanceof ConnectException || + t instanceof RetriesExhaustedException) { + // if thrown these exceptions, we clear all the cache entries that + // map to that slow/dead server; otherwise, let cache miss and ask + // .META. again to find the new location + HRegionLocation hrl = location; + if (hrl != null) { + getConnection().clearCaches(hrl.getHostnamePort()); + } + } + RetriesExhaustedException.ThrowableWithExtraContext qt = + new RetriesExhaustedException.ThrowableWithExtraContext(t, + System.currentTimeMillis(), toString()); + exceptions.add(qt); + if (tries == numRetries - 1) { + throw new RetriesExhaustedException(tries, exceptions); + } + } finally { + afterCall(); + } + try { + Thread.sleep(ConnectionUtils.getPauseTime(pause, tries)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IOException("Giving up after tries=" + tries, e); + } + } + return null; + } + + /** + * Run this instance against the server once. + * @return an object of type T + * @throws IOException if a remote or network exception occurs + * @throws RuntimeException other unspecified error + */ + public T withoutRetries() + throws IOException, RuntimeException { + try { + beforeCall(); + connect(false); + return call(); + } catch (Throwable t) { + Throwable t2 = translateException(t); + if (t2 instanceof IOException) { + throw (IOException)t2; + } else { + throw new RuntimeException(t2); + } + } finally { + afterCall(); + } + } + + protected static Throwable translateException(Throwable t) throws IOException { + if (t instanceof UndeclaredThrowableException) { + t = t.getCause(); + } + if (t instanceof RemoteException) { + t = ((RemoteException)t).unwrapRemoteException(); + } + if (t instanceof ServiceException) { + ServiceException se = (ServiceException)t; + Throwable cause = se.getCause(); + if (cause != null && cause instanceof DoNotRetryIOException) { + throw (DoNotRetryIOException)cause; + } + } else if (t instanceof DoNotRetryIOException) { + throw (DoNotRetryIOException)t; + } + return t; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java new file mode 100644 index 0000000..bad7ced --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java @@ -0,0 +1,96 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.io.compress.Compression; + +/** + * Immutable HColumnDescriptor + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { + + /** + * @param desc wrapped + */ + public UnmodifyableHColumnDescriptor (final HColumnDescriptor desc) { + super(desc); + } + + /** + * @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(byte[], byte[]) + */ + @Override + public HColumnDescriptor setValue(byte[] key, byte[] value) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + /** + * @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(java.lang.String, java.lang.String) + */ + @Override + public HColumnDescriptor setValue(String key, String value) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + /** + * @see org.apache.hadoop.hbase.HColumnDescriptor#setMaxVersions(int) + */ + @Override + public HColumnDescriptor setMaxVersions(int maxVersions) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + /** + * @see org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean) + */ + @Override + public HColumnDescriptor setInMemory(boolean inMemory) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + /** + * @see org.apache.hadoop.hbase.HColumnDescriptor#setBlockCacheEnabled(boolean) + */ + @Override + public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + /** + * @see org.apache.hadoop.hbase.HColumnDescriptor#setTimeToLive(int) + */ + @Override + public HColumnDescriptor setTimeToLive(int timeToLive) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + /** + * @see org.apache.hadoop.hbase.HColumnDescriptor#setCompressionType(org.apache.hadoop.hbase.io.compress.Compression.Algorithm) + */ + @Override + public HColumnDescriptor setCompressionType(Compression.Algorithm type) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java new file mode 100644 index 0000000..f96096e --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java @@ -0,0 +1,53 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HRegionInfo; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +class UnmodifyableHRegionInfo extends HRegionInfo { + /* + * Creates an unmodifyable copy of an HRegionInfo + * + * @param info + */ + UnmodifyableHRegionInfo(HRegionInfo info) { + super(info); + } + + /** + * @param split set split status + */ + @Override + public void setSplit(boolean split) { + throw new UnsupportedOperationException("HRegionInfo is read-only"); + } + + /** + * @param offLine set online - offline status + */ + @Override + public void setOffline(boolean offLine) { + throw new UnsupportedOperationException("HRegionInfo is read-only"); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java new file mode 100644 index 0000000..87c4f9b --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java @@ -0,0 +1,127 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; + +/** + * Read-only table descriptor. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class UnmodifyableHTableDescriptor extends HTableDescriptor { + /** Default constructor */ + public UnmodifyableHTableDescriptor() { + super(); + } + + /* + * Create an unmodifyable copy of an HTableDescriptor + * @param desc + */ + UnmodifyableHTableDescriptor(final HTableDescriptor desc) { + super(desc.getName(), getUnmodifyableFamilies(desc), desc.getValues()); + } + + + /* + * @param desc + * @return Families as unmodifiable array. 
+ */ + private static HColumnDescriptor[] getUnmodifyableFamilies( + final HTableDescriptor desc) { + HColumnDescriptor [] f = new HColumnDescriptor[desc.getFamilies().size()]; + int i = 0; + for (HColumnDescriptor c: desc.getFamilies()) { + f[i++] = c; + } + return f; + } + + /** + * Does NOT add a column family. This object is immutable + * @param family HColumnDescriptor of familyto add. + */ + @Override + public void addFamily(final HColumnDescriptor family) { + throw new UnsupportedOperationException("HTableDescriptor is read-only"); + } + + /** + * @param column + * @return Column descriptor for the passed family name or the family on + * passed in column. + */ + @Override + public HColumnDescriptor removeFamily(final byte [] column) { + throw new UnsupportedOperationException("HTableDescriptor is read-only"); + } + + /** + * @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean) + */ + @Override + public void setReadOnly(boolean readOnly) { + throw new UnsupportedOperationException("HTableDescriptor is read-only"); + } + + /** + * @see org.apache.hadoop.hbase.HTableDescriptor#setValue(byte[], byte[]) + */ + @Override + public void setValue(byte[] key, byte[] value) { + throw new UnsupportedOperationException("HTableDescriptor is read-only"); + } + + /** + * @see org.apache.hadoop.hbase.HTableDescriptor#setValue(java.lang.String, java.lang.String) + */ + @Override + public void setValue(String key, String value) { + throw new UnsupportedOperationException("HTableDescriptor is read-only"); + } + + /** + * @see org.apache.hadoop.hbase.HTableDescriptor#setMaxFileSize(long) + */ + @Override + public void setMaxFileSize(long maxFileSize) { + throw new UnsupportedOperationException("HTableDescriptor is read-only"); + } + + /** + * @see org.apache.hadoop.hbase.HTableDescriptor#setMemStoreFlushSize(long) + */ + @Override + public void setMemStoreFlushSize(long memstoreFlushSize) { + throw new UnsupportedOperationException("HTableDescriptor is read-only"); + } + +// /** +// * @see org.apache.hadoop.hbase.HTableDescriptor#addIndex(org.apache.hadoop.hbase.client.tableindexed.IndexSpecification) +// */ +// @Override +// public void addIndex(IndexSpecification index) { +// throw new UnsupportedOperationException("HTableDescriptor is read-only"); +// } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java new file mode 100644 index 0000000..01890cf --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java @@ -0,0 +1,54 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.client; + + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; + +import java.io.IOException; + +/** + * We inherit the current ZooKeeperWatcher implementation to change the semantic + * of the close: the new close won't immediately close the connection but + * will have a keep alive. See {@link HConnection}. + * This allows to make it available with a consistent interface. The whole + * ZooKeeperWatcher use in HConnection will be then changed to remove the + * watcher part. + * + * This class is intended to be used internally by HBase classes; but not by + * final user code. Hence it's package protected. + */ +class ZooKeeperKeepAliveConnection extends ZooKeeperWatcher{ + ZooKeeperKeepAliveConnection( + Configuration conf, String descriptor, + HConnectionManager.HConnectionImplementation conn) throws IOException { + super(conf, descriptor, conn); + } + + @Override + public void close() { + ((HConnectionManager.HConnectionImplementation)abortable).releaseZooKeeperWatcher(this); + } + + void internalClose(){ + super.close(); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java new file mode 100644 index 0000000..2e0c05e --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -0,0 +1,701 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.client.coprocessor; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.NavigableSet; +import java.util.TreeMap; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument; +import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse; +import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateService; +import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; +import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; +import org.apache.hadoop.hbase.ipc.ServerRpcController; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; + +import com.google.protobuf.ByteString; + +/** + * This client class is for invoking the aggregate functions deployed on the + * Region Server side via the AggregateService. This class will implement the + * supporting functionality for summing/processing the individual results + * obtained from the AggregateService for each region. + *

+ * This will serve as the client side handler for invoking the aggregate + * functions. + *
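A hedged usage sketch (the table and column names are invented, and the Scan must satisfy the conditions listed below):

  Configuration conf = HBaseConfiguration.create();
  AggregationClient aggregationClient = new AggregationClient(conf);
  Scan scan = new Scan();
  // exactly one column family, as required by the validation rules below
  scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
  long rows = aggregationClient.rowCount(Bytes.toBytes("mytable"),
      new LongColumnInterpreter(), scan);
  Long max = aggregationClient.max(Bytes.toBytes("mytable"),
      new LongColumnInterpreter(), scan);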

    + * For all aggregate functions, + *
  • start row < end row is an essential condition (if they are not + * {@link HConstants#EMPTY_BYTE_ARRAY}) + *
• Column family can't be null. If multiple families are + * provided, an IOException will be thrown. An optional column qualifier can + * also be defined. + *
  • For methods to find maximum, minimum, sum, rowcount, it returns the + * parameter type. For average and std, it returns a double value. For row + * count, it returns a long value. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class AggregationClient { + + private static final Log log = LogFactory.getLog(AggregationClient.class); + Configuration conf; + + /** + * Constructor with Conf object + * @param cfg + */ + public AggregationClient(Configuration cfg) { + this.conf = cfg; + } + + /** + * It gives the maximum value of a column for a given column family for the + * given range. In case qualifier is null, a max of all values for the given + * family is returned. + * @param tableName + * @param ci + * @param scan + * @return max val + * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. + */ + public R max(final byte[] tableName, final ColumnInterpreter ci, + final Scan scan) throws Throwable { + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); + class MaxCallBack implements Batch.Callback { + R max = null; + + R getMax() { + return max; + } + + @Override + public synchronized void update(byte[] region, byte[] row, R result) { + max = (max == null || (result != null && ci.compare(max, result) < 0)) ? result : max; + } + } + MaxCallBack aMaxCallBack = new MaxCallBack(); + HTable table = null; + try { + table = new HTable(conf, tableName); + table.coprocessorService(AggregateService.class, scan.getStartRow(), + scan.getStopRow(), new Batch.Call() { + @Override + public R call(AggregateService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getMax(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + if (response.getFirstPartCount() > 0) { + return ci.castToCellType( + ci.parseResponseAsPromotedType( + getBytesFromResponse(response.getFirstPart(0)))); + } + return null; + } + }, aMaxCallBack); + } finally { + if (table != null) { + table.close(); + } + } + return aMaxCallBack.getMax(); + } + + private void validateParameters(Scan scan) throws IOException { + if (scan == null + || (Bytes.equals(scan.getStartRow(), scan.getStopRow()) && !Bytes + .equals(scan.getStartRow(), HConstants.EMPTY_START_ROW)) + || ((Bytes.compareTo(scan.getStartRow(), scan.getStopRow()) > 0) && + !Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW))) { + throw new IOException( + "Agg client Exception: Startrow should be smaller than Stoprow"); + } else if (scan.getFamilyMap().size() != 1) { + throw new IOException("There must be only one family."); + } + } + + /** + * It gives the minimum value of a column for a given column family for the + * given range. In case qualifier is null, a min of all values for the given + * family is returned. 
+ * @param tableName + * @param ci + * @param scan + * @return min val + * @throws Throwable + */ + public R min(final byte[] tableName, final ColumnInterpreter ci, + final Scan scan) throws Throwable { + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); + class MinCallBack implements Batch.Callback { + + private R min = null; + + public R getMinimum() { + return min; + } + + @Override + public synchronized void update(byte[] region, byte[] row, R result) { + min = (min == null || (result != null && ci.compare(result, min) < 0)) ? result : min; + } + } + MinCallBack minCallBack = new MinCallBack(); + HTable table = null; + try { + table = new HTable(conf, tableName); + table.coprocessorService(AggregateService.class, scan.getStartRow(), + scan.getStopRow(), new Batch.Call() { + + @Override + public R call(AggregateService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getMin(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + if (response.getFirstPartCount() > 0) { + return ci.castToCellType( + ci.parseResponseAsPromotedType( + getBytesFromResponse(response.getFirstPart(0)))); + } + return null; + } + }, minCallBack); + } finally { + if (table != null) { + table.close(); + } + } + log.debug("Min fom all regions is: " + minCallBack.getMinimum()); + return minCallBack.getMinimum(); + } + + /** + * It gives the row count, by summing up the individual results obtained from + * regions. In case the qualifier is null, FirstKeyValueFilter is used to + * optimised the operation. In case qualifier is provided, I can't use the + * filter as it may set the flag to skip to next row, but the value read is + * not of the given filter: in this case, this particular row will not be + * counted ==> an error. + * @param tableName + * @param ci + * @param scan + * @return + * @throws Throwable + */ + public long rowCount(final byte[] tableName, + final ColumnInterpreter ci, final Scan scan) throws Throwable { + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); + class RowNumCallback implements Batch.Callback { + private final AtomicLong rowCountL = new AtomicLong(0); + + public long getRowNumCount() { + return rowCountL.get(); + } + + @Override + public void update(byte[] region, byte[] row, Long result) { + rowCountL.addAndGet(result.longValue()); + } + } + RowNumCallback rowNum = new RowNumCallback(); + HTable table = null; + try { + table = new HTable(conf, tableName); + table.coprocessorService(AggregateService.class, scan.getStartRow(), + scan.getStopRow(), new Batch.Call() { + @Override + public Long call(AggregateService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getRowNum(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + byte[] bytes = getBytesFromResponse(response.getFirstPart(0)); + ByteBuffer bb = ByteBuffer.allocate(8).put(bytes); + bb.rewind(); + return bb.getLong(); + } + }, rowNum); + } finally { + if (table != null) { + table.close(); + } + } + return rowNum.getRowNumCount(); + } + + /** + * It sums up the value returned from various regions. 
In case qualifier is + * null, summation of all the column qualifiers in the given family is done. + * @param tableName + * @param ci + * @param scan + * @return sum + * @throws Throwable + */ + public S sum(final byte[] tableName, final ColumnInterpreter ci, + final Scan scan) throws Throwable { + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); + + class SumCallBack implements Batch.Callback { + S sumVal = null; + + public S getSumResult() { + return sumVal; + } + + @Override + public synchronized void update(byte[] region, byte[] row, S result) { + sumVal = ci.add(sumVal, result); + } + } + SumCallBack sumCallBack = new SumCallBack(); + HTable table = null; + try { + table = new HTable(conf, tableName); + table.coprocessorService(AggregateService.class, scan.getStartRow(), + scan.getStopRow(), new Batch.Call() { + @Override + public S call(AggregateService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getSum(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + if (response.getFirstPartCount() == 0) { + return null; + } + return ci.parseResponseAsPromotedType( + getBytesFromResponse(response.getFirstPart(0))); + } + }, sumCallBack); + } finally { + if (table != null) { + table.close(); + } + } + return sumCallBack.getSumResult(); + } + + /** + * It computes average while fetching sum and row count from all the + * corresponding regions. Approach is to compute a global sum of region level + * sum and rowcount and then compute the average. + * @param tableName + * @param scan + * @throws Throwable + */ + private Pair getAvgArgs(final byte[] tableName, + final ColumnInterpreter ci, final Scan scan) throws Throwable { + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); + class AvgCallBack implements Batch.Callback> { + S sum = null; + Long rowCount = 0l; + + public Pair getAvgArgs() { + return new Pair(sum, rowCount); + } + + @Override + public synchronized void update(byte[] region, byte[] row, Pair result) { + sum = ci.add(sum, result.getFirst()); + rowCount += result.getSecond(); + } + } + AvgCallBack avgCallBack = new AvgCallBack(); + HTable table = null; + try { + table = new HTable(conf, tableName); + table.coprocessorService(AggregateService.class, scan.getStartRow(), + scan.getStopRow(), + new Batch.Call>() { + @Override + public Pair call(AggregateService instance) + throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getAvg(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + Pair pair = new Pair(null, 0L); + if (response.getFirstPartCount() == 0) { + return pair; + } + pair.setFirst(ci.parseResponseAsPromotedType( + getBytesFromResponse(response.getFirstPart(0)))); + ByteBuffer bb = ByteBuffer.allocate(8).put( + getBytesFromResponse(response.getSecondPart())); + bb.rewind(); + pair.setSecond(bb.getLong()); + return pair; + } + }, avgCallBack); + } finally { + if (table != null) { + table.close(); + } + } + return avgCallBack.getAvgArgs(); + } + + /** + * This is the client side interface/handle for calling the average method for + * a given cf-cq combination. 
It was necessary to add one more call stack as + * its return type should be a decimal value, irrespective of what + * columninterpreter says. So, this methods collects the necessary parameters + * to compute the average and returs the double value. + * @param tableName + * @param ci + * @param scan + * @return + * @throws Throwable + */ + public double avg(final byte[] tableName, + final ColumnInterpreter ci, Scan scan) throws Throwable { + Pair p = getAvgArgs(tableName, ci, scan); + return ci.divideForAvg(p.getFirst(), p.getSecond()); + } + + /** + * It computes a global standard deviation for a given column and its value. + * Standard deviation is square root of (average of squares - + * average*average). From individual regions, it obtains sum, square sum and + * number of rows. With these, the above values are computed to get the global + * std. + * @param tableName + * @param scan + * @return + * @throws Throwable + */ + private Pair, Long> getStdArgs(final byte[] tableName, + final ColumnInterpreter ci, final Scan scan) throws Throwable { + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); + class StdCallback implements Batch.Callback, Long>> { + long rowCountVal = 0l; + S sumVal = null, sumSqVal = null; + + public Pair, Long> getStdParams() { + List l = new ArrayList(); + l.add(sumVal); + l.add(sumSqVal); + Pair, Long> p = new Pair, Long>(l, rowCountVal); + return p; + } + + @Override + public synchronized void update(byte[] region, byte[] row, Pair, Long> result) { + if (result.getFirst().size() > 0) { + sumVal = ci.add(sumVal, result.getFirst().get(0)); + sumSqVal = ci.add(sumSqVal, result.getFirst().get(1)); + rowCountVal += result.getSecond(); + } + } + } + StdCallback stdCallback = new StdCallback(); + HTable table = null; + try { + table = new HTable(conf, tableName); + table.coprocessorService(AggregateService.class, scan.getStartRow(), + scan.getStopRow(), + new Batch.Call, Long>>() { + @Override + public Pair, Long> call(AggregateService instance) + throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getStd(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + Pair,Long> pair = + new Pair, Long>(new ArrayList(), 0L); + if (response.getFirstPartCount() == 0) { + return pair; + } + List list = new ArrayList(); + for (int i = 0; i < response.getFirstPartCount(); i++) { + list.add(ci.parseResponseAsPromotedType( + getBytesFromResponse(response.getFirstPart(i)))); + } + pair.setFirst(list); + ByteBuffer bb = ByteBuffer.allocate(8).put( + getBytesFromResponse(response.getSecondPart())); + bb.rewind(); + pair.setSecond(bb.getLong()); + return pair; + } + }, stdCallback); + } finally { + if (table != null) { + table.close(); + } + } + return stdCallback.getStdParams(); + } + + /** + * This is the client side interface/handle for calling the std method for a + * given cf-cq combination. It was necessary to add one more call stack as its + * return type should be a decimal value, irrespective of what + * columninterpreter says. So, this methods collects the necessary parameters + * to compute the std and returns the double value. 
+ * @param tableName + * @param ci + * @param scan + * @return + * @throws Throwable + */ + public double std(final byte[] tableName, ColumnInterpreter ci, + Scan scan) throws Throwable { + Pair, Long> p = getStdArgs(tableName, ci, scan); + double res = 0d; + double avg = ci.divideForAvg(p.getFirst().get(0), p.getSecond()); + double avgOfSumSq = ci.divideForAvg(p.getFirst().get(1), p.getSecond()); + res = avgOfSumSq - (avg) * (avg); // variance + res = Math.pow(res, 0.5); + return res; + } + + /** + * It helps locate the region with median for a given column whose weight + * is specified in an optional column. + * From individual regions, it obtains sum of values and sum of weights. + * @param tableName + * @param ci + * @param scan + * @return pair whose first element is a map between start row of the region + * and (sum of values, sum of weights) for the region, the second element is + * (sum of values, sum of weights) for all the regions chosen + * @throws Throwable + */ + private Pair>, List> + getMedianArgs(final byte[] tableName, + final ColumnInterpreter ci, final Scan scan) throws Throwable { + final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); + final NavigableMap> map = + new TreeMap>(Bytes.BYTES_COMPARATOR); + class StdCallback implements Batch.Callback> { + S sumVal = null, sumWeights = null; + + public Pair>, List> getMedianParams() { + List l = new ArrayList(); + l.add(sumVal); + l.add(sumWeights); + Pair>, List> p = + new Pair>, List>(map, l); + return p; + } + + @Override + public synchronized void update(byte[] region, byte[] row, List result) { + map.put(row, result); + sumVal = ci.add(sumVal, result.get(0)); + sumWeights = ci.add(sumWeights, result.get(1)); + } + } + StdCallback stdCallback = new StdCallback(); + HTable table = null; + try { + table = new HTable(conf, tableName); + table.coprocessorService(AggregateService.class, scan.getStartRow(), + scan.getStopRow(), new Batch.Call>() { + @Override + public List call(AggregateService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.getMedian(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + + List list = new ArrayList(); + for (int i = 0; i < response.getFirstPartCount(); i++) { + list.add(ci.parseResponseAsPromotedType( + getBytesFromResponse(response.getFirstPart(i)))); + } + return list; + } + + }, stdCallback); + } finally { + if (table != null) { + table.close(); + } + } + return stdCallback.getMedianParams(); + } + + /** + * This is the client side interface/handler for calling the median method for a + * given cf-cq combination. This method collects the necessary parameters + * to compute the median and returns the median. 
+ * @param tableName + * @param ci + * @param scan + * @return R the median + * @throws Throwable + */ + public R median(final byte[] tableName, ColumnInterpreter ci, + Scan scan) throws Throwable { + Pair>, List> p = getMedianArgs(tableName, ci, scan); + byte[] startRow = null; + byte[] colFamily = scan.getFamilies()[0]; + NavigableSet quals = scan.getFamilyMap().get(colFamily); + NavigableMap> map = p.getFirst(); + S sumVal = p.getSecond().get(0); + S sumWeights = p.getSecond().get(1); + double halfSumVal = ci.divideForAvg(sumVal, 2L); + double movingSumVal = 0; + boolean weighted = false; + if (quals.size() > 1) { + weighted = true; + halfSumVal = ci.divideForAvg(sumWeights, 2L); + } + + for (Map.Entry> entry : map.entrySet()) { + S s = weighted ? entry.getValue().get(1) : entry.getValue().get(0); + double newSumVal = movingSumVal + ci.divideForAvg(s, 1L); + if (newSumVal > halfSumVal) break; // we found the region with the median + movingSumVal = newSumVal; + startRow = entry.getKey(); + } + // scan the region with median and find it + Scan scan2 = new Scan(scan); + // inherit stop row from method parameter + if (startRow != null) scan2.setStartRow(startRow); + HTable table = null; + ResultScanner scanner = null; + try { + table = new HTable(conf, tableName); + int cacheSize = scan2.getCaching(); + if (!scan2.getCacheBlocks() || scan2.getCaching() < 2) { + scan2.setCacheBlocks(true); + cacheSize = 5; + scan2.setCaching(cacheSize); + } + scanner = table.getScanner(scan2); + Result[] results = null; + byte[] qualifier = quals.pollFirst(); + // qualifier for the weight column + byte[] weightQualifier = weighted ? quals.pollLast() : qualifier; + R value = null; + do { + results = scanner.next(cacheSize); + if (results != null && results.length > 0) { + for (int i = 0; i < results.length; i++) { + Result r = results[i]; + // retrieve weight + KeyValue kv = r.getColumnLatest(colFamily, weightQualifier); + R newValue = ci.getValue(colFamily, weightQualifier, kv); + S s = ci.castToReturnType(newValue); + double newSumVal = movingSumVal + ci.divideForAvg(s, 1L); + // see if we have moved past the median + if (newSumVal > halfSumVal) { + return value; + } + movingSumVal = newSumVal; + kv = r.getColumnLatest(colFamily, qualifier); + value = ci.getValue(colFamily, qualifier, kv); + } + } + } while (results != null && results.length > 0); + } finally { + if (scanner != null) { + scanner.close(); + } + if (table != null) { + table.close(); + } + } + return null; + } + + AggregateArgument validateArgAndGetPB(Scan scan, ColumnInterpreter ci) + throws IOException { + validateParameters(scan); + final AggregateArgument.Builder requestBuilder = + AggregateArgument.newBuilder(); + requestBuilder.setInterpreterClassName(ci.getClass().getCanonicalName()); + ByteString columnInterpreterSpecificData = null; + if ((columnInterpreterSpecificData = ci.columnInterpreterSpecificData()) + != null) { + requestBuilder.setInterpreterSpecificBytes(columnInterpreterSpecificData); + } + requestBuilder.setScan(ProtobufUtil.toScan(scan)); + return requestBuilder.build(); + } + + byte[] getBytesFromResponse(ByteString response) { + ByteBuffer bb = response.asReadOnlyByteBuffer(); + bb.rewind(); + byte[] bytes; + if (bb.hasArray()) { + bytes = bb.array(); + } else { + bytes = response.toByteArray(); + } + return bytes; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java new file mode 100644 index 
0000000..c1b6e35 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java @@ -0,0 +1,74 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client.coprocessor; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + + +/** + * A collection of interfaces and utilities used for interacting with custom RPC + * interfaces exposed by Coprocessors. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class Batch { + /** + * Defines a unit of work to be executed. + * + *

+ * When used with + * {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} + * the implementation's {@link Batch.Call#call(Object)} method will be invoked + * with a proxy to the + * {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService} + * sub-type instance. + *

+ * @see org.apache.hadoop.hbase.client.coprocessor + * @see org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[]) + * @see org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call) + * @param <T> the instance type to be passed to + * {@link Batch.Call#call(Object)} + * @param <R> the return type from {@link Batch.Call#call(Object)} + */ + public static interface Call<T,R> { + public R call(T instance) throws IOException; + } + + /** + * Defines a generic callback to be triggered for each {@link Batch.Call#call(Object)} + * result. + * +

    + * When used with + * {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} + * the implementation's {@link Batch.Callback#update(byte[], byte[], Object)} + * method will be called with the {@link Batch.Call#call(Object)} return value + * from each region in the selected range. + *
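As a hedged illustration, a callback that simply records each region's answer as it arrives could look like:

  Batch.Callback<Long> perRegion = new Batch.Callback<Long>() {
    @Override
    public void update(byte[] region, byte[] row, Long result) {
      // 'region' is the region name, 'row' the key that selected it
      System.out.println(Bytes.toStringBinary(region) + " -> " + result);
    }
  };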

    + * @param the return type from the associated {@link Batch.Call#call(Object)} + * @see org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call) + */ + public static interface Callback { + public void update(byte[] region, byte[] row, R result); + } +} \ No newline at end of file diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java new file mode 100644 index 0000000..404a3b4 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java @@ -0,0 +1,141 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client.coprocessor; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; +import org.apache.hadoop.hbase.util.Bytes; + +import com.google.protobuf.ByteString; + +/** + * a concrete column interpreter implementation. The cell value is a Long value + * and its promoted data type is also a Long value. For computing aggregation + * function, this class is used to find the datatype of the cell value. Client + * is supposed to instantiate it and passed along as a parameter. See + * TestAggregateProtocol methods for its sample usage. + * Its methods handle null arguments gracefully. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class LongColumnInterpreter implements ColumnInterpreter { + + public Long getValue(byte[] colFamily, byte[] colQualifier, KeyValue kv) + throws IOException { + if (kv == null || kv.getValueLength() != Bytes.SIZEOF_LONG) + return null; + return Bytes.toLong(kv.getBuffer(), kv.getValueOffset()); + } + + @Override + public Long add(Long l1, Long l2) { + if (l1 == null ^ l2 == null) { + return (l1 == null) ? l2 : l1; // either of one is null. + } else if (l1 == null) // both are null + return null; + return l1 + l2; + } + + @Override + public int compare(final Long l1, final Long l2) { + if (l1 == null ^ l2 == null) { + return l1 == null ? -1 : 1; // either of one is null. + } else if (l1 == null) + return 0; // both are null + return l1.compareTo(l2); // natural ordering. + } + + @Override + public Long getMaxValue() { + return Long.MAX_VALUE; + } + + @Override + public Long increment(Long o) { + return o == null ? null : (o + 1l); + } + + @Override + public Long multiply(Long l1, Long l2) { + return (l1 == null || l2 == null) ? 
null : l1 * l2; + } + + @Override + public Long getMinValue() { + return Long.MIN_VALUE; + } + + @Override + public double divideForAvg(Long l1, Long l2) { + return (l2 == null || l1 == null) ? Double.NaN : (l1.doubleValue() / l2 + .doubleValue()); + } + + @Override + public Long castToReturnType(Long o) { + return o; + } + + + @Override + public Long parseResponseAsPromotedType(byte[] response) { + ByteBuffer b = ByteBuffer.allocate(8).put(response); + b.rewind(); + long l = b.getLong(); + return l; + } + + @Override + public Long castToCellType(Long l) { + return l; + } + + @Override + public ByteString columnInterpreterSpecificData() { + // nothing + return null; + } + + @Override + public void initialize(ByteString bytes) { + // nothing + } + + @Override + public ByteString getProtoForCellType(Long t) { + return getProtoForPromotedOrCellType(t); + } + + @Override + public ByteString getProtoForPromotedType(Long s) { + return getProtoForPromotedOrCellType(s); + } + + private ByteString getProtoForPromotedOrCellType(Long s) { + ByteBuffer bb = ByteBuffer.allocate(8).putLong(s); + bb.rewind(); + ByteString bs = ByteString.copyFrom(bb); + return bs; + } +} \ No newline at end of file diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java new file mode 100644 index 0000000..edb3c22 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java @@ -0,0 +1,226 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** +Provides client classes for invoking Coprocessor RPC protocols + +

    +

    +

    + +

    Overview

    +

    +The coprocessor framework provides a way for custom code to run in place on the +HBase region servers with each of a table's regions. These client classes +enable applications to communicate with coprocessor instances via custom RPC +protocols. +

    + +

    +In order to provide a custom RPC protocol to clients, a coprocessor implementation +must: +

      +
    • Define a protocol buffer Service and supporting Message types for the RPC methods. + See the + protocol buffer guide + for more details on defining services.
    • +
    • Generate the Service and Message code using the protoc compiler
    • +
    • Implement the generated Service interface in your coprocessor class and implement the + {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService} interface. The + {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService#getService()} + method should return a reference to the Endpoint's protocol buffer Service instance. +
    +Clients may then call the defined service methods on coprocessor instances via +the {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])}, +{@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}, and +{@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} +methods. +

    + +

    +Since coprocessor Service instances are associated with individual regions within the table, +the client RPC calls must ultimately identify which regions should be used in the Service +method invocations. Since regions are seldom handled directly in client code +and the region names may change over time, the coprocessor RPC calls use row keys +to identify which regions should be used for the method invocations. Clients +can call coprocessor Service methods against either: +

      +
    • a single region - calling + {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])} + with a single row key. This returns a {@link org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel} + instance which communicates with the region containing the given row key (even if the + row does not exist) as the RPC endpoint. Clients can then use the {@code CoprocessorRpcChannel} + instance in creating a new Service stub to call RPC methods on the region's coprocessor.
    • +
• a range of regions - calling + {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} + or {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} + with a starting row key and an ending row key. All regions in the table + from the region containing the start row key to the region containing the end + row key (inclusive), will be used as the RPC endpoints.
    • +
    +

    + +

    Note that the row keys passed as parameters to the HTable +methods are not passed directly to the coprocessor Service implementations. +They are only used to identify the regions for endpoints of the remote calls. +
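A hedged sketch of the single-region case from the first bullet above, using the fictitious RowCountService that the Example usage section below develops:

  HTable table = new HTable(conf, "mytable");
  // channel to whichever region hosts "some-row"; the row itself need not exist
  CoprocessorRpcChannel channel = table.coprocessorService(Bytes.toBytes("some-row"));
  ExampleProtos.RowCountService service = ExampleProtos.RowCountService.newStub(channel);
  BlockingRpcCallback<ExampleProtos.CountResponse> done =
      new BlockingRpcCallback<ExampleProtos.CountResponse>();
  service.getRowCount(null, ExampleProtos.CountRequest.getDefaultInstance(), done);
  long rowsInThatRegion = done.get().getCount();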

    + +

    +The {@link org.apache.hadoop.hbase.client.coprocessor.Batch} class defines two +interfaces used for coprocessor Service invocations against multiple regions. Clients implement +{@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call} to call methods of the actual +coprocessor Service instance. The interface's call() method will be called once +per selected region, passing the Service instance for the region as a parameter. Clients +can optionally implement {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback} +to be notified of the results from each region invocation as they complete. +The instance's {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)} +method will be called with the {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)} +return value from each region. +

    + +

    Example usage

    +

    +To start with, let's use a fictitious coprocessor, RowCountEndpoint +that counts the number of rows and key-values in each region where it is running. +For clients to query this information, the coprocessor defines the following protocol buffer +service: +

    + +
    +
    +message CountRequest {
    +}
    +
    +message CountResponse {
    +  required int64 count = 1 [default = 0];
    +}
    +
    +service RowCountService {
    +  rpc getRowCount(CountRequest)
    +    returns (CountResponse);
    +  rpc getKeyValueCount(CountRequest)
    +    returns (CountResponse);
    +}
    +
    + +

    +Next run the protoc compiler on the .proto file to generate Java code for the Service interface. +The generated {@code RowCountService} interface should look something like: +

    +
    +
    +public static abstract class RowCountService
    +  implements com.google.protobuf.Service {
    +  ...
    +  public interface Interface {
    +    public abstract void getRowCount(
    +        com.google.protobuf.RpcController controller,
    +        org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
    +        com.google.protobuf.RpcCallback done);
    +
    +    public abstract void getKeyValueCount(
    +        com.google.protobuf.RpcController controller,
    +        org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
    +        com.google.protobuf.RpcCallback done);
    +  }
    +}
    +
    + +

    +Our coprocessor Service will need to implement this interface and the {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService} +in order to be registered correctly as an endpoint. For the sake of simplicity the server-side +implementation is omitted. To see the implementing code, please see the +{@link org.apache.hadoop.hbase.coprocessor.example.RowCountEndpoint} class in the HBase source code. +

    + +

    +Now we need a way to access the results that RowCountService +is making available. If we want to find the row count for all regions, we could +use: +

    + +
    +
    +HTable table = new HTable(conf, "mytable");
    +final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
    +Map results = table.coprocessorService(
    +    ExampleProtos.RowCountService.class, // the protocol interface we're invoking
    +    null, null,                          // start and end row keys
    +    new Batch.Call() {
    +        public Long call(ExampleProtos.RowCountService counter) throws IOException {
    +          BlockingRpcCallback rpcCallback =
    +              new BlockingRpcCallback();
    +          counter.getRowCount(null, request, rpcCallback);
    +          ExampleProtos.CountResponse response = rpcCallback.get();
    +          return response.hasCount() ? response.getCount() : 0;
    +        }
    +    });
    +
    + +

    +This will return a java.util.Map of the counter.getRowCount() +result for the RowCountService instance running in each region +of mytable, keyed by the region name. +
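For instance, the per-region counts could be tallied like this (a hedged sketch; the map is keyed by region name bytes):

  long totalRows = 0;
  for (Map.Entry<byte[], Long> entry : results.entrySet()) {
    System.out.println(Bytes.toStringBinary(entry.getKey()) + " -> " + entry.getValue());
    totalRows += entry.getValue();
  }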

    + +

    +By implementing {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call} +as an anonymous class, we can invoke RowCountService methods +directly against the {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)} +method's argument. Calling {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} +will take care of invoking Batch.Call.call() against our anonymous class +with the RowCountService instance for each table region. +

    + +

    +Implementing {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call} also allows you to +perform additional processing against each region's Service instance. For example, if you would +like to combine row count and key-value count for each region: +

    + +
    +
    +HTable table = new HTable(conf, "mytable");
    +// combine row count and kv count for region
    +final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
    +Map results = table.coprocessorService(
    +    ExampleProtos.RowCountService.class, // the protocol interface we're invoking
    +    null, null,                          // start and end row keys
    +    new Batch.Call>() {
    +       public Long call(ExampleProtos.RowCountService counter) throws IOException {
    +         BlockingRpcCallback rowCallback =
    +             new BlockingRpcCallback();
    +         counter.getRowCount(null, request, rowCallback);
    +
    +         BlockingRpcCallback kvCallback =
    +             new BlockingRpcCallback();
    +         counter.getKeyValueCount(null, request, kvCallback);
    +
    +         ExampleProtos.CountResponse rowResponse = rowCallback.get();
    +         ExampleProtos.CountResponse kvResponse = kvCallback.get();
    +         return new Pair(rowResponse.hasCount() ? rowResponse.getCount() : 0,
    +             kvResponse.hasCount() ? kvResponse.getCount() : 0);
    +    }
    +});
    +
    +*/ +package org.apache.hadoop.hbase.client.coprocessor; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java new file mode 100644 index 0000000..47b484c --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java @@ -0,0 +1,142 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client.metrics; + +import com.google.common.collect.ImmutableMap; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + + +/** + * Provides client-side metrics related to scan operations + * The data can be passed to mapreduce framework or other systems. + * We use atomic longs so that one thread can increment, + * while another atomically resets to zero after the values are reported + * to hadoop's counters. + * + * Some of these metrics are general for any client operation such as put + * However, there is no need for this. So they are defined under scan operation + * for now. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ScanMetrics { + + + private static final Log LOG = LogFactory.getLog(ScanMetrics.class); + + /** + * Hash to hold the String -> Atomic Long mappings. + */ + private final Map counters = new HashMap(); + + // AtomicLongs to hold the metrics values. These are all updated through ClientScanner and + // ScannerCallable. They are atomic longs so that atomic getAndSet can be used to reset the + // values after progress is passed to hadoop's counters. 
+ + + /** + * number of RPC calls + */ + public final AtomicLong countOfRPCcalls = createCounter("RPC_CALLS"); + + /** + * number of remote RPC calls + */ + public final AtomicLong countOfRemoteRPCcalls = createCounter("REMOTE_RPC_CALLS"); + + /** + * sum of milliseconds between sequential next calls + */ + public final AtomicLong sumOfMillisSecBetweenNexts = createCounter("MILLIS_BETWEEN_NEXTS"); + + /** + * number of NotServingRegionException caught + */ + public final AtomicLong countOfNSRE = createCounter("NOT_SERVING_REGION_EXCEPTION"); + + /** + * number of bytes in Result objects from region servers + */ + public final AtomicLong countOfBytesInResults = createCounter("BYTES_IN_RESULTS"); + + /** + * number of bytes in Result objects from remote region servers + */ + public final AtomicLong countOfBytesInRemoteResults = createCounter("BYTES_IN_REMOTE_RESULTS"); + + /** + * number of regions + */ + public final AtomicLong countOfRegions = createCounter("REGIONS_SCANNED"); + + /** + * number of RPC retries + */ + public final AtomicLong countOfRPCRetries = createCounter("RPC_RETRIES"); + + /** + * number of remote RPC retries + */ + public final AtomicLong countOfRemoteRPCRetries = createCounter("REMOTE_RPC_RETRIES"); + + /** + * constructor + */ + public ScanMetrics() { + } + + private AtomicLong createCounter(String counterName) { + AtomicLong c = new AtomicLong(0); + counters.put(counterName, c); + return c; + } + + public void setCounter(String counterName, long value) { + AtomicLong c = this.counters.get(counterName); + if (c != null) { + c.set(value); + } + } + + /** + * Get all of the values since the last time this function was called. + * + * Calling this function will reset all AtomicLongs in the instance back to 0. + * + * @return A Map of String -> Long for metrics + */ + public Map getMetricsMap() { + //Create a builder + ImmutableMap.Builder builder = ImmutableMap.builder(); + //For every entry add the value and reset the AtomicLong back to zero + for (Map.Entry e : this.counters.entrySet()) { + builder.put(e.getKey(), e.getValue().getAndSet(0)); + } + //Build the immutable map so that people can't mess around with it. + return builder.build(); + } + +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java new file mode 100644 index 0000000..c79bd52 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java @@ -0,0 +1,185 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** +Provides HBase Client + +

    Table of Contents

+ • Overview
+ • Example API Usage
+ • Related Documentation

    Overview

    +

+ To administer HBase, create and drop tables, list and alter tables,
+ use {@link org.apache.hadoop.hbase.client.HBaseAdmin}. Once created, table access is via an instance
+ of {@link org.apache.hadoop.hbase.client.HTable}. You add content to a table a row at a time. To insert,
+ create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value, target column
+ and optionally a timestamp. Commit your update using {@link org.apache.hadoop.hbase.client.HTable#put(Put)}.
+ To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be specified to be broad -- get all
+ on a particular row -- or narrow; i.e. return only a single cell value. After creating an instance of
+ Get, invoke {@link org.apache.hadoop.hbase.client.HTable#get(Get)}. Use
+ {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a cursor-like access. After
+ creating and configuring your Scan instance, call {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} and then
+ invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.HTable#get(Get)} and
+ {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} return a
+ {@link org.apache.hadoop.hbase.client.Result}.
+ A Result is a List of {@link org.apache.hadoop.hbase.KeyValue}s. It has facilities for packaging the return
+ in different formats.
+ Use {@link org.apache.hadoop.hbase.client.Delete} to remove content.
+ You can remove individual cells or entire families, etc. Pass it to
+ {@link org.apache.hadoop.hbase.client.HTable#delete(Delete)} to execute.
+
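A minimal Delete sketch (editor's addition, not part of the patch; it reuses the illustrative table and family names from the example further below):

    // Removes data from the table obtained via new HTable(config, "myLittleHBaseTable").
    // With no columns specified the whole row is deleted; deleteColumns(...) narrows
    // the Delete to every version of a single column.
    Delete d = new Delete(Bytes.toBytes("myLittleRow"));
    d.deleteColumns(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"));
    table.delete(d);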

    +

+ Puts, Gets and Deletes take out a lock on the target row for the duration of their operation.
+ Concurrent modifications to a single row are serialized. Gets and scans run concurrently without
+ interference from the row locks and are guaranteed not to return half-written rows.
+
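A short sketch of what that row-level atomicity means in practice (editor's addition, not from the patch; names are the illustrative ones used below):

    // Both cells are applied to the row atomically: a concurrent Get or Scan sees
    // either none or both of these updates, never just one of them.
    Put p = new Put(Bytes.toBytes("myLittleRow"));
    p.add(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    p.add(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("q2"), Bytes.toBytes("v2"));
    table.put(p);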

    +

+ Client code accessing a cluster finds the cluster by querying ZooKeeper.
+ This means that the ZooKeeper quorum to use must be on the client CLASSPATH.
+ Usually this means making sure the client can find your hbase-site.xml.
+
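If shipping an hbase-site.xml on the CLASSPATH is not convenient, the quorum can also be set programmatically. A minimal sketch (editor's addition; the host names are placeholders):

    Configuration conf = HBaseConfiguration.create();
    // Point the client at the cluster's ZooKeeper ensemble explicitly.
    conf.set("hbase.zookeeper.quorum", "zk1.example.com,zk2.example.com,zk3.example.com");
    conf.set("hbase.zookeeper.property.clientPort", "2181");
    HTable table = new HTable(conf, "myLittleHBaseTable");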

    + +

    Example API Usage

    + +

+ Once you have a running HBase, you probably want a way to hook your application up to it.
+ If your application is in Java, then you should use the Java API. Here's an example of what
+ a simple client might look like. This example assumes that you've created a table called
+ "myLittleHBaseTable" with a column family called "myLittleFamily", matching the code below.
+

    + +
    +
    +import java.io.IOException;
    +
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
    +import org.apache.hadoop.hbase.client.Get;
    +import org.apache.hadoop.hbase.client.HTable;
    +import org.apache.hadoop.hbase.client.Put;
    +import org.apache.hadoop.hbase.client.Result;
    +import org.apache.hadoop.hbase.client.ResultScanner;
    +import org.apache.hadoop.hbase.client.Scan;
    +import org.apache.hadoop.hbase.util.Bytes;
    +
    +
    +// Class that has nothing but a main.
    +// Does a Put, Get and a Scan against an hbase table.
    +public class MyLittleHBaseClient {
    +  public static void main(String[] args) throws IOException {
    +    // You need a configuration object to tell the client where to connect.
+    // When you create an HBaseConfiguration, it reads in whatever you've set
+    // into your hbase-site.xml and in hbase-default.xml, as long as these can
+    // be found on the CLASSPATH.
    +    Configuration config = HBaseConfiguration.create();
    +
    +    // This instantiates an HTable object that connects you to
    +    // the "myLittleHBaseTable" table.
    +    HTable table = new HTable(config, "myLittleHBaseTable");
    +
    +    // To add to a row, use Put.  A Put constructor takes the name of the row
    +    // you want to insert into as a byte array.  In HBase, the Bytes class has
    +    // utility for converting all kinds of java types to byte arrays.  In the
    +    // below, we are converting the String "myLittleRow" into a byte array to
    +    // use as a row key for our update. Once you have a Put instance, you can
    +    // adorn it by setting the names of columns you want to update on the row,
+    // the timestamp to use in your update, etc. If no timestamp is given, the
+    // server applies the current time to the edits.
    +    Put p = new Put(Bytes.toBytes("myLittleRow"));
    +
    +    // To set the value you'd like to update in the row 'myLittleRow', specify
    +    // the column family, column qualifier, and value of the table cell you'd
    +    // like to update.  The column family must already exist in your table
    +    // schema.  The qualifier can be anything.  All must be specified as byte
+    // arrays as HBase is all about byte arrays.  Let's pretend the table
    +    // 'myLittleHBaseTable' was created with a family 'myLittleFamily'.
    +    p.add(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"),
    +      Bytes.toBytes("Some Value"));
    +
    +    // Once you've adorned your Put instance with all the updates you want to
    +    // make, to commit it do the following (The HTable#put method takes the
    +    // Put instance you've been building and pushes the changes you made into
    +    // hbase)
    +    table.put(p);
    +
    +    // Now, to retrieve the data we just wrote. The values that come back are
    +    // Result instances. Generally, a Result is an object that will package up
    +    // the hbase return into the form you find most palatable.
    +    Get g = new Get(Bytes.toBytes("myLittleRow"));
    +    Result r = table.get(g);
    +    byte [] value = r.getValue(Bytes.toBytes("myLittleFamily"),
    +      Bytes.toBytes("someQualifier"));
    +    // If we convert the value bytes, we should get back 'Some Value', the
    +    // value we inserted at this location.
    +    String valueStr = Bytes.toString(value);
    +    System.out.println("GET: " + valueStr);
    +
    +    // Sometimes, you won't know the row you're looking for. In this case, you
+    // use a Scanner. This will give you a cursor-like interface to the contents
    +    // of the table.  To set up a Scanner, do like you did above making a Put
    +    // and a Get, create a Scan.  Adorn it with column names, etc.
    +    Scan s = new Scan();
    +    s.addColumn(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"));
    +    ResultScanner scanner = table.getScanner(s);
    +    try {
    +      // Scanners return Result instances.
    +      // Now, for the actual iteration. One way is to use a while loop like so:
    +      for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
    +        // print out the row we found and the columns we were looking for
    +        System.out.println("Found row: " + rr);
    +      }
    +
    +      // The other approach is to use a foreach loop. Scanners are iterable!
    +      // for (Result rr : scanner) {
    +      //   System.out.println("Found row: " + rr);
    +      // }
    +    } finally {
    +      // Make sure you close your scanners when you are done!
+      // That's why we have it inside a try/finally clause.
    +      scanner.close();
    +    }
    +  }
    +}
    +
    +
    + +

+ There are many other methods for putting data into and getting data out of
+ HBase, but these examples should get you started. See the HTable javadoc for
+ more methods. Additionally, there are methods for managing tables in the
+ HBaseAdmin class.
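As a pointer to those HBaseAdmin methods, here is a minimal table-creation sketch (editor's addition, not part of the patch; names are illustrative and exception handling is omitted):

    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTableDescriptor desc = new HTableDescriptor("myLittleHBaseTable");
    desc.addFamily(new HColumnDescriptor("myLittleFamily"));
    if (!admin.tableExists("myLittleHBaseTable")) {
      admin.createTable(desc);
    }
    admin.close();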

    + +

+ If your client is NOT Java, then you should consider the Thrift or REST
+ libraries.

    + +

    Related Documentation

    + +
    + + +


    + + + +*/ +package org.apache.hadoop.hbase.client; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java new file mode 100644 index 0000000..57457be --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java @@ -0,0 +1,209 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client.replication; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HConnectionManager; +import org.apache.hadoop.hbase.replication.ReplicationZookeeper; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +/** + *

+ * This class provides the administrative interface to HBase cluster
+ * replication. In order to use it, the cluster and the client using
+ * ReplicationAdmin must be configured with hbase.replication
+ * set to true.
+ *

    + *

+ * Adding a new peer results in creating new outbound connections from every
+ * region server to a subset of region servers on the slave cluster. Each
+ * new stream of replication will start replicating from the beginning of the
+ * current HLog, meaning that edits from the past will be replicated.
+ *

    + *

+ * Removing a peer is a destructive and irreversible operation that stops
+ * all the replication streams for the given cluster and deletes the metadata
+ * used to keep track of the replication state.
+ *

    + *

+ * Enabling and disabling peers is currently not supported.
+ *

    + *

+ * As cluster replication is still experimental, a kill switch is provided
+ * in order to stop all replication-related operations; see
+ * {@link #setReplicating(boolean)}. When setting it back to true, the new
+ * state of all the replication streams will be unknown and may have holes.
+ * Use at your own risk.
+ *

    + *

+ * To see which commands are available in the shell, type
+ * replication.
+ *
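A minimal usage sketch (editor's addition, not part of the patch; the peer id and cluster key are placeholders), using only the methods declared below:

    Configuration conf = HBaseConfiguration.create();  // hbase.replication must be set to true
    ReplicationAdmin repAdmin = new ReplicationAdmin(conf);
    try {
      // clusterKey = quorum:clientPort:znodeParent of the slave cluster
      repAdmin.addPeer("1", "zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase");
      // ... later, tear the replication stream down again ...
      repAdmin.removePeer("1");
    } finally {
      repAdmin.close();
    }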

    + */ +public class ReplicationAdmin implements Closeable { + private static final Log LOG = LogFactory.getLog(ReplicationAdmin.class); + + private final ReplicationZookeeper replicationZk; + private final HConnection connection; + + /** + * Constructor that creates a connection to the local ZooKeeper ensemble. + * @param conf Configuration to use + * @throws IOException if the connection to ZK cannot be made + * @throws RuntimeException if replication isn't enabled. + */ + public ReplicationAdmin(Configuration conf) throws IOException { + if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY, false)) { + throw new RuntimeException("hbase.replication isn't true, please " + + "enable it in order to use replication"); + } + this.connection = HConnectionManager.getConnection(conf); + ZooKeeperWatcher zkw = createZooKeeperWatcher(); + try { + this.replicationZk = new ReplicationZookeeper(this.connection, conf, zkw); + } catch (KeeperException e) { + throw new IOException("Unable setup the ZooKeeper connection", e); + } + } + + private ZooKeeperWatcher createZooKeeperWatcher() throws IOException { + return new ZooKeeperWatcher(connection.getConfiguration(), + "Replication Admin", new Abortable() { + @Override + public void abort(String why, Throwable e) { + LOG.error(why, e); + System.exit(1); + } + + @Override + public boolean isAborted() { + return false; + } + + }); + } + + + /** + * Add a new peer cluster to replicate to. + * @param id a short that identifies the cluster + * @param clusterKey the concatenation of the slave cluster's + * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent + * @throws IllegalStateException if there's already one slave since + * multi-slave isn't supported yet. + */ + public void addPeer(String id, String clusterKey) throws IOException { + this.replicationZk.addPeer(id, clusterKey); + } + + /** + * Removes a peer cluster and stops the replication to it. + * @param id a short that identifies the cluster + */ + public void removePeer(String id) throws IOException { + this.replicationZk.removePeer(id); + } + + /** + * Restart the replication stream to the specified peer. + * @param id a short that identifies the cluster + */ + public void enablePeer(String id) throws IOException { + this.replicationZk.enablePeer(id); + } + + /** + * Stop the replication stream to the specified peer. + * @param id a short that identifies the cluster + */ + public void disablePeer(String id) throws IOException { + this.replicationZk.disablePeer(id); + } + + /** + * Get the number of slave clusters the local cluster has. + * @return number of slave clusters + */ + public int getPeersCount() { + return this.replicationZk.listPeersIdsAndWatch().size(); + } + + /** + * Map of this cluster's peers for display. + * @return A map of peer ids to peer cluster keys + */ + public Map listPeers() { + return this.replicationZk.listPeers(); + } + + /** + * Get the current status of the kill switch, if the cluster is replicating + * or not. + * @return true if the cluster is replicated, otherwise false + */ + public boolean getReplicating() throws IOException { + try { + return this.replicationZk.getReplication(); + } catch (KeeperException e) { + throw new IOException("Couldn't get the replication status"); + } + } + + /** + * Kill switch for all replication-related features + * @param newState true to start replication, false to stop it. 
+ * completely + * @return the previous state + */ + public boolean setReplicating(boolean newState) throws IOException { + boolean prev = true; + try { + prev = getReplicating(); + this.replicationZk.setReplicating(newState); + } catch (KeeperException e) { + throw new IOException("Unable to set the replication state", e); + } + return prev; + } + + /** + * Get the ZK-support tool created and used by this object for replication. + * @return the ZK-support tool + */ + ReplicationZookeeper getReplicationZk() { + return replicationZk; + } + + @Override + public void close() throws IOException { + if (this.connection != null) { + this.connection.close(); + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java new file mode 100644 index 0000000..d74929c --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java @@ -0,0 +1,164 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.coprocessor; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter; + +import com.google.protobuf.ByteString; + +/** + * Defines how value for specific column is interpreted and provides utility + * methods like compare, add, multiply etc for them. Takes column family, column + * qualifier and return the cell value. Its concrete implementation should + * handle null case gracefully. Refer to {@link LongColumnInterpreter} for an + * example. + *

+ * Takes two generic parameters. The cell value type of the interpreter is <T>.
+ * During some computations like sum or average, the return type can be different
+ * from the cell value data type; for example, summing int cell values might overflow
+ * an int result, so Long should be used for the result. Therefore, this
+ * class mandates the use of a different (promoted) data type <S> for the result of these
+ * computations. All computations are performed on the promoted data type
+ * <S>. There is a conversion method
+ * {@link ColumnInterpreter#castToReturnType(Object)} which takes a <T> type and
+ * returns a <S> type.
+ * @param <T> Cell value data type
+ * @param <S> Promoted data type
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface ColumnInterpreter<T, S> {
+
+  /**
+   * @param colFamily
+   * @param colQualifier
+   * @param kv
+   * @return value of type T
+   * @throws IOException
+   */
+  T getValue(byte[] colFamily, byte[] colQualifier, KeyValue kv)
+      throws IOException;
+
+  /**
+   * @param l1
+   * @param l2
+   * @return the sum, or the non-null value if either argument is null; otherwise
+   * returns null.
+   */
+  public S add(S l1, S l2);
+
+  /**
+   * Returns the maximum value for this type T.
+   * @return max
+   */
+  T getMaxValue();
+
+  T getMinValue();
+
+  /**
+   * @param o1
+   * @param o2
+   * @return multiplication
+   */
+  S multiply(S o1, S o2);
+
+  /**
+   * @param o
+   * @return increment
+   */
+  S increment(S o);
+
+  /**
+   * Provides a casting opportunity between the data types.
+   * @param o
+   * @return cast
+   */
+  S castToReturnType(T o);
+
+  /**
+   * This takes care of the case where either argument is null. Returns 0 if they are
+   * equal or both are null;
+   *

      + *
    • >0 if l1 > l2 or l1 is not null and l2 is null. + *
    • < 0 if l1 < l2 or l1 is null and l2 is not null. + */ + int compare(final T l1, final T l2); + + /** + * used for computing average of data values. Not providing the divide + * method that takes two values as it is not needed as of now. + * @param o + * @param l + * @return Average + */ + double divideForAvg(S o, Long l); + + /** + * This method should return any additional data that is needed on the + * server side to construct the ColumnInterpreter. The server + * will pass this to the {@link #initialize(ByteString)} + * method. If there is no ColumnInterpreter specific data (for e.g., + * {@link LongColumnInterpreter}) then null should be returned. + * @return the PB message + */ + ByteString columnInterpreterSpecificData(); + + /** + * Return the PB for type T + * @param t + * @return PB-message + */ + ByteString getProtoForCellType(T t); + + /** + * Return the PB for type S + * @param s + * @return PB-message + */ + ByteString getProtoForPromotedType(S s); + + /** + * This method should initialize any field(s) of the ColumnInterpreter with + * a parsing of the passed message bytes (used on the server side). + * @param bytes + */ + void initialize(ByteString bytes); + + /** + * Converts the bytes in the server's response to the expected type S + * @param response + * @return response of type S constructed from the message + */ + S parseResponseAsPromotedType(byte[] response); + + /** + * The response message comes as type S. This will convert/cast it to T. + * In some sense, performs the opposite of {@link #castToReturnType(Object)} + * @param response + * @return cast + */ + T castToCellType(S response); +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java new file mode 100644 index 0000000..26282db --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java @@ -0,0 +1,89 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * A binary comparator which lexicographically compares against the specified + * byte array using {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. 
+ */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class BinaryComparator extends ByteArrayComparable { + + /** + * Constructor + * @param value value + */ + public BinaryComparator(byte[] value) { + super(value); + } + + @Override + public int compareTo(byte [] value, int offset, int length) { + return Bytes.compareTo(this.value, 0, this.value.length, value, offset, length); + } + + /** + * @return The comparator serialized using pb + */ + public byte [] toByteArray() { + ComparatorProtos.BinaryComparator.Builder builder = + ComparatorProtos.BinaryComparator.newBuilder(); + builder.setComparable(super.convert()); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link BinaryComparator} instance + * @return An instance of {@link BinaryComparator} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static BinaryComparator parseFrom(final byte [] pbBytes) + throws DeserializationException { + ComparatorProtos.BinaryComparator proto; + try { + proto = ComparatorProtos.BinaryComparator.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new BinaryComparator(proto.getComparable().getValue().toByteArray()); + } + + /** + * @param other + * @return true if and only if the fields of the comparator that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(ByteArrayComparable other) { + if (other == this) return true; + if (!(other instanceof BinaryComparator)) return false; + + return super.areSerializedFieldsEqual(other); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java new file mode 100644 index 0000000..575ff1e --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java @@ -0,0 +1,91 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * A comparator which compares against a specified byte array, but only compares + * up to the length of this byte array. For the rest it is similar to + * {@link BinaryComparator}. 
+ */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class BinaryPrefixComparator extends ByteArrayComparable { + + /** + * Constructor + * @param value value + */ + public BinaryPrefixComparator(byte[] value) { + super(value); + } + + @Override + public int compareTo(byte [] value, int offset, int length) { + return Bytes.compareTo(this.value, 0, this.value.length, value, offset, + this.value.length <= length ? this.value.length : length); + } + + /** + * @return The comparator serialized using pb + */ + public byte [] toByteArray() { + ComparatorProtos.BinaryPrefixComparator.Builder builder = + ComparatorProtos.BinaryPrefixComparator.newBuilder(); + builder.setComparable(super.convert()); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link BinaryPrefixComparator} instance + * @return An instance of {@link BinaryPrefixComparator} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static BinaryPrefixComparator parseFrom(final byte [] pbBytes) + throws DeserializationException { + ComparatorProtos.BinaryPrefixComparator proto; + try { + proto = ComparatorProtos.BinaryPrefixComparator.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new BinaryPrefixComparator(proto.getComparable().getValue().toByteArray()); + } + + /** + * @param other + * @return true if and only if the fields of the comparator that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(ByteArrayComparable other) { + if (other == this) return true; + if (!(other instanceof BinaryPrefixComparator)) return false; + + return super.areSerializedFieldsEqual(other); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java new file mode 100644 index 0000000..73afedc --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java @@ -0,0 +1,133 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * A bit comparator which performs the specified bitwise operation on each of the bytes + * with the specified byte array. Then returns whether the result is non-zero. 
+ */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class BitComparator extends ByteArrayComparable { + + /** Bit operators. */ + public enum BitwiseOp { + /** and */ + AND, + /** or */ + OR, + /** xor */ + XOR + } + protected BitwiseOp bitOperator; + + /** + * Constructor + * @param value value + * @param bitOperator operator to use on the bit comparison + */ + public BitComparator(byte[] value, BitwiseOp bitOperator) { + super(value); + this.bitOperator = bitOperator; + } + + /** + * @return the bitwise operator + */ + public BitwiseOp getOperator() { + return bitOperator; + } + + /** + * @return The comparator serialized using pb + */ + public byte [] toByteArray() { + ComparatorProtos.BitComparator.Builder builder = + ComparatorProtos.BitComparator.newBuilder(); + builder.setComparable(super.convert()); + ComparatorProtos.BitComparator.BitwiseOp bitwiseOpPb = + ComparatorProtos.BitComparator.BitwiseOp.valueOf(bitOperator.name()); + builder.setBitwiseOp(bitwiseOpPb); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link BitComparator} instance + * @return An instance of {@link BitComparator} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static BitComparator parseFrom(final byte [] pbBytes) + throws DeserializationException { + ComparatorProtos.BitComparator proto; + try { + proto = ComparatorProtos.BitComparator.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + BitwiseOp bitwiseOp = BitwiseOp.valueOf(proto.getBitwiseOp().name()); + return new BitComparator(proto.getComparable().getValue().toByteArray(),bitwiseOp); + } + + /** + * @param other + * @return true if and only if the fields of the comparator that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(ByteArrayComparable other) { + if (other == this) return true; + if (!(other instanceof BitComparator)) return false; + + BitComparator comparator = (BitComparator)other; + return super.areSerializedFieldsEqual(other) + && this.getOperator().equals(comparator.getOperator()); + } + + @Override + public int compareTo(byte[] value, int offset, int length) { + if (length != this.value.length) { + return 1; + } + int b = 0; + //Iterating backwards is faster because we can quit after one non-zero byte. + for (int i = length - 1; i >= 0 && b == 0; i--) { + switch (bitOperator) { + case AND: + b = (this.value[i] & value[i+offset]) & 0xff; + break; + case OR: + b = (this.value[i] | value[i+offset]) & 0xff; + break; + case XOR: + b = (this.value[i] ^ value[i+offset]) & 0xff; + break; + } + } + return b == 0 ? 1 : 0; + } +} + diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ByteArrayComparable.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ByteArrayComparable.java new file mode 100644 index 0000000..93b73e2 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ByteArrayComparable.java @@ -0,0 +1,100 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import com.google.protobuf.ByteString; + + +/** Base class for byte array comparators */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public abstract class ByteArrayComparable implements Comparable { + + byte[] value; + + /** + * Constructor. + * @param value the value to compare against + */ + public ByteArrayComparable(byte [] value) { + this.value = value; + } + + public byte[] getValue() { + return value; + } + + /** + * @return The comparator serialized using pb + */ + public abstract byte [] toByteArray(); + + ComparatorProtos.ByteArrayComparable convert() { + ComparatorProtos.ByteArrayComparable.Builder builder = + ComparatorProtos.ByteArrayComparable.newBuilder(); + if (value != null) builder.setValue(ByteString.copyFrom(value)); + return builder.build(); + } + + /** + * @param pbBytes A pb serialized {@link ByteArrayComparable} instance + * @return An instance of {@link ByteArrayComparable} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static ByteArrayComparable parseFrom(final byte [] pbBytes) + throws DeserializationException { + throw new DeserializationException( + "parseFrom called on base ByteArrayComparable, but should be called on derived type"); + } + + /** + * @param other + * @return true if and only if the fields of the comparator that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(ByteArrayComparable o) { + if (o == this) return true; + if (!(o instanceof ByteArrayComparable)) return false; + + return Bytes.equals(this.getValue(), o.getValue()); + } + + @Override + public int compareTo(byte [] value) { + return compareTo(value, 0, value.length); + } + + /** + * Special compareTo method for subclasses, to avoid + * copying byte[] unnecessarily. + * @param value byte[] to compare + * @param offset offset into value + * @param length number of bytes to compare + * @return a negative integer, zero, or a positive integer as this object + * is less than, equal to, or greater than the specified object. + */ + public abstract int compareTo(byte [] value, int offset, int length); +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java new file mode 100644 index 0000000..d775177d --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java @@ -0,0 +1,121 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import java.util.ArrayList; + +import com.google.common.base.Preconditions; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * Simple filter that returns first N columns on row only. + * This filter was written to test filters in Get and as soon as it gets + * its quota of columns, {@link #filterAllRemaining()} returns true. This + * makes this filter unsuitable as a Scan filter. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ColumnCountGetFilter extends FilterBase { + private int limit = 0; + private int count = 0; + + public ColumnCountGetFilter(final int n) { + Preconditions.checkArgument(n >= 0, "limit be positive %s", n); + this.limit = n; + } + + public int getLimit() { + return limit; + } + + @Override + public boolean filterAllRemaining() { + return this.count > this.limit; + } + + @Override + public ReturnCode filterKeyValue(KeyValue v) { + this.count++; + return filterAllRemaining() ? ReturnCode.NEXT_COL : ReturnCode.INCLUDE_AND_NEXT_COL; + } + + @Override + public void reset() { + this.count = 0; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, + "Expected 1 but got: %s", filterArguments.size()); + int limit = ParseFilter.convertByteArrayToInt(filterArguments.get(0)); + return new ColumnCountGetFilter(limit); + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.ColumnCountGetFilter.Builder builder = + FilterProtos.ColumnCountGetFilter.newBuilder(); + builder.setLimit(this.limit); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link ColumnCountGetFilter} instance + * @return An instance of {@link ColumnCountGetFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static ColumnCountGetFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.ColumnCountGetFilter proto; + try { + proto = FilterProtos.ColumnCountGetFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new ColumnCountGetFilter(proto.getLimit()); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. 
+ */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof ColumnCountGetFilter)) return false; + + ColumnCountGetFilter other = (ColumnCountGetFilter)o; + return this.getLimit() == other.getLimit(); + } + + @Override + public String toString() { + return this.getClass().getSimpleName() + " " + this.limit; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java new file mode 100644 index 0000000..d58429f --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java @@ -0,0 +1,142 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.filter; + +import java.util.ArrayList; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import com.google.common.base.Preconditions; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * A filter, based on the ColumnCountGetFilter, takes two arguments: limit and offset. + * This filter can be used for row-based indexing, where references to other tables are stored across many columns, + * in order to efficient lookups and paginated results for end users. Only most recent versions are considered + * for pagination. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ColumnPaginationFilter extends FilterBase +{ + private int limit = 0; + private int offset = 0; + private int count = 0; + + public ColumnPaginationFilter(final int limit, final int offset) + { + Preconditions.checkArgument(limit >= 0, "limit must be positive %s", limit); + Preconditions.checkArgument(offset >= 0, "offset must be positive %s", offset); + this.limit = limit; + this.offset = offset; + } + + /** + * @return limit + */ + public int getLimit() { + return limit; + } + + /** + * @return offset + */ + public int getOffset() { + return offset; + } + + @Override + public ReturnCode filterKeyValue(KeyValue v) + { + if(count >= offset + limit) + { + return ReturnCode.NEXT_ROW; + } + + ReturnCode code = count < offset ? 
ReturnCode.NEXT_COL : + ReturnCode.INCLUDE_AND_NEXT_COL; + count++; + return code; + } + + @Override + public void reset() + { + this.count = 0; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 2, + "Expected 2 but got: %s", filterArguments.size()); + int limit = ParseFilter.convertByteArrayToInt(filterArguments.get(0)); + int offset = ParseFilter.convertByteArrayToInt(filterArguments.get(1)); + return new ColumnPaginationFilter(limit, offset); + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.ColumnPaginationFilter.Builder builder = + FilterProtos.ColumnPaginationFilter.newBuilder(); + builder.setLimit(this.limit); + builder.setOffset(this.offset); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link ColumnPaginationFilter} instance + * @return An instance of {@link ColumnPaginationFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static ColumnPaginationFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.ColumnPaginationFilter proto; + try { + proto = FilterProtos.ColumnPaginationFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new ColumnPaginationFilter(proto.getLimit(),proto.getOffset()); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof ColumnPaginationFilter)) return false; + + ColumnPaginationFilter other = (ColumnPaginationFilter)o; + return this.getLimit() == other.getLimit() && this.getOffset() == other.getOffset(); + } + + @Override + public String toString() { + return String.format("%s (%d, %d)", this.getClass().getSimpleName(), + this.limit, this.offset); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java new file mode 100644 index 0000000..226b2b1 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java @@ -0,0 +1,141 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import java.util.ArrayList; + +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * This filter is used for selecting only those keys with columns that matches + * a particular prefix. For example, if prefix is 'an', it will pass keys with + * columns like 'and', 'anti' but not keys with columns like 'ball', 'act'. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ColumnPrefixFilter extends FilterBase { + protected byte [] prefix = null; + + public ColumnPrefixFilter(final byte [] prefix) { + this.prefix = prefix; + } + + public byte[] getPrefix() { + return prefix; + } + + @Override + public ReturnCode filterKeyValue(KeyValue kv) { + if (this.prefix == null || kv.getBuffer() == null) { + return ReturnCode.INCLUDE; + } else { + return filterColumn(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength()); + } + } + + public ReturnCode filterColumn(byte[] buffer, int qualifierOffset, int qualifierLength) { + if (qualifierLength < prefix.length) { + int cmp = Bytes.compareTo(buffer, qualifierOffset, qualifierLength, this.prefix, 0, + qualifierLength); + if (cmp <= 0) { + return ReturnCode.SEEK_NEXT_USING_HINT; + } else { + return ReturnCode.NEXT_ROW; + } + } else { + int cmp = Bytes.compareTo(buffer, qualifierOffset, this.prefix.length, this.prefix, 0, + this.prefix.length); + if (cmp < 0) { + return ReturnCode.SEEK_NEXT_USING_HINT; + } else if (cmp > 0) { + return ReturnCode.NEXT_ROW; + } else { + return ReturnCode.INCLUDE; + } + } + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, + "Expected 1 but got: %s", filterArguments.size()); + byte [] columnPrefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + return new ColumnPrefixFilter(columnPrefix); + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.ColumnPrefixFilter.Builder builder = + FilterProtos.ColumnPrefixFilter.newBuilder(); + if (this.prefix != null) builder.setPrefix(ByteString.copyFrom(this.prefix)); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link ColumnPrefixFilter} instance + * @return An instance of {@link ColumnPrefixFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static ColumnPrefixFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.ColumnPrefixFilter proto; + try { + proto = FilterProtos.ColumnPrefixFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new ColumnPrefixFilter(proto.getPrefix().toByteArray()); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. 
+ */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof ColumnPrefixFilter)) return false; + + ColumnPrefixFilter other = (ColumnPrefixFilter)o; + return Bytes.equals(this.getPrefix(), other.getPrefix()); + } + + public KeyValue getNextKeyHint(KeyValue kv) { + return KeyValue.createFirstOnRow( + kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(), kv.getBuffer(), + kv.getFamilyOffset(), kv.getFamilyLength(), prefix, 0, prefix.length); + } + + @Override + public String toString() { + return this.getClass().getSimpleName() + " " + Bytes.toStringBinary(this.prefix); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java new file mode 100644 index 0000000..a275d7d --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java @@ -0,0 +1,229 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import java.util.ArrayList; + +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * This filter is used for selecting only those keys with columns that are + * between minColumn to maxColumn. For example, if minColumn is 'an', and + * maxColumn is 'be', it will pass keys with columns like 'ana', 'bad', but not + * keys with columns like 'bed', 'eye' + * + * If minColumn is null, there is no lower bound. If maxColumn is null, there is + * no upper bound. + * + * minColumnInclusive and maxColumnInclusive specify if the ranges are inclusive + * or not. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ColumnRangeFilter extends FilterBase { + protected byte[] minColumn = null; + protected boolean minColumnInclusive = true; + protected byte[] maxColumn = null; + protected boolean maxColumnInclusive = false; + + /** + * Create a filter to select those keys with columns that are between minColumn + * and maxColumn. + * @param minColumn minimum value for the column range. If if it's null, + * there is no lower bound. + * @param minColumnInclusive if true, include minColumn in the range. + * @param maxColumn maximum value for the column range. If it's null, + * @param maxColumnInclusive if true, include maxColumn in the range. + * there is no upper bound. 
+ */ + public ColumnRangeFilter(final byte[] minColumn, boolean minColumnInclusive, + final byte[] maxColumn, boolean maxColumnInclusive) { + this.minColumn = minColumn; + this.minColumnInclusive = minColumnInclusive; + this.maxColumn = maxColumn; + this.maxColumnInclusive = maxColumnInclusive; + } + + /** + * @return if min column range is inclusive. + */ + public boolean isMinColumnInclusive() { + return minColumnInclusive; + } + + /** + * @return if max column range is inclusive. + */ + public boolean isMaxColumnInclusive() { + return maxColumnInclusive; + } + + /** + * @return the min column range for the filter + */ + public byte[] getMinColumn() { + return this.minColumn; + } + + /** + * @return true if min column is inclusive, false otherwise + */ + public boolean getMinColumnInclusive() { + return this.minColumnInclusive; + } + + /** + * @return the max column range for the filter + */ + public byte[] getMaxColumn() { + return this.maxColumn; + } + + /** + * @return true if max column is inclusive, false otherwise + */ + public boolean getMaxColumnInclusive() { + return this.maxColumnInclusive; + } + + @Override + public ReturnCode filterKeyValue(KeyValue kv) { + byte[] buffer = kv.getBuffer(); + int qualifierOffset = kv.getQualifierOffset(); + int qualifierLength = kv.getQualifierLength(); + int cmpMin = 1; + + if (this.minColumn != null) { + cmpMin = Bytes.compareTo(buffer, qualifierOffset, qualifierLength, + this.minColumn, 0, this.minColumn.length); + } + + if (cmpMin < 0) { + return ReturnCode.SEEK_NEXT_USING_HINT; + } + + if (!this.minColumnInclusive && cmpMin == 0) { + return ReturnCode.SKIP; + } + + if (this.maxColumn == null) { + return ReturnCode.INCLUDE; + } + + int cmpMax = Bytes.compareTo(buffer, qualifierOffset, qualifierLength, + this.maxColumn, 0, this.maxColumn.length); + + if (this.maxColumnInclusive && cmpMax <= 0 || + !this.maxColumnInclusive && cmpMax < 0) { + return ReturnCode.INCLUDE; + } + + return ReturnCode.NEXT_ROW; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 4, + "Expected 4 but got: %s", filterArguments.size()); + byte [] minColumn = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + boolean minColumnInclusive = ParseFilter.convertByteArrayToBoolean(filterArguments.get(1)); + byte [] maxColumn = ParseFilter.removeQuotesFromByteArray(filterArguments.get(2)); + boolean maxColumnInclusive = ParseFilter.convertByteArrayToBoolean(filterArguments.get(3)); + + if (minColumn.length == 0) + minColumn = null; + if (maxColumn.length == 0) + maxColumn = null; + return new ColumnRangeFilter(minColumn, minColumnInclusive, + maxColumn, maxColumnInclusive); + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.ColumnRangeFilter.Builder builder = + FilterProtos.ColumnRangeFilter.newBuilder(); + if (this.minColumn != null) builder.setMinColumn(ByteString.copyFrom(this.minColumn)); + builder.setMinColumnInclusive(this.minColumnInclusive); + if (this.maxColumn != null) builder.setMaxColumn(ByteString.copyFrom(this.maxColumn)); + builder.setMaxColumnInclusive(this.maxColumnInclusive); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link ColumnRangeFilter} instance + * @return An instance of {@link ColumnRangeFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static ColumnRangeFilter parseFrom(final byte [] pbBytes) + throws 
DeserializationException { + FilterProtos.ColumnRangeFilter proto; + try { + proto = FilterProtos.ColumnRangeFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new ColumnRangeFilter(proto.hasMinColumn()?proto.getMinColumn().toByteArray():null, + proto.getMinColumnInclusive(),proto.hasMaxColumn()?proto.getMaxColumn().toByteArray():null, + proto.getMaxColumnInclusive()); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof ColumnRangeFilter)) return false; + + ColumnRangeFilter other = (ColumnRangeFilter)o; + return Bytes.equals(this.getMinColumn(),other.getMinColumn()) + && this.getMinColumnInclusive() == other.getMinColumnInclusive() + && Bytes.equals(this.getMaxColumn(), other.getMaxColumn()) + && this.getMaxColumnInclusive() == other.getMaxColumnInclusive(); + } + + @Override + public KeyValue getNextKeyHint(KeyValue kv) { + return KeyValue.createFirstOnRow(kv.getBuffer(), kv.getRowOffset(), kv + .getRowLength(), kv.getBuffer(), kv.getFamilyOffset(), kv + .getFamilyLength(), this.minColumn, 0, this.minColumn == null ? 0 + : this.minColumn.length); + } + + @Override + public String toString() { + return this.getClass().getSimpleName() + " " + + (this.minColumnInclusive ? "[" : "(") + Bytes.toStringBinary(this.minColumn) + + ", " + Bytes.toStringBinary(this.maxColumn) + + (this.maxColumnInclusive ? "]" : ")"); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java new file mode 100644 index 0000000..ff00af3 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java @@ -0,0 +1,180 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType; +import org.apache.hadoop.hbase.util.Bytes; + +import java.util.ArrayList; + +import com.google.common.base.Preconditions; +/** + * This is a generic filter to be used to filter by comparison. It takes an + * operator (equal, greater, not equal, etc) and a byte [] comparator. + *

+ * <p>
+ * To filter by row key, use {@link RowFilter}.
+ * <p>
+ * To filter by column qualifier, use {@link QualifierFilter}.
+ * <p>
+ * To filter by value, use {@link SingleColumnValueFilter}.
+ * <p>
+ * These filters can be wrapped with {@link SkipFilter} and {@link WhileMatchFilter}
+ * to add more control.
+ * <p>

      + * Multiple filters can be combined using {@link FilterList}. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public abstract class CompareFilter extends FilterBase { + + /** Comparison operators. */ + public enum CompareOp { + /** less than */ + LESS, + /** less than or equal to */ + LESS_OR_EQUAL, + /** equals */ + EQUAL, + /** not equal */ + NOT_EQUAL, + /** greater than or equal to */ + GREATER_OR_EQUAL, + /** greater than */ + GREATER, + /** no operation */ + NO_OP, + } + + protected CompareOp compareOp; + protected ByteArrayComparable comparator; + + /** + * Constructor. + * @param compareOp the compare op for row matching + * @param comparator the comparator for row matching + */ + public CompareFilter(final CompareOp compareOp, + final ByteArrayComparable comparator) { + this.compareOp = compareOp; + this.comparator = comparator; + } + + /** + * @return operator + */ + public CompareOp getOperator() { + return compareOp; + } + + /** + * @return the comparator + */ + public ByteArrayComparable getComparator() { + return comparator; + } + + protected boolean doCompare(final CompareOp compareOp, + final ByteArrayComparable comparator, final byte [] data, + final int offset, final int length) { + if (compareOp == CompareOp.NO_OP) { + return true; + } + int compareResult = comparator.compareTo(data, offset, length); + switch (compareOp) { + case LESS: + return compareResult <= 0; + case LESS_OR_EQUAL: + return compareResult < 0; + case EQUAL: + return compareResult != 0; + case NOT_EQUAL: + return compareResult == 0; + case GREATER_OR_EQUAL: + return compareResult > 0; + case GREATER: + return compareResult >= 0; + default: + throw new RuntimeException("Unknown Compare op " + + compareOp.name()); + } + } + + public static ArrayList extractArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 2, + "Expected 2 but got: %s", filterArguments.size()); + CompareOp compareOp = ParseFilter.createCompareOp(filterArguments.get(0)); + ByteArrayComparable comparator = ParseFilter.createComparator( + ParseFilter.removeQuotesFromByteArray(filterArguments.get(1))); + + if (comparator instanceof RegexStringComparator || + comparator instanceof SubstringComparator) { + if (compareOp != CompareOp.EQUAL && + compareOp != CompareOp.NOT_EQUAL) { + throw new IllegalArgumentException ("A regexstring comparator and substring comparator" + + " can only be used with EQUAL and NOT_EQUAL"); + } + } + ArrayList arguments = new ArrayList(); + arguments.add(compareOp); + arguments.add(comparator); + return arguments; + } + + /** + * @return A pb instance to represent this instance. + */ + FilterProtos.CompareFilter convert() { + FilterProtos.CompareFilter.Builder builder = + FilterProtos.CompareFilter.newBuilder(); + HBaseProtos.CompareType compareOp = CompareType.valueOf(this.compareOp.name()); + builder.setCompareOp(compareOp); + if (this.comparator != null) builder.setComparator(ProtobufUtil.toComparator(this.comparator)); + return builder.build(); + } + + /** + * + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. 
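
Reviewer note (not part of the patch): a small sketch of the compare-op plus comparator pattern this class defines, shown through the RowFilter subclass mentioned in the class comment; the row key value is illustrative.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class RowFilterExample {
  /** Keep only the row whose key is exactly "row-100" (key value is illustrative). */
  public static Scan exactRowScan() {
    Scan scan = new Scan();
    scan.setFilter(new RowFilter(CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes("row-100"))));
    return scan;
  }
}
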
+ */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof CompareFilter)) return false; + + CompareFilter other = (CompareFilter)o; + return this.getOperator().equals(other.getOperator()) && + (this.getComparator() == other.getComparator() + || this.getComparator().areSerializedFieldsEqual(other.getComparator())); + } + + @Override + public String toString() { + return String.format("%s (%s, %s)", + this.getClass().getSimpleName(), + this.compareOp.name(), + Bytes.toStringBinary(this.comparator.getValue())); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java new file mode 100644 index 0000000..65ec48f --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java @@ -0,0 +1,289 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.filter; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.ArrayList; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * A filter for adding inter-column timestamp matching + * Only cells with a correspondingly timestamped entry in + * the target column will be retained + * Not compatible with Scan.setBatch as operations need + * full rows for correct filtering + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class DependentColumnFilter extends CompareFilter { + + protected byte[] columnFamily; + protected byte[] columnQualifier; + protected boolean dropDependentColumn; + + protected Set stampSet = new HashSet(); + + /** + * Build a dependent column filter with value checking + * dependent column varies will be compared using the supplied + * compareOp and comparator, for usage of which + * refer to {@link CompareFilter} + * + * @param family dependent column family + * @param qualifier dependent column qualifier + * @param dropDependentColumn whether the column should be discarded after + * @param valueCompareOp comparison op + * @param valueComparator comparator + */ + public DependentColumnFilter(final byte [] family, final byte[] qualifier, + final 
boolean dropDependentColumn, final CompareOp valueCompareOp, + final ByteArrayComparable valueComparator) { + // set up the comparator + super(valueCompareOp, valueComparator); + this.columnFamily = family; + this.columnQualifier = qualifier; + this.dropDependentColumn = dropDependentColumn; + } + + /** + * Constructor for DependentColumn filter. + * Keyvalues where a keyvalue from target column + * with the same timestamp do not exist will be dropped. + * + * @param family name of target column family + * @param qualifier name of column qualifier + */ + public DependentColumnFilter(final byte [] family, final byte [] qualifier) { + this(family, qualifier, false); + } + + /** + * Constructor for DependentColumn filter. + * Keyvalues where a keyvalue from target column + * with the same timestamp do not exist will be dropped. + * + * @param family name of dependent column family + * @param qualifier name of dependent qualifier + * @param dropDependentColumn whether the dependent columns keyvalues should be discarded + */ + public DependentColumnFilter(final byte [] family, final byte [] qualifier, + final boolean dropDependentColumn) { + this(family, qualifier, dropDependentColumn, CompareOp.NO_OP, null); + } + + /** + * @return the column family + */ + public byte[] getFamily() { + return this.columnFamily; + } + + /** + * @return the column qualifier + */ + public byte[] getQualifier() { + return this.columnQualifier; + } + + /** + * @return true if we should drop the dependent column, false otherwise + */ + public boolean dropDependentColumn() { + return this.dropDependentColumn; + } + + public boolean getDropDependentColumn() { + return this.dropDependentColumn; + } + + @Override + public boolean filterAllRemaining() { + return false; + } + + @Override + public ReturnCode filterKeyValue(KeyValue v) { + // Check if the column and qualifier match + if (!v.matchingColumn(this.columnFamily, this.columnQualifier)) { + // include non-matches for the time being, they'll be discarded afterwards + return ReturnCode.INCLUDE; + } + // If it doesn't pass the op, skip it + if (comparator != null + && doCompare(compareOp, comparator, v.getBuffer(), v.getValueOffset(), + v.getValueLength())) + return ReturnCode.SKIP; + + stampSet.add(v.getTimestamp()); + if(dropDependentColumn) { + return ReturnCode.SKIP; + } + return ReturnCode.INCLUDE; + } + + @Override + public void filterRow(List kvs) { + Iterator it = kvs.iterator(); + KeyValue kv; + while(it.hasNext()) { + kv = it.next(); + if(!stampSet.contains(kv.getTimestamp())) { + it.remove(); + } + } + } + + @Override + public boolean hasFilterRow() { + return true; + } + + @Override + public boolean filterRow() { + return false; + } + + @Override + public boolean filterRowKey(byte[] buffer, int offset, int length) { + return false; + } + @Override + public void reset() { + stampSet.clear(); + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 2 || + filterArguments.size() == 3 || + filterArguments.size() == 5, + "Expected 2, 3 or 5 but got: %s", filterArguments.size()); + if (filterArguments.size() == 2) { + byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); + return new DependentColumnFilter(family, qualifier); + + } else if (filterArguments.size() == 3) { + byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + byte [] qualifier 
= ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); + boolean dropDependentColumn = ParseFilter.convertByteArrayToBoolean(filterArguments.get(2)); + return new DependentColumnFilter(family, qualifier, dropDependentColumn); + + } else if (filterArguments.size() == 5) { + byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); + boolean dropDependentColumn = ParseFilter.convertByteArrayToBoolean(filterArguments.get(2)); + CompareOp compareOp = ParseFilter.createCompareOp(filterArguments.get(3)); + ByteArrayComparable comparator = ParseFilter.createComparator( + ParseFilter.removeQuotesFromByteArray(filterArguments.get(4))); + return new DependentColumnFilter(family, qualifier, dropDependentColumn, + compareOp, comparator); + } else { + throw new IllegalArgumentException("Expected 2, 3 or 5 but got: " + filterArguments.size()); + } + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.DependentColumnFilter.Builder builder = + FilterProtos.DependentColumnFilter.newBuilder(); + builder.setCompareFilter(super.convert()); + if (this.columnFamily != null) { + builder.setColumnFamily(ByteString.copyFrom(this.columnFamily)); + } + if (this.columnQualifier != null) { + builder.setColumnQualifier(ByteString.copyFrom(this.columnQualifier)); + } + builder.setDropDependentColumn(this.dropDependentColumn); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link DependentColumnFilter} instance + * @return An instance of {@link DependentColumnFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static DependentColumnFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.DependentColumnFilter proto; + try { + proto = FilterProtos.DependentColumnFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + final CompareOp valueCompareOp = + CompareOp.valueOf(proto.getCompareFilter().getCompareOp().name()); + ByteArrayComparable valueComparator = null; + try { + if (proto.getCompareFilter().hasComparator()) { + valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator()); + } + } catch (IOException ioe) { + throw new DeserializationException(ioe); + } + return new DependentColumnFilter( + proto.hasColumnFamily()?proto.getColumnFamily().toByteArray():null, + proto.hasColumnQualifier()?proto.getColumnQualifier().toByteArray():null, + proto.getDropDependentColumn(), valueCompareOp, valueComparator); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. 
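
Reviewer note (not part of the patch): a hedged usage sketch for DependentColumnFilter; the family "cf", qualifier "flag" and value "on" are illustrative. It keeps only cells whose timestamp also appears on that reference column.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.DependentColumnFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class DependentColumnFilterExample {
  /**
   * Keep only cells whose timestamp also appears on cf:flag with value "on";
   * family, qualifier and value are illustrative. As the class comment notes,
   * this filter is not compatible with Scan.setBatch.
   */
  public static Scan dependentScan() {
    Scan scan = new Scan();
    scan.setFilter(new DependentColumnFilter(
        Bytes.toBytes("cf"), Bytes.toBytes("flag"),
        false,                       // keep the dependent column itself
        CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("on"))));
    return scan;
  }
}
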
+ */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof DependentColumnFilter)) return false; + + DependentColumnFilter other = (DependentColumnFilter)o; + return other != null && super.areSerializedFieldsEqual(other) + && Bytes.equals(this.getFamily(), other.getFamily()) + && Bytes.equals(this.getQualifier(), other.getQualifier()) + && this.dropDependentColumn() == other.dropDependentColumn(); + } + + @Override + public String toString() { + return String.format("%s (%s, %s, %s, %s, %s)", + this.getClass().getSimpleName(), + Bytes.toStringBinary(this.columnFamily), + Bytes.toStringBinary(this.columnQualifier), + this.dropDependentColumn, + this.compareOp.name(), + Bytes.toStringBinary(this.comparator.getValue())); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java new file mode 100644 index 0000000..fb7af8d --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java @@ -0,0 +1,130 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + +import java.io.IOException; +import java.util.ArrayList; + +/** + * This filter is used to filter based on the column family. It takes an + * operator (equal, greater, not equal, etc) and a byte [] comparator for the + * column family portion of a key. + *

+ * <p>
+ * This filter can be wrapped with {@link org.apache.hadoop.hbase.filter.WhileMatchFilter} and
+ * {@link org.apache.hadoop.hbase.filter.SkipFilter} to add more control.
+ * <p>
+ * Multiple filters can be combined using {@link org.apache.hadoop.hbase.filter.FilterList}.
+ * <p>
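
Reviewer note (not part of the patch): a usage sketch for a case that Get#addFamily cannot express, keeping every family that sorts strictly before an illustrative boundary value.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyFilterExample {
  /** Keep only cells from families that sort before "d" (the boundary is illustrative). */
  public static Scan earlyFamiliesScan() {
    Scan scan = new Scan();
    scan.setFilter(new FamilyFilter(CompareOp.LESS,
        new BinaryComparator(Bytes.toBytes("d"))));
    return scan;
  }
}
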

      + * If an already known column family is looked for, use {@link org.apache.hadoop.hbase.client.Get#addFamily(byte[])} + * directly rather than a filter. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class FamilyFilter extends CompareFilter { + + /** + * Constructor. + * + * @param familyCompareOp the compare op for column family matching + * @param familyComparator the comparator for column family matching + */ + public FamilyFilter(final CompareOp familyCompareOp, + final ByteArrayComparable familyComparator) { + super(familyCompareOp, familyComparator); + } + + @Override + public ReturnCode filterKeyValue(KeyValue v) { + int familyLength = v.getFamilyLength(); + if (familyLength > 0) { + if (doCompare(this.compareOp, this.comparator, v.getBuffer(), + v.getFamilyOffset(), familyLength)) { + return ReturnCode.SKIP; + } + } + return ReturnCode.INCLUDE; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + ArrayList arguments = CompareFilter.extractArguments(filterArguments); + CompareOp compareOp = (CompareOp)arguments.get(0); + ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); + return new FamilyFilter(compareOp, comparator); + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.FamilyFilter.Builder builder = + FilterProtos.FamilyFilter.newBuilder(); + builder.setCompareFilter(super.convert()); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link FamilyFilter} instance + * @return An instance of {@link FamilyFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static FamilyFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.FamilyFilter proto; + try { + proto = FilterProtos.FamilyFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + final CompareOp valueCompareOp = + CompareOp.valueOf(proto.getCompareFilter().getCompareOp().name()); + ByteArrayComparable valueComparator = null; + try { + if (proto.getCompareFilter().hasComparator()) { + valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator()); + } + } catch (IOException ioe) { + throw new DeserializationException(ioe); + } + return new FamilyFilter(valueCompareOp,valueComparator); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof FamilyFilter)) return false; + + FamilyFilter other = (FamilyFilter)o; + return super.areSerializedFieldsEqual(other); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java new file mode 100644 index 0000000..edb456e --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java @@ -0,0 +1,196 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; + +import java.util.List; + +/** + * Interface for row and column filters directly applied within the regionserver. + * A filter can expect the following call sequence: + *

+ * <ul>
+ *   <li>{@link #reset()}</li>
+ *   <li>{@link #filterAllRemaining()} -> true indicates scan is over, false, keep going on.</li>
+ *   <li>{@link #filterRowKey(byte[],int,int)} -> true to drop this row,
+ *       if false, we will also call</li>
+ *   <li>{@link #filterKeyValue(KeyValue)} -> true to drop this key/value</li>
+ *   <li>{@link #filterRow(List)} -> allows direct modification of the final list to be submitted</li>
+ *   <li>{@link #filterRow()} -> last chance to drop entire row based on the sequence of
+ *       filterValue() calls. Eg: filter a row if it doesn't contain a specified column.</li>
+ * </ul>
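
Reviewer note (not part of the patch): a minimal sketch of a custom filter that plugs into the call sequence above by extending FilterBase (added later in this patch) and overriding only the hooks it needs; the length threshold is illustrative, and a real filter would also need toByteArray/parseFrom plus deployment on the region server classpath to survive serialization.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.filter.FilterBase;

/** Skips cells whose value is longer than a fixed threshold (illustrative only). */
public class ValueLengthFilter extends FilterBase {
  private final int maxValueLength;
  private boolean foundShortValue = false;

  public ValueLengthFilter(int maxValueLength) {
    this.maxValueLength = maxValueLength;
  }

  @Override
  public void reset() {
    // Called between rows; clear per-row state.
    foundShortValue = false;
  }

  @Override
  public ReturnCode filterKeyValue(KeyValue v) {
    if (v.getValueLength() > maxValueLength) {
      return ReturnCode.SKIP;     // drop this cell, keep scanning the row
    }
    foundShortValue = true;
    return ReturnCode.INCLUDE;    // keep this cell
  }

  @Override
  public boolean hasFilterRow() {
    return true;
  }

  @Override
  public boolean filterRow() {
    // Last chance to drop the whole row if no cell passed the length check.
    return !foundShortValue;
  }
}
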
      + * + * Filter instances are created one per region/scan. This abstract class replaces + * the old RowFilterInterface. + * + * When implementing your own filters, consider inheriting {@link FilterBase} to help + * you reduce boilerplate. + * + * @see FilterBase + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public abstract class Filter { + /** + * Reset the state of the filter between rows. + */ + abstract public void reset(); + + /** + * Filters a row based on the row key. If this returns true, the entire + * row will be excluded. If false, each KeyValue in the row will be + * passed to {@link #filterKeyValue(KeyValue)} below. + * + * @param buffer buffer containing row key + * @param offset offset into buffer where row key starts + * @param length length of the row key + * @return true, remove entire row, false, include the row (maybe). + */ + abstract public boolean filterRowKey(byte [] buffer, int offset, int length); + + /** + * If this returns true, the scan will terminate. + * + * @return true to end scan, false to continue. + */ + abstract public boolean filterAllRemaining(); + + /** + * A way to filter based on the column family, column qualifier and/or the + * column value. Return code is described below. This allows filters to + * filter only certain number of columns, then terminate without matching ever + * column. + * + * If your filter returns ReturnCode.NEXT_ROW, it should return + * ReturnCode.NEXT_ROW until {@link #reset()} is called + * just in case the caller calls for the next row. + * + * @param v the KeyValue in question + * @return code as described below + * @see Filter.ReturnCode + */ + abstract public ReturnCode filterKeyValue(final KeyValue v); + + /** + * Give the filter a chance to transform the passed KeyValue. + * If the KeyValue is changed a new KeyValue object must be returned. + * @see org.apache.hadoop.hbase.KeyValue#shallowCopy() + * + * The transformed KeyValue is what is eventually returned to the + * client. Most filters will return the passed KeyValue unchanged. + * @see org.apache.hadoop.hbase.filter.KeyOnlyFilter#transform(KeyValue) + * for an example of a transformation. + * + * @param v the KeyValue in question + * @return the changed KeyValue + */ + abstract public KeyValue transform(final KeyValue v); + + /** + * Return codes for filterValue(). + */ + public enum ReturnCode { + /** + * Include the KeyValue + */ + INCLUDE, + /** + * Include the KeyValue and seek to the next column skipping older versions. + */ + INCLUDE_AND_NEXT_COL, + /** + * Skip this KeyValue + */ + SKIP, + /** + * Skip this column. Go to the next column in this row. + */ + NEXT_COL, + /** + * Done with columns, skip to next row. Note that filterRow() will + * still be called. + */ + NEXT_ROW, + /** + * Seek to next key which is given as hint by the filter. + */ + SEEK_NEXT_USING_HINT, +} + + /** + * Chance to alter the list of keyvalues to be submitted. + * Modifications to the list will carry on + * @param kvs the list of keyvalues to be filtered + */ + abstract public void filterRow(List kvs); + + /** + * @return True if this filter actively uses filterRow(List) or filterRow(). + * Primarily used to check for conflicts with scans(such as scans + * that do not read a full row at a time) + */ + abstract public boolean hasFilterRow(); + + /** + * Last chance to veto row based on previous {@link #filterKeyValue(KeyValue)} + * calls. 
The filter needs to retain state then return a particular value for + * this call if they wish to exclude a row if a certain column is missing + * (for example). + * @return true to exclude row, false to include row. + */ + abstract public boolean filterRow(); + + /** + * If the filter returns the match code SEEK_NEXT_USING_HINT, then + * it should also tell which is the next key it must seek to. + * After receiving the match code SEEK_NEXT_USING_HINT, the QueryMatcher would + * call this function to find out which key it must next seek to. + * @return KeyValue which must be next seeked. return null if the filter is + * not sure which key to seek to next. + */ + abstract public KeyValue getNextKeyHint(final KeyValue currentKV); + + /** + * @return The filter serialized using pb + */ + abstract public byte [] toByteArray(); + + /** + * @param pbBytes A pb serialized {@link Filter} instance + * @return An instance of {@link Filter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static Filter parseFrom(final byte [] pbBytes) throws DeserializationException { + throw new DeserializationException( + "parseFrom called on base Filter, but should be called on derived type"); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + abstract boolean areSerializedFieldsEqual(Filter other); +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java new file mode 100644 index 0000000..b41bc7f --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java @@ -0,0 +1,170 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.KeyValue; + +import java.util.List; +import java.util.ArrayList; + +/** + * Abstract base class to help you implement new Filters. Common "ignore" or NOOP type + * methods can go here, helping to reduce boiler plate in an ever-expanding filter + * library. + * + * If you could instantiate FilterBase, it would end up being a "null" filter - + * that is one that never filters anything. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public abstract class FilterBase extends Filter { + + /** + * Filters that are purely stateless and do nothing in their reset() methods can inherit + * this null/empty implementation. 
+ * + * @inheritDoc + */ + @Override + public void reset() { + } + + /** + * Filters that do not filter by row key can inherit this implementation that + * never filters anything. (ie: returns false). + * + * @inheritDoc + */ + @Override + public boolean filterRowKey(byte [] buffer, int offset, int length) { + return false; + } + + /** + * Filters that never filter all remaining can inherit this implementation that + * never stops the filter early. + * + * @inheritDoc + */ + @Override + public boolean filterAllRemaining() { + return false; + } + + /** + * Filters that dont filter by key value can inherit this implementation that + * includes all KeyValues. + * + * @inheritDoc + */ + @Override + public ReturnCode filterKeyValue(KeyValue ignored) { + return ReturnCode.INCLUDE; + } + + /** + * By default no transformation takes place + * + * @inheritDoc + */ + @Override + public KeyValue transform(KeyValue v) { + return v; + } + + /** + * Filters that never filter by modifying the returned List of KeyValues can + * inherit this implementation that does nothing. + * + * @inheritDoc + */ + @Override + public void filterRow(List ignored) { + } + + /** + * Fitlers that never filter by modifying the returned List of KeyValues can + * inherit this implementation that does nothing. + * + * @inheritDoc + */ + @Override + public boolean hasFilterRow() { + return false; + } + + /** + * Filters that never filter by rows based on previously gathered state from + * {@link #filterKeyValue(KeyValue)} can inherit this implementation that + * never filters a row. + * + * @inheritDoc + */ + @Override + public boolean filterRow() { + return false; + } + + /** + * Filters that are not sure which key must be next seeked to, can inherit + * this implementation that, by default, returns a null KeyValue. + * + * @inheritDoc + */ + public KeyValue getNextKeyHint(KeyValue currentKV) { + return null; + } + + /** + * Given the filter's arguments it constructs the filter + *

      + * @param filterArguments the filter's arguments + * @return constructed filter object + */ + public static Filter createFilterFromArguments(ArrayList filterArguments) { + throw new IllegalArgumentException("This method has not been implemented"); + } + + /** + * Return filter's info for debugging and logging purpose. + */ + public String toString() { + return this.getClass().getSimpleName(); + } + + /** + * Return length 0 byte array for Filters that don't require special serialization + */ + public byte [] toByteArray() { + return new byte[0]; + } + + /** + * Default implementation so that writers of custom filters aren't forced to implement. + * + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter other) { + return true; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java new file mode 100644 index 0000000..81f9a4b --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java @@ -0,0 +1,378 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * Implementation of {@link Filter} that represents an ordered List of Filters + * which will be evaluated with a specified boolean operator {@link Operator#MUST_PASS_ALL} + * (!AND) or {@link Operator#MUST_PASS_ONE} (!OR). + * Since you can use Filter Lists as children of Filter Lists, you can create a + * hierarchy of filters to be evaluated. + * Defaults to {@link Operator#MUST_PASS_ALL}. + *
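
Reviewer note (not part of the patch): a sketch of combining two filters with MUST_PASS_ONE, using the varargs constructor; the row key and qualifier values are illustrative.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterListExample {
  /** Keep a cell if EITHER its row is "row-1" OR its qualifier is "q1" (values illustrative). */
  public static Scan eitherMatchScan() {
    FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ONE,
        new RowFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("row-1"))),
        new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("q1"))));
    Scan scan = new Scan();
    scan.setFilter(list);
    return scan;
  }
}
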

      TODO: Fix creation of Configuration on serialization and deserialization. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class FilterList extends Filter { + /** set operator */ + public static enum Operator { + /** !AND */ + MUST_PASS_ALL, + /** !OR */ + MUST_PASS_ONE + } + + private static final Configuration conf = HBaseConfiguration.create(); + private static final int MAX_LOG_FILTERS = 5; + private Operator operator = Operator.MUST_PASS_ALL; + private List filters = new ArrayList(); + + /** + * Constructor that takes a set of {@link Filter}s. The default operator + * MUST_PASS_ALL is assumed. + * + * @param rowFilters list of filters + */ + public FilterList(final List rowFilters) { + if (rowFilters instanceof ArrayList) { + this.filters = rowFilters; + } else { + this.filters = new ArrayList(rowFilters); + } + } + + /** + * Constructor that takes a var arg number of {@link Filter}s. The fefault operator + * MUST_PASS_ALL is assumed. + * @param rowFilters + */ + public FilterList(final Filter... rowFilters) { + this.filters = new ArrayList(Arrays.asList(rowFilters)); + } + + /** + * Constructor that takes an operator. + * + * @param operator Operator to process filter set with. + */ + public FilterList(final Operator operator) { + this.operator = operator; + } + + /** + * Constructor that takes a set of {@link Filter}s and an operator. + * + * @param operator Operator to process filter set with. + * @param rowFilters Set of row filters. + */ + public FilterList(final Operator operator, final List rowFilters) { + this.filters = new ArrayList(rowFilters); + this.operator = operator; + } + + /** + * Constructor that takes a var arg number of {@link Filter}s and an operator. + * + * @param operator Operator to process filter set with. + * @param rowFilters Filters to use + */ + public FilterList(final Operator operator, final Filter... rowFilters) { + this.filters = new ArrayList(Arrays.asList(rowFilters)); + this.operator = operator; + } + + /** + * Get the operator. + * + * @return operator + */ + public Operator getOperator() { + return operator; + } + + /** + * Get the filters. + * + * @return filters + */ + public List getFilters() { + return filters; + } + + /** + * Add a filter. 
+ * + * @param filter another filter + */ + public void addFilter(Filter filter) { + this.filters.add(filter); + } + + @Override + public void reset() { + for (Filter filter : filters) { + filter.reset(); + } + } + + @Override + public boolean filterRowKey(byte[] rowKey, int offset, int length) { + for (Filter filter : filters) { + if (this.operator == Operator.MUST_PASS_ALL) { + if (filter.filterAllRemaining() || + filter.filterRowKey(rowKey, offset, length)) { + return true; + } + } else if (this.operator == Operator.MUST_PASS_ONE) { + if (!filter.filterAllRemaining() && + !filter.filterRowKey(rowKey, offset, length)) { + return false; + } + } + } + return this.operator == Operator.MUST_PASS_ONE; + } + + @Override + public boolean filterAllRemaining() { + for (Filter filter : filters) { + if (filter.filterAllRemaining()) { + if (operator == Operator.MUST_PASS_ALL) { + return true; + } + } else { + if (operator == Operator.MUST_PASS_ONE) { + return false; + } + } + } + return operator == Operator.MUST_PASS_ONE; + } + + @Override + public KeyValue transform(KeyValue v) { + KeyValue current = v; + for (Filter filter : filters) { + current = filter.transform(current); + } + return current; + } + + @Override + public ReturnCode filterKeyValue(KeyValue v) { + ReturnCode rc = operator == Operator.MUST_PASS_ONE? + ReturnCode.SKIP: ReturnCode.INCLUDE; + for (Filter filter : filters) { + if (operator == Operator.MUST_PASS_ALL) { + if (filter.filterAllRemaining()) { + return ReturnCode.NEXT_ROW; + } + ReturnCode code = filter.filterKeyValue(v); + switch (code) { + // Override INCLUDE and continue to evaluate. + case INCLUDE_AND_NEXT_COL: + rc = ReturnCode.INCLUDE_AND_NEXT_COL; + case INCLUDE: + continue; + default: + return code; + } + } else if (operator == Operator.MUST_PASS_ONE) { + if (filter.filterAllRemaining()) { + continue; + } + + switch (filter.filterKeyValue(v)) { + case INCLUDE: + if (rc != ReturnCode.INCLUDE_AND_NEXT_COL) { + rc = ReturnCode.INCLUDE; + } + break; + case INCLUDE_AND_NEXT_COL: + rc = ReturnCode.INCLUDE_AND_NEXT_COL; + // must continue here to evaluate all filters + break; + case NEXT_ROW: + break; + case SKIP: + // continue; + break; + case NEXT_COL: + break; + case SEEK_NEXT_USING_HINT: + break; + default: + throw new IllegalStateException("Received code is not valid."); + } + } + } + return rc; + } + + @Override + public void filterRow(List kvs) { + for (Filter filter : filters) { + filter.filterRow(kvs); + } + } + + @Override + public boolean hasFilterRow() { + for (Filter filter : filters) { + if(filter.hasFilterRow()) { + return true; + } + } + return false; + } + + @Override + public boolean filterRow() { + for (Filter filter : filters) { + if (operator == Operator.MUST_PASS_ALL) { + if (filter.filterAllRemaining() || filter.filterRow()) { + return true; + } + } else if (operator == Operator.MUST_PASS_ONE) { + if (!filter.filterAllRemaining() + && !filter.filterRow()) { + return false; + } + } + } + return operator == Operator.MUST_PASS_ONE; + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.FilterList.Builder builder = + FilterProtos.FilterList.newBuilder(); + builder.setOperator(FilterProtos.FilterList.Operator.valueOf(operator.name())); + for (Filter filter : filters) { + builder.addFilters(ProtobufUtil.toFilter(filter)); + } + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link FilterList} instance + * @return An instance of {@link FilterList} made from bytes + * 
@throws DeserializationException + * @see #toByteArray + */ + public static FilterList parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.FilterList proto; + try { + proto = FilterProtos.FilterList.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + + List rowFilters = new ArrayList(proto.getFiltersCount()); + try { + for (HBaseProtos.Filter filter : proto.getFiltersList()) { + rowFilters.add(ProtobufUtil.toFilter(filter)); + } + } catch (IOException ioe) { + throw new DeserializationException(ioe); + } + return new FilterList(Operator.valueOf(proto.getOperator().name()),rowFilters); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof FilterList)) return false; + + FilterList other = (FilterList)o; + return this.getOperator().equals(other.getOperator()) && + ((this.getFilters() == other.getFilters()) + || this.getFilters().equals(other.getFilters())); + } + + @Override + public KeyValue getNextKeyHint(KeyValue currentKV) { + KeyValue keyHint = null; + for (Filter filter : filters) { + KeyValue curKeyHint = filter.getNextKeyHint(currentKV); + if (curKeyHint == null && operator == Operator.MUST_PASS_ONE) { + // If we ever don't have a hint and this is must-pass-one, then no hint + return null; + } + if (curKeyHint != null) { + // If this is the first hint we find, set it + if (keyHint == null) { + keyHint = curKeyHint; + continue; + } + // There is an existing hint + if (operator == Operator.MUST_PASS_ALL && + KeyValue.COMPARATOR.compare(keyHint, curKeyHint) < 0) { + // If all conditions must pass, we can keep the max hint + keyHint = curKeyHint; + } else if (operator == Operator.MUST_PASS_ONE && + KeyValue.COMPARATOR.compare(keyHint, curKeyHint) > 0) { + // If any condition can pass, we need to keep the min hint + keyHint = curKeyHint; + } + } + } + return keyHint; + } + + @Override + public String toString() { + return toString(MAX_LOG_FILTERS); + } + + protected String toString(int maxFilters) { + int endIndex = this.filters.size() < maxFilters + ? this.filters.size() : maxFilters; + return String.format("%s %s (%d/%d): %s", + this.getClass().getSimpleName(), + this.operator == Operator.MUST_PASS_ALL ? "AND" : "OR", + endIndex, + this.filters.size(), + this.filters.subList(0, endIndex).toString()); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java new file mode 100644 index 0000000..7a9af35 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java @@ -0,0 +1,151 @@ +/* + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.filter; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * This is a Filter wrapper class which is used in the server side. Some filter + * related hooks can be defined in this wrapper. The only way to create a + * FilterWrapper instance is passing a client side Filter instance through + * {@link org.apache.hadoop.hbase.client.Scan#getFilter()}. + * + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class FilterWrapper extends Filter { + Filter filter = null; + + public FilterWrapper( Filter filter ) { + if (null == filter) { + // ensure the filter instance is not null + throw new NullPointerException("Cannot create FilterWrapper with null Filter"); + } + this.filter = filter; + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.FilterWrapper.Builder builder = + FilterProtos.FilterWrapper.newBuilder(); + builder.setFilter(ProtobufUtil.toFilter(this.filter)); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link FilterWrapper} instance + * @return An instance of {@link FilterWrapper} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static FilterWrapper parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.FilterWrapper proto; + try { + proto = FilterProtos.FilterWrapper.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + try { + return new FilterWrapper(ProtobufUtil.toFilter(proto.getFilter())); + } catch (IOException ioe) { + throw new DeserializationException(ioe); + } + } + + @Override + public void reset() { + this.filter.reset(); + } + + @Override + public boolean filterAllRemaining() { + return this.filter.filterAllRemaining(); + } + + @Override + public boolean filterRow() { + return this.filter.filterRow(); + } + + @Override + public KeyValue getNextKeyHint(KeyValue currentKV) { + return this.filter.getNextKeyHint(currentKV); + } + + @Override + public boolean filterRowKey(byte[] buffer, int offset, int length) { + return this.filter.filterRowKey(buffer, offset, length); + } + + @Override + public ReturnCode filterKeyValue(KeyValue v) { + return this.filter.filterKeyValue(v); + } + + @Override + public KeyValue transform(KeyValue v) { + return this.filter.transform(v); + } + + @Override + public boolean hasFilterRow() { + return this.filter.hasFilterRow(); + } + + @Override + public void filterRow(List kvs) { + //To fix HBASE-6429, + //Filter with filterRow() returning true is incompatible with scan with limit + //1. 
hasFilterRow() returns true, if either filterRow() or filterRow(kvs) is implemented. + //2. filterRow() is merged with filterRow(kvs), + //so that to make all those row related filtering stuff in the same function. + this.filter.filterRow(kvs); + if (!kvs.isEmpty() && this.filter.filterRow()) { + kvs.clear(); + } + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof FilterWrapper)) return false; + + FilterWrapper other = (FilterWrapper)o; + return this.filter.areSerializedFieldsEqual(other.filter); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java new file mode 100644 index 0000000..1b63560 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java @@ -0,0 +1,114 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import java.util.ArrayList; + +import com.google.common.base.Preconditions; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * A filter that will only return the first KV from each row. + *
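
Reviewer note (not part of the patch): as the class comment goes on to note, this filter suits row counting; a minimal counting sketch, assuming an already opened HTable.

import java.io.IOException;

import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

public class RowCountExample {
  /** Count rows by fetching only the first cell of each row. */
  public static long countRows(HTable table) throws IOException {
    Scan scan = new Scan();
    scan.setFilter(new FirstKeyOnlyFilter());
    ResultScanner scanner = table.getScanner(scan);
    long rows = 0;
    try {
      for (Result r : scanner) {
        rows++;   // one Result per row; each carries only its first cell
      }
    } finally {
      scanner.close();
    }
    return rows;
  }
}
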

      + * This filter can be used to more efficiently perform row count operations. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class FirstKeyOnlyFilter extends FilterBase { + private boolean foundKV = false; + + public FirstKeyOnlyFilter() { + } + + public void reset() { + foundKV = false; + } + + public ReturnCode filterKeyValue(KeyValue v) { + if(foundKV) return ReturnCode.NEXT_ROW; + foundKV = true; + return ReturnCode.INCLUDE; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 0, + "Expected 0 but got: %s", filterArguments.size()); + return new FirstKeyOnlyFilter(); + } + + /** + * @return true if first KV has been found. + */ + protected boolean hasFoundKV() { + return this.foundKV; + } + + /** + * + * @param value update {@link #foundKV} flag with value. + */ + protected void setFoundKV(boolean value) { + this.foundKV = value; + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.FirstKeyOnlyFilter.Builder builder = + FilterProtos.FirstKeyOnlyFilter.newBuilder(); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link FirstKeyOnlyFilter} instance + * @return An instance of {@link FirstKeyOnlyFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static FirstKeyOnlyFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.FirstKeyOnlyFilter proto; + try { + proto = FilterProtos.FirstKeyOnlyFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + + return new FirstKeyOnlyFilter(); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof FirstKeyOnlyFilter)) return false; + + return true; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java new file mode 100644 index 0000000..f479420 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.filter; + +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import java.util.Set; +import java.util.TreeSet; + +/** + * The filter looks for the given columns in KeyValue. Once there is a match for + * any one of the columns, it returns ReturnCode.NEXT_ROW for remaining + * KeyValues in the row. + *

      + * Note : It may emit KVs which do not have the given columns in them, if + * these KVs happen to occur before a KV which does have a match. Given this + * caveat, this filter is only useful for special cases + * like {@link org.apache.hadoop.hbase.mapreduce.RowCounter}. + *
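
Reviewer note (not part of the patch): a usage sketch that short-circuits a row once a cell in one of two illustrative qualifiers has been seen.

import java.util.Set;
import java.util.TreeSet;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class MatchingQualifiersExample {
  /** Stop reading a row once a cell in qualifier "a" or "b" has been seen (names illustrative). */
  public static Scan shortCircuitScan() {
    Set<byte[]> qualifiers = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    qualifiers.add(Bytes.toBytes("a"));
    qualifiers.add(Bytes.toBytes("b"));
    Scan scan = new Scan();
    scan.setFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers));
    return scan;
  }
}
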

      + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter { + + private Set qualifiers; + + /** + * Constructor which takes a set of columns. As soon as first KeyValue + * matching any of these columns is found, filter moves to next row. + * + * @param qualifiers the set of columns to me matched. + */ + public FirstKeyValueMatchingQualifiersFilter(Set qualifiers) { + this.qualifiers = qualifiers; + } + + public ReturnCode filterKeyValue(KeyValue v) { + if (hasFoundKV()) { + return ReturnCode.NEXT_ROW; + } else if (hasOneMatchingQualifier(v)) { + setFoundKV(true); + } + return ReturnCode.INCLUDE; + } + + private boolean hasOneMatchingQualifier(KeyValue v) { + for (byte[] q : qualifiers) { + if (v.matchingQualifier(q)) { + return true; + } + } + return false; + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.FirstKeyValueMatchingQualifiersFilter.Builder builder = + FilterProtos.FirstKeyValueMatchingQualifiersFilter.newBuilder(); + for (byte[] qualifier : qualifiers) { + if (qualifier != null) builder.addQualifiers(ByteString.copyFrom(qualifier)); + } + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link FirstKeyValueMatchingQualifiersFilter} instance + * @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.FirstKeyValueMatchingQualifiersFilter proto; + try { + proto = FilterProtos.FirstKeyValueMatchingQualifiersFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + + TreeSet qualifiers = new TreeSet(Bytes.BYTES_COMPARATOR); + for (ByteString qualifier : proto.getQualifiersList()) { + qualifiers.add(qualifier.toByteArray()); + } + return new FirstKeyValueMatchingQualifiersFilter(qualifiers); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof FirstKeyValueMatchingQualifiersFilter)) return false; + + FirstKeyValueMatchingQualifiersFilter other = (FirstKeyValueMatchingQualifiersFilter)o; + return this.qualifiers.equals(other.qualifiers); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java new file mode 100644 index 0000000..00d7b12 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java @@ -0,0 +1,333 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.filter; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; + +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * Filters data based on fuzzy row key. Performs fast-forwards during scanning. + * It takes pairs (row key, fuzzy info) to match row keys. Where fuzzy info is + * a byte array with 0 or 1 as its values: + *

 + * <ul>
 + * <li>
 + * 0 - means that this byte in provided row key is fixed, i.e. row key's byte at same position
 + * must match
 + * </li>
 + * <li>
 + * 1 - means that this byte in provided row key is NOT fixed, i.e. row key's byte at this
 + * position can be different from the one in provided row key
 + * </li>
 + * </ul>
      + * + * + * Example: + * Let's assume row key format is userId_actionId_year_month. Length of userId is fixed + * and is 4, length of actionId is 2 and year and month are 4 and 2 bytes long respectively. + * + * Let's assume that we need to fetch all users that performed certain action (encoded as "99") + * in Jan of any year. Then the pair (row key, fuzzy info) would be the following: + * row key = "????_99_????_01" (one can use any value instead of "?") + * fuzzy info = "\x01\x01\x01\x01\x00\x00\x00\x00\x01\x01\x01\x01\x00\x00\x00" + * + * I.e. fuzzy info tells the matching mask is "????_99_????_01", where at ? can be any value. + * + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class FuzzyRowFilter extends FilterBase { + private List> fuzzyKeysData; + private boolean done = false; + + public FuzzyRowFilter(List> fuzzyKeysData) { + this.fuzzyKeysData = fuzzyKeysData; + } + + // TODO: possible improvement: save which fuzzy row key to use when providing a hint + @Override + public ReturnCode filterKeyValue(KeyValue kv) { + byte[] rowKey = kv.getRow(); + // assigning "worst" result first and looking for better options + SatisfiesCode bestOption = SatisfiesCode.NO_NEXT; + for (Pair fuzzyData : fuzzyKeysData) { + SatisfiesCode satisfiesCode = + satisfies(rowKey, fuzzyData.getFirst(), fuzzyData.getSecond()); + if (satisfiesCode == SatisfiesCode.YES) { + return ReturnCode.INCLUDE; + } + + if (satisfiesCode == SatisfiesCode.NEXT_EXISTS) { + bestOption = SatisfiesCode.NEXT_EXISTS; + } + } + + if (bestOption == SatisfiesCode.NEXT_EXISTS) { + return ReturnCode.SEEK_NEXT_USING_HINT; + } + + // the only unhandled SatisfiesCode is NO_NEXT, i.e. we are done + done = true; + return ReturnCode.NEXT_ROW; + } + + @Override + public KeyValue getNextKeyHint(KeyValue currentKV) { + byte[] rowKey = currentKV.getRow(); + byte[] nextRowKey = null; + // Searching for the "smallest" row key that satisfies at least one fuzzy row key + for (Pair fuzzyData : fuzzyKeysData) { + byte[] nextRowKeyCandidate = getNextForFuzzyRule(rowKey, + fuzzyData.getFirst(), fuzzyData.getSecond()); + if (nextRowKeyCandidate == null) { + continue; + } + if (nextRowKey == null || Bytes.compareTo(nextRowKeyCandidate, nextRowKey) < 0) { + nextRowKey = nextRowKeyCandidate; + } + } + + if (nextRowKey == null) { + // SHOULD NEVER happen + // TODO: is there a better way than throw exception? (stop the scanner?) + throw new IllegalStateException("No next row key that satisfies fuzzy exists when" + + " getNextKeyHint() is invoked." 
+ + " Filter: " + this.toString() + + " currentKV: " + currentKV.toString()); + } + + return KeyValue.createFirstOnRow(nextRowKey); + } + + @Override + public boolean filterAllRemaining() { + return done; + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.FuzzyRowFilter.Builder builder = + FilterProtos.FuzzyRowFilter.newBuilder(); + for (Pair fuzzyData : fuzzyKeysData) { + BytesBytesPair.Builder bbpBuilder = BytesBytesPair.newBuilder(); + bbpBuilder.setFirst(ByteString.copyFrom(fuzzyData.getFirst())); + bbpBuilder.setSecond(ByteString.copyFrom(fuzzyData.getSecond())); + builder.addFuzzyKeysData(bbpBuilder); + } + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link FuzzyRowFilter} instance + * @return An instance of {@link FuzzyRowFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static FuzzyRowFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.FuzzyRowFilter proto; + try { + proto = FilterProtos.FuzzyRowFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + int count = proto.getFuzzyKeysDataCount(); + ArrayList> fuzzyKeysData= new ArrayList>(count); + for (int i = 0; i < count; ++i) { + BytesBytesPair current = proto.getFuzzyKeysData(i); + byte[] keyBytes = current.getFirst().toByteArray(); + byte[] keyMeta = current.getSecond().toByteArray(); + fuzzyKeysData.add(new Pair(keyBytes, keyMeta)); + } + return new FuzzyRowFilter(fuzzyKeysData); + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder(); + sb.append("FuzzyRowFilter"); + sb.append("{fuzzyKeysData="); + for (Pair fuzzyData : fuzzyKeysData) { + sb.append('{').append(Bytes.toStringBinary(fuzzyData.getFirst())).append(":"); + sb.append(Bytes.toStringBinary(fuzzyData.getSecond())).append('}'); + } + sb.append("}, "); + return sb.toString(); + } + + // Utility methods + + static enum SatisfiesCode { + // row satisfies fuzzy rule + YES, + // row doesn't satisfy fuzzy rule, but there's possible greater row that does + NEXT_EXISTS, + // row doesn't satisfy fuzzy rule and there's no greater row that does + NO_NEXT + } + + static SatisfiesCode satisfies(byte[] row, + byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { + return satisfies(row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta); + } + + private static SatisfiesCode satisfies(byte[] row, int offset, int length, + byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { + if (row == null) { + // do nothing, let scan to proceed + return SatisfiesCode.YES; + } + + boolean nextRowKeyCandidateExists = false; + + for (int i = 0; i < fuzzyKeyMeta.length && i < length; i++) { + // First, checking if this position is fixed and not equals the given one + boolean byteAtPositionFixed = fuzzyKeyMeta[i] == 0; + boolean fixedByteIncorrect = byteAtPositionFixed && fuzzyKeyBytes[i] != row[i + offset]; + if (fixedByteIncorrect) { + // in this case there's another row that satisfies fuzzy rule and bigger than this row + if (nextRowKeyCandidateExists) { + return SatisfiesCode.NEXT_EXISTS; + } + + // If this row byte is less than fixed then there's a byte array bigger than + // this row and which satisfies the fuzzy rule. 
Otherwise there's no such byte array: + // this row is simply bigger than any byte array that satisfies the fuzzy rule + boolean rowByteLessThanFixed = (row[i + offset] & 0xFF) < (fuzzyKeyBytes[i] & 0xFF); + return rowByteLessThanFixed ? SatisfiesCode.NEXT_EXISTS : SatisfiesCode.NO_NEXT; + } + + // Second, checking if this position is not fixed and byte value is not the biggest. In this + // case there's a byte array bigger than this row and which satisfies the fuzzy rule. To get + // bigger byte array that satisfies the rule we need to just increase this byte + // (see the code of getNextForFuzzyRule below) by one. + // Note: if non-fixed byte is already at biggest value, this doesn't allow us to say there's + // bigger one that satisfies the rule as it can't be increased. + if (fuzzyKeyMeta[i] == 1 && !isMax(fuzzyKeyBytes[i])) { + nextRowKeyCandidateExists = true; + } + } + + return SatisfiesCode.YES; + } + + private static boolean isMax(byte fuzzyKeyByte) { + return (fuzzyKeyByte & 0xFF) == 255; + } + + static byte[] getNextForFuzzyRule(byte[] row, byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { + return getNextForFuzzyRule(row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta); + } + + /** + * @return greater byte array than given (row) which satisfies the fuzzy rule if it exists, + * null otherwise + */ + private static byte[] getNextForFuzzyRule(byte[] row, int offset, int length, + byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { + // To find out the next "smallest" byte array that satisfies fuzzy rule and "greater" than + // the given one we do the following: + // 1. setting values on all "fixed" positions to the values from fuzzyKeyBytes + // 2. if during the first step given row did not increase, then we increase the value at + // the first "non-fixed" position (where it is not maximum already) + + // It is easier to perform this by using fuzzyKeyBytes copy and setting "non-fixed" position + // values than otherwise. + byte[] result = Arrays.copyOf(fuzzyKeyBytes, + length > fuzzyKeyBytes.length ? length : fuzzyKeyBytes.length); + int toInc = -1; + + boolean increased = false; + for (int i = 0; i < result.length; i++) { + if (i >= fuzzyKeyMeta.length || fuzzyKeyMeta[i] == 1) { + result[i] = row[offset + i]; + if (!isMax(row[i])) { + // this is "non-fixed" position and is not at max value, hence we can increase it + toInc = i; + } + } else if (i < fuzzyKeyMeta.length && fuzzyKeyMeta[i] == 0) { + if ((row[i + offset] & 0xFF) < (fuzzyKeyBytes[i] & 0xFF)) { + // if setting value for any fixed position increased the original array, + // we are OK + increased = true; + break; + } + if ((row[i + offset] & 0xFF) > (fuzzyKeyBytes[i] & 0xFF)) { + // if setting value for any fixed position makes array "smaller", then just stop: + // in case we found some non-fixed position to increase we will do it, otherwise + // there's no "next" row key that satisfies fuzzy rule and "greater" than given row + break; + } + } + } + + if (!increased) { + if (toInc < 0) { + return null; + } + result[toInc]++; + + // Setting all "non-fixed" positions to zeroes to the right of the one we increased so + // that found "next" row key is the smallest possible + for (int i = toInc + 1; i < result.length; i++) { + if (i >= fuzzyKeyMeta.length || fuzzyKeyMeta[i] == 1) { + result[i] = 0; + } + } + } + + return result; + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. 
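A hedged sketch of wiring up the FuzzyRowFilter above for the userId_actionId_year_month layout described in its class comment; the table name "actions" is a placeholder and the fuzzy-info mask mirrors the "????_99_????_01" example in that comment:

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FuzzyRowFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class FuzzyRowFilterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Row key layout assumed by the class comment: userId(4) _ actionId(2) _ year(4) _ month(2).
    // Fixed positions (meta byte 0) must match; non-fixed positions (meta byte 1) are wildcards.
    byte[] rowKeyTemplate = Bytes.toBytes("????_99_????_01");
    byte[] fuzzyInfo = {
        1, 1, 1, 1,   // userId: any 4 bytes
        0, 0, 0, 0,   // "_99_" fixed
        1, 1, 1, 1,   // year: any 4 bytes
        0, 0, 0       // "_01" fixed
    };

    Scan scan = new Scan();
    scan.setFilter(new FuzzyRowFilter(
        Arrays.asList(new Pair<byte[], byte[]>(rowKeyTemplate, fuzzyInfo))));

    HTable table = new HTable(conf, "actions");   // placeholder table name
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        System.out.println(Bytes.toStringBinary(r.getRow()));
      }
    } finally {
      scanner.close();
      table.close();
    }
  }
}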
+ */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof FuzzyRowFilter)) return false; + + FuzzyRowFilter other = (FuzzyRowFilter)o; + if (this.fuzzyKeysData.size() != other.fuzzyKeysData.size()) return false; + for (int i = 0; i < fuzzyKeysData.size(); ++i) { + Pair thisData = this.fuzzyKeysData.get(i); + Pair otherData = other.fuzzyKeysData.get(i); + if (!(Bytes.equals(thisData.getFirst(), otherData.getFirst()) + && Bytes.equals(thisData.getSecond(), otherData.getSecond()))) { + return false; + } + } + return true; + } + +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java new file mode 100644 index 0000000..6fb1a62 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java @@ -0,0 +1,128 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import java.util.ArrayList; + +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * A Filter that stops after the given row. There is no "RowStopFilter" because + * the Scan spec allows you to specify a stop row. + * + * Use this filter to include the stop row, eg: [A,Z]. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class InclusiveStopFilter extends FilterBase { + private byte [] stopRowKey; + private boolean done = false; + + public InclusiveStopFilter(final byte [] stopRowKey) { + this.stopRowKey = stopRowKey; + } + + public byte[] getStopRowKey() { + return this.stopRowKey; + } + + public boolean filterRowKey(byte[] buffer, int offset, int length) { + if (buffer == null) { + //noinspection RedundantIfStatement + if (this.stopRowKey == null) { + return true; //filter... + } + return false; + } + // if stopRowKey is <= buffer, then true, filter row. 
+ int cmp = Bytes.compareTo(stopRowKey, 0, stopRowKey.length, + buffer, offset, length); + + if(cmp < 0) { + done = true; + } + return done; + } + + public boolean filterAllRemaining() { + return done; + } + + public static Filter createFilterFromArguments (ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, + "Expected 1 but got: %s", filterArguments.size()); + byte [] stopRowKey = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + return new InclusiveStopFilter(stopRowKey); + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.InclusiveStopFilter.Builder builder = + FilterProtos.InclusiveStopFilter.newBuilder(); + if (this.stopRowKey != null) builder.setStopRowKey(ByteString.copyFrom(this.stopRowKey)); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link InclusiveStopFilter} instance + * @return An instance of {@link InclusiveStopFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static InclusiveStopFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.InclusiveStopFilter proto; + try { + proto = FilterProtos.InclusiveStopFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new InclusiveStopFilter(proto.hasStopRowKey()?proto.getStopRowKey().toByteArray():null); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof InclusiveStopFilter)) return false; + + InclusiveStopFilter other = (InclusiveStopFilter)o; + return Bytes.equals(this.getStopRowKey(), other.getStopRowKey()); + } + + @Override + public String toString() { + return this.getClass().getSimpleName() + " " + Bytes.toStringBinary(this.stopRowKey); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java new file mode 100644 index 0000000..cf91072 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java @@ -0,0 +1,44 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
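A short, illustrative sketch for the InclusiveStopFilter added above: because a Scan's stop row is exclusive, the inclusive upper bound is expressed through the filter instead (table and row keys are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class InclusiveStopFilterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Scan [rowA, rowZ] inclusive: instead of setStopRow() (exclusive), the stop
    // boundary is expressed with InclusiveStopFilter so rowZ itself is returned.
    Scan scan = new Scan(Bytes.toBytes("rowA"));
    scan.setFilter(new InclusiveStopFilter(Bytes.toBytes("rowZ")));

    HTable table = new HTable(conf, "myTable");   // placeholder table name
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        System.out.println(Bytes.toStringBinary(r.getRow()));
      }
    } finally {
      scanner.close();
      table.close();
    }
  }
}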
+ */ +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Used to indicate a filter incompatibility + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class IncompatibleFilterException extends RuntimeException { + private static final long serialVersionUID = 3236763276623198231L; + +/** constructor */ + public IncompatibleFilterException() { + super(); + } + + /** + * constructor + * @param s message + */ + public IncompatibleFilterException(String s) { + super(s); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java new file mode 100644 index 0000000..251c953 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java @@ -0,0 +1,45 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Used to indicate an invalid RowFilter. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class InvalidRowFilterException extends RuntimeException { + private static final long serialVersionUID = 2667894046345657865L; + + + /** constructor */ + public InvalidRowFilterException() { + super(); + } + + /** + * constructor + * @param s message + */ + public InvalidRowFilterException(String s) { + super(s); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java new file mode 100644 index 0000000..3bb1390 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java @@ -0,0 +1,102 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.filter; + + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import java.util.ArrayList; + +import com.google.common.base.Preconditions; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * A filter that will only return the key component of each KV (the value will + * be rewritten as empty). + *

      + * This filter can be used to grab all of the keys without having to also grab + * the values. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class KeyOnlyFilter extends FilterBase { + + boolean lenAsVal; + public KeyOnlyFilter() { this(false); } + public KeyOnlyFilter(boolean lenAsVal) { this.lenAsVal = lenAsVal; } + + @Override + public KeyValue transform(KeyValue kv) { + return kv.createKeyOnly(this.lenAsVal); + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument((filterArguments.size() == 0 || filterArguments.size() == 1), + "Expected: 0 or 1 but got: %s", filterArguments.size()); + KeyOnlyFilter filter = new KeyOnlyFilter(); + if (filterArguments.size() == 1) { + filter.lenAsVal = ParseFilter.convertByteArrayToBoolean(filterArguments.get(0)); + } + return filter; + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.KeyOnlyFilter.Builder builder = + FilterProtos.KeyOnlyFilter.newBuilder(); + builder.setLenAsVal(this.lenAsVal); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link KeyOnlyFilter} instance + * @return An instance of {@link KeyOnlyFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static KeyOnlyFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.KeyOnlyFilter proto; + try { + proto = FilterProtos.KeyOnlyFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new KeyOnlyFilter(proto.getLenAsVal()); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof KeyOnlyFilter)) return false; + + KeyOnlyFilter other = (KeyOnlyFilter)o; + return this.lenAsVal == other.lenAsVal; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java new file mode 100644 index 0000000..be165aa --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
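A minimal sketch using the KeyOnlyFilter above to walk row/column coordinates without transferring values; the table name is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyOnlyFilterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Values come back empty (or as their length when lenAsVal is true), which keeps
    // the payload small when only row/column coordinates are needed.
    Scan scan = new Scan();
    scan.setFilter(new KeyOnlyFilter());

    HTable table = new HTable(conf, "myTable");   // placeholder table name
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        for (KeyValue kv : r.raw()) {
          System.out.println(Bytes.toStringBinary(kv.getRow()) + "/"
              + Bytes.toStringBinary(kv.getQualifier())
              + " valueLength=" + kv.getValueLength());
        }
      }
    } finally {
      scanner.close();
      table.close();
    }
  }
}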
+ */ +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.TreeSet; +import java.util.ArrayList; + +/** + * This filter is used for selecting only those keys with columns that matches + * a particular prefix. For example, if prefix is 'an', it will pass keys will + * columns like 'and', 'anti' but not keys with columns like 'ball', 'act'. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class MultipleColumnPrefixFilter extends FilterBase { + protected byte [] hint = null; + protected TreeSet sortedPrefixes = createTreeSet(); + private final static int MAX_LOG_PREFIXES = 5; + + public MultipleColumnPrefixFilter(final byte [][] prefixes) { + if (prefixes != null) { + for (int i = 0; i < prefixes.length; i++) { + if (!sortedPrefixes.add(prefixes[i])) + throw new IllegalArgumentException ("prefixes must be distinct"); + } + } + } + + public byte [][] getPrefix() { + int count = 0; + byte [][] temp = new byte [sortedPrefixes.size()][]; + for (byte [] prefixes : sortedPrefixes) { + temp [count++] = prefixes; + } + return temp; + } + + @Override + public ReturnCode filterKeyValue(KeyValue kv) { + if (sortedPrefixes.size() == 0 || kv.getBuffer() == null) { + return ReturnCode.INCLUDE; + } else { + return filterColumn(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength()); + } + } + + public ReturnCode filterColumn(byte[] buffer, int qualifierOffset, int qualifierLength) { + byte [] qualifier = Arrays.copyOfRange(buffer, qualifierOffset, + qualifierLength + qualifierOffset); + TreeSet lesserOrEqualPrefixes = + (TreeSet) sortedPrefixes.headSet(qualifier, true); + + if (lesserOrEqualPrefixes.size() != 0) { + byte [] largestPrefixSmallerThanQualifier = lesserOrEqualPrefixes.last(); + + if (Bytes.startsWith(qualifier, largestPrefixSmallerThanQualifier)) { + return ReturnCode.INCLUDE; + } + + if (lesserOrEqualPrefixes.size() == sortedPrefixes.size()) { + return ReturnCode.NEXT_ROW; + } else { + hint = sortedPrefixes.higher(largestPrefixSmallerThanQualifier); + return ReturnCode.SEEK_NEXT_USING_HINT; + } + } else { + hint = sortedPrefixes.first(); + return ReturnCode.SEEK_NEXT_USING_HINT; + } + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + byte [][] prefixes = new byte [filterArguments.size()][]; + for (int i = 0 ; i < filterArguments.size(); i++) { + byte [] columnPrefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(i)); + prefixes[i] = columnPrefix; + } + return new MultipleColumnPrefixFilter(prefixes); + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.MultipleColumnPrefixFilter.Builder builder = + FilterProtos.MultipleColumnPrefixFilter.newBuilder(); + for (byte [] element : sortedPrefixes) { + if (element != null) builder.addSortedPrefixes(ByteString.copyFrom(element)); + } + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link MultipleColumnPrefixFilter} instance + * @return An instance of {@link MultipleColumnPrefixFilter} made from 
bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static MultipleColumnPrefixFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.MultipleColumnPrefixFilter proto; + try { + proto = FilterProtos.MultipleColumnPrefixFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + int numPrefixes = proto.getSortedPrefixesCount(); + byte [][] prefixes = new byte[numPrefixes][]; + for (int i = 0; i < numPrefixes; ++i) { + prefixes[i] = proto.getSortedPrefixes(i).toByteArray(); + } + + return new MultipleColumnPrefixFilter(prefixes); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof MultipleColumnPrefixFilter)) return false; + + MultipleColumnPrefixFilter other = (MultipleColumnPrefixFilter)o; + return this.sortedPrefixes.equals(other.sortedPrefixes); + } + + public KeyValue getNextKeyHint(KeyValue kv) { + return KeyValue.createFirstOnRow( + kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(), kv.getBuffer(), + kv.getFamilyOffset(), kv.getFamilyLength(), hint, 0, hint.length); + } + + public TreeSet createTreeSet() { + return new TreeSet(new Comparator() { + @Override + public int compare (Object o1, Object o2) { + if (o1 == null || o2 == null) + throw new IllegalArgumentException ("prefixes can't be null"); + + byte [] b1 = (byte []) o1; + byte [] b2 = (byte []) o2; + return Bytes.compareTo (b1, 0, b1.length, b2, 0, b2.length); + } + }); + } + + @Override + public String toString() { + return toString(MAX_LOG_PREFIXES); + } + + protected String toString(int maxPrefixes) { + StringBuilder prefixes = new StringBuilder(); + + int count = 0; + for (byte[] ba : this.sortedPrefixes) { + if (count >= maxPrefixes) { + break; + } + ++count; + prefixes.append(Bytes.toStringBinary(ba)); + if (count < this.sortedPrefixes.size() && count < maxPrefixes) { + prefixes.append(", "); + } + } + + return String.format("%s (%d/%d): [%s]", this.getClass().getSimpleName(), + count, this.sortedPrefixes.size(), prefixes.toString()); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java new file mode 100644 index 0000000..d944d3e --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java @@ -0,0 +1,88 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
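A small usage sketch for the MultipleColumnPrefixFilter above, keeping only columns whose qualifiers start with the illustrative prefixes "an" or "be"; table name is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class MultipleColumnPrefixFilterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Non-matching columns are skipped via SEEK_NEXT_USING_HINT, as implemented above,
    // so the scan can fast-forward past large runs of unwanted qualifiers.
    byte[][] prefixes = new byte[][] { Bytes.toBytes("an"), Bytes.toBytes("be") };
    Scan scan = new Scan();
    scan.setFilter(new MultipleColumnPrefixFilter(prefixes));

    HTable table = new HTable(conf, "myTable");   // placeholder table name
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        System.out.println(r);
      }
    } finally {
      scanner.close();
      table.close();
    }
  }
}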
+ */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * A binary comparator which lexicographically compares against the specified + * byte array using {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class NullComparator extends ByteArrayComparable { + + public NullComparator() { + super(new byte[0]); + } + + @Override + public int compareTo(byte[] value) { + return value != null ? 1 : 0; + } + + @Override + public int compareTo(byte[] value, int offset, int length) { + throw new UnsupportedOperationException(); + } + + /** + * @return The comparator serialized using pb + */ + public byte [] toByteArray() { + ComparatorProtos.NullComparator.Builder builder = + ComparatorProtos.NullComparator.newBuilder(); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link NullComparator} instance + * @return An instance of {@link NullComparator} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static NullComparator parseFrom(final byte [] pbBytes) + throws DeserializationException { + ComparatorProtos.NullComparator proto; + try { + proto = ComparatorProtos.NullComparator.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new NullComparator(); + } + + /** + * @param other + * @return true if and only if the fields of the comparator that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(ByteArrayComparable other) { + if (other == this) return true; + if (!(other instanceof NullComparator)) return false; + + return super.areSerializedFieldsEqual(other); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java new file mode 100644 index 0000000..2e46288 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java @@ -0,0 +1,126 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
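A tiny sketch of the protobuf round trip that the NullComparator above, and every other filter and comparator in this patch, follows: toByteArray() produces the wire form and the static parseFrom() rebuilds the instance (areSerializedFieldsEqual() is the package-private helper tests use to verify the pair):

import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.filter.NullComparator;

public class ComparatorPbRoundTrip {
  public static void main(String[] args) throws DeserializationException {
    // Serialize to protobuf bytes, then rebuild the comparator from those bytes.
    NullComparator original = new NullComparator();
    byte[] wireFormat = original.toByteArray();
    NullComparator restored = NullComparator.parseFrom(wireFormat);
    System.out.println("round trip ok: " + (restored != null));
  }
}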
+ */ +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import java.util.ArrayList; + +import com.google.common.base.Preconditions; +import com.google.protobuf.InvalidProtocolBufferException; +/** + * Implementation of Filter interface that limits results to a specific page + * size. It terminates scanning once the number of filter-passed rows is > + * the given page size. + *

      + * Note that this filter cannot guarantee that the number of results returned + * to a client are <= page size. This is because the filter is applied + * separately on different region servers. It does however optimize the scan of + * individual HRegions by making sure that the page size is never exceeded + * locally. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class PageFilter extends FilterBase { + private long pageSize = Long.MAX_VALUE; + private int rowsAccepted = 0; + + /** + * Constructor that takes a maximum page size. + * + * @param pageSize Maximum result size. + */ + public PageFilter(final long pageSize) { + Preconditions.checkArgument(pageSize >= 0, "must be positive %s", pageSize); + this.pageSize = pageSize; + } + + public long getPageSize() { + return pageSize; + } + + public boolean filterAllRemaining() { + return this.rowsAccepted >= this.pageSize; + } + + public boolean filterRow() { + this.rowsAccepted++; + return this.rowsAccepted > this.pageSize; + } + + public boolean hasFilterRow() { + return true; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, + "Expected 1 but got: %s", filterArguments.size()); + long pageSize = ParseFilter.convertByteArrayToLong(filterArguments.get(0)); + return new PageFilter(pageSize); + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.PageFilter.Builder builder = + FilterProtos.PageFilter.newBuilder(); + builder.setPageSize(this.pageSize); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link PageFilter} instance + * @return An instance of {@link PageFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static PageFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.PageFilter proto; + try { + proto = FilterProtos.PageFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new PageFilter(proto.getPageSize()); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof PageFilter)) return false; + + PageFilter other = (PageFilter)o; + return this.getPageSize() == other.getPageSize(); + } + + @Override + public String toString() { + return this.getClass().getSimpleName() + " " + this.pageSize; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java new file mode 100644 index 0000000..449104c --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java @@ -0,0 +1,263 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
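An illustrative sketch for the PageFilter above; since the row count is tracked per region, the client re-checks the limit itself (table name and page size are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PageFilter;

public class PageFilterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    final long pageSize = 10;

    Scan scan = new Scan();
    scan.setFilter(new PageFilter(pageSize));

    HTable table = new HTable(conf, "myTable");   // placeholder table name
    ResultScanner scanner = table.getScanner(scan);
    try {
      // The filter is evaluated separately on each region server, so more than
      // pageSize rows can still reach the client; enforce the limit here as well.
      long seen = 0;
      for (Result r : scanner) {
        if (++seen > pageSize) break;
        System.out.println(r);
      }
    } finally {
      scanner.close();
      table.close();
    }
  }
}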
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.filter; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * ParseConstants holds a bunch of constants related to parsing Filter Strings + * Used by {@link ParseFilter} + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public final class ParseConstants { + + /** + * ASCII code for LPAREN + */ + public static final int LPAREN = '('; + + /** + * ASCII code for RPAREN + */ + public static final int RPAREN = ')'; + + /** + * ASCII code for whitespace + */ + public static final int WHITESPACE = ' '; + + /** + * ASCII code for tab + */ + public static final int TAB = '\t'; + + /** + * ASCII code for 'A' + */ + public static final int A = 'A'; + + /** + * ASCII code for 'N' + */ + public static final int N = 'N'; + + /** + * ASCII code for 'D' + */ + public static final int D = 'D'; + + /** + * ASCII code for 'O' + */ + public static final int O = 'O'; + + /** + * ASCII code for 'R' + */ + public static final int R = 'R'; + + /** + * ASCII code for 'S' + */ + public static final int S = 'S'; + + /** + * ASCII code for 'K' + */ + public static final int K = 'K'; + + /** + * ASCII code for 'I' + */ + public static final int I = 'I'; + + /** + * ASCII code for 'P' + */ + public static final int P = 'P'; + + /** + * SKIP Array + */ + public static final byte [] SKIP_ARRAY = new byte [ ] {'S', 'K', 'I', 'P'}; + public static final ByteBuffer SKIP_BUFFER = ByteBuffer.wrap(SKIP_ARRAY); + + /** + * ASCII code for 'W' + */ + public static final int W = 'W'; + + /** + * ASCII code for 'H' + */ + public static final int H = 'H'; + + /** + * ASCII code for 'L' + */ + public static final int L = 'L'; + + /** + * ASCII code for 'E' + */ + public static final int E = 'E'; + + /** + * WHILE Array + */ + public static final byte [] WHILE_ARRAY = new byte [] {'W', 'H', 'I', 'L', 'E'}; + public static final ByteBuffer WHILE_BUFFER = ByteBuffer.wrap(WHILE_ARRAY); + + /** + * OR Array + */ + public static final byte [] OR_ARRAY = new byte [] {'O','R'}; + public static final ByteBuffer OR_BUFFER = ByteBuffer.wrap(OR_ARRAY); + + /** + * AND Array + */ + public static final byte [] AND_ARRAY = new byte [] {'A','N', 'D'}; + public static final ByteBuffer AND_BUFFER = ByteBuffer.wrap(AND_ARRAY); + + /** + * ASCII code for Backslash + */ + public static final int BACKSLASH = '\\'; + + /** + * ASCII code for a single quote + */ + public static final int SINGLE_QUOTE = '\''; + + /** + * ASCII code for a comma + */ + public static final int COMMA = ','; + + /** + * LESS_THAN Array + */ + public static final byte [] LESS_THAN_ARRAY = new byte [] {'<'}; + public static final ByteBuffer LESS_THAN_BUFFER = ByteBuffer.wrap(LESS_THAN_ARRAY); + + /** + * LESS_THAN_OR_EQUAL_TO Array + */ + public static final byte [] LESS_THAN_OR_EQUAL_TO_ARRAY = new byte [] {'<', '='}; + public static final ByteBuffer LESS_THAN_OR_EQUAL_TO_BUFFER = + ByteBuffer.wrap(LESS_THAN_OR_EQUAL_TO_ARRAY); + + /** + * GREATER_THAN Array + */ + public static final byte [] GREATER_THAN_ARRAY 
= new byte [] {'>'}; + public static final ByteBuffer GREATER_THAN_BUFFER = ByteBuffer.wrap(GREATER_THAN_ARRAY); + + /** + * GREATER_THAN_OR_EQUAL_TO Array + */ + public static final byte [] GREATER_THAN_OR_EQUAL_TO_ARRAY = new byte [] {'>', '='}; + public static final ByteBuffer GREATER_THAN_OR_EQUAL_TO_BUFFER = + ByteBuffer.wrap(GREATER_THAN_OR_EQUAL_TO_ARRAY); + + /** + * EQUAL_TO Array + */ + public static final byte [] EQUAL_TO_ARRAY = new byte [] {'='}; + public static final ByteBuffer EQUAL_TO_BUFFER = ByteBuffer.wrap(EQUAL_TO_ARRAY); + + /** + * NOT_EQUAL_TO Array + */ + public static final byte [] NOT_EQUAL_TO_ARRAY = new byte [] {'!', '='}; + public static final ByteBuffer NOT_EQUAL_TO_BUFFER = ByteBuffer.wrap(NOT_EQUAL_TO_ARRAY); + + /** + * ASCII code for equal to (=) + */ + public static final int EQUAL_TO = '='; + + /** + * AND Byte Array + */ + public static final byte [] AND = new byte [] {'A','N','D'}; + + /** + * OR Byte Array + */ + public static final byte [] OR = new byte [] {'O', 'R'}; + + /** + * LPAREN Array + */ + public static final byte [] LPAREN_ARRAY = new byte [] {'('}; + public static final ByteBuffer LPAREN_BUFFER = ByteBuffer.wrap(LPAREN_ARRAY); + + /** + * ASCII code for colon (:) + */ + public static final int COLON = ':'; + + /** + * ASCII code for Zero + */ + public static final int ZERO = '0'; + + /** + * ASCII code foe Nine + */ + public static final int NINE = '9'; + + /** + * BinaryType byte array + */ + public static final byte [] binaryType = new byte [] {'b','i','n','a','r','y'}; + + /** + * BinaryPrefixType byte array + */ + public static final byte [] binaryPrefixType = new byte [] {'b','i','n','a','r','y', + 'p','r','e','f','i','x'}; + + /** + * RegexStringType byte array + */ + public static final byte [] regexStringType = new byte [] {'r','e','g','e', 'x', + 's','t','r','i','n','g'}; + + /** + * SubstringType byte array + */ + public static final byte [] substringType = new byte [] {'s','u','b','s','t','r','i','n','g'}; + + /** + * ASCII for Minus Sign + */ + public static final int MINUS_SIGN = '-'; + + /** + * Package containing filters + */ + public static final String FILTER_PACKAGE = "org.apache.hadoop.hbase.filter"; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java new file mode 100644 index 0000000..02100d7 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -0,0 +1,859 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
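A hedged sketch of driving the filter language implemented by the ParseFilter class added below; the filter string is illustrative and uses only filters registered in ParseFilter's static block, combined with the AND/OR operators it recognizes:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.ParseFilter;

public class ParseFilterExample {
  public static void main(String[] args) throws Exception {
    // Filter names and arguments follow the registrations in ParseFilter's static block;
    // AND binds tighter than OR, and SKIP/WHILE each wrap a single filter.
    String filterString = "(PrefixFilter ('row') AND PageFilter (5)) OR KeyOnlyFilter ()";

    Filter parsed = new ParseFilter().parseFilterString(filterString);
    Scan scan = new Scan();
    scan.setFilter(parsed);
    System.out.println("parsed filter: " + parsed.getClass().getName());
  }
}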
+ */ +package org.apache.hadoop.hbase.filter; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.nio.ByteBuffer; +import java.nio.charset.CharacterCodingException; +import java.util.*; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * This class allows a user to specify a filter via a string + * The string is parsed using the methods of this class and + * a filter object is constructed. This filter object is then wrapped + * in a scanner object which is then returned + *

      + * This class addresses the HBASE-4168 JIRA. More documentaton on this + * Filter Language can be found at: https://issues.apache.org/jira/browse/HBASE-4176 + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ParseFilter { + private static final Log LOG = LogFactory.getLog(ParseFilter.class); + + private static HashMap operatorPrecedenceHashMap; + private static HashMap filterHashMap; + + static { + // Registers all the filter supported by the Filter Language + filterHashMap = new HashMap(); + filterHashMap.put("KeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." + + "KeyOnlyFilter"); + filterHashMap.put("FirstKeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." + + "FirstKeyOnlyFilter"); + filterHashMap.put("PrefixFilter", ParseConstants.FILTER_PACKAGE + "." + + "PrefixFilter"); + filterHashMap.put("ColumnPrefixFilter", ParseConstants.FILTER_PACKAGE + "." + + "ColumnPrefixFilter"); + filterHashMap.put("MultipleColumnPrefixFilter", ParseConstants.FILTER_PACKAGE + "." + + "MultipleColumnPrefixFilter"); + filterHashMap.put("ColumnCountGetFilter", ParseConstants.FILTER_PACKAGE + "." + + "ColumnCountGetFilter"); + filterHashMap.put("PageFilter", ParseConstants.FILTER_PACKAGE + "." + + "PageFilter"); + filterHashMap.put("ColumnPaginationFilter", ParseConstants.FILTER_PACKAGE + "." + + "ColumnPaginationFilter"); + filterHashMap.put("InclusiveStopFilter", ParseConstants.FILTER_PACKAGE + "." + + "InclusiveStopFilter"); + filterHashMap.put("TimestampsFilter", ParseConstants.FILTER_PACKAGE + "." + + "TimestampsFilter"); + filterHashMap.put("RowFilter", ParseConstants.FILTER_PACKAGE + "." + + "RowFilter"); + filterHashMap.put("FamilyFilter", ParseConstants.FILTER_PACKAGE + "." + + "FamilyFilter"); + filterHashMap.put("QualifierFilter", ParseConstants.FILTER_PACKAGE + "." + + "QualifierFilter"); + filterHashMap.put("ValueFilter", ParseConstants.FILTER_PACKAGE + "." + + "ValueFilter"); + filterHashMap.put("ColumnRangeFilter", ParseConstants.FILTER_PACKAGE + "." + + "ColumnRangeFilter"); + filterHashMap.put("SingleColumnValueFilter", ParseConstants.FILTER_PACKAGE + "." + + "SingleColumnValueFilter"); + filterHashMap.put("SingleColumnValueExcludeFilter", ParseConstants.FILTER_PACKAGE + "." + + "SingleColumnValueExcludeFilter"); + filterHashMap.put("DependentColumnFilter", ParseConstants.FILTER_PACKAGE + "." + + "DependentColumnFilter"); + + // Creates the operatorPrecedenceHashMap + operatorPrecedenceHashMap = new HashMap(); + operatorPrecedenceHashMap.put(ParseConstants.SKIP_BUFFER, 1); + operatorPrecedenceHashMap.put(ParseConstants.WHILE_BUFFER, 1); + operatorPrecedenceHashMap.put(ParseConstants.AND_BUFFER, 2); + operatorPrecedenceHashMap.put(ParseConstants.OR_BUFFER, 3); + } + + /** + * Parses the filterString and constructs a filter using it + *

      + * @param filterString filter string given by the user + * @return filter object we constructed + */ + public Filter parseFilterString (String filterString) + throws CharacterCodingException { + return parseFilterString(Bytes.toBytes(filterString)); + } + + /** + * Parses the filterString and constructs a filter using it + *

      + * @param filterStringAsByteArray filter string given by the user + * @return filter object we constructed + */ + public Filter parseFilterString (byte [] filterStringAsByteArray) + throws CharacterCodingException { + // stack for the operators and parenthesis + Stack operatorStack = new Stack(); + // stack for the filter objects + Stack filterStack = new Stack(); + + Filter filter = null; + for (int i=0; i + * A simpleFilterExpression is of the form: FilterName('arg', 'arg', 'arg') + * The user given filter string can have many simpleFilterExpressions combined + * using operators. + *

      + * This function extracts a simpleFilterExpression from the + * larger filterString given the start offset of the simpler expression + *

      + * @param filterStringAsByteArray filter string given by the user + * @param filterExpressionStartOffset start index of the simple filter expression + * @return byte array containing the simple filter expression + */ + public byte [] extractFilterSimpleExpression (byte [] filterStringAsByteArray, + int filterExpressionStartOffset) + throws CharacterCodingException { + int quoteCount = 0; + for (int i=filterExpressionStartOffset; i + * @param filterStringAsByteArray filter string given by the user + * @return filter object we constructed + */ + public Filter parseSimpleFilterExpression (byte [] filterStringAsByteArray) + throws CharacterCodingException { + + String filterName = Bytes.toString(getFilterName(filterStringAsByteArray)); + ArrayList filterArguments = getFilterArguments(filterStringAsByteArray); + if (!filterHashMap.containsKey(filterName)) { + throw new IllegalArgumentException("Filter Name " + filterName + " not supported"); + } + try { + filterName = filterHashMap.get(filterName); + Class c = Class.forName(filterName); + Class[] argTypes = new Class [] {ArrayList.class}; + Method m = c.getDeclaredMethod("createFilterFromArguments", argTypes); + return (Filter) m.invoke(null,filterArguments); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + } catch (NoSuchMethodException e) { + e.printStackTrace(); + } catch (IllegalAccessException e) { + e.printStackTrace(); + } catch (InvocationTargetException e) { + e.printStackTrace(); + } + throw new IllegalArgumentException("Incorrect filter string " + + new String(filterStringAsByteArray)); + } + +/** + * Returns the filter name given a simple filter expression + *

      + * @param filterStringAsByteArray a simple filter expression + * @return name of filter in the simple filter expression + */ + public static byte [] getFilterName (byte [] filterStringAsByteArray) { + int filterNameStartIndex = 0; + int filterNameEndIndex = 0; + + for (int i=filterNameStartIndex; i + * @param filterStringAsByteArray filter string given by the user + * @return an ArrayList containing the arguments of the filter in the filter string + */ + public static ArrayList getFilterArguments (byte [] filterStringAsByteArray) { + int argumentListStartIndex = KeyValue.getDelimiter(filterStringAsByteArray, 0, + filterStringAsByteArray.length, + ParseConstants.LPAREN); + if (argumentListStartIndex == -1) { + throw new IllegalArgumentException("Incorrect argument list"); + } + + int argumentStartIndex = 0; + int argumentEndIndex = 0; + ArrayList filterArguments = new ArrayList(); + + for (int i = argumentListStartIndex + 1; i, != etc + argumentStartIndex = i; + for (int j = argumentStartIndex; j < filterStringAsByteArray.length; j++) { + if (filterStringAsByteArray[j] == ParseConstants.WHITESPACE || + filterStringAsByteArray[j] == ParseConstants.COMMA || + filterStringAsByteArray[j] == ParseConstants.RPAREN) { + argumentEndIndex = j - 1; + i = j; + byte [] filterArgument = new byte [argumentEndIndex - argumentStartIndex + 1]; + Bytes.putBytes(filterArgument, 0, filterStringAsByteArray, + argumentStartIndex, argumentEndIndex - argumentStartIndex + 1); + filterArguments.add(filterArgument); + break; + } else if (j == filterStringAsByteArray.length - 1) { + throw new IllegalArgumentException("Incorrect argument list"); + } + } + } + } + return filterArguments; + } + +/** + * This function is called while parsing the filterString and an operator is parsed + *

      + * @param operatorStack the stack containing the operators and parenthesis + * @param filterStack the stack containing the filters + * @param operator the operator found while parsing the filterString + */ + public void reduce(Stack operatorStack, + Stack filterStack, + ByteBuffer operator) { + while (!operatorStack.empty() && + !(ParseConstants.LPAREN_BUFFER.equals(operatorStack.peek())) && + hasHigherPriority(operatorStack.peek(), operator)) { + filterStack.push(popArguments(operatorStack, filterStack)); + } + } + + /** + * Pops an argument from the operator stack and the number of arguments required by the operator + * from the filterStack and evaluates them + *

      + * @param operatorStack the stack containing the operators + * @param filterStack the stack containing the filters + * @return the evaluated filter + */ + public static Filter popArguments (Stack operatorStack, Stack filterStack) { + ByteBuffer argumentOnTopOfStack = operatorStack.peek(); + + if (argumentOnTopOfStack.equals(ParseConstants.OR_BUFFER)) { + // The top of the stack is an OR + try { + ArrayList listOfFilters = new ArrayList(); + while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.OR_BUFFER)) { + Filter filter = filterStack.pop(); + listOfFilters.add(0, filter); + operatorStack.pop(); + } + Filter filter = filterStack.pop(); + listOfFilters.add(0, filter); + Filter orFilter = new FilterList(FilterList.Operator.MUST_PASS_ONE, listOfFilters); + return orFilter; + } catch (EmptyStackException e) { + throw new IllegalArgumentException("Incorrect input string - an OR needs two filters"); + } + + } else if (argumentOnTopOfStack.equals(ParseConstants.AND_BUFFER)) { + // The top of the stack is an AND + try { + ArrayList listOfFilters = new ArrayList(); + while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.AND_BUFFER)) { + Filter filter = filterStack.pop(); + listOfFilters.add(0, filter); + operatorStack.pop(); + } + Filter filter = filterStack.pop(); + listOfFilters.add(0, filter); + Filter andFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, listOfFilters); + return andFilter; + } catch (EmptyStackException e) { + throw new IllegalArgumentException("Incorrect input string - an AND needs two filters"); + } + + } else if (argumentOnTopOfStack.equals(ParseConstants.SKIP_BUFFER)) { + // The top of the stack is a SKIP + try { + Filter wrappedFilter = filterStack.pop(); + Filter skipFilter = new SkipFilter(wrappedFilter); + operatorStack.pop(); + return skipFilter; + } catch (EmptyStackException e) { + throw new IllegalArgumentException("Incorrect input string - a SKIP wraps a filter"); + } + + } else if (argumentOnTopOfStack.equals(ParseConstants.WHILE_BUFFER)) { + // The top of the stack is a WHILE + try { + Filter wrappedFilter = filterStack.pop(); + Filter whileMatchFilter = new WhileMatchFilter(wrappedFilter); + operatorStack.pop(); + return whileMatchFilter; + } catch (EmptyStackException e) { + throw new IllegalArgumentException("Incorrect input string - a WHILE wraps a filter"); + } + + } else if (argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER)) { + // The top of the stack is a LPAREN + try { + Filter filter = filterStack.pop(); + operatorStack.pop(); + return filter; + } catch (EmptyStackException e) { + throw new IllegalArgumentException("Incorrect Filter String"); + } + + } else { + throw new IllegalArgumentException("Incorrect arguments on operatorStack"); + } + } + +/** + * Returns which operator has higher precedence + *

      + * If a has higher precedence than b, it returns true; + * if they have the same or lower precedence, it returns false + */ + public boolean hasHigherPriority(ByteBuffer a, ByteBuffer b) { + if ((operatorPrecedenceHashMap.get(a) - operatorPrecedenceHashMap.get(b)) < 0) { + return true; + } + return false; + } + +/** + * Removes the single quote that escapes another single quote - thus it returns an unescaped argument + *

      + * @param filterStringAsByteArray filter string given by user + * @param argumentStartIndex start index of the argument + * @param argumentEndIndex end index of the argument + * @return returns an unescaped argument + */ + public static byte [] createUnescapdArgument (byte [] filterStringAsByteArray, + int argumentStartIndex, int argumentEndIndex) { + int unescapedArgumentLength = 2; + for (int i = argumentStartIndex + 1; i <= argumentEndIndex - 1; i++) { + unescapedArgumentLength ++; + if (filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE && + i != (argumentEndIndex - 1) && + filterStringAsByteArray[i+1] == ParseConstants.SINGLE_QUOTE) { + i++; + continue; + } + } + + byte [] unescapedArgument = new byte [unescapedArgumentLength]; + int count = 1; + unescapedArgument[0] = '\''; + for (int i = argumentStartIndex + 1; i <= argumentEndIndex - 1; i++) { + if (filterStringAsByteArray [i] == ParseConstants.SINGLE_QUOTE && + i != (argumentEndIndex - 1) && + filterStringAsByteArray [i+1] == ParseConstants.SINGLE_QUOTE) { + unescapedArgument[count++] = filterStringAsByteArray [i+1]; + i++; + } + else { + unescapedArgument[count++] = filterStringAsByteArray [i]; + } + } + unescapedArgument[unescapedArgumentLength - 1] = '\''; + return unescapedArgument; + } + +/** + * Checks if the current index of filter string we are on is the beginning of the keyword 'OR' + *

      + * @param filterStringAsByteArray filter string given by the user + * @param indexOfOr index at which an 'O' was read + * @return true if the keyword 'OR' is at the current index + */ + public static boolean checkForOr (byte [] filterStringAsByteArray, int indexOfOr) + throws CharacterCodingException, ArrayIndexOutOfBoundsException { + + try { + if (filterStringAsByteArray[indexOfOr] == ParseConstants.O && + filterStringAsByteArray[indexOfOr+1] == ParseConstants.R && + (filterStringAsByteArray[indexOfOr-1] == ParseConstants.WHITESPACE || + filterStringAsByteArray[indexOfOr-1] == ParseConstants.RPAREN) && + (filterStringAsByteArray[indexOfOr+2] == ParseConstants.WHITESPACE || + filterStringAsByteArray[indexOfOr+2] == ParseConstants.LPAREN)) { + return true; + } else { + return false; + } + } catch (ArrayIndexOutOfBoundsException e) { + return false; + } + } + +/** + * Checks if the current index of filter string we are on is the beginning of the keyword 'AND' + *

      + * @param filterStringAsByteArray filter string given by the user + * @param indexOfAnd index at which an 'A' was read + * @return true if the keyword 'AND' is at the current index + */ + public static boolean checkForAnd (byte [] filterStringAsByteArray, int indexOfAnd) + throws CharacterCodingException { + + try { + if (filterStringAsByteArray[indexOfAnd] == ParseConstants.A && + filterStringAsByteArray[indexOfAnd+1] == ParseConstants.N && + filterStringAsByteArray[indexOfAnd+2] == ParseConstants.D && + (filterStringAsByteArray[indexOfAnd-1] == ParseConstants.WHITESPACE || + filterStringAsByteArray[indexOfAnd-1] == ParseConstants.RPAREN) && + (filterStringAsByteArray[indexOfAnd+3] == ParseConstants.WHITESPACE || + filterStringAsByteArray[indexOfAnd+3] == ParseConstants.LPAREN)) { + return true; + } else { + return false; + } + } catch (ArrayIndexOutOfBoundsException e) { + return false; + } + } + +/** + * Checks if the current index of filter string we are on is the beginning of the keyword 'SKIP' + *

      + * @param filterStringAsByteArray filter string given by the user + * @param indexOfSkip index at which an 'S' was read + * @return true if the keyword 'SKIP' is at the current index + */ + public static boolean checkForSkip (byte [] filterStringAsByteArray, int indexOfSkip) + throws CharacterCodingException { + + try { + if (filterStringAsByteArray[indexOfSkip] == ParseConstants.S && + filterStringAsByteArray[indexOfSkip+1] == ParseConstants.K && + filterStringAsByteArray[indexOfSkip+2] == ParseConstants.I && + filterStringAsByteArray[indexOfSkip+3] == ParseConstants.P && + (indexOfSkip == 0 || + filterStringAsByteArray[indexOfSkip-1] == ParseConstants.WHITESPACE || + filterStringAsByteArray[indexOfSkip-1] == ParseConstants.RPAREN || + filterStringAsByteArray[indexOfSkip-1] == ParseConstants.LPAREN) && + (filterStringAsByteArray[indexOfSkip+4] == ParseConstants.WHITESPACE || + filterStringAsByteArray[indexOfSkip+4] == ParseConstants.LPAREN)) { + return true; + } else { + return false; + } + } catch (ArrayIndexOutOfBoundsException e) { + return false; + } + } + +/** + * Checks if the current index of filter string we are on is the beginning of the keyword 'WHILE' + *

      + * @param filterStringAsByteArray filter string given by the user + * @param indexOfWhile index at which an 'W' was read + * @return true if the keyword 'WHILE' is at the current index + */ + public static boolean checkForWhile (byte [] filterStringAsByteArray, int indexOfWhile) + throws CharacterCodingException { + + try { + if (filterStringAsByteArray[indexOfWhile] == ParseConstants.W && + filterStringAsByteArray[indexOfWhile+1] == ParseConstants.H && + filterStringAsByteArray[indexOfWhile+2] == ParseConstants.I && + filterStringAsByteArray[indexOfWhile+3] == ParseConstants.L && + filterStringAsByteArray[indexOfWhile+4] == ParseConstants.E && + (indexOfWhile == 0 || filterStringAsByteArray[indexOfWhile-1] == ParseConstants.WHITESPACE + || filterStringAsByteArray[indexOfWhile-1] == ParseConstants.RPAREN || + filterStringAsByteArray[indexOfWhile-1] == ParseConstants.LPAREN) && + (filterStringAsByteArray[indexOfWhile+5] == ParseConstants.WHITESPACE || + filterStringAsByteArray[indexOfWhile+5] == ParseConstants.LPAREN)) { + return true; + } else { + return false; + } + } catch (ArrayIndexOutOfBoundsException e) { + return false; + } + } + +/** + * Returns a boolean indicating whether the quote was escaped or not + *

      + * @param array byte array in which the quote was found + * @param quoteIndex index of the single quote + * @return returns true if the quote was unescaped + */ + public static boolean isQuoteUnescaped (byte [] array, int quoteIndex) { + if (array == null) { + throw new IllegalArgumentException("isQuoteUnescaped called with a null array"); + } + + if (quoteIndex == array.length - 1 || array[quoteIndex+1] != ParseConstants.SINGLE_QUOTE) { + return true; + } + else { + return false; + } + } + +/** + * Takes a quoted byte array and converts it into an unquoted byte array + * For example: given a byte array representing 'abc', it returns a + * byte array representing abc + *

      + * @param quotedByteArray the quoted byte array + * @return Unquoted byte array + */ + public static byte [] removeQuotesFromByteArray (byte [] quotedByteArray) { + if (quotedByteArray == null || + quotedByteArray.length < 2 || + quotedByteArray[0] != ParseConstants.SINGLE_QUOTE || + quotedByteArray[quotedByteArray.length - 1] != ParseConstants.SINGLE_QUOTE) { + throw new IllegalArgumentException("removeQuotesFromByteArray needs a quoted byte array"); + } else { + byte [] targetString = new byte [quotedByteArray.length - 2]; + Bytes.putBytes(targetString, 0, quotedByteArray, 1, quotedByteArray.length - 2); + return targetString; + } + } + +/** + * Converts an int expressed in a byte array to an actual int + *
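[Editor note] A short sketch of the quote handling above (the literal strings are illustrative): createUnescapdArgument collapses a doubled single quote while keeping the outer quotes, and removeQuotesFromByteArray then strips those outer quotes:

    import org.apache.hadoop.hbase.filter.ParseFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class QuoteDemo {                                     // hypothetical demo class
      public static void main(String[] args) {
        byte[] escaped = Bytes.toBytes("'it''s'");               // as it appears in a filter string
        byte[] unescaped = ParseFilter.createUnescapdArgument(escaped, 0, escaped.length - 1);
        // unescaped now holds the bytes for 'it's' (outer quotes retained)
        byte[] plain = ParseFilter.removeQuotesFromByteArray(unescaped);
        System.out.println(Bytes.toString(plain));               // prints: it's
      }
    }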

      + * This doesn't use Bytes.toInt because that assumes + * that there will be {@link Bytes#SIZEOF_INT} bytes available. + *

      + * @param numberAsByteArray the int value expressed as a byte array + * @return the int value + */ + public static int convertByteArrayToInt (byte [] numberAsByteArray) { + + long tempResult = ParseFilter.convertByteArrayToLong(numberAsByteArray); + + if (tempResult > Integer.MAX_VALUE) { + throw new IllegalArgumentException("Integer Argument too large"); + } else if (tempResult < Integer.MIN_VALUE) { + throw new IllegalArgumentException("Integer Argument too small"); + } + + int result = (int) tempResult; + return result; + } + +/** + * Converts a long expressed in a byte array to an actual long + *

      + * This doesn't use Bytes.toLong because that assumes + * that there will be {@link Bytes#SIZEOF_LONG} bytes available. + *

      + * @param numberAsByteArray the long value expressed as a byte array + * @return the long value + */ + public static long convertByteArrayToLong (byte [] numberAsByteArray) { + if (numberAsByteArray == null) { + throw new IllegalArgumentException("convertByteArrayToLong called with a null array"); + } + + int i = 0; + long result = 0; + boolean isNegative = false; + + if (numberAsByteArray[i] == ParseConstants.MINUS_SIGN) { + i++; + isNegative = true; + } + + while (i != numberAsByteArray.length) { + if (numberAsByteArray[i] < ParseConstants.ZERO || + numberAsByteArray[i] > ParseConstants.NINE) { + throw new IllegalArgumentException("Byte Array should only contain digits"); + } + result = result*10 + (numberAsByteArray[i] - ParseConstants.ZERO); + if (result < 0) { + throw new IllegalArgumentException("Long Argument too large"); + } + i++; + } + + if (isNegative) { + return -result; + } else { + return result; + } + } + +/** + * Converts a boolean expressed in a byte array to an actual boolean + *

      + * This doesn't use Bytes.toBoolean because Bytes.toBoolean(byte []) + * assumes that 1 stands for true and 0 for false. + * Here, the byte array representing "true" or "false" is parsed instead + *

      + * @param booleanAsByteArray the boolean value expressed as a byte array + * @return the boolean value + */ + public static boolean convertByteArrayToBoolean (byte [] booleanAsByteArray) { + if (booleanAsByteArray == null) { + throw new IllegalArgumentException("convertByteArrayToBoolean called with a null array"); + } + + if (booleanAsByteArray.length == 4 && + (booleanAsByteArray[0] == 't' || booleanAsByteArray[0] == 'T') && + (booleanAsByteArray[1] == 'r' || booleanAsByteArray[1] == 'R') && + (booleanAsByteArray[2] == 'u' || booleanAsByteArray[2] == 'U') && + (booleanAsByteArray[3] == 'e' || booleanAsByteArray[3] == 'E')) { + return true; + } + else if (booleanAsByteArray.length == 5 && + (booleanAsByteArray[0] == 'f' || booleanAsByteArray[0] == 'F') && + (booleanAsByteArray[1] == 'a' || booleanAsByteArray[1] == 'A') && + (booleanAsByteArray[2] == 'l' || booleanAsByteArray[2] == 'L') && + (booleanAsByteArray[3] == 's' || booleanAsByteArray[3] == 'S') && + (booleanAsByteArray[4] == 'e' || booleanAsByteArray[4] == 'E')) { + return false; + } + else { + throw new IllegalArgumentException("Incorrect Boolean Expression"); + } + } + +/** + * Takes a compareOperator symbol as a byte array and returns the corresponding CompareOperator + *
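[Editor note] A quick sketch of the three byte-array conversion helpers above on literal inputs (the values are chosen purely for illustration):

    import org.apache.hadoop.hbase.filter.ParseFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ConvertDemo {                                                    // hypothetical demo class
      public static void main(String[] args) {
        long l = ParseFilter.convertByteArrayToLong(Bytes.toBytes("-123"));       // -123L
        int i = ParseFilter.convertByteArrayToInt(Bytes.toBytes("42"));           // 42
        boolean b = ParseFilter.convertByteArrayToBoolean(Bytes.toBytes("TRUE")); // true (case-insensitive)
        System.out.println(l + " " + i + " " + b);
        // Anything that is not all digits (with an optional leading '-'), or not
        // "true"/"false", is rejected with an IllegalArgumentException.
      }
    }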

      + * @param compareOpAsByteArray the comparatorOperator symbol as a byte array + * @return the Compare Operator + */ + public static CompareFilter.CompareOp createCompareOp (byte [] compareOpAsByteArray) { + ByteBuffer compareOp = ByteBuffer.wrap(compareOpAsByteArray); + if (compareOp.equals(ParseConstants.LESS_THAN_BUFFER)) + return CompareOp.LESS; + else if (compareOp.equals(ParseConstants.LESS_THAN_OR_EQUAL_TO_BUFFER)) + return CompareOp.LESS_OR_EQUAL; + else if (compareOp.equals(ParseConstants.GREATER_THAN_BUFFER)) + return CompareOp.GREATER; + else if (compareOp.equals(ParseConstants.GREATER_THAN_OR_EQUAL_TO_BUFFER)) + return CompareOp.GREATER_OR_EQUAL; + else if (compareOp.equals(ParseConstants.NOT_EQUAL_TO_BUFFER)) + return CompareOp.NOT_EQUAL; + else if (compareOp.equals(ParseConstants.EQUAL_TO_BUFFER)) + return CompareOp.EQUAL; + else + throw new IllegalArgumentException("Invalid compare operator"); + } + +/** + * Parses a comparator of the form comparatorType:comparatorValue form and returns a comparator + *

      + * @param comparator the comparator in the form comparatorType:comparatorValue + * @return the parsed comparator + */ + public static ByteArrayComparable createComparator (byte [] comparator) { + if (comparator == null) + throw new IllegalArgumentException("Incorrect Comparator"); + byte [][] parsedComparator = ParseFilter.parseComparator(comparator); + byte [] comparatorType = parsedComparator[0]; + byte [] comparatorValue = parsedComparator[1]; + + + if (Bytes.equals(comparatorType, ParseConstants.binaryType)) + return new BinaryComparator(comparatorValue); + else if (Bytes.equals(comparatorType, ParseConstants.binaryPrefixType)) + return new BinaryPrefixComparator(comparatorValue); + else if (Bytes.equals(comparatorType, ParseConstants.regexStringType)) + return new RegexStringComparator(new String(comparatorValue)); + else if (Bytes.equals(comparatorType, ParseConstants.substringType)) + return new SubstringComparator(new String(comparatorValue)); + else + throw new IllegalArgumentException("Incorrect comparatorType"); + } + +/** + * Splits a column in comparatorType:comparatorValue form into separate byte arrays + *
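[Editor note] The two factories above map the textual operator and the comparatorType:comparatorValue form of the filter language onto the client API. A small sketch, assuming the usual <, <=, =, !=, >, >= operator symbols and the "binary"/"binaryprefix"/"regexstring"/"substring" comparator type names; the inputs are illustrative:

    import org.apache.hadoop.hbase.filter.ByteArrayComparable;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.ParseFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FactoryDemo {                                              // hypothetical demo class
      public static void main(String[] args) {
        CompareOp op = ParseFilter.createCompareOp(Bytes.toBytes("<="));    // CompareOp.LESS_OR_EQUAL
        ByteArrayComparable cmp =
            ParseFilter.createComparator(Bytes.toBytes("binary:abc"));      // BinaryComparator over "abc"
        System.out.println(op + " " + cmp.getClass().getSimpleName());
      }
    }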

      + * @param comparator the comparator + * @return the parsed arguments of the comparator as a 2D byte array + */ + public static byte [][] parseComparator (byte [] comparator) { + final int index = KeyValue.getDelimiter(comparator, 0, comparator.length, ParseConstants.COLON); + if (index == -1) { + throw new IllegalArgumentException("Incorrect comparator"); + } + + byte [][] result = new byte [2][0]; + result[0] = new byte [index]; + System.arraycopy(comparator, 0, result[0], 0, index); + + final int len = comparator.length - (index + 1); + result[1] = new byte[len]; + System.arraycopy(comparator, index + 1, result[1], 0, len); + + return result; + } + +/** + * Return a Set of filters supported by the Filter Language + */ + public Set getSupportedFilters () { + return filterHashMap.keySet(); + } + + /** + * Returns all known filters + * @return an unmodifiable map of filters + */ + public static Map getAllFilters() { + return Collections.unmodifiableMap(filterHashMap); + } + + /** + * Register a new filter with the parser. If the filter is already registered, + * an IllegalArgumentException will be thrown. + * + * @param name a name for the filter + * @param filterClass fully qualified class name + */ + public static void registerFilter(String name, String filterClass) { + if(LOG.isInfoEnabled()) + LOG.info("Registering new filter " + name); + + filterHashMap.put(name, filterClass); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java new file mode 100644 index 0000000..9c37b3e --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java @@ -0,0 +1,123 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import java.util.ArrayList; + +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * Pass results that have same row prefix. 
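[Editor note] The registerFilter/getSupportedFilters methods shown above let an application expose its own filter class to the filter language. A sketch; the filter name and class below are hypothetical placeholders:

    import java.util.Set;
    import org.apache.hadoop.hbase.filter.ParseFilter;

    public class RegisterDemo {                                    // hypothetical demo class
      public static void main(String[] args) {
        // "MyFilter" / com.example.MyFilter stand in for an application-provided filter.
        ParseFilter.registerFilter("MyFilter", "com.example.MyFilter");
        Set<String> supported = new ParseFilter().getSupportedFilters();
        System.out.println(supported.contains("MyFilter"));        // true
      }
    }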
+ */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class PrefixFilter extends FilterBase { + protected byte [] prefix = null; + protected boolean passedPrefix = false; + + public PrefixFilter(final byte [] prefix) { + this.prefix = prefix; + } + + public byte[] getPrefix() { + return prefix; + } + + public boolean filterRowKey(byte[] buffer, int offset, int length) { + if (buffer == null || this.prefix == null) + return true; + if (length < prefix.length) + return true; + // if they are equal, return false => pass row + // else return true, filter row + // if we are passed the prefix, set flag + int cmp = Bytes.compareTo(buffer, offset, this.prefix.length, this.prefix, 0, + this.prefix.length); + if(cmp > 0) { + passedPrefix = true; + } + return cmp != 0; + } + + public boolean filterAllRemaining() { + return passedPrefix; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 1, + "Expected 1 but got: %s", filterArguments.size()); + byte [] prefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + return new PrefixFilter(prefix); + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.PrefixFilter.Builder builder = + FilterProtos.PrefixFilter.newBuilder(); + if (this.prefix != null) builder.setPrefix(ByteString.copyFrom(this.prefix)); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link PrefixFilter} instance + * @return An instance of {@link PrefixFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static PrefixFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.PrefixFilter proto; + try { + proto = FilterProtos.PrefixFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new PrefixFilter(proto.hasPrefix()?proto.getPrefix().toByteArray():null); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof PrefixFilter)) return false; + + PrefixFilter other = (PrefixFilter)o; + return Bytes.equals(this.getPrefix(), other.getPrefix()); + } + + @Override + public String toString() { + return this.getClass().getSimpleName() + " " + Bytes.toStringBinary(this.prefix); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java new file mode 100644 index 0000000..24dcb60 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java @@ -0,0 +1,129 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
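[Editor note] A minimal usage sketch for the PrefixFilter above, attached to a Scan; the row prefix is illustrative:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.PrefixFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PrefixScanDemo {                                    // hypothetical demo class
      public static Scan prefixScan() {
        Scan scan = new Scan(Bytes.toBytes("user-"));                // start at the prefix instead of the table start
        scan.setFilter(new PrefixFilter(Bytes.toBytes("user-")));    // stop passing rows once the prefix is passed
        return scan;
      }
    }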
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + +import java.io.IOException; +import java.util.ArrayList; + +/** + * This filter is used to filter based on the column qualifier. It takes an + * operator (equal, greater, not equal, etc) and a byte [] comparator for the + * column qualifier portion of a key. + *

      + * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter} + * to add more control. + *

      + * Multiple filters can be combined using {@link FilterList}. + *

      + * If an already known column qualifier is looked for, use {@link Get#addColumn} + * directly rather than a filter. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class QualifierFilter extends CompareFilter { + + /** + * Constructor. + * @param op the compare op for column qualifier matching + * @param qualifierComparator the comparator for column qualifier matching + */ + public QualifierFilter(final CompareOp op, + final ByteArrayComparable qualifierComparator) { + super(op, qualifierComparator); + } + + @Override + public ReturnCode filterKeyValue(KeyValue v) { + int qualifierLength = v.getQualifierLength(); + if (qualifierLength > 0) { + if (doCompare(this.compareOp, this.comparator, v.getBuffer(), + v.getQualifierOffset(), qualifierLength)) { + return ReturnCode.SKIP; + } + } + return ReturnCode.INCLUDE; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + ArrayList arguments = CompareFilter.extractArguments(filterArguments); + CompareOp compareOp = (CompareOp)arguments.get(0); + ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); + return new QualifierFilter(compareOp, comparator); + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.QualifierFilter.Builder builder = + FilterProtos.QualifierFilter.newBuilder(); + builder.setCompareFilter(super.convert()); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link QualifierFilter} instance + * @return An instance of {@link QualifierFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static QualifierFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.QualifierFilter proto; + try { + proto = FilterProtos.QualifierFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + final CompareOp valueCompareOp = + CompareOp.valueOf(proto.getCompareFilter().getCompareOp().name()); + ByteArrayComparable valueComparator = null; + try { + if (proto.getCompareFilter().hasComparator()) { + valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator()); + } + } catch (IOException ioe) { + throw new DeserializationException(ioe); + } + return new QualifierFilter(valueCompareOp,valueComparator); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof QualifierFilter)) return false; + + return super.areSerializedFieldsEqual(o); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java new file mode 100644 index 0000000..9724369 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java @@ -0,0 +1,150 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
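[Editor note] A short sketch of the QualifierFilter described above; the column names are illustrative, and, as the javadoc notes, Get#addColumn is preferable when the qualifier is already known:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.QualifierFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class QualifierScanDemo {                               // hypothetical demo class
      public static Scan qualifiersUpTo(String lastQualifier) {
        Scan scan = new Scan();
        // Keep only cells whose qualifier sorts at or before lastQualifier.
        scan.setFilter(new QualifierFilter(CompareOp.LESS_OR_EQUAL,
            new BinaryComparator(Bytes.toBytes(lastQualifier))));
        return scan;
      }
    }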
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import java.util.Random; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * A filter that includes rows based on a chance. + * + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class RandomRowFilter extends FilterBase { + protected static final Random random = new Random(); + + protected float chance; + protected boolean filterOutRow; + + /** + * Create a new filter with a specified chance for a row to be included. + * + * @param chance + */ + public RandomRowFilter(float chance) { + this.chance = chance; + } + + /** + * @return The chance that a row gets included. + */ + public float getChance() { + return chance; + } + + /** + * Set the chance that a row is included. + * + * @param chance + */ + public void setChance(float chance) { + this.chance = chance; + } + + @Override + public boolean filterAllRemaining() { + return false; + } + + @Override + public ReturnCode filterKeyValue(KeyValue v) { + if (filterOutRow) { + return ReturnCode.NEXT_ROW; + } + return ReturnCode.INCLUDE; + } + + @Override + public boolean filterRow() { + return filterOutRow; + } + + public boolean hasFilterRow() { + return true; + } + + @Override + public boolean filterRowKey(byte[] buffer, int offset, int length) { + if (chance < 0) { + // with a zero chance, the rows is always excluded + filterOutRow = true; + } else if (chance > 1) { + // always included + filterOutRow = false; + } else { + // roll the dice + filterOutRow = !(random.nextFloat() < chance); + } + return filterOutRow; + } + + @Override + public void reset() { + filterOutRow = false; + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.RandomRowFilter.Builder builder = + FilterProtos.RandomRowFilter.newBuilder(); + builder.setChance(this.chance); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link RandomRowFilter} instance + * @return An instance of {@link RandomRowFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static RandomRowFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.RandomRowFilter proto; + try { + proto = FilterProtos.RandomRowFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new RandomRowFilter(proto.getChance()); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. 
+ */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof RandomRowFilter)) return false; + + RandomRowFilter other = (RandomRowFilter)o; + return this.getChance() == other.getChance(); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java new file mode 100644 index 0000000..96c35c3 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java @@ -0,0 +1,174 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.google.protobuf.InvalidProtocolBufferException; + +import java.nio.charset.Charset; +import java.nio.charset.IllegalCharsetNameException; +import java.util.regex.Pattern; + +/** + * This comparator is for use with {@link CompareFilter} implementations, such + * as {@link RowFilter}, {@link QualifierFilter}, and {@link ValueFilter}, for + * filtering based on the value of a given column. Use it to test if a given + * regular expression matches a cell value in the column. + *
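[Editor note] The RandomRowFilter above is handy for sampling; a sketch that keeps roughly half of the scanned rows (the 0.5f chance is illustrative):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.RandomRowFilter;

    public class SampleScanDemo {                          // hypothetical demo class
      public static Scan halfSample() {
        Scan scan = new Scan();
        scan.setFilter(new RandomRowFilter(0.5f));         // each row is included with probability ~0.5
        return scan;
      }
    }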

      + * Only EQUAL or NOT_EQUAL comparisons are valid with this comparator. + *

      + * For example: + *

      + * <pre>

      + * ValueFilter vf = new ValueFilter(CompareOp.EQUAL,
      + *     new RegexStringComparator(
      + *       // v4 IP address
      + *       "(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3,3}" +
      + *         "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))(\\/[0-9]+)?" +
      + *         "|" +
      + *       // v6 IP address
      + *       "((([\\dA-Fa-f]{1,4}:){7}[\\dA-Fa-f]{1,4})(:([\\d]{1,3}.)" +
      + *         "{3}[\\d]{1,3})?)(\\/[0-9]+)?"));
      + * </pre>

      + * Supports {@link java.util.regex.Pattern} flags as well: + *

      + * <pre>

      + * ValueFilter vf = new ValueFilter(CompareOp.EQUAL,
      + *     new RegexStringComparator("regex", Pattern.CASE_INSENSITIVE | Pattern.DOTALL));
      + * </pre>
      + * @see java.util.regex.Pattern + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class RegexStringComparator extends ByteArrayComparable { + + private static final Log LOG = LogFactory.getLog(RegexStringComparator.class); + + private Charset charset = HConstants.UTF8_CHARSET; + + private Pattern pattern; + + /** + * Constructor + * Adds Pattern.DOTALL to the underlying Pattern + * @param expr a valid regular expression + */ + public RegexStringComparator(String expr) { + this(expr, Pattern.DOTALL); + } + + /** + * Constructor + * @param expr a valid regular expression + * @param flags java.util.regex.Pattern flags + */ + public RegexStringComparator(String expr, int flags) { + super(Bytes.toBytes(expr)); + this.pattern = Pattern.compile(expr, flags); + } + + /** + * Specifies the {@link Charset} to use to convert the row key to a String. + *

      + * The row key needs to be converted to a String in order to be matched + * against the regular expression. This method controls which charset is + * used to do this conversion. + *

      + * If the row key is made of arbitrary bytes, the charset {@code ISO-8859-1} + * is recommended. + * @param charset The charset to use. + */ + public void setCharset(final Charset charset) { + this.charset = charset; + } + + @Override + public int compareTo(byte[] value, int offset, int length) { + // Use find() for subsequence match instead of matches() (full sequence + // match) to adhere to the principle of least surprise. + return pattern.matcher(new String(value, offset, length, charset)).find() ? 0 + : 1; + } + + /** + * @return The comparator serialized using pb + */ + public byte [] toByteArray() { + ComparatorProtos.RegexStringComparator.Builder builder = + ComparatorProtos.RegexStringComparator.newBuilder(); + builder.setPattern(pattern.toString()); + builder.setPatternFlags(pattern.flags()); + builder.setCharset(charset.name()); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link RegexStringComparator} instance + * @return An instance of {@link RegexStringComparator} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static RegexStringComparator parseFrom(final byte [] pbBytes) + throws DeserializationException { + ComparatorProtos.RegexStringComparator proto; + try { + proto = ComparatorProtos.RegexStringComparator.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + + RegexStringComparator comparator = + new RegexStringComparator(proto.getPattern(), proto.getPatternFlags()); + final String charset = proto.getCharset(); + if (charset.length() > 0) { + try { + comparator.setCharset(Charset.forName(charset)); + } catch (IllegalCharsetNameException e) { + LOG.error("invalid charset", e); + } + } + return comparator; + } + + /** + * @param other + * @return true if and only if the fields of the comparator that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(ByteArrayComparable other) { + if (other == this) return true; + if (!(other instanceof RegexStringComparator)) return false; + + RegexStringComparator comparator = (RegexStringComparator)other; + return super.areSerializedFieldsEqual(comparator) + && this.pattern.toString().equals(comparator.pattern.toString()) + && this.pattern.flags() == comparator.pattern.flags() + && this.charset.equals(comparator.charset); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java new file mode 100644 index 0000000..0226a13 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java @@ -0,0 +1,144 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
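[Editor note] A sketch of the charset handling described above: when row keys are arbitrary bytes rather than UTF-8 text, switching the comparator to ISO-8859-1 keeps the byte-to-String conversion lossless. The pattern and the wrapping RowFilter are illustrative:

    import java.nio.charset.Charset;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.hbase.filter.RegexStringComparator;
    import org.apache.hadoop.hbase.filter.RowFilter;

    public class RegexCharsetDemo {                                    // hypothetical demo class
      public static Filter rowKeyRegex() {
        RegexStringComparator cmp = new RegexStringComparator("^row-[0-9]+");
        cmp.setCharset(Charset.forName("ISO-8859-1"));                 // lossless for arbitrary bytes
        return new RowFilter(CompareOp.EQUAL, cmp);
      }
    }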
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * This filter is used to filter based on the key. It takes an operator + * (equal, greater, not equal, etc) and a byte [] comparator for the row, + * and column qualifier portions of a key. + *

      + * This filter can be wrapped with {@link WhileMatchFilter} to add more control. + *

      + * Multiple filters can be combined using {@link FilterList}. + *

      + * If an already known row range needs to be scanned, use {@link Scan} start + * and stop rows directly rather than a filter. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class RowFilter extends CompareFilter { + + private boolean filterOutRow = false; + + /** + * Constructor. + * @param rowCompareOp the compare op for row matching + * @param rowComparator the comparator for row matching + */ + public RowFilter(final CompareOp rowCompareOp, + final ByteArrayComparable rowComparator) { + super(rowCompareOp, rowComparator); + } + + @Override + public void reset() { + this.filterOutRow = false; + } + + @Override + public ReturnCode filterKeyValue(KeyValue v) { + if(this.filterOutRow) { + return ReturnCode.NEXT_ROW; + } + return ReturnCode.INCLUDE; + } + + @Override + public boolean filterRowKey(byte[] data, int offset, int length) { + if(doCompare(this.compareOp, this.comparator, data, offset, length)) { + this.filterOutRow = true; + } + return this.filterOutRow; + } + + @Override + public boolean filterRow() { + return this.filterOutRow; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + ArrayList arguments = CompareFilter.extractArguments(filterArguments); + CompareOp compareOp = (CompareOp)arguments.get(0); + ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); + return new RowFilter(compareOp, comparator); + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.RowFilter.Builder builder = + FilterProtos.RowFilter.newBuilder(); + builder.setCompareFilter(super.convert()); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link RowFilter} instance + * @return An instance of {@link RowFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static RowFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.RowFilter proto; + try { + proto = FilterProtos.RowFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + final CompareOp valueCompareOp = + CompareOp.valueOf(proto.getCompareFilter().getCompareOp().name()); + ByteArrayComparable valueComparator = null; + try { + if (proto.getCompareFilter().hasComparator()) { + valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator()); + } + } catch (IOException ioe) { + throw new DeserializationException(ioe); + } + return new RowFilter(valueCompareOp,valueComparator); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof RowFilter)) return false; + + return super.areSerializedFieldsEqual(o); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java new file mode 100644 index 0000000..c838db5 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java @@ -0,0 +1,178 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
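[Editor note] A brief sketch of the RowFilter described above (comparator and excluded row are illustrative); as the javadoc says, a plain start/stop row on the Scan is preferable when the row range is already known:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.RowFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowFilterDemo {                                        // hypothetical demo class
      public static Scan rowsNotEqualTo(String excludedRow) {
        Scan scan = new Scan();
        // Pass every row except the one whose key equals excludedRow.
        scan.setFilter(new RowFilter(CompareOp.NOT_EQUAL,
            new BinaryComparator(Bytes.toBytes(excludedRow))));
        return scan;
      }
    }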
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + +import java.io.IOException; +import java.util.ArrayList; + +/** + * A {@link Filter} that checks a single column value, but does not emit the + * tested column. This will enable a performance boost over + * {@link SingleColumnValueFilter}, if the tested column value is not actually + * needed as input (besides for the filtering itself). + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter { + + /** + * Constructor for binary compare of the value of a single column. If the + * column is found and the condition passes, all columns of the row will be + * emitted; except for the tested column value. If the column is not found or + * the condition fails, the row will not be emitted. + * + * @param family name of column family + * @param qualifier name of column qualifier + * @param compareOp operator + * @param value value to compare column values against + */ + public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, + CompareOp compareOp, byte[] value) { + super(family, qualifier, compareOp, value); + } + + /** + * Constructor for binary compare of the value of a single column. If the + * column is found and the condition passes, all columns of the row will be + * emitted; except for the tested column value. If the condition fails, the + * row will not be emitted. + *
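[Editor note] A compact sketch of the SingleColumnValueExcludeFilter above; the family, qualifier, and value are illustrative. Matching rows come back without the tested cell itself:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ExcludeDemo {                                         // hypothetical demo class
      public static Scan activeUsersWithoutStatusCell() {
        Scan scan = new Scan();
        scan.setFilter(new SingleColumnValueExcludeFilter(
            Bytes.toBytes("cf"), Bytes.toBytes("status"),
            CompareOp.EQUAL, Bytes.toBytes("active")));                // rows pass, but cf:status is not returned
        return scan;
      }
    }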

      + * Use the filterIfColumnMissing flag to set whether the rest of the columns + * in a row will be emitted if the specified column to check is not found in + * the row. + * + * @param family name of column family + * @param qualifier name of column qualifier + * @param compareOp operator + * @param comparator Comparator to use. + */ + public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, + CompareOp compareOp, ByteArrayComparable comparator) { + super(family, qualifier, compareOp, comparator); + } + + /** + * Constructor for protobuf deserialization only. + * @param family + * @param qualifier + * @param compareOp + * @param comparator + * @param foundColumn + * @param matchedColumn + * @param filterIfMissing + * @param latestVersionOnly + */ + protected SingleColumnValueExcludeFilter(final byte[] family, final byte [] qualifier, + final CompareOp compareOp, ByteArrayComparable comparator, final boolean foundColumn, + final boolean matchedColumn, final boolean filterIfMissing, final boolean latestVersionOnly) { + super(family,qualifier,compareOp,comparator,foundColumn, + matchedColumn,filterIfMissing,latestVersionOnly); + } + + public ReturnCode filterKeyValue(KeyValue keyValue) { + ReturnCode superRetCode = super.filterKeyValue(keyValue); + if (superRetCode == ReturnCode.INCLUDE) { + // If the current column is actually the tested column, + // we will skip it instead. + if (keyValue.matchingColumn(this.columnFamily, this.columnQualifier)) { + return ReturnCode.SKIP; + } + } + return superRetCode; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + SingleColumnValueFilter tempFilter = (SingleColumnValueFilter) + SingleColumnValueFilter.createFilterFromArguments(filterArguments); + SingleColumnValueExcludeFilter filter = new SingleColumnValueExcludeFilter ( + tempFilter.getFamily(), tempFilter.getQualifier(), + tempFilter.getOperator(), tempFilter.getComparator()); + + if (filterArguments.size() == 6) { + filter.setFilterIfMissing(tempFilter.getFilterIfMissing()); + filter.setLatestVersionOnly(tempFilter.getLatestVersionOnly()); + } + return filter; + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.SingleColumnValueExcludeFilter.Builder builder = + FilterProtos.SingleColumnValueExcludeFilter.newBuilder(); + builder.setSingleColumnValueFilter(super.convert()); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link SingleColumnValueExcludeFilter} instance + * @return An instance of {@link SingleColumnValueExcludeFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static SingleColumnValueExcludeFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.SingleColumnValueExcludeFilter proto; + try { + proto = FilterProtos.SingleColumnValueExcludeFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + + FilterProtos.SingleColumnValueFilter parentProto = proto.getSingleColumnValueFilter(); + final CompareOp compareOp = + CompareOp.valueOf(parentProto.getCompareOp().name()); + final ByteArrayComparable comparator; + try { + comparator = ProtobufUtil.toComparator(parentProto.getComparator()); + } catch (IOException ioe) { + throw new DeserializationException(ioe); + } + + return new SingleColumnValueExcludeFilter( + parentProto.hasColumnFamily()?parentProto.getColumnFamily().toByteArray():null, + 
parentProto.hasColumnQualifier()?parentProto.getColumnQualifier().toByteArray():null, + compareOp, comparator, parentProto.getFoundColumn(),parentProto.getMatchedColumn(), + parentProto.getFilterIfMissing(),parentProto.getLatestVersionOnly()); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof SingleColumnValueExcludeFilter)) return false; + + return super.areSerializedFieldsEqual(o); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java new file mode 100644 index 0000000..f8f3da9 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java @@ -0,0 +1,389 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.IOException; +import java.util.ArrayList; + +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * This filter is used to filter cells based on value. It takes a {@link CompareFilter.CompareOp} + * operator (equal, greater, not equal, etc), and either a byte [] value or + * a ByteArrayComparable. + *

      + * If we have a byte [] value then we just do a lexicographic compare. For + * example, if the passed value is 'b' and the cell has 'a' and the compare operator + * is LESS, then we will filter out this cell (return true). If this is not + * sufficient (e.g. you want to deserialize a long and then compare it to a fixed + * long value), then you can pass in your own comparator instead. + *

      + * You must also specify a family and qualifier. Only the value of this column + * will be tested. When using this filter on a {@link Scan} with specified + * inputs, the column to be tested should also be added as input (otherwise + * the filter will regard the column as missing). + *

      + * To prevent the entire row from being emitted if the column is not found + * on a row, use {@link #setFilterIfMissing}. + * Otherwise, if the column is found, the entire row will be emitted only if + * the value passes. If the value fails, the row will be filtered out. + *

      + * In order to test values of previous versions (timestamps), set + * {@link #setLatestVersionOnly} to false. The default is true, meaning that + * only the latest version's value is tested and all previous versions are ignored. + *

      + * To filter based on the value of all scanned columns, use {@link ValueFilter}. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class SingleColumnValueFilter extends FilterBase { + static final Log LOG = LogFactory.getLog(SingleColumnValueFilter.class); + + protected byte [] columnFamily; + protected byte [] columnQualifier; + protected CompareOp compareOp; + protected ByteArrayComparable comparator; + protected boolean foundColumn = false; + protected boolean matchedColumn = false; + protected boolean filterIfMissing = false; + protected boolean latestVersionOnly = true; + + /** + * Constructor for binary compare of the value of a single column. If the + * column is found and the condition passes, all columns of the row will be + * emitted. If the condition fails, the row will not be emitted. + *
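[Editor note] Pulling the notes above together, a usage sketch for SingleColumnValueFilter; the family, qualifier, and value are illustrative. Note that the tested column is added to the Scan's inputs and that setFilterIfMissing(true) drops rows that do not carry the column at all:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SingleColumnDemo {                                     // hypothetical demo class
      public static Scan activeUsers() {
        byte[] cf = Bytes.toBytes("cf");
        byte[] qual = Bytes.toBytes("status");
        SingleColumnValueFilter f =
            new SingleColumnValueFilter(cf, qual, CompareOp.EQUAL, Bytes.toBytes("active"));
        f.setFilterIfMissing(true);      // otherwise rows without cf:status would pass untested
        f.setLatestVersionOnly(true);    // the default: only the newest version of the cell is checked
        Scan scan = new Scan();
        scan.addColumn(cf, qual);        // the tested column must be part of the scan's inputs
        scan.setFilter(f);
        return scan;
      }
    }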

      + * Use the filterIfColumnMissing flag to set whether the rest of the columns + * in a row will be emitted if the specified column to check is not found in + * the row. + * + * @param family name of column family + * @param qualifier name of column qualifier + * @param compareOp operator + * @param value value to compare column values against + */ + public SingleColumnValueFilter(final byte [] family, final byte [] qualifier, + final CompareOp compareOp, final byte[] value) { + this(family, qualifier, compareOp, new BinaryComparator(value)); + } + + /** + * Constructor for binary compare of the value of a single column. If the + * column is found and the condition passes, all columns of the row will be + * emitted. If the condition fails, the row will not be emitted. + *

      + * Use the filterIfColumnMissing flag to set whether the rest of the columns + * in a row will be emitted if the specified column to check is not found in + * the row. + * + * @param family name of column family + * @param qualifier name of column qualifier + * @param compareOp operator + * @param comparator Comparator to use. + */ + public SingleColumnValueFilter(final byte [] family, final byte [] qualifier, + final CompareOp compareOp, final ByteArrayComparable comparator) { + this.columnFamily = family; + this.columnQualifier = qualifier; + this.compareOp = compareOp; + this.comparator = comparator; + } + + /** + * Constructor for protobuf deserialization only. + * @param family + * @param qualifier + * @param compareOp + * @param comparator + * @param foundColumn + * @param matchedColumn + * @param filterIfMissing + * @param latestVersionOnly + */ + protected SingleColumnValueFilter(final byte[] family, final byte [] qualifier, + final CompareOp compareOp, ByteArrayComparable comparator, final boolean foundColumn, + final boolean matchedColumn, final boolean filterIfMissing, final boolean latestVersionOnly) { + this(family,qualifier,compareOp,comparator); + this.foundColumn = foundColumn; + this.matchedColumn = matchedColumn; + this.filterIfMissing = filterIfMissing; + this.latestVersionOnly = latestVersionOnly; + } + + /** + * @return operator + */ + public CompareOp getOperator() { + return compareOp; + } + + /** + * @return the comparator + */ + public ByteArrayComparable getComparator() { + return comparator; + } + + /** + * @return the family + */ + public byte[] getFamily() { + return columnFamily; + } + + /** + * @return the qualifier + */ + public byte[] getQualifier() { + return columnQualifier; + } + + public ReturnCode filterKeyValue(KeyValue keyValue) { + // System.out.println("REMOVE KEY=" + keyValue.toString() + ", value=" + Bytes.toString(keyValue.getValue())); + if (this.matchedColumn) { + // We already found and matched the single column, all keys now pass + return ReturnCode.INCLUDE; + } else if (this.latestVersionOnly && this.foundColumn) { + // We found but did not match the single column, skip to next row + return ReturnCode.NEXT_ROW; + } + if (!keyValue.matchingColumn(this.columnFamily, this.columnQualifier)) { + return ReturnCode.INCLUDE; + } + foundColumn = true; + if (filterColumnValue(keyValue.getBuffer(), + keyValue.getValueOffset(), keyValue.getValueLength())) { + return this.latestVersionOnly? ReturnCode.NEXT_ROW: ReturnCode.INCLUDE; + } + this.matchedColumn = true; + return ReturnCode.INCLUDE; + } + + private boolean filterColumnValue(final byte [] data, final int offset, + final int length) { + int compareResult = this.comparator.compareTo(data, offset, length); + switch (this.compareOp) { + case LESS: + return compareResult <= 0; + case LESS_OR_EQUAL: + return compareResult < 0; + case EQUAL: + return compareResult != 0; + case NOT_EQUAL: + return compareResult == 0; + case GREATER_OR_EQUAL: + return compareResult > 0; + case GREATER: + return compareResult >= 0; + default: + throw new RuntimeException("Unknown Compare op " + compareOp.name()); + } + } + + public boolean filterRow() { + // If column was found, return false if it was matched, true if it was not + // If column not found, return true if we filter if missing, false if not + return this.foundColumn? 
!this.matchedColumn: this.filterIfMissing; + } + + public boolean hasFilterRow() { + return true; + } + + public void reset() { + foundColumn = false; + matchedColumn = false; + } + + /** + * Get whether entire row should be filtered if column is not found. + * @return true if row should be skipped if column not found, false if row + * should be let through anyways + */ + public boolean getFilterIfMissing() { + return filterIfMissing; + } + + /** + * Set whether entire row should be filtered if column is not found. + *

      + * If true, the entire row will be skipped if the column is not found. + *

      + * If false, the row will pass if the column is not found. This is default. + * @param filterIfMissing flag + */ + public void setFilterIfMissing(boolean filterIfMissing) { + this.filterIfMissing = filterIfMissing; + } + + /** + * Get whether only the latest version of the column value should be compared. + * If true, the row will be returned if only the latest version of the column + * value matches. If false, the row will be returned if any version of the + * column value matches. The default is true. + * @return return value + */ + public boolean getLatestVersionOnly() { + return latestVersionOnly; + } + + /** + * Set whether only the latest version of the column value should be compared. + * If true, the row will be returned if only the latest version of the column + * value matches. If false, the row will be returned if any version of the + * column value matches. The default is true. + * @param latestVersionOnly flag + */ + public void setLatestVersionOnly(boolean latestVersionOnly) { + this.latestVersionOnly = latestVersionOnly; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + Preconditions.checkArgument(filterArguments.size() == 4 || filterArguments.size() == 6, + "Expected 4 or 6 but got: %s", filterArguments.size()); + byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); + CompareOp compareOp = ParseFilter.createCompareOp(filterArguments.get(2)); + ByteArrayComparable comparator = ParseFilter.createComparator( + ParseFilter.removeQuotesFromByteArray(filterArguments.get(3))); + + if (comparator instanceof RegexStringComparator || + comparator instanceof SubstringComparator) { + if (compareOp != CompareOp.EQUAL && + compareOp != CompareOp.NOT_EQUAL) { + throw new IllegalArgumentException ("A regexstring comparator and substring comparator " + + "can only be used with EQUAL and NOT_EQUAL"); + } + } + + SingleColumnValueFilter filter = new SingleColumnValueFilter(family, qualifier, + compareOp, comparator); + + if (filterArguments.size() == 6) { + boolean filterIfMissing = ParseFilter.convertByteArrayToBoolean(filterArguments.get(4)); + boolean latestVersionOnly = ParseFilter.convertByteArrayToBoolean(filterArguments.get(5)); + filter.setFilterIfMissing(filterIfMissing); + filter.setLatestVersionOnly(latestVersionOnly); + } + return filter; + } + + FilterProtos.SingleColumnValueFilter convert() { + FilterProtos.SingleColumnValueFilter.Builder builder = + FilterProtos.SingleColumnValueFilter.newBuilder(); + if (this.columnFamily != null) { + builder.setColumnFamily(ByteString.copyFrom(this.columnFamily)); + } + if (this.columnQualifier != null) { + builder.setColumnQualifier(ByteString.copyFrom(this.columnQualifier)); + } + HBaseProtos.CompareType compareOp = CompareType.valueOf(this.compareOp.name()); + builder.setCompareOp(compareOp); + builder.setComparator(ProtobufUtil.toComparator(this.comparator)); + builder.setFoundColumn(this.foundColumn); + builder.setMatchedColumn(this.matchedColumn); + builder.setFilterIfMissing(this.filterIfMissing); + builder.setLatestVersionOnly(this.latestVersionOnly); + + return builder.build(); + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + return convert().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link SingleColumnValueFilter} instance + * @return An instance of {@link SingleColumnValueFilter} made from bytes + * @throws 
DeserializationException + * @see #toByteArray + */ + public static SingleColumnValueFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.SingleColumnValueFilter proto; + try { + proto = FilterProtos.SingleColumnValueFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + + final CompareOp compareOp = + CompareOp.valueOf(proto.getCompareOp().name()); + final ByteArrayComparable comparator; + try { + comparator = ProtobufUtil.toComparator(proto.getComparator()); + } catch (IOException ioe) { + throw new DeserializationException(ioe); + } + + return new SingleColumnValueFilter( + proto.hasColumnFamily()?proto.getColumnFamily().toByteArray():null, + proto.hasColumnQualifier()?proto.getColumnQualifier().toByteArray():null, + compareOp, comparator, proto.getFoundColumn(),proto.getMatchedColumn(), + proto.getFilterIfMissing(),proto.getLatestVersionOnly()); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof SingleColumnValueFilter)) return false; + + SingleColumnValueFilter other = (SingleColumnValueFilter)o; + return Bytes.equals(this.getFamily(), other.getFamily()) + && Bytes.equals(this.getQualifier(), other.getQualifier()) + && this.compareOp.equals(other.compareOp) + && this.getComparator().areSerializedFieldsEqual(other.getComparator()) + && this.foundColumn == other.foundColumn + && this.matchedColumn == other.matchedColumn + && this.getFilterIfMissing() == other.getFilterIfMissing() + && this.getLatestVersionOnly() == other.getLatestVersionOnly(); + } + + @Override + public String toString() { + return String.format("%s (%s, %s, %s, %s)", + this.getClass().getSimpleName(), Bytes.toStringBinary(this.columnFamily), + Bytes.toStringBinary(this.columnQualifier), this.compareOp.name(), + Bytes.toStringBinary(this.comparator.getValue())); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java new file mode 100644 index 0000000..1d4388d --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java @@ -0,0 +1,145 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.hbase.filter;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.DeserializationException;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * A wrapper filter that filters an entire row if any of the KeyValue checks do
+ * not pass.
+ * <p>
+ * For example, if all columns in a row represent weights of different things,
+ * with the values being the actual weights, and we want to filter out the
+ * entire row if any of its weights are zero. In this case, we want to prevent
+ * rows from being emitted if a single key is filtered. Combine this filter
+ * with a {@link ValueFilter}:
+ * <p>
+ * <pre>
      + * scan.setFilter(new SkipFilter(new ValueFilter(CompareOp.NOT_EQUAL,
+ *     new BinaryComparator(Bytes.toBytes(0)))));
+ * </pre>
      + * Any row which contained a column whose value was 0 will be filtered out
      + * (since ValueFilter will not pass that KeyValue).
      + * Without this filter, the other non-zero valued columns in the row would still
      + * be emitted.
      + */
      +@InterfaceAudience.Public
      +@InterfaceStability.Stable
      +public class SkipFilter extends FilterBase {
      +  private boolean filterRow = false;
      +  private Filter filter;
      +
      +  public SkipFilter(Filter filter) {
      +    this.filter = filter;
      +  }
      +
      +  public Filter getFilter() {
      +    return filter;
      +  }
      +
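+  // Reset the wrapped filter and clear the per-row exclusion flag before the next row.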
      +  public void reset() {
      +    filter.reset();
      +    filterRow = false;
      +  }
      +
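+  // Latch the exclusion flag: once any KeyValue fails the wrapped filter, the whole row is filtered.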
      +  private void changeFR(boolean value) {
      +    filterRow = filterRow || value;
      +  }
      +
      +  public ReturnCode filterKeyValue(KeyValue v) {
      +    ReturnCode c = filter.filterKeyValue(v);
      +    changeFR(c != ReturnCode.INCLUDE);
      +    return c;
      +  }
      +
      +  @Override
      +  public KeyValue transform(KeyValue v) {
      +    return filter.transform(v);
      +  }
      +
      +  public boolean filterRow() {
      +    return filterRow;
      +  }
      +    
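+  // Tells the scanning framework that this filter makes a row-level decision via filterRow().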
      +  public boolean hasFilterRow() {
      +    return true;
      +  }
      +
      +  /**
      +   * @return The filter serialized using pb
      +   */
      +  public byte [] toByteArray() {
      +    FilterProtos.SkipFilter.Builder builder =
      +      FilterProtos.SkipFilter.newBuilder();
      +    builder.setFilter(ProtobufUtil.toFilter(this.filter));
      +    return builder.build().toByteArray();
      +  }
      +
      +  /**
      +   * @param pbBytes A pb serialized {@link SkipFilter} instance
      +   * @return An instance of {@link SkipFilter} made from bytes
      +   * @throws DeserializationException
      +   * @see #toByteArray
      +   */
      +  public static SkipFilter parseFrom(final byte [] pbBytes)
      +  throws DeserializationException {
      +    FilterProtos.SkipFilter proto;
      +    try {
      +      proto = FilterProtos.SkipFilter.parseFrom(pbBytes);
      +    } catch (InvalidProtocolBufferException e) {
      +      throw new DeserializationException(e);
      +    }
      +    try {
      +      return new SkipFilter(ProtobufUtil.toFilter(proto.getFilter()));
      +    } catch (IOException ioe) {
      +      throw new DeserializationException(ioe);
      +    }
      +  }
      +
      +  /**
+   * @param o the filter to compare against
      +   * @return true if and only if the fields of the filter that are serialized
      +   * are equal to the corresponding fields in other.  Used for testing.
      +   */
      +  boolean areSerializedFieldsEqual(Filter o) {
      +    if (o == this) return true;
      +    if (!(o instanceof SkipFilter)) return false;
      +
      +    SkipFilter other = (SkipFilter)o;
      +    return getFilter().areSerializedFieldsEqual(other.getFilter());
      +  }
      +
      +  @Override
      +  public String toString() {
      +    return this.getClass().getSimpleName() + " " + this.filter.toString();
      +  }
      +}
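For orientation, a minimal usage sketch of SkipFilter: it wraps a ValueFilter on a Scan and exercises the protobuf round trip defined by toByteArray()/parseFrom(). The zero value and the helper name buildSkipScan are illustrative assumptions, and the usual org.apache.hadoop.hbase imports are assumed.

    // Skip whole rows that contain any zero-valued cell (illustrative value encoding).
    static Scan buildSkipScan() throws DeserializationException {
      SkipFilter skip = new SkipFilter(new ValueFilter(CompareOp.NOT_EQUAL,
          new BinaryComparator(Bytes.toBytes(0))));

      // Serialize to protobuf bytes and rebuild an equivalent filter, as happens
      // when the filter is shipped to a region server.
      byte[] pb = skip.toByteArray();
      SkipFilter restored = SkipFilter.parseFrom(pb);

      Scan scan = new Scan();
      scan.setFilter(restored);
      return scan;
    }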
      diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java
      new file mode 100644
      index 0000000..1ed08a2
      --- /dev/null
      +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java
      @@ -0,0 +1,112 @@
      +/**
      + *
      + * Licensed to the Apache Software Foundation (ASF) under one
      + * or more contributor license agreements.  See the NOTICE file
      + * distributed with this work for additional information
      + * regarding copyright ownership.  The ASF licenses this file
      + * to you under the Apache License, Version 2.0 (the
      + * "License"); you may not use this file except in compliance
      + * with the License.  You may obtain a copy of the License at
      + *
      + *     http://www.apache.org/licenses/LICENSE-2.0
      + *
      + * Unless required by applicable law or agreed to in writing, software
      + * distributed under the License is distributed on an "AS IS" BASIS,
      + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      + * See the License for the specific language governing permissions and
      + * limitations under the License.
      + */
      +package org.apache.hadoop.hbase.filter;
      +
      +import org.apache.hadoop.classification.InterfaceAudience;
      +import org.apache.hadoop.classification.InterfaceStability;
      +import org.apache.hadoop.hbase.DeserializationException;
      +import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;
      +import org.apache.hadoop.hbase.util.Bytes;
      +
      +import com.google.protobuf.InvalidProtocolBufferException;
      +
      +
      +/**
      + * This comparator is for use with SingleColumnValueFilter, for filtering based on
      + * the value of a given column. Use it to test if a given substring appears
      + * in a cell value in the column. The comparison is case insensitive.
+ * <p>
+ * Only EQUAL or NOT_EQUAL tests are valid with this comparator.
+ * <p>
+ * For example:
+ * <p>
+ * <pre>
      + * SingleColumnValueFilter scvf =
      + *   new SingleColumnValueFilter("col", CompareOp.EQUAL,
      + *     new SubstringComparator("substr"));
+ * </pre>
      + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class SubstringComparator extends ByteArrayComparable { + + private String substr; + + /** + * Constructor + * @param substr the substring + */ + public SubstringComparator(String substr) { + super(Bytes.toBytes(substr.toLowerCase())); + this.substr = substr.toLowerCase(); + } + + @Override + public byte[] getValue() { + return Bytes.toBytes(substr); + } + + @Override + public int compareTo(byte[] value, int offset, int length) { + return Bytes.toString(value, offset, length).toLowerCase().contains(substr) ? 0 + : 1; + } + + /** + * @return The comparator serialized using pb + */ + public byte [] toByteArray() { + ComparatorProtos.SubstringComparator.Builder builder = + ComparatorProtos.SubstringComparator.newBuilder(); + builder.setSubstr(this.substr); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link SubstringComparator} instance + * @return An instance of {@link SubstringComparator} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static SubstringComparator parseFrom(final byte [] pbBytes) + throws DeserializationException { + ComparatorProtos.SubstringComparator proto; + try { + proto = ComparatorProtos.SubstringComparator.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new SubstringComparator(proto.getSubstr()); + } + + /** + * @param other + * @return true if and only if the fields of the comparator that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(ByteArrayComparable other) { + if (other == this) return true; + if (!(other instanceof SubstringComparator)) return false; + + SubstringComparator comparator = (SubstringComparator)other; + return super.areSerializedFieldsEqual(comparator) + && this.substr.equals(comparator.substr); + } + +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java new file mode 100644 index 0000000..20f5d25 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java @@ -0,0 +1,175 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.filter; + +import java.util.ArrayList; +import java.util.List; +import java.util.TreeSet; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import com.google.common.base.Preconditions; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * Filter that returns only cells whose timestamp (version) is + * in the specified list of timestamps (versions). + *

      + * Note: Use of this filter overrides any time range/time stamp + * options specified using {@link org.apache.hadoop.hbase.client.Get#setTimeRange(long, long)}, + * {@link org.apache.hadoop.hbase.client.Scan#setTimeRange(long, long)}, {@link org.apache.hadoop.hbase.client.Get#setTimeStamp(long)}, + * or {@link org.apache.hadoop.hbase.client.Scan#setTimeStamp(long)}. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class TimestampsFilter extends FilterBase { + + TreeSet timestamps; + private static final int MAX_LOG_TIMESTAMPS = 5; + + // Used during scans to hint the scan to stop early + // once the timestamps fall below the minTimeStamp. + long minTimeStamp = Long.MAX_VALUE; + + /** + * Constructor for filter that retains only those + * cells whose timestamp (version) is in the specified + * list of timestamps. + * + * @param timestamps + */ + public TimestampsFilter(List timestamps) { + for (Long timestamp : timestamps) { + Preconditions.checkArgument(timestamp >= 0, "must be positive %s", timestamp); + } + this.timestamps = new TreeSet(timestamps); + init(); + } + + /** + * @return the list of timestamps + */ + public List getTimestamps() { + List list = new ArrayList(timestamps.size()); + list.addAll(timestamps); + return list; + } + + private void init() { + if (this.timestamps.size() > 0) { + minTimeStamp = this.timestamps.first(); + } + } + + /** + * Gets the minimum timestamp requested by filter. + * @return minimum timestamp requested by filter. + */ + public long getMin() { + return minTimeStamp; + } + + @Override + public ReturnCode filterKeyValue(KeyValue v) { + if (this.timestamps.contains(v.getTimestamp())) { + return ReturnCode.INCLUDE; + } else if (v.getTimestamp() < minTimeStamp) { + // The remaining versions of this column are guaranteed + // to be lesser than all of the other values. + return ReturnCode.NEXT_COL; + } + return ReturnCode.SKIP; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + ArrayList timestamps = new ArrayList(); + for (int i = 0; ibytes + * @throws DeserializationException + * @see #toByteArray + */ + public static TimestampsFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.TimestampsFilter proto; + try { + proto = FilterProtos.TimestampsFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return new TimestampsFilter(proto.getTimestampsList()); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. 
+ */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof TimestampsFilter)) return false; + + TimestampsFilter other = (TimestampsFilter)o; + return this.getTimestamps().equals(other.getTimestamps()); + } + + @Override + public String toString() { + return toString(MAX_LOG_TIMESTAMPS); + } + + protected String toString(int maxTimestamps) { + StringBuilder tsList = new StringBuilder(); + + int count = 0; + for (Long ts : this.timestamps) { + if (count >= maxTimestamps) { + break; + } + ++count; + tsList.append(ts.toString()); + if (count < this.timestamps.size() && count < maxTimestamps) { + tsList.append(", "); + } + } + + return String.format("%s (%d/%d): [%s]", this.getClass().getSimpleName(), + count, this.timestamps.size(), tsList.toString()); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java new file mode 100644 index 0000000..de100f3 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java @@ -0,0 +1,125 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + +import java.io.IOException; +import java.util.ArrayList; + +/** + * This filter is used to filter based on column value. It takes an + * operator (equal, greater, not equal, etc) and a byte [] comparator for the + * cell value. + *

+ * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter}
+ * to add more control.
+ * <p>
+ * Multiple filters can be combined using {@link FilterList}.
+ * <p>
      + * To test the value of a single qualifier when scanning multiple qualifiers, + * use {@link SingleColumnValueFilter}. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ValueFilter extends CompareFilter { + + /** + * Constructor. + * @param valueCompareOp the compare op for value matching + * @param valueComparator the comparator for value matching + */ + public ValueFilter(final CompareOp valueCompareOp, + final ByteArrayComparable valueComparator) { + super(valueCompareOp, valueComparator); + } + + @Override + public ReturnCode filterKeyValue(KeyValue v) { + if (doCompare(this.compareOp, this.comparator, v.getBuffer(), + v.getValueOffset(), v.getValueLength())) { + return ReturnCode.SKIP; + } + return ReturnCode.INCLUDE; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + ArrayList arguments = CompareFilter.extractArguments(filterArguments); + CompareOp compareOp = (CompareOp)arguments.get(0); + ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); + return new ValueFilter(compareOp, comparator); + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.ValueFilter.Builder builder = + FilterProtos.ValueFilter.newBuilder(); + builder.setCompareFilter(super.convert()); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link ValueFilter} instance + * @return An instance of {@link ValueFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static ValueFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.ValueFilter proto; + try { + proto = FilterProtos.ValueFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + final CompareOp valueCompareOp = + CompareOp.valueOf(proto.getCompareFilter().getCompareOp().name()); + ByteArrayComparable valueComparator = null; + try { + if (proto.getCompareFilter().hasComparator()) { + valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator()); + } + } catch (IOException ioe) { + throw new DeserializationException(ioe); + } + return new ValueFilter(valueCompareOp,valueComparator); + } + + /** + * @param other + * @return true if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof ValueFilter)) return false; + + return super.areSerializedFieldsEqual(o); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java new file mode 100644 index 0000000..6c454e5 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java @@ -0,0 +1,145 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.filter; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.List; + +/** + * A wrapper filter that returns true from {@link #filterAllRemaining()} as soon + * as the wrapped filters {@link Filter#filterRowKey(byte[], int, int)}, + * {@link Filter#filterKeyValue(org.apache.hadoop.hbase.KeyValue)}, + * {@link org.apache.hadoop.hbase.filter.Filter#filterRow()} or + * {@link org.apache.hadoop.hbase.filter.Filter#filterAllRemaining()} methods + * returns true. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class WhileMatchFilter extends FilterBase { + private boolean filterAllRemaining = false; + private Filter filter; + + public WhileMatchFilter(Filter filter) { + this.filter = filter; + } + + public Filter getFilter() { + return filter; + } + + public void reset() { + this.filter.reset(); + } + + private void changeFAR(boolean value) { + filterAllRemaining = filterAllRemaining || value; + } + + public boolean filterAllRemaining() { + return this.filterAllRemaining || this.filter.filterAllRemaining(); + } + + public boolean filterRowKey(byte[] buffer, int offset, int length) { + boolean value = filter.filterRowKey(buffer, offset, length); + changeFAR(value); + return value; + } + + public ReturnCode filterKeyValue(KeyValue v) { + ReturnCode c = filter.filterKeyValue(v); + changeFAR(c != ReturnCode.INCLUDE); + return c; + } + + @Override + public KeyValue transform(KeyValue v) { + return filter.transform(v); + } + + public boolean filterRow() { + boolean filterRow = this.filter.filterRow(); + changeFAR(filterRow); + return filterRow; + } + + public boolean hasFilterRow() { + return true; + } + + /** + * @return The filter serialized using pb + */ + public byte [] toByteArray() { + FilterProtos.WhileMatchFilter.Builder builder = + FilterProtos.WhileMatchFilter.newBuilder(); + builder.setFilter(ProtobufUtil.toFilter(this.filter)); + return builder.build().toByteArray(); + } + + /** + * @param pbBytes A pb serialized {@link WhileMatchFilter} instance + * @return An instance of {@link WhileMatchFilter} made from bytes + * @throws DeserializationException + * @see #toByteArray + */ + public static WhileMatchFilter parseFrom(final byte [] pbBytes) + throws DeserializationException { + FilterProtos.WhileMatchFilter proto; + try { + proto = FilterProtos.WhileMatchFilter.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + try { + return new WhileMatchFilter(ProtobufUtil.toFilter(proto.getFilter())); + } catch (IOException ioe) { + throw new DeserializationException(ioe); + } + } + + /** + * @param other + * @return true 
if and only if the fields of the filter that are serialized + * are equal to the corresponding fields in other. Used for testing. + */ + boolean areSerializedFieldsEqual(Filter o) { + if (o == this) return true; + if (!(o instanceof WhileMatchFilter)) return false; + + WhileMatchFilter other = (WhileMatchFilter)o; + return getFilter().areSerializedFieldsEqual(other.getFilter()); + } + + @Override + public String toString() { + return this.getClass().getSimpleName() + " " + this.filter.toString(); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/package-info.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/package-info.java new file mode 100644 index 0000000..9dea254 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/package-info.java @@ -0,0 +1,34 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Provides row-level filters applied to HRegion scan results during calls to + * {@link org.apache.hadoop.hbase.client.ResultScanner#next()}. + +

+<p>
+Filters run the extent of a table unless you wrap your filter in a
+{@link org.apache.hadoop.hbase.filter.WhileMatchFilter}.
+The latter returns as soon as the filter stops matching.
+<p>
+Do not rely on filters carrying state across rows; it's not reliable in current
+hbase as we have no handlers in place for when regions split, close or server
+crashes.
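+<p>
+A minimal sketch, assuming an open {@code HTable} named {@code table} and an
+illustrative row-key prefix, of a scan that stops as soon as rows no longer match:
+<pre>
+Scan scan = new Scan();
+scan.setFilter(new WhileMatchFilter(new PrefixFilter(Bytes.toBytes("abc"))));
+ResultScanner scanner = table.getScanner(scan);
+for (Result result : scanner) {
+  // Each result still matches the prefix; the scan ends at the first non-match.
+}
+scanner.close();
+</pre>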
      +*/ +package org.apache.hadoop.hbase.filter; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java hbase-client/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java new file mode 100644 index 0000000..a0f19f2 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java @@ -0,0 +1,803 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.io; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInput; +import java.io.DataOutput; +import java.io.InputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.lang.reflect.Array; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.Action; +import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.MultiAction; +import org.apache.hadoop.hbase.client.MultiResponse; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Row; +import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.filter.BinaryComparator; +import org.apache.hadoop.hbase.filter.BitComparator; +import org.apache.hadoop.hbase.filter.ColumnCountGetFilter; +import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; +import org.apache.hadoop.hbase.filter.ColumnRangeFilter; +import org.apache.hadoop.hbase.filter.CompareFilter; +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import org.apache.hadoop.hbase.filter.DependentColumnFilter; +import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; +import org.apache.hadoop.hbase.filter.InclusiveStopFilter; +import 
org.apache.hadoop.hbase.filter.KeyOnlyFilter; +import org.apache.hadoop.hbase.filter.PageFilter; +import org.apache.hadoop.hbase.filter.PrefixFilter; +import org.apache.hadoop.hbase.filter.QualifierFilter; +import org.apache.hadoop.hbase.filter.RandomRowFilter; +import org.apache.hadoop.hbase.filter.RowFilter; +import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; +import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; +import org.apache.hadoop.hbase.filter.SkipFilter; +import org.apache.hadoop.hbase.filter.ValueFilter; +import org.apache.hadoop.hbase.filter.WhileMatchFilter; +import org.apache.hadoop.hbase.filter.ByteArrayComparable; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.regionserver.RegionOpeningState; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ProtoUtil; +import org.apache.hadoop.io.MapWritable; +import org.apache.hadoop.io.ObjectWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableUtils; + +import com.google.protobuf.Message; +import com.google.protobuf.RpcController; + +/** + * This is a customized version of the polymorphic hadoop + * {@link ObjectWritable}. It removes UTF8 (HADOOP-414). + * Using {@link Text} intead of UTF-8 saves ~2% CPU between reading and writing + * objects running a short sequentialWrite Performance Evaluation test just in + * ObjectWritable alone; more when we're doing randomRead-ing. Other + * optimizations include our passing codes for classes instead of the + * actual class names themselves. This makes it so this class needs amendment + * if non-Writable classes are introduced -- if passed a Writable for which we + * have no code, we just do the old-school passing of the class name, etc. -- + * but passing codes the savings are large particularly when cell + * data is small (If < a couple of kilobytes, the encoding/decoding of class + * name and reflection to instantiate class was costing in excess of the cell + * handling). + */ +@InterfaceAudience.Private +public class HbaseObjectWritable implements Writable, WritableWithSize, Configurable { + protected final static Log LOG = LogFactory.getLog(HbaseObjectWritable.class); + + // Here we maintain two static maps of classes to code and vice versa. + // Add new classes+codes as wanted or figure way to auto-generate these + // maps. + static final Map> CODE_TO_CLASS = + new HashMap>(); + static final Map, Integer> CLASS_TO_CODE = + new HashMap, Integer>(); + // Special code that means 'not-encoded'; in this case we do old school + // sending of the class name using reflection, etc. + private static final byte NOT_ENCODED = 0; + //Generic array means that the array type is not one of the pre-defined arrays + //in the CLASS_TO_CODE map, but we have to still encode the array since it's + //elements are serializable by this class. + private static final int GENERIC_ARRAY_CODE; + private static final int NEXT_CLASS_CODE; + static { + //////////////////////////////////////////////////////////////////////////// + // WARNING: Please do not insert, remove or swap any line in this static // + // block. Doing so would change or shift all the codes used to serialize // + // objects, which makes backwards compatibility very hard for clients. // + // New codes should always be added at the end. 
Code removal is // + // discouraged because code is a short now. // + //////////////////////////////////////////////////////////////////////////// + + int code = NOT_ENCODED + 1; + // Primitive types. + addToMap(Boolean.TYPE, code++); + addToMap(Byte.TYPE, code++); + addToMap(Character.TYPE, code++); + addToMap(Short.TYPE, code++); + addToMap(Integer.TYPE, code++); + addToMap(Long.TYPE, code++); + addToMap(Float.TYPE, code++); + addToMap(Double.TYPE, code++); + addToMap(Void.TYPE, code++); + + // Other java types + addToMap(String.class, code++); + addToMap(byte [].class, code++); + addToMap(byte [][].class, code++); + + // Hadoop types + addToMap(Text.class, code++); + addToMap(Writable.class, code++); + addToMap(Writable [].class, code++); + code++; // Removed + addToMap(NullInstance.class, code++); + + // Hbase types + addToMap(HColumnDescriptor.class, code++); + addToMap(HConstants.Modify.class, code++); + + // We used to have a class named HMsg but its been removed. Rather than + // just axe it, use following random Integer class -- we just chose any + // class from java.lang -- instead just so codes that follow stay + // in same relative place. + addToMap(Integer.class, code++); + addToMap(Integer[].class, code++); + + //HRegion shouldn't be pushed across the wire. + code++; //addToMap(HRegion.class, code++); + code++; //addToMap(HRegion[].class, code++); + + addToMap(HRegionInfo.class, code++); + addToMap(HRegionInfo[].class, code++); + code++; // Removed + code++; // Removed + addToMap(HTableDescriptor.class, code++); + addToMap(MapWritable.class, code++); + + // + // HBASE-880 + // + addToMap(ClusterStatus.class, code++); + addToMap(Delete.class, code++); + addToMap(Get.class, code++); + addToMap(KeyValue.class, code++); + addToMap(KeyValue[].class, code++); + addToMap(Put.class, code++); + addToMap(Put[].class, code++); + addToMap(Result.class, code++); + addToMap(Result[].class, code++); + addToMap(Scan.class, code++); + + addToMap(WhileMatchFilter.class, code++); + addToMap(PrefixFilter.class, code++); + addToMap(PageFilter.class, code++); + addToMap(InclusiveStopFilter.class, code++); + addToMap(ColumnCountGetFilter.class, code++); + addToMap(SingleColumnValueFilter.class, code++); + addToMap(SingleColumnValueExcludeFilter.class, code++); + addToMap(BinaryComparator.class, code++); + addToMap(BitComparator.class, code++); + addToMap(CompareFilter.class, code++); + addToMap(RowFilter.class, code++); + addToMap(ValueFilter.class, code++); + addToMap(QualifierFilter.class, code++); + addToMap(SkipFilter.class, code++); + addToMap(ByteArrayComparable.class, code++); + addToMap(FirstKeyOnlyFilter.class, code++); + addToMap(DependentColumnFilter.class, code++); + + addToMap(Delete [].class, code++); + + code++; //addToMap(HLog.Entry.class, code++); + code++; //addToMap(HLog.Entry[].class, code++); + code++; //addToMap(HLogKey.class, code++); + + addToMap(List.class, code++); + + addToMap(NavigableSet.class, code++); + addToMap(ColumnPrefixFilter.class, code++); + + // Multi + addToMap(Row.class, code++); + addToMap(Action.class, code++); + addToMap(MultiAction.class, code++); + addToMap(MultiResponse.class, code++); + + // coprocessor execution + // Exec no longer exists --> addToMap(Exec.class, code++); + code++; + addToMap(Increment.class, code++); + + addToMap(KeyOnlyFilter.class, code++); + + // serializable + addToMap(Serializable.class, code++); + + addToMap(RandomRowFilter.class, code++); + + addToMap(CompareOp.class, code++); + + addToMap(ColumnRangeFilter.class, code++); + 
+ // HServerLoad no longer exists; increase code so other classes stay the same. + code++; + //addToMap(HServerLoad.class, code++); + + addToMap(RegionOpeningState.class, code++); + + addToMap(HTableDescriptor[].class, code++); + + addToMap(Append.class, code++); + + addToMap(RowMutations.class, code++); + + addToMap(Message.class, code++); + + //java.lang.reflect.Array is a placeholder for arrays not defined above + GENERIC_ARRAY_CODE = code++; + addToMap(Array.class, GENERIC_ARRAY_CODE); + + addToMap(RpcController.class, code++); + + // make sure that this is the last statement in this static block + NEXT_CLASS_CODE = code; + } + + private Class declaredClass; + private Object instance; + private Configuration conf; + + /** default constructor for writable */ + public HbaseObjectWritable() { + super(); + } + + /** + * @param instance + */ + public HbaseObjectWritable(Object instance) { + set(instance); + } + + /** + * @param declaredClass + * @param instance + */ + public HbaseObjectWritable(Class declaredClass, Object instance) { + this.declaredClass = declaredClass; + this.instance = instance; + } + + /** @return the instance, or null if none. */ + public Object get() { return instance; } + + /** @return the class this is meant to be. */ + public Class getDeclaredClass() { return declaredClass; } + + /** + * Reset the instance. + * @param instance + */ + public void set(Object instance) { + this.declaredClass = instance.getClass(); + this.instance = instance; + } + + /** + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + return "OW[class=" + declaredClass + ",value=" + instance + "]"; + } + + + public void readFields(DataInput in) throws IOException { + readObject(in, this, this.conf); + } + + public void write(DataOutput out) throws IOException { + writeObject(out, instance, declaredClass, conf); + } + + public long getWritableSize() { + return getWritableSize(instance, declaredClass, conf); + } + + private static class NullInstance extends Configured implements Writable { + Class declaredClass; + /** default constructor for writable */ + @SuppressWarnings("unused") + public NullInstance() { super(null); } + + /** + * @param declaredClass + * @param conf + */ + public NullInstance(Class declaredClass, Configuration conf) { + super(conf); + this.declaredClass = declaredClass; + } + + public void readFields(DataInput in) throws IOException { + this.declaredClass = CODE_TO_CLASS.get(WritableUtils.readVInt(in)); + } + + public void write(DataOutput out) throws IOException { + writeClassCode(out, this.declaredClass); + } + } + + public static Integer getClassCode(final Class c) + throws IOException { + Integer code = CLASS_TO_CODE.get(c); + if (code == null ) { + if (List.class.isAssignableFrom(c)) { + code = CLASS_TO_CODE.get(List.class); + } else if (Writable.class.isAssignableFrom(c)) { + code = CLASS_TO_CODE.get(Writable.class); + } else if (c.isArray()) { + code = CLASS_TO_CODE.get(Array.class); + } else if (Message.class.isAssignableFrom(c)) { + code = CLASS_TO_CODE.get(Message.class); + } else if (Serializable.class.isAssignableFrom(c)){ + code = CLASS_TO_CODE.get(Serializable.class); + } else if (Scan.class.isAssignableFrom(c)) { + code = CLASS_TO_CODE.get(Scan.class); + } + } + return code; + } + + /** + * @return the next object code in the list. Used in testing to verify that additional fields are not added + */ + static int getNextClassCode(){ + return NEXT_CLASS_CODE; + } + + /** + * Write out the code for passed Class. 
+ * @param out + * @param c + * @throws IOException + */ + static void writeClassCode(final DataOutput out, final Class c) + throws IOException { + Integer code = getClassCode(c); + + if (code == null) { + LOG.error("Unsupported type " + c); + StackTraceElement[] els = new Exception().getStackTrace(); + for(StackTraceElement elem : els) { + LOG.error(elem.getMethodName()); + } + throw new UnsupportedOperationException("No code for unexpected " + c); + } + WritableUtils.writeVInt(out, code); + } + + public static long getWritableSize(Object instance, Class declaredClass, + Configuration conf) { + return 0L; // no hint is the default. + } + /** + * Write a {@link Writable}, {@link String}, primitive type, or an array of + * the preceding. + * @param out + * @param instance + * @param declaredClass + * @param conf + * @throws IOException + */ + @SuppressWarnings("unchecked") + public static void writeObject(DataOutput out, Object instance, + Class declaredClass, + Configuration conf) + throws IOException { + + Object instanceObj = instance; + Class declClass = declaredClass; + + if (instanceObj == null) { // null + instanceObj = new NullInstance(declClass, conf); + declClass = Writable.class; + } + writeClassCode(out, declClass); + if (declClass.isArray()) { // array + // If bytearray, just dump it out -- avoid the recursion and + // byte-at-a-time we were previously doing. + if (declClass.equals(byte [].class)) { + Bytes.writeByteArray(out, (byte [])instanceObj); + } else { + //if it is a Generic array, write the element's type + if (getClassCode(declaredClass) == GENERIC_ARRAY_CODE) { + Class componentType = declaredClass.getComponentType(); + writeClass(out, componentType); + } + + int length = Array.getLength(instanceObj); + out.writeInt(length); + for (int i = 0; i < length; i++) { + Object item = Array.get(instanceObj, i); + writeObject(out, item, + item.getClass(), conf); + } + } + } else if (List.class.isAssignableFrom(declClass)) { + List list = (List)instanceObj; + int length = list.size(); + out.writeInt(length); + for (int i = 0; i < length; i++) { + Object elem = list.get(i); + writeObject(out, elem, + elem == null ? 
Writable.class : elem.getClass(), conf); + } + } else if (declClass == String.class) { // String + Text.writeString(out, (String)instanceObj); + } else if (declClass.isPrimitive()) { // primitive type + if (declClass == Boolean.TYPE) { // boolean + out.writeBoolean(((Boolean)instanceObj).booleanValue()); + } else if (declClass == Character.TYPE) { // char + out.writeChar(((Character)instanceObj).charValue()); + } else if (declClass == Byte.TYPE) { // byte + out.writeByte(((Byte)instanceObj).byteValue()); + } else if (declClass == Short.TYPE) { // short + out.writeShort(((Short)instanceObj).shortValue()); + } else if (declClass == Integer.TYPE) { // int + out.writeInt(((Integer)instanceObj).intValue()); + } else if (declClass == Long.TYPE) { // long + out.writeLong(((Long)instanceObj).longValue()); + } else if (declClass == Float.TYPE) { // float + out.writeFloat(((Float)instanceObj).floatValue()); + } else if (declClass == Double.TYPE) { // double + out.writeDouble(((Double)instanceObj).doubleValue()); + } else if (declClass == Void.TYPE) { // void + } else { + throw new IllegalArgumentException("Not a primitive: "+declClass); + } + } else if (declClass.isEnum()) { // enum + Text.writeString(out, ((Enum)instanceObj).name()); + } else if (Message.class.isAssignableFrom(declaredClass)) { + Text.writeString(out, instanceObj.getClass().getName()); + ((Message)instance).writeDelimitedTo( + DataOutputOutputStream.constructOutputStream(out)); + } else if (Writable.class.isAssignableFrom(declClass)) { // Writable + Class c = instanceObj.getClass(); + Integer code = CLASS_TO_CODE.get(c); + if (code == null) { + out.writeByte(NOT_ENCODED); + Text.writeString(out, c.getName()); + } else { + writeClassCode(out, c); + } + ((Writable)instanceObj).write(out); + } else if (Serializable.class.isAssignableFrom(declClass)) { + Class c = instanceObj.getClass(); + Integer code = CLASS_TO_CODE.get(c); + if (code == null) { + out.writeByte(NOT_ENCODED); + Text.writeString(out, c.getName()); + } else { + writeClassCode(out, c); + } + ByteArrayOutputStream bos = null; + ObjectOutputStream oos = null; + try{ + bos = new ByteArrayOutputStream(); + oos = new ObjectOutputStream(bos); + oos.writeObject(instanceObj); + byte[] value = bos.toByteArray(); + out.writeInt(value.length); + out.write(value); + } finally { + if(bos!=null) bos.close(); + if(oos!=null) oos.close(); + } + } else if (Scan.class.isAssignableFrom(declClass)) { + Scan scan = (Scan)instanceObj; + byte [] scanBytes = ProtobufUtil.toScan(scan).toByteArray(); + out.writeInt(scanBytes.length); + out.write(scanBytes); + } else { + throw new IOException("Can't write: "+instanceObj+" as "+declClass); + } + } + + /** Writes the encoded class code as defined in CLASS_TO_CODE, or + * the whole class name if not defined in the mapping. 
+ */ + static void writeClass(DataOutput out, Class c) throws IOException { + Integer code = CLASS_TO_CODE.get(c); + if (code == null) { + WritableUtils.writeVInt(out, NOT_ENCODED); + Text.writeString(out, c.getName()); + } else { + WritableUtils.writeVInt(out, code); + } + } + + /** Reads and returns the class as written by {@link #writeClass(DataOutput, Class)} */ + static Class readClass(Configuration conf, DataInput in) throws IOException { + Class instanceClass = null; + int b = (byte)WritableUtils.readVInt(in); + if (b == NOT_ENCODED) { + String className = Text.readString(in); + try { + instanceClass = getClassByName(conf, className); + } catch (ClassNotFoundException e) { + LOG.error("Can't find class " + className, e); + throw new IOException("Can't find class " + className, e); + } + } else { + instanceClass = CODE_TO_CLASS.get(b); + } + return instanceClass; + } + + /** + * Read a {@link Writable}, {@link String}, primitive type, or an array of + * the preceding. + * @param in + * @param conf + * @return the object + * @throws IOException + */ + public static Object readObject(DataInput in, Configuration conf) + throws IOException { + return readObject(in, null, conf); + } + + /** + * Read a {@link Writable}, {@link String}, primitive type, or an array of + * the preceding. + * @param in + * @param objectWritable + * @param conf + * @return the object + * @throws IOException + */ + @SuppressWarnings("unchecked") + public static Object readObject(DataInput in, + HbaseObjectWritable objectWritable, Configuration conf) + throws IOException { + Class declaredClass = CODE_TO_CLASS.get(WritableUtils.readVInt(in)); + Object instance; + if (declaredClass.isPrimitive()) { // primitive types + if (declaredClass == Boolean.TYPE) { // boolean + instance = Boolean.valueOf(in.readBoolean()); + } else if (declaredClass == Character.TYPE) { // char + instance = Character.valueOf(in.readChar()); + } else if (declaredClass == Byte.TYPE) { // byte + instance = Byte.valueOf(in.readByte()); + } else if (declaredClass == Short.TYPE) { // short + instance = Short.valueOf(in.readShort()); + } else if (declaredClass == Integer.TYPE) { // int + instance = Integer.valueOf(in.readInt()); + } else if (declaredClass == Long.TYPE) { // long + instance = Long.valueOf(in.readLong()); + } else if (declaredClass == Float.TYPE) { // float + instance = Float.valueOf(in.readFloat()); + } else if (declaredClass == Double.TYPE) { // double + instance = Double.valueOf(in.readDouble()); + } else if (declaredClass == Void.TYPE) { // void + instance = null; + } else { + throw new IllegalArgumentException("Not a primitive: "+declaredClass); + } + } else if (declaredClass.isArray()) { // array + if (declaredClass.equals(byte [].class)) { + instance = Bytes.readByteArray(in); + } else { + int length = in.readInt(); + instance = Array.newInstance(declaredClass.getComponentType(), length); + for (int i = 0; i < length; i++) { + Array.set(instance, i, readObject(in, conf)); + } + } + } else if (declaredClass.equals(Array.class)) { //an array not declared in CLASS_TO_CODE + Class componentType = readClass(conf, in); + int length = in.readInt(); + instance = Array.newInstance(componentType, length); + for (int i = 0; i < length; i++) { + Array.set(instance, i, readObject(in, conf)); + } + } else if (List.class.isAssignableFrom(declaredClass)) { // List + int length = in.readInt(); + instance = new ArrayList(length); + for (int i = 0; i < length; i++) { + ((ArrayList)instance).add(readObject(in, conf)); + } + } else if 
(declaredClass == String.class) { // String + instance = Text.readString(in); + } else if (declaredClass.isEnum()) { // enum + instance = Enum.valueOf((Class) declaredClass, + Text.readString(in)); + } else if (declaredClass == Message.class) { + String className = Text.readString(in); + try { + declaredClass = getClassByName(conf, className); + instance = tryInstantiateProtobuf(declaredClass, in); + } catch (ClassNotFoundException e) { + LOG.error("Can't find class " + className, e); + throw new IOException("Can't find class " + className, e); + } + } else if (Scan.class.isAssignableFrom(declaredClass)) { + int length = in.readInt(); + byte [] scanBytes = new byte[length]; + in.readFully(scanBytes); + ClientProtos.Scan.Builder scanProto = ClientProtos.Scan.newBuilder(); + instance = ProtobufUtil.toScan(scanProto.mergeFrom(scanBytes).build()); + } else { // Writable or Serializable + Class instanceClass = null; + int b = (byte)WritableUtils.readVInt(in); + if (b == NOT_ENCODED) { + String className = Text.readString(in); + try { + instanceClass = getClassByName(conf, className); + } catch (ClassNotFoundException e) { + LOG.error("Can't find class " + className, e); + throw new IOException("Can't find class " + className, e); + } + } else { + instanceClass = CODE_TO_CLASS.get(b); + } + if(Writable.class.isAssignableFrom(instanceClass)){ + Writable writable = WritableFactories.newInstance(instanceClass, conf); + try { + writable.readFields(in); + } catch (Exception e) { + LOG.error("Error in readFields", e); + throw new IOException("Error in readFields" , e); + } + instance = writable; + if (instanceClass == NullInstance.class) { // null + declaredClass = ((NullInstance)instance).declaredClass; + instance = null; + } + } else { + int length = in.readInt(); + byte[] objectBytes = new byte[length]; + in.readFully(objectBytes); + ByteArrayInputStream bis = null; + ObjectInputStream ois = null; + try { + bis = new ByteArrayInputStream(objectBytes); + ois = new ObjectInputStream(bis); + instance = ois.readObject(); + } catch (ClassNotFoundException e) { + LOG.error("Class not found when attempting to deserialize object", e); + throw new IOException("Class not found when attempting to " + + "deserialize object", e); + } finally { + if(bis!=null) bis.close(); + if(ois!=null) ois.close(); + } + } + } + if (objectWritable != null) { // store values + objectWritable.declaredClass = declaredClass; + objectWritable.instance = instance; + } + return instance; + } + + /** + * Try to instantiate a protocol buffer of the given message class + * from the given input stream. + * + * @param protoClass the class of the generated protocol buffer + * @param dataIn the input stream to read from + * @return the instantiated Message instance + * @throws IOException if an IO problem occurs + */ + public static Message tryInstantiateProtobuf( + Class protoClass, + DataInput dataIn) throws IOException { + + try { + if (dataIn instanceof InputStream) { + // We can use the built-in parseDelimitedFrom and not have to re-copy + // the data + Method parseMethod = getStaticProtobufMethod(protoClass, + "parseDelimitedFrom", InputStream.class); + return (Message)parseMethod.invoke(null, (InputStream)dataIn); + } else { + // Have to read it into a buffer first, since protobuf doesn't deal + // with the DataInput interface directly. 
+ + // Read the size delimiter that writeDelimitedTo writes + int size = ProtoUtil.readRawVarint32(dataIn); + if (size < 0) { + throw new IOException("Invalid size: " + size); + } + + byte[] data = new byte[size]; + dataIn.readFully(data); + Method parseMethod = getStaticProtobufMethod(protoClass, + "parseFrom", byte[].class); + return (Message)parseMethod.invoke(null, data); + } + } catch (InvocationTargetException e) { + + if (e.getCause() instanceof IOException) { + throw (IOException)e.getCause(); + } else { + throw new IOException(e.getCause()); + } + } catch (IllegalAccessException iae) { + throw new AssertionError("Could not access parse method in " + + protoClass); + } + } + + static Method getStaticProtobufMethod(Class declaredClass, String method, + Class ... args) { + + try { + return declaredClass.getMethod(method, args); + } catch (Exception e) { + // This is a bug in Hadoop - protobufs should all have this static method + throw new AssertionError("Protocol buffer class " + declaredClass + + " does not have an accessible parseFrom(InputStream) method!"); + } + } + + @SuppressWarnings("unchecked") + private static Class getClassByName(Configuration conf, String className) + throws ClassNotFoundException { + if(conf != null) { + return conf.getClassByName(className); + } + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + if(cl == null) { + cl = HbaseObjectWritable.class.getClassLoader(); + } + return Class.forName(className, true, cl); + } + + private static void addToMap(final Class clazz, final int code) { + CLASS_TO_CODE.put(clazz, code); + CODE_TO_CLASS.put(code, clazz); + } + + public void setConf(Configuration conf) { + this.conf = conf; + } + + public Configuration getConf() { + return this.conf; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java hbase-client/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java new file mode 100644 index 0000000..1da99c7 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java @@ -0,0 +1,38 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.io; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * An optional interface to 'size' writables. + */ +@InterfaceAudience.Private +public interface WritableWithSize { + /** + * Provide a size hint to the caller. write() should ideally + * not go beyond this if at all possible. + * + * You can return 0 if there is no size hint. 
+ * + * @return the size of the writable + */ + public long getWritableSize(); +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java new file mode 100644 index 0000000..322e676 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ipc; + +import com.google.protobuf.RpcCallback; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import java.io.IOException; +import java.io.InterruptedIOException; + +/** + * Simple {@link RpcCallback} implementation providing a + * {@link java.util.concurrent.Future}-like {@link BlockingRpcCallback#get()} method, which + * will block util the instance's {@link BlockingRpcCallback#run(Object)} method has been called. + * {@code R} is the RPC response type that will be passed to the {@link #run(Object)} method. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class BlockingRpcCallback implements RpcCallback { + private R result; + private boolean resultSet = false; + + /** + * Called on completion of the RPC call with the response object, or {@code null} in the case of + * an error. + * @param parameter the response object or {@code null} if an error occurred + */ + @Override + public void run(R parameter) { + synchronized (this) { + result = parameter; + resultSet = true; + this.notify(); + } + } + + /** + * Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was + * passed. When used asynchronously, this method will block until the {@link #run(Object)} + * method has been called. + * @return the response object or {@code null} if no response was passed + */ + public synchronized R get() throws IOException { + while (!resultSet) { + try { + this.wait(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + InterruptedIOException exception = new InterruptedIOException(ie.getMessage()); + exception.initCause(ie); + throw exception; + } + } + return result; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ClientCache.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ClientCache.java new file mode 100644 index 0000000..b711243 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ClientCache.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
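As a usage note, the callback above behaves like a single-slot latch. A hedged sketch of driving it outside the RPC machinery; the worker thread here merely stands in for the RPC layer delivering a response:

import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;

public class BlockingCallbackDemo {
  public static void main(String[] args) throws Exception {
    final BlockingRpcCallback<String> callback = new BlockingRpcCallback<String>();
    // Stand-in for the RPC layer: completes the callback from another thread.
    new Thread(new Runnable() {
      @Override
      public void run() {
        callback.run("pong");
      }
    }).start();
    // get() blocks until run(...) has been invoked, then returns its argument.
    System.out.println(callback.get());
  }
}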
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ipc; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.util.HashMap; +import java.util.Map; + +import javax.net.SocketFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; + +/** + * Cache a client using its socket factory as the hash key. + * Enables reuse/sharing of clients on a per SocketFactory basis. A client + * establishes certain configuration dependent characteristics like timeouts, + * tcp-keepalive (true or false), etc. For more details on the characteristics, + * look at {@link HBaseClient#HBaseClient(Configuration, SocketFactory)} + * Creation of dynamic proxies to protocols creates the clients (and increments + * reference count once created), and stopping of the proxies leads to clearing + * out references and when the reference drops to zero, the cache mapping is + * cleared. + */ +class ClientCache { + private Map clients = + new HashMap(); + + protected ClientCache() {} + + /** + * Construct & cache an IPC client with the user-provided SocketFactory + * if no cached client exists. + * + * @param conf Configuration + * @param factory socket factory + * @return an IPC client + */ + @SuppressWarnings("unchecked") + protected synchronized HBaseClient getClient(Configuration conf, SocketFactory factory) { + + HBaseClient client = clients.get(factory); + if (client == null) { + Class hbaseClientClass = (Class) conf + .getClass(HConstants.HBASECLIENT_IMPL, HBaseClient.class); + + // Make an hbase client instead of hadoop Client. + try { + Constructor cst = hbaseClientClass.getConstructor( + Configuration.class, SocketFactory.class); + client = cst.newInstance(conf, factory); + } catch (InvocationTargetException e) { + throw new RuntimeException(e); + } catch (InstantiationException e) { + throw new RuntimeException(e); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } catch (NoSuchMethodException e) { + throw new RuntimeException("No matching constructor in "+hbaseClientClass.getName(), e); + } + + clients.put(factory, client); + } else { + client.incCount(); + } + return client; + } + + /** + * Stop a RPC client connection + * A RPC client is closed only when its reference count becomes zero. 
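The lookup in getClient above is configuration-driven: the implementation class is resolved through Configuration.getClass and then built via its (Configuration, SocketFactory) constructor. A hedged sketch of that same resolve-and-construct pattern; the "client.impl" key is hypothetical and used only for illustration:

import java.lang.reflect.Constructor;

import javax.net.SocketFactory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;

final class PluggableClientSketch {
  // Resolve a class from configuration (falling back to the given default) and
  // call its (Configuration, SocketFactory) constructor reflectively.
  static Object newConfiguredClient(Configuration conf, Class<?> defaultImpl)
      throws Exception {
    Class<?> impl = conf.getClass("client.impl", defaultImpl);
    Constructor<?> ctor = impl.getConstructor(Configuration.class, SocketFactory.class);
    return ctor.newInstance(conf, NetUtils.getDefaultSocketFactory(conf));
  }
}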
+ * @param client client to stop + */ + protected void stopClient(HBaseClient client) { + synchronized (this) { + client.decCount(); + if (client.isZeroReference()) { + clients.remove(client.getSocketFactory()); + } + } + if (client.isZeroReference()) { + client.stop(); + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java new file mode 100644 index 0000000..d3d92e2 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ipc; + +import com.google.protobuf.*; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; + +import java.io.IOException; + +/** + * Base class which provides clients with an RPC connection to + * call coprocessor endpoint {@link Service}s + */ +@InterfaceAudience.Private +public abstract class CoprocessorRpcChannel implements RpcChannel, BlockingRpcChannel { + private static Log LOG = LogFactory.getLog(CoprocessorRpcChannel.class); + + @Override + public void callMethod(Descriptors.MethodDescriptor method, + RpcController controller, + Message request, Message responsePrototype, + RpcCallback callback) { + Message response = null; + try { + response = callExecService(method, request, responsePrototype); + } catch (IOException ioe) { + LOG.warn("Call failed on IOException", ioe); + ResponseConverter.setControllerException(controller, ioe); + } + if (callback != null) { + callback.run(response); + } + } + + @Override + public Message callBlockingMethod(Descriptors.MethodDescriptor method, + RpcController controller, + Message request, Message responsePrototype) + throws ServiceException { + try { + return callExecService(method, request, responsePrototype); + } catch (IOException ioe) { + throw new ServiceException("Error calling method "+method.getFullName(), ioe); + } + } + + protected abstract Message callExecService(Descriptors.MethodDescriptor method, + Message request, Message responsePrototype) throws IOException; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java new file mode 100644 index 0000000..d4a61c0 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java @@ -0,0 +1,1510 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
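The abstract channel above leaves a single hook, callExecService, for concrete transports to fill in; both callMethod and callBlockingMethod funnel through it. A hedged sketch of a do-nothing subclass that could back a generated service stub in a test, answering every call with the default instance of the expected response:

import java.io.IOException;

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;

public class EchoRpcChannel extends CoprocessorRpcChannel {
  // Never goes over the wire; useful only to exercise stub plumbing.
  @Override
  protected Message callExecService(Descriptors.MethodDescriptor method,
      Message request, Message responsePrototype) throws IOException {
    return responsePrototype.getDefaultInstanceForType();
  }
}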
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ipc; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.EOFException; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.lang.reflect.Method; +import java.net.ConnectException; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.SocketException; +import java.net.SocketTimeoutException; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.security.PrivilegedExceptionAction; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Random; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +import javax.net.SocketFactory; +import javax.security.sasl.SaslException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcException; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcRequestBody; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponseBody; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponseHeader; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponseHeader.Status; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcRequestHeader; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation; +import org.apache.hadoop.hbase.protobuf.generated.Tracing.RPCTInfo; +import org.apache.hadoop.hbase.security.HBaseSaslRpcClient; +import org.apache.hadoop.hbase.security.AuthMethod; +import org.apache.hadoop.hbase.security.KerberosInfo; +import org.apache.hadoop.hbase.security.TokenInfo; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier; +import org.apache.hadoop.hbase.security.token.AuthenticationTokenSelector; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.PoolMap; +import org.apache.hadoop.hbase.util.PoolMap.PoolType; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.ipc.RemoteException; 
+import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.security.token.TokenSelector; +import org.cloudera.htrace.Span; +import org.cloudera.htrace.Trace; + +import com.google.protobuf.CodedOutputStream; +import com.google.protobuf.Message; +import com.google.protobuf.Message.Builder; + + +/** A client for an IPC service. IPC calls take a single Protobuf message as a + * parameter, and return a single Protobuf message as their value. A service runs on + * a port and is defined by a parameter class and a value class. + * + *
<p>
      This is the org.apache.hadoop.ipc.Client renamed as HBaseClient and + * moved into this package so can access package-private methods. + * + * @see HBaseServer + */ +@InterfaceAudience.Private +public class HBaseClient { + + public static final Log LOG = LogFactory + .getLog("org.apache.hadoop.ipc.HBaseClient"); + public static final byte CURRENT_VERSION = 5; + /** + * The first four bytes of Hadoop RPC connections + */ + public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes()); + protected final PoolMap connections; + private static final Map methodInstances = + new ConcurrentHashMap(); + + protected int counter; // counter for call ids + protected final AtomicBoolean running = new AtomicBoolean(true); // if client runs + final protected Configuration conf; + final protected int maxIdleTime; // connections will be culled if it was idle for + // maxIdleTime microsecs + final protected int maxRetries; //the max. no. of retries for socket connections + final protected long failureSleep; // Time to sleep before retry on failure. + protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm + protected final boolean tcpKeepAlive; // if T then use keepalives + protected int pingInterval; // how often sends ping to the server in msecs + protected int socketTimeout; // socket timeout + protected FailedServers failedServers; + + protected final SocketFactory socketFactory; // how to create sockets + private int refCount = 1; + protected String clusterId; + + final private static String PING_INTERVAL_NAME = "ipc.ping.interval"; + final private static String SOCKET_TIMEOUT = "ipc.socket.timeout"; + final static int DEFAULT_PING_INTERVAL = 60000; // 1 min + final static int DEFAULT_SOCKET_TIMEOUT = 20000; // 20 seconds + final static int PING_CALL_ID = -1; + + public final static String FAILED_SERVER_EXPIRY_KEY = "hbase.ipc.client.failed.servers.expiry"; + public final static int FAILED_SERVER_EXPIRY_DEFAULT = 2000; + + /** + * A class to manage a list of servers that failed recently. + */ + static class FailedServers { + private final LinkedList> failedServers = new + LinkedList>(); + private final int recheckServersTimeout; + + FailedServers(Configuration conf) { + this.recheckServersTimeout = conf.getInt( + FAILED_SERVER_EXPIRY_KEY, FAILED_SERVER_EXPIRY_DEFAULT); + } + + /** + * Add an address to the list of the failed servers list. + */ + public synchronized void addToFailedServers(InetSocketAddress address) { + final long expiry = EnvironmentEdgeManager.currentTimeMillis() + recheckServersTimeout; + failedServers.addFirst(new Pair(expiry, address.toString())); + } + + /** + * Check if the server should be considered as bad. Clean the old entries of the list. 
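Since the expiry window of the failed-server list is read from the configuration, callers can tune how long a failed address stays blacklisted. A small hedged sketch using the two public constants declared above; a plain Hadoop Configuration is used here only to keep the example self-contained:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ipc.HBaseClient;

public class FailedServerConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Remember a failed server for 5 seconds instead of the 2 second default.
    conf.setInt(HBaseClient.FAILED_SERVER_EXPIRY_KEY, 5000);
    System.out.println(conf.getInt(HBaseClient.FAILED_SERVER_EXPIRY_KEY,
        HBaseClient.FAILED_SERVER_EXPIRY_DEFAULT));
  }
}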
+ * + * @return true if the server is in the failed servers list + */ + public synchronized boolean isFailedServer(final InetSocketAddress address) { + if (failedServers.isEmpty()) { + return false; + } + + final String lookup = address.toString(); + final long now = EnvironmentEdgeManager.currentTimeMillis(); + + // iterate, looking for the search entry and cleaning expired entries + Iterator> it = failedServers.iterator(); + while (it.hasNext()) { + Pair cur = it.next(); + if (cur.getFirst() < now) { + it.remove(); + } else { + if (lookup.equals(cur.getSecond())) { + return true; + } + } + } + + return false; + } + + } + + public static class FailedServerException extends IOException { + public FailedServerException(String s) { + super(s); + } + } + + + /** + * set the ping interval value in configuration + * + * @param conf Configuration + * @param pingInterval the ping interval + */ + public static void setPingInterval(Configuration conf, int pingInterval) { + conf.setInt(PING_INTERVAL_NAME, pingInterval); + } + + /** + * Get the ping interval from configuration; + * If not set in the configuration, return the default value. + * + * @param conf Configuration + * @return the ping interval + */ + static int getPingInterval(Configuration conf) { + return conf.getInt(PING_INTERVAL_NAME, DEFAULT_PING_INTERVAL); + } + + /** + * Set the socket timeout + * @param conf Configuration + * @param socketTimeout the socket timeout + */ + public static void setSocketTimeout(Configuration conf, int socketTimeout) { + conf.setInt(SOCKET_TIMEOUT, socketTimeout); + } + + /** + * @return the socket timeout + */ + static int getSocketTimeout(Configuration conf) { + return conf.getInt(SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT); + } + + /** + * Increment this client's reference count + * + */ + synchronized void incCount() { + refCount++; + } + + /** + * Decrement this client's reference count + * + */ + synchronized void decCount() { + refCount--; + } + + /** + * Return if this client has no reference + * + * @return true if this client has no reference; false otherwise + */ + synchronized boolean isZeroReference() { + return refCount==0; + } + + /** A call waiting for a value. */ + protected class Call { + final int id; // call id + final RpcRequestBody param; // rpc request object + Message value; // value, null if error + IOException error; // exception, null if value + boolean done; // true when call is done + long startTime; + + protected Call(RpcRequestBody param) { + this.param = param; + this.startTime = System.currentTimeMillis(); + synchronized (HBaseClient.this) { + this.id = counter++; + } + } + + /** Indicate when the call is complete and the + * value or error are available. Notifies by default. */ + protected synchronized void callComplete() { + this.done = true; + notify(); // notify caller + } + + /** Set the exception when there is an error. + * Notify the caller the call is done. + * + * @param error exception thrown by the call; either local or remote + */ + public synchronized void setException(IOException error) { + this.error = error; + callComplete(); + } + + /** Set the return value when there is no error. + * Notify the caller the call is done. + * + * @param value return value of the call. 
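The Call class above is a hand-rolled future: the receiver thread publishes a value or an exception under the call's monitor, and the issuing thread waits on the done flag. A hedged restatement of that contract in isolation:

import java.io.IOException;

final class MiniCall<V> {
  private V value;
  private IOException error;
  private boolean done;

  // Completion side: publish a result or an error, then wake the waiter.
  synchronized void setValue(V v) { value = v; done = true; notifyAll(); }
  synchronized void setException(IOException e) { error = e; done = true; notifyAll(); }

  // Caller side: guarded wait on the monitor until one of the setters has run.
  synchronized V get() throws IOException, InterruptedException {
    while (!done) {
      wait();
    }
    if (error != null) {
      throw error;
    }
    return value;
  }
}

HBaseClient.call(...) further down performs this wait loop directly on the Call instance, with extra care to restore the thread's interrupt status.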
+ */ + public synchronized void setValue(Message value) { + this.value = value; + callComplete(); + } + + public long getStartTime() { + return this.startTime; + } + } + protected static Map> tokenHandlers = + new HashMap>(); + static { + tokenHandlers.put(AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.toString(), + new AuthenticationTokenSelector()); + } + + /** + * Creates a connection. Can be overridden by a subclass for testing. + * @param remoteId - the ConnectionId to use for the connection creation. + */ + protected Connection createConnection(ConnectionId remoteId) throws IOException { + return new Connection(remoteId); + } + + /** Thread that reads responses and notifies callers. Each connection owns a + * socket connected to a remote address. Calls are multiplexed through this + * socket: responses may be delivered out of order. */ + protected class Connection extends Thread { + private ConnectionHeader header; // connection header + protected ConnectionId remoteId; + protected Socket socket = null; // connected socket + protected DataInputStream in; + protected DataOutputStream out; + private InetSocketAddress server; // server ip:port + private String serverPrincipal; // server's krb5 principal name + private AuthMethod authMethod; // authentication method + private boolean useSasl; + private Token token; + private HBaseSaslRpcClient saslRpcClient; + private int reloginMaxBackoff; // max pause before relogin on sasl failure + + // currently active calls + protected final ConcurrentSkipListMap calls = new ConcurrentSkipListMap(); + protected final AtomicLong lastActivity = new AtomicLong();// last I/O activity time + protected final AtomicBoolean shouldCloseConnection = new AtomicBoolean(); // indicate if the connection is closed + protected IOException closeException; // close reason + + Connection(ConnectionId remoteId) throws IOException { + if (remoteId.getAddress().isUnresolved()) { + throw new UnknownHostException("unknown host: " + + remoteId.getAddress().getHostName()); + } + this.server = remoteId.getAddress(); + + UserGroupInformation ticket = remoteId.getTicket().getUGI(); + Class protocol = remoteId.getProtocol(); + this.useSasl = User.isHBaseSecurityEnabled(conf); + if (useSasl && protocol != null) { + TokenInfo tokenInfo = protocol.getAnnotation(TokenInfo.class); + if (tokenInfo != null) { + TokenSelector tokenSelector = + tokenHandlers.get(tokenInfo.value()); + if (tokenSelector != null) { + token = tokenSelector.selectToken(new Text(clusterId), + ticket.getTokens()); + } else if (LOG.isDebugEnabled()) { + LOG.debug("No token selector found for type "+tokenInfo.value()); + } + } + KerberosInfo krbInfo = protocol.getAnnotation(KerberosInfo.class); + if (krbInfo != null) { + String serverKey = krbInfo.serverPrincipal(); + if (serverKey == null) { + throw new IOException( + "Can't obtain server Kerberos config key from KerberosInfo"); + } + serverPrincipal = SecurityUtil.getServerPrincipal( + conf.get(serverKey), server.getAddress().getCanonicalHostName().toLowerCase()); + if (LOG.isDebugEnabled()) { + LOG.debug("RPC Server Kerberos principal name for protocol=" + + protocol.getCanonicalName() + " is " + serverPrincipal); + } + } + } + + if (!useSasl) { + authMethod = AuthMethod.SIMPLE; + } else if (token != null) { + authMethod = AuthMethod.DIGEST; + } else { + authMethod = AuthMethod.KERBEROS; + } + + if (LOG.isDebugEnabled()) + LOG.debug("Use " + authMethod + " authentication for protocol " + + protocol.getSimpleName()); + + reloginMaxBackoff = 
conf.getInt("hbase.security.relogin.maxbackoff", 5000); + this.remoteId = remoteId; + + ConnectionHeader.Builder builder = ConnectionHeader.newBuilder(); + builder.setProtocol(protocol == null ? "" : protocol.getName()); + UserInformation userInfoPB; + if ((userInfoPB = getUserInfoPB(ticket)) != null) { + builder.setUserInfo(userInfoPB); + } + this.header = builder.build(); + + this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " + + remoteId.getAddress().toString() + + ((ticket==null)?" from an unknown user": (" from " + + ticket.getUserName()))); + this.setDaemon(true); + } + + private UserInformation getUserInfoPB(UserGroupInformation ugi) { + if (ugi == null || authMethod == AuthMethod.DIGEST) { + // Don't send user for token auth + return null; + } + UserInformation.Builder userInfoPB = UserInformation.newBuilder(); + if (ugi != null) { + if (authMethod == AuthMethod.KERBEROS) { + // Send effective user for Kerberos auth + userInfoPB.setEffectiveUser(ugi.getUserName()); + } else if (authMethod == AuthMethod.SIMPLE) { + //Send both effective user and real user for simple auth + userInfoPB.setEffectiveUser(ugi.getUserName()); + if (ugi.getRealUser() != null) { + userInfoPB.setRealUser(ugi.getRealUser().getUserName()); + } + } + } + return userInfoPB.build(); + } + + /** Update lastActivity with the current time. */ + protected void touch() { + lastActivity.set(System.currentTimeMillis()); + } + + /** + * Add a call to this connection's call queue and notify + * a listener; synchronized. If the connection is dead, the call is not added, and the + * caller is notified. + * This function can return a connection that is already marked as 'shouldCloseConnection' + * It is up to the user code to check this status. + * @param call to add + */ + protected synchronized void addCall(Call call) { + // If the connection is about to close, we manage this as if the call was already added + // to the connection calls list. If not, the connection creations are serialized, as + // mentioned in HBASE-6364 + if (this.shouldCloseConnection.get()) { + if (this.closeException == null) { + call.setException(new IOException( + "Call " + call.id + " not added as the connection " + remoteId + " is closing")); + } else { + call.setException(this.closeException); + } + synchronized (call) { + call.notifyAll(); + } + } else { + calls.put(call.id, call); + notify(); + } + } + + /** This class sends a ping to the remote side when timeout on + * reading. If no failure is detected, it retries until at least + * a byte is read. + */ + protected class PingInputStream extends FilterInputStream { + /* constructor */ + protected PingInputStream(InputStream in) { + super(in); + } + + /* Process timeout exception + * if the connection is not going to be closed, send a ping. + * otherwise, throw the timeout exception. + */ + private void handleTimeout(SocketTimeoutException e) throws IOException { + if (shouldCloseConnection.get() || !running.get() || + remoteId.rpcTimeout > 0) { + throw e; + } + sendPing(); + } + + /** Read a byte from the stream. + * Send a ping if timeout on read. Retries if no failure is detected + * until a byte is read. + * @throws IOException for any IO problem other than socket timeout + */ + @Override + public int read() throws IOException { + do { + try { + return super.read(); + } catch (SocketTimeoutException e) { + handleTimeout(e); + } + } while (true); + } + + /** Read bytes into a buffer starting from offset off + * Send a ping if timeout on read. 
Retries if no failure is detected + * until a byte is read. + * + * @return the total number of bytes read; -1 if the connection is closed. + */ + @Override + public int read(byte[] buf, int off, int len) throws IOException { + do { + try { + return super.read(buf, off, len); + } catch (SocketTimeoutException e) { + handleTimeout(e); + } + } while (true); + } + } + + protected synchronized void setupConnection() throws IOException { + short ioFailures = 0; + short timeoutFailures = 0; + while (true) { + try { + this.socket = socketFactory.createSocket(); + this.socket.setTcpNoDelay(tcpNoDelay); + this.socket.setKeepAlive(tcpKeepAlive); + // connection time out is 20s + NetUtils.connect(this.socket, remoteId.getAddress(), + getSocketTimeout(conf)); + if (remoteId.rpcTimeout > 0) { + pingInterval = remoteId.rpcTimeout; // overwrite pingInterval + } + this.socket.setSoTimeout(pingInterval); + return; + } catch (SocketTimeoutException toe) { + /* The max number of retries is 45, + * which amounts to 20s*45 = 15 minutes retries. + */ + handleConnectionFailure(timeoutFailures++, maxRetries, toe); + } catch (IOException ie) { + handleConnectionFailure(ioFailures++, maxRetries, ie); + } + } + } + + protected void closeConnection() { + // close the current connection + if (socket != null) { + try { + socket.close(); + } catch (IOException e) { + LOG.warn("Not able to close a socket", e); + } + } + // set socket to null so that the next call to setupIOstreams + // can start the process of connect all over again. + socket = null; + } + + /** + * Handle connection failures + * + * If the current number of retries is equal to the max number of retries, + * stop retrying and throw the exception; Otherwise backoff N seconds and + * try connecting again. + * + * This Method is only called from inside setupIOstreams(), which is + * synchronized. Hence the sleep is synchronized; the locks will be retained. + * + * @param curRetries current number of retries + * @param maxRetries max number of retries allowed + * @param ioe failure reason + * @throws IOException if max number of retries is reached + */ + private void handleConnectionFailure( + int curRetries, int maxRetries, IOException ioe) throws IOException { + + closeConnection(); + + // throw the exception if the maximum number of retries is reached + if (curRetries >= maxRetries) { + throw ioe; + } + + // otherwise back off and retry + try { + Thread.sleep(failureSleep); + } catch (InterruptedException ignored) {} + + LOG.info("Retrying connect to server: " + remoteId.getAddress() + + " after sleeping " + failureSleep + "ms. Already tried " + curRetries + + " time(s)."); + } + + /* wait till someone signals us to start reading RPC response or + * it is idle too long, it is marked as to be closed, + * or the client is marked as not running. + * + * Return true if it is time to read a response; false otherwise. 
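setupConnection and handleConnectionFailure above implement a bounded retry: close the socket, give up once maxRetries is exhausted, otherwise sleep and try again. A hedged sketch of the same shape, detached from sockets:

import java.io.IOException;
import java.util.concurrent.Callable;

final class BoundedRetrySketch {
  // Run the action until it succeeds, sleeping between failures and rethrowing
  // the last IOException once the retry budget is spent.
  static <T> T retry(Callable<T> action, int maxRetries, long sleepMillis)
      throws Exception {
    int failures = 0;
    while (true) {
      try {
        return action.call();
      } catch (IOException e) {
        if (failures++ >= maxRetries) {
          throw e;
        }
        Thread.sleep(sleepMillis);
      }
    }
  }
}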
+ */ + protected synchronized boolean waitForWork() { + if (calls.isEmpty() && !shouldCloseConnection.get() && running.get()) { + long timeout = maxIdleTime- + (System.currentTimeMillis()-lastActivity.get()); + if (timeout>0) { + try { + wait(timeout); + } catch (InterruptedException ignored) {} + } + } + + if (!calls.isEmpty() && !shouldCloseConnection.get() && running.get()) { + return true; + } else if (shouldCloseConnection.get()) { + return false; + } else if (calls.isEmpty()) { // idle connection closed or stopped + markClosed(null); + return false; + } else { // get stopped but there are still pending requests + markClosed((IOException)new IOException().initCause( + new InterruptedException())); + return false; + } + } + + public InetSocketAddress getRemoteAddress() { + return remoteId.getAddress(); + } + + /* Send a ping to the server if the time elapsed + * since last I/O activity is equal to or greater than the ping interval + */ + protected synchronized void sendPing() throws IOException { + long curTime = System.currentTimeMillis(); + if ( curTime - lastActivity.get() >= pingInterval) { + lastActivity.set(curTime); + //noinspection SynchronizeOnNonFinalField + synchronized (this.out) { + out.writeInt(PING_CALL_ID); + out.flush(); + } + } + } + + @Override + public void run() { + if (LOG.isDebugEnabled()) + LOG.debug(getName() + ": starting, having connections " + + connections.size()); + + try { + while (waitForWork()) {//wait here for work - read or close connection + receiveResponse(); + } + } catch (Throwable t) { + LOG.warn("Unexpected exception receiving call responses", t); + markClosed(new IOException("Unexpected exception receiving call responses", t)); + } + + close(); + + if (LOG.isDebugEnabled()) + LOG.debug(getName() + ": stopped, remaining connections " + + connections.size()); + } + + private synchronized void disposeSasl() { + if (saslRpcClient != null) { + try { + saslRpcClient.dispose(); + saslRpcClient = null; + } catch (IOException ioe) { + LOG.error("Error disposing of SASL client", ioe); + } + } + } + + private synchronized boolean shouldAuthenticateOverKrb() throws IOException { + UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); + UserGroupInformation currentUser = + UserGroupInformation.getCurrentUser(); + UserGroupInformation realUser = currentUser.getRealUser(); + return authMethod == AuthMethod.KERBEROS && + loginUser != null && + //Make sure user logged in using Kerberos either keytab or TGT + loginUser.hasKerberosCredentials() && + // relogin only in case it is the login user (e.g. JT) + // or superuser (like oozie). + (loginUser.equals(currentUser) || loginUser.equals(realUser)); + } + + private synchronized boolean setupSaslConnection(final InputStream in2, + final OutputStream out2) throws IOException { + saslRpcClient = new HBaseSaslRpcClient(authMethod, token, serverPrincipal); + return saslRpcClient.saslConnect(in2, out2); + } + + /** + * If multiple clients with the same principal try to connect + * to the same server at the same time, the server assumes a + * replay attack is in progress. This is a feature of kerberos. + * In order to work around this, what is done is that the client + * backs off randomly and tries to initiate the connection + * again. + * The other problem is to do with ticket expiry. To handle that, + * a relogin is attempted. + *
<p>
      + * The retry logic is governed by the {@link #shouldAuthenticateOverKrb} + * method. In case when the user doesn't have valid credentials, we don't + * need to retry (from cache or ticket). In such cases, it is prudent to + * throw a runtime exception when we receive a SaslException from the + * underlying authentication implementation, so there is no retry from + * other high level (for eg, HCM or HBaseAdmin). + *
</p>
      + */ + private synchronized void handleSaslConnectionFailure( + final int currRetries, + final int maxRetries, final Exception ex, final Random rand, + final UserGroupInformation user) + throws IOException, InterruptedException{ + user.doAs(new PrivilegedExceptionAction() { + public Object run() throws IOException, InterruptedException { + closeConnection(); + if (shouldAuthenticateOverKrb()) { + if (currRetries < maxRetries) { + LOG.debug("Exception encountered while connecting to " + + "the server : " + ex); + //try re-login + if (UserGroupInformation.isLoginKeytabBased()) { + UserGroupInformation.getLoginUser().reloginFromKeytab(); + } else { + UserGroupInformation.getLoginUser().reloginFromTicketCache(); + } + disposeSasl(); + //have granularity of milliseconds + //we are sleeping with the Connection lock held but since this + //connection instance is being used for connecting to the server + //in question, it is okay + Thread.sleep((rand.nextInt(reloginMaxBackoff) + 1)); + return null; + } else { + String msg = "Couldn't setup connection for " + + UserGroupInformation.getLoginUser().getUserName() + + " to " + serverPrincipal; + LOG.warn(msg); + throw (IOException) new IOException(msg).initCause(ex); + } + } else { + LOG.warn("Exception encountered while connecting to " + + "the server : " + ex); + } + if (ex instanceof RemoteException) { + throw (RemoteException)ex; + } + if (ex instanceof SaslException) { + String msg = "SASL authentication failed." + + " The most likely cause is missing or invalid credentials." + + " Consider 'kinit'."; + LOG.fatal(msg, ex); + throw new RuntimeException(msg, ex); + } + throw new IOException(ex); + } + }); + } + + protected synchronized void setupIOstreams() + throws IOException, InterruptedException { + if (socket != null || shouldCloseConnection.get()) { + return; + } + + if (failedServers.isFailedServer(remoteId.getAddress())) { + if (LOG.isDebugEnabled()) { + LOG.debug("Not trying to connect to " + server + + " this server is in the failed servers list"); + } + IOException e = new FailedServerException( + "This server is in the failed servers list: " + server); + markClosed(e); + close(); + throw e; + } + + try { + if (LOG.isDebugEnabled()) { + LOG.debug("Connecting to "+server); + } + short numRetries = 0; + final short MAX_RETRIES = 5; + Random rand = null; + while (true) { + setupConnection(); + InputStream inStream = NetUtils.getInputStream(socket); + OutputStream outStream = NetUtils.getOutputStream(socket); + writeRpcHeader(outStream); + if (useSasl) { + final InputStream in2 = inStream; + final OutputStream out2 = outStream; + UserGroupInformation ticket = remoteId.getTicket().getUGI(); + if (authMethod == AuthMethod.KERBEROS) { + if (ticket != null && ticket.getRealUser() != null) { + ticket = ticket.getRealUser(); + } + } + boolean continueSasl = false; + try { + continueSasl = + ticket.doAs(new PrivilegedExceptionAction() { + @Override + public Boolean run() throws IOException { + return setupSaslConnection(in2, out2); + } + }); + } catch (Exception ex) { + if (rand == null) { + rand = new Random(); + } + handleSaslConnectionFailure(numRetries++, MAX_RETRIES, ex, rand, + ticket); + continue; + } + if (continueSasl) { + // Sasl connect is successful. Let's set up Sasl i/o streams. + inStream = saslRpcClient.getInputStream(inStream); + outStream = saslRpcClient.getOutputStream(outStream); + } else { + // fall back to simple auth because server told us so. 
+ authMethod = AuthMethod.SIMPLE; + useSasl = false; + } + } + this.in = new DataInputStream(new BufferedInputStream + (new PingInputStream(inStream))); + this.out = new DataOutputStream + (new BufferedOutputStream(outStream)); + writeHeader(); + + // update last activity time + touch(); + + // start the receiver thread after the socket connection has been set up + start(); + return; + } + } catch (IOException e) { + failedServers.addToFailedServers(remoteId.address); + markClosed(e); + close(); + + throw e; + } + } + + /* Write the RPC header */ + private void writeRpcHeader(OutputStream outStream) throws IOException { + DataOutputStream out = new DataOutputStream(new BufferedOutputStream(outStream)); + // Write out the header, version and authentication method + out.write(HEADER.array()); + out.write(CURRENT_VERSION); + authMethod.write(out); + out.flush(); + } + + /** + * Write the protocol header for each connection + * Out is not synchronized because only the first thread does this. + */ + private void writeHeader() throws IOException { + // Write out the ConnectionHeader + out.writeInt(header.getSerializedSize()); + header.writeTo(out); + } + + /** Close the connection. */ + protected synchronized void close() { + if (!shouldCloseConnection.get()) { + LOG.error("The connection is not in the closed state"); + return; + } + + // release the resources + // first thing to do;take the connection out of the connection list + synchronized (connections) { + if (connections.get(remoteId) == this) { + connections.remove(remoteId); + } + } + + // close the streams and therefore the socket + IOUtils.closeStream(out); + IOUtils.closeStream(in); + disposeSasl(); + + // clean up all calls + if (closeException == null) { + if (!calls.isEmpty()) { + LOG.warn( + "A connection is closed for no cause and calls are not empty. " + + "#Calls: " + calls.size()); + + // clean up calls anyway + closeException = new IOException("Unexpected closed connection"); + cleanupCalls(); + } + } else { + // log the info + if (LOG.isDebugEnabled()) { + LOG.debug("closing ipc connection to " + server + ": " + + closeException.getMessage(),closeException); + } + + // cleanup calls + cleanupCalls(); + } + if (LOG.isDebugEnabled()) + LOG.debug(getName() + ": closed"); + } + + /* Initiates a call by sending the parameter to the remote server. + * Note: this is not called from the Connection thread, but by other + * threads. 
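Every new connection starts with the preamble written by writeRpcHeader above: the four magic bytes "hrpc", the RPC version, and a one-byte authentication code, followed later by the varint-delimited ConnectionHeader. A hedged sketch that only materialises the fixed-size part of that preamble; the byte shown for SIMPLE auth is an assumption for illustration, not taken from this patch:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class RpcPreambleSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.write("hrpc".getBytes("UTF-8")); // HBaseClient.HEADER magic
    out.write(5);                        // CURRENT_VERSION in this patch
    out.write(80);                       // assumed byte code for AuthMethod.SIMPLE
    out.flush();
    System.out.println("preamble is " + buf.size() + " bytes"); // prints 6
  }
}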
+ */ + protected void sendParam(Call call) { + if (shouldCloseConnection.get()) { + return; + } + try { + if (LOG.isDebugEnabled()) + LOG.debug(getName() + " sending #" + call.id); + + RpcRequestHeader.Builder headerBuilder = RPCProtos.RpcRequestHeader.newBuilder(); + headerBuilder.setCallId(call.id); + + if (Trace.isTracing()) { + Span s = Trace.currentTrace(); + headerBuilder.setTinfo(RPCTInfo.newBuilder() + .setParentId(s.getSpanId()) + .setTraceId(s.getTraceId())); + } + + //noinspection SynchronizeOnNonFinalField + synchronized (this.out) { // FindBugs IS2_INCONSISTENT_SYNC + RpcRequestHeader header = headerBuilder.build(); + int serializedHeaderSize = header.getSerializedSize(); + int requestSerializedSize = call.param.getSerializedSize(); + this.out.writeInt(serializedHeaderSize + + CodedOutputStream.computeRawVarint32Size(serializedHeaderSize) + + requestSerializedSize + + CodedOutputStream.computeRawVarint32Size(requestSerializedSize)); + header.writeDelimitedTo(this.out); + call.param.writeDelimitedTo(this.out); + this.out.flush(); + } + } catch(IOException e) { + markClosed(e); + } + } + + + private Method getMethod(Class protocol, + String methodName) { + Method method = methodInstances.get(methodName); + if (method != null) { + return method; + } + Method[] methods = protocol.getMethods(); + for (Method m : methods) { + if (m.getName().equals(methodName)) { + m.setAccessible(true); + methodInstances.put(methodName, m); + return m; + } + } + return null; + } + + /* Receive a response. + * Because only one receiver, so no synchronization on in. + */ + protected void receiveResponse() { + if (shouldCloseConnection.get()) { + return; + } + touch(); + + try { + // See HBaseServer.Call.setResponse for where we write out the response. + // It writes the call.id (int), a boolean signifying any error (and if + // so the exception name/trace), and the response bytes + + // Read the call id. + RpcResponseHeader response = RpcResponseHeader.parseDelimitedFrom(in); + if (response == null) { + // When the stream is closed, protobuf doesn't raise an EOFException, + // instead, it returns a null message object. + throw new EOFException(); + } + int id = response.getCallId(); + + if (LOG.isDebugEnabled()) + LOG.debug(getName() + " got value #" + id); + Call call = calls.get(id); + + Status status = response.getStatus(); + if (status == Status.SUCCESS) { + Message rpcResponseType; + try { + rpcResponseType = ProtobufRpcClientEngine.Invoker.getReturnProtoType( + getMethod(remoteId.getProtocol(), + call.param.getMethodName())); + } catch (Exception e) { + throw new RuntimeException(e); //local exception + } + Builder builder = rpcResponseType.newBuilderForType(); + builder.mergeDelimitedFrom(in); + Message value = builder.build(); + // it's possible that this call may have been cleaned up due to a RPC + // timeout, so check if it still exists before setting the value. 
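The length prefix computed in sendParam above has to cover both delimited messages, the request header and the request body, including their own varint delimiters. A hedged sketch of the same arithmetic for any two protobuf messages:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import com.google.protobuf.CodedOutputStream;
import com.google.protobuf.Message;

final class RequestFramingSketch {
  // Frame two messages the way sendParam does: a 4-byte total length, then each
  // message written with its own varint length delimiter.
  static byte[] frame(Message header, Message body) throws IOException {
    int headerSize = header.getSerializedSize();
    int bodySize = body.getSerializedSize();
    int total = headerSize + CodedOutputStream.computeRawVarint32Size(headerSize)
        + bodySize + CodedOutputStream.computeRawVarint32Size(bodySize);

    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.writeInt(total);
    header.writeDelimitedTo(out);
    body.writeDelimitedTo(out);
    out.flush();
    return buf.toByteArray();
  }
}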
+ if (call != null) { + call.setValue(value); + } + calls.remove(id); + } else if (status == Status.ERROR) { + RpcException exceptionResponse = RpcException.parseDelimitedFrom(in); + if (call != null) { + //noinspection ThrowableInstanceNeverThrown + call.setException(new RemoteException( + exceptionResponse.getExceptionName(), + exceptionResponse.getStackTrace())); + calls.remove(id); + } + } else if (status == Status.FATAL) { + RpcException exceptionResponse = RpcException.parseDelimitedFrom(in); + // Close the connection + markClosed(new RemoteException( + exceptionResponse.getExceptionName(), + exceptionResponse.getStackTrace())); + } + } catch (IOException e) { + if (e instanceof SocketTimeoutException && remoteId.rpcTimeout > 0) { + // Clean up open calls but don't treat this as a fatal condition, + // since we expect certain responses to not make it by the specified + // {@link ConnectionId#rpcTimeout}. + closeException = e; + } else { + // Since the server did not respond within the default ping interval + // time, treat this as a fatal condition and close this connection + markClosed(e); + } + } finally { + if (remoteId.rpcTimeout > 0) { + cleanupCalls(remoteId.rpcTimeout); + } + } + } + + protected synchronized void markClosed(IOException e) { + if (shouldCloseConnection.compareAndSet(false, true)) { + closeException = e; + notifyAll(); + } + } + + /* Cleanup all calls and mark them as done */ + protected void cleanupCalls() { + cleanupCalls(0); + } + + protected void cleanupCalls(long rpcTimeout) { + Iterator> itor = calls.entrySet().iterator(); + while (itor.hasNext()) { + Call c = itor.next().getValue(); + long waitTime = System.currentTimeMillis() - c.getStartTime(); + if (waitTime >= rpcTimeout) { + if (this.closeException == null) { + // There may be no exception in the case that there are many calls + // being multiplexed over this connection and these are succeeding + // fine while this Call object is taking a long time to finish + // over on the server; e.g. I just asked the regionserver to bulk + // open 3k regions or its a big fat multiput into a heavily-loaded + // server (Perhaps this only happens at the extremes?) + this.closeException = new CallTimeoutException("Call id=" + c.id + + ", waitTime=" + waitTime + ", rpcTimetout=" + rpcTimeout); + } + c.setException(this.closeException); + synchronized (c) { + c.notifyAll(); + } + itor.remove(); + } else { + break; + } + } + try { + if (!calls.isEmpty()) { + Call firstCall = calls.get(calls.firstKey()); + long maxWaitTime = System.currentTimeMillis() - firstCall.getStartTime(); + if (maxWaitTime < rpcTimeout) { + rpcTimeout -= maxWaitTime; + } + } + if (!shouldCloseConnection.get()) { + closeException = null; + if (socket != null) { + socket.setSoTimeout((int) rpcTimeout); + } + } + } catch (SocketException e) { + LOG.debug("Couldn't lower timeout, which may result in longer than expected calls"); + } + } + } + + /** + * Client-side call timeout + */ + public static class CallTimeoutException extends IOException { + public CallTimeoutException(final String msg) { + super(msg); + } + } + + /** Call implementation used for parallel calls. */ + protected class ParallelCall extends Call { + private final ParallelResults results; + protected final int index; + + public ParallelCall(RpcRequestBody param, ParallelResults results, int index) { + super(param); + this.results = results; + this.index = index; + } + + /** Deliver result to result collector. 
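Server-side failures surface to the caller as org.apache.hadoop.ipc.RemoteException, built above from the exception name and stack trace carried in the RpcException message. A hedged sketch of what such an exception looks like to client code; the class name string is only an example:

import java.io.IOException;

import org.apache.hadoop.ipc.RemoteException;

public class RemoteExceptionDemo {
  public static void main(String[] args) {
    IOException fromServer = new RemoteException(
        "org.apache.hadoop.hbase.NotServingRegionException", "region is not online");
    // The original class survives only as a string; callers match on getClassName().
    System.out.println(((RemoteException) fromServer).getClassName());
    System.out.println(fromServer.getMessage());
  }
}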
*/ + @Override + protected void callComplete() { + results.callComplete(this); + } + } + + /** Result collector for parallel calls. */ + protected static class ParallelResults { + protected final Message[] values; + protected int size; + protected int count; + + public ParallelResults(int size) { + this.values = new RpcResponseBody[size]; + this.size = size; + } + + /* + * Collect a result. + */ + synchronized void callComplete(ParallelCall call) { + // FindBugs IS2_INCONSISTENT_SYNC + values[call.index] = call.value; // store the value + count++; // count it + if (count == size) // if all values are in + notify(); // then notify waiting caller + } + } + + /** + * Construct an IPC client whose values are of the {@link Message} + * class. + * @param conf configuration + * @param factory socket factory + */ + public HBaseClient(Configuration conf, SocketFactory factory) { + this.maxIdleTime = + conf.getInt("hbase.ipc.client.connection.maxidletime", 10000); //10s + this.maxRetries = conf.getInt("hbase.ipc.client.connect.max.retries", 0); + this.failureSleep = conf.getInt("hbase.client.pause", 1000); + this.tcpNoDelay = conf.getBoolean("hbase.ipc.client.tcpnodelay", true); + this.tcpKeepAlive = conf.getBoolean("hbase.ipc.client.tcpkeepalive", true); + this.pingInterval = getPingInterval(conf); + if (LOG.isDebugEnabled()) { + LOG.debug("The ping interval is" + this.pingInterval + "ms."); + } + this.conf = conf; + this.socketFactory = factory; + this.clusterId = conf.get(HConstants.CLUSTER_ID, "default"); + this.connections = new PoolMap( + getPoolType(conf), getPoolSize(conf)); + this.failedServers = new FailedServers(conf); + } + + /** + * Construct an IPC client with the default SocketFactory + * @param conf configuration + */ + public HBaseClient(Configuration conf) { + this(conf, NetUtils.getDefaultSocketFactory(conf)); + } + + /** + * Return the pool type specified in the configuration, which must be set to + * either {@link PoolType#RoundRobin} or {@link PoolType#ThreadLocal}, + * otherwise default to the former. + * + * For applications with many user threads, use a small round-robin pool. For + * applications with few user threads, you may want to try using a + * thread-local pool. In any case, the number of {@link HBaseClient} instances + * should not exceed the operating system's hard limit on the number of + * connections. + * + * @param config configuration + * @return either a {@link PoolType#RoundRobin} or + * {@link PoolType#ThreadLocal} + */ + protected static PoolType getPoolType(Configuration config) { + return PoolType.valueOf(config.get(HConstants.HBASE_CLIENT_IPC_POOL_TYPE), + PoolType.RoundRobin, PoolType.ThreadLocal); + } + + /** + * Return the pool size specified in the configuration, which is applicable only if + * the pool type is {@link PoolType#RoundRobin}. + * + * @param config + * @return the maximum pool size + */ + protected static int getPoolSize(Configuration config) { + return config.getInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1); + } + + /** Return the socket factory of this client + * + * @return this client's socket factory + */ + SocketFactory getSocketFactory() { + return socketFactory; + } + + /** Stop all threads related to this client. No further calls may be made + * using this client. 
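getPoolType and getPoolSize above make the connection pool tunable purely through HConstants keys. A hedged configuration sketch; the "ThreadLocal" string is assumed to match the PoolType constant name and is shown only for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;

public class IpcPoolConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Ask for a thread-local pool; anything unparseable falls back to RoundRobin.
    conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "ThreadLocal");
    conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 10);
    System.out.println(conf.get(HConstants.HBASE_CLIENT_IPC_POOL_TYPE));
  }
}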
*/ + public void stop() { + if (LOG.isDebugEnabled()) { + LOG.debug("Stopping client"); + } + + if (!running.compareAndSet(true, false)) { + return; + } + + // wake up all connections + synchronized (connections) { + for (Connection conn : connections.values()) { + conn.interrupt(); + } + } + + // wait until all connections are closed + while (!connections.isEmpty()) { + try { + Thread.sleep(100); + } catch (InterruptedException ignored) { + } + } + } + + /** Make a call, passing param, to the IPC server running at + * address, returning the value. Throws exceptions if there are + * network problems or if the remote code threw an exception. + * @param param RpcRequestBody parameter + * @param address network address + * @return Message + * @throws IOException e + */ + public Message call(RpcRequestBody param, InetSocketAddress address) + throws IOException, InterruptedException { + return call(param, address, null, 0); + } + + public Message call(RpcRequestBody param, InetSocketAddress addr, + User ticket, int rpcTimeout) + throws IOException, InterruptedException { + return call(param, addr, null, ticket, rpcTimeout); + } + + /** Make a call, passing param, to the IPC server running at + * address which is servicing the protocol protocol, + * with the ticket credentials, returning the value. + * Throws exceptions if there are network problems or if the remote code + * threw an exception. */ + public Message call(RpcRequestBody param, InetSocketAddress addr, + Class protocol, + User ticket, int rpcTimeout) + throws InterruptedException, IOException { + Call call = new Call(param); + Connection connection = getConnection(addr, protocol, ticket, rpcTimeout, call); + connection.sendParam(call); // send the parameter + boolean interrupted = false; + //noinspection SynchronizationOnLocalVariableOrMethodParameter + synchronized (call) { + while (!call.done) { + try { + call.wait(); // wait for the result + } catch (InterruptedException ignored) { + // save the fact that we were interrupted + interrupted = true; + } + } + + if (interrupted) { + // set the interrupt flag now that we are done waiting + Thread.currentThread().interrupt(); + } + + if (call.error != null) { + if (call.error instanceof RemoteException) { + call.error.fillInStackTrace(); + throw call.error; + } + // local exception + throw wrapException(addr, call.error); + } + return call.value; + } + } + + /** + * Take an IOException and the address we were trying to connect to + * and return an IOException with the input exception as the cause. + * The new exception provides the stack trace of the place where + * the exception is thrown and some extra diagnostics information. + * If the exception is ConnectException or SocketTimeoutException, + * return a new one of the same type; Otherwise return an IOException. 
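A detail worth calling out in call() above: when the waiting thread is interrupted it does not abandon the in-flight RPC; it records the interruption, keeps waiting for the result, and re-asserts the interrupt flag before returning. A hedged restatement of just that pattern:

final class InterruptPreservingWait {
  private boolean done;

  // Completion side: flip the flag and wake the waiter.
  synchronized void markDone() {
    done = true;
    notifyAll();
  }

  // Caller side: swallow InterruptedException while waiting, then hand the
  // interrupt back to the caller once the wait is over.
  synchronized void awaitDone() {
    boolean interrupted = false;
    while (!done) {
      try {
        wait();
      } catch (InterruptedException ie) {
        interrupted = true;
      }
    }
    if (interrupted) {
      Thread.currentThread().interrupt();
    }
  }
}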
+ * + * @param addr target address + * @param exception the relevant exception + * @return an exception to throw + */ + @SuppressWarnings({"ThrowableInstanceNeverThrown"}) + protected IOException wrapException(InetSocketAddress addr, + IOException exception) { + if (exception instanceof ConnectException) { + //connection refused; include the host:port in the error + return (ConnectException)new ConnectException( + "Call to " + addr + " failed on connection exception: " + exception) + .initCause(exception); + } else if (exception instanceof SocketTimeoutException) { + return (SocketTimeoutException)new SocketTimeoutException( + "Call to " + addr + " failed on socket timeout exception: " + + exception).initCause(exception); + } else { + return (IOException)new IOException( + "Call to " + addr + " failed on local exception: " + exception) + .initCause(exception); + + } + } + + /** Makes a set of calls in parallel. Each parameter is sent to the + * corresponding address. When all values are available, or have timed out + * or errored, the collected results are returned in an array. The array + * contains nulls for calls that timed out or errored. + * @param params RpcRequestBody parameters + * @param addresses socket addresses + * @return RpcResponseBody[] + * @throws IOException e + * @deprecated Use {@code #call(RpcRequestBody[], InetSocketAddress[], Class, User)} instead + */ + @Deprecated + public Message[] call(RpcRequestBody[] params, InetSocketAddress[] addresses) + throws IOException, InterruptedException { + return call(params, addresses, null, null); + } + + /** Makes a set of calls in parallel. Each parameter is sent to the + * corresponding address. When all values are available, or have timed out + * or errored, the collected results are returned in an array. The array + * contains nulls for calls that timed out or errored. */ + public Message[] call(RpcRequestBody[] params, InetSocketAddress[] addresses, + Class protocol, + User ticket) + throws IOException, InterruptedException { + if (addresses.length == 0) return new RpcResponseBody[0]; + + ParallelResults results = new ParallelResults(params.length); + // TODO this synchronization block doesnt make any sense, we should possibly fix it + //noinspection SynchronizationOnLocalVariableOrMethodParameter + synchronized (results) { + for (int i = 0; i < params.length; i++) { + ParallelCall call = new ParallelCall(params[i], results, i); + try { + Connection connection = + getConnection(addresses[i], protocol, ticket, 0, call); + connection.sendParam(call); // send each parameter + } catch (IOException e) { + // log errors + LOG.info("Calling "+addresses[i]+" caught: " + + e.getMessage(),e); + results.size--; // wait for one fewer result + } + } + while (results.count != results.size) { + try { + results.wait(); // wait for all results + } catch (InterruptedException ignored) {} + } + + return results.values; + } + } + + /* Get a connection from the pool, or create a new one and add it to the + * pool. Connections to a given host/port are reused. */ + protected Connection getConnection(InetSocketAddress addr, + Class protocol, + User ticket, + int rpcTimeout, + Call call) + throws IOException, InterruptedException { + if (!running.get()) { + // the client is stopped + throw new IOException("The client is stopped"); + } + Connection connection; + /* we could avoid this allocation for each RPC by having a + * connectionsId object and with set() method. We need to manage the + * refs for keys in HashMap properly. For now its ok. 
+ */ + ConnectionId remoteId = new ConnectionId(addr, protocol, ticket, rpcTimeout); + synchronized (connections) { + connection = connections.get(remoteId); + if (connection == null) { + connection = createConnection(remoteId); + connections.put(remoteId, connection); + } + } + connection.addCall(call); + + //we don't invoke the method below inside "synchronized (connections)" + //block above. The reason for that is if the server happens to be slow, + //it will take longer to establish a connection and that will slow the + //entire system down. + //Moreover, if the connection is currently created, there will be many threads + // waiting here; as setupIOstreams is synchronized. If the connection fails with a + // timeout, they will all fail simultaneously. This is checked in setupIOstreams. + connection.setupIOstreams(); + return connection; + } + + /** + * This class holds the address and the user ticket. The client connections + * to servers are uniquely identified by + */ + protected static class ConnectionId { + final InetSocketAddress address; + final User ticket; + final int rpcTimeout; + Class protocol; + private static final int PRIME = 16777619; + + ConnectionId(InetSocketAddress address, + Class protocol, + User ticket, + int rpcTimeout) { + this.protocol = protocol; + this.address = address; + this.ticket = ticket; + this.rpcTimeout = rpcTimeout; + } + + InetSocketAddress getAddress() { + return address; + } + + Class getProtocol() { + return protocol; + } + + User getTicket() { + return ticket; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof ConnectionId) { + ConnectionId id = (ConnectionId) obj; + return address.equals(id.address) && protocol == id.protocol && + ((ticket != null && ticket.equals(id.ticket)) || + (ticket == id.ticket)) && rpcTimeout == id.rpcTimeout; + } + return false; + } + + @Override // simply use the default Object#hashcode() ? + public int hashCode() { + return (address.hashCode() + PRIME * ( + PRIME * System.identityHashCode(protocol) ^ + (ticket == null ? 0 : ticket.hashCode()) )) ^ rpcTimeout; + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClientRPC.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClientRPC.java new file mode 100644 index 0000000..1b4f20b --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClientRPC.java @@ -0,0 +1,294 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.ipc; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.RetriesExhaustedException; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.util.ReflectionUtils; + +import javax.net.SocketFactory; +import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Proxy; +import java.net.ConnectException; +import java.net.InetSocketAddress; +import java.net.SocketTimeoutException; +import java.util.HashMap; +import java.util.Map; + +/** + * An RPC implementation. This class provides the client side implementation. + */ +@InterfaceAudience.Private +public class HBaseClientRPC { + + protected static final Log LOG = + LogFactory.getLog("org.apache.hadoop.ipc.HBaseClientRPC"); + + // cache of RpcEngines by protocol + private static final Map PROTOCOL_ENGINES + = new HashMap(); + /** + * Configuration key for the {@link org.apache.hadoop.hbase.ipc.RpcClientEngine} implementation + * to load to handle connection protocols. Handlers for individual protocols can be + * configured using {@code "hbase.rpc.client.engine." + protocol.class.name}. + */ + public static final String RPC_ENGINE_PROP = "hbase.rpc.client.engine"; + // track what RpcEngine is used by a proxy class, for stopProxy() + private static final Map PROXY_ENGINES + = new HashMap(); + // thread-specific RPC timeout, which may override that of RpcEngine + private static ThreadLocal rpcTimeout = new ThreadLocal() { + @Override + protected Integer initialValue() { + return HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT; + } + }; + + static long getProtocolVersion(Class protocol) + throws NoSuchFieldException, IllegalAccessException { + Field versionField = protocol.getField("VERSION"); + versionField.setAccessible(true); + return versionField.getLong(protocol); + } + + // set a protocol to use a non-default RpcEngine + static void setProtocolEngine(Configuration conf, + Class protocol, Class engine) { + conf.setClass(RPC_ENGINE_PROP + "." + protocol.getName(), engine, RpcClientEngine.class); + } + + // return the RpcEngine configured to handle a protocol + static synchronized RpcClientEngine getProtocolEngine(Class protocol, + Configuration conf) { + RpcClientEngine engine = PROTOCOL_ENGINES.get(protocol); + if (engine == null) { + // check for a configured default engine + Class defaultEngine = + conf.getClass(RPC_ENGINE_PROP, ProtobufRpcClientEngine.class); + + // check for a per interface override + Class impl = conf.getClass(RPC_ENGINE_PROP + "." 
+ protocol.getName(), + defaultEngine); + LOG.debug("Using " + impl.getName() + " for " + protocol.getName()); + engine = (RpcClientEngine) ReflectionUtils.newInstance(impl, conf); + if (protocol.isInterface()) + PROXY_ENGINES.put(Proxy.getProxyClass(protocol.getClassLoader(), + protocol), + engine); + PROTOCOL_ENGINES.put(protocol, engine); + } + return engine; + } + + // return the RpcEngine that handles a proxy object + private static synchronized RpcClientEngine getProxyEngine(Object proxy) { + return PROXY_ENGINES.get(proxy.getClass()); + } + + /** + * @param protocol protocol interface + * @param clientVersion which client version we expect + * @param addr address of remote service + * @param conf configuration + * @param maxAttempts max attempts + * @param rpcTimeout timeout for each RPC + * @param timeout timeout in milliseconds + * @return proxy + * @throws java.io.IOException e + */ + @SuppressWarnings("unchecked") + public static VersionedProtocol waitForProxy(Class protocol, + long clientVersion, + InetSocketAddress addr, + Configuration conf, + int maxAttempts, + int rpcTimeout, + long timeout + ) throws IOException { + // HBase does limited number of reconnects which is different from hadoop. + long startTime = System.currentTimeMillis(); + IOException ioe; + int reconnectAttempts = 0; + while (true) { + try { + return getProxy(protocol, clientVersion, addr, conf, rpcTimeout); + } catch (SocketTimeoutException te) { // namenode is busy + LOG.info("Problem connecting to server: " + addr); + ioe = te; + } catch (IOException ioex) { + // We only handle the ConnectException. + ConnectException ce = null; + if (ioex instanceof ConnectException) { + ce = (ConnectException) ioex; + ioe = ce; + } else if (ioex.getCause() != null + && ioex.getCause() instanceof ConnectException) { + ce = (ConnectException) ioex.getCause(); + ioe = ce; + } else if (ioex.getMessage().toLowerCase() + .contains("connection refused")) { + ce = new ConnectException(ioex.getMessage()); + ioe = ce; + } else { + // This is the exception we can't handle. + ioe = ioex; + } + if (ce != null) { + handleConnectionException(++reconnectAttempts, maxAttempts, protocol, + addr, ce); + } + } + // check if timed out + if (System.currentTimeMillis() - timeout >= startTime) { + throw ioe; + } + + // wait for retry + try { + Thread.sleep(1000); + } catch (InterruptedException ie) { + // IGNORE + } + } + } + + /** + * @param retries current retried times. + * @param maxAttmpts max attempts + * @param protocol protocol interface + * @param addr address of remote service + * @param ce ConnectException + * @throws org.apache.hadoop.hbase.client.RetriesExhaustedException + * + */ + private static void handleConnectionException(int retries, + int maxAttmpts, + Class protocol, + InetSocketAddress addr, + ConnectException ce) + throws RetriesExhaustedException { + if (maxAttmpts >= 0 && retries >= maxAttmpts) { + LOG.info("Server at " + addr + " could not be reached after " + + maxAttmpts + " tries, giving up."); + throw new RetriesExhaustedException("Failed setting up proxy " + protocol + + " to " + addr.toString() + " after attempts=" + maxAttmpts, ce); + } + } + + /** + * Construct a client-side proxy object that implements the named protocol, + * talking to a server at the named address. 
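+ *
+ * A minimal usage sketch (the protocol, host, port and timeout below are only
+ * illustrative; it assumes the target server actually speaks AdminProtocol):
+ * <pre>
+ * Configuration conf = HBaseConfiguration.create();
+ * InetSocketAddress addr = new InetSocketAddress("rs.example.com", 60020);
+ * AdminProtocol admin = (AdminProtocol) HBaseClientRPC.getProxy(
+ *     AdminProtocol.class, AdminProtocol.VERSION, addr, conf,
+ *     NetUtils.getDefaultSocketFactory(conf), 30000);
+ * </pre>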
+ * + * @param protocol interface + * @param clientVersion version we are expecting + * @param addr remote address + * @param conf configuration + * @param factory socket factory + * @param rpcTimeout timeout for each RPC + * @return proxy + * @throws java.io.IOException e + */ + public static VersionedProtocol getProxy(Class protocol, + long clientVersion, + InetSocketAddress addr, + Configuration conf, + SocketFactory factory, + int rpcTimeout) throws IOException { + return getProxy(protocol, clientVersion, addr, + User.getCurrent(), conf, factory, rpcTimeout); + } + + /** + * Construct a client-side proxy object that implements the named protocol, + * talking to a server at the named address. + * + * @param protocol interface + * @param clientVersion version we are expecting + * @param addr remote address + * @param ticket ticket + * @param conf configuration + * @param factory socket factory + * @param rpcTimeout timeout for each RPC + * @return proxy + * @throws java.io.IOException e + */ + public static VersionedProtocol getProxy( + Class protocol, + long clientVersion, InetSocketAddress addr, User ticket, + Configuration conf, SocketFactory factory, int rpcTimeout) + throws IOException { + RpcClientEngine engine = getProtocolEngine(protocol, conf); + VersionedProtocol proxy = engine + .getProxy(protocol, clientVersion, addr, ticket, conf, factory, + Math.min(rpcTimeout, getRpcTimeout())); + return proxy; + } + + /** + * Construct a client-side proxy object with the default SocketFactory + * + * @param protocol interface + * @param clientVersion version we are expecting + * @param addr remote address + * @param conf configuration + * @param rpcTimeout timeout for each RPC + * @return a proxy instance + * @throws java.io.IOException e + */ + public static VersionedProtocol getProxy( + Class protocol, + long clientVersion, InetSocketAddress addr, Configuration conf, + int rpcTimeout) + throws IOException { + + return getProxy(protocol, clientVersion, addr, conf, NetUtils + .getDefaultSocketFactory(conf), rpcTimeout); + } + + /** + * Stop this proxy and release its invoker's resource + * + * @param proxy the proxy to be stopped + */ + public static void stopProxy(VersionedProtocol proxy) { + if (proxy != null) { + getProxyEngine(proxy).stopProxy(proxy); + } + } + + public static void setRpcTimeout(int t) { + rpcTimeout.set(t); + } + + public static int getRpcTimeout() { + return rpcTimeout.get(); + } + + public static void resetRpcTimeout() { + rpcTimeout.remove(); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java new file mode 100644 index 0000000..574647c --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java @@ -0,0 +1,219 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.ipc; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.client.AdminProtocol; +import org.apache.hadoop.hbase.client.ClientProtocol; +import org.apache.hadoop.hbase.MasterMonitorProtocol; +import org.apache.hadoop.hbase.MasterAdminProtocol; +import org.apache.hadoop.hbase.io.HbaseObjectWritable; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; +import org.apache.hadoop.hbase.RegionServerStatusProtocol; +import org.apache.hadoop.io.VersionMismatchException; +import org.apache.hadoop.io.VersionedWritable; + +/** A method invocation, including the method name and its parameters.*/ +@InterfaceAudience.Private +public class Invocation extends VersionedWritable implements Configurable { + protected String methodName; + @SuppressWarnings("rawtypes") + protected Class[] parameterClasses; + protected Object[] parameters; + protected Configuration conf; + private long clientVersion; + private int clientMethodsHash; + + // For generated protocol classes which don't have VERSION field, + // such as protobuf interfaces. + static final Map, Long> + PROTOCOL_VERSION = new HashMap, Long>(); + + static { + PROTOCOL_VERSION.put(ClientService.BlockingInterface.class, + Long.valueOf(ClientProtocol.VERSION)); + PROTOCOL_VERSION.put(AdminService.BlockingInterface.class, + Long.valueOf(AdminProtocol.VERSION)); + PROTOCOL_VERSION.put(RegionServerStatusService.BlockingInterface.class, + Long.valueOf(RegionServerStatusProtocol.VERSION)); + PROTOCOL_VERSION.put(MasterMonitorProtocol.class,Long.valueOf(MasterMonitorProtocol.VERSION)); + PROTOCOL_VERSION.put(MasterAdminProtocol.class,Long.valueOf(MasterAdminProtocol.VERSION)); + } + + // For protobuf protocols, which use ServiceException, instead of IOException + protected static final Set> + PROTOBUF_PROTOCOLS = new HashSet>(); + + static { + PROTOBUF_PROTOCOLS.add(ClientProtocol.class); + PROTOBUF_PROTOCOLS.add(AdminProtocol.class); + PROTOBUF_PROTOCOLS.add(RegionServerStatusProtocol.class); + PROTOBUF_PROTOCOLS.add(MasterMonitorProtocol.class); + PROTOBUF_PROTOCOLS.add(MasterAdminProtocol.class); + } + + private static byte RPC_VERSION = 1; + + public Invocation() {} + + public Invocation(Method method, Object[] parameters) { + this.methodName = method.getName(); + this.parameterClasses = method.getParameterTypes(); + this.parameters = parameters; + Class declaringClass = method.getDeclaringClass(); + if (declaringClass.equals(VersionedProtocol.class)) { + //VersionedProtocol is exempted from version check. 
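+ // (its methods are the bootstrap calls used to query version/signature in the
+ // first place, so requiring a version match for them would be circular)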
+ clientVersion = 0; + clientMethodsHash = 0; + } else { + try { + Long version = PROTOCOL_VERSION.get(declaringClass); + if (version != null) { + this.clientVersion = version.longValue(); + } else { + Field versionField = declaringClass.getField("VERSION"); + versionField.setAccessible(true); + this.clientVersion = versionField.getLong(declaringClass); + } + } catch (NoSuchFieldException ex) { + throw new RuntimeException("The " + declaringClass, ex); + } catch (IllegalAccessException ex) { + throw new RuntimeException(ex); + } + this.clientMethodsHash = ProtocolSignature.getFingerprint( + declaringClass.getMethods()); + } + } + + /** @return The name of the method invoked. */ + public String getMethodName() { return methodName; } + + /** @return The parameter classes. */ + @SuppressWarnings({ "rawtypes" }) + public Class[] getParameterClasses() { return parameterClasses; } + + /** @return The parameter instances. */ + public Object[] getParameters() { return parameters; } + + long getProtocolVersion() { + return clientVersion; + } + + protected int getClientMethodsHash() { + return clientMethodsHash; + } + + /** + * Returns the rpc version used by the client. + * @return rpcVersion + */ + public long getRpcVersion() { + return RPC_VERSION; + } + + public void readFields(DataInput in) throws IOException { + try { + super.readFields(in); + methodName = in.readUTF(); + clientVersion = in.readLong(); + clientMethodsHash = in.readInt(); + } catch (VersionMismatchException e) { + // VersionMismatchException doesn't provide an API to access + // expectedVersion and foundVersion. This is really sad. + if (e.toString().endsWith("found v0")) { + // Try to be a bit backwards compatible. In previous versions of + // HBase (before HBASE-3939 in 0.92) Invocation wasn't a + // VersionedWritable and thus the first thing on the wire was always + // the 2-byte length of the method name. Because no method name is + // longer than 255 characters, and all method names are in ASCII, + // The following code is equivalent to `in.readUTF()', which we can't + // call again here, because `super.readFields(in)' already consumed + // the first byte of input, which can't be "unread" back into `in'. + final short len = (short) (in.readByte() & 0xFF); // Unsigned byte. 
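+ // The high byte of the original 2-byte length was already consumed by
+ // super.readFields(in) above, so only the low (unsigned) byte is read here.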
+ final byte[] buf = new byte[len]; + in.readFully(buf, 0, len); + methodName = new String(buf); + } + } + parameters = new Object[in.readInt()]; + parameterClasses = new Class[parameters.length]; + HbaseObjectWritable objectWritable = new HbaseObjectWritable(); + for (int i = 0; i < parameters.length; i++) { + parameters[i] = HbaseObjectWritable.readObject(in, objectWritable, + this.conf); + parameterClasses[i] = objectWritable.getDeclaredClass(); + } + } + + public void write(DataOutput out) throws IOException { + super.write(out); + out.writeUTF(this.methodName); + out.writeLong(clientVersion); + out.writeInt(clientMethodsHash); + out.writeInt(parameterClasses.length); + for (int i = 0; i < parameterClasses.length; i++) { + HbaseObjectWritable.writeObject(out, parameters[i], parameterClasses[i], + conf); + } + } + + @Override + public String toString() { + StringBuilder buffer = new StringBuilder(256); + buffer.append(methodName); + buffer.append("("); + for (int i = 0; i < parameters.length; i++) { + if (i != 0) + buffer.append(", "); + buffer.append(parameters[i]); + } + buffer.append(")"); + buffer.append(", rpc version="+RPC_VERSION); + buffer.append(", client version="+clientVersion); + buffer.append(", methodsFingerPrint="+clientMethodsHash); + return buffer.toString(); + } + + public void setConf(Configuration conf) { + this.conf = conf; + } + + public Configuration getConf() { + return this.conf; + } + + @Override + public byte getVersion() { + return RPC_VERSION; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java new file mode 100644 index 0000000..b3a4228 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.ipc; + +import com.google.protobuf.BlockingRpcChannel; +import com.google.protobuf.ByteString; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Message; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcChannel; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; + +import java.io.IOException; + +import static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; + +/** + * Provides clients with an RPC connection to call coprocessor endpoint {@link com.google.protobuf.Service}s + * against the active master. An instance of this class may be obtained + * by calling {@link org.apache.hadoop.hbase.client.HBaseAdmin#coprocessorService()}, + * but should normally only be used in creating a new {@link com.google.protobuf.Service} stub to call the endpoint + * methods. + * @see org.apache.hadoop.hbase.client.HBaseAdmin#coprocessorService() + */ +@InterfaceAudience.Private +public class MasterCoprocessorRpcChannel extends CoprocessorRpcChannel{ + private static Log LOG = LogFactory.getLog(MasterCoprocessorRpcChannel.class); + + private final HConnection connection; + + public MasterCoprocessorRpcChannel(HConnection conn) { + this.connection = conn; + } + + @Override + protected Message callExecService(Descriptors.MethodDescriptor method, + Message request, Message responsePrototype) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Call: "+method.getName()+", "+request.toString()); + } + + final ClientProtos.CoprocessorServiceCall call = + ClientProtos.CoprocessorServiceCall.newBuilder() + .setRow(ByteString.copyFrom(HConstants.EMPTY_BYTE_ARRAY)) + .setServiceName(method.getService().getFullName()) + .setMethodName(method.getName()) + .setRequest(request.toByteString()).build(); + CoprocessorServiceResponse result = ProtobufUtil.execService(connection.getMasterAdmin(), call); + Message response = null; + if (result.getValue().hasValue()) { + response = responsePrototype.newBuilderForType() + .mergeFrom(result.getValue().getValue()).build(); + } else { + response = responsePrototype.getDefaultInstanceForType(); + } + if (LOG.isTraceEnabled()) { + LOG.trace("Master Result is value=" + response); + } + return response; + } + +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcClientEngine.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcClientEngine.java new file mode 100644 index 0000000..46873ab --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcClientEngine.java @@ -0,0 +1,194 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ipc; + +import com.google.protobuf.Message; +import com.google.protobuf.ServiceException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcRequestBody; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.ipc.RemoteException; + +import javax.net.SocketFactory; +import java.io.IOException; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.net.InetSocketAddress; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +public class ProtobufRpcClientEngine implements RpcClientEngine { + + private static final Log LOG = + LogFactory.getLog("org.apache.hadoop.hbase.ipc.ProtobufRpcClientEngine"); + + ProtobufRpcClientEngine() { + super(); + } + + protected final static ClientCache CLIENTS = new ClientCache(); + @Override + public VersionedProtocol getProxy( + Class protocol, long clientVersion, + InetSocketAddress addr, User ticket, Configuration conf, + SocketFactory factory, int rpcTimeout) throws IOException { + final Invoker invoker = new Invoker(protocol, addr, ticket, conf, factory, + rpcTimeout); + return (VersionedProtocol) Proxy.newProxyInstance( + protocol.getClassLoader(), new Class[]{protocol}, invoker); + } + + @Override + public void stopProxy(VersionedProtocol proxy) { + if (proxy!=null) { + ((Invoker)Proxy.getInvocationHandler(proxy)).close(); + } + } + + static class Invoker implements InvocationHandler { + private static final Map returnTypes = + new ConcurrentHashMap(); + private Class protocol; + private InetSocketAddress address; + private User ticket; + private HBaseClient client; + private boolean isClosed = false; + final private int rpcTimeout; + private final long clientProtocolVersion; + + public Invoker(Class protocol, + InetSocketAddress addr, User ticket, Configuration conf, + SocketFactory factory, int rpcTimeout) throws IOException { + this.protocol = protocol; + this.address = addr; + this.ticket = ticket; + this.client = CLIENTS.getClient(conf, factory); + this.rpcTimeout = rpcTimeout; + Long version = Invocation.PROTOCOL_VERSION.get(protocol); + if (version != null) { + this.clientProtocolVersion = version; + } else { + try { + this.clientProtocolVersion = HBaseClientRPC.getProtocolVersion(protocol); + } catch (NoSuchFieldException e) { + throw new RuntimeException("Exception encountered during " + + protocol, e); + } catch (IllegalAccessException e) { + throw new RuntimeException("Exception encountered during " + + protocol, e); + } + } + } + + private RpcRequestBody constructRpcRequest(Method method, + Object[] params) throws ServiceException { + RpcRequestBody rpcRequest; + RpcRequestBody.Builder builder = RpcRequestBody.newBuilder(); + builder.setMethodName(method.getName()); + Message param; + int length = params.length; + if (length == 2) { + // RpcController + Message in the method args + // (generated code from RPC bits in .proto files have 
RpcController) + param = (Message)params[1]; + } else if (length == 1) { // Message + param = (Message)params[0]; + } else { + throw new ServiceException("Too many parameters for request. Method: [" + + method.getName() + "]" + ", Expected: 2, Actual: " + + params.length); + } + builder.setRequestClassName(param.getClass().getName()); + builder.setRequest(param.toByteString()); + builder.setClientProtocolVersion(clientProtocolVersion); + rpcRequest = builder.build(); + return rpcRequest; + } + + /** + * This is the client side invoker of RPC method. It only throws + * ServiceException, since the invocation proxy expects only + * ServiceException to be thrown by the method in case protobuf service. + * + * ServiceException has the following causes: + *
+ * <ol>
+ * <li>Exceptions encountered on the client side in this method are
+ * set as cause in ServiceException as is.</li>
+ * <li>Exceptions from the server are wrapped in RemoteException and are
+ * set as cause in ServiceException</li>
+ * </ol>
      + * + * Note that the client calling protobuf RPC methods, must handle + * ServiceException by getting the cause from the ServiceException. If the + * cause is RemoteException, then unwrap it to get the exception thrown by + * the server. + */ + @Override + public Object invoke(Object proxy, Method method, Object[] args) + throws ServiceException { + long startTime = 0; + if (LOG.isDebugEnabled()) { + startTime = System.currentTimeMillis(); + } + + RpcRequestBody rpcRequest = constructRpcRequest(method, args); + Message val = null; + try { + val = client.call(rpcRequest, address, protocol, ticket, rpcTimeout); + + if (LOG.isDebugEnabled()) { + long callTime = System.currentTimeMillis() - startTime; + if (LOG.isTraceEnabled()) LOG.trace("Call: " + method.getName() + " " + callTime); + } + return val; + } catch (Throwable e) { + if (e instanceof RemoteException) { + Throwable cause = ((RemoteException)e).unwrapRemoteException(); + throw new ServiceException(cause); + } + throw new ServiceException(e); + } + } + + synchronized protected void close() { + if (!isClosed) { + isClosed = true; + CLIENTS.stopClient(client); + } + } + + static Message getReturnProtoType(Method method) throws Exception { + if (returnTypes.containsKey(method.getName())) { + return returnTypes.get(method.getName()); + } + + Class returnType = method.getReturnType(); + Method newInstMethod = returnType.getMethod("getDefaultInstance"); + newInstMethod.setAccessible(true); + Message protoType = (Message) newInstMethod.invoke(null, (Object[]) null); + returnTypes.put(method.getName(), protoType); + return protoType; + } + } + +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ProtocolSignature.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ProtocolSignature.java new file mode 100644 index 0000000..dc61373 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ProtocolSignature.java @@ -0,0 +1,243 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.ipc; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.lang.reflect.Method; +import java.util.Arrays; +import java.util.HashMap; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableFactory; + +@InterfaceAudience.Private +public class ProtocolSignature implements Writable { + static { // register a ctor + WritableFactories.setFactory + (ProtocolSignature.class, + new WritableFactory() { + public Writable newInstance() { return new ProtocolSignature(); } + }); + } + + private long version; + private int[] methods = null; // an array of method hash codes + + /** + * default constructor + */ + public ProtocolSignature() { + } + + /** + * Constructor + * + * @param version server version + * @param methodHashcodes hash codes of the methods supported by server + */ + public ProtocolSignature(long version, int[] methodHashcodes) { + this.version = version; + this.methods = methodHashcodes; + } + + public long getVersion() { + return version; + } + + public int[] getMethods() { + return methods; + } + + @Override + public void readFields(DataInput in) throws IOException { + version = in.readLong(); + boolean hasMethods = in.readBoolean(); + if (hasMethods) { + int numMethods = in.readInt(); + methods = new int[numMethods]; + for (int i=0; i type : method.getParameterTypes()) { + hashcode = 31*hashcode ^ type.getName().hashCode(); + } + return hashcode; + } + + /** + * Convert an array of Method into an array of hash codes + * + * @param methods + * @return array of hash codes + */ + private static int[] getFingerprints(Method[] methods) { + if (methods == null) { + return null; + } + int[] hashCodes = new int[methods.length]; + for (int i = 0; i + PROTOCOL_FINGERPRINT_CACHE = + new HashMap(); + + /** + * Return a protocol's signature and finger print from cache + * + * @param protocol a protocol class + * @param serverVersion protocol version + * @return its signature and finger print + */ + private static ProtocolSigFingerprint getSigFingerprint( + Class protocol, long serverVersion) { + String protocolName = protocol.getName(); + synchronized (PROTOCOL_FINGERPRINT_CACHE) { + ProtocolSigFingerprint sig = PROTOCOL_FINGERPRINT_CACHE.get(protocolName); + if (sig == null) { + int[] serverMethodHashcodes = getFingerprints(protocol.getMethods()); + sig = new ProtocolSigFingerprint( + new ProtocolSignature(serverVersion, serverMethodHashcodes), + getFingerprint(serverMethodHashcodes)); + PROTOCOL_FINGERPRINT_CACHE.put(protocolName, sig); + } + return sig; + } + } + + /** + * Get a server protocol's signature + * + * @param clientMethodsHashCode client protocol methods hashcode + * @param serverVersion server protocol version + * @param protocol protocol + * @return the server's protocol signature + */ + static ProtocolSignature getProtocolSignature( + int clientMethodsHashCode, + long serverVersion, + Class protocol) { + // try to get the finger print & signature from the cache + ProtocolSigFingerprint sig = getSigFingerprint(protocol, serverVersion); + + // check if the client side protocol matches the one on the server side + if (clientMethodsHashCode == sig.fingerprint) { + return new ProtocolSignature(serverVersion, null); // null indicates a match + } + + return sig.signature; + } + + /** + * Get a server protocol's signature + * + * @param server server implementation + * @param 
protocol server protocol + * @param clientVersion client's version + * @param clientMethodsHash client's protocol's hash code + * @return the server protocol's signature + * @throws IOException if any error occurs + */ + @SuppressWarnings("unchecked") + public static ProtocolSignature getProtocolSignature(VersionedProtocol server, + String protocol, + long clientVersion, int clientMethodsHash) throws IOException { + Class inter; + try { + inter = (Class)Class.forName(protocol); + } catch (Exception e) { + throw new IOException(e); + } + long serverVersion = server.getProtocolVersion(protocol, clientVersion); + return ProtocolSignature.getProtocolSignature( + clientMethodsHash, serverVersion, inter); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java new file mode 100644 index 0000000..e5855c9 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ipc; + +import com.google.protobuf.ByteString; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Message; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.ServerCallable; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.IOException; + +import static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; + +/** + * Provides clients with an RPC connection to call coprocessor endpoint {@link com.google.protobuf.Service}s + * against a given table region. An instance of this class may be obtained + * by calling {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])}, + * but should normally only be used in creating a new {@link com.google.protobuf.Service} stub to call the endpoint + * methods. 
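+ *
+ * A rough usage sketch; the table, row and MyService names are only
+ * illustrative and assume a coprocessor endpoint exporting MyService is
+ * deployed on the table and that the channel can back a blocking stub:
+ * <pre>
+ * HTable table = new HTable(conf, "mytable");
+ * CoprocessorRpcChannel channel = table.coprocessorService(Bytes.toBytes("somerow"));
+ * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
+ * MyResponse response = service.myCall(null, MyRequest.getDefaultInstance());
+ * </pre>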
+ * @see org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[]) + */ +@InterfaceAudience.Private +public class RegionCoprocessorRpcChannel extends CoprocessorRpcChannel{ + private static Log LOG = LogFactory.getLog(RegionCoprocessorRpcChannel.class); + + private final HConnection connection; + private final byte[] table; + private final byte[] row; + private byte[] lastRegion; + + public RegionCoprocessorRpcChannel(HConnection conn, byte[] table, byte[] row) { + this.connection = conn; + this.table = table; + this.row = row; + } + + @Override + protected Message callExecService(Descriptors.MethodDescriptor method, + Message request, Message responsePrototype) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Call: "+method.getName()+", "+request.toString()); + } + + if (row == null) { + throw new IllegalArgumentException("Missing row property for remote region location"); + } + + final ClientProtos.CoprocessorServiceCall call = + ClientProtos.CoprocessorServiceCall.newBuilder() + .setRow(ByteString.copyFrom(row)) + .setServiceName(method.getService().getFullName()) + .setMethodName(method.getName()) + .setRequest(request.toByteString()).build(); + ServerCallable callable = + new ServerCallable(connection, table, row) { + public CoprocessorServiceResponse call() throws Exception { + byte[] regionName = location.getRegionInfo().getRegionName(); + return ProtobufUtil.execService(server, call, regionName); + } + }; + CoprocessorServiceResponse result = callable.withRetries(); + Message response = null; + if (result.getValue().hasValue()) { + response = responsePrototype.newBuilderForType() + .mergeFrom(result.getValue().getValue()).build(); + } else { + response = responsePrototype.getDefaultInstanceForType(); + } + lastRegion = result.getRegion().getValue().toByteArray(); + if (LOG.isTraceEnabled()) { + LOG.trace("Result is region=" + Bytes.toStringBinary(lastRegion) + ", value=" + response); + } + return response; + } + + public byte[] getLastRegion() { + return lastRegion; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientEngine.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientEngine.java new file mode 100644 index 0000000..f6dcbf9 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientEngine.java @@ -0,0 +1,42 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.ipc; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; + +import javax.net.SocketFactory; +import java.io.IOException; +import java.net.InetSocketAddress; + +/** An RPC implementation for the client */ +@InterfaceAudience.Private +public interface RpcClientEngine { + /** Construct a client-side proxy object. */ + VersionedProtocol getProxy(Class protocol, + long clientVersion, InetSocketAddress addr, + User ticket, Configuration conf, + SocketFactory factory, int rpcTimeout) throws IOException; + + /** Stop this proxy. */ + void stopProxy(VersionedProtocol proxy); + +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java new file mode 100644 index 0000000..aa36b4c --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java @@ -0,0 +1,32 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ipc; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; + +@SuppressWarnings("serial") +@InterfaceAudience.Private +public class ServerNotRunningYetException extends IOException { + public ServerNotRunningYetException(String s) { + super(s); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java new file mode 100644 index 0000000..cbf63fc --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.ipc; + +import com.google.protobuf.Descriptors; +import com.google.protobuf.Message; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; +import org.apache.hadoop.util.StringUtils; + +import java.io.IOException; + +/** + * Used for server-side protobuf RPC service invocations. This handler allows + * invocation exceptions to easily be passed through to the RPC server from coprocessor + * {@link Service} implementations. + * + *

+ * When implementing {@link Service} defined methods, coprocessor endpoints can use the following
+ * pattern to pass exceptions back to the RPC client:
+ * <pre>
+ * public void myMethod(RpcController controller, MyRequest request, RpcCallback done) {
+ *   MyResponse response = null;
+ *   try {
+ *     // do processing
+ *     response = MyResponse.getDefaultInstance();  // or use a new builder to populate the response
+ *   } catch (IOException ioe) {
+ *     // pass exception back up
+ *     ResponseConverter.setControllerException(controller, ioe);
+ *   }
+ *   done.run(response);
+ * }
+ * </pre>
      + */ +public class ServerRpcController implements RpcController { + /** + * The exception thrown within + * {@link Service#callMethod(Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)}, + * if any. + */ + // TODO: it would be good widen this to just Throwable, but IOException is what we allow now + private IOException serviceException; + private String errorMessage; + + @Override + public void reset() { + serviceException = null; + errorMessage = null; + } + + @Override + public boolean failed() { + return (failedOnException() || errorMessage != null); + } + + @Override + public String errorText() { + return errorMessage; + } + + @Override + public void startCancel() { + // not implemented + } + + @Override + public void setFailed(String message) { + errorMessage = message; + } + + @Override + public boolean isCanceled() { + return false; + } + + @Override + public void notifyOnCancel(RpcCallback objectRpcCallback) { + // not implemented + } + + /** + * Sets an exception to be communicated back to the {@link Service} client. + * @param ioe the exception encountered during execution of the service method + */ + public void setFailedOn(IOException ioe) { + serviceException = ioe; + setFailed(StringUtils.stringifyException(ioe)); + } + + /** + * Returns any exception thrown during service method invocation, or {@code null} if no exception + * was thrown. This can be used by clients to receive exceptions generated by RPC calls, even + * when {@link RpcCallback}s are used and no {@link com.google.protobuf.ServiceException} is + * declared. + */ + public IOException getFailedOn() { + return serviceException; + } + + /** + * Returns whether or not a server exception was generated in the prior RPC invocation. + */ + public boolean failedOnException() { + return serviceException != null; + } + + /** + * Throws an IOException back out if one is currently stored. + */ + public void checkFailed() throws IOException { + if (failedOnException()) { + throw getFailedOn(); + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/VersionedProtocol.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/VersionedProtocol.java new file mode 100644 index 0000000..3667c6d --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/VersionedProtocol.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ipc; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Superclass of all protocols that use Hadoop RPC. + * Subclasses of this interface are also supposed to have + * a static final long versionID field. 
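+ *
+ * For example, a protocol would typically be declared along these lines (the
+ * interface name is illustrative; note that the client code in this patch
+ * actually reflects on a field named VERSION):
+ * <pre>
+ * public interface MyProtocol extends VersionedProtocol {
+ *   public static final long VERSION = 1L;
+ * }
+ * </pre>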
+ */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface VersionedProtocol { + + /** + * Return protocol version corresponding to protocol interface. + * @param protocol The classname of the protocol interface + * @param clientVersion The version of the protocol that the client speaks + * @return the version that the server will speak + * @throws IOException if any IO error occurs + */ + @Deprecated + public long getProtocolVersion(String protocol, + long clientVersion) throws IOException; + + /** + * Return protocol version corresponding to protocol interface. + * @param protocol The classname of the protocol interface + * @param clientVersion The version of the protocol that the client speaks + * @param clientMethodsHash the hashcode of client protocol methods + * @return the server protocol signature containing its version and + * a list of its supported methods + * @see ProtocolSignature#getProtocolSignature(VersionedProtocol, String, + * long, int) for a default implementation + */ + public ProtocolSignature getProtocolSignature(String protocol, + long clientVersion, + int clientMethodsHash) throws IOException; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java new file mode 100644 index 0000000..e927d41 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java @@ -0,0 +1,275 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Date; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; + +/** + * State of a Region while undergoing transitions. + * Region state cannot be modified except the stamp field. + * So it is almost immutable. 
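+ *
+ * A typical (though not the only possible) sequence for the state field is
+ * OFFLINE, PENDING_OPEN, OPENING, OPEN on assignment, then PENDING_CLOSE,
+ * CLOSING, CLOSED on unassignment, with SPLITTING/SPLIT used while the
+ * region is being split.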
+ */ +@InterfaceAudience.Private +public class RegionState implements org.apache.hadoop.io.Writable { + public enum State { + OFFLINE, // region is in an offline state + PENDING_OPEN, // sent rpc to server to open but has not begun + OPENING, // server has begun to open but not yet done + OPEN, // server opened region and updated meta + PENDING_CLOSE, // sent rpc to server to close but has not begun + CLOSING, // server has begun to close but not yet done + CLOSED, // server closed region and updated meta + SPLITTING, // server started split of a region + SPLIT // server completed split of a region + } + + // Many threads can update the state at the stamp at the same time + private final AtomicLong stamp; + private HRegionInfo region; + + private volatile ServerName serverName; + private volatile State state; + + public RegionState() { + this.stamp = new AtomicLong(System.currentTimeMillis()); + } + + public RegionState(HRegionInfo region, State state) { + this(region, state, System.currentTimeMillis(), null); + } + + public RegionState(HRegionInfo region, + State state, long stamp, ServerName serverName) { + this.region = region; + this.state = state; + this.stamp = new AtomicLong(stamp); + this.serverName = serverName; + } + + public void updateTimestampToNow() { + setTimestamp(System.currentTimeMillis()); + } + + public State getState() { + return state; + } + + public long getStamp() { + return stamp.get(); + } + + public HRegionInfo getRegion() { + return region; + } + + public ServerName getServerName() { + return serverName; + } + + public boolean isClosing() { + return state == State.CLOSING; + } + + public boolean isClosed() { + return state == State.CLOSED; + } + + public boolean isPendingClose() { + return state == State.PENDING_CLOSE; + } + + public boolean isOpening() { + return state == State.OPENING; + } + + public boolean isOpened() { + return state == State.OPEN; + } + + public boolean isPendingOpen() { + return state == State.PENDING_OPEN; + } + + public boolean isOffline() { + return state == State.OFFLINE; + } + + public boolean isSplitting() { + return state == State.SPLITTING; + } + + public boolean isSplit() { + return state == State.SPLIT; + } + + public boolean isPendingOpenOrOpeningOnServer(final ServerName sn) { + return isOnServer(sn) && (isPendingOpen() || isOpening()); + } + + public boolean isPendingCloseOrClosingOnServer(final ServerName sn) { + return isOnServer(sn) && (isPendingClose() || isClosing()); + } + + public boolean isOnServer(final ServerName sn) { + return serverName != null && serverName.equals(sn); + } + + @Override + public String toString() { + return "{" + region.getRegionNameAsString() + + " state=" + state + + ", ts=" + stamp + + ", server=" + serverName + "}"; + } + + /** + * A slower (but more easy-to-read) stringification + */ + public String toDescriptiveString() { + long lstamp = stamp.get(); + long relTime = System.currentTimeMillis() - lstamp; + + return region.getRegionNameAsString() + + " state=" + state + + ", ts=" + new Date(lstamp) + " (" + (relTime/1000) + "s ago)" + + ", server=" + serverName; + } + + /** + * Convert a RegionState to an HBaseProtos.RegionState + * + * @return the converted HBaseProtos.RegionState + */ + public ClusterStatusProtos.RegionState convert() { + ClusterStatusProtos.RegionState.Builder regionState = ClusterStatusProtos.RegionState.newBuilder(); + ClusterStatusProtos.RegionState.State rs; + switch (regionState.getState()) { + case OFFLINE: + rs = ClusterStatusProtos.RegionState.State.OFFLINE; + 
break; + case PENDING_OPEN: + rs = ClusterStatusProtos.RegionState.State.PENDING_OPEN; + break; + case OPENING: + rs = ClusterStatusProtos.RegionState.State.OPENING; + break; + case OPEN: + rs = ClusterStatusProtos.RegionState.State.OPEN; + break; + case PENDING_CLOSE: + rs = ClusterStatusProtos.RegionState.State.PENDING_CLOSE; + break; + case CLOSING: + rs = ClusterStatusProtos.RegionState.State.CLOSING; + break; + case CLOSED: + rs = ClusterStatusProtos.RegionState.State.CLOSED; + break; + case SPLITTING: + rs = ClusterStatusProtos.RegionState.State.SPLITTING; + break; + case SPLIT: + rs = ClusterStatusProtos.RegionState.State.SPLIT; + break; + default: + throw new IllegalStateException(""); + } + regionState.setRegionInfo(HRegionInfo.convert(region)); + regionState.setState(rs); + regionState.setStamp(getStamp()); + return regionState.build(); + } + + /** + * Convert a protobuf HBaseProtos.RegionState to a RegionState + * + * @return the RegionState + */ + public static RegionState convert(ClusterStatusProtos.RegionState proto) { + RegionState.State state; + switch (proto.getState()) { + case OFFLINE: + state = State.OFFLINE; + break; + case PENDING_OPEN: + state = State.PENDING_OPEN; + break; + case OPENING: + state = State.OPENING; + break; + case OPEN: + state = State.OPEN; + break; + case PENDING_CLOSE: + state = State.PENDING_CLOSE; + break; + case CLOSING: + state = State.CLOSING; + break; + case CLOSED: + state = State.CLOSED; + break; + case SPLITTING: + state = State.SPLITTING; + break; + case SPLIT: + state = State.SPLIT; + break; + default: + throw new IllegalStateException(""); + } + + return new RegionState(HRegionInfo.convert(proto.getRegionInfo()),state,proto.getStamp(),null); + } + + protected void setTimestamp(final long timestamp) { + stamp.set(timestamp); + } + + /** + * @deprecated Writables are going away + */ + @Deprecated + @Override + public void readFields(DataInput in) throws IOException { + region = new HRegionInfo(); + region.readFields(in); + state = State.valueOf(in.readUTF()); + stamp.set(in.readLong()); + } + + /** + * @deprecated Writables are going away + */ + @Deprecated + @Override + public void write(DataOutput out) throws IOException { + region.write(out); + out.writeUTF(state.name()); + out.writeLong(stamp.get()); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java new file mode 100644 index 0000000..36de37a --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -0,0 +1,1854 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.protobuf; + +import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInput; +import java.io.DataInputStream; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.NavigableMap; +import java.util.NavigableSet; + +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MasterAdminProtocol; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.Action; +import org.apache.hadoop.hbase.client.AdminProtocol; +import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.ClientProtocol; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.MultiAction; +import org.apache.hadoop.hbase.client.MultiResponse; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Row; +import org.apache.hadoop.hbase.client.RowLock; +import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.hadoop.hbase.filter.ByteArrayComparable; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.io.HbaseObjectWritable; +import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; +import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.ColumnValue; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.ColumnValue.QualifierValue; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.DeleteType; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.MutateType; +import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; +import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse; +import org.apache.hadoop.hbase.security.access.Permission; +import org.apache.hadoop.hbase.security.access.TablePermission; +import org.apache.hadoop.hbase.security.access.UserPermission; +import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Methods; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.token.Token; +import org.apache.hbase.Cell; + +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.ListMultimap; +import com.google.common.collect.Lists; +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Message; +import com.google.protobuf.RpcChannel; +import com.google.protobuf.Service; +import com.google.protobuf.ServiceException; + +/** + * Protobufs utility. + */ +public final class ProtobufUtil { + + private ProtobufUtil() { + } + + /** + * Primitive type to class mapping. 
+ */ + private final static Map> + PRIMITIVES = new HashMap>(); + + static { + PRIMITIVES.put(Boolean.TYPE.getName(), Boolean.TYPE); + PRIMITIVES.put(Byte.TYPE.getName(), Byte.TYPE); + PRIMITIVES.put(Character.TYPE.getName(), Character.TYPE); + PRIMITIVES.put(Short.TYPE.getName(), Short.TYPE); + PRIMITIVES.put(Integer.TYPE.getName(), Integer.TYPE); + PRIMITIVES.put(Long.TYPE.getName(), Long.TYPE); + PRIMITIVES.put(Float.TYPE.getName(), Float.TYPE); + PRIMITIVES.put(Double.TYPE.getName(), Double.TYPE); + PRIMITIVES.put(Void.TYPE.getName(), Void.TYPE); + } + + /** + * Magic we put ahead of a serialized protobuf message. + * For example, all znode content is protobuf messages with the below magic + * for preamble. + */ + public static final byte [] PB_MAGIC = new byte [] {'P', 'B', 'U', 'F'}; + private static final String PB_MAGIC_STR = Bytes.toString(PB_MAGIC); + + /** + * Prepend the passed bytes with four bytes of magic, {@link #PB_MAGIC}, to flag what + * follows as a protobuf in hbase. Prepend these bytes to all content written to znodes, etc. + * @param bytes Bytes to decorate + * @return The passed bytes with magic prepended (Creates a new + * byte array that is bytes.length plus {@link #PB_MAGIC}.length. + */ + public static byte [] prependPBMagic(final byte [] bytes) { + return Bytes.add(PB_MAGIC, bytes); + } + + /** + * @param bytes Bytes to check. + * @return True if passed bytes has {@link #PB_MAGIC} for a prefix. + */ + public static boolean isPBMagicPrefix(final byte [] bytes) { + if (bytes == null || bytes.length < PB_MAGIC.length) return false; + return Bytes.compareTo(PB_MAGIC, 0, PB_MAGIC.length, bytes, 0, PB_MAGIC.length) == 0; + } + + /** + * @param bytes + * @throws DeserializationException if we are missing the pb magic prefix + */ + public static void expectPBMagicPrefix(final byte [] bytes) throws DeserializationException { + if (!isPBMagicPrefix(bytes)) { + throw new DeserializationException("Missing pb magic " + PB_MAGIC_STR + " prefix"); + } + } + + /** + * @return Length of {@link #PB_MAGIC} + */ + public static int lengthOfPBMagic() { + return PB_MAGIC.length; + } + + /** + * Return the IOException thrown by the remote server wrapped in + * ServiceException as cause. + * + * @param se ServiceException that wraps IO exception thrown by the server + * @return Exception wrapped in ServiceException or + * a new IOException that wraps the unexpected ServiceException. + */ + public static IOException getRemoteException(ServiceException se) { + Throwable e = se.getCause(); + if (e == null) { + return new IOException(se); + } + return e instanceof IOException ? 
(IOException) e : new IOException(se); + } + + /** + * Convert a ServerName to a protocol buffer ServerName + * + * @param serverName the ServerName to convert + * @return the converted protocol buffer ServerName + * @see #toServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) + */ + public static HBaseProtos.ServerName + toServerName(final ServerName serverName) { + if (serverName == null) return null; + HBaseProtos.ServerName.Builder builder = + HBaseProtos.ServerName.newBuilder(); + builder.setHostName(serverName.getHostname()); + if (serverName.getPort() >= 0) { + builder.setPort(serverName.getPort()); + } + if (serverName.getStartcode() >= 0) { + builder.setStartCode(serverName.getStartcode()); + } + return builder.build(); + } + + /** + * Convert a protocol buffer ServerName to a ServerName + * + * @param proto the protocol buffer ServerName to convert + * @return the converted ServerName + */ + public static ServerName toServerName(final HBaseProtos.ServerName proto) { + if (proto == null) return null; + String hostName = proto.getHostName(); + long startCode = -1; + int port = -1; + if (proto.hasPort()) { + port = proto.getPort(); + } + if (proto.hasStartCode()) { + startCode = proto.getStartCode(); + } + return new ServerName(hostName, port, startCode); + } + + /** + * Get HTableDescriptor[] from GetTableDescriptorsResponse protobuf + * + * @param proto the GetTableDescriptorsResponse + * @return HTableDescriptor[] + */ + public static HTableDescriptor[] getHTableDescriptorArray(GetTableDescriptorsResponse proto) { + if (proto == null) return null; + + HTableDescriptor[] ret = new HTableDescriptor[proto.getTableSchemaCount()]; + for (int i = 0; i < proto.getTableSchemaCount(); ++i) { + ret[i] = HTableDescriptor.convert(proto.getTableSchema(i)); + } + return ret; + } + + /** + * get the split keys in form "byte [][]" from a CreateTableRequest proto + * + * @param proto the CreateTableRequest + * @return the split keys + */ + public static byte [][] getSplitKeysArray(final CreateTableRequest proto) { + byte [][] splitKeys = new byte[proto.getSplitKeysCount()][]; + for (int i = 0; i < proto.getSplitKeysCount(); ++i) { + splitKeys[i] = proto.getSplitKeys(i).toByteArray(); + } + return splitKeys; + } + + /** + * Convert a protocol buffer Get to a client Get + * + * @param proto the protocol buffer Get to convert + * @return the converted client Get + * @throws IOException + */ + public static Get toGet( + final ClientProtos.Get proto) throws IOException { + if (proto == null) return null; + byte[] row = proto.getRow().toByteArray(); + RowLock rowLock = null; + if (proto.hasLockId()) { + rowLock = new RowLock(proto.getLockId()); + } + Get get = new Get(row, rowLock); + if (proto.hasCacheBlocks()) { + get.setCacheBlocks(proto.getCacheBlocks()); + } + if (proto.hasMaxVersions()) { + get.setMaxVersions(proto.getMaxVersions()); + } + if (proto.hasStoreLimit()) { + get.setMaxResultsPerColumnFamily(proto.getStoreLimit()); + } + if (proto.hasStoreOffset()) { + get.setRowOffsetPerColumnFamily(proto.getStoreOffset()); + } + if (proto.hasTimeRange()) { + HBaseProtos.TimeRange timeRange = proto.getTimeRange(); + long minStamp = 0; + long maxStamp = Long.MAX_VALUE; + if (timeRange.hasFrom()) { + minStamp = timeRange.getFrom(); + } + if (timeRange.hasTo()) { + maxStamp = timeRange.getTo(); + } + get.setTimeRange(minStamp, maxStamp); + } + if (proto.hasFilter()) { + HBaseProtos.Filter filter = proto.getFilter(); + get.setFilter(ProtobufUtil.toFilter(filter)); + } + for 
(NameBytesPair attribute: proto.getAttributeList()) { + get.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); + } + if (proto.getColumnCount() > 0) { + for (Column column: proto.getColumnList()) { + byte[] family = column.getFamily().toByteArray(); + if (column.getQualifierCount() > 0) { + for (ByteString qualifier: column.getQualifierList()) { + get.addColumn(family, qualifier.toByteArray()); + } + } else { + get.addFamily(family); + } + } + } + return get; + } + + /** + * Convert a protocol buffer Mutate to a Put + * + * @param proto the protocol buffer Mutate to convert + * @return the converted client Put + * @throws DoNotRetryIOException + */ + public static Put toPut( + final Mutate proto) throws DoNotRetryIOException { + MutateType type = proto.getMutateType(); + assert type == MutateType.PUT : type.name(); + byte[] row = proto.getRow().toByteArray(); + long timestamp = HConstants.LATEST_TIMESTAMP; + if (proto.hasTimestamp()) { + timestamp = proto.getTimestamp(); + } + RowLock lock = null; + if (proto.hasLockId()) { + lock = new RowLock(proto.getLockId()); + } + Put put = new Put(row, timestamp, lock); + put.setWriteToWAL(proto.getWriteToWAL()); + for (NameBytesPair attribute: proto.getAttributeList()) { + put.setAttribute(attribute.getName(), + attribute.getValue().toByteArray()); + } + for (ColumnValue column: proto.getColumnValueList()) { + byte[] family = column.getFamily().toByteArray(); + for (QualifierValue qv: column.getQualifierValueList()) { + byte[] qualifier = qv.getQualifier().toByteArray(); + if (!qv.hasValue()) { + throw new DoNotRetryIOException( + "Missing required field: qualifer value"); + } + byte[] value = qv.getValue().toByteArray(); + long ts = timestamp; + if (qv.hasTimestamp()) { + ts = qv.getTimestamp(); + } + put.add(family, qualifier, ts, value); + } + } + return put; + } + + /** + * Convert a protocol buffer Mutate to a Delete + * + * @param proto the protocol buffer Mutate to convert + * @return the converted client Delete + */ + public static Delete toDelete(final Mutate proto) { + MutateType type = proto.getMutateType(); + assert type == MutateType.DELETE : type.name(); + byte[] row = proto.getRow().toByteArray(); + long timestamp = HConstants.LATEST_TIMESTAMP; + if (proto.hasTimestamp()) { + timestamp = proto.getTimestamp(); + } + RowLock lock = null; + if (proto.hasLockId()) { + lock = new RowLock(proto.getLockId()); + } + Delete delete = new Delete(row, timestamp, lock); + delete.setWriteToWAL(proto.getWriteToWAL()); + for (NameBytesPair attribute: proto.getAttributeList()) { + delete.setAttribute(attribute.getName(), + attribute.getValue().toByteArray()); + } + for (ColumnValue column: proto.getColumnValueList()) { + byte[] family = column.getFamily().toByteArray(); + for (QualifierValue qv: column.getQualifierValueList()) { + DeleteType deleteType = qv.getDeleteType(); + byte[] qualifier = null; + if (qv.hasQualifier()) { + qualifier = qv.getQualifier().toByteArray(); + } + long ts = HConstants.LATEST_TIMESTAMP; + if (qv.hasTimestamp()) { + ts = qv.getTimestamp(); + } + if (deleteType == DeleteType.DELETE_ONE_VERSION) { + delete.deleteColumn(family, qualifier, ts); + } else if (deleteType == DeleteType.DELETE_MULTIPLE_VERSIONS) { + delete.deleteColumns(family, qualifier, ts); + } else { + delete.deleteFamily(family, ts); + } + } + } + return delete; + } + + /** + * Convert a protocol buffer Mutate to an Append + * + * @param proto the protocol buffer Mutate to convert + * @return the converted client Append + * @throws 
DoNotRetryIOException + */ + public static Append toAppend( + final Mutate proto) throws DoNotRetryIOException { + MutateType type = proto.getMutateType(); + assert type == MutateType.APPEND : type.name(); + byte[] row = proto.getRow().toByteArray(); + Append append = new Append(row); + append.setWriteToWAL(proto.getWriteToWAL()); + for (NameBytesPair attribute: proto.getAttributeList()) { + append.setAttribute(attribute.getName(), + attribute.getValue().toByteArray()); + } + for (ColumnValue column: proto.getColumnValueList()) { + byte[] family = column.getFamily().toByteArray(); + for (QualifierValue qv: column.getQualifierValueList()) { + byte[] qualifier = qv.getQualifier().toByteArray(); + if (!qv.hasValue()) { + throw new DoNotRetryIOException( + "Missing required field: qualifer value"); + } + byte[] value = qv.getValue().toByteArray(); + append.add(family, qualifier, value); + } + } + return append; + } + + /** + * Convert a MutateRequest to Mutation + * + * @param proto the protocol buffer Mutate to convert + * @return the converted Mutation + * @throws IOException + */ + public static Mutation toMutation(final Mutate proto) throws IOException { + MutateType type = proto.getMutateType(); + if (type == MutateType.APPEND) { + return toAppend(proto); + } + if (type == MutateType.DELETE) { + return toDelete(proto); + } + if (type == MutateType.PUT) { + return toPut(proto); + } + throw new IOException("Not an understood mutate type " + type); + } + + /** + * Convert a protocol buffer Mutate to an Increment + * + * @param proto the protocol buffer Mutate to convert + * @return the converted client Increment + * @throws IOException + */ + public static Increment toIncrement( + final Mutate proto) throws IOException { + MutateType type = proto.getMutateType(); + assert type == MutateType.INCREMENT : type.name(); + RowLock lock = null; + if (proto.hasLockId()) { + lock = new RowLock(proto.getLockId()); + } + byte[] row = proto.getRow().toByteArray(); + Increment increment = new Increment(row, lock); + increment.setWriteToWAL(proto.getWriteToWAL()); + if (proto.hasTimeRange()) { + HBaseProtos.TimeRange timeRange = proto.getTimeRange(); + long minStamp = 0; + long maxStamp = Long.MAX_VALUE; + if (timeRange.hasFrom()) { + minStamp = timeRange.getFrom(); + } + if (timeRange.hasTo()) { + maxStamp = timeRange.getTo(); + } + increment.setTimeRange(minStamp, maxStamp); + } + for (ColumnValue column: proto.getColumnValueList()) { + byte[] family = column.getFamily().toByteArray(); + for (QualifierValue qv: column.getQualifierValueList()) { + byte[] qualifier = qv.getQualifier().toByteArray(); + if (!qv.hasValue()) { + throw new DoNotRetryIOException( + "Missing required field: qualifer value"); + } + long value = Bytes.toLong(qv.getValue().toByteArray()); + increment.addColumn(family, qualifier, value); + } + } + return increment; + } + + /** + * Convert a client Scan to a protocol buffer Scan + * + * @param scan the client Scan to convert + * @return the converted protocol buffer Scan + * @throws IOException + */ + public static ClientProtos.Scan toScan( + final Scan scan) throws IOException { + ClientProtos.Scan.Builder scanBuilder = + ClientProtos.Scan.newBuilder(); + scanBuilder.setCacheBlocks(scan.getCacheBlocks()); + if (scan.getBatch() > 0) { + scanBuilder.setBatchSize(scan.getBatch()); + } + if (scan.getMaxResultSize() > 0) { + scanBuilder.setMaxResultSize(scan.getMaxResultSize()); + } + scanBuilder.setMaxVersions(scan.getMaxVersions()); + TimeRange timeRange = scan.getTimeRange(); + if 
(!timeRange.isAllTime()) { + HBaseProtos.TimeRange.Builder timeRangeBuilder = + HBaseProtos.TimeRange.newBuilder(); + timeRangeBuilder.setFrom(timeRange.getMin()); + timeRangeBuilder.setTo(timeRange.getMax()); + scanBuilder.setTimeRange(timeRangeBuilder.build()); + } + Map attributes = scan.getAttributesMap(); + if (!attributes.isEmpty()) { + NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); + for (Map.Entry attribute: attributes.entrySet()) { + attributeBuilder.setName(attribute.getKey()); + attributeBuilder.setValue(ByteString.copyFrom(attribute.getValue())); + scanBuilder.addAttribute(attributeBuilder.build()); + } + } + byte[] startRow = scan.getStartRow(); + if (startRow != null && startRow.length > 0) { + scanBuilder.setStartRow(ByteString.copyFrom(startRow)); + } + byte[] stopRow = scan.getStopRow(); + if (stopRow != null && stopRow.length > 0) { + scanBuilder.setStopRow(ByteString.copyFrom(stopRow)); + } + if (scan.hasFilter()) { + scanBuilder.setFilter(ProtobufUtil.toFilter(scan.getFilter())); + } + if (scan.hasFamilies()) { + Column.Builder columnBuilder = Column.newBuilder(); + for (Map.Entry> + family: scan.getFamilyMap().entrySet()) { + columnBuilder.setFamily(ByteString.copyFrom(family.getKey())); + NavigableSet qualifiers = family.getValue(); + columnBuilder.clearQualifier(); + if (qualifiers != null && qualifiers.size() > 0) { + for (byte [] qualifier: qualifiers) { + columnBuilder.addQualifier(ByteString.copyFrom(qualifier)); + } + } + scanBuilder.addColumn(columnBuilder.build()); + } + } + if (scan.getMaxResultsPerColumnFamily() >= 0) { + scanBuilder.setStoreLimit(scan.getMaxResultsPerColumnFamily()); + } + if (scan.getRowOffsetPerColumnFamily() > 0) { + scanBuilder.setStoreOffset(scan.getRowOffsetPerColumnFamily()); + } + return scanBuilder.build(); + } + + /** + * Convert a protocol buffer Scan to a client Scan + * + * @param proto the protocol buffer Scan to convert + * @return the converted client Scan + * @throws IOException + */ + public static Scan toScan( + final ClientProtos.Scan proto) throws IOException { + byte [] startRow = HConstants.EMPTY_START_ROW; + byte [] stopRow = HConstants.EMPTY_END_ROW; + if (proto.hasStartRow()) { + startRow = proto.getStartRow().toByteArray(); + } + if (proto.hasStopRow()) { + stopRow = proto.getStopRow().toByteArray(); + } + Scan scan = new Scan(startRow, stopRow); + if (proto.hasCacheBlocks()) { + scan.setCacheBlocks(proto.getCacheBlocks()); + } + if (proto.hasMaxVersions()) { + scan.setMaxVersions(proto.getMaxVersions()); + } + if (proto.hasStoreLimit()) { + scan.setMaxResultsPerColumnFamily(proto.getStoreLimit()); + } + if (proto.hasStoreOffset()) { + scan.setRowOffsetPerColumnFamily(proto.getStoreOffset()); + } + if (proto.hasTimeRange()) { + HBaseProtos.TimeRange timeRange = proto.getTimeRange(); + long minStamp = 0; + long maxStamp = Long.MAX_VALUE; + if (timeRange.hasFrom()) { + minStamp = timeRange.getFrom(); + } + if (timeRange.hasTo()) { + maxStamp = timeRange.getTo(); + } + scan.setTimeRange(minStamp, maxStamp); + } + if (proto.hasFilter()) { + HBaseProtos.Filter filter = proto.getFilter(); + scan.setFilter(ProtobufUtil.toFilter(filter)); + } + if (proto.hasBatchSize()) { + scan.setBatch(proto.getBatchSize()); + } + if (proto.hasMaxResultSize()) { + scan.setMaxResultSize(proto.getMaxResultSize()); + } + for (NameBytesPair attribute: proto.getAttributeList()) { + scan.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); + } + if (proto.getColumnCount() > 0) { + for (Column column: 
proto.getColumnList()) { + byte[] family = column.getFamily().toByteArray(); + if (column.getQualifierCount() > 0) { + for (ByteString qualifier: column.getQualifierList()) { + scan.addColumn(family, qualifier.toByteArray()); + } + } else { + scan.addFamily(family); + } + } + } + return scan; + } + + /** + * Create a protocol buffer Get based on a client Get. + * + * @param get the client Get + * @return a protocol buffer Get + * @throws IOException + */ + public static ClientProtos.Get toGet( + final Get get) throws IOException { + ClientProtos.Get.Builder builder = + ClientProtos.Get.newBuilder(); + builder.setRow(ByteString.copyFrom(get.getRow())); + builder.setCacheBlocks(get.getCacheBlocks()); + builder.setMaxVersions(get.getMaxVersions()); + if (get.getLockId() >= 0) { + builder.setLockId(get.getLockId()); + } + if (get.getFilter() != null) { + builder.setFilter(ProtobufUtil.toFilter(get.getFilter())); + } + TimeRange timeRange = get.getTimeRange(); + if (!timeRange.isAllTime()) { + HBaseProtos.TimeRange.Builder timeRangeBuilder = + HBaseProtos.TimeRange.newBuilder(); + timeRangeBuilder.setFrom(timeRange.getMin()); + timeRangeBuilder.setTo(timeRange.getMax()); + builder.setTimeRange(timeRangeBuilder.build()); + } + Map attributes = get.getAttributesMap(); + if (!attributes.isEmpty()) { + NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); + for (Map.Entry attribute: attributes.entrySet()) { + attributeBuilder.setName(attribute.getKey()); + attributeBuilder.setValue(ByteString.copyFrom(attribute.getValue())); + builder.addAttribute(attributeBuilder.build()); + } + } + if (get.hasFamilies()) { + Column.Builder columnBuilder = Column.newBuilder(); + Map> families = get.getFamilyMap(); + for (Map.Entry> family: families.entrySet()) { + NavigableSet qualifiers = family.getValue(); + columnBuilder.setFamily(ByteString.copyFrom(family.getKey())); + columnBuilder.clearQualifier(); + if (qualifiers != null && qualifiers.size() > 0) { + for (byte[] qualifier: qualifiers) { + columnBuilder.addQualifier(ByteString.copyFrom(qualifier)); + } + } + builder.addColumn(columnBuilder.build()); + } + } + if (get.getMaxResultsPerColumnFamily() >= 0) { + builder.setStoreLimit(get.getMaxResultsPerColumnFamily()); + } + if (get.getRowOffsetPerColumnFamily() > 0) { + builder.setStoreOffset(get.getRowOffsetPerColumnFamily()); + } + return builder.build(); + } + + /** + * Convert a client Increment to a protobuf Mutate. 
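+ * A minimal round-trip sketch of what this conversion (and its inverse,
+ * toIncrement) looks like from a caller's side; the row, family and
+ * qualifier names below are placeholders only:
+ *
+ *   Increment inc = new Increment(Bytes.toBytes("row1"));
+ *   inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), 1L);
+ *   Mutate pb = ProtobufUtil.toMutate(inc);          // client to protobuf
+ *   Increment back = ProtobufUtil.toIncrement(pb);   // protobuf back to client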
+ * + * @param increment + * @return the converted mutate + */ + public static Mutate toMutate(final Increment increment) { + Mutate.Builder builder = Mutate.newBuilder(); + builder.setRow(ByteString.copyFrom(increment.getRow())); + builder.setMutateType(MutateType.INCREMENT); + builder.setWriteToWAL(increment.getWriteToWAL()); + if (increment.getLockId() >= 0) { + builder.setLockId(increment.getLockId()); + } + TimeRange timeRange = increment.getTimeRange(); + if (!timeRange.isAllTime()) { + HBaseProtos.TimeRange.Builder timeRangeBuilder = + HBaseProtos.TimeRange.newBuilder(); + timeRangeBuilder.setFrom(timeRange.getMin()); + timeRangeBuilder.setTo(timeRange.getMax()); + builder.setTimeRange(timeRangeBuilder.build()); + } + ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); + QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); + for (Map.Entry> + family: increment.getFamilyMap().entrySet()) { + columnBuilder.setFamily(ByteString.copyFrom(family.getKey())); + columnBuilder.clearQualifierValue(); + NavigableMap values = family.getValue(); + if (values != null && values.size() > 0) { + for (Map.Entry value: values.entrySet()) { + valueBuilder.setQualifier(ByteString.copyFrom(value.getKey())); + valueBuilder.setValue(ByteString.copyFrom( + Bytes.toBytes(value.getValue().longValue()))); + columnBuilder.addQualifierValue(valueBuilder.build()); + } + } + builder.addColumnValue(columnBuilder.build()); + } + return builder.build(); + } + + /** + * Create a protocol buffer Mutate based on a client Mutation + * + * @param mutateType + * @param mutation + * @return a mutate + * @throws IOException + */ + public static Mutate toMutate(final MutateType mutateType, + final Mutation mutation) throws IOException { + Mutate.Builder mutateBuilder = Mutate.newBuilder(); + mutateBuilder.setRow(ByteString.copyFrom(mutation.getRow())); + mutateBuilder.setMutateType(mutateType); + mutateBuilder.setWriteToWAL(mutation.getWriteToWAL()); + if (mutation.getLockId() >= 0) { + mutateBuilder.setLockId(mutation.getLockId()); + } + mutateBuilder.setTimestamp(mutation.getTimeStamp()); + Map attributes = mutation.getAttributesMap(); + if (!attributes.isEmpty()) { + NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); + for (Map.Entry attribute: attributes.entrySet()) { + attributeBuilder.setName(attribute.getKey()); + attributeBuilder.setValue(ByteString.copyFrom(attribute.getValue())); + mutateBuilder.addAttribute(attributeBuilder.build()); + } + } + ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); + QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); + for (Map.Entry> + family: mutation.getFamilyMap().entrySet()) { + columnBuilder.setFamily(ByteString.copyFrom(family.getKey())); + columnBuilder.clearQualifierValue(); + for (KeyValue value: family.getValue()) { + valueBuilder.setQualifier(ByteString.copyFrom(value.getQualifier())); + valueBuilder.setValue(ByteString.copyFrom(value.getValue())); + valueBuilder.setTimestamp(value.getTimestamp()); + if (mutateType == MutateType.DELETE) { + KeyValue.Type keyValueType = KeyValue.Type.codeToType(value.getType()); + valueBuilder.setDeleteType(toDeleteType(keyValueType)); + } + columnBuilder.addQualifierValue(valueBuilder.build()); + } + mutateBuilder.addColumnValue(columnBuilder.build()); + } + return mutateBuilder.build(); + } + + /** + * Convert a client Result to a protocol buffer Result + * + * @param result the client Result to convert + * @return the converted protocol buffer Result + */ + public static 
ClientProtos.Result toResult(final Result result) { + ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); + Cell [] cells = result.raw(); + if (cells != null) { + for (Cell c : cells) { + builder.addKeyValue(toKeyValue(c)); + } + } + return builder.build(); + } + + /** + * Convert a protocol buffer Result to a client Result + * + * @param proto the protocol buffer Result to convert + * @return the converted client Result + */ + public static Result toResult(final ClientProtos.Result proto) { + List values = proto.getKeyValueList(); + List keyValues = new ArrayList(values.size()); + for (HBaseProtos.KeyValue kv: values) { + keyValues.add(toKeyValue(kv)); + } + return new Result(keyValues); + } + + /** + * Convert a ByteArrayComparable to a protocol buffer Comparator + * + * @param comparator the ByteArrayComparable to convert + * @return the converted protocol buffer Comparator + */ + public static ComparatorProtos.Comparator toComparator(ByteArrayComparable comparator) { + ComparatorProtos.Comparator.Builder builder = ComparatorProtos.Comparator.newBuilder(); + builder.setName(comparator.getClass().getName()); + builder.setSerializedComparator(ByteString.copyFrom(comparator.toByteArray())); + return builder.build(); + } + + /** + * Convert a protocol buffer Comparator to a ByteArrayComparable + * + * @param proto the protocol buffer Comparator to convert + * @return the converted ByteArrayComparable + */ + @SuppressWarnings("unchecked") + public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto) + throws IOException { + String type = proto.getName(); + String funcName = "parseFrom"; + byte [] value = proto.getSerializedComparator().toByteArray(); + try { + Class c = + (Class)(Class.forName(type)); + Method parseFrom = c.getMethod(funcName, byte[].class); + if (parseFrom == null) { + throw new IOException("Unable to locate function: " + funcName + " in type: " + type); + } + return (ByteArrayComparable)parseFrom.invoke(null, value); + } catch (Exception e) { + throw new IOException(e); + } + } + + /** + * Convert a protocol buffer Filter to a client Filter + * + * @param proto the protocol buffer Filter to convert + * @return the converted Filter + */ + @SuppressWarnings("unchecked") + public static Filter toFilter(HBaseProtos.Filter proto) throws IOException { + String type = proto.getName(); + final byte [] value = proto.getSerializedFilter().toByteArray(); + String funcName = "parseFrom"; + try { + Class c = + (Class)Class.forName(type); + Method parseFrom = c.getMethod(funcName, byte[].class); + if (parseFrom == null) { + throw new IOException("Unable to locate function: " + funcName + " in type: " + type); + } + return (Filter)parseFrom.invoke(c, value); + } catch (Exception e) { + throw new IOException(e); + } + } + + /** + * Convert a client Filter to a protocol buffer Filter + * + * @param filter the Filter to convert + * @return the converted protocol buffer Filter + */ + public static HBaseProtos.Filter toFilter(Filter filter) { + HBaseProtos.Filter.Builder builder = HBaseProtos.Filter.newBuilder(); + builder.setName(filter.getClass().getName()); + builder.setSerializedFilter(ByteString.copyFrom(filter.toByteArray())); + return builder.build(); + } + + /** + * Convert a delete KeyValue type to protocol buffer DeleteType. 
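+ * In short, the switch below maps:
+ *
+ *   KeyValue.Type.Delete        to DeleteType.DELETE_ONE_VERSION
+ *   KeyValue.Type.DeleteColumn  to DeleteType.DELETE_MULTIPLE_VERSIONS
+ *   KeyValue.Type.DeleteFamily  to DeleteType.DELETE_FAMILY
+ *
+ * and any other KeyValue type is rejected with an IOException.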
+ * + * @param type + * @return a DeleteType + * @throws IOException + */ + public static DeleteType toDeleteType( + KeyValue.Type type) throws IOException { + switch (type) { + case Delete: + return DeleteType.DELETE_ONE_VERSION; + case DeleteColumn: + return DeleteType.DELETE_MULTIPLE_VERSIONS; + case DeleteFamily: + return DeleteType.DELETE_FAMILY; + default: + throw new IOException("Unknown delete type: " + type); + } + } + + /** + * Convert a protocol buffer Parameter to a Java object + * + * @param parameter the protocol buffer Parameter to convert + * @return the converted Java object + * @throws IOException if failed to deserialize the parameter + */ + public static Object toObject( + final NameBytesPair parameter) throws IOException { + if (parameter == null || !parameter.hasValue()) return null; + byte[] bytes = parameter.getValue().toByteArray(); + ByteArrayInputStream bais = null; + try { + bais = new ByteArrayInputStream(bytes); + DataInput in = new DataInputStream(bais); + return HbaseObjectWritable.readObject(in, null); + } finally { + if (bais != null) { + bais.close(); + } + } + } + + /** + * Convert a stringified protocol buffer exception Parameter to a Java Exception + * + * @param parameter the protocol buffer Parameter to convert + * @return the converted Exception + * @throws IOException if failed to deserialize the parameter + */ + @SuppressWarnings("unchecked") + public static Throwable toException( + final NameBytesPair parameter) throws IOException { + if (parameter == null || !parameter.hasValue()) return null; + String desc = parameter.getValue().toStringUtf8(); + String type = parameter.getName(); + try { + Class c = + (Class)Class.forName(type); + Constructor cn = + c.getDeclaredConstructor(String.class); + return cn.newInstance(desc); + } catch (Exception e) { + throw new IOException(e); + } + } + + /** + * Serialize a Java Object into a Parameter. The Java Object should be a + * Writable or protocol buffer Message + * + * @param value the Writable/Message object to be serialized + * @return the converted protocol buffer Parameter + * @throws IOException if failed to serialize the object + */ + public static NameBytesPair toParameter( + final Object value) throws IOException { + Class declaredClass = Object.class; + if (value != null) { + declaredClass = value.getClass(); + } + return toParameter(declaredClass, value); + } + + /** + * Serialize a Java Object into a Parameter. 
The Java Object should be a + * Writable or protocol buffer Message + * + * @param declaredClass the declared class of the parameter + * @param value the Writable/Message object to be serialized + * @return the converted protocol buffer Parameter + * @throws IOException if failed to serialize the object + */ + public static NameBytesPair toParameter( + final Class declaredClass, final Object value) throws IOException { + NameBytesPair.Builder builder = NameBytesPair.newBuilder(); + builder.setName(declaredClass.getName()); + if (value != null) { + ByteArrayOutputStream baos = null; + try { + baos = new ByteArrayOutputStream(); + DataOutput out = new DataOutputStream(baos); + Class clz = declaredClass; + if (HbaseObjectWritable.getClassCode(declaredClass) == null) { + clz = value.getClass(); + } + HbaseObjectWritable.writeObject(out, value, clz, null); + builder.setValue( + ByteString.copyFrom(baos.toByteArray())); + } finally { + if (baos != null) { + baos.close(); + } + } + } + return builder.build(); + } + +// Start helpers for Client + + /** + * A helper to invoke a Get using client protocol. + * + * @param client + * @param regionName + * @param get + * @return the result of the Get + * @throws IOException + */ + public static Result get(final ClientProtocol client, + final byte[] regionName, final Get get) throws IOException { + GetRequest request = + RequestConverter.buildGetRequest(regionName, get); + try { + GetResponse response = client.get(null, request); + if (response == null) return null; + return toResult(response.getResult()); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to get a row of the closet one before using client protocol. + * + * @param client + * @param regionName + * @param row + * @param family + * @return the row or the closestRowBefore if it doesn't exist + * @throws IOException + */ + public static Result getRowOrBefore(final ClientProtocol client, + final byte[] regionName, final byte[] row, + final byte[] family) throws IOException { + GetRequest request = + RequestConverter.buildGetRowOrBeforeRequest( + regionName, row, family); + try { + GetResponse response = client.get(null, request); + if (!response.hasResult()) return null; + return toResult(response.getResult()); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to invoke a multi action using client protocol. 
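+ * Illustrative call only; the ClientProtocol stub and the MultiAction are
+ * assumed to have been built by the caller (for example by the connection
+ * internals):
+ *
+ *   MultiResponse resp = ProtobufUtil.multi(client, multiAction);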
+ * + * @param client + * @param multi + * @return a multi response + * @throws IOException + */ + public static MultiResponse multi(final ClientProtocol client, + final MultiAction multi) throws IOException { + try { + MultiResponse response = new MultiResponse(); + for (Map.Entry>> e: multi.actions.entrySet()) { + byte[] regionName = e.getKey(); + int rowMutations = 0; + List> actions = e.getValue(); + for (Action action: actions) { + Row row = action.getAction(); + if (row instanceof RowMutations) { + MultiRequest request = + RequestConverter.buildMultiRequest(regionName, (RowMutations)row); + client.multi(null, request); + response.add(regionName, action.getOriginalIndex(), new Result()); + rowMutations++; + } + } + if (actions.size() > rowMutations) { + MultiRequest request = + RequestConverter.buildMultiRequest(regionName, actions); + ClientProtos.MultiResponse + proto = client.multi(null, request); + List results = ResponseConverter.getResults(proto); + for (int i = 0, n = results.size(); i < n; i++) { + int originalIndex = actions.get(i).getOriginalIndex(); + response.add(regionName, originalIndex, results.get(i)); + } + } + } + return response; + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to bulk load a list of HFiles using client protocol. + * + * @param client + * @param familyPaths + * @param regionName + * @param assignSeqNum + * @return true if all are loaded + * @throws IOException + */ + public static boolean bulkLoadHFile(final ClientProtocol client, + final List> familyPaths, + final byte[] regionName, boolean assignSeqNum) throws IOException { + BulkLoadHFileRequest request = + RequestConverter.buildBulkLoadHFileRequest(familyPaths, regionName, assignSeqNum); + try { + BulkLoadHFileResponse response = + client.bulkLoadHFile(null, request); + return response.getLoaded(); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + public static CoprocessorServiceResponse execService(final ClientProtocol client, + final CoprocessorServiceCall call, final byte[] regionName) throws IOException { + CoprocessorServiceRequest request = CoprocessorServiceRequest.newBuilder() + .setCall(call).setRegion( + RequestConverter.buildRegionSpecifier(REGION_NAME, regionName)).build(); + try { + CoprocessorServiceResponse response = + client.execService(null, request); + return response; + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + public static CoprocessorServiceResponse execService(final MasterAdminProtocol client, + final CoprocessorServiceCall call) throws IOException { + CoprocessorServiceRequest request = CoprocessorServiceRequest.newBuilder() + .setCall(call).setRegion( + RequestConverter.buildRegionSpecifier(REGION_NAME, HConstants.EMPTY_BYTE_ARRAY)).build(); + try { + CoprocessorServiceResponse response = + client.execMasterService(null, request); + return response; + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + @SuppressWarnings("unchecked") + public static T newServiceStub(Class service, RpcChannel channel) + throws Exception { + return (T)Methods.call(service, null, "newStub", + new Class[]{ RpcChannel.class }, new Object[]{ channel }); + } + +// End helpers for Client +// Start helpers for Admin + + /** + * A helper to retrieve region info given a region name + * using admin protocol. 
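+ * Typical use, with the AdminProtocol stub and region name assumed to come
+ * from the caller:
+ *
+ *   HRegionInfo hri = ProtobufUtil.getRegionInfo(admin, regionName);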
+ * + * @param admin + * @param regionName + * @return the retrieved region info + * @throws IOException + */ + public static HRegionInfo getRegionInfo(final AdminProtocol admin, + final byte[] regionName) throws IOException { + try { + GetRegionInfoRequest request = + RequestConverter.buildGetRegionInfoRequest(regionName); + GetRegionInfoResponse response = + admin.getRegionInfo(null, request); + return HRegionInfo.convert(response.getRegionInfo()); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to close a region given a region name + * using admin protocol. + * + * @param admin + * @param regionName + * @param transitionInZK + * @throws IOException + */ + public static void closeRegion(final AdminProtocol admin, + final byte[] regionName, final boolean transitionInZK) throws IOException { + CloseRegionRequest closeRegionRequest = + RequestConverter.buildCloseRegionRequest(regionName, transitionInZK); + try { + admin.closeRegion(null, closeRegionRequest); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to close a region given a region name + * using admin protocol. + * + * @param admin + * @param regionName + * @param versionOfClosingNode + * @return true if the region is closed + * @throws IOException + */ + public static boolean closeRegion(final AdminProtocol admin, final byte[] regionName, + final int versionOfClosingNode, final ServerName destinationServer, + final boolean transitionInZK) throws IOException { + CloseRegionRequest closeRegionRequest = + RequestConverter.buildCloseRegionRequest( + regionName, versionOfClosingNode, destinationServer, transitionInZK); + try { + CloseRegionResponse response = admin.closeRegion(null, closeRegionRequest); + return ResponseConverter.isClosed(response); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + + /** + * A helper to open a region using admin protocol. + * @param admin + * @param region + * @throws IOException + */ + public static void openRegion(final AdminProtocol admin, + final HRegionInfo region) throws IOException { + OpenRegionRequest request = + RequestConverter.buildOpenRegionRequest(region, -1); + try { + admin.openRegion(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + /** + * A helper to get the all the online regions on a region + * server using admin protocol. + * + * @param admin + * @return a list of online region info + * @throws IOException + */ + public static List getOnlineRegions(final AdminProtocol admin) throws IOException { + GetOnlineRegionRequest request = RequestConverter.buildGetOnlineRegionRequest(); + GetOnlineRegionResponse response = null; + try { + response = admin.getOnlineRegion(null, request); + } catch (ServiceException se) { + throw getRemoteException(se); + } + return getRegionInfos(response); + } + + /** + * Get the list of region info from a GetOnlineRegionResponse + * + * @param proto the GetOnlineRegionResponse + * @return the list of region info or null if proto is null + */ + static List getRegionInfos(final GetOnlineRegionResponse proto) { + if (proto == null) return null; + List regionInfos = new ArrayList(); + for (RegionInfo regionInfo: proto.getRegionInfoList()) { + regionInfos.add(HRegionInfo.convert(regionInfo)); + } + return regionInfos; + } + + /** + * A helper to get the info of a region server using admin protocol. 
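+ * Illustrative call; the returned protobuf ServerInfo can be unwrapped with
+ * the toServerName helper above (the admin stub is assumed to be connected
+ * to the target region server):
+ *
+ *   ServerInfo info = ProtobufUtil.getServerInfo(admin);
+ *   ServerName sn = ProtobufUtil.toServerName(info.getServerName());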
+ * + * @param admin + * @return the server name + * @throws IOException + */ + public static ServerInfo getServerInfo( + final AdminProtocol admin) throws IOException { + GetServerInfoRequest request = RequestConverter.buildGetServerInfoRequest(); + try { + GetServerInfoResponse response = admin.getServerInfo(null, request); + return response.getServerInfo(); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to get the list of files of a column family + * on a given region using admin protocol. + * + * @param admin + * @param regionName + * @param family + * @return the list of store files + * @throws IOException + */ + public static List getStoreFiles(final AdminProtocol admin, + final byte[] regionName, final byte[] family) throws IOException { + GetStoreFileRequest request = + RequestConverter.buildGetStoreFileRequest(regionName, family); + try { + GetStoreFileResponse response = admin.getStoreFile(null, request); + return response.getStoreFileList(); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + /** + * A helper to split a region using admin protocol. + * + * @param admin + * @param hri + * @param splitPoint + * @throws IOException + */ + public static void split(final AdminProtocol admin, + final HRegionInfo hri, byte[] splitPoint) throws IOException { + SplitRegionRequest request = + RequestConverter.buildSplitRegionRequest(hri.getRegionName(), splitPoint); + try { + admin.splitRegion(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + +// End helpers for Admin + + /* + * Get the total (read + write) requests from a RegionLoad pb + * @param rl - RegionLoad pb + * @return total (read + write) requests + */ + public static long getTotalRequestsCount(RegionLoad rl) { + if (rl == null) { + return 0; + } + + return rl.getReadRequestsCount() + rl.getWriteRequestsCount(); + } + + + /** + * @param m Message to get delimited pb serialization of (with pb magic prefix) + */ + public static byte [] toDelimitedByteArray(final Message m) throws IOException { + // Allocate arbitrary big size so we avoid resizing. + ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); + m.writeDelimitedTo(baos); + baos.close(); + return ProtobufUtil.prependPBMagic(baos.toByteArray()); + } + + /** + * Converts a Permission proto to a client Permission object. + * + * @param proto the protobuf Permission + * @return the converted Permission + */ + public static Permission toPermission(AccessControlProtos.Permission proto) { + if (proto.hasTable()) { + return toTablePermission(proto); + } else { + List actions = toPermissionActions(proto.getActionList()); + return new Permission(actions.toArray(new Permission.Action[actions.size()])); + } + } + + /** + * Converts a Permission proto to a client TablePermission object. 
+ * + * @param proto the protobuf Permission + * @return the converted TablePermission + */ + public static TablePermission toTablePermission(AccessControlProtos.Permission proto) { + List actions = toPermissionActions(proto.getActionList()); + + byte[] qualifier = null; + byte[] family = null; + byte[] table = null; + + if (proto.hasTable()) table = proto.getTable().toByteArray(); + if (proto.hasFamily()) family = proto.getFamily().toByteArray(); + if (proto.hasQualifier()) qualifier = proto.getQualifier().toByteArray(); + + return new TablePermission(table, family, qualifier, + actions.toArray(new Permission.Action[actions.size()])); + } + + /** + * Convert a client Permission to a Permission proto + * + * @param perm the client Permission + * @return the protobuf Permission + */ + public static AccessControlProtos.Permission toPermission(Permission perm) { + AccessControlProtos.Permission.Builder builder = AccessControlProtos.Permission.newBuilder(); + if (perm instanceof TablePermission) { + TablePermission tablePerm = (TablePermission)perm; + if (tablePerm.hasTable()) { + builder.setTable(ByteString.copyFrom(tablePerm.getTable())); + } + if (tablePerm.hasFamily()) { + builder.setFamily(ByteString.copyFrom(tablePerm.getFamily())); + } + if (tablePerm.hasQualifier()) { + builder.setQualifier(ByteString.copyFrom(tablePerm.getQualifier())); + } + } + for (Permission.Action a : perm.getActions()) { + builder.addAction(toPermissionAction(a)); + } + return builder.build(); + } + + /** + * Converts a list of Permission.Action proto to a list of client Permission.Action objects. + * + * @param protoActions the list of protobuf Actions + * @return the converted list of Actions + */ + public static List toPermissionActions( + List protoActions) { + List actions = new ArrayList(protoActions.size()); + for (AccessControlProtos.Permission.Action a : protoActions) { + actions.add(toPermissionAction(a)); + } + return actions; + } + + /** + * Converts a Permission.Action proto to a client Permission.Action object. 
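+ * The conversion is a one-to-one mapping by name, i.e. the switch below
+ * turns AccessControlProtos.Permission.Action.READ into
+ * Permission.Action.READ, WRITE into WRITE, and so on for EXEC, CREATE
+ * and ADMIN.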
+ * + * @param action the protobuf Action + * @return the converted Action + */ + public static Permission.Action toPermissionAction( + AccessControlProtos.Permission.Action action) { + switch (action) { + case READ: + return Permission.Action.READ; + case WRITE: + return Permission.Action.WRITE; + case EXEC: + return Permission.Action.EXEC; + case CREATE: + return Permission.Action.CREATE; + case ADMIN: + return Permission.Action.ADMIN; + } + throw new IllegalArgumentException("Unknown action value "+action.name()); + } + + /** + * Convert a client Permission.Action to a Permission.Action proto + * + * @param action the client Action + * @return the protobuf Action + */ + public static AccessControlProtos.Permission.Action toPermissionAction( + Permission.Action action) { + switch (action) { + case READ: + return AccessControlProtos.Permission.Action.READ; + case WRITE: + return AccessControlProtos.Permission.Action.WRITE; + case EXEC: + return AccessControlProtos.Permission.Action.EXEC; + case CREATE: + return AccessControlProtos.Permission.Action.CREATE; + case ADMIN: + return AccessControlProtos.Permission.Action.ADMIN; + } + throw new IllegalArgumentException("Unknown action value "+action.name()); + } + + /** + * Convert a client user permission to a user permission proto + * + * @param perm the client UserPermission + * @return the protobuf UserPermission + */ + public static AccessControlProtos.UserPermission toUserPermission(UserPermission perm) { + AccessControlProtos.Permission.Builder permissionBuilder = + AccessControlProtos.Permission.newBuilder(); + for (Permission.Action a : perm.getActions()) { + permissionBuilder.addAction(toPermissionAction(a)); + } + if (perm.hasTable()) { + permissionBuilder.setTable(ByteString.copyFrom(perm.getTable())); + } + if (perm.hasFamily()) { + permissionBuilder.setFamily(ByteString.copyFrom(perm.getFamily())); + } + if (perm.hasQualifier()) { + permissionBuilder.setQualifier(ByteString.copyFrom(perm.getQualifier())); + } + + return AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFrom(perm.getUser())) + .setPermission(permissionBuilder) + .build(); + } + + /** + * Converts a user permission proto to a client user permission object. 
+ * + * @param proto the protobuf UserPermission + * @return the converted UserPermission + */ + public static UserPermission toUserPermission(AccessControlProtos.UserPermission proto) { + AccessControlProtos.Permission permission = proto.getPermission(); + List actions = toPermissionActions(permission.getActionList()); + + byte[] qualifier = null; + byte[] family = null; + byte[] table = null; + + if (permission.hasTable()) table = permission.getTable().toByteArray(); + if (permission.hasFamily()) family = permission.getFamily().toByteArray(); + if (permission.hasQualifier()) qualifier = permission.getQualifier().toByteArray(); + + return new UserPermission(proto.getUser().toByteArray(), + table, family, qualifier, + actions.toArray(new Permission.Action[actions.size()])); + } + + /** + * Convert a ListMultimap where key is username + * to a protobuf UserPermission + * + * @param perm the list of user and table permissions + * @return the protobuf UserTablePermissions + */ + public static AccessControlProtos.UserTablePermissions toUserTablePermissions( + ListMultimap perm) { + AccessControlProtos.UserTablePermissions.Builder builder = + AccessControlProtos.UserTablePermissions.newBuilder(); + for (Map.Entry> entry : perm.asMap().entrySet()) { + AccessControlProtos.UserTablePermissions.UserPermissions.Builder userPermBuilder = + AccessControlProtos.UserTablePermissions.UserPermissions.newBuilder(); + userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey())); + for (TablePermission tablePerm: entry.getValue()) { + userPermBuilder.addPermissions(toPermission(tablePerm)); + } + builder.addPermissions(userPermBuilder.build()); + } + return builder.build(); + } + + /** + * A utility used to grant a user some permissions. The permissions will + * be global if table is not specified. Otherwise, they are for those + * table/column family/qualifier only. + *
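+ * Illustrative call granting READ on a single table; the protocol stub, the
+ * user name and the table name below are placeholders, and a null family and
+ * qualifier cover the whole table:
+ *
+ *   ProtobufUtil.grant(protocol, "bob", Bytes.toBytes("t1"), null, null,
+ *     Permission.Action.READ);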
      + * It's also called by the shell, in case you want to find references. + * + * @param protocol the AccessControlService protocol proxy + * @param userShortName the short name of the user to grant permissions + * @param t optional table name + * @param f optional column family + * @param q optional qualifier + * @param actions the permissions to be granted + * @throws ServiceException + */ + public static void grant(AccessControlService.BlockingInterface protocol, + String userShortName, byte[] t, byte[] f, byte[] q, + Permission.Action... actions) throws ServiceException { + List permActions = + Lists.newArrayListWithCapacity(actions.length); + for (Permission.Action a : actions) { + permActions.add(ProtobufUtil.toPermissionAction(a)); + } + AccessControlProtos.GrantRequest request = RequestConverter. + buildGrantRequest(userShortName, t, f, q, permActions.toArray( + new AccessControlProtos.Permission.Action[actions.length])); + protocol.grant(null, request); + } + + /** + * A utility used to revoke a user some permissions. The permissions will + * be global if table is not specified. Otherwise, they are for those + * table/column family/qualifier only. + *
      + * It's also called by the shell, in case you want to find references. + * + * @param protocol the AccessControlService protocol proxy + * @param userShortName the short name of the user to revoke permissions + * @param t optional table name + * @param f optional column family + * @param q optional qualifier + * @param actions the permissions to be revoked + * @throws ServiceException + */ + public static void revoke(AccessControlService.BlockingInterface protocol, + String userShortName, byte[] t, byte[] f, byte[] q, + Permission.Action... actions) throws ServiceException { + List permActions = + Lists.newArrayListWithCapacity(actions.length); + for (Permission.Action a : actions) { + permActions.add(ProtobufUtil.toPermissionAction(a)); + } + AccessControlProtos.RevokeRequest request = RequestConverter. + buildRevokeRequest(userShortName, t, f, q, permActions.toArray( + new AccessControlProtos.Permission.Action[actions.length])); + protocol.revoke(null, request); + } + + /** + * A utility used to get user permissions. + *
      + * It's also called by the shell, in case you want to find references. + * + * @param protocol the AccessControlService protocol proxy + * @param t optional table name + * @throws ServiceException + */ + public static List getUserPermissions( + AccessControlService.BlockingInterface protocol, + byte[] t) throws ServiceException { + AccessControlProtos.UserPermissionsRequest.Builder builder = + AccessControlProtos.UserPermissionsRequest.newBuilder(); + if (t != null) { + builder.setTable(ByteString.copyFrom(t)); + } + AccessControlProtos.UserPermissionsRequest request = builder.build(); + AccessControlProtos.UserPermissionsResponse response = + protocol.getUserPermissions(null, request); + List perms = new ArrayList(); + for (AccessControlProtos.UserPermission perm: response.getPermissionList()) { + perms.add(ProtobufUtil.toUserPermission(perm)); + } + return perms; + } + + /** + * Convert a protobuf UserTablePermissions to a + * ListMultimap where key is username. + * + * @param proto the protobuf UserPermission + * @return the converted UserPermission + */ + public static ListMultimap toUserTablePermissions( + AccessControlProtos.UserTablePermissions proto) { + ListMultimap perms = ArrayListMultimap.create(); + AccessControlProtos.UserTablePermissions.UserPermissions userPerm; + + for (int i = 0; i < proto.getPermissionsCount(); i++) { + userPerm = proto.getPermissions(i); + for (int j = 0; j < userPerm.getPermissionsCount(); j++) { + TablePermission tablePerm = toTablePermission(userPerm.getPermissions(j)); + perms.put(userPerm.getUser().toStringUtf8(), tablePerm); + } + } + + return perms; + } + + /** + * Converts a Token instance (with embedded identifier) to the protobuf representation. + * + * @param token the Token instance to copy + * @return the protobuf Token message + */ + public static AuthenticationProtos.Token toToken(Token token) { + AuthenticationProtos.Token.Builder builder = AuthenticationProtos.Token.newBuilder(); + builder.setIdentifier(ByteString.copyFrom(token.getIdentifier())); + builder.setPassword(ByteString.copyFrom(token.getPassword())); + if (token.getService() != null) { + builder.setService(ByteString.copyFromUtf8(token.getService().toString())); + } + return builder.build(); + } + + /** + * Converts a protobuf Token message back into a Token instance. + * + * @param proto the protobuf Token message + * @return the Token instance + */ + public static Token toToken(AuthenticationProtos.Token proto) { + return new Token( + proto.hasIdentifier() ? proto.getIdentifier().toByteArray() : null, + proto.hasPassword() ? proto.getPassword().toByteArray() : null, + AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE, + proto.hasService() ? 
new Text(proto.getService().toStringUtf8()) : null); + } + + /** + * Find the HRegion encoded name based on a region specifier + * + * @param regionSpecifier the region specifier + * @return the corresponding region's encoded name + * @throws DoNotRetryIOException if the specifier type is unsupported + */ + public static String getRegionEncodedName( + final RegionSpecifier regionSpecifier) throws DoNotRetryIOException { + byte[] value = regionSpecifier.getValue().toByteArray(); + RegionSpecifierType type = regionSpecifier.getType(); + switch (type) { + case REGION_NAME: + return HRegionInfo.encodeRegionName(value); + case ENCODED_REGION_NAME: + return Bytes.toString(value); + default: + throw new DoNotRetryIOException( + "Unsupported region specifier type: " + type); + } + } + + public static ScanMetrics toScanMetrics(final byte[] bytes) { + MapReduceProtos.ScanMetrics.Builder builder = MapReduceProtos.ScanMetrics.newBuilder(); + try { + builder.mergeFrom(bytes); + } catch (InvalidProtocolBufferException e) { + //Ignored there are just no key values to add. + } + MapReduceProtos.ScanMetrics pScanMetrics = builder.build(); + ScanMetrics scanMetrics = new ScanMetrics(); + for (HBaseProtos.NameInt64Pair pair : pScanMetrics.getMetricsList()) { + if (pair.hasName() && pair.hasValue()) { + scanMetrics.setCounter(pair.getName(), pair.getValue()); + } + } + return scanMetrics; + } + + public static MapReduceProtos.ScanMetrics toScanMetrics(ScanMetrics scanMetrics) { + MapReduceProtos.ScanMetrics.Builder builder = MapReduceProtos.ScanMetrics.newBuilder(); + Map metrics = scanMetrics.getMetricsMap(); + for (Entry e : metrics.entrySet()) { + HBaseProtos.NameInt64Pair nameInt64Pair = + HBaseProtos.NameInt64Pair.newBuilder() + .setName(e.getKey()) + .setValue(e.getValue()) + .build(); + builder.addMetrics(nameInt64Pair); + } + return builder.build(); + } + + /** + * Unwraps an exception from a protobuf service into the underlying (expected) IOException. + * This method will always throw an exception. + * @param se the {@code ServiceException} instance to convert into an {@code IOException} + */ + public static void toIOException(ServiceException se) throws IOException { + if (se == null) { + throw new NullPointerException("Null service exception passed!"); + } + + Throwable cause = se.getCause(); + if (cause != null && cause instanceof IOException) { + throw (IOException)cause; + } + throw new IOException(se); + } + + public static HBaseProtos.KeyValue toKeyValue(final Cell kv) { + // Doing this is going to kill us if we do it for all data passed. + // St.Ack 20121205 + // TODO: Do a Cell version + HBaseProtos.KeyValue.Builder kvbuilder = HBaseProtos.KeyValue.newBuilder(); + kvbuilder.setRow(ByteString.copyFrom(kv.getRowArray(), kv.getRowOffset(), + kv.getRowLength())); + kvbuilder.setFamily(ByteString.copyFrom(kv.getFamilyArray(), + kv.getFamilyOffset(), kv.getFamilyLength())); + kvbuilder.setQualifier(ByteString.copyFrom(kv.getQualifierArray(), + kv.getQualifierOffset(), kv.getQualifierLength())); + kvbuilder.setKeyType(HBaseProtos.KeyType.valueOf(kv.getTypeByte())); + kvbuilder.setTimestamp(kv.getTimestamp()); + kvbuilder.setValue(ByteString.copyFrom(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); + return kvbuilder.build(); + } + + public static KeyValue toKeyValue(final HBaseProtos.KeyValue kv) { + // Doing this is going to kill us if we do it for all data passed. 
+ // St.Ack 20121205 + // TODO: Do a Cell version + return new KeyValue(kv.getRow().toByteArray(), + kv.getFamily().toByteArray(), + kv.getQualifier().toByteArray(), + kv.getTimestamp(), + KeyValue.Type.codeToType((byte)kv.getKeyType().getNumber()), + kv.getValue().toByteArray()); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java new file mode 100644 index 0000000..8b452aa --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -0,0 +1,1156 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.protobuf; + +import java.io.IOException; +import java.util.List; + + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.Action; +import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Row; +import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.filter.ByteArrayComparable; +import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; +import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.LockRowRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiAction; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.ColumnValue; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.ColumnValue.QualifierValue; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.MutateType; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.UnlockRowRequest; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; +import 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; + +import com.google.protobuf.ByteString; + +/** + * Helper utility to build protocol buffer requests, + * or build components for protocol buffer requests. + */ +@InterfaceAudience.Private +public final class RequestConverter { + + private RequestConverter() { + } + +// Start utilities for Client + +/** + * Create a new protocol buffer GetRequest to get a row, all columns in a family. + * If there is no such row, return the closest row before it. + * + * @param regionName the name of the region to get + * @param row the row to get + * @param family the column family to get + * @return a protocol buffer GetRequest + */ + public static GetRequest buildGetRowOrBeforeRequest( + final byte[] regionName, final byte[] row, final byte[] family) { + GetRequest.Builder builder = GetRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setClosestRowBefore(true); + builder.setRegion(region); + + Column.Builder columnBuilder = Column.newBuilder(); + columnBuilder.setFamily(ByteString.copyFrom(family)); + ClientProtos.Get.Builder getBuilder = + ClientProtos.Get.newBuilder(); + getBuilder.setRow(ByteString.copyFrom(row)); + getBuilder.addColumn(columnBuilder.build()); + builder.setGet(getBuilder.build()); + return builder.build(); + } + + /** + * Create a protocol buffer GetRequest for a client Get + * + * @param regionName the name of the region to get + * @param get the client Get + * @return a protocol buffer GetRequest + */ + public static GetRequest buildGetRequest(final byte[] regionName, + final Get get) throws IOException { + return buildGetRequest(regionName, get, false); + } + + /** + * Create a protocol buffer GetRequest for a client Get + * + * @param regionName the name of the region to get + * @param get the client Get + * @param existenceOnly whether to only check for the existence of the row + * @return a protocol buffer GetRequest + */ + public static GetRequest buildGetRequest(final byte[] regionName, + final Get get, final boolean existenceOnly) throws IOException { + GetRequest.Builder builder = GetRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setExistenceOnly(existenceOnly); + builder.setRegion(region); + builder.setGet(ProtobufUtil.toGet(get)); + return builder.build(); + } + + /** + * Create a protocol buffer MutateRequest for a client increment + * + * @param regionName + * @param row + * @param family + * @param qualifier + * @param amount + * @param writeToWAL + * @return a mutate request + */ + public static MutateRequest buildMutateRequest( + final byte[] regionName, final byte[] row, final byte[] family, + final byte [] qualifier, final long amount, final boolean writeToWAL) { + MutateRequest.Builder builder = MutateRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + + Mutate.Builder mutateBuilder = Mutate.newBuilder(); + mutateBuilder.setRow(ByteString.copyFrom(row)); + mutateBuilder.setMutateType(MutateType.INCREMENT); + mutateBuilder.setWriteToWAL(writeToWAL); + ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); + columnBuilder.setFamily(ByteString.copyFrom(family)); + QualifierValue.Builder
valueBuilder = QualifierValue.newBuilder(); + valueBuilder.setValue(ByteString.copyFrom(Bytes.toBytes(amount))); + valueBuilder.setQualifier(ByteString.copyFrom(qualifier)); + columnBuilder.addQualifierValue(valueBuilder.build()); + mutateBuilder.addColumnValue(columnBuilder.build()); + + builder.setMutate(mutateBuilder.build()); + return builder.build(); + } + + /** + * Create a protocol buffer MutateRequest for a conditioned put + * + * @param regionName + * @param row + * @param family + * @param qualifier + * @param comparator + * @param compareType + * @param put + * @return a mutate request + * @throws IOException + */ + public static MutateRequest buildMutateRequest( + final byte[] regionName, final byte[] row, final byte[] family, + final byte [] qualifier, final ByteArrayComparable comparator, + final CompareType compareType, final Put put) throws IOException { + MutateRequest.Builder builder = MutateRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + Condition condition = buildCondition( + row, family, qualifier, comparator, compareType); + builder.setMutate(ProtobufUtil.toMutate(MutateType.PUT, put)); + builder.setCondition(condition); + return builder.build(); + } + + /** + * Create a protocol buffer MutateRequest for a conditioned delete + * + * @param regionName + * @param row + * @param family + * @param qualifier + * @param comparator + * @param compareType + * @param delete + * @return a mutate request + * @throws IOException + */ + public static MutateRequest buildMutateRequest( + final byte[] regionName, final byte[] row, final byte[] family, + final byte [] qualifier, final ByteArrayComparable comparator, + final CompareType compareType, final Delete delete) throws IOException { + MutateRequest.Builder builder = MutateRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + Condition condition = buildCondition( + row, family, qualifier, comparator, compareType); + builder.setMutate(ProtobufUtil.toMutate(MutateType.DELETE, delete)); + builder.setCondition(condition); + return builder.build(); + } + + /** + * Create a protocol buffer MutateRequest for a put + * + * @param regionName + * @param put + * @return a mutate request + * @throws IOException + */ + public static MutateRequest buildMutateRequest( + final byte[] regionName, final Put put) throws IOException { + MutateRequest.Builder builder = MutateRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.setMutate(ProtobufUtil.toMutate(MutateType.PUT, put)); + return builder.build(); + } + + /** + * Create a protocol buffer MutateRequest for an append + * + * @param regionName + * @param append + * @return a mutate request + * @throws IOException + */ + public static MutateRequest buildMutateRequest( + final byte[] regionName, final Append append) throws IOException { + MutateRequest.Builder builder = MutateRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.setMutate(ProtobufUtil.toMutate(MutateType.APPEND, append)); + return builder.build(); + } + + /** + * Create a protocol buffer MutateRequest for a client increment + * + * @param regionName + * @param increment + * @return a mutate request + */ + public static 
MutateRequest buildMutateRequest( + final byte[] regionName, final Increment increment) { + MutateRequest.Builder builder = MutateRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.setMutate(ProtobufUtil.toMutate(increment)); + return builder.build(); + } + + /** + * Create a protocol buffer MutateRequest for a delete + * + * @param regionName + * @param delete + * @return a mutate request + * @throws IOException + */ + public static MutateRequest buildMutateRequest( + final byte[] regionName, final Delete delete) throws IOException { + MutateRequest.Builder builder = MutateRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.setMutate(ProtobufUtil.toMutate(MutateType.DELETE, delete)); + return builder.build(); + } + + /** + * Create a protocol buffer MultiRequest for a row mutations + * + * @param regionName + * @param rowMutations + * @return a multi request + * @throws IOException + */ + public static MultiRequest buildMultiRequest(final byte[] regionName, + final RowMutations rowMutations) throws IOException { + MultiRequest.Builder builder = MultiRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.setAtomic(true); + for (Mutation mutation: rowMutations.getMutations()) { + MutateType mutateType = null; + if (mutation instanceof Put) { + mutateType = MutateType.PUT; + } else if (mutation instanceof Delete) { + mutateType = MutateType.DELETE; + } else { + throw new DoNotRetryIOException( + "RowMutations supports only put and delete, not " + + mutation.getClass().getName()); + } + Mutate mutate = ProtobufUtil.toMutate(mutateType, mutation); + builder.addAction(MultiAction.newBuilder().setMutate(mutate).build()); + } + return builder.build(); + } + + /** + * Create a protocol buffer ScanRequest for a client Scan + * + * @param regionName + * @param scan + * @param numberOfRows + * @param closeScanner + * @return a scan request + * @throws IOException + */ + public static ScanRequest buildScanRequest(final byte[] regionName, + final Scan scan, final int numberOfRows, + final boolean closeScanner) throws IOException { + ScanRequest.Builder builder = ScanRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setNumberOfRows(numberOfRows); + builder.setCloseScanner(closeScanner); + builder.setRegion(region); + builder.setScan(ProtobufUtil.toScan(scan)); + return builder.build(); + } + + /** + * Create a protocol buffer ScanRequest for a scanner id + * + * @param scannerId + * @param numberOfRows + * @param closeScanner + * @return a scan request + */ + public static ScanRequest buildScanRequest(final long scannerId, + final int numberOfRows, final boolean closeScanner) { + ScanRequest.Builder builder = ScanRequest.newBuilder(); + builder.setNumberOfRows(numberOfRows); + builder.setCloseScanner(closeScanner); + builder.setScannerId(scannerId); + return builder.build(); + } + + /** + * Create a protocol buffer ScanRequest for a scanner id + * + * @param scannerId + * @param numberOfRows + * @param closeScanner + * @param nextCallSeq + * @return a scan request + */ + public static ScanRequest buildScanRequest(final long scannerId, final int numberOfRows, + final boolean closeScanner, final long nextCallSeq) { + 
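// nextCallSeq is the client's running count of next() calls on this scanner; the region server
+ // compares it against its own count so that a retried RPC (for example after a timeout) can be
+ // recognized instead of being treated as a fresh call that silently advances the scanner.
+ // An illustrative caller-side sketch (scanner id, row count and sequence value are placeholders):
+ //   ScanRequest request = RequestConverter.buildScanRequest(scannerId, 10, false, nextCallSeq);
+ //   // the caller bumps nextCallSeq only after the call returns successfully +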
ScanRequest.Builder builder = ScanRequest.newBuilder(); + builder.setNumberOfRows(numberOfRows); + builder.setCloseScanner(closeScanner); + builder.setScannerId(scannerId); + builder.setNextCallSeq(nextCallSeq); + return builder.build(); + } + + /** + * Create a protocol buffer LockRowRequest + * + * @param regionName + * @param row + * @return a lock row request + */ + public static LockRowRequest buildLockRowRequest( + final byte[] regionName, final byte[] row) { + LockRowRequest.Builder builder = LockRowRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.addRow(ByteString.copyFrom(row)); + return builder.build(); + } + + /** + * Create a protocol buffer UnlockRowRequest + * + * @param regionName + * @param lockId + * @return a unlock row request + */ + public static UnlockRowRequest buildUnlockRowRequest( + final byte[] regionName, final long lockId) { + UnlockRowRequest.Builder builder = UnlockRowRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.setLockId(lockId); + return builder.build(); + } + + /** + * Create a protocol buffer bulk load request + * + * @param familyPaths + * @param regionName + * @param assignSeqNum + * @return a bulk load request + */ + public static BulkLoadHFileRequest buildBulkLoadHFileRequest( + final List> familyPaths, + final byte[] regionName, boolean assignSeqNum) { + BulkLoadHFileRequest.Builder builder = BulkLoadHFileRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + FamilyPath.Builder familyPathBuilder = FamilyPath.newBuilder(); + for (Pair familyPath: familyPaths) { + familyPathBuilder.setFamily(ByteString.copyFrom(familyPath.getFirst())); + familyPathBuilder.setPath(familyPath.getSecond()); + builder.addFamilyPath(familyPathBuilder.build()); + } + builder.setAssignSeqNum(assignSeqNum); + return builder.build(); + } + + /** + * Create a protocol buffer multi request for a list of actions. + * RowMutations in the list (if any) will be ignored. 
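+ * <p>An illustrative conversion sketch; the region name, row, family, qualifier and value
+ * byte arrays are placeholders, and the two-argument {@code Action} constructor shown
+ * (operation plus original index) is assumed from the client {@code Action} class:
+ * <pre>
+ *   List<Action<Object>> actions = new ArrayList<Action<Object>>();
+ *   actions.add(new Action<Object>(new Get(row), 0));
+ *   actions.add(new Action<Object>(new Put(row).add(family, qualifier, value), 1));
+ *   MultiRequest request = RequestConverter.buildMultiRequest(regionName, actions);
+ * </pre>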
+ * + * @param regionName + * @param actions + * @return a multi request + * @throws IOException + */ + public static MultiRequest buildMultiRequest(final byte[] regionName, + final List> actions) throws IOException { + MultiRequest.Builder builder = MultiRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + for (Action action: actions) { + MultiAction.Builder protoAction = MultiAction.newBuilder(); + + Row row = action.getAction(); + if (row instanceof Get) { + protoAction.setGet(ProtobufUtil.toGet((Get) row)); + } else if (row instanceof Put) { + protoAction.setMutate(ProtobufUtil.toMutate(MutateType.PUT, (Put) row)); + } else if (row instanceof Delete) { + protoAction.setMutate(ProtobufUtil.toMutate(MutateType.DELETE, (Delete) row)); + } else if (row instanceof Append) { + protoAction.setMutate(ProtobufUtil.toMutate(MutateType.APPEND, (Append) row)); + } else if (row instanceof Increment) { + protoAction.setMutate(ProtobufUtil.toMutate((Increment) row)); + } else if (row instanceof RowMutations) { + continue; // ignore RowMutations + } else { + throw new DoNotRetryIOException( + "multi doesn't support " + row.getClass().getName()); + } + builder.addAction(protoAction.build()); + } + return builder.build(); + } + +// End utilities for Client +//Start utilities for Admin + + /** + * Create a protocol buffer GetRegionInfoRequest for a given region name + * + * @param regionName the name of the region to get info + * @return a protocol buffer GetRegionInfoRequest + */ + public static GetRegionInfoRequest + buildGetRegionInfoRequest(final byte[] regionName) { + return buildGetRegionInfoRequest(regionName, false); + } + + /** + * Create a protocol buffer GetRegionInfoRequest for a given region name + * + * @param regionName the name of the region to get info + * @param includeCompactionState indicate if the compaction state is requested + * @return a protocol buffer GetRegionInfoRequest + */ + public static GetRegionInfoRequest + buildGetRegionInfoRequest(final byte[] regionName, + final boolean includeCompactionState) { + GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + if (includeCompactionState) { + builder.setCompactionState(includeCompactionState); + } + return builder.build(); + } + + /** + * Create a protocol buffer GetStoreFileRequest for a given region name + * + * @param regionName the name of the region to get info + * @param family the family to get store file list + * @return a protocol buffer GetStoreFileRequest + */ + public static GetStoreFileRequest + buildGetStoreFileRequest(final byte[] regionName, final byte[] family) { + GetStoreFileRequest.Builder builder = GetStoreFileRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.addFamily(ByteString.copyFrom(family)); + return builder.build(); + } + + /** + * Create a protocol buffer GetOnlineRegionRequest + * + * @return a protocol buffer GetOnlineRegionRequest + */ + public static GetOnlineRegionRequest buildGetOnlineRegionRequest() { + return GetOnlineRegionRequest.newBuilder().build(); + } + + /** + * Create a protocol buffer FlushRegionRequest for a given region name + * + * @param regionName the name of the region to get info + * @return a protocol buffer FlushRegionRequest + 
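* <p>A calling sketch; the connected {@code AdminProtocol} stub named {@code admin} and the
+ * region name are assumptions supplied by the caller, not something this helper provides:
+ * <pre>
+ *   FlushRegionRequest request = RequestConverter.buildFlushRegionRequest(regionName);
+ *   admin.flushRegion(null, request);
+ * </pre> +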
*/ + public static FlushRegionRequest + buildFlushRegionRequest(final byte[] regionName) { + FlushRegionRequest.Builder builder = FlushRegionRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + return builder.build(); + } + + /** + * Create a protocol buffer OpenRegionRequest to open a list of regions + * + * @param regionOpenInfos info of a list of regions to open + * @return a protocol buffer OpenRegionRequest + */ + public static OpenRegionRequest + buildOpenRegionRequest(final List> regionOpenInfos) { + OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); + for (Pair regionOpenInfo: regionOpenInfos) { + Integer second = regionOpenInfo.getSecond(); + int versionOfOfflineNode = second == null ? -1 : second.intValue(); + builder.addOpenInfo(buildRegionOpenInfo( + regionOpenInfo.getFirst(), versionOfOfflineNode)); + } + return builder.build(); + } + + /** + * Create a protocol buffer OpenRegionRequest for a given region + * + * @param region the region to open + * @param versionOfOfflineNode that needs to be present in the offline node + * @return a protocol buffer OpenRegionRequest + */ + public static OpenRegionRequest buildOpenRegionRequest( + final HRegionInfo region, final int versionOfOfflineNode) { + OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); + builder.addOpenInfo(buildRegionOpenInfo(region, versionOfOfflineNode)); + return builder.build(); + } + + /** + * Create a CloseRegionRequest for a given region name + * + * @param regionName the name of the region to close + * @param transitionInZK indicator if to transition in ZK + * @return a CloseRegionRequest + */ + public static CloseRegionRequest buildCloseRegionRequest( + final byte[] regionName, final boolean transitionInZK) { + CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.setTransitionInZK(transitionInZK); + return builder.build(); + } + + public static CloseRegionRequest buildCloseRegionRequest( + final byte[] regionName, final int versionOfClosingNode, + ServerName destinationServer, final boolean transitionInZK) { + CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.setVersionOfClosingNode(versionOfClosingNode); + builder.setTransitionInZK(transitionInZK); + if (destinationServer != null){ + builder.setDestinationServer(ProtobufUtil.toServerName(destinationServer) ); + } + return builder.build(); + } + + /** + * Create a CloseRegionRequest for a given encoded region name + * + * @param encodedRegionName the name of the region to close + * @param transitionInZK indicator if to transition in ZK + * @return a CloseRegionRequest + */ + public static CloseRegionRequest + buildCloseRegionRequest(final String encodedRegionName, + final boolean transitionInZK) { + CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.ENCODED_REGION_NAME, + Bytes.toBytes(encodedRegionName)); + builder.setRegion(region); + builder.setTransitionInZK(transitionInZK); + return builder.build(); + } + + /** + * Create a SplitRegionRequest for a given region name + * + * @param regionName the name of the region to split + * @param 
splitPoint the split point + * @return a SplitRegionRequest + */ + public static SplitRegionRequest buildSplitRegionRequest( + final byte[] regionName, final byte[] splitPoint) { + SplitRegionRequest.Builder builder = SplitRegionRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + if (splitPoint != null) { + builder.setSplitPoint(ByteString.copyFrom(splitPoint)); + } + return builder.build(); + } + + /** + * Create a CompactRegionRequest for a given region name + * + * @param regionName the name of the region to get info + * @param major indicator if it is a major compaction + * @return a CompactRegionRequest + */ + public static CompactRegionRequest buildCompactRegionRequest( + final byte[] regionName, final boolean major, final byte [] family) { + CompactRegionRequest.Builder builder = CompactRegionRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.setMajor(major); + if (family != null) { + builder.setFamily(ByteString.copyFrom(family)); + } + return builder.build(); + } + + /** + * Create a new GetServerInfoRequest + * + * @return a GetServerInfoRequest + */ + public static GetServerInfoRequest buildGetServerInfoRequest() { + GetServerInfoRequest.Builder builder = GetServerInfoRequest.newBuilder(); + return builder.build(); + } + + /** + * Create a new StopServerRequest + * + * @param reason the reason to stop the server + * @return a StopServerRequest + */ + public static StopServerRequest buildStopServerRequest(final String reason) { + StopServerRequest.Builder builder = StopServerRequest.newBuilder(); + builder.setReason(reason); + return builder.build(); + } + +//End utilities for Admin + + /** + * Convert a byte array to a protocol buffer RegionSpecifier + * + * @param type the region specifier type + * @param value the region specifier byte array value + * @return a protocol buffer RegionSpecifier + */ + public static RegionSpecifier buildRegionSpecifier( + final RegionSpecifierType type, final byte[] value) { + RegionSpecifier.Builder regionBuilder = RegionSpecifier.newBuilder(); + regionBuilder.setValue(ByteString.copyFrom(value)); + regionBuilder.setType(type); + return regionBuilder.build(); + } + + /** + * Create a protocol buffer Condition + * + * @param row + * @param family + * @param qualifier + * @param comparator + * @param compareType + * @return a Condition + * @throws IOException + */ + private static Condition buildCondition(final byte[] row, + final byte[] family, final byte [] qualifier, + final ByteArrayComparable comparator, + final CompareType compareType) throws IOException { + Condition.Builder builder = Condition.newBuilder(); + builder.setRow(ByteString.copyFrom(row)); + builder.setFamily(ByteString.copyFrom(family)); + builder.setQualifier(ByteString.copyFrom(qualifier)); + builder.setComparator(ProtobufUtil.toComparator(comparator)); + builder.setCompareType(compareType); + return builder.build(); + } + + /** + * Create a protocol buffer AddColumnRequest + * + * @param tableName + * @param column + * @return an AddColumnRequest + */ + public static AddColumnRequest buildAddColumnRequest( + final byte [] tableName, final HColumnDescriptor column) { + AddColumnRequest.Builder builder = AddColumnRequest.newBuilder(); + builder.setTableName(ByteString.copyFrom(tableName)); + builder.setColumnFamilies(column.convert()); + return builder.build(); + } + + /** + 
* Create a protocol buffer DeleteColumnRequest + * + * @param tableName + * @param columnName + * @return a DeleteColumnRequest + */ + public static DeleteColumnRequest buildDeleteColumnRequest( + final byte [] tableName, final byte [] columnName) { + DeleteColumnRequest.Builder builder = DeleteColumnRequest.newBuilder(); + builder.setTableName(ByteString.copyFrom(tableName)); + builder.setColumnName(ByteString.copyFrom(columnName)); + return builder.build(); + } + + /** + * Create a protocol buffer ModifyColumnRequest + * + * @param tableName + * @param column + * @return an ModifyColumnRequest + */ + public static ModifyColumnRequest buildModifyColumnRequest( + final byte [] tableName, final HColumnDescriptor column) { + ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder(); + builder.setTableName(ByteString.copyFrom(tableName)); + builder.setColumnFamilies(column.convert()); + return builder.build(); + } + + /** + * Create a protocol buffer MoveRegionRequest + * + * @param encodedRegionName + * @param destServerName + * @return A MoveRegionRequest + * @throws DeserializationException + */ + public static MoveRegionRequest buildMoveRegionRequest( + final byte [] encodedRegionName, final byte [] destServerName) throws DeserializationException { + MoveRegionRequest.Builder builder = MoveRegionRequest.newBuilder(); + builder.setRegion( + buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME,encodedRegionName)); + if (destServerName != null) { + builder.setDestServerName( + ProtobufUtil.toServerName(new ServerName(Bytes.toString(destServerName)))); + } + return builder.build(); + } + + /** + * Create a protocol buffer AssignRegionRequest + * + * @param regionName + * @return an AssignRegionRequest + */ + public static AssignRegionRequest buildAssignRegionRequest(final byte [] regionName) { + AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder(); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + return builder.build(); + } + + /** + * Creates a protocol buffer UnassignRegionRequest + * + * @param regionName + * @param force + * @return an UnassignRegionRequest + */ + public static UnassignRegionRequest buildUnassignRegionRequest( + final byte [] regionName, final boolean force) { + UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder(); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + builder.setForce(force); + return builder.build(); + } + + /** + * Creates a protocol buffer OfflineRegionRequest + * + * @param regionName + * @return an OfflineRegionRequest + */ + public static OfflineRegionRequest buildOfflineRegionRequest(final byte [] regionName) { + OfflineRegionRequest.Builder builder = OfflineRegionRequest.newBuilder(); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + return builder.build(); + } + + /** + * Creates a protocol buffer DeleteTableRequest + * + * @param tableName + * @return a DeleteTableRequest + */ + public static DeleteTableRequest buildDeleteTableRequest(final byte [] tableName) { + DeleteTableRequest.Builder builder = DeleteTableRequest.newBuilder(); + builder.setTableName(ByteString.copyFrom(tableName)); + return builder.build(); + } + + /** + * Creates a protocol buffer EnableTableRequest + * + * @param tableName + * @return an EnableTableRequest + */ + public static EnableTableRequest buildEnableTableRequest(final byte [] tableName) { + EnableTableRequest.Builder builder = 
EnableTableRequest.newBuilder(); + builder.setTableName(ByteString.copyFrom(tableName)); + return builder.build(); + } + + /** + * Creates a protocol buffer DisableTableRequest + * + * @param tableName + * @return a DisableTableRequest + */ + public static DisableTableRequest buildDisableTableRequest(final byte [] tableName) { + DisableTableRequest.Builder builder = DisableTableRequest.newBuilder(); + builder.setTableName(ByteString.copyFrom(tableName)); + return builder.build(); + } + + /** + * Creates a protocol buffer CreateTableRequest + * + * @param hTableDesc + * @param splitKeys + * @return a CreateTableRequest + */ + public static CreateTableRequest buildCreateTableRequest( + final HTableDescriptor hTableDesc, final byte [][] splitKeys) { + CreateTableRequest.Builder builder = CreateTableRequest.newBuilder(); + builder.setTableSchema(hTableDesc.convert()); + if (splitKeys != null) { + for (byte [] splitKey : splitKeys) { + builder.addSplitKeys(ByteString.copyFrom(splitKey)); + } + } + return builder.build(); + } + + + /** + * Creates a protocol buffer ModifyTableRequest + * + * @param table + * @param hTableDesc + * @return a ModifyTableRequest + */ + public static ModifyTableRequest buildModifyTableRequest( + final byte [] table, final HTableDescriptor hTableDesc) { + ModifyTableRequest.Builder builder = ModifyTableRequest.newBuilder(); + builder.setTableName(ByteString.copyFrom(table)); + builder.setTableSchema(hTableDesc.convert()); + return builder.build(); + } + + /** + * Creates a protocol buffer GetSchemaAlterStatusRequest + * + * @param tableName + * @return a GetSchemaAlterStatusRequest + */ + public static GetSchemaAlterStatusRequest buildGetSchemaAlterStatusRequest( + final byte [] tableName) { + GetSchemaAlterStatusRequest.Builder builder = GetSchemaAlterStatusRequest.newBuilder(); + builder.setTableName(ByteString.copyFrom(tableName)); + return builder.build(); + } + + /** + * Creates a protocol buffer GetTableDescriptorsRequest + * + * @param tableNames + * @return a GetTableDescriptorsRequest + */ + public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( + final List tableNames) { + GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder(); + if (tableNames != null) { + for (String str : tableNames) { + builder.addTableNames(str); + } + } + return builder.build(); + } + + /** + * Creates a protocol buffer IsMasterRunningRequest + * + * @return a IsMasterRunningRequest + */ + public static IsMasterRunningRequest buildIsMasterRunningRequest() { + return IsMasterRunningRequest.newBuilder().build(); + } + + /** + * Creates a protocol buffer BalanceRequest + * + * @return a BalanceRequest + */ + public static BalanceRequest buildBalanceRequest() { + return BalanceRequest.newBuilder().build(); + } + + /** + * Creates a protocol buffer SetBalancerRunningRequest + * + * @param on + * @param synchronous + * @return a SetBalancerRunningRequest + */ + public static SetBalancerRunningRequest buildSetBalancerRunningRequest(boolean on, boolean synchronous) { + return SetBalancerRunningRequest.newBuilder().setOn(on).setSynchronous(synchronous).build(); + } + + /** + * Creates a protocol buffer GetClusterStatusRequest + * + * @return A GetClusterStatusRequest + */ + public static GetClusterStatusRequest buildGetClusterStatusRequest() { + return GetClusterStatusRequest.newBuilder().build(); + } + + /** + * Creates a request for running a catalog scan + * @return A {@link CatalogScanRequest} + */ + public static CatalogScanRequest 
buildCatalogScanRequest() { + return CatalogScanRequest.newBuilder().build(); + } + + /** + * Creates a request for enabling/disabling the catalog janitor + * @return A {@link EnableCatalogJanitorRequest} + */ + public static EnableCatalogJanitorRequest buildEnableCatalogJanitorRequest(boolean enable) { + return EnableCatalogJanitorRequest.newBuilder().setEnable(enable).build(); + } + + /** + * Creates a request for querying the master whether the catalog janitor is enabled + * @return A {@link IsCatalogJanitorEnabledRequest} + */ + public static IsCatalogJanitorEnabledRequest buildIsCatalogJanitorEnabledRequest() { + return IsCatalogJanitorEnabledRequest.newBuilder().build(); + } + + /** + * Creates a request for querying the master the last flushed sequence Id for a region + * @param regionName + * @return A {@link GetLastFlushedSequenceIdRequest} + */ + public static GetLastFlushedSequenceIdRequest buildGetLastFlushedSequenceIdRequest( + byte[] regionName) { + return GetLastFlushedSequenceIdRequest.newBuilder().setRegionName( + ByteString.copyFrom(regionName)).build(); + } + + /** + * Create a request to grant user permissions. + * + * @param username the short user name who to grant permissions + * @param table optional table name the permissions apply + * @param family optional column family + * @param qualifier optional qualifier + * @param actions the permissions to be granted + * @return A {@link AccessControlProtos} GrantRequest + */ + public static AccessControlProtos.GrantRequest buildGrantRequest( + String username, byte[] table, byte[] family, byte[] qualifier, + AccessControlProtos.Permission.Action... actions) { + AccessControlProtos.Permission.Builder permissionBuilder = + AccessControlProtos.Permission.newBuilder(); + for (AccessControlProtos.Permission.Action a : actions) { + permissionBuilder.addAction(a); + } + if (table != null) { + permissionBuilder.setTable(ByteString.copyFrom(table)); + } + if (family != null) { + permissionBuilder.setFamily(ByteString.copyFrom(family)); + } + if (qualifier != null) { + permissionBuilder.setQualifier(ByteString.copyFrom(qualifier)); + } + + return AccessControlProtos.GrantRequest.newBuilder() + .setPermission( + AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)) + .setPermission(permissionBuilder.build()) + ).build(); + } + + /** + * Create a request to revoke user permissions. + * + * @param username the short user name whose permissions to be revoked + * @param table optional table name the permissions apply + * @param family optional column family + * @param qualifier optional qualifier + * @param actions the permissions to be revoked + * @return A {@link AccessControlProtos} RevokeRequest + */ + public static AccessControlProtos.RevokeRequest buildRevokeRequest( + String username, byte[] table, byte[] family, byte[] qualifier, + AccessControlProtos.Permission.Action... 
actions) { + AccessControlProtos.Permission.Builder permissionBuilder = + AccessControlProtos.Permission.newBuilder(); + for (AccessControlProtos.Permission.Action a : actions) { + permissionBuilder.addAction(a); + } + if (table != null) { + permissionBuilder.setTable(ByteString.copyFrom(table)); + } + if (family != null) { + permissionBuilder.setFamily(ByteString.copyFrom(family)); + } + if (qualifier != null) { + permissionBuilder.setQualifier(ByteString.copyFrom(qualifier)); + } + + return AccessControlProtos.RevokeRequest.newBuilder() + .setPermission( + AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)) + .setPermission(permissionBuilder.build()) + ).build(); + } + + /** + * Create a RegionOpenInfo based on given region info and version of offline node + */ + private static RegionOpenInfo buildRegionOpenInfo( + final HRegionInfo region, final int versionOfOfflineNode) { + RegionOpenInfo.Builder builder = RegionOpenInfo.newBuilder(); + builder.setRegion(HRegionInfo.convert(region)); + if (versionOfOfflineNode >= 0) { + builder.setVersionOfOfflineNode(versionOfOfflineNode); + } + return builder.build(); + } + + /** + * Create a new RollWALWriterRequest + * + * @return a ReplicateWALEntryRequest + */ + public static AdminProtos.RollWALWriterRequest buildRollWALWriterRequest() { + AdminProtos.RollWALWriterRequest.Builder builder = AdminProtos.RollWALWriterRequest.newBuilder(); + return builder.build(); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java new file mode 100644 index 0000000..96705d7 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java @@ -0,0 +1,282 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.protobuf; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.ipc.ServerRpcController; +import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ActionResult; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; +import org.apache.hadoop.hbase.regionserver.RegionOpeningState; +import org.apache.hadoop.hbase.security.access.UserPermission; +import org.apache.hadoop.util.StringUtils; + +import com.google.protobuf.ByteString; +import com.google.protobuf.RpcController; + +/** + * Helper utility to build protocol buffer responses, + * or retrieve data from protocol buffer responses. + */ +@InterfaceAudience.Private +public final class ResponseConverter { + + private ResponseConverter() { + } + +// Start utilities for Client + + /** + * Get the client Results from a protocol buffer ScanResponse + * + * @param response the protocol buffer ScanResponse + * @return the client Results in the response + */ + public static Result[] getResults(final ScanResponse response) { + if (response == null) return null; + int count = response.getResultCount(); + Result[] results = new Result[count]; + for (int i = 0; i < count; i++) { + results[i] = ProtobufUtil.toResult(response.getResult(i)); + } + return results; + } + + /** + * Get the results from a protocol buffer MultiResponse + * + * @param proto the protocol buffer MultiResponse to convert + * @return the results in the MultiResponse + * @throws IOException + */ + public static List getResults( + final ClientProtos.MultiResponse proto) throws IOException { + List results = new ArrayList(); + List resultList = proto.getResultList(); + for (int i = 0, n = resultList.size(); i < n; i++) { + ActionResult result = resultList.get(i); + if (result.hasException()) { + results.add(ProtobufUtil.toException(result.getException())); + } else if (result.hasValue()) { + Object value = ProtobufUtil.toObject(result.getValue()); + if (value instanceof ClientProtos.Result) { + results.add(ProtobufUtil.toResult((ClientProtos.Result) value)); + } else { + results.add(value); + } + } else { + results.add(new Result()); + } + } + return results; + } + + /** + * Wrap a throwable to an action result. 
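+ * <p>A server-side sketch of reporting a per-action failure inside a multi call; the
+ * surrounding {@code ClientProtos.MultiResponse} builder variable is an assumption:
+ * <pre>
+ *   try {
+ *     // apply the individual action ...
+ *   } catch (IOException ioe) {
+ *     responseBuilder.addResult(ResponseConverter.buildActionResult(ioe));
+ *   }
+ * </pre>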
+ * + * @param t the throwable to wrap + * @return an action result + */ + public static ActionResult buildActionResult(final Throwable t) { + ActionResult.Builder builder = ActionResult.newBuilder(); + NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder(); + parameterBuilder.setName(t.getClass().getName()); + parameterBuilder.setValue( + ByteString.copyFromUtf8(StringUtils.stringifyException(t))); + builder.setException(parameterBuilder.build()); + return builder.build(); + } + + /** + * Converts the permissions list into a protocol buffer UserPermissionsResponse + */ + public static UserPermissionsResponse buildUserPermissionsResponse( + final List<UserPermission> permissions) { + UserPermissionsResponse.Builder builder = UserPermissionsResponse.newBuilder(); + for (UserPermission perm : permissions) { + builder.addPermission(ProtobufUtil.toUserPermission(perm)); + } + return builder.build(); + } + +// End utilities for Client +// Start utilities for Admin + + /** + * Get the list of regions to flush from a RollWALWriterResponse + * + * @param proto the RollWALWriterResponse + * @return the list of regions to flush + */ + public static byte[][] getRegions(final RollWALWriterResponse proto) { + if (proto == null || proto.getRegionToFlushCount() == 0) return null; + List<byte[]> regions = new ArrayList<byte[]>(); + for (ByteString region: proto.getRegionToFlushList()) { + regions.add(region.toByteArray()); + } + return regions.toArray(new byte[regions.size()][]); + } + + /** + * Get the list of region info from a GetOnlineRegionResponse + * + * @param proto the GetOnlineRegionResponse + * @return the list of region info + */ + public static List<HRegionInfo> getRegionInfos(final GetOnlineRegionResponse proto) { + if (proto == null || proto.getRegionInfoCount() == 0) return null; + return ProtobufUtil.getRegionInfos(proto); + } + + /** + * Get the region opening state from an OpenRegionResponse + * + * @param proto the OpenRegionResponse + * @return the region opening state + */ + public static RegionOpeningState getRegionOpeningState + (final OpenRegionResponse proto) { + if (proto == null || proto.getOpeningStateCount() != 1) return null; + return RegionOpeningState.valueOf( + proto.getOpeningState(0).name()); + } + + /** + * Get a list of region opening states from an OpenRegionResponse + * + * @param proto the OpenRegionResponse + * @return the list of region opening states + */ + public static List<RegionOpeningState> getRegionOpeningStateList( + final OpenRegionResponse proto) { + if (proto == null) return null; + List<RegionOpeningState> regionOpeningStates = new ArrayList<RegionOpeningState>(); + for (int i = 0; i < proto.getOpeningStateCount(); i++) { + regionOpeningStates.add(RegionOpeningState.valueOf( + proto.getOpeningState(i).name())); + } + return regionOpeningStates; + } + + /** + * Check if the region is closed from a CloseRegionResponse + * + * @param proto the CloseRegionResponse + * @return the region close state + */ + public static boolean isClosed + (final CloseRegionResponse proto) { + if (proto == null || !proto.hasClosed()) return false; + return proto.getClosed(); + } + + /** + * A utility to build a GetServerInfoResponse.
+ * + * @param serverName + * @param webuiPort + * @return the response + */ + public static GetServerInfoResponse buildGetServerInfoResponse( + final ServerName serverName, final int webuiPort) { + GetServerInfoResponse.Builder builder = GetServerInfoResponse.newBuilder(); + ServerInfo.Builder serverInfoBuilder = ServerInfo.newBuilder(); + serverInfoBuilder.setServerName(ProtobufUtil.toServerName(serverName)); + if (webuiPort >= 0) { + serverInfoBuilder.setWebuiPort(webuiPort); + } + builder.setServerInfo(serverInfoBuilder.build()); + return builder.build(); + } + + /** + * A utility to build a GetOnlineRegionResponse. + * + * @param regions + * @return the response + */ + public static GetOnlineRegionResponse buildGetOnlineRegionResponse( + final List regions) { + GetOnlineRegionResponse.Builder builder = GetOnlineRegionResponse.newBuilder(); + for (HRegionInfo region: regions) { + builder.addRegionInfo(HRegionInfo.convert(region)); + } + return builder.build(); + } + + /** + * Creates a response for the catalog scan request + * @return A CatalogScanResponse + */ + public static CatalogScanResponse buildCatalogScanResponse(int numCleaned) { + return CatalogScanResponse.newBuilder().setScanResult(numCleaned).build(); + } + + /** + * Creates a response for the catalog scan request + * @return A EnableCatalogJanitorResponse + */ + public static EnableCatalogJanitorResponse buildEnableCatalogJanitorResponse(boolean prevValue) { + return EnableCatalogJanitorResponse.newBuilder().setPrevValue(prevValue).build(); + } + +// End utilities for Admin + + /** + * Creates a response for the last flushed sequence Id request + * @return A GetLastFlushedSequenceIdResponse + */ + public static GetLastFlushedSequenceIdResponse buildGetLastFlushedSequenceIdResponse( + long seqId) { + return GetLastFlushedSequenceIdResponse.newBuilder().setLastFlushedSequenceId(seqId).build(); + } + + /** + * Stores an exception encountered during RPC invocation so it can be passed back + * through to the client. + * @param controller the controller instance provided by the client when calling the service + * @param ioe the exception encountered + */ + public static void setControllerException(RpcController controller, IOException ioe) { + if (controller != null) { + if (controller instanceof ServerRpcController) { + ((ServerRpcController)controller).setFailedOn(ioe); + } else { + controller.setFailed(StringUtils.stringifyException(ioe)); + } + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/RegionOpeningState.java hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/RegionOpeningState.java new file mode 100644 index 0000000..2de11dc --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/RegionOpeningState.java @@ -0,0 +1,31 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.classification.InterfaceAudience; + +@InterfaceAudience.Private +public enum RegionOpeningState { + + OPENED, + + ALREADY_OPENED, + + FAILED_OPENING; +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java new file mode 100644 index 0000000..2ec760c --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java @@ -0,0 +1,206 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +/** + * This class acts as a wrapper for all the objects used to identify and + * communicate with remote peers and is responsible for answering to expired + * sessions and re-establishing the ZK connections. + */ +@InterfaceAudience.Private +public class ReplicationPeer implements Abortable, Closeable { + private static final Log LOG = LogFactory.getLog(ReplicationPeer.class); + + private final String clusterKey; + private final String id; + private List regionServers = new ArrayList(0); + private final AtomicBoolean peerEnabled = new AtomicBoolean(); + // Cannot be final since a new object needs to be recreated when session fails + private ZooKeeperWatcher zkw; + private final Configuration conf; + + private PeerStateTracker peerStateTracker; + + /** + * Constructor that takes all the objects required to communicate with the + * specified peer, except for the region server addresses. 
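+ * <p>A typical construction sketch; the configuration, cluster key, peer id, local watcher
+ * and peer-state znode path are placeholders supplied by the caller:
+ * <pre>
+ *   ReplicationPeer peer = new ReplicationPeer(conf, clusterKey, peerId);
+ *   peer.startStateTracker(zkw, peerStateNode);
+ *   boolean enabled = peer.getPeerEnabled().get();
+ * </pre>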
+ * @param conf configuration object to this peer + * @param key cluster key used to locate the peer + * @param id string representation of this peer's identifier + */ + public ReplicationPeer(Configuration conf, String key, + String id) throws IOException { + this.conf = conf; + this.clusterKey = key; + this.id = id; + this.reloadZkWatcher(); + } + + /** + * Start a state tracker to check whether this peer is enabled or not + * + * @param zookeeper zk watcher for the local cluster + * @param peerStateNode path to zk node which stores peer state + * @throws KeeperException + */ + public void startStateTracker(ZooKeeperWatcher zookeeper, String peerStateNode) + throws KeeperException { + ReplicationZookeeper.ensurePeerEnabled(zookeeper, peerStateNode); + this.peerStateTracker = new PeerStateTracker(peerStateNode, zookeeper, this); + this.peerStateTracker.start(); + try { + this.readPeerStateZnode(); + } catch (DeserializationException e) { + throw ZKUtil.convert(e); + } + } + + private void readPeerStateZnode() throws DeserializationException { + this.peerEnabled.set(ReplicationZookeeper.isPeerEnabled(this.peerStateTracker.getData(false))); + } + + /** + * Get the cluster key of this peer + * @return string consisting of zk ensemble addresses, client port + * and root znode + */ + public String getClusterKey() { + return clusterKey; + } + + /** + * Get the state of this peer + * @return atomic boolean that holds the status + */ + public AtomicBoolean getPeerEnabled() { + return peerEnabled; + } + + /** + * Get a list of all the addresses of all the region servers + * for this peer cluster + * @return list of addresses + */ + public List<ServerName> getRegionServers() { + return regionServers; + } + + /** + * Set the list of region servers for this peer + * @param regionServers list of addresses for the region servers + */ + public void setRegionServers(List<ServerName> regionServers) { + this.regionServers = regionServers; + } + + /** + * Get the ZK connection to this peer + * @return zk connection + */ + public ZooKeeperWatcher getZkw() { + return zkw; + } + + /** + * Get the identifier of this peer + * @return string representation of the id (short) + */ + public String getId() { + return id; + } + + /** + * Get the configuration object required to communicate with this peer + * @return configuration object + */ + public Configuration getConfiguration() { + return conf; + } + + @Override + public void abort(String why, Throwable e) { + LOG.fatal("The ReplicationPeer corresponding to peer " + clusterKey + + " was aborted for the following reason(s): " + why, e); + } + + /** + * Closes the current ZKW (if not null) and creates a new one + * @throws IOException If anything goes wrong connecting + */ + public void reloadZkWatcher() throws IOException { + if (zkw != null) zkw.close(); + zkw = new ZooKeeperWatcher(conf, + "connection to cluster: " + id, this); + } + + @Override + public boolean isAborted() { + // Currently the replication peer is never "Aborted"; we just log when the + // abort method is called.
+ return false; + } + + @Override + public void close() throws IOException { + if (zkw != null){ + zkw.close(); + } + } + + /** + * Tracker for state of this peer + */ + public class PeerStateTracker extends ZooKeeperNodeTracker { + + public PeerStateTracker(String peerStateZNode, ZooKeeperWatcher watcher, + Abortable abortable) { + super(watcher, peerStateZNode, abortable); + } + + @Override + public synchronized void nodeDataChanged(String path) { + if (path.equals(node)) { + super.nodeDataChanged(path); + try { + readPeerStateZnode(); + } catch (DeserializationException e) { + LOG.warn("Failed deserializing the content of " + path, e); + } + } + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java new file mode 100644 index 0000000..525dd22 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java @@ -0,0 +1,1103 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.KeeperException.ConnectionLossException; +import org.apache.zookeeper.KeeperException.NodeExistsException; +import org.apache.zookeeper.KeeperException.SessionExpiredException; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * This class serves as a helper for all things related to zookeeper in + * replication. + *

 + * The layout looks something like this under zookeeper.znode.parent for the
 + * master cluster:
 + *
 + * <pre>
 + * replication/
 + *  state      {contains true or false}
 + *  clusterId  {contains a byte}
 + *  peers/
 + *    1/   {contains a full cluster address}
 + *      peer-state  {contains ENABLED or DISABLED}
 + *    2/
 + *    ...
 + *  rs/ {lists all RS that replicate}
 + *    startcode1/ {lists all peer clusters}
 + *      1/ {lists hlogs to process}
 + *        10.10.1.76%3A53488.123456789 {contains nothing or a position}
 + *        10.10.1.76%3A53488.123456790
 + *        ...
 + *      2/
 + *      ...
 + *    startcode2/
 + *    ...
 + * </pre>
      + */ +@InterfaceAudience.Private +public class ReplicationZookeeper implements Closeable{ + private static final Log LOG = + LogFactory.getLog(ReplicationZookeeper.class); + // Name of znode we use to lock when failover + private final static String RS_LOCK_ZNODE = "lock"; + + // Our handle on zookeeper + private final ZooKeeperWatcher zookeeper; + // Map of peer clusters keyed by their id + private Map peerClusters; + // Path to the root replication znode + private String replicationZNode; + // Path to the peer clusters znode + private String peersZNode; + // Path to the znode that contains all RS that replicates + private String rsZNode; + // Path to this region server's name under rsZNode + private String rsServerNameZnode; + // Name node if the replicationState znode + private String replicationStateNodeName; + // Name of zk node which stores peer state. The peer-state znode is under a + // peers' id node; e.g. /hbase/replication/peers/PEER_ID/peer-state + private String peerStateNodeName; + private final Configuration conf; + // Is this cluster replicating at the moment? + private AtomicBoolean replicating; + // The key to our own cluster + private String ourClusterKey; + // Abortable + private Abortable abortable; + private ReplicationStatusTracker statusTracker; + + /** + * ZNode content if enabled state. + */ + // Public so it can be seen by test code. + public static final byte[] ENABLED_ZNODE_BYTES = toByteArray(ZooKeeperProtos.ReplicationState.State.ENABLED); + + /** + * ZNode content if disabled state. + */ + static final byte[] DISABLED_ZNODE_BYTES = toByteArray(ZooKeeperProtos.ReplicationState.State.DISABLED); + + /** + * Constructor used by clients of replication (like master and HBase clients) + * @param conf conf to use + * @param zk zk connection to use + * @throws IOException + */ + public ReplicationZookeeper(final Abortable abortable, final Configuration conf, + final ZooKeeperWatcher zk) throws KeeperException { + this.conf = conf; + this.zookeeper = zk; + this.replicating = new AtomicBoolean(); + setZNodes(abortable); + } + + /** + * Constructor used by region servers, connects to the peer cluster right away. 
+ * + * @param server + * @param replicating atomic boolean to start/stop replication + * @throws IOException + * @throws KeeperException + */ + public ReplicationZookeeper(final Server server, final AtomicBoolean replicating) + throws IOException, KeeperException { + this.abortable = server; + this.zookeeper = server.getZooKeeper(); + this.conf = server.getConfiguration(); + this.replicating = replicating; + setZNodes(server); + + this.peerClusters = new HashMap(); + ZKUtil.createWithParents(this.zookeeper, + ZKUtil.joinZNode(this.replicationZNode, this.replicationStateNodeName)); + this.rsServerNameZnode = ZKUtil.joinZNode(rsZNode, server.getServerName().toString()); + ZKUtil.createWithParents(this.zookeeper, this.rsServerNameZnode); + connectExistingPeers(); + } + + private void setZNodes(Abortable abortable) throws KeeperException { + String replicationZNodeName = conf.get("zookeeper.znode.replication", "replication"); + String peersZNodeName = conf.get("zookeeper.znode.replication.peers", "peers"); + this.peerStateNodeName = conf.get("zookeeper.znode.replication.peers.state", "peer-state"); + this.replicationStateNodeName = conf.get("zookeeper.znode.replication.state", "state"); + String rsZNodeName = conf.get("zookeeper.znode.replication.rs", "rs"); + this.ourClusterKey = ZKUtil.getZooKeeperClusterKey(this.conf); + this.replicationZNode = ZKUtil.joinZNode(this.zookeeper.baseZNode, replicationZNodeName); + this.peersZNode = ZKUtil.joinZNode(replicationZNode, peersZNodeName); + ZKUtil.createWithParents(this.zookeeper, this.peersZNode); + this.rsZNode = ZKUtil.joinZNode(replicationZNode, rsZNodeName); + ZKUtil.createWithParents(this.zookeeper, this.rsZNode); + + // Set a tracker on replicationStateNodeNode + this.statusTracker = new ReplicationStatusTracker(this.zookeeper, abortable); + statusTracker.start(); + readReplicationStateZnode(); + } + + private void connectExistingPeers() throws IOException, KeeperException { + List znodes = ZKUtil.listChildrenNoWatch(this.zookeeper, this.peersZNode); + if (znodes != null) { + for (String z : znodes) { + connectToPeer(z); + } + } + } + + /** + * List this cluster's peers' IDs + * @return list of all peers' identifiers + */ + public List listPeersIdsAndWatch() { + List ids = null; + try { + ids = ZKUtil.listChildrenAndWatchThem(this.zookeeper, this.peersZNode); + } catch (KeeperException e) { + this.abortable.abort("Cannot get the list of peers ", e); + } + return ids; + } + + /** + * Map of this cluster's peers for display. 
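A hedged sketch of how client-side code, such as an admin tool, might combine the client constructor above with the peer-listing method that follows; the Abortable, Configuration, and ZooKeeperWatcher are assumed to be supplied by the caller, and the output format is illustrative.

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.replication.ReplicationZookeeper;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

public class ListPeersSketch {
  // Prints "id => clusterKey" for every peer registered under the peers znode.
  static void printPeers(Abortable abortable, Configuration conf, ZooKeeperWatcher zk)
      throws KeeperException {
    ReplicationZookeeper repZk = new ReplicationZookeeper(abortable, conf, zk);
    Map<String, String> peers = repZk.listPeers();
    for (Map.Entry<String, String> e : peers.entrySet()) {
      System.out.println(e.getKey() + " => " + e.getValue());
    }
  }
}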
+ * @return A map of peer ids to peer cluster keys + */ + public Map listPeers() { + Map peers = new TreeMap(); + List ids = null; + try { + ids = ZKUtil.listChildrenNoWatch(this.zookeeper, this.peersZNode); + for (String id : ids) { + byte[] bytes = ZKUtil.getData(this.zookeeper, ZKUtil.joinZNode(this.peersZNode, id)); + String clusterKey = null; + try { + clusterKey = parsePeerFrom(bytes); + } catch (DeserializationException de) { + LOG.warn("Failed parse of clusterid=" + id + " znode content, continuing."); + continue; + } + peers.put(id, clusterKey); + } + } catch (KeeperException e) { + this.abortable.abort("Cannot get the list of peers ", e); + } + return peers; + } + + /** + * Returns all region servers from given peer + * + * @param peerClusterId (byte) the cluster to interrogate + * @return addresses of all region servers + */ + public List getSlavesAddresses(String peerClusterId) { + if (this.peerClusters.size() == 0) { + return Collections.emptyList(); + } + ReplicationPeer peer = this.peerClusters.get(peerClusterId); + if (peer == null) { + return Collections.emptyList(); + } + + List addresses; + try { + addresses = fetchSlavesAddresses(peer.getZkw()); + } catch (KeeperException ke) { + reconnectPeer(ke, peer); + addresses = Collections.emptyList(); + } + peer.setRegionServers(addresses); + return peer.getRegionServers(); + } + + /** + * Get the list of all the region servers from the specified peer + * @param zkw zk connection to use + * @return list of region server addresses or an empty list if the slave + * is unavailable + */ + private List fetchSlavesAddresses(ZooKeeperWatcher zkw) + throws KeeperException { + return listChildrenAndGetAsServerNames(zkw, zkw.rsZNode); + } + + /** + * Lists the children of the specified znode, retrieving the data of each + * child as a server address. + * + * Used to list the currently online regionservers and their addresses. + * + * Sets no watches at all, this method is best effort. + * + * Returns an empty list if the node has no children. Returns null if the + * parent node itself does not exist. 
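A small illustration, under the assumption that the peer watcher's public rsZNode field points at the peer cluster's rs directory, of reading a peer's region servers through the static helper documented below.

import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.replication.ReplicationZookeeper;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

public class SlaveAddressesSketch {
  // Lists the live region servers of the peer cluster the watcher points at.
  static List<ServerName> peerRegionServers(ZooKeeperWatcher peerZk)
      throws KeeperException {
    return ReplicationZookeeper.listChildrenAndGetAsServerNames(peerZk, peerZk.rsZNode);
  }
}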
+ * + * @param zkw zookeeper reference + * @param znode node to get children of as addresses + * @return list of data of children of specified znode, empty if no children, + * null if parent does not exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static List listChildrenAndGetAsServerNames( + ZooKeeperWatcher zkw, String znode) + throws KeeperException { + List children = ZKUtil.listChildrenNoWatch(zkw, znode); + if(children == null) { + return Collections.emptyList(); + } + List addresses = new ArrayList(children.size()); + for (String child : children) { + addresses.add(ServerName.parseServerName(child)); + } + return addresses; + } + + /** + * This method connects this cluster to another one and registers it + * in this region server's replication znode + * @param peerId id of the peer cluster + * @throws KeeperException + */ + public boolean connectToPeer(String peerId) + throws IOException, KeeperException { + if (peerClusters == null) { + return false; + } + if (this.peerClusters.containsKey(peerId)) { + return false; + } + ReplicationPeer peer = getPeer(peerId); + if (peer == null) { + return false; + } + this.peerClusters.put(peerId, peer); + ZKUtil.createWithParents(this.zookeeper, ZKUtil.joinZNode( + this.rsServerNameZnode, peerId)); + LOG.info("Added new peer cluster " + peer.getClusterKey()); + return true; + } + + /** + * Helper method to connect to a peer + * @param peerId peer's identifier + * @return object representing the peer + * @throws IOException + * @throws KeeperException + */ + public ReplicationPeer getPeer(String peerId) throws IOException, KeeperException{ + String znode = ZKUtil.joinZNode(this.peersZNode, peerId); + byte [] data = ZKUtil.getData(this.zookeeper, znode); + String otherClusterKey = ""; + try { + otherClusterKey = parsePeerFrom(data); + } catch (DeserializationException e) { + LOG.warn("Failed parse of cluster key from peerId=" + peerId + + ", specifically the content from the following znode: " + znode); + } + if (this.ourClusterKey.equals(otherClusterKey)) { + LOG.debug("Not connecting to " + peerId + " because it's us"); + return null; + } + // Construct the connection to the new peer + Configuration otherConf = new Configuration(this.conf); + try { + ZKUtil.applyClusterKeyToConf(otherConf, otherClusterKey); + } catch (IOException e) { + LOG.error("Can't get peer because:", e); + return null; + } + + ReplicationPeer peer = new ReplicationPeer(otherConf, peerId, + otherClusterKey); + peer.startStateTracker(this.zookeeper, this.getPeerStateNode(peerId)); + return peer; + } + + /** + * Set the new replication state for this cluster + * @param newState + */ + public void setReplicating(boolean newState) throws KeeperException { + ZKUtil.createWithParents(this.zookeeper, + ZKUtil.joinZNode(this.replicationZNode, this.replicationStateNodeName)); + byte[] stateBytes = (newState == true) ? ENABLED_ZNODE_BYTES : DISABLED_ZNODE_BYTES; + ZKUtil.setData(this.zookeeper, + ZKUtil.joinZNode(this.replicationZNode, this.replicationStateNodeName), stateBytes); + } + + /** + * Remove the peer from zookeeper. 
which will trigger the watchers on every + * region server and close their sources + * @param id + * @throws IllegalArgumentException Thrown when the peer doesn't exist + */ + public void removePeer(String id) throws IOException { + try { + if (!peerExists(id)) { + throw new IllegalArgumentException("Cannot remove inexisting peer"); + } + ZKUtil.deleteNodeRecursively(this.zookeeper, + ZKUtil.joinZNode(this.peersZNode, id)); + } catch (KeeperException e) { + throw new IOException("Unable to remove a peer", e); + } + } + + /** + * Add a new peer to this cluster + * @param id peer's identifier + * @param clusterKey ZK ensemble's addresses, client port and root znode + * @throws IllegalArgumentException Thrown when the peer doesn't exist + * @throws IllegalStateException Thrown when a peer already exists, since + * multi-slave isn't supported yet. + */ + public void addPeer(String id, String clusterKey) throws IOException { + try { + if (peerExists(id)) { + throw new IllegalArgumentException("Cannot add existing peer"); + } + ZKUtil.createWithParents(this.zookeeper, this.peersZNode); + ZKUtil.createAndWatch(this.zookeeper, ZKUtil.joinZNode(this.peersZNode, id), + toByteArray(clusterKey)); + // A peer is enabled by default + ZKUtil.createAndWatch(this.zookeeper, getPeerStateNode(id), ENABLED_ZNODE_BYTES); + } catch (KeeperException e) { + throw new IOException("Unable to add peer", e); + } + } + + /** + * @param clusterKey + * @return Serialized protobuf of clusterKey with pb magic prefix + * prepended suitable for use as content of a this.peersZNode; i.e. + * the content of PEER_ID znode under /hbase/replication/peers/PEER_ID + */ + static byte[] toByteArray(final String clusterKey) { + byte[] bytes = ZooKeeperProtos.ReplicationPeer.newBuilder().setClusterkey(clusterKey).build() + .toByteArray(); + return ProtobufUtil.prependPBMagic(bytes); + } + + /** + * @param state + * @return Serialized protobuf of state with pb magic prefix + * prepended suitable for use as content of either the cluster state + * znode -- whether or not we should be replicating kept in + * /hbase/replication/state -- or as content of a peer-state znode + * under a peer cluster id as in + * /hbase/replication/peers/PEER_ID/peer-state. + */ + static byte[] toByteArray(final ZooKeeperProtos.ReplicationState.State state) { + byte[] bytes = ZooKeeperProtos.ReplicationState.newBuilder().setState(state).build() + .toByteArray(); + return ProtobufUtil.prependPBMagic(bytes); + } + + /** + * @param position + * @return Serialized protobuf of position with pb magic prefix + * prepended suitable for use as content of an hlog position in a + * replication queue. + */ + static byte[] toByteArray( + final long position) { + byte[] bytes = ZooKeeperProtos.ReplicationHLogPosition.newBuilder().setPosition(position) + .build().toByteArray(); + return ProtobufUtil.prependPBMagic(bytes); + } + + /** + * @param lockOwner + * @return Serialized protobuf of lockOwner with pb magic prefix + * prepended suitable for use as content of an replication lock during + * region server fail over. + */ + static byte[] lockToByteArray( + final String lockOwner) { + byte[] bytes = ZooKeeperProtos.ReplicationLock.newBuilder().setLockOwner(lockOwner).build() + .toByteArray(); + return ProtobufUtil.prependPBMagic(bytes); + } + + /** + * @param bytes Content of a peer znode. + * @return ClusterKey parsed from the passed bytes. 
+ * @throws DeserializationException + */ + static String parsePeerFrom(final byte[] bytes) throws DeserializationException { + if (ProtobufUtil.isPBMagicPrefix(bytes)) { + int pblen = ProtobufUtil.lengthOfPBMagic(); + ZooKeeperProtos.ReplicationPeer.Builder builder = ZooKeeperProtos.ReplicationPeer + .newBuilder(); + ZooKeeperProtos.ReplicationPeer peer; + try { + peer = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return peer.getClusterkey(); + } else { + if (bytes.length > 0) { + return Bytes.toString(bytes); + } + return ""; + } + } + + /** + * @param bytes Content of a state znode. + * @return State parsed from the passed bytes. + * @throws DeserializationException + */ + static ZooKeeperProtos.ReplicationState.State parseStateFrom(final byte[] bytes) + throws DeserializationException { + ProtobufUtil.expectPBMagicPrefix(bytes); + int pblen = ProtobufUtil.lengthOfPBMagic(); + ZooKeeperProtos.ReplicationState.Builder builder = ZooKeeperProtos.ReplicationState + .newBuilder(); + ZooKeeperProtos.ReplicationState state; + try { + state = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); + return state.getState(); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + } + + /** + * @param bytes - Content of a HLog position znode. + * @return long - The current HLog position. + * @throws DeserializationException + */ + static long parseHLogPositionFrom( + final byte[] bytes) throws DeserializationException { + if (ProtobufUtil.isPBMagicPrefix(bytes)) { + int pblen = ProtobufUtil.lengthOfPBMagic(); + ZooKeeperProtos.ReplicationHLogPosition.Builder builder = ZooKeeperProtos.ReplicationHLogPosition + .newBuilder(); + ZooKeeperProtos.ReplicationHLogPosition position; + try { + position = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return position.getPosition(); + } else { + if (bytes.length > 0) { + return Bytes.toLong(bytes); + } + return 0; + } + } + + /** + * @param bytes - Content of a lock znode. + * @return String - The owner of the lock. 
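Because the serialization helpers above are package-private, a test-style sketch in the same package can round-trip a cluster key through the pb-magic-prefixed format; the key value is made up for illustration.

package org.apache.hadoop.hbase.replication;

import org.apache.hadoop.hbase.DeserializationException;

public class PeerSerializationSketch {
  // Round-trips a cluster key through the format stored under
  // /hbase/replication/peers/<PEER_ID>.
  static void roundTrip() throws DeserializationException {
    String clusterKey = "zk1,zk2,zk3:2181:/hbase";
    byte[] znodeContent = ReplicationZookeeper.toByteArray(clusterKey);
    // parsePeerFrom also accepts the old plain-string format when no pb magic is present.
    String parsed = ReplicationZookeeper.parsePeerFrom(znodeContent);
    assert clusterKey.equals(parsed);
  }
}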
+ * @throws DeserializationException + */ + static String parseLockOwnerFrom( + final byte[] bytes) throws DeserializationException { + if (ProtobufUtil.isPBMagicPrefix(bytes)) { + int pblen = ProtobufUtil.lengthOfPBMagic(); + ZooKeeperProtos.ReplicationLock.Builder builder = ZooKeeperProtos.ReplicationLock + .newBuilder(); + ZooKeeperProtos.ReplicationLock lock; + try { + lock = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return lock.getLockOwner(); + } else { + if (bytes.length > 0) { + return Bytes.toString(bytes); + } + return ""; + } + } + + private boolean peerExists(String id) throws KeeperException { + return ZKUtil.checkExists(this.zookeeper, + ZKUtil.joinZNode(this.peersZNode, id)) >= 0; + } + + /** + * Enable replication to the peer + * + * @param id peer's identifier + * @throws IllegalArgumentException + * Thrown when the peer doesn't exist + */ + public void enablePeer(String id) throws IOException { + changePeerState(id, ZooKeeperProtos.ReplicationState.State.ENABLED); + LOG.info("peer " + id + " is enabled"); + } + + /** + * Disable replication to the peer + * + * @param id peer's identifier + * @throws IllegalArgumentException + * Thrown when the peer doesn't exist + */ + public void disablePeer(String id) throws IOException { + changePeerState(id, ZooKeeperProtos.ReplicationState.State.DISABLED); + LOG.info("peer " + id + " is disabled"); + } + + private void changePeerState(String id, ZooKeeperProtos.ReplicationState.State state) + throws IOException { + try { + if (!peerExists(id)) { + throw new IllegalArgumentException("peer " + id + " is not registered"); + } + String peerStateZNode = getPeerStateNode(id); + byte[] stateBytes = (state == ZooKeeperProtos.ReplicationState.State.ENABLED) ? ENABLED_ZNODE_BYTES + : DISABLED_ZNODE_BYTES; + if (ZKUtil.checkExists(this.zookeeper, peerStateZNode) != -1) { + ZKUtil.setData(this.zookeeper, peerStateZNode, stateBytes); + } else { + ZKUtil.createAndWatch(zookeeper, peerStateZNode, stateBytes); + } + LOG.info("state of the peer " + id + " changed to " + state.name()); + } catch (KeeperException e) { + throw new IOException("Unable to change state of the peer " + id, e); + } + } + + /** + * Check whether the peer is enabled or not. This method checks the atomic + * boolean of ReplicationPeer locally. + * + * @param id peer identifier + * @return true if the peer is enabled, otherwise false + * @throws IllegalArgumentException + * Thrown when the peer doesn't exist + */ + public boolean getPeerEnabled(String id) { + if (!this.peerClusters.containsKey(id)) { + throw new IllegalArgumentException("peer " + id + " is not registered"); + } + return this.peerClusters.get(id).getPeerEnabled().get(); + } + + private String getPeerStateNode(String id) { + return ZKUtil.joinZNode(this.peersZNode, ZKUtil.joinZNode(id, this.peerStateNodeName)); + } + + /** + * This reads the state znode for replication and sets the atomic boolean + */ + private void readReplicationStateZnode() { + try { + this.replicating.set(getReplication()); + LOG.info("Replication is now " + (this.replicating.get()? + "started" : "stopped")); + } catch (KeeperException e) { + this.abortable.abort("Failed getting data on from " + getRepStateNode(), e); + } + } + + /** + * Get the replication status of this cluster. If the state znode doesn't + * exist it will also create it and set it true. 
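A short sketch of the enable/disable cycle built on the methods above; the peer id is hypothetical and must already be registered, otherwise an IllegalArgumentException is thrown.

import java.io.IOException;
import org.apache.hadoop.hbase.replication.ReplicationZookeeper;

public class PeerToggleSketch {
  // Pauses replication to peer "1" and resumes it later; both calls write the
  // pb-serialized state into the peer-state znode.
  static void pauseAndResume(ReplicationZookeeper repZk) throws IOException {
    repZk.disablePeer("1");
    // ... maintenance on the peer cluster happens here ...
    repZk.enablePeer("1");
  }
}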
+ * @return returns true when it's enabled, else false + * @throws KeeperException + */ + public boolean getReplication() throws KeeperException { + byte [] data = this.statusTracker.getData(false); + if (data == null || data.length == 0) { + setReplicating(true); + return true; + } + try { + return isPeerEnabled(data); + } catch (DeserializationException e) { + throw ZKUtil.convert(e); + } + } + + private String getRepStateNode() { + return ZKUtil.joinZNode(this.replicationZNode, this.replicationStateNodeName); + } + + /** + * Add a new log to the list of hlogs in zookeeper + * @param filename name of the hlog's znode + * @param peerId name of the cluster's znode + */ + public void addLogToList(String filename, String peerId) + throws KeeperException { + String znode = ZKUtil.joinZNode(this.rsServerNameZnode, peerId); + znode = ZKUtil.joinZNode(znode, filename); + ZKUtil.createWithParents(this.zookeeper, znode); + } + + /** + * Remove a log from the list of hlogs in zookeeper + * @param filename name of the hlog's znode + * @param clusterId name of the cluster's znode + */ + public void removeLogFromList(String filename, String clusterId) { + try { + String znode = ZKUtil.joinZNode(rsServerNameZnode, clusterId); + znode = ZKUtil.joinZNode(znode, filename); + ZKUtil.deleteNode(this.zookeeper, znode); + } catch (KeeperException e) { + this.abortable.abort("Failed remove from list", e); + } + } + + /** + * Set the current position of the specified cluster in the current hlog + * @param filename filename name of the hlog's znode + * @param clusterId clusterId name of the cluster's znode + * @param position the position in the file + * @throws IOException + */ + public void writeReplicationStatus(String filename, String clusterId, + long position) { + try { + String znode = ZKUtil.joinZNode(this.rsServerNameZnode, clusterId); + znode = ZKUtil.joinZNode(znode, filename); + // Why serialize String of Long and note Long as bytes? 
+ ZKUtil.setData(this.zookeeper, znode, toByteArray(position)); + } catch (KeeperException e) { + this.abortable.abort("Writing replication status", e); + } + } + + /** + * Get a list of all the other region servers in this cluster + * and set a watch + * @return a list of server nanes + */ + public List getRegisteredRegionServers() { + List result = null; + try { + result = ZKUtil.listChildrenAndWatchThem( + this.zookeeper, this.zookeeper.rsZNode); + } catch (KeeperException e) { + this.abortable.abort("Get list of registered region servers", e); + } + return result; + } + + /** + * Get the list of the replicators that have queues, they can be alive, dead + * or simply from a previous run + * @return a list of server names + */ + public List getListOfReplicators() { + List result = null; + try { + result = ZKUtil.listChildrenNoWatch(this.zookeeper, rsZNode); + } catch (KeeperException e) { + this.abortable.abort("Get list of replicators", e); + } + return result; + } + + /** + * Get the list of peer clusters for the specified server names + * @param rs server names of the rs + * @return a list of peer cluster + */ + public List getListPeersForRS(String rs) { + String znode = ZKUtil.joinZNode(rsZNode, rs); + List result = null; + try { + result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode); + } catch (KeeperException e) { + this.abortable.abort("Get list of peers for rs", e); + } + return result; + } + + /** + * Get the list of hlogs for the specified region server and peer cluster + * @param rs server names of the rs + * @param id peer cluster + * @return a list of hlogs + */ + public List getListHLogsForPeerForRS(String rs, String id) { + String znode = ZKUtil.joinZNode(rsZNode, rs); + znode = ZKUtil.joinZNode(znode, id); + List result = null; + try { + result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode); + } catch (KeeperException e) { + this.abortable.abort("Get list of hlogs for peer", e); + } + return result; + } + + /** + * Try to set a lock in another server's znode. + * @param znode the server names of the other server + * @return true if the lock was acquired, false in every other cases + */ + public boolean lockOtherRS(String znode) { + try { + String parent = ZKUtil.joinZNode(this.rsZNode, znode); + if (parent.equals(rsServerNameZnode)) { + LOG.warn("Won't lock because this is us, we're dead!"); + return false; + } + String p = ZKUtil.joinZNode(parent, RS_LOCK_ZNODE); + ZKUtil.createAndWatch(this.zookeeper, p, lockToByteArray(rsServerNameZnode)); + } catch (KeeperException e) { + // This exception will pop up if the znode under which we're trying to + // create the lock is already deleted by another region server, meaning + // that the transfer already occurred. 
+ // NoNode => transfer is done and znodes are already deleted + // NodeExists => lock znode already created by another RS + if (e instanceof KeeperException.NoNodeException || + e instanceof KeeperException.NodeExistsException) { + LOG.info("Won't transfer the queue," + + " another RS took care of it because of: " + e.getMessage()); + } else { + LOG.info("Failed lock other rs", e); + } + return false; + } + return true; + } + + /** + * This methods copies all the hlogs queues from another region server + * and returns them all sorted per peer cluster (appended with the dead + * server's znode) + * @param znode server names to copy + * @return all hlogs for all peers of that cluster, null if an error occurred + */ + public SortedMap> copyQueuesFromRS(String znode) { + // TODO this method isn't atomic enough, we could start copying and then + // TODO fail for some reason and we would end up with znodes we don't want. + SortedMap> queues = + new TreeMap>(); + try { + String nodePath = ZKUtil.joinZNode(rsZNode, znode); + List clusters = + ZKUtil.listChildrenNoWatch(this.zookeeper, nodePath); + // We have a lock znode in there, it will count as one. + if (clusters == null || clusters.size() <= 1) { + return queues; + } + // The lock isn't a peer cluster, remove it + clusters.remove(RS_LOCK_ZNODE); + for (String cluster : clusters) { + // We add the name of the recovered RS to the new znode, we can even + // do that for queues that were recovered 10 times giving a znode like + // number-startcode-number-otherstartcode-number-anotherstartcode-etc + String newCluster = cluster+"-"+znode; + String newClusterZnode = ZKUtil.joinZNode(rsServerNameZnode, newCluster); + String clusterPath = ZKUtil.joinZNode(nodePath, cluster); + List hlogs = ZKUtil.listChildrenNoWatch(this.zookeeper, clusterPath); + // That region server didn't have anything to replicate for this cluster + if (hlogs == null || hlogs.size() == 0) { + continue; + } + ZKUtil.createNodeIfNotExistsAndWatch(this.zookeeper, newClusterZnode, + HConstants.EMPTY_BYTE_ARRAY); + SortedSet logQueue = new TreeSet(); + queues.put(newCluster, logQueue); + for (String hlog : hlogs) { + String z = ZKUtil.joinZNode(clusterPath, hlog); + byte[] positionBytes = ZKUtil.getData(this.zookeeper, z); + long position = 0; + try { + position = parseHLogPositionFrom(positionBytes); + } catch (DeserializationException e) { + LOG.warn("Failed parse of hlog position from the following znode: " + z); + } + LOG.debug("Creating " + hlog + " with data " + position); + String child = ZKUtil.joinZNode(newClusterZnode, hlog); + // Position doesn't actually change, we are just deserializing it for + // logging, so just use the already serialized version + ZKUtil.createAndWatch(this.zookeeper, child, positionBytes); + logQueue.add(hlog); + } + } + } catch (KeeperException e) { + this.abortable.abort("Copy queues from rs", e); + } + return queues; + } + + /** + * Delete a complete queue of hlogs + * @param peerZnode znode of the peer cluster queue of hlogs to delete + */ + public void deleteSource(String peerZnode, boolean closeConnection) { + try { + ZKUtil.deleteNodeRecursively(this.zookeeper, + ZKUtil.joinZNode(rsServerNameZnode, peerZnode)); + if (closeConnection) { + this.peerClusters.get(peerZnode).getZkw().close(); + this.peerClusters.remove(peerZnode); + } + } catch (KeeperException e) { + this.abortable.abort("Failed delete of " + peerZnode, e); + } + } + + /** + * Recursive deletion of all znodes in specified rs' znode + * @param znode + */ + public void 
deleteRsQueues(String znode) { + String fullpath = ZKUtil.joinZNode(rsZNode, znode); + try { + List clusters = + ZKUtil.listChildrenNoWatch(this.zookeeper, fullpath); + for (String cluster : clusters) { + // We'll delete it later + if (cluster.equals(RS_LOCK_ZNODE)) { + continue; + } + String fullClusterPath = ZKUtil.joinZNode(fullpath, cluster); + ZKUtil.deleteNodeRecursively(this.zookeeper, fullClusterPath); + } + // Finish cleaning up + ZKUtil.deleteNodeRecursively(this.zookeeper, fullpath); + } catch (KeeperException e) { + if (e instanceof KeeperException.NoNodeException || + e instanceof KeeperException.NotEmptyException) { + // Testing a special case where another region server was able to + // create a lock just after we deleted it, but then was also able to + // delete the RS znode before us or its lock znode is still there. + if (e.getPath().equals(fullpath)) { + return; + } + } + this.abortable.abort("Failed delete of " + znode, e); + } + } + + /** + * Delete this cluster's queues + */ + public void deleteOwnRSZNode() { + try { + ZKUtil.deleteNodeRecursively(this.zookeeper, + this.rsServerNameZnode); + } catch (KeeperException e) { + // if the znode is already expired, don't bother going further + if (e instanceof KeeperException.SessionExpiredException) { + return; + } + this.abortable.abort("Failed delete of " + this.rsServerNameZnode, e); + } + } + + /** + * Get the position of the specified hlog in the specified peer znode + * @param peerId znode of the peer cluster + * @param hlog name of the hlog + * @return the position in that hlog + * @throws KeeperException + */ + public long getHLogRepPosition(String peerId, String hlog) + throws KeeperException { + String clusterZnode = ZKUtil.joinZNode(rsServerNameZnode, peerId); + String znode = ZKUtil.joinZNode(clusterZnode, hlog); + byte[] bytes = ZKUtil.getData(this.zookeeper, znode); + try { + return parseHLogPositionFrom(bytes); + } catch (DeserializationException de) { + LOG.warn("Failed parse of HLogPosition for peerId=" + peerId + " and hlog=" + hlog + + "znode content, continuing."); + } + // if we can not parse the position, start at the beginning of the hlog file + // again + return 0; + } + + /** + * Returns the UUID of the provided peer id. Should a connection loss or session + * expiration happen, the ZK handler will be reopened once and if it still doesn't + * work then it will bail and return null. + * @param peerId the peer's ID that will be converted into a UUID + * @return a UUID or null if there's a ZK connection issue + */ + public UUID getPeerUUID(String peerId) { + ReplicationPeer peer = getPeerClusters().get(peerId); + UUID peerUUID = null; + try { + peerUUID = getUUIDForCluster(peer.getZkw()); + } catch (KeeperException ke) { + reconnectPeer(ke, peer); + } + return peerUUID; + } + + /** + * Get the UUID for the provided ZK watcher. 
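A sketch, under the assumption that the caller already knows the dead region server's znode name, of the claim-and-copy failover flow built from lockOtherRS, copyQueuesFromRS, and deleteRsQueues shown above.

import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import org.apache.hadoop.hbase.replication.ReplicationZookeeper;

public class QueueFailoverSketch {
  // Attempts to claim a dead region server's replication queues: take the lock
  // under its znode, copy every queue under our own znode, then clean up the
  // dead server's subtree.
  static SortedMap<String, SortedSet<String>> claimQueues(
      ReplicationZookeeper repZk, String deadRsZnode) {
    if (!repZk.lockOtherRS(deadRsZnode)) {
      // Another region server won the race or the znode is already gone.
      return new TreeMap<String, SortedSet<String>>();
    }
    SortedMap<String, SortedSet<String>> queues = repZk.copyQueuesFromRS(deadRsZnode);
    repZk.deleteRsQueues(deadRsZnode);
    return queues;
  }
}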
Doesn't handle any ZK exceptions + * @param zkw watcher connected to an ensemble + * @return the UUID read from zookeeper + * @throws KeeperException + */ + public UUID getUUIDForCluster(ZooKeeperWatcher zkw) throws KeeperException { + return UUID.fromString(ZKClusterId.readClusterIdZNode(zkw)); + } + + private void reconnectPeer(KeeperException ke, ReplicationPeer peer) { + if (ke instanceof ConnectionLossException + || ke instanceof SessionExpiredException) { + LOG.warn( + "Lost the ZooKeeper connection for peer " + peer.getClusterKey(), + ke); + try { + peer.reloadZkWatcher(); + } catch(IOException io) { + LOG.warn( + "Creation of ZookeeperWatcher failed for peer " + + peer.getClusterKey(), io); + } + } + } + + public void registerRegionServerListener(ZooKeeperListener listener) { + this.zookeeper.registerListener(listener); + } + + /** + * Get a map of all peer clusters + * @return map of peer cluster keyed by id + */ + public Map getPeerClusters() { + return this.peerClusters; + } + + /** + * Extracts the znode name of a peer cluster from a ZK path + * @param fullPath Path to extract the id from + * @return the id or an empty string if path is invalid + */ + public static String getZNodeName(String fullPath) { + String[] parts = fullPath.split("/"); + return parts.length > 0 ? parts[parts.length-1] : ""; + } + + /** + * Get this cluster's zk connection + * @return zk connection + */ + public ZooKeeperWatcher getZookeeperWatcher() { + return this.zookeeper; + } + + + /** + * Get the full path to the peers' znode + * @return path to peers in zk + */ + public String getPeersZNode() { + return peersZNode; + } + + @Override + public void close() throws IOException { + if (statusTracker != null) + statusTracker.stop(); + } + + /** + * Utility method to ensure an ENABLED znode is in place; if not present, we + * create it. + * @param zookeeper + * @param path Path to znode to check + * @return True if we created the znode. + * @throws NodeExistsException + * @throws KeeperException + */ + static boolean ensurePeerEnabled(final ZooKeeperWatcher zookeeper, final String path) + throws NodeExistsException, KeeperException { + if (ZKUtil.checkExists(zookeeper, path) == -1) { + ZKUtil.createAndWatch(zookeeper, path, ENABLED_ZNODE_BYTES); + return true; + } + return false; + } + + /** + * @param bytes + * @return True if the passed in bytes are those of a pb + * serialized ENABLED state. 
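A trivial illustration of composing and decomposing peer paths with ZKUtil.joinZNode and the getZNodeName helper above; the peer id is hypothetical.

import org.apache.hadoop.hbase.replication.ReplicationZookeeper;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;

public class PeerPathSketch {
  // Builds the znode path for peer "1" and extracts the id back out of it.
  static String roundTripPeerId(ReplicationZookeeper repZk) {
    String path = ZKUtil.joinZNode(repZk.getPeersZNode(), "1");
    // e.g. "/hbase/replication/peers/1" -> "1"
    return ReplicationZookeeper.getZNodeName(path);
  }
}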
+ * @throws DeserializationException + */ + static boolean isPeerEnabled(final byte[] bytes) throws DeserializationException { + ZooKeeperProtos.ReplicationState.State state = parseStateFrom(bytes); + return ZooKeeperProtos.ReplicationState.State.ENABLED == state; + } + + /** + * Tracker for status of the replication + */ + public class ReplicationStatusTracker extends ZooKeeperNodeTracker { + public ReplicationStatusTracker(ZooKeeperWatcher watcher, + Abortable abortable) { + super(watcher, getRepStateNode(), abortable); + } + + @Override + public synchronized void nodeDataChanged(String path) { + if (path.equals(node)) { + super.nodeDataChanged(path); + readReplicationStateZnode(); + } + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java new file mode 100644 index 0000000..60926a3 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java @@ -0,0 +1,277 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.security; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import javax.security.auth.callback.Callback; +import javax.security.auth.callback.CallbackHandler; +import javax.security.auth.callback.NameCallback; +import javax.security.auth.callback.PasswordCallback; +import javax.security.auth.callback.UnsupportedCallbackException; +import javax.security.sasl.RealmCallback; +import javax.security.sasl.RealmChoiceCallback; +import javax.security.sasl.Sasl; +import javax.security.sasl.SaslException; +import javax.security.sasl.SaslClient; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.SaslInputStream; +import org.apache.hadoop.security.SaslOutputStream; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; + +/** + * A utility class that encapsulates SASL logic for RPC client. 
+ * Copied from org.apache.hadoop.security + */ +public class HBaseSaslRpcClient { + public static final Log LOG = LogFactory.getLog(HBaseSaslRpcClient.class); + + private final SaslClient saslClient; + + /** + * Create a HBaseSaslRpcClient for an authentication method + * + * @param method + * the requested authentication method + * @param token + * token to use if needed by the authentication method + */ + public HBaseSaslRpcClient(AuthMethod method, + Token token, String serverPrincipal) + throws IOException { + switch (method) { + case DIGEST: + if (LOG.isDebugEnabled()) + LOG.debug("Creating SASL " + AuthMethod.DIGEST.getMechanismName() + + " client to authenticate to service at " + token.getService()); + saslClient = Sasl.createSaslClient(new String[] { AuthMethod.DIGEST + .getMechanismName() }, null, null, SaslUtils.SASL_DEFAULT_REALM, + SaslUtils.SASL_PROPS, new SaslClientCallbackHandler(token)); + break; + case KERBEROS: + if (LOG.isDebugEnabled()) { + LOG + .debug("Creating SASL " + AuthMethod.KERBEROS.getMechanismName() + + " client. Server's Kerberos principal name is " + + serverPrincipal); + } + if (serverPrincipal == null || serverPrincipal.length() == 0) { + throw new IOException( + "Failed to specify server's Kerberos principal name"); + } + String names[] = SaslUtils.splitKerberosName(serverPrincipal); + if (names.length != 3) { + throw new IOException( + "Kerberos principal does not have the expected format: " + + serverPrincipal); + } + saslClient = Sasl.createSaslClient(new String[] { AuthMethod.KERBEROS + .getMechanismName() }, null, names[0], names[1], + SaslUtils.SASL_PROPS, null); + break; + default: + throw new IOException("Unknown authentication method " + method); + } + if (saslClient == null) + throw new IOException("Unable to find SASL client implementation"); + } + + private static void readStatus(DataInputStream inStream) throws IOException { + int status = inStream.readInt(); // read status + if (status != SaslStatus.SUCCESS.state) { + throw new RemoteException(WritableUtils.readString(inStream), + WritableUtils.readString(inStream)); + } + } + + /** + * Do client side SASL authentication with server via the given InputStream + * and OutputStream + * + * @param inS + * InputStream to use + * @param outS + * OutputStream to use + * @return true if connection is set up, or false if needs to switch + * to simple Auth. 
+ * @throws IOException + */ + public boolean saslConnect(InputStream inS, OutputStream outS) + throws IOException { + DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS)); + DataOutputStream outStream = new DataOutputStream(new BufferedOutputStream( + outS)); + + try { + byte[] saslToken = new byte[0]; + if (saslClient.hasInitialResponse()) + saslToken = saslClient.evaluateChallenge(saslToken); + if (saslToken != null) { + outStream.writeInt(saslToken.length); + outStream.write(saslToken, 0, saslToken.length); + outStream.flush(); + if (LOG.isDebugEnabled()) + LOG.debug("Have sent token of size " + saslToken.length + + " from initSASLContext."); + } + if (!saslClient.isComplete()) { + readStatus(inStream); + int len = inStream.readInt(); + if (len == SaslUtils.SWITCH_TO_SIMPLE_AUTH) { + if (LOG.isDebugEnabled()) + LOG.debug("Server asks us to fall back to simple auth."); + saslClient.dispose(); + return false; + } + saslToken = new byte[len]; + if (LOG.isDebugEnabled()) + LOG.debug("Will read input token of size " + saslToken.length + + " for processing by initSASLContext"); + inStream.readFully(saslToken); + } + + while (!saslClient.isComplete()) { + saslToken = saslClient.evaluateChallenge(saslToken); + if (saslToken != null) { + if (LOG.isDebugEnabled()) + LOG.debug("Will send token of size " + saslToken.length + + " from initSASLContext."); + outStream.writeInt(saslToken.length); + outStream.write(saslToken, 0, saslToken.length); + outStream.flush(); + } + if (!saslClient.isComplete()) { + readStatus(inStream); + saslToken = new byte[inStream.readInt()]; + if (LOG.isDebugEnabled()) + LOG.debug("Will read input token of size " + saslToken.length + + " for processing by initSASLContext"); + inStream.readFully(saslToken); + } + } + if (LOG.isDebugEnabled()) { + LOG.debug("SASL client context established. Negotiated QoP: " + + saslClient.getNegotiatedProperty(Sasl.QOP)); + } + return true; + } catch (IOException e) { + try { + saslClient.dispose(); + } catch (SaslException ignored) { + // ignore further exceptions during cleanup + } + throw e; + } + } + + /** + * Get a SASL wrapped InputStream. Can be called only after saslConnect() has + * been called. + * + * @param in + * the InputStream to wrap + * @return a SASL wrapped InputStream + * @throws IOException + */ + public InputStream getInputStream(InputStream in) throws IOException { + if (!saslClient.isComplete()) { + throw new IOException("Sasl authentication exchange hasn't completed yet"); + } + return new SaslInputStream(in, saslClient); + } + + /** + * Get a SASL wrapped OutputStream. Can be called only after saslConnect() has + * been called. 
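A hedged sketch of the client-side handshake over an already-connected socket, assuming AuthMethod is the same-package enum this class already references and that the Kerberos principal has the usual primary/instance@REALM form; the principal value is illustrative.

package org.apache.hadoop.hbase.security;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;

public class SaslConnectSketch {
  // Runs the SASL exchange; returns false if the server asks to fall back to
  // simple auth, in which case the caller keeps using the raw socket streams.
  static boolean kerberosHandshake(Socket socket, String serverPrincipal)
      throws IOException {
    // e.g. serverPrincipal = "hbase/regionserver.example.com@EXAMPLE.COM"
    HBaseSaslRpcClient sasl =
        new HBaseSaslRpcClient(AuthMethod.KERBEROS, null, serverPrincipal);
    if (!sasl.saslConnect(socket.getInputStream(), socket.getOutputStream())) {
      return false;
    }
    // After a successful handshake, all RPC traffic should flow through the
    // SASL-wrapped streams so the negotiated QoP is applied.
    InputStream in = sasl.getInputStream(socket.getInputStream());
    OutputStream out = sasl.getOutputStream(socket.getOutputStream());
    return in != null && out != null;
  }
}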
+ * + * @param out + * the OutputStream to wrap + * @return a SASL wrapped OutputStream + * @throws IOException + */ + public OutputStream getOutputStream(OutputStream out) throws IOException { + if (!saslClient.isComplete()) { + throw new IOException("Sasl authentication exchange hasn't completed yet"); + } + return new SaslOutputStream(out, saslClient); + } + + /** Release resources used by wrapped saslClient */ + public void dispose() throws SaslException { + saslClient.dispose(); + } + + private static class SaslClientCallbackHandler implements CallbackHandler { + private final String userName; + private final char[] userPassword; + + public SaslClientCallbackHandler(Token token) { + this.userName = SaslUtils.encodeIdentifier(token.getIdentifier()); + this.userPassword = SaslUtils.encodePassword(token.getPassword()); + } + + public void handle(Callback[] callbacks) + throws UnsupportedCallbackException { + NameCallback nc = null; + PasswordCallback pc = null; + RealmCallback rc = null; + for (Callback callback : callbacks) { + if (callback instanceof RealmChoiceCallback) { + continue; + } else if (callback instanceof NameCallback) { + nc = (NameCallback) callback; + } else if (callback instanceof PasswordCallback) { + pc = (PasswordCallback) callback; + } else if (callback instanceof RealmCallback) { + rc = (RealmCallback) callback; + } else { + throw new UnsupportedCallbackException(callback, + "Unrecognized SASL client callback"); + } + } + if (nc != null) { + if (LOG.isDebugEnabled()) + LOG.debug("SASL client callback: setting username: " + userName); + nc.setName(userName); + } + if (pc != null) { + if (LOG.isDebugEnabled()) + LOG.debug("SASL client callback: setting userPassword"); + pc.setPassword(userPassword); + } + if (rc != null) { + if (LOG.isDebugEnabled()) + LOG.debug("SASL client callback: setting realm: " + + rc.getDefaultText()); + rc.setText(rc.getDefaultText()); + } + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java new file mode 100644 index 0000000..a7a3bc9 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.security.access; + +import com.google.common.collect.Maps; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.VersionedWritable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; + +/** + * Base permissions instance representing the ability to perform a given set + * of actions. + * + * @see TablePermission + */ +public class Permission extends VersionedWritable { + protected static final byte VERSION = 0; + public enum Action { + READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A'); + + private byte code; + Action(char code) { + this.code = (byte)code; + } + + public byte code() { return code; } + } + + private static Log LOG = LogFactory.getLog(Permission.class); + protected static Map ACTION_BY_CODE = Maps.newHashMap(); + + protected Action[] actions; + + static { + for (Action a : Action.values()) { + ACTION_BY_CODE.put(a.code(), a); + } + } + + /** Empty constructor for Writable implementation. Do not use. */ + public Permission() { + super(); + } + + public Permission(Action... assigned) { + if (assigned != null && assigned.length > 0) { + actions = Arrays.copyOf(assigned, assigned.length); + } + } + + public Permission(byte[] actionCodes) { + if (actionCodes != null) { + Action acts[] = new Action[actionCodes.length]; + int j = 0; + for (int i=0; i 0) { + actions = new Action[length]; + for (int i = 0; i < length; i++) { + byte b = in.readByte(); + Action a = ACTION_BY_CODE.get(b); + if (a == null) { + throw new IOException("Unknown action code '"+ + Bytes.toStringBinary(new byte[]{b})+"' in input"); + } + this.actions[i] = a; + } + } else { + actions = new Action[0]; + } + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + out.writeByte(actions != null ? actions.length : 0); + if (actions != null) { + for (Action a: actions) { + out.writeByte(a.code()); + } + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java new file mode 100644 index 0000000..f00c54a --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java @@ -0,0 +1,307 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.security.access; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * Represents an authorization for access for the given actions, optionally + * restricted to the given column family or column qualifier, over the + * given table. If the family property is null, it implies + * full table access. + */ +public class TablePermission extends Permission { + private static Log LOG = LogFactory.getLog(TablePermission.class); + + private byte[] table; + private byte[] family; + private byte[] qualifier; + + /** Nullary constructor for Writable, do not use */ + public TablePermission() { + super(); + } + + /** + * Create a new permission for the given table and (optionally) column family, + * allowing the given actions. + * @param table the table + * @param family the family, can be null if a global permission on the table + * @param assigned the list of allowed actions + */ + public TablePermission(byte[] table, byte[] family, Action... assigned) { + this(table, family, null, assigned); + } + + /** + * Creates a new permission for the given table, restricted to the given + * column family and qualifer, allowing the assigned actions to be performed. + * @param table the table + * @param family the family, can be null if a global permission on the table + * @param assigned the list of allowed actions + */ + public TablePermission(byte[] table, byte[] family, byte[] qualifier, + Action... assigned) { + super(assigned); + this.table = table; + this.family = family; + this.qualifier = qualifier; + } + + /** + * Creates a new permission for the given table, family and column qualifier, + * allowing the actions matching the provided byte codes to be performed. + * @param table the table + * @param family the family, can be null if a global permission on the table + * @param actionCodes the list of allowed action codes + */ + public TablePermission(byte[] table, byte[] family, byte[] qualifier, + byte[] actionCodes) { + super(actionCodes); + this.table = table; + this.family = family; + this.qualifier = qualifier; + } + + public boolean hasTable() { + return table != null; + } + + public byte[] getTable() { + return table; + } + + public boolean hasFamily() { + return family != null; + } + + public byte[] getFamily() { + return family; + } + + public boolean hasQualifier() { + return qualifier != null; + } + + public byte[] getQualifier() { + return qualifier; + } + + /** + * Checks that a given table operation is authorized by this permission + * instance. 
+ * + * @param table the table where the operation is being performed + * @param family the column family to which the operation is restricted, + * if null implies "all" + * @param qualifier the column qualifier to which the action is restricted, + * if null implies "all" + * @param action the action being requested + * @return true if the action within the given scope is allowed + * by this permission, false + */ + public boolean implies(byte[] table, byte[] family, byte[] qualifier, + Action action) { + if (!Bytes.equals(this.table, table)) { + return false; + } + + if (this.family != null && + (family == null || + !Bytes.equals(this.family, family))) { + return false; + } + + if (this.qualifier != null && + (qualifier == null || + !Bytes.equals(this.qualifier, qualifier))) { + return false; + } + + // check actions + return super.implies(action); + } + + /** + * Checks if this permission grants access to perform the given action on + * the given table and key value. + * @param table the table on which the operation is being performed + * @param kv the KeyValue on which the operation is being requested + * @param action the action requested + * @return true if the action is allowed over the given scope + * by this permission, otherwise false + */ + public boolean implies(byte[] table, KeyValue kv, Action action) { + if (!Bytes.equals(this.table, table)) { + return false; + } + + if (family != null && + (Bytes.compareTo(family, 0, family.length, + kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength()) != 0)) { + return false; + } + + if (qualifier != null && + (Bytes.compareTo(qualifier, 0, qualifier.length, + kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength()) != 0)) { + return false; + } + + // check actions + return super.implies(action); + } + + /** + * Returns true if this permission matches the given column + * family at least. This only indicates a partial match against the table + * and column family, however, and does not guarantee that implies() for the + * column same family would return true. In the case of a + * column-qualifier specific permission, for example, implies() would still + * return false. + */ + public boolean matchesFamily(byte[] table, byte[] family, Action action) { + if (!Bytes.equals(this.table, table)) { + return false; + } + + if (this.family != null && + (family == null || + !Bytes.equals(this.family, family))) { + return false; + } + + // ignore qualifier + // check actions + return super.implies(action); + } + + /** + * Returns if the given permission matches the given qualifier. 
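A small example of how a family-scoped grant behaves under the implies checks above; the table, family, and qualifier names are illustrative.

import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.TablePermission;
import org.apache.hadoop.hbase.util.Bytes;

public class TablePermissionSketch {
  // A READ grant on table "t1", family "cf": reads anywhere in "cf" are
  // implied, writes are not, and other families are not covered.
  static void check() {
    TablePermission perm = new TablePermission(
        Bytes.toBytes("t1"), Bytes.toBytes("cf"), Permission.Action.READ);
    boolean canReadCf = perm.implies(Bytes.toBytes("t1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q1"), Permission.Action.READ);      // true
    boolean canWriteCf = perm.implies(Bytes.toBytes("t1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q1"), Permission.Action.WRITE);     // false
    boolean canReadOther = perm.implies(Bytes.toBytes("t1"), Bytes.toBytes("other"),
        null, Permission.Action.READ);                     // false: family mismatch
    assert canReadCf && !canWriteCf && !canReadOther;
  }
}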
+ * @param table the table name to match + * @param family the column family to match + * @param qualifier the qualifier name to match + * @param action the action requested + * @return true if the table, family and qualifier match, + * otherwise false + */ + public boolean matchesFamilyQualifier(byte[] table, byte[] family, byte[] qualifier, + Action action) { + if (!matchesFamily(table, family, action)) { + return false; + } else { + if (this.qualifier != null && + (qualifier == null || + !Bytes.equals(this.qualifier, qualifier))) { + return false; + } + } + return super.implies(action); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof TablePermission)) { + return false; + } + TablePermission other = (TablePermission)obj; + + if (!(Bytes.equals(table, other.getTable()) && + ((family == null && other.getFamily() == null) || + Bytes.equals(family, other.getFamily())) && + ((qualifier == null && other.getQualifier() == null) || + Bytes.equals(qualifier, other.getQualifier())) + )) { + return false; + } + + // check actions + return super.equals(other); + } + + @Override + public int hashCode() { + final int prime = 37; + int result = super.hashCode(); + if (table != null) { + result = prime * result + Bytes.hashCode(table); + } + if (family != null) { + result = prime * result + Bytes.hashCode(family); + } + if (qualifier != null) { + result = prime * result + Bytes.hashCode(qualifier); + } + return result; + } + + public String toString() { + StringBuilder str = new StringBuilder("[TablePermission: ") + .append("table=").append(Bytes.toString(table)) + .append(", family=").append(Bytes.toString(family)) + .append(", qualifier=").append(Bytes.toString(qualifier)) + .append(", actions="); + if (actions != null) { + for (int i=0; i 0) + str.append(","); + if (actions[i] != null) + str.append(actions[i].toString()); + else + str.append("NULL"); + } + } + str.append("]"); + + return str.toString(); + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + table = Bytes.readByteArray(in); + if (in.readBoolean()) { + family = Bytes.readByteArray(in); + } + if (in.readBoolean()) { + qualifier = Bytes.readByteArray(in); + } + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + Bytes.writeByteArray(out, table); + out.writeBoolean(family != null); + if (family != null) { + Bytes.writeByteArray(out, family); + } + out.writeBoolean(qualifier != null); + if (qualifier != null) { + Bytes.writeByteArray(out, qualifier); + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java new file mode 100644 index 0000000..fd5b755 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.security.access; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * Represents an authorization for access over the given table, column family + * plus qualifier, for the given user. + */ +public class UserPermission extends TablePermission { + private static Log LOG = LogFactory.getLog(UserPermission.class); + + private byte[] user; + + /** Nullary constructor for Writable, do not use */ + public UserPermission() { + super(); + } + + /** + * Creates a new instance for the given user. + * @param user the user + * @param assigned the list of allowed actions + */ + public UserPermission(byte[] user, Action... assigned) { + super(null, null, null, assigned); + this.user = user; + } + + /** + * Creates a new instance for the given user, + * matching the actions with the given codes. + * @param user the user + * @param actionCodes the list of allowed action codes + */ + public UserPermission(byte[] user, byte[] actionCodes) { + super(null, null, null, actionCodes); + this.user = user; + } + + /** + * Creates a new instance for the given user, table and column family. + * @param user the user + * @param table the table + * @param family the family, can be null if action is allowed over the entire + * table + * @param assigned the list of allowed actions + */ + public UserPermission(byte[] user, byte[] table, byte[] family, + Action... assigned) { + super(table, family, assigned); + this.user = user; + } + + /** + * Creates a new permission for the given user, table, column family and + * column qualifier. + * @param user the user + * @param table the table + * @param family the family, can be null if action is allowed over the entire + * table + * @param qualifier the column qualifier, can be null if action is allowed + * over the entire column family + * @param assigned the list of allowed actions + */ + public UserPermission(byte[] user, byte[] table, byte[] family, + byte[] qualifier, Action... assigned) { + super(table, family, qualifier, assigned); + this.user = user; + } + + /** + * Creates a new instance for the given user, table, column family and + * qualifier, matching the actions with the given codes. + * @param user the user + * @param table the table + * @param family the family, can be null if action is allowed over the entire + * table + * @param qualifier the column qualifier, can be null if action is allowed + * over the entire column family + * @param actionCodes the list of allowed action codes + */ + public UserPermission(byte[] user, byte[] table, byte[] family, + byte[] qualifier, byte[] actionCodes) { + super(table, family, qualifier, actionCodes); + this.user = user; + } + + public byte[] getUser() { + return user; + } + + /** + * Returns true if this permission describes a global user permission. 
+ */ + public boolean isGlobal() { + byte[] tableName = getTable(); + return(tableName == null || tableName.length == 0); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof UserPermission)) { + return false; + } + UserPermission other = (UserPermission)obj; + + if ((Bytes.equals(user, other.getUser()) && + super.equals(obj))) { + return true; + } else { + return false; + } + } + + @Override + public int hashCode() { + final int prime = 37; + int result = super.hashCode(); + if (user != null) { + result = prime * result + Bytes.hashCode(user); + } + return result; + } + + public String toString() { + StringBuilder str = new StringBuilder("UserPermission: ") + .append("user=").append(Bytes.toString(user)) + .append(", ").append(super.toString()); + return str.toString(); + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + user = Bytes.readByteArray(in); + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + Bytes.writeByteArray(out, user); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java new file mode 100644 index 0000000..aa8f6f2 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.security.token; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import com.google.protobuf.ByteString; +import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.TokenIdentifier; + +/** + * Represents the identity information stored in an HBase authentication token. 
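For illustration only (not part of the patch): a minimal sketch of how TablePermission.implies() narrows scope from table to family to qualifier to action. It assumes the Permission.Action enum (READ, WRITE) and the (table, family, actions) constructor defined by the access-control classes earlier in this module; the table, family and qualifier names are made up.

import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.TablePermission;
import org.apache.hadoop.hbase.util.Bytes;

public class TablePermissionSketch {
  public static void main(String[] args) {
    // READ on table "t1", family "cf"; a null qualifier means "all qualifiers"
    TablePermission perm = new TablePermission(
        Bytes.toBytes("t1"), Bytes.toBytes("cf"), Permission.Action.READ);

    // qualifier is unrestricted, so any qualifier under "cf" is implied
    System.out.println(perm.implies(Bytes.toBytes("t1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q1"), Permission.Action.READ));   // true
    // a different family falls outside the granted scope
    System.out.println(perm.implies(Bytes.toBytes("t1"), Bytes.toBytes("other"),
        null, Permission.Action.READ));                  // false
    // the action itself must also have been granted
    System.out.println(perm.implies(Bytes.toBytes("t1"), Bytes.toBytes("cf"),
        null, Permission.Action.WRITE));                 // false
  }
}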
+ */ +public class AuthenticationTokenIdentifier extends TokenIdentifier { + public static final Text AUTH_TOKEN_TYPE = new Text("HBASE_AUTH_TOKEN"); + + protected String username; + protected int keyId; + protected long issueDate; + protected long expirationDate; + protected long sequenceNumber; + + public AuthenticationTokenIdentifier() { + } + + public AuthenticationTokenIdentifier(String username) { + this.username = username; + } + + public AuthenticationTokenIdentifier(String username, int keyId, + long issueDate, long expirationDate) { + this.username = username; + this.keyId = keyId; + this.issueDate = issueDate; + this.expirationDate = expirationDate; + } + + @Override + public Text getKind() { + return AUTH_TOKEN_TYPE; + } + + @Override + public UserGroupInformation getUser() { + if (username == null || "".equals(username)) { + return null; + } + return UserGroupInformation.createRemoteUser(username); + } + + public String getUsername() { + return username; + } + + void setUsername(String name) { + this.username = name; + } + + public int getKeyId() { + return keyId; + } + + void setKeyId(int id) { + this.keyId = id; + } + + public long getIssueDate() { + return issueDate; + } + + void setIssueDate(long timestamp) { + this.issueDate = timestamp; + } + + public long getExpirationDate() { + return expirationDate; + } + + void setExpirationDate(long timestamp) { + this.expirationDate = timestamp; + } + + public long getSequenceNumber() { + return sequenceNumber; + } + + void setSequenceNumber(long seq) { + this.sequenceNumber = seq; + } + + public byte[] toBytes() { + AuthenticationProtos.TokenIdentifier.Builder builder = + AuthenticationProtos.TokenIdentifier.newBuilder(); + builder.setKind(AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN); + if (username != null) { + builder.setUsername(ByteString.copyFromUtf8(username)); + } + builder.setIssueDate(issueDate) + .setExpirationDate(expirationDate) + .setKeyId(keyId) + .setSequenceNumber(sequenceNumber); + return builder.build().toByteArray(); + } + + @Override + public void write(DataOutput out) throws IOException { + byte[] pbBytes = toBytes(); + out.writeInt(pbBytes.length); + out.write(pbBytes); + } + + @Override + public void readFields(DataInput in) throws IOException { + int len = in.readInt(); + byte[] inBytes = new byte[len]; + in.readFully(inBytes); + AuthenticationProtos.TokenIdentifier identifier = + AuthenticationProtos.TokenIdentifier.newBuilder().mergeFrom(inBytes).build(); + // sanity check on type + if (!identifier.hasKind() || + identifier.getKind() != AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN) { + throw new IOException("Invalid TokenIdentifier kind from input "+identifier.getKind()); + } + + // copy the field values + if (identifier.hasUsername()) { + username = identifier.getUsername().toStringUtf8(); + } + if (identifier.hasKeyId()) { + keyId = identifier.getKeyId(); + } + if (identifier.hasIssueDate()) { + issueDate = identifier.getIssueDate(); + } + if (identifier.hasExpirationDate()) { + expirationDate = identifier.getExpirationDate(); + } + if (identifier.hasSequenceNumber()) { + sequenceNumber = identifier.getSequenceNumber(); + } + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other instanceof AuthenticationTokenIdentifier) { + AuthenticationTokenIdentifier ident = (AuthenticationTokenIdentifier)other; + return sequenceNumber == ident.getSequenceNumber() + && keyId == ident.getKeyId() + && issueDate == ident.getIssueDate() 
+        && expirationDate == ident.getExpirationDate()
+        && (username == null ? ident.getUsername() == null :
+            username.equals(ident.getUsername()));
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    return (int)sequenceNumber;
+  }
+}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java
new file mode 100644
index 0000000..6b71f3a
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.token;
+
+import java.util.Collection;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+
+public class AuthenticationTokenSelector
+    implements TokenSelector<AuthenticationTokenIdentifier> {
+  private static Log LOG = LogFactory.getLog(AuthenticationTokenSelector.class);
+
+  public AuthenticationTokenSelector() {
+  }
+
+  @Override
+  public Token<AuthenticationTokenIdentifier> selectToken(Text serviceName,
+      Collection<Token<? extends TokenIdentifier>> tokens) {
+    if (serviceName != null) {
+      for (Token<? extends TokenIdentifier> ident : tokens) {
+        if (serviceName.equals(ident.getService()) &&
+            AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(ident.getKind())) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Returning token "+ident);
+          }
+          return (Token<AuthenticationTokenIdentifier>)ident;
+        }
+      }
+    }
+    LOG.debug("No matching token found");
+    return null;
+  }
+}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java
new file mode 100644
index 0000000..3aba999
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
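For illustration only (not part of the patch): a minimal sketch that round-trips an AuthenticationTokenIdentifier through the Writable methods shown above, using only the constructor and accessors defined in this file; the username and dates are made-up values.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;

public class TokenIdentifierRoundTrip {
  public static void main(String[] args) throws Exception {
    long now = System.currentTimeMillis();
    AuthenticationTokenIdentifier ident =
        new AuthenticationTokenIdentifier("alice", 1, now, now + 86400000L);

    // write() length-prefixes the protobuf encoding produced by toBytes()
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    ident.write(new DataOutputStream(bos));

    // readFields() parses the protobuf back into the identifier fields
    AuthenticationTokenIdentifier copy = new AuthenticationTokenIdentifier();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));

    System.out.println(copy.getUsername());  // alice
    System.out.println(ident.equals(copy));  // true
  }
}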
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.zookeeper; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.WatchedEvent; + +/** + * An empty ZooKeeper watcher + */ +@InterfaceAudience.Private +public class EmptyWatcher implements Watcher { + // Used in this package but also by tests so needs to be public + public static EmptyWatcher instance = new EmptyWatcher(); + private EmptyWatcher() {} + + public void process(WatchedEvent event) {} +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java new file mode 100644 index 0000000..5b1ddbb --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java @@ -0,0 +1,160 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.zookeeper; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; +import java.net.InetAddress; +import java.net.NetworkInterface; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Enumeration; +import java.util.List; +import java.util.Properties; +import java.util.Map.Entry; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.util.Strings; +import org.apache.hadoop.net.DNS; +import org.apache.hadoop.util.StringUtils; +import org.apache.zookeeper.server.ServerConfig; +import org.apache.zookeeper.server.ZooKeeperServerMain; +import org.apache.zookeeper.server.quorum.QuorumPeerConfig; +import org.apache.zookeeper.server.quorum.QuorumPeerMain; + +/** + * HBase's version of ZooKeeper's QuorumPeer. When HBase is set to manage + * ZooKeeper, this class is used to start up QuorumPeer instances. By doing + * things in here rather than directly calling to ZooKeeper, we have more + * control over the process. This class uses {@link ZKConfig} to parse the + * zoo.cfg and inject variables from HBase's site.xml configuration in. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class HQuorumPeer { + + /** + * Parse ZooKeeper configuration from HBase XML config and run a QuorumPeer. + * @param args String[] of command line arguments. Not used. 
+ */ + public static void main(String[] args) { + Configuration conf = HBaseConfiguration.create(); + try { + Properties zkProperties = ZKConfig.makeZKProps(conf); + writeMyID(zkProperties); + QuorumPeerConfig zkConfig = new QuorumPeerConfig(); + zkConfig.parseProperties(zkProperties); + + // login the zookeeper server principal (if using security) + ZKUtil.loginServer(conf, "hbase.zookeeper.server.keytab.file", + "hbase.zookeeper.server.kerberos.principal", + zkConfig.getClientPortAddress().getHostName()); + + runZKServer(zkConfig); + } catch (Exception e) { + e.printStackTrace(); + System.exit(-1); + } + } + + private static void runZKServer(QuorumPeerConfig zkConfig) throws UnknownHostException, IOException { + if (zkConfig.isDistributed()) { + QuorumPeerMain qp = new QuorumPeerMain(); + qp.runFromConfig(zkConfig); + } else { + ZooKeeperServerMain zk = new ZooKeeperServerMain(); + ServerConfig serverConfig = new ServerConfig(); + serverConfig.readFrom(zkConfig); + zk.runFromConfig(serverConfig); + } + } + + private static boolean addressIsLocalHost(String address) { + return address.equals("localhost") || address.equals("127.0.0.1"); + } + + static void writeMyID(Properties properties) throws IOException { + long myId = -1; + + Configuration conf = HBaseConfiguration.create(); + String myAddress = Strings.domainNamePointerToHostName(DNS.getDefaultHost( + conf.get("hbase.zookeeper.dns.interface","default"), + conf.get("hbase.zookeeper.dns.nameserver","default"))); + + List ips = new ArrayList(); + + // Add what could be the best (configured) match + ips.add(myAddress.contains(".") ? + myAddress : + StringUtils.simpleHostname(myAddress)); + + // For all nics get all hostnames and IPs + Enumeration nics = NetworkInterface.getNetworkInterfaces(); + while(nics.hasMoreElements()) { + Enumeration rawAdrs = + ((NetworkInterface)nics.nextElement()).getInetAddresses(); + while(rawAdrs.hasMoreElements()) { + InetAddress inet = (InetAddress) rawAdrs.nextElement(); + ips.add(StringUtils.simpleHostname(inet.getHostName())); + ips.add(inet.getHostAddress()); + } + } + + for (Entry entry : properties.entrySet()) { + String key = entry.getKey().toString().trim(); + String value = entry.getValue().toString().trim(); + if (key.startsWith("server.")) { + int dot = key.indexOf('.'); + long id = Long.parseLong(key.substring(dot + 1)); + String[] parts = value.split(":"); + String address = parts[0]; + if (addressIsLocalHost(address) || ips.contains(address)) { + myId = id; + break; + } + } + } + + // Set the max session timeout from the provided client-side timeout + properties.setProperty("maxSessionTimeout", + conf.get("zookeeper.session.timeout", "180000")); + + if (myId == -1) { + throw new IOException("Could not find my address: " + myAddress + + " in list of ZooKeeper quorum servers"); + } + + String dataDirStr = properties.get("dataDir").toString().trim(); + File dataDir = new File(dataDirStr); + if (!dataDir.isDirectory()) { + if (!dataDir.mkdirs()) { + throw new IOException("Unable to create data dir " + dataDir); + } + } + + File myIdFile = new File(dataDir, "myid"); + PrintWriter w = new PrintWriter(myIdFile); + w.println(myId); + w.close(); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java new file mode 100644 index 0000000..1fc5629 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java @@ -0,0 +1,181 @@ 
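For illustration only (not part of the patch): a minimal sketch of how HQuorumPeer is normally driven purely by configuration when HBase manages ZooKeeper. The property names in the comments assume the usual hbase.zookeeper.* keys handled by ZKConfig further down; the hosts, ports and data directory are made-up values.

public class StartHBaseManagedZooKeeper {
  public static void main(String[] args) {
    // With hbase-site.xml providing, e.g., hbase.zookeeper.quorum=host1,host2,host3
    // and a ZooKeeper dataDir of /var/zk (made-up values), makeZKProps() yields
    // server.0=host1:2888:3888, server.1=host2:2888:3888, ... and writeMyID()
    // records this host's index (e.g. "1" on host2) in /var/zk/myid before the
    // quorum peer, or standalone server, is started.
    org.apache.hadoop.hbase.zookeeper.HQuorumPeer.main(new String[0]);
  }
}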
+/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.zookeeper; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.data.Stat; + +/** + * Manages the location of the current active Master for the RegionServer. + *

      + * Listens for ZooKeeper events related to the master address. The node + * /master will contain the address of the current master. + * This listener is interested in + * NodeDeleted and NodeCreated events on + * /master. + *

      + * Utilizes {@link ZooKeeperNodeTracker} for zk interactions. + *

      + * You can get the current master via {@link #getMasterAddress()} or via + * {@link #getMasterAddress(ZooKeeperWatcher)} if you do not have a running + * instance of this Tracker in your context. + *

      + * This class also includes utility for interacting with the master znode, for + * writing and reading the znode content. + */ +@InterfaceAudience.Private +public class MasterAddressTracker extends ZooKeeperNodeTracker { + /** + * Construct a master address listener with the specified + * zookeeper reference. + *

      + * This constructor does not trigger any actions, you must call methods + * explicitly. Normally you will just want to execute {@link #start()} to + * begin tracking of the master address. + * + * @param watcher zk reference and watcher + * @param abortable abortable in case of fatal error + */ + public MasterAddressTracker(ZooKeeperWatcher watcher, Abortable abortable) { + super(watcher, watcher.getMasterAddressZNode(), abortable); + } + + /** + * Get the address of the current master if one is available. Returns null + * if no current master. + * @return Server name or null if timed out. + */ + public ServerName getMasterAddress() { + return getMasterAddress(false); + } + + /** + * Get the address of the current master if one is available. Returns null + * if no current master. If refresh is set, try to load the data from ZK again, + * otherwise, cached data will be used. + * + * @param refresh whether to refresh the data by calling ZK directly. + * @return Server name or null if timed out. + */ + public ServerName getMasterAddress(final boolean refresh) { + try { + return ServerName.parseFrom(super.getData(refresh)); + } catch (DeserializationException e) { + LOG.warn("Failed parse", e); + return null; + } + } + + /** + * Get master address. + * Use this instead of {@link #getMasterAddress()} if you do not have an + * instance of this tracker in your context. + * @param zkw ZooKeeperWatcher to use + * @return ServerName stored in the the master address znode or null if no + * znode present. + * @throws KeeperException + * @throws IOException + */ + public static ServerName getMasterAddress(final ZooKeeperWatcher zkw) + throws KeeperException, IOException { + byte [] data = ZKUtil.getData(zkw, zkw.getMasterAddressZNode()); + if (data == null){ + throw new IOException("Can't get master address from ZooKeeper; znode data == null"); + } + try { + return ServerName.parseFrom(data); + } catch (DeserializationException e) { + KeeperException ke = new KeeperException.DataInconsistencyException(); + ke.initCause(e); + throw ke; + } + } + + /** + * Set master address into the master znode or into the backup + * subdirectory of backup masters; switch off the passed in znode + * path. + * @param zkw The ZooKeeperWatcher to use. + * @param znode Where to create the znode; could be at the top level or it + * could be under backup masters + * @param master ServerName of the current master + * @return true if node created, false if not; a watch is set in both cases + * @throws KeeperException + */ + public static boolean setMasterAddress(final ZooKeeperWatcher zkw, + final String znode, final ServerName master) + throws KeeperException { + return ZKUtil.createEphemeralNodeAndWatch(zkw, znode, toByteArray(master)); + } + + /** + * Check if there is a master available. + * @return true if there is a master set, false if not. + */ + public boolean hasMaster() { + return super.getData(false) != null; + } + + /** + * @param sn + * @return Content of the master znode as a serialized pb with the pb + * magic as prefix. 
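For illustration only (not part of the patch): a minimal sketch of the one-shot lookup path described above, reading the master location without starting a tracker instance. The ZooKeeperWatcher constructor and the inline Abortable are assumptions about client-side classes not shown in this section.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class MasterLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Abortable abortable = new Abortable() {
      public void abort(String why, Throwable e) { throw new RuntimeException(why, e); }
      public boolean isAborted() { return false; }
    };
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "master-lookup", abortable);
    try {
      // One-shot read of the /master znode; no tracker is started or watched long-term.
      ServerName master = MasterAddressTracker.getMasterAddress(zkw);
      System.out.println("Active master: " + master);
    } finally {
      zkw.close();
    }
  }
}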
+ */ + static byte [] toByteArray(final ServerName sn) { + ZooKeeperProtos.Master.Builder mbuilder = ZooKeeperProtos.Master.newBuilder(); + HBaseProtos.ServerName.Builder snbuilder = HBaseProtos.ServerName.newBuilder(); + snbuilder.setHostName(sn.getHostname()); + snbuilder.setPort(sn.getPort()); + snbuilder.setStartCode(sn.getStartcode()); + mbuilder.setMaster(snbuilder.build()); + return ProtobufUtil.prependPBMagic(mbuilder.build().toByteArray()); + } + + /** + * delete the master znode if its content is same as the parameter + */ + public static boolean deleteIfEquals(ZooKeeperWatcher zkw, final String content) { + if (content == null){ + throw new IllegalArgumentException("Content must not be null"); + } + + try { + Stat stat = new Stat(); + byte[] data = ZKUtil.getDataNoWatch(zkw, zkw.getMasterAddressZNode(), stat); + ServerName sn = ServerName.parseFrom(data); + if (sn != null && content.equals(sn.toString())) { + return (ZKUtil.deleteNode(zkw, zkw.getMasterAddressZNode(), stat.getVersion())); + } + } catch (KeeperException e) { + LOG.warn("Can't get or delete the master znode", e); + } catch (DeserializationException e) { + LOG.warn("Can't get or delete the master znode", e); + } + + return false; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java new file mode 100644 index 0000000..02132c7 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java @@ -0,0 +1,48 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.zookeeper; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.HRegionInfo; + +/** + * Tracks the unassigned zookeeper node used by the META table. + *

      + * If META is already assigned when instantiating this class, you will not + * receive any notification for that assignment. You will receive a + * notification after META has been successfully assigned to a new location. + */ +@InterfaceAudience.Private +public class MetaNodeTracker extends ZooKeeperNodeTracker { + /** + * Creates a meta node tracker. + * @param watcher + * @param abortable + */ + public MetaNodeTracker(final ZooKeeperWatcher watcher, final Abortable abortable) { + super(watcher, ZKUtil.joinZNode(watcher.assignmentZNode, + HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()), abortable); + } + + @Override + public void nodeDeleted(String path) { + super.nodeDeleted(path); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java new file mode 100644 index 0000000..4b355f7 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java @@ -0,0 +1,598 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.zookeeper; + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.RetryCounter; +import org.apache.hadoop.hbase.util.RetryCounterFactory; +import org.apache.zookeeper.AsyncCallback; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.ZooKeeper.States; +import org.apache.zookeeper.data.ACL; +import org.apache.zookeeper.data.Stat; + +/** + * A zookeeper that can handle 'recoverable' errors. + * To handle recoverable errors, developers need to realize that there are two + * classes of requests: idempotent and non-idempotent requests. Read requests + * and unconditional sets and deletes are examples of idempotent requests, they + * can be reissued with the same results. + * (Although, the delete may throw a NoNodeException on reissue its effect on + * the ZooKeeper state is the same.) Non-idempotent requests need special + * handling, application and library writers need to keep in mind that they may + * need to encode information in the data or name of znodes to detect + * retries. A simple example is a create that uses a sequence flag. 
+ * If a process issues a create("/x-", ..., SEQUENCE) and gets a connection + * loss exception, that process will reissue another + * create("/x-", ..., SEQUENCE) and get back x-111. When the process does a + * getChildren("/"), it sees x-1,x-30,x-109,x-110,x-111, now it could be + * that x-109 was the result of the previous create, so the process actually + * owns both x-109 and x-111. An easy way around this is to use "x-process id-" + * when doing the create. If the process is using an id of 352, before reissuing + * the create it will do a getChildren("/") and see "x-222-1", "x-542-30", + * "x-352-109", x-333-110". The process will know that the original create + * succeeded an the znode it created is "x-352-109". + * @see "http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling" + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RecoverableZooKeeper { + private static final Log LOG = LogFactory.getLog(RecoverableZooKeeper.class); + // the actual ZooKeeper client instance + private ZooKeeper zk; + private final RetryCounterFactory retryCounterFactory; + // An identifier of this process in the cluster + private final String identifier; + private final byte[] id; + private Watcher watcher; + private int sessionTimeout; + private String quorumServers; + private final Random salter; + + // The metadata attached to each piece of data has the + // format: + // 1-byte constant + // 4-byte big-endian integer (length of next field) + // identifier corresponding uniquely to this process + // It is prepended to the data supplied by the user. + + // the magic number is to be backward compatible + private static final byte MAGIC =(byte) 0XFF; + private static final int MAGIC_SIZE = Bytes.SIZEOF_BYTE; + private static final int ID_LENGTH_OFFSET = MAGIC_SIZE; + private static final int ID_LENGTH_SIZE = Bytes.SIZEOF_INT; + + public RecoverableZooKeeper(String quorumServers, int sessionTimeout, + Watcher watcher, int maxRetries, int retryIntervalMillis) + throws IOException { + this.zk = new ZooKeeper(quorumServers, sessionTimeout, watcher); + this.retryCounterFactory = + new RetryCounterFactory(maxRetries, retryIntervalMillis); + + // the identifier = processID@hostName + this.identifier = ManagementFactory.getRuntimeMXBean().getName(); + LOG.info("The identifier of this process is " + identifier); + this.id = Bytes.toBytes(identifier); + + this.watcher = watcher; + this.sessionTimeout = sessionTimeout; + this.quorumServers = quorumServers; + salter = new SecureRandom(); + } + + public void reconnectAfterExpiration() + throws IOException, InterruptedException { + LOG.info("Closing dead ZooKeeper connection, session" + + " was: 0x"+Long.toHexString(zk.getSessionId())); + zk.close(); + this.zk = new ZooKeeper(this.quorumServers, + this.sessionTimeout, this.watcher); + LOG.info("Recreated a ZooKeeper, session" + + " is: 0x"+Long.toHexString(zk.getSessionId())); + } + + /** + * delete is an idempotent operation. Retry before throwing exception. + * This function will not throw NoNodeException if the path does not + * exist. + */ + public void delete(String path, int version) + throws InterruptedException, KeeperException { + RetryCounter retryCounter = retryCounterFactory.create(); + boolean isRetry = false; // False for first attempt, true for all retries. + while (true) { + try { + zk.delete(path, version); + return; + } catch (KeeperException e) { + switch (e.code()) { + case NONODE: + if (isRetry) { + LOG.info("Node " + path + " already deleted. 
Assuming a " + + "previous attempt succeeded."); + return; + } + LOG.warn("Node " + path + " already deleted, retry=" + isRetry); + throw e; + + case CONNECTIONLOSS: + case SESSIONEXPIRED: + case OPERATIONTIMEOUT: + retryOrThrow(retryCounter, e, "delete"); + break; + + default: + throw e; + } + } + retryCounter.sleepUntilNextRetry(); + retryCounter.useRetry(); + isRetry = true; + } + } + + /** + * exists is an idempotent operation. Retry before throwing exception + * @return A Stat instance + */ + public Stat exists(String path, Watcher watcher) + throws KeeperException, InterruptedException { + RetryCounter retryCounter = retryCounterFactory.create(); + while (true) { + try { + return zk.exists(path, watcher); + } catch (KeeperException e) { + switch (e.code()) { + case CONNECTIONLOSS: + case SESSIONEXPIRED: + case OPERATIONTIMEOUT: + retryOrThrow(retryCounter, e, "exists"); + break; + + default: + throw e; + } + } + retryCounter.sleepUntilNextRetry(); + retryCounter.useRetry(); + } + } + + /** + * exists is an idempotent operation. Retry before throwing exception + * @return A Stat instance + */ + public Stat exists(String path, boolean watch) + throws KeeperException, InterruptedException { + RetryCounter retryCounter = retryCounterFactory.create(); + while (true) { + try { + return zk.exists(path, watch); + } catch (KeeperException e) { + switch (e.code()) { + case CONNECTIONLOSS: + case SESSIONEXPIRED: + case OPERATIONTIMEOUT: + retryOrThrow(retryCounter, e, "exists"); + break; + + default: + throw e; + } + } + retryCounter.sleepUntilNextRetry(); + retryCounter.useRetry(); + } + } + + private void retryOrThrow(RetryCounter retryCounter, KeeperException e, + String opName) throws KeeperException { + LOG.warn("Possibly transient ZooKeeper exception: " + e); + if (!retryCounter.shouldRetry()) { + LOG.error("ZooKeeper " + opName + " failed after " + + retryCounter.getMaxRetries() + " retries"); + throw e; + } + } + + /** + * getChildren is an idempotent operation. Retry before throwing exception + * @return List of children znodes + */ + public List getChildren(String path, Watcher watcher) + throws KeeperException, InterruptedException { + RetryCounter retryCounter = retryCounterFactory.create(); + while (true) { + try { + return zk.getChildren(path, watcher); + } catch (KeeperException e) { + switch (e.code()) { + case CONNECTIONLOSS: + case SESSIONEXPIRED: + case OPERATIONTIMEOUT: + retryOrThrow(retryCounter, e, "getChildren"); + break; + + default: + throw e; + } + } + retryCounter.sleepUntilNextRetry(); + retryCounter.useRetry(); + } + } + + /** + * getChildren is an idempotent operation. Retry before throwing exception + * @return List of children znodes + */ + public List getChildren(String path, boolean watch) + throws KeeperException, InterruptedException { + RetryCounter retryCounter = retryCounterFactory.create(); + while (true) { + try { + return zk.getChildren(path, watch); + } catch (KeeperException e) { + switch (e.code()) { + case CONNECTIONLOSS: + case SESSIONEXPIRED: + case OPERATIONTIMEOUT: + retryOrThrow(retryCounter, e, "getChildren"); + break; + + default: + throw e; + } + } + retryCounter.sleepUntilNextRetry(); + retryCounter.useRetry(); + } + } + + /** + * getData is an idempotent operation. 
Retry before throwing exception + * @return Data + */ + public byte[] getData(String path, Watcher watcher, Stat stat) + throws KeeperException, InterruptedException { + RetryCounter retryCounter = retryCounterFactory.create(); + while (true) { + try { + byte[] revData = zk.getData(path, watcher, stat); + return this.removeMetaData(revData); + } catch (KeeperException e) { + switch (e.code()) { + case CONNECTIONLOSS: + case SESSIONEXPIRED: + case OPERATIONTIMEOUT: + retryOrThrow(retryCounter, e, "getData"); + break; + + default: + throw e; + } + } + retryCounter.sleepUntilNextRetry(); + retryCounter.useRetry(); + } + } + + /** + * getData is an idemnpotent operation. Retry before throwing exception + * @return Data + */ + public byte[] getData(String path, boolean watch, Stat stat) + throws KeeperException, InterruptedException { + RetryCounter retryCounter = retryCounterFactory.create(); + while (true) { + try { + byte[] revData = zk.getData(path, watch, stat); + return this.removeMetaData(revData); + } catch (KeeperException e) { + switch (e.code()) { + case CONNECTIONLOSS: + case SESSIONEXPIRED: + case OPERATIONTIMEOUT: + retryOrThrow(retryCounter, e, "getData"); + break; + + default: + throw e; + } + } + retryCounter.sleepUntilNextRetry(); + retryCounter.useRetry(); + } + } + + /** + * setData is NOT an idempotent operation. Retry may cause BadVersion Exception + * Adding an identifier field into the data to check whether + * badversion is caused by the result of previous correctly setData + * @return Stat instance + */ + public Stat setData(String path, byte[] data, int version) + throws KeeperException, InterruptedException { + RetryCounter retryCounter = retryCounterFactory.create(); + byte[] newData = appendMetaData(data); + boolean isRetry = false; + while (true) { + try { + return zk.setData(path, newData, version); + } catch (KeeperException e) { + switch (e.code()) { + case CONNECTIONLOSS: + case SESSIONEXPIRED: + case OPERATIONTIMEOUT: + retryOrThrow(retryCounter, e, "setData"); + break; + case BADVERSION: + if (isRetry) { + // try to verify whether the previous setData success or not + try{ + Stat stat = new Stat(); + byte[] revData = zk.getData(path, false, stat); + if(Bytes.compareTo(revData, newData) == 0) { + // the bad version is caused by previous successful setData + return stat; + } + } catch(KeeperException keeperException){ + // the ZK is not reliable at this moment. just throwing exception + throw keeperException; + } + } + // throw other exceptions and verified bad version exceptions + default: + throw e; + } + } + retryCounter.sleepUntilNextRetry(); + retryCounter.useRetry(); + isRetry = true; + } + } + + /** + *

      + * NONSEQUENTIAL create is idempotent operation. + * Retry before throwing exceptions. + * But this function will not throw the NodeExist exception back to the + * application. + *

      + *

      + * But SEQUENTIAL is NOT idempotent operation. It is necessary to add + * identifier to the path to verify, whether the previous one is successful + * or not. + *

      + * + * @return Path + */ + public String create(String path, byte[] data, List acl, + CreateMode createMode) + throws KeeperException, InterruptedException { + byte[] newData = appendMetaData(data); + switch (createMode) { + case EPHEMERAL: + case PERSISTENT: + return createNonSequential(path, newData, acl, createMode); + + case EPHEMERAL_SEQUENTIAL: + case PERSISTENT_SEQUENTIAL: + return createSequential(path, newData, acl, createMode); + + default: + throw new IllegalArgumentException("Unrecognized CreateMode: " + + createMode); + } + } + + private String createNonSequential(String path, byte[] data, List acl, + CreateMode createMode) throws KeeperException, InterruptedException { + RetryCounter retryCounter = retryCounterFactory.create(); + boolean isRetry = false; // False for first attempt, true for all retries. + while (true) { + try { + return zk.create(path, data, acl, createMode); + } catch (KeeperException e) { + switch (e.code()) { + case NODEEXISTS: + if (isRetry) { + // If the connection was lost, there is still a possibility that + // we have successfully created the node at our previous attempt, + // so we read the node and compare. + byte[] currentData = zk.getData(path, false, null); + if (currentData != null && + Bytes.compareTo(currentData, data) == 0) { + // We successfully created a non-sequential node + return path; + } + LOG.error("Node " + path + " already exists with " + + Bytes.toStringBinary(currentData) + ", could not write " + + Bytes.toStringBinary(data)); + throw e; + } + LOG.info("Node " + path + " already exists and this is not a " + + "retry"); + throw e; + + case CONNECTIONLOSS: + case SESSIONEXPIRED: + case OPERATIONTIMEOUT: + retryOrThrow(retryCounter, e, "create"); + break; + + default: + throw e; + } + } + retryCounter.sleepUntilNextRetry(); + retryCounter.useRetry(); + isRetry = true; + } + } + + private String createSequential(String path, byte[] data, + List acl, CreateMode createMode) + throws KeeperException, InterruptedException { + RetryCounter retryCounter = retryCounterFactory.create(); + boolean first = true; + String newPath = path+this.identifier; + while (true) { + try { + if (!first) { + // Check if we succeeded on a previous attempt + String previousResult = findPreviousSequentialNode(newPath); + if (previousResult != null) { + return previousResult; + } + } + first = false; + return zk.create(newPath, data, acl, createMode); + } catch (KeeperException e) { + switch (e.code()) { + case CONNECTIONLOSS: + case SESSIONEXPIRED: + case OPERATIONTIMEOUT: + retryOrThrow(retryCounter, e, "create"); + break; + + default: + throw e; + } + } + retryCounter.sleepUntilNextRetry(); + retryCounter.useRetry(); + } + } + + private String findPreviousSequentialNode(String path) + throws KeeperException, InterruptedException { + int lastSlashIdx = path.lastIndexOf('/'); + assert(lastSlashIdx != -1); + String parent = path.substring(0, lastSlashIdx); + String nodePrefix = path.substring(lastSlashIdx+1); + + List nodes = zk.getChildren(parent, false); + List matching = filterByPrefix(nodes, nodePrefix); + for (String node : matching) { + String nodePath = parent + "/" + node; + Stat stat = zk.exists(nodePath, false); + if (stat != null) { + return nodePath; + } + } + return null; + } + + public byte[] removeMetaData(byte[] data) { + if(data == null || data.length == 0) { + return data; + } + // check the magic data; to be backward compatible + byte magic = data[0]; + if(magic != MAGIC) { + return data; + } + + int idLength = Bytes.toInt(data, 
ID_LENGTH_OFFSET); + int dataLength = data.length-MAGIC_SIZE-ID_LENGTH_SIZE-idLength; + int dataOffset = MAGIC_SIZE+ID_LENGTH_SIZE+idLength; + + byte[] newData = new byte[dataLength]; + System.arraycopy(data, dataOffset, newData, 0, dataLength); + return newData; + } + + private byte[] appendMetaData(byte[] data) { + if(data == null || data.length == 0){ + return data; + } + byte[] salt = Bytes.toBytes(salter.nextLong()); + int idLength = id.length + salt.length; + byte[] newData = new byte[MAGIC_SIZE+ID_LENGTH_SIZE+idLength+data.length]; + int pos = 0; + pos = Bytes.putByte(newData, pos, MAGIC); + pos = Bytes.putInt(newData, pos, idLength); + pos = Bytes.putBytes(newData, pos, id, 0, id.length); + pos = Bytes.putBytes(newData, pos, salt, 0, salt.length); + pos = Bytes.putBytes(newData, pos, data, 0, data.length); + return newData; + } + + public long getSessionId() { + return zk.getSessionId(); + } + + public void close() throws InterruptedException { + zk.close(); + } + + public States getState() { + return zk.getState(); + } + + public ZooKeeper getZooKeeper() { + return zk; + } + + public byte[] getSessionPasswd() { + return zk.getSessionPasswd(); + } + + public void sync(String path, AsyncCallback.VoidCallback cb, Object ctx) { + this.zk.sync(path, null, null); + } + + /** + * Filters the given node list by the given prefixes. + * This method is all-inclusive--if any element in the node list starts + * with any of the given prefixes, then it is included in the result. + * + * @param nodes the nodes to filter + * @param prefixes the prefixes to include in the result + * @return list of every element that starts with one of the prefixes + */ + private static List filterByPrefix(List nodes, + String... prefixes) { + List lockChildren = new ArrayList(); + for (String child : nodes){ + for (String prefix : prefixes){ + if (child.startsWith(prefix)){ + lockChildren.add(child); + break; + } + } + } + return lockChildren; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java new file mode 100644 index 0000000..c7145a2 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java @@ -0,0 +1,184 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
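For illustration only (not part of the patch): a minimal sketch of using RecoverableZooKeeper directly, exercising the retry-wrapped create, getData and delete shown above. The quorum string, timeouts and retry counts are made-up values; OPEN_ACL_UNSAFE comes from the stock ZooKeeper client library.

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs.Ids;

public class RecoverableZooKeeperSketch {
  public static void main(String[] args) throws Exception {
    RecoverableZooKeeper rzk = new RecoverableZooKeeper(
        "localhost:2181", 30000, null /* watcher */, 3 /* maxRetries */, 1000 /* retryIntervalMillis */);
    try {
      // NONSEQUENTIAL create: retried on connection loss; NodeExists on a retry
      // counts as success when the stored data matches what we tried to write.
      rzk.create("/demo", Bytes.toBytes("v1"), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
      // getData strips the identifier metadata that create/setData prepended.
      byte[] data = rzk.getData("/demo", false, null);
      System.out.println(Bytes.toString(data));  // v1
      rzk.delete("/demo", -1);                   // idempotent delete, any version
    } finally {
      rzk.close();
    }
  }
}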
+ */ +package org.apache.hadoop.hbase.zookeeper; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.zookeeper.KeeperException; + +/** + * Tracks the root region server location node in zookeeper. + * Root region location is set by RegionServerServices. + * This class has a watcher on the root location and notices changes. + */ +@InterfaceAudience.Private +public class RootRegionTracker extends ZooKeeperNodeTracker { + /** + * Creates a root region location tracker. + * + *

      After construction, use {@link #start} to kick off tracking. + * + * @param watcher + * @param abortable + */ + public RootRegionTracker(ZooKeeperWatcher watcher, Abortable abortable) { + super(watcher, watcher.rootServerZNode, abortable); + } + + /** + * Checks if the root region location is available. + * @return true if root region location is available, false if not + */ + public boolean isLocationAvailable() { + return super.getData(true) != null; + } + + /** + * Gets the root region location, if available. Does not block. Sets a watcher. + * @return server name or null if we failed to get the data. + * @throws InterruptedException + */ + public ServerName getRootRegionLocation() throws InterruptedException { + try { + return ServerName.parseFrom(super.getData(true)); + } catch (DeserializationException e) { + LOG.warn("Failed parse", e); + return null; + } + } + + /** + * Gets the root region location, if available. Does not block. Does not set + * a watcher (In this regard it differs from {@link #getRootRegionLocation()}. + * @param zkw + * @return server name or null if we failed to get the data. + * @throws KeeperException + */ + public static ServerName getRootRegionLocation(final ZooKeeperWatcher zkw) + throws KeeperException { + try { + return ServerName.parseFrom(ZKUtil.getData(zkw, zkw.rootServerZNode)); + } catch (DeserializationException e) { + throw ZKUtil.convert(e); + } + } + + /** + * Gets the root region location, if available, and waits for up to the + * specified timeout if not immediately available. + * Given the zookeeper notification could be delayed, we will try to + * get the latest data. + * @param timeout maximum time to wait, in millis + * @return server name for server hosting root region formatted as per + * {@link ServerName}, or null if none available + * @throws InterruptedException if interrupted while waiting + */ + public ServerName waitRootRegionLocation(long timeout) + throws InterruptedException { + if (false == checkIfBaseNodeAvailable()) { + String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. " + + "There could be a mismatch with the one configured in the master."; + LOG.error(errorMsg); + throw new IllegalArgumentException(errorMsg); + } + try { + return ServerName.parseFrom(super.blockUntilAvailable(timeout, true)); + } catch (DeserializationException e) { + LOG.warn("Failed parse", e); + return null; + } + } + + /** + * Sets the location of -ROOT- in ZooKeeper to the + * specified server address. + * @param zookeeper zookeeper reference + * @param location The server hosting -ROOT- + * @throws KeeperException unexpected zookeeper exception + */ + public static void setRootLocation(ZooKeeperWatcher zookeeper, + final ServerName location) + throws KeeperException { + LOG.info("Setting ROOT region location in ZooKeeper as " + location); + // Make the RootRegionServer pb and then get its bytes and save this as + // the znode content. + byte [] data = toByteArray(location); + try { + ZKUtil.createAndWatch(zookeeper, zookeeper.rootServerZNode, data); + } catch(KeeperException.NodeExistsException nee) { + LOG.debug("ROOT region location already existed, updated location"); + ZKUtil.setData(zookeeper, zookeeper.rootServerZNode, data); + } + } + + /** + * Build up the znode content. + * @param sn What to put into the znode. + * @return The content of the root-region-server znode + */ + static byte [] toByteArray(final ServerName sn) { + // ZNode content is a pb message preceeded by some pb magic. 
+ HBaseProtos.ServerName pbsn = + HBaseProtos.ServerName.newBuilder().setHostName(sn.getHostname()). + setPort(sn.getPort()).setStartCode(sn.getStartcode()).build(); + ZooKeeperProtos.RootRegionServer pbrsr = + ZooKeeperProtos.RootRegionServer.newBuilder().setServer(pbsn).build(); + return ProtobufUtil.prependPBMagic(pbrsr.toByteArray()); + } + + /** + * Deletes the location of -ROOT- in ZooKeeper. + * @param zookeeper zookeeper reference + * @throws KeeperException unexpected zookeeper exception + */ + public static void deleteRootLocation(ZooKeeperWatcher zookeeper) + throws KeeperException { + LOG.info("Unsetting ROOT region location in ZooKeeper"); + try { + // Just delete the node. Don't need any watches. + ZKUtil.deleteNode(zookeeper, zookeeper.rootServerZNode); + } catch(KeeperException.NoNodeException nne) { + // Has already been deleted + } + } + + /** + * Wait until the root region is available. + * @param zkw + * @param timeout + * @return ServerName or null if we timed out. + * @throws InterruptedException + */ + public static ServerName blockUntilAvailable(final ZooKeeperWatcher zkw, + final long timeout) + throws InterruptedException { + byte [] data = ZKUtil.blockUntilAvailable(zkw, zkw.rootServerZNode, timeout); + if (data == null) return null; + try { + return ServerName.parseFrom(data); + } catch (DeserializationException e) { + LOG.warn("Failed parse", e); + return null; + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java new file mode 100644 index 0000000..8d52341 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java @@ -0,0 +1,80 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.zookeeper; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.ClusterId; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.zookeeper.KeeperException; + +/** + * Publishes and synchronizes a unique identifier specific to a given HBase + * cluster. The stored identifier is read from the file system by the active + * master on startup, and is subsequently available to all watchers (including + * clients). 
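For illustration only (not part of the patch): a minimal helper around RootRegionTracker.blockUntilAvailable(), assuming a ZooKeeperWatcher supplied by the caller as in the earlier sketch; the wrapper method name is hypothetical.

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class RootLocationSketch {
  /** Waits up to timeoutMs for the root-region-server znode and returns its ServerName. */
  public static ServerName lookupRoot(ZooKeeperWatcher zkw, long timeoutMs)
      throws InterruptedException {
    // Blocks until the znode is published, then parses its protobuf content.
    return RootRegionTracker.blockUntilAvailable(zkw, timeoutMs);
  }
}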
+ */ +@InterfaceAudience.Private +public class ZKClusterId { + private ZooKeeperWatcher watcher; + private Abortable abortable; + private String id; + + public ZKClusterId(ZooKeeperWatcher watcher, Abortable abortable) { + this.watcher = watcher; + this.abortable = abortable; + } + + public boolean hasId() { + return getId() != null; + } + + public String getId() { + try { + if (id == null) { + id = readClusterIdZNode(watcher); + } + } catch (KeeperException ke) { + abortable.abort("Unexpected exception from ZooKeeper reading cluster ID", + ke); + } + return id; + } + + public static String readClusterIdZNode(ZooKeeperWatcher watcher) + throws KeeperException { + if (ZKUtil.checkExists(watcher, watcher.clusterIdZNode) != -1) { + byte [] data = ZKUtil.getData(watcher, watcher.clusterIdZNode); + if (data != null) { + try { + return ClusterId.parseFrom(data).toString(); + } catch (DeserializationException e) { + throw ZKUtil.convert(e); + } + } + } + return null; + } + + public static void setClusterId(ZooKeeperWatcher watcher, ClusterId id) + throws KeeperException { + ZKUtil.createSetData(watcher, watcher.clusterIdZNode, id.toByteArray()); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java new file mode 100644 index 0000000..3c74636 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java @@ -0,0 +1,273 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.zookeeper; + +import java.io.IOException; +import java.io.InputStream; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.Map.Entry; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.util.StringUtils; + +/** + * Utility methods for reading, and building the ZooKeeper configuration. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ZKConfig { + private static final Log LOG = LogFactory.getLog(ZKConfig.class); + + private static final String VARIABLE_START = "${"; + private static final int VARIABLE_START_LENGTH = VARIABLE_START.length(); + private static final String VARIABLE_END = "}"; + private static final int VARIABLE_END_LENGTH = VARIABLE_END.length(); + + /** + * Make a Properties object holding ZooKeeper config. 
+ * Parses the corresponding config options from the HBase XML configs + * and generates the appropriate ZooKeeper properties. + * @param conf Configuration to read from. + * @return Properties holding mappings representing ZooKeeper config file. + */ + public static Properties makeZKProps(Configuration conf) { + if (conf.getBoolean(HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG, + false)) { + LOG.warn( + "Parsing ZooKeeper's " + HConstants.ZOOKEEPER_CONFIG_NAME + + " file for ZK properties " + + "has been deprecated. Please instead place all ZK related HBase " + + "configuration under the hbase-site.xml, using prefixes " + + "of the form '" + HConstants.ZK_CFG_PROPERTY_PREFIX + "', and " + + "set property '" + HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG + + "' to false"); + // First check if there is a zoo.cfg in the CLASSPATH. If so, simply read + // it and grab its configuration properties. + ClassLoader cl = HQuorumPeer.class.getClassLoader(); + final InputStream inputStream = + cl.getResourceAsStream(HConstants.ZOOKEEPER_CONFIG_NAME); + if (inputStream != null) { + try { + return parseZooCfg(conf, inputStream); + } catch (IOException e) { + LOG.warn("Cannot read " + HConstants.ZOOKEEPER_CONFIG_NAME + + ", loading from XML files", e); + } + } + } else { + if (LOG.isDebugEnabled()) { + LOG.debug( + "Skipped reading ZK properties file '" + + HConstants.ZOOKEEPER_CONFIG_NAME + + "' since '" + HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG + + "' was not set to true"); + } + } + + // Otherwise, use the configuration options from HBase's XML files. + Properties zkProperties = new Properties(); + + // Directly map all of the hbase.zookeeper.property.KEY properties. + for (Entry entry : conf) { + String key = entry.getKey(); + if (key.startsWith(HConstants.ZK_CFG_PROPERTY_PREFIX)) { + String zkKey = key.substring(HConstants.ZK_CFG_PROPERTY_PREFIX_LEN); + String value = entry.getValue(); + // If the value has variables substitutions, need to do a get. + if (value.contains(VARIABLE_START)) { + value = conf.get(key); + } + zkProperties.put(zkKey, value); + } + } + + // If clientPort is not set, assign the default. + if (zkProperties.getProperty(HConstants.CLIENT_PORT_STR) == null) { + zkProperties.put(HConstants.CLIENT_PORT_STR, + HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT); + } + + // Create the server.X properties. + int peerPort = conf.getInt("hbase.zookeeper.peerport", 2888); + int leaderPort = conf.getInt("hbase.zookeeper.leaderport", 3888); + + final String[] serverHosts = conf.getStrings(HConstants.ZOOKEEPER_QUORUM, + HConstants.LOCALHOST); + for (int i = 0; i < serverHosts.length; ++i) { + String serverHost = serverHosts[i]; + String address = serverHost + ":" + peerPort + ":" + leaderPort; + String key = "server." + i; + zkProperties.put(key, address); + } + + return zkProperties; + } + + /** + * Parse ZooKeeper's zoo.cfg, injecting HBase Configuration variables in. + * This method is used for testing so we can pass our own InputStream. + * @param conf HBaseConfiguration to use for injecting variables. + * @param inputStream InputStream to read from. + * @return Properties parsed from config stream with variables substituted. + * @throws IOException if anything goes wrong parsing config + * @deprecated in 0.96 onwards. HBase will no longer rely on zoo.cfg + * availability. 
+ */ + @Deprecated + public static Properties parseZooCfg(Configuration conf, + InputStream inputStream) throws IOException { + Properties properties = new Properties(); + try { + properties.load(inputStream); + } catch (IOException e) { + final String msg = "fail to read properties from " + + HConstants.ZOOKEEPER_CONFIG_NAME; + LOG.fatal(msg); + throw new IOException(msg, e); + } + for (Entry entry : properties.entrySet()) { + String value = entry.getValue().toString().trim(); + String key = entry.getKey().toString().trim(); + StringBuilder newValue = new StringBuilder(); + int varStart = value.indexOf(VARIABLE_START); + int varEnd = 0; + while (varStart != -1) { + varEnd = value.indexOf(VARIABLE_END, varStart); + if (varEnd == -1) { + String msg = "variable at " + varStart + " has no end marker"; + LOG.fatal(msg); + throw new IOException(msg); + } + String variable = value.substring(varStart + VARIABLE_START_LENGTH, varEnd); + + String substituteValue = System.getProperty(variable); + if (substituteValue == null) { + substituteValue = conf.get(variable); + } + if (substituteValue == null) { + String msg = "variable " + variable + " not set in system property " + + "or hbase configs"; + LOG.fatal(msg); + throw new IOException(msg); + } + + newValue.append(substituteValue); + + varEnd += VARIABLE_END_LENGTH; + varStart = value.indexOf(VARIABLE_START, varEnd); + } + // Special case for 'hbase.cluster.distributed' property being 'true' + if (key.startsWith("server.")) { + boolean mode = conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED); + if (mode == HConstants.CLUSTER_IS_DISTRIBUTED && value.startsWith(HConstants.LOCALHOST)) { + String msg = "The server in zoo.cfg cannot be set to localhost " + + "in a fully-distributed setup because it won't be reachable. " + + "See \"Getting Started\" for more information."; + LOG.fatal(msg); + throw new IOException(msg); + } + } + newValue.append(value.substring(varEnd)); + properties.setProperty(key, newValue.toString()); + } + return properties; + } + + /** + * Return the ZK Quorum servers string given zk properties returned by + * makeZKProps + * @param properties + * @return Quorum servers String + */ + public static String getZKQuorumServersString(Properties properties) { + String clientPort = null; + List servers = new ArrayList(); + + // The clientPort option may come after the server.X hosts, so we need to + // grab everything and then create the final host:port comma separated list. + boolean anyValid = false; + for (Entry property : properties.entrySet()) { + String key = property.getKey().toString().trim(); + String value = property.getValue().toString().trim(); + if (key.equals("clientPort")) { + clientPort = value; + } + else if (key.startsWith("server.")) { + String host = value.substring(0, value.indexOf(':')); + servers.add(host); + try { + //noinspection ResultOfMethodCallIgnored + InetAddress.getByName(host); + anyValid = true; + } catch (UnknownHostException e) { + LOG.warn(StringUtils.stringifyException(e)); + } + } + } + + if (!anyValid) { + LOG.error("no valid quorum servers found in " + HConstants.ZOOKEEPER_CONFIG_NAME); + return null; + } + + if (clientPort == null) { + LOG.error("no clientPort found in " + HConstants.ZOOKEEPER_CONFIG_NAME); + return null; + } + + if (servers.isEmpty()) { + LOG.fatal("No servers were found in provided ZooKeeper configuration. " + + "HBase must have a ZooKeeper cluster configured for its " + + "operation. 
Ensure that you've configured '" + + HConstants.ZOOKEEPER_QUORUM + "' properly."); + return null; + } + + StringBuilder hostPortBuilder = new StringBuilder(); + for (int i = 0; i < servers.size(); ++i) { + String host = servers.get(i); + if (i > 0) { + hostPortBuilder.append(','); + } + hostPortBuilder.append(host); + hostPortBuilder.append(':'); + hostPortBuilder.append(clientPort); + } + + return hostPortBuilder.toString(); + } + + /** + * Return the ZK Quorum servers string given the specified configuration. + * @param conf + * @return Quorum servers + */ + public static String getZKQuorumServersString(Configuration conf) { + return getZKQuorumServersString(makeZKProps(conf)); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java new file mode 100644 index 0000000..45c6cee --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java @@ -0,0 +1,367 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.zookeeper; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.zookeeper.KeeperException; + +/** + * Helper class for table state tracking for use by AssignmentManager. + * Reads, caches and sets state up in zookeeper. If multiple read/write + * clients, will make for confusion. Read-only clients other than + * AssignmentManager interested in learning table state can use the + * read-only utility methods in {@link ZKTableReadOnly}. + * + *
<p>
      To save on trips to the zookeeper ensemble, internally we cache table + * state. + */ +@InterfaceAudience.Private +public class ZKTable { + // A znode will exist under the table directory if it is in any of the + // following states: {@link TableState#ENABLING} , {@link TableState#DISABLING}, + // or {@link TableState#DISABLED}. If {@link TableState#ENABLED}, there will + // be no entry for a table in zk. Thats how it currently works. + + private static final Log LOG = LogFactory.getLog(ZKTable.class); + private final ZooKeeperWatcher watcher; + + /** + * Cache of what we found in zookeeper so we don't have to go to zk ensemble + * for every query. Synchronize access rather than use concurrent Map because + * synchronization needs to span query of zk. + */ + private final Map cache = + new HashMap(); + + // TODO: Make it so always a table znode. Put table schema here as well as table state. + // Have watcher on table znode so all are notified of state or schema change. + + public ZKTable(final ZooKeeperWatcher zkw) throws KeeperException { + super(); + this.watcher = zkw; + populateTableStates(); + } + + /** + * Gets a list of all the tables set as disabled in zookeeper. + * @throws KeeperException + */ + private void populateTableStates() + throws KeeperException { + synchronized (this.cache) { + List children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode); + if (children == null) return; + for (String child: children) { + ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(this.watcher, child); + if (state != null) this.cache.put(child, state); + } + } + } + + /** + * Sets the specified table as DISABLED in zookeeper. Fails silently if the + * table is already disabled in zookeeper. Sets no watches. + * @param tableName + * @throws KeeperException unexpected zookeeper exception + */ + public void setDisabledTable(String tableName) + throws KeeperException { + synchronized (this.cache) { + if (!isDisablingOrDisabledTable(tableName)) { + LOG.warn("Moving table " + tableName + " state to disabled but was " + + "not first in disabling state: " + this.cache.get(tableName)); + } + setTableState(tableName, ZooKeeperProtos.Table.State.DISABLED); + } + } + + /** + * Sets the specified table as DISABLING in zookeeper. Fails silently if the + * table is already disabled in zookeeper. Sets no watches. + * @param tableName + * @throws KeeperException unexpected zookeeper exception + */ + public void setDisablingTable(final String tableName) + throws KeeperException { + synchronized (this.cache) { + if (!isEnabledOrDisablingTable(tableName)) { + LOG.warn("Moving table " + tableName + " state to disabling but was " + + "not first in enabled state: " + this.cache.get(tableName)); + } + setTableState(tableName, ZooKeeperProtos.Table.State.DISABLING); + } + } + + /** + * Sets the specified table as ENABLING in zookeeper. Fails silently if the + * table is already disabled in zookeeper. Sets no watches. 
+ * @param tableName + * @throws KeeperException unexpected zookeeper exception + */ + public void setEnablingTable(final String tableName) + throws KeeperException { + synchronized (this.cache) { + if (!isDisabledOrEnablingTable(tableName)) { + LOG.warn("Moving table " + tableName + " state to enabling but was " + + "not first in disabled state: " + this.cache.get(tableName)); + } + setTableState(tableName, ZooKeeperProtos.Table.State.ENABLING); + } + } + + /** + * Sets the specified table as ENABLING in zookeeper atomically + * If the table is already in ENABLING state, no operation is performed + * @param tableName + * @return if the operation succeeds or not + * @throws KeeperException unexpected zookeeper exception + */ + public boolean checkAndSetEnablingTable(final String tableName) + throws KeeperException { + synchronized (this.cache) { + if (isEnablingTable(tableName)) { + return false; + } + setTableState(tableName, ZooKeeperProtos.Table.State.ENABLING); + return true; + } + } + + /** + * Sets the specified table as ENABLING in zookeeper atomically + * If the table isn't in DISABLED state, no operation is performed + * @param tableName + * @return if the operation succeeds or not + * @throws KeeperException unexpected zookeeper exception + */ + public boolean checkDisabledAndSetEnablingTable(final String tableName) + throws KeeperException { + synchronized (this.cache) { + if (!isDisabledTable(tableName)) { + return false; + } + setTableState(tableName, ZooKeeperProtos.Table.State.ENABLING); + return true; + } + } + + /** + * Sets the specified table as DISABLING in zookeeper atomically + * If the table isn't in ENABLED state, no operation is performed + * @param tableName + * @return if the operation succeeds or not + * @throws KeeperException unexpected zookeeper exception + */ + public boolean checkEnabledAndSetDisablingTable(final String tableName) + throws KeeperException { + synchronized (this.cache) { + if (this.cache.get(tableName) != null && !isEnabledTable(tableName)) { + return false; + } + setTableState(tableName, ZooKeeperProtos.Table.State.DISABLING); + return true; + } + } + + private void setTableState(final String tableName, final ZooKeeperProtos.Table.State state) + throws KeeperException { + String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName); + if (ZKUtil.checkExists(this.watcher, znode) == -1) { + ZKUtil.createAndFailSilent(this.watcher, znode); + } + synchronized (this.cache) { + ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); + builder.setState(state); + byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); + ZKUtil.setData(this.watcher, znode, data); + this.cache.put(tableName, state); + } + } + + public boolean isDisabledTable(final String tableName) { + return isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED); + } + + public boolean isDisablingTable(final String tableName) { + return isTableState(tableName, ZooKeeperProtos.Table.State.DISABLING); + } + + public boolean isEnablingTable(final String tableName) { + return isTableState(tableName, ZooKeeperProtos.Table.State.ENABLING); + } + + public boolean isEnabledTable(String tableName) { + return isTableState(tableName, ZooKeeperProtos.Table.State.ENABLED); + } + + public boolean isDisablingOrDisabledTable(final String tableName) { + synchronized (this.cache) { + return isDisablingTable(tableName) || isDisabledTable(tableName); + } + } + + public boolean isEnabledOrDisablingTable(final String tableName) { + synchronized 
(this.cache) { + return isEnabledTable(tableName) || isDisablingTable(tableName); + } + } + + public boolean isDisabledOrEnablingTable(final String tableName) { + synchronized (this.cache) { + return isDisabledTable(tableName) || isEnablingTable(tableName); + } + } + + private boolean isTableState(final String tableName, final ZooKeeperProtos.Table.State state) { + synchronized (this.cache) { + ZooKeeperProtos.Table.State currentState = this.cache.get(tableName); + return ZKTableReadOnly.isTableState(currentState, state); + } + } + + /** + * Deletes the table in zookeeper. Fails silently if the + * table is not currently disabled in zookeeper. Sets no watches. + * @param tableName + * @throws KeeperException unexpected zookeeper exception + */ + public void setDeletedTable(final String tableName) + throws KeeperException { + synchronized (this.cache) { + if (this.cache.remove(tableName) == null) { + LOG.warn("Moving table " + tableName + " state to deleted but was " + + "already deleted"); + } + ZKUtil.deleteNodeFailSilent(this.watcher, + ZKUtil.joinZNode(this.watcher.tableZNode, tableName)); + } + } + + /** + * Sets the ENABLED state in the cache and creates or force updates a node to + * ENABLED state for the specified table + * + * @param tableName + * @throws KeeperException + */ + public void setEnabledTable(final String tableName) throws KeeperException { + setTableState(tableName, ZooKeeperProtos.Table.State.ENABLED); + } + + /** + * check if table is present . + * + * @param tableName + * @return true if the table is present + */ + public boolean isTablePresent(final String tableName) { + synchronized (this.cache) { + ZooKeeperProtos.Table.State state = this.cache.get(tableName); + return !(state == null); + } + } + + /** + * Gets a list of all the tables set as disabled in zookeeper. + * @return Set of disabled tables, empty Set if none + */ + public Set getDisabledTables() { + Set disabledTables = new HashSet(); + synchronized (this.cache) { + Set tables = this.cache.keySet(); + for (String table: tables) { + if (isDisabledTable(table)) disabledTables.add(table); + } + } + return disabledTables; + } + + /** + * Gets a list of all the tables set as disabled in zookeeper. + * @return Set of disabled tables, empty Set if none + * @throws KeeperException + */ + public static Set getDisabledTables(ZooKeeperWatcher zkw) + throws KeeperException { + return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLED); + } + + /** + * Gets a list of all the tables set as disabling in zookeeper. + * @return Set of disabling tables, empty Set if none + * @throws KeeperException + */ + public static Set getDisablingTables(ZooKeeperWatcher zkw) + throws KeeperException { + return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLING); + } + + /** + * Gets a list of all the tables set as enabling in zookeeper. + * @return Set of enabling tables, empty Set if none + * @throws KeeperException + */ + public static Set getEnablingTables(ZooKeeperWatcher zkw) + throws KeeperException { + return getAllTables(zkw, ZooKeeperProtos.Table.State.ENABLING); + } + + /** + * Gets a list of all the tables set as disabled in zookeeper. + * @return Set of disabled tables, empty Set if none + * @throws KeeperException + */ + public static Set getDisabledOrDisablingTables(ZooKeeperWatcher zkw) + throws KeeperException { + return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLED, + ZooKeeperProtos.Table.State.DISABLING); + } + + /** + * Gets a list of all the tables of specified states in zookeeper. 
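+ *
+ * <p>For example, a disable workflow roughly runs as follows (a sketch only; {@code zkt} is
+ * the master's {@link ZKTable} instance, {@code zkw} its watcher, and "usertable" an
+ * illustrative table name):
+ * <pre>
+ *   if (zkt.checkEnabledAndSetDisablingTable("usertable")) {
+ *     // ... unassign the table's regions ...
+ *     zkt.setDisabledTable("usertable");
+ *   }
+ *   Set&lt;String&gt; disabled = ZKTable.getDisabledTables(zkw);  // read side
+ * </pre>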
+ * @return Set of tables of specified states, empty Set if none + * @throws KeeperException + */ + static Set getAllTables(final ZooKeeperWatcher zkw, + final ZooKeeperProtos.Table.State... states) throws KeeperException { + Set allTables = new HashSet(); + List children = + ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); + for (String child: children) { + ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(zkw, child); + for (ZooKeeperProtos.Table.State expectedState: states) { + if (state == expectedState) { + allTables.add(child); + break; + } + } + } + return allTables; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableReadOnly.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableReadOnly.java new file mode 100644 index 0000000..2fed38a --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableReadOnly.java @@ -0,0 +1,160 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.zookeeper; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.zookeeper.KeeperException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * Non-instantiable class that provides helper functions for + * clients other than AssignmentManager for reading the + * state of a table in ZK. + * + *
<p>
      Does not cache state like {@link ZKTable}, actually reads from ZK each call. + */ +public class ZKTableReadOnly { + + private ZKTableReadOnly() {} + + /** + * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLED}. + * This method does not use cache. + * This method is for clients other than AssignmentManager + * @param zkw + * @param tableName + * @return True if table is enabled. + * @throws KeeperException + */ + public static boolean isDisabledTable(final ZooKeeperWatcher zkw, + final String tableName) + throws KeeperException { + ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); + return isTableState(ZooKeeperProtos.Table.State.DISABLED, state); + } + + /** + * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#ENABLED}. + * This method does not use cache. + * This method is for clients other than AssignmentManager + * @param zkw + * @param tableName + * @return True if table is enabled. + * @throws KeeperException + */ + public static boolean isEnabledTable(final ZooKeeperWatcher zkw, + final String tableName) + throws KeeperException { + return getTableState(zkw, tableName) == ZooKeeperProtos.Table.State.ENABLED; + } + + /** + * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLING} + * of {@code ZooKeeperProtos.Table.State#DISABLED}. + * This method does not use cache. + * This method is for clients other than AssignmentManager. + * @param zkw + * @param tableName + * @return True if table is enabled. + * @throws KeeperException + */ + public static boolean isDisablingOrDisabledTable(final ZooKeeperWatcher zkw, + final String tableName) + throws KeeperException { + ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); + return isTableState(ZooKeeperProtos.Table.State.DISABLING, state) || + isTableState(ZooKeeperProtos.Table.State.DISABLED, state); + } + + /** + * Gets a list of all the tables set as disabled in zookeeper. + * @return Set of disabled tables, empty Set if none + * @throws KeeperException + */ + public static Set getDisabledTables(ZooKeeperWatcher zkw) + throws KeeperException { + Set disabledTables = new HashSet(); + List children = + ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); + for (String child: children) { + ZooKeeperProtos.Table.State state = getTableState(zkw, child); + if (state == ZooKeeperProtos.Table.State.DISABLED) disabledTables.add(child); + } + return disabledTables; + } + + /** + * Gets a list of all the tables set as disabled in zookeeper. + * @return Set of disabled tables, empty Set if none + * @throws KeeperException + */ + public static Set getDisabledOrDisablingTables(ZooKeeperWatcher zkw) + throws KeeperException { + Set disabledTables = new HashSet(); + List children = + ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); + for (String child: children) { + ZooKeeperProtos.Table.State state = getTableState(zkw, child); + if (state == ZooKeeperProtos.Table.State.DISABLED || + state == ZooKeeperProtos.Table.State.DISABLING) + disabledTables.add(child); + } + return disabledTables; + } + + static boolean isTableState(final ZooKeeperProtos.Table.State expectedState, + final ZooKeeperProtos.Table.State currentState) { + return currentState != null && currentState.equals(expectedState); + } + + /** + * @param zkw + * @param child + * @return Null or {@link ZooKeeperProtos.Table.State} found in znode. 
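+ *
+ * <p>The znode payload written by {@link ZKTable} is the PB magic prefix followed by a
+ * serialized {@code ZooKeeperProtos.Table} message, so decoding (as done below, with
+ * {@code data} being the raw znode bytes) amounts to:
+ * <pre>
+ *   int magicLen = ProtobufUtil.lengthOfPBMagic();
+ *   ZooKeeperProtos.Table t = ZooKeeperProtos.Table.newBuilder()
+ *       .mergeFrom(data, magicLen, data.length - magicLen).build();
+ *   ZooKeeperProtos.Table.State state = t.getState();
+ * </pre>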
+ * @throws KeeperException + */ + static ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw, + final String child) + throws KeeperException { + String znode = ZKUtil.joinZNode(zkw.tableZNode, child); + byte [] data = ZKUtil.getData(zkw, znode); + if (data == null || data.length <= 0) return ZooKeeperProtos.Table.State.ENABLED; + try { + ProtobufUtil.expectPBMagicPrefix(data); + ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); + int magicLen = ProtobufUtil.lengthOfPBMagic(); + ZooKeeperProtos.Table t = builder.mergeFrom(data, magicLen, data.length - magicLen).build(); + return t.getState(); + } catch (InvalidProtocolBufferException e) { + KeeperException ke = new KeeperException.DataInconsistencyException(); + ke.initCause(e); + throw ke; + } catch (DeserializationException e) { + throw ZKUtil.convert(e); + } + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java new file mode 100644 index 0000000..edaf916 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -0,0 +1,1417 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.zookeeper; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintWriter; +import java.net.InetSocketAddress; +import java.net.InetAddress; +import java.net.Socket; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.HashMap; +import java.util.Map; + +import javax.security.auth.login.LoginException; +import javax.security.auth.login.AppConfigurationEntry; +import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag; + +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.authentication.util.KerberosUtil; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.zookeeper.AsyncCallback; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.KeeperException.NoNodeException; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.ZooDefs.Ids; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.data.ACL; +import org.apache.zookeeper.data.Stat; +import org.apache.zookeeper.client.ZooKeeperSaslClient; +import org.apache.zookeeper.server.ZooKeeperSaslServer; + +/** + * Internal HBase utility class for ZooKeeper. + * + *
<p>
      Contains only static methods and constants. + * + *
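<p>A typical entry point is {@link #connect(Configuration, Watcher)}, which builds the
+ * ensemble string from the usual hbase-site.xml keys (a sketch; {@code watcher} is any
+ * caller-supplied {@link Watcher}):
+ * <pre>
+ *   Configuration conf = HBaseConfiguration.create();
+ *   RecoverableZooKeeper rzk = ZKUtil.connect(conf, watcher);
+ * </pre>
+ *
+ *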
<p>
      Methods all throw {@link KeeperException} if there is an unexpected + * zookeeper exception, so callers of these methods must handle appropriately. + * If ZK is required for the operation, the server will need to be aborted. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ZKUtil { + private static final Log LOG = LogFactory.getLog(ZKUtil.class); + + // TODO: Replace this with ZooKeeper constant when ZOOKEEPER-277 is resolved. + private static final char ZNODE_PATH_SEPARATOR = '/'; + private static int zkDumpConnectionTimeOut; + + /** + * Creates a new connection to ZooKeeper, pulling settings and ensemble config + * from the specified configuration object using methods from {@link ZKConfig}. + * + * Sets the connection status monitoring watcher to the specified watcher. + * + * @param conf configuration to pull ensemble and other settings from + * @param watcher watcher to monitor connection changes + * @return connection to zookeeper + * @throws IOException if unable to connect to zk or config problem + */ + public static RecoverableZooKeeper connect(Configuration conf, Watcher watcher) + throws IOException { + Properties properties = ZKConfig.makeZKProps(conf); + String ensemble = ZKConfig.getZKQuorumServersString(properties); + return connect(conf, ensemble, watcher); + } + + public static RecoverableZooKeeper connect(Configuration conf, String ensemble, + Watcher watcher) + throws IOException { + return connect(conf, ensemble, watcher, ""); + } + + public static RecoverableZooKeeper connect(Configuration conf, String ensemble, + Watcher watcher, final String descriptor) + throws IOException { + if(ensemble == null) { + throw new IOException("Unable to determine ZooKeeper ensemble"); + } + int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, + HConstants.DEFAULT_ZK_SESSION_TIMEOUT); + LOG.debug(descriptor + " opening connection to ZooKeeper with ensemble (" + + ensemble + ")"); + int retry = conf.getInt("zookeeper.recovery.retry", 3); + int retryIntervalMillis = + conf.getInt("zookeeper.recovery.retry.intervalmill", 1000); + zkDumpConnectionTimeOut = conf.getInt("zookeeper.dump.connection.timeout", + 1000); + return new RecoverableZooKeeper(ensemble, timeout, watcher, + retry, retryIntervalMillis); + } + + /** + * Log in the current zookeeper server process using the given configuration + * keys for the credential file and login principal. + * + *
<p>
This is only applicable when running on secure HBase. + * On regular HBase (without security features), this will safely be ignored. + *
</p>
      + * + * @param conf The configuration data to use + * @param keytabFileKey Property key used to configure the path to the credential file + * @param userNameKey Property key used to configure the login principal + * @param hostname Current hostname to use in any credentials + * @throws IOException underlying exception from SecurityUtil.login() call + */ + public static void loginServer(Configuration conf, String keytabFileKey, + String userNameKey, String hostname) throws IOException { + login(conf, keytabFileKey, userNameKey, hostname, + ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY, + JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME); + } + + /** + * Log in the current zookeeper client using the given configuration + * keys for the credential file and login principal. + * + *
<p>
This is only applicable when running on secure HBase. + * On regular HBase (without security features), this will safely be ignored. + *
</p>
      + * + * @param conf The configuration data to use + * @param keytabFileKey Property key used to configure the path to the credential file + * @param userNameKey Property key used to configure the login principal + * @param hostname Current hostname to use in any credentials + * @throws IOException underlying exception from SecurityUtil.login() call + */ + public static void loginClient(Configuration conf, String keytabFileKey, + String userNameKey, String hostname) throws IOException { + login(conf, keytabFileKey, userNameKey, hostname, + ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, + JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME); + } + + /** + * Log in the current process using the given configuration keys for the + * credential file and login principal. + * + *
<p>
This is only applicable when running on secure HBase. + * On regular HBase (without security features), this will safely be ignored. + *
</p>
      + * + * @param conf The configuration data to use + * @param keytabFileKey Property key used to configure the path to the credential file + * @param userNameKey Property key used to configure the login principal + * @param hostname Current hostname to use in any credentials + * @param loginContextProperty property name to expose the entry name + * @param loginContextName jaas entry name + * @throws IOException underlying exception from SecurityUtil.login() call + */ + private static void login(Configuration conf, String keytabFileKey, + String userNameKey, String hostname, + String loginContextProperty, String loginContextName) + throws IOException { + if (!isSecureZooKeeper(conf)) + return; + + // User has specified a jaas.conf, keep this one as the good one. + // HBASE_OPTS="-Djava.security.auth.login.config=jaas.conf" + if (System.getProperty("java.security.auth.login.config") != null) + return; + + String keytabFilename = conf.get(keytabFileKey); + String principalConfig = conf.get(userNameKey, System.getProperty("user.name")); + String principalName = SecurityUtil.getServerPrincipal(principalConfig, hostname); + + // Initialize the "jaas.conf" for keyTab/principal, + // If keyTab is not specified use the Ticket Cache. + // and set the zookeeper login context name. + JaasConfiguration jaasConf = new JaasConfiguration(loginContextName, + keytabFilename, principalName); + javax.security.auth.login.Configuration.setConfiguration(jaasConf); + System.setProperty(loginContextProperty, loginContextName); + } + + /** + * A JAAS configuration that defines the login modules that we want to use for login. + */ + private static class JaasConfiguration extends javax.security.auth.login.Configuration { + private static final String SERVER_KEYTAB_KERBEROS_CONFIG_NAME = + "zookeeper-server-keytab-kerberos"; + private static final String CLIENT_KEYTAB_KERBEROS_CONFIG_NAME = + "zookeeper-client-keytab-kerberos"; + + private static final Map BASIC_JAAS_OPTIONS = + new HashMap(); + static { + String jaasEnvVar = System.getenv("HBASE_JAAS_DEBUG"); + if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar)) { + BASIC_JAAS_OPTIONS.put("debug", "true"); + } + } + + private static final Map KEYTAB_KERBEROS_OPTIONS = + new HashMap(); + static { + KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true"); + KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true"); + KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true"); + KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS); + } + + private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN = + new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(), + LoginModuleControlFlag.REQUIRED, + KEYTAB_KERBEROS_OPTIONS); + + private static final AppConfigurationEntry[] KEYTAB_KERBEROS_CONF = + new AppConfigurationEntry[]{KEYTAB_KERBEROS_LOGIN}; + + private javax.security.auth.login.Configuration baseConfig; + private final String loginContextName; + private final boolean useTicketCache; + private final String keytabFile; + private final String principal; + + public JaasConfiguration(String loginContextName, String principal) { + this(loginContextName, principal, null, true); + } + + public JaasConfiguration(String loginContextName, String principal, String keytabFile) { + this(loginContextName, principal, keytabFile, keytabFile == null || keytabFile.length() == 0); + } + + private JaasConfiguration(String loginContextName, String principal, + String keytabFile, boolean useTicketCache) { + try { + this.baseConfig = 
javax.security.auth.login.Configuration.getConfiguration(); + } catch (SecurityException e) { + this.baseConfig = null; + } + this.loginContextName = loginContextName; + this.useTicketCache = useTicketCache; + this.keytabFile = keytabFile; + this.principal = principal; + LOG.info("JaasConfiguration loginContextName=" + loginContextName + + " principal=" + principal + " useTicketCache=" + useTicketCache + + " keytabFile=" + keytabFile); + } + + @Override + public AppConfigurationEntry[] getAppConfigurationEntry(String appName) { + if (loginContextName.equals(appName)) { + if (!useTicketCache) { + KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile); + KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true"); + } + KEYTAB_KERBEROS_OPTIONS.put("principal", principal); + KEYTAB_KERBEROS_OPTIONS.put("useTicketCache", useTicketCache ? "true" : "false"); + return KEYTAB_KERBEROS_CONF; + } + if (baseConfig != null) return baseConfig.getAppConfigurationEntry(appName); + return(null); + } + } + + // + // Helper methods + // + + /** + * Join the prefix znode name with the suffix znode name to generate a proper + * full znode name. + * + * Assumes prefix does not end with slash and suffix does not begin with it. + * + * @param prefix beginning of znode name + * @param suffix ending of znode name + * @return result of properly joining prefix with suffix + */ + public static String joinZNode(String prefix, String suffix) { + return prefix + ZNODE_PATH_SEPARATOR + suffix; + } + + /** + * Returns the full path of the immediate parent of the specified node. + * @param node path to get parent of + * @return parent of path, null if passed the root node or an invalid node + */ + public static String getParent(String node) { + int idx = node.lastIndexOf(ZNODE_PATH_SEPARATOR); + return idx <= 0 ? null : node.substring(0, idx); + } + + /** + * Get the name of the current node from the specified fully-qualified path. 
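+ *
+ * <p>Together with {@link #joinZNode(String, String)} and {@link #getParent(String)} this is
+ * plain string manipulation, e.g. (illustrative paths):
+ * <pre>
+ *   ZKUtil.joinZNode("/hbase", "rs");               // "/hbase/rs"
+ *   ZKUtil.getParent("/hbase/rs");                  // "/hbase"
+ *   ZKUtil.getNodeName("/hbase/rs/host,60020,1");   // "host,60020,1"
+ * </pre>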
+ * @param path fully-qualified path + * @return name of the current node + */ + public static String getNodeName(String path) { + return path.substring(path.lastIndexOf("/")+1); + } + + /** + * Get the key to the ZK ensemble for this configuration without + * adding a name at the end + * @param conf Configuration to use to build the key + * @return ensemble key without a name + */ + public static String getZooKeeperClusterKey(Configuration conf) { + return getZooKeeperClusterKey(conf, null); + } + + /** + * Get the key to the ZK ensemble for this configuration and append + * a name at the end + * @param conf Configuration to use to build the key + * @param name Name that should be appended at the end if not empty or null + * @return ensemble key with a name (if any) + */ + public static String getZooKeeperClusterKey(Configuration conf, String name) { + String ensemble = conf.get(HConstants.ZOOKEEPER_QUORUM.replaceAll( + "[\\t\\n\\x0B\\f\\r]", "")); + StringBuilder builder = new StringBuilder(ensemble); + builder.append(":"); + builder.append(conf.get(HConstants.ZOOKEEPER_CLIENT_PORT)); + builder.append(":"); + builder.append(conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); + if (name != null && !name.isEmpty()) { + builder.append(","); + builder.append(name); + } + return builder.toString(); + } + + /** + * Apply the settings in the given key to the given configuration, this is + * used to communicate with distant clusters + * @param conf configuration object to configure + * @param key string that contains the 3 required configuratins + * @throws IOException + */ + public static void applyClusterKeyToConf(Configuration conf, String key) + throws IOException{ + String[] parts = transformClusterKey(key); + conf.set(HConstants.ZOOKEEPER_QUORUM, parts[0]); + conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, parts[1]); + conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parts[2]); + } + + /** + * Separate the given key into the three configurations it should contain: + * hbase.zookeeper.quorum, hbase.zookeeper.client.port + * and zookeeper.znode.parent + * @param key + * @return the three configuration in the described order + * @throws IOException + */ + public static String[] transformClusterKey(String key) throws IOException { + String[] parts = key.split(":"); + if (parts.length != 3) { + throw new IOException("Cluster key invalid, the format should be:" + + HConstants.ZOOKEEPER_QUORUM + ":hbase.zookeeper.client.port:" + + HConstants.ZOOKEEPER_ZNODE_PARENT); + } + return parts; + } + + // + // Existence checks and watches + // + + /** + * Watch the specified znode for delete/create/change events. The watcher is + * set whether or not the node exists. If the node already exists, the method + * returns true. If the node does not exist, the method returns false. + * + * @param zkw zk reference + * @param znode path of node to watch + * @return true if znode exists, false if does not exist or error + * @throws KeeperException if unexpected zookeeper exception + */ + public static boolean watchAndCheckExists(ZooKeeperWatcher zkw, String znode) + throws KeeperException { + try { + Stat s = zkw.getRecoverableZooKeeper().exists(znode, zkw); + boolean exists = s != null ? true : false; + if (exists) { + LOG.debug(zkw.prefix("Set watcher on existing znode " + znode)); + } else { + LOG.debug(zkw.prefix(znode+" does not exist. 
Watcher is set.")); + } + return exists; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e); + zkw.keeperException(e); + return false; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e); + zkw.interruptedException(e); + return false; + } + } + + /** + * Check if the specified node exists. Sets no watches. + * + * @param zkw zk reference + * @param znode path of node to watch + * @return version of the node if it exists, -1 if does not exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static int checkExists(ZooKeeperWatcher zkw, String znode) + throws KeeperException { + try { + Stat s = zkw.getRecoverableZooKeeper().exists(znode, null); + return s != null ? s.getVersion() : -1; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to set watcher on znode (" + znode + ")"), e); + zkw.keeperException(e); + return -1; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to set watcher on znode (" + znode + ")"), e); + zkw.interruptedException(e); + return -1; + } + } + + // + // Znode listings + // + + /** + * Lists the children znodes of the specified znode. Also sets a watch on + * the specified znode which will capture a NodeDeleted event on the specified + * znode as well as NodeChildrenChanged if any children of the specified znode + * are created or deleted. + * + * Returns null if the specified node does not exist. Otherwise returns a + * list of children of the specified node. If the node exists but it has no + * children, an empty list will be returned. + * + * @param zkw zk reference + * @param znode path of node to list and watch children of + * @return list of children of the specified node, an empty list if the node + * exists but has no children, and null if the node does not exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static List listChildrenAndWatchForNewChildren( + ZooKeeperWatcher zkw, String znode) + throws KeeperException { + try { + List children = zkw.getRecoverableZooKeeper().getChildren(znode, zkw); + return children; + } catch(KeeperException.NoNodeException ke) { + LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " " + + "because node does not exist (not an error)")); + return null; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to list children of znode " + znode + " "), e); + zkw.keeperException(e); + return null; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to list children of znode " + znode + " "), e); + zkw.interruptedException(e); + return null; + } + } + + /** + * List all the children of the specified znode, setting a watch for children + * changes and also setting a watch on every individual child in order to get + * the NodeCreated and NodeDeleted events. + * @param zkw zookeeper reference + * @param znode node to get children of and watch + * @return list of znode names, null if the node doesn't exist + * @throws KeeperException + */ + public static List listChildrenAndWatchThem(ZooKeeperWatcher zkw, + String znode) throws KeeperException { + List children = listChildrenAndWatchForNewChildren(zkw, znode); + if (children == null) { + return null; + } + for (String child : children) { + watchAndCheckExists(zkw, joinZNode(znode, child)); + } + return children; + } + + /** + * Lists the children of the specified znode without setting any watches. 
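+ *
+ * <p>For example, the currently registered regionservers can be listed with (a sketch;
+ * {@code zkw} is a connected {@link ZooKeeperWatcher}):
+ * <pre>
+ *   List&lt;String&gt; rsNodes = ZKUtil.listChildrenNoWatch(zkw, zkw.rsZNode);
+ *   // null if the parent znode is missing, an empty list if it has no children
+ * </pre>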
+ * + * Used to list the currently online regionservers and their addresses. + * + * Sets no watches at all, this method is best effort. + * + * Returns an empty list if the node has no children. Returns null if the + * parent node itself does not exist. + * + * @param zkw zookeeper reference + * @param znode node to get children of as addresses + * @return list of data of children of specified znode, empty if no children, + * null if parent does not exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static List listChildrenNoWatch(ZooKeeperWatcher zkw, String znode) + throws KeeperException { + List children = null; + try { + // List the children without watching + children = zkw.getRecoverableZooKeeper().getChildren(znode, null); + } catch(KeeperException.NoNodeException nne) { + return null; + } catch(InterruptedException ie) { + zkw.interruptedException(ie); + } + return children; + } + + /** + * Simple class to hold a node path and node data. + * @deprecated Unused + */ + @Deprecated + public static class NodeAndData { + private String node; + private byte [] data; + public NodeAndData(String node, byte [] data) { + this.node = node; + this.data = data; + } + public String getNode() { + return node; + } + public byte [] getData() { + return data; + } + @Override + public String toString() { + return node; + } + public boolean isEmpty() { + return (data.length == 0); + } + } + + /** + * Checks if the specified znode has any children. Sets no watches. + * + * Returns true if the node exists and has children. Returns false if the + * node does not exist or if the node does not have any children. + * + * Used during master initialization to determine if the master is a + * failed-over-to master or the first master during initial cluster startup. + * If the directory for regionserver ephemeral nodes is empty then this is + * a cluster startup, if not then it is not cluster startup. + * + * @param zkw zk reference + * @param znode path of node to check for children of + * @return true if node has children, false if not or node does not exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static boolean nodeHasChildren(ZooKeeperWatcher zkw, String znode) + throws KeeperException { + try { + return !zkw.getRecoverableZooKeeper().getChildren(znode, null).isEmpty(); + } catch(KeeperException.NoNodeException ke) { + LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " " + + "because node does not exist (not an error)")); + return false; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e); + zkw.keeperException(e); + return false; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e); + zkw.interruptedException(e); + return false; + } + } + + /** + * Get the number of children of the specified node. + * + * If the node does not exist or has no children, returns 0. + * + * Sets no watches at all. + * + * @param zkw zk reference + * @param znode path of node to count children of + * @return number of children of specified node, 0 if none or parent does not + * exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static int getNumberOfChildren(ZooKeeperWatcher zkw, String znode) + throws KeeperException { + try { + Stat stat = zkw.getRecoverableZooKeeper().exists(znode, null); + return stat == null ? 
0 : stat.getNumChildren(); + } catch(KeeperException e) { + LOG.warn(zkw.prefix("Unable to get children of node " + znode)); + zkw.keeperException(e); + } catch(InterruptedException e) { + zkw.interruptedException(e); + } + return 0; + } + + // + // Data retrieval + // + + /** + * Get znode data. Does not set a watcher. + * @return ZNode data, null if the node does not exist or if there is an + * error. + */ + public static byte [] getData(ZooKeeperWatcher zkw, String znode) + throws KeeperException { + try { + byte [] data = zkw.getRecoverableZooKeeper().getData(znode, null, null); + logRetrievedMsg(zkw, znode, data, false); + return data; + } catch (KeeperException.NoNodeException e) { + LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + + "because node does not exist (not an error)")); + return null; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); + zkw.keeperException(e); + return null; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); + zkw.interruptedException(e); + return null; + } + } + + /** + * Get the data at the specified znode and set a watch. + * + * Returns the data and sets a watch if the node exists. Returns null and no + * watch is set if the node does not exist or there is an exception. + * + * @param zkw zk reference + * @param znode path of node + * @return data of the specified znode, or null + * @throws KeeperException if unexpected zookeeper exception + */ + public static byte [] getDataAndWatch(ZooKeeperWatcher zkw, String znode) + throws KeeperException { + return getDataInternal(zkw, znode, null, true); + } + + /** + * Get the data at the specified znode and set a watch. + * + * Returns the data and sets a watch if the node exists. Returns null and no + * watch is set if the node does not exist or there is an exception. + * + * @param zkw zk reference + * @param znode path of node + * @param stat object to populate the version of the znode + * @return data of the specified znode, or null + * @throws KeeperException if unexpected zookeeper exception + */ + public static byte[] getDataAndWatch(ZooKeeperWatcher zkw, String znode, + Stat stat) throws KeeperException { + return getDataInternal(zkw, znode, stat, true); + } + + private static byte[] getDataInternal(ZooKeeperWatcher zkw, String znode, Stat stat, + boolean watcherSet) + throws KeeperException { + try { + byte [] data = zkw.getRecoverableZooKeeper().getData(znode, zkw, stat); + logRetrievedMsg(zkw, znode, data, watcherSet); + return data; + } catch (KeeperException.NoNodeException e) { + // This log can get pretty annoying when we cycle on 100ms waits. + // Enable trace if you really want to see it. + LOG.trace(zkw.prefix("Unable to get data of znode " + znode + " " + + "because node does not exist (not an error)")); + return null; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); + zkw.keeperException(e); + return null; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); + zkw.interruptedException(e); + return null; + } + } + + /** + * Get the data at the specified znode without setting a watch. + * + * Returns the data if the node exists. Returns null if the node does not + * exist. + * + * Sets the stats of the node in the passed Stat object. Pass a null stat if + * not interested. 
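+ *
+ * <p>Combined with {@link #setData(ZooKeeperWatcher, String, byte[], int)} this allows a
+ * simple compare-and-set style update (a sketch; {@code zkw} and {@code znode} come from the
+ * caller and {@code transform} stands for any caller-defined transformation):
+ * <pre>
+ *   Stat stat = new Stat();
+ *   byte [] current = ZKUtil.getDataNoWatch(zkw, znode, stat);
+ *   byte [] updated = transform(current);
+ *   ZKUtil.setData(zkw, znode, updated, stat.getVersion());
+ * </pre>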
+ * + * @param zkw zk reference + * @param znode path of node + * @param stat node status to get if node exists + * @return data of the specified znode, or null if node does not exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static byte [] getDataNoWatch(ZooKeeperWatcher zkw, String znode, + Stat stat) + throws KeeperException { + try { + byte [] data = zkw.getRecoverableZooKeeper().getData(znode, null, stat); + logRetrievedMsg(zkw, znode, data, false); + return data; + } catch (KeeperException.NoNodeException e) { + LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + + "because node does not exist (not necessarily an error)")); + return null; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); + zkw.keeperException(e); + return null; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); + zkw.interruptedException(e); + return null; + } + } + + /** + * Returns the date of child znodes of the specified znode. Also sets a watch on + * the specified znode which will capture a NodeDeleted event on the specified + * znode as well as NodeChildrenChanged if any children of the specified znode + * are created or deleted. + * + * Returns null if the specified node does not exist. Otherwise returns a + * list of children of the specified node. If the node exists but it has no + * children, an empty list will be returned. + * + * @param zkw zk reference + * @param baseNode path of node to list and watch children of + * @return list of data of children of the specified node, an empty list if the node + * exists but has no children, and null if the node does not exist + * @throws KeeperException if unexpected zookeeper exception + * @deprecated Unused + */ + public static List getChildDataAndWatchForNewChildren( + ZooKeeperWatcher zkw, String baseNode) throws KeeperException { + List nodes = + ZKUtil.listChildrenAndWatchForNewChildren(zkw, baseNode); + List newNodes = new ArrayList(); + if (nodes != null) { + for (String node : nodes) { + String nodePath = ZKUtil.joinZNode(baseNode, node); + byte[] data = ZKUtil.getDataAndWatch(zkw, nodePath); + newNodes.add(new NodeAndData(nodePath, data)); + } + } + return newNodes; + } + + /** + * Update the data of an existing node with the expected version to have the + * specified data. + * + * Throws an exception if there is a version mismatch or some other problem. + * + * Sets no watches under any conditions. + * + * @param zkw zk reference + * @param znode + * @param data + * @param expectedVersion + * @throws KeeperException if unexpected zookeeper exception + * @throws KeeperException.BadVersionException if version mismatch + * @deprecated Unused + */ + public static void updateExistingNodeData(ZooKeeperWatcher zkw, String znode, + byte [] data, int expectedVersion) + throws KeeperException { + try { + zkw.getRecoverableZooKeeper().setData(znode, data, expectedVersion); + } catch(InterruptedException ie) { + zkw.interruptedException(ie); + } + } + + // + // Data setting + // + + /** + * Sets the data of the existing znode to be the specified data. Ensures that + * the current data has the specified expected version. + * + *
<p>
      If the node does not exist, a {@link NoNodeException} will be thrown. + * + *
<p>
If there is a version mismatch, a {@link KeeperException.BadVersionException} will be thrown. + * + *
<p>
      No watches are set but setting data will trigger other watchers of this + * node. + * + *
<p>
      If there is another problem, a KeeperException will be thrown. + * + * @param zkw zk reference + * @param znode path of node + * @param data data to set for node + * @param expectedVersion version expected when setting data + * @return true if data set, false if version mismatch + * @throws KeeperException if unexpected zookeeper exception + */ + public static boolean setData(ZooKeeperWatcher zkw, String znode, + byte [] data, int expectedVersion) + throws KeeperException, KeeperException.NoNodeException { + try { + return zkw.getRecoverableZooKeeper().setData(znode, data, expectedVersion) != null; + } catch (InterruptedException e) { + zkw.interruptedException(e); + return false; + } + } + + /** + * Set data into node creating node if it doesn't yet exist. + * Does not set watch. + * @param zkw zk reference + * @param znode path of node + * @param data data to set for node + * @throws KeeperException + */ + public static void createSetData(final ZooKeeperWatcher zkw, final String znode, + final byte [] data) + throws KeeperException { + if (checkExists(zkw, znode) == -1) { + ZKUtil.createWithParents(zkw, znode); + } + ZKUtil.setData(zkw, znode, data); + } + + /** + * Sets the data of the existing znode to be the specified data. The node + * must exist but no checks are done on the existing data or version. + * + *

If the node does not exist, a {@link NoNodeException} will be thrown. + * + * No watches are set but setting data will trigger other watchers of this + * node. + * + *
      If there is another problem, a KeeperException will be thrown. + * + * @param zkw zk reference + * @param znode path of node + * @param data data to set for node + * @throws KeeperException if unexpected zookeeper exception + */ + public static void setData(ZooKeeperWatcher zkw, String znode, byte [] data) + throws KeeperException, KeeperException.NoNodeException { + setData(zkw, znode, data, -1); + } + + /** + * Returns whether or not secure authentication is enabled + * (whether hbase.security.authentication is set to + * kerberos. + */ + public static boolean isSecureZooKeeper(Configuration conf) { + // hbase shell need to use: + // -Djava.security.auth.login.config=user-jaas.conf + // since each user has a different jaas.conf + if (System.getProperty("java.security.auth.login.config") != null) + return true; + + // Master & RSs uses hbase.zookeeper.client.* + return "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication")); + } + + private static ArrayList createACL(ZooKeeperWatcher zkw, String node) { + if (isSecureZooKeeper(zkw.getConfiguration())) { + // Certain znodes are accessed directly by the client, + // so they must be readable by non-authenticated clients + if ((node.equals(zkw.baseZNode) == true) || + (node.equals(zkw.rootServerZNode) == true) || + (node.equals(zkw.getMasterAddressZNode()) == true) || + (node.equals(zkw.clusterIdZNode) == true) || + (node.equals(zkw.rsZNode) == true) || + (node.equals(zkw.backupMasterAddressesZNode) == true) || + (node.startsWith(zkw.tableZNode) == true)) { + return ZooKeeperWatcher.CREATOR_ALL_AND_WORLD_READABLE; + } + return Ids.CREATOR_ALL_ACL; + } else { + return Ids.OPEN_ACL_UNSAFE; + } + } + + public static void waitForZKConnectionIfAuthenticating(ZooKeeperWatcher zkw) + throws InterruptedException { + if (isSecureZooKeeper(zkw.getConfiguration())) { + LOG.debug("Waiting for ZooKeeperWatcher to authenticate"); + zkw.saslLatch.await(); + LOG.debug("Done waiting."); + } + } + + // + // Node creation + // + + /** + * + * Set the specified znode to be an ephemeral node carrying the specified + * data. + * + * If the node is created successfully, a watcher is also set on the node. + * + * If the node is not created successfully because it already exists, this + * method will also set a watcher on the node. + * + * If there is another problem, a KeeperException will be thrown. + * + * @param zkw zk reference + * @param znode path of node + * @param data data of node + * @return true if node created, false if not, watch set in both cases + * @throws KeeperException if unexpected zookeeper exception + */ + public static boolean createEphemeralNodeAndWatch(ZooKeeperWatcher zkw, + String znode, byte [] data) + throws KeeperException { + try { + waitForZKConnectionIfAuthenticating(zkw); + zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), + CreateMode.EPHEMERAL); + } catch (KeeperException.NodeExistsException nee) { + if(!watchAndCheckExists(zkw, znode)) { + // It did exist but now it doesn't, try again + return createEphemeralNodeAndWatch(zkw, znode, data); + } + return false; + } catch (InterruptedException e) { + LOG.info("Interrupted", e); + Thread.currentThread().interrupt(); + } + return true; + } + + /** + * Creates the specified znode to be a persistent node carrying the specified + * data. + * + * Returns true if the node was successfully created, false if the node + * already existed. + * + * If the node is created successfully, a watcher is also set on the node. 
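As a usage sketch of the ephemeral helper above: the class name, znode path, and payload below are illustrative assumptions, not something this patch defines.

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

// Hypothetical example; assumes a live ZooKeeperWatcher created elsewhere.
public class EphemeralRegistrationExample {
  /** Publish an ephemeral presence node under the regionserver znode. */
  public static boolean register(ZooKeeperWatcher zkw, String serverName)
      throws KeeperException {
    String znode = ZKUtil.joinZNode(zkw.rsZNode, serverName);
    return ZKUtil.createEphemeralNodeAndWatch(zkw, znode, Bytes.toBytes(serverName));
  }
}

A false return means another process already holds the node; either way a watch is left on it, so the caller hears about a later deletion.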
+ * + * If the node is not created successfully because it already exists, this + * method will also set a watcher on the node but return false. + * + * If there is another problem, a KeeperException will be thrown. + * + * @param zkw zk reference + * @param znode path of node + * @param data data of node + * @return true if node created, false if not, watch set in both cases + * @throws KeeperException if unexpected zookeeper exception + */ + public static boolean createNodeIfNotExistsAndWatch( + ZooKeeperWatcher zkw, String znode, byte [] data) + throws KeeperException { + try { + waitForZKConnectionIfAuthenticating(zkw); + zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), + CreateMode.PERSISTENT); + } catch (KeeperException.NodeExistsException nee) { + try { + zkw.getRecoverableZooKeeper().exists(znode, zkw); + } catch (InterruptedException e) { + zkw.interruptedException(e); + return false; + } + return false; + } catch (InterruptedException e) { + zkw.interruptedException(e); + return false; + } + return true; + } + + /** + * Creates the specified node with the specified data and watches it. + * + *

Throws an exception if the node already exists. + * + * The node created is persistent and open access. + * + *
      Returns the version number of the created node if successful. + * + * @param zkw zk reference + * @param znode path of node to create + * @param data data of node to create + * @return version of node created + * @throws KeeperException if unexpected zookeeper exception + * @throws KeeperException.NodeExistsException if node already exists + */ + public static int createAndWatch(ZooKeeperWatcher zkw, + String znode, byte [] data) + throws KeeperException, KeeperException.NodeExistsException { + try { + waitForZKConnectionIfAuthenticating(zkw); + zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), + CreateMode.PERSISTENT); + return zkw.getRecoverableZooKeeper().exists(znode, zkw).getVersion(); + } catch (InterruptedException e) { + zkw.interruptedException(e); + return -1; + } + } + + /** + * Async creates the specified node with the specified data. + * + *

Throws an exception if the node already exists. + * + *
      The node created is persistent and open access. + * + * @param zkw zk reference + * @param znode path of node to create + * @param data data of node to create + * @param cb + * @param ctx + * @throws KeeperException if unexpected zookeeper exception + * @throws KeeperException.NodeExistsException if node already exists + */ + public static void asyncCreate(ZooKeeperWatcher zkw, + String znode, byte [] data, final AsyncCallback.StringCallback cb, + final Object ctx) { + try { + waitForZKConnectionIfAuthenticating(zkw); + zkw.getRecoverableZooKeeper().getZooKeeper().create(znode, data, + createACL(zkw, znode), CreateMode.PERSISTENT, cb, ctx); + } catch (InterruptedException e) { + zkw.interruptedException(e); + } + } + + /** + * Creates the specified node, if the node does not exist. Does not set a + * watch and fails silently if the node already exists. + * + * The node created is persistent and open access. + * + * @param zkw zk reference + * @param znode path of node + * @throws KeeperException if unexpected zookeeper exception + */ + public static void createAndFailSilent(ZooKeeperWatcher zkw, + String znode) + throws KeeperException { + try { + RecoverableZooKeeper zk = zkw.getRecoverableZooKeeper(); + waitForZKConnectionIfAuthenticating(zkw); + if (zk.exists(znode, false) == null) { + zk.create(znode, new byte[0], createACL(zkw,znode), + CreateMode.PERSISTENT); + } + } catch(KeeperException.NodeExistsException nee) { + } catch(KeeperException.NoAuthException nee){ + try { + if (null == zkw.getRecoverableZooKeeper().exists(znode, false)) { + // If we failed to create the file and it does not already exist. + throw(nee); + } + } catch (InterruptedException ie) { + zkw.interruptedException(ie); + } + + } catch(InterruptedException ie) { + zkw.interruptedException(ie); + } + } + + /** + * Creates the specified node and all parent nodes required for it to exist. + * + * No watches are set and no errors are thrown if the node already exists. + * + * The nodes created are persistent and open access. + * + * @param zkw zk reference + * @param znode path of node + * @throws KeeperException if unexpected zookeeper exception + */ + public static void createWithParents(ZooKeeperWatcher zkw, String znode) + throws KeeperException { + try { + if(znode == null) { + return; + } + waitForZKConnectionIfAuthenticating(zkw); + zkw.getRecoverableZooKeeper().create(znode, new byte[0], createACL(zkw, znode), + CreateMode.PERSISTENT); + } catch(KeeperException.NodeExistsException nee) { + return; + } catch(KeeperException.NoNodeException nne) { + createWithParents(zkw, getParent(znode)); + createWithParents(zkw, znode); + } catch(InterruptedException ie) { + zkw.interruptedException(ie); + } + } + + // + // Deletes + // + + /** + * Delete the specified node. Sets no watches. Throws all exceptions. + */ + public static void deleteNode(ZooKeeperWatcher zkw, String node) + throws KeeperException { + deleteNode(zkw, node, -1); + } + + /** + * Delete the specified node with the specified version. Sets no watches. + * Throws all exceptions. + */ + public static boolean deleteNode(ZooKeeperWatcher zkw, String node, + int version) + throws KeeperException { + try { + zkw.getRecoverableZooKeeper().delete(node, version); + return true; + } catch(KeeperException.BadVersionException bve) { + return false; + } catch(InterruptedException ie) { + zkw.interruptedException(ie); + return false; + } + } + + /** + * Deletes the specified node. Fails silent if the node does not exist. 
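A minimal sketch that ties the creation and deletion helpers together; the helper class and the znode path are invented for illustration, only the ZKUtil calls come from this patch.

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

// Hypothetical round trip: create a znode (and any missing parents), write to it, then remove it.
public class ZNodeLifecycleExample {
  public static void roundTrip(ZooKeeperWatcher zkw) throws KeeperException {
    String znode = ZKUtil.joinZNode(zkw.baseZNode, "example"); // illustrative path
    ZKUtil.createWithParents(zkw, znode);            // no error if it already exists
    ZKUtil.setData(zkw, znode, Bytes.toBytes("v1")); // node must exist at this point
    ZKUtil.deleteNodeFailSilent(zkw, znode);         // no error if it is already gone
  }
}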
+ * @param zkw + * @param node + * @throws KeeperException + */ + public static void deleteNodeFailSilent(ZooKeeperWatcher zkw, String node) + throws KeeperException { + try { + zkw.getRecoverableZooKeeper().delete(node, -1); + } catch(KeeperException.NoNodeException nne) { + } catch(InterruptedException ie) { + zkw.interruptedException(ie); + } + } + + /** + * Delete the specified node and all of it's children. + *

+ * If the node does not exist, just returns. + *
      + * Sets no watches. Throws all exceptions besides dealing with deletion of + * children. + */ + public static void deleteNodeRecursively(ZooKeeperWatcher zkw, String node) + throws KeeperException { + try { + List children = ZKUtil.listChildrenNoWatch(zkw, node); + // the node is already deleted, so we just finish + if (children == null) return; + + if(!children.isEmpty()) { + for(String child : children) { + deleteNodeRecursively(zkw, joinZNode(node, child)); + } + } + zkw.getRecoverableZooKeeper().delete(node, -1); + } catch(InterruptedException ie) { + zkw.interruptedException(ie); + } + } + + /** + * Delete all the children of the specified node but not the node itself. + * + * Sets no watches. Throws all exceptions besides dealing with deletion of + * children. + */ + public static void deleteChildrenRecursively(ZooKeeperWatcher zkw, String node) + throws KeeperException { + List children = ZKUtil.listChildrenNoWatch(zkw, node); + if (children == null || children.isEmpty()) return; + for(String child : children) { + deleteNodeRecursively(zkw, joinZNode(node, child)); + } + } + + // + // ZooKeeper cluster information + // + + /** @return String dump of everything in ZooKeeper. */ + public static String dump(ZooKeeperWatcher zkw) { + StringBuilder sb = new StringBuilder(); + try { + sb.append("HBase is rooted at ").append(zkw.baseZNode); + sb.append("\nActive master address: "); + try { + sb.append(MasterAddressTracker.getMasterAddress(zkw)); + } catch (IOException e) { + sb.append("<>"); + } + sb.append("\nBackup master addresses:"); + for (String child : listChildrenNoWatch(zkw, + zkw.backupMasterAddressesZNode)) { + sb.append("\n ").append(child); + } + sb.append("\nRegion server holding ROOT: " + RootRegionTracker.getRootRegionLocation(zkw)); + sb.append("\nRegion servers:"); + for (String child : listChildrenNoWatch(zkw, zkw.rsZNode)) { + sb.append("\n ").append(child); + } + sb.append("\nQuorum Server Statistics:"); + String[] servers = zkw.getQuorum().split(","); + for (String server : servers) { + sb.append("\n ").append(server); + try { + String[] stat = getServerStats(server, ZKUtil.zkDumpConnectionTimeOut); + + if (stat == null) { + sb.append("[Error] invalid quorum server: " + server); + break; + } + + for (String s : stat) { + sb.append("\n ").append(s); + } + } catch (Exception e) { + sb.append("\n ERROR: ").append(e.getMessage()); + } + } + } catch (KeeperException ke) { + sb.append("\nFATAL ZooKeeper Exception!\n"); + sb.append("\n" + ke.getMessage()); + } + return sb.toString(); + } + + /** + * Gets the statistics from the given server. + * + * @param server The server to get the statistics from. + * @param timeout The socket timeout to use. + * @return The array of response strings. + * @throws IOException When the socket communication fails. + */ + public static String[] getServerStats(String server, int timeout) + throws IOException { + String[] sp = server.split(":"); + if (sp == null || sp.length == 0) { + return null; + } + + String host = sp[0]; + int port = sp.length > 1 ? 
Integer.parseInt(sp[1]) + : HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT; + + Socket socket = new Socket(); + InetSocketAddress sockAddr = new InetSocketAddress(host, port); + socket.connect(sockAddr, timeout); + + socket.setSoTimeout(timeout); + PrintWriter out = new PrintWriter(socket.getOutputStream(), true); + BufferedReader in = new BufferedReader(new InputStreamReader( + socket.getInputStream())); + out.println("stat"); + out.flush(); + ArrayList res = new ArrayList(); + while (true) { + String line = in.readLine(); + if (line != null) { + res.add(line); + } else { + break; + } + } + socket.close(); + return res.toArray(new String[res.size()]); + } + + private static void logRetrievedMsg(final ZooKeeperWatcher zkw, + final String znode, final byte [] data, final boolean watcherSet) { + if (!LOG.isDebugEnabled()) return; + LOG.debug(zkw.prefix("Retrieved " + ((data == null)? 0: data.length) + + " byte(s) of data from znode " + znode + + (watcherSet? " and set watcher; ": ";"))); + } + + private static String getServerNameOrEmptyString(final byte [] data) { + try { + return ServerName.parseFrom(data).toString(); + } catch (DeserializationException e) { + return ""; + } + } + + /** + * Waits for HBase installation's base (parent) znode to become available. + * @throws IOException on ZK errors + */ + public static void waitForBaseZNode(Configuration conf) throws IOException { + LOG.info("Waiting until the base znode is available"); + String parentZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, + HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + ZooKeeper zk = new ZooKeeper(ZKConfig.getZKQuorumServersString(conf), + conf.getInt(HConstants.ZK_SESSION_TIMEOUT, + HConstants.DEFAULT_ZK_SESSION_TIMEOUT), EmptyWatcher.instance); + + final int maxTimeMs = 10000; + final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS; + + KeeperException keeperEx = null; + try { + try { + for (int attempt = 0; attempt < maxNumAttempts; ++attempt) { + try { + if (zk.exists(parentZNode, false) != null) { + LOG.info("Parent znode exists: " + parentZNode); + keeperEx = null; + break; + } + } catch (KeeperException e) { + keeperEx = e; + } + Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS); + } + } finally { + zk.close(); + } + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + + if (keeperEx != null) { + throw new IOException(keeperEx); + } + } + + + public static byte[] blockUntilAvailable( + final ZooKeeperWatcher zkw, final String znode, final long timeout) + throws InterruptedException { + if (timeout < 0) throw new IllegalArgumentException(); + if (zkw == null) throw new IllegalArgumentException(); + if (znode == null) throw new IllegalArgumentException(); + + byte[] data = null; + boolean finished = false; + final long endTime = System.currentTimeMillis() + timeout; + while (!finished) { + try { + data = ZKUtil.getData(zkw, znode); + } catch(KeeperException e) { + LOG.warn("Unexpected exception handling blockUntilAvailable", e); + } + + if (data == null && (System.currentTimeMillis() + + HConstants.SOCKET_RETRY_WAIT_MS < endTime)) { + Thread.sleep(HConstants.SOCKET_RETRY_WAIT_MS); + } else { + finished = true; + } + } + + return data; + } + + + /** + * Convert a {@link DeserializationException} to a more palatable {@link KeeperException}. + * Used when can't let a {@link DeserializationException} out w/o changing public API. 
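For callers that only need to wait until a znode has data, blockUntilAvailable above is enough on its own. A small sketch, assuming an existing ZooKeeperWatcher and an arbitrary 30 second budget:

import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

// Hypothetical helper: wait up to 30 seconds for the znode's data to appear.
public class BlockUntilAvailableExample {
  public static byte [] waitForData(ZooKeeperWatcher zkw, String znode)
      throws InterruptedException {
    // Returns null if no data shows up within the timeout.
    return ZKUtil.blockUntilAvailable(zkw, znode, 30000);
  }
}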
+ * @param e Exception to convert + * @return Converted exception + */ + public static KeeperException convert(final DeserializationException e) { + KeeperException ke = new KeeperException.DataInconsistencyException(); + ke.initCause(e); + return ke; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperListener.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperListener.java new file mode 100644 index 0000000..e743e88 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperListener.java @@ -0,0 +1,82 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.zookeeper; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + + +/** + * Base class for internal listeners of ZooKeeper events. + * + * The {@link ZooKeeperWatcher} for a process will execute the appropriate + * methods of implementations of this class. In order to receive events from + * the watcher, every listener must register itself via {@link ZooKeeperWatcher#registerListener}. + * + * Subclasses need only override those methods in which they are interested. + * + * Note that the watcher will be blocked when invoking methods in listeners so + * they must not be long-running. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class ZooKeeperListener { + + // Reference to the zk watcher which also contains configuration and constants + protected ZooKeeperWatcher watcher; + + /** + * Construct a ZooKeeper event listener. + */ + public ZooKeeperListener(ZooKeeperWatcher watcher) { + this.watcher = watcher; + } + + /** + * Called when a new node has been created. + * @param path full path of the new node + */ + public void nodeCreated(String path) { + // no-op + } + + /** + * Called when a node has been deleted + * @param path full path of the deleted node + */ + public void nodeDeleted(String path) { + // no-op + } + + /** + * Called when an existing node has changed data. + * @param path full path of the updated node + */ + public void nodeDataChanged(String path) { + // no-op + } + + /** + * Called when an existing node has a child node added or removed. 
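A listener normally overrides only the callbacks it cares about and is registered with the watcher once. The sketch below is hypothetical; only the ZooKeeperListener and ZooKeeperWatcher types come from this patch.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

// Hypothetical listener that reacts only to child changes under one parent znode.
// Registered with: watcher.registerListener(new ChildChangeLogger(watcher, watcher.rsZNode));
public class ChildChangeLogger extends ZooKeeperListener {
  private static final Log LOG = LogFactory.getLog(ChildChangeLogger.class);
  private final String parentZNode;

  public ChildChangeLogger(ZooKeeperWatcher watcher, String parentZNode) {
    super(watcher);
    this.parentZNode = parentZNode;
  }

  @Override
  public void nodeChildrenChanged(String path) {
    // Keep this quick: the watcher is blocked while listeners run.
    if (parentZNode.equals(path)) {
      LOG.info("Children changed under " + path);
    }
  }
}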
+ * @param path full path of the node whose children have changed + */ + public void nodeChildrenChanged(String path) { + // no-op + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java new file mode 100644 index 0000000..723fd77 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java @@ -0,0 +1,255 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.zookeeper; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.Abortable; +import org.apache.zookeeper.KeeperException; + +/** + * Tracks the availability and value of a single ZooKeeper node. + * + *

Utilizes the {@link ZooKeeperListener} interface to get the necessary + * ZooKeeper events related to the node. + * + *
      This is the base class used by trackers in both the Master and + * RegionServers. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { + + static final Log LOG = LogFactory.getLog(ZooKeeperNodeTracker.class); + /** Path of node being tracked */ + protected final String node; + + /** Data of the node being tracked */ + private byte [] data; + + /** Used to abort if a fatal error occurs */ + protected final Abortable abortable; + + private boolean stopped = false; + + /** + * Constructs a new ZK node tracker. + * + *

      After construction, use {@link #start} to kick off tracking. + * + * @param watcher + * @param node + * @param abortable + */ + public ZooKeeperNodeTracker(ZooKeeperWatcher watcher, String node, + Abortable abortable) { + super(watcher); + this.node = node; + this.abortable = abortable; + this.data = null; + } + + /** + * Starts the tracking of the node in ZooKeeper. + * + *

      Use {@link #blockUntilAvailable()} to block until the node is available + * or {@link #getData(boolean)} to get the data of the node if it is available. + */ + public synchronized void start() { + try { + ZKUtil.waitForZKConnectionIfAuthenticating(watcher); + } catch (InterruptedException e) { + throw new IllegalStateException("ZookeeperNodeTracker on " + this.node + + " interuppted while waiting for SASL Authentication", e); + } + this.watcher.registerListener(this); + try { + if(ZKUtil.watchAndCheckExists(watcher, node)) { + byte [] data = ZKUtil.getDataAndWatch(watcher, node); + if(data != null) { + this.data = data; + } else { + // It existed but now does not, try again to ensure a watch is set + LOG.debug("Try starting again because there is no data from " + node); + start(); + } + } + } catch (KeeperException e) { + abortable.abort("Unexpected exception during initialization, aborting", e); + } + } + + public synchronized void stop() { + this.stopped = true; + notifyAll(); + } + + /** + * Gets the data of the node, blocking until the node is available. + * + * @return data of the node + * @throws InterruptedException if the waiting thread is interrupted + */ + public synchronized byte [] blockUntilAvailable() + throws InterruptedException { + return blockUntilAvailable(0, false); + } + + /** + * Gets the data of the node, blocking until the node is available or the + * specified timeout has elapsed. + * + * @param timeout maximum time to wait for the node data to be available, + * n milliseconds. Pass 0 for no timeout. + * @return data of the node + * @throws InterruptedException if the waiting thread is interrupted + */ + public synchronized byte [] blockUntilAvailable(long timeout, boolean refresh) + throws InterruptedException { + if (timeout < 0) throw new IllegalArgumentException(); + boolean notimeout = timeout == 0; + long startTime = System.currentTimeMillis(); + long remaining = timeout; + if (refresh) { + try { + // This does not create a watch if the node does not exists + this.data = ZKUtil.getDataAndWatch(watcher, node); + } catch(KeeperException e) { + // We use to abort here, but in some cases the abort is ignored ( + // (empty Abortable), so it's better to log... + LOG.warn("Unexpected exception handling blockUntilAvailable", e); + abortable.abort("Unexpected exception handling blockUntilAvailable", e); + } + } + boolean nodeExistsChecked = (!refresh ||data!=null); + while (!this.stopped && (notimeout || remaining > 0) && this.data == null) { + if (!nodeExistsChecked) { + try { + nodeExistsChecked = (ZKUtil.checkExists(watcher, node) != -1); + } catch (KeeperException e) { + LOG.warn( + "Got exception while trying to check existence in ZooKeeper" + + " of the node: "+node+", retrying if timeout not reached",e ); + } + + // It did not exists, and now it does. + if (nodeExistsChecked){ + LOG.info("Node "+node+" now exists, resetting a watcher"); + try { + // This does not create a watch if the node does not exists + this.data = ZKUtil.getDataAndWatch(watcher, node); + } catch (KeeperException e) { + LOG.warn("Unexpected exception handling blockUntilAvailable", e); + abortable.abort("Unexpected exception handling blockUntilAvailable", e); + } + } + } + // We expect a notification; but we wait with a + // a timeout to lower the impact of a race condition if any + wait(100); + remaining = timeout - (System.currentTimeMillis() - startTime); + } + return this.data; + } + + /** + * Gets the data of the node. + * + *
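Because the base class carries all of the tracking logic, a concrete tracker usually adds little more than a constructor. A sketch, with the subclass name and the call site invented for illustration:

import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

// Hypothetical tracker for a single znode, in the style of the master/regionserver trackers.
public class ExampleNodeTracker extends ZooKeeperNodeTracker {
  public ExampleNodeTracker(ZooKeeperWatcher watcher, String node, Abortable abortable) {
    super(watcher, node, abortable);
  }
}

// Typical call site (sketch): construct, start(), then block for the data.
//   ExampleNodeTracker tracker = new ExampleNodeTracker(zkw, zkw.clusterIdZNode, abortable);
//   tracker.start();
//   byte [] data = tracker.blockUntilAvailable(10000, false); // wait at most 10s, no forced refresh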

      If the node is currently available, the most up-to-date known version of + * the data is returned. If the node is not currently available, null is + * returned. + * @param refresh whether to refresh the data by calling ZK directly. + * @return data of the node, null if unavailable + */ + public synchronized byte [] getData(boolean refresh) { + if (refresh) { + try { + this.data = ZKUtil.getDataAndWatch(watcher, node); + } catch(KeeperException e) { + abortable.abort("Unexpected exception handling getData", e); + } + } + return this.data; + } + + public String getNode() { + return this.node; + } + + @Override + public synchronized void nodeCreated(String path) { + if (!path.equals(node)) return; + try { + byte [] data = ZKUtil.getDataAndWatch(watcher, node); + if (data != null) { + this.data = data; + notifyAll(); + } else { + nodeDeleted(path); + } + } catch(KeeperException e) { + abortable.abort("Unexpected exception handling nodeCreated event", e); + } + } + + @Override + public synchronized void nodeDeleted(String path) { + if(path.equals(node)) { + try { + if(ZKUtil.watchAndCheckExists(watcher, node)) { + nodeCreated(path); + } else { + this.data = null; + } + } catch(KeeperException e) { + abortable.abort("Unexpected exception handling nodeDeleted event", e); + } + } + } + + @Override + public synchronized void nodeDataChanged(String path) { + if(path.equals(node)) { + nodeCreated(path); + } + } + + /** + * Checks if the baseznode set as per the property 'zookeeper.znode.parent' + * exists. + * @return true if baseznode exists. + * false if doesnot exists. + */ + public boolean checkIfBaseNodeAvailable() { + try { + if (ZKUtil.checkExists(watcher, watcher.baseZNode) == -1) { + return false; + } + } catch (KeeperException e) { + abortable + .abort( + "Exception while checking if basenode ("+watcher.baseZNode+ + ") exists in ZooKeeper.", + e); + } + return true; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java new file mode 100644 index 0000000..128a0d9 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -0,0 +1,474 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.zookeeper; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.data.ACL; + +/** + * Acts as the single ZooKeeper Watcher. One instance of this is instantiated + * for each Master, RegionServer, and client process. + * + *

This is the only class that implements {@link Watcher}. Other internal + * classes which need to be notified of ZooKeeper events must register with + * the local instance of this watcher via {@link #registerListener}. + * + *
      This class also holds and manages the connection to ZooKeeper. Code to + * deal with connection related events and exceptions are handled here. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { + private static final Log LOG = LogFactory.getLog(ZooKeeperWatcher.class); + + // Identifier for this watcher (for logging only). It is made of the prefix + // passed on construction and the zookeeper sessionid. + private String identifier; + + // zookeeper quorum + private String quorum; + + // zookeeper connection + private RecoverableZooKeeper recoverableZooKeeper; + + // abortable in case of zk failure + protected Abortable abortable; + + // listeners to be notified + private final List listeners = + new CopyOnWriteArrayList(); + + // Used by ZKUtil:waitForZKConnectionIfAuthenticating to wait for SASL + // negotiation to complete + public CountDownLatch saslLatch = new CountDownLatch(1); + + // node names + + // base znode for this cluster + public String baseZNode; + // znode containing location of server hosting root region + public String rootServerZNode; + // znode containing ephemeral nodes of the regionservers + public String rsZNode; + // znode containing ephemeral nodes of the draining regionservers + public String drainingZNode; + // znode of currently active master + private String masterAddressZNode; + // znode of this master in backup master directory, if not the active master + public String backupMasterAddressesZNode; + // znode containing the current cluster state + public String clusterStateZNode; + // znode used for region transitioning and assignment + public String assignmentZNode; + // znode used for table disabling/enabling + public String tableZNode; + // znode containing the unique cluster ID + public String clusterIdZNode; + // znode used for log splitting work assignment + public String splitLogZNode; + // znode containing the state of the load balancer + public String balancerZNode; + + // Certain ZooKeeper nodes need to be world-readable + public static final ArrayList CREATOR_ALL_AND_WORLD_READABLE = + new ArrayList() { { + add(new ACL(ZooDefs.Perms.READ,ZooDefs.Ids.ANYONE_ID_UNSAFE)); + add(new ACL(ZooDefs.Perms.ALL,ZooDefs.Ids.AUTH_IDS)); + }}; + + private final Configuration conf; + + private final Exception constructorCaller; + + /** + * Instantiate a ZooKeeper connection and watcher. + * @param descriptor Descriptive string that is added to zookeeper sessionid + * and used as identifier for this instance. + * @throws IOException + * @throws ZooKeeperConnectionException + */ + public ZooKeeperWatcher(Configuration conf, String descriptor, + Abortable abortable) throws ZooKeeperConnectionException, IOException { + this(conf, descriptor, abortable, false); + } + /** + * Instantiate a ZooKeeper connection and watcher. + * @param descriptor Descriptive string that is added to zookeeper sessionid + * and used as identifier for this instance. + * @throws IOException + * @throws ZooKeeperConnectionException + */ + public ZooKeeperWatcher(Configuration conf, String descriptor, + Abortable abortable, boolean canCreateBaseZNode) + throws IOException, ZooKeeperConnectionException { + this.conf = conf; + // Capture a stack trace now. Will print it out later if problem so we can + // distingush amongst the myriad ZKWs. 
+ try { + throw new Exception("ZKW CONSTRUCTOR STACK TRACE FOR DEBUGGING"); + } catch (Exception e) { + this.constructorCaller = e; + } + this.quorum = ZKConfig.getZKQuorumServersString(conf); + // Identifier will get the sessionid appended later below down when we + // handle the syncconnect event. + this.identifier = descriptor; + this.abortable = abortable; + setNodeNames(conf); + this.recoverableZooKeeper = ZKUtil.connect(conf, quorum, this, descriptor); + if (canCreateBaseZNode) { + createBaseZNodes(); + } + } + + private void createBaseZNodes() throws ZooKeeperConnectionException { + try { + // Create all the necessary "directories" of znodes + ZKUtil.createAndFailSilent(this, baseZNode); + ZKUtil.createAndFailSilent(this, assignmentZNode); + ZKUtil.createAndFailSilent(this, rsZNode); + ZKUtil.createAndFailSilent(this, drainingZNode); + ZKUtil.createAndFailSilent(this, tableZNode); + ZKUtil.createAndFailSilent(this, splitLogZNode); + ZKUtil.createAndFailSilent(this, backupMasterAddressesZNode); + } catch (KeeperException e) { + throw new ZooKeeperConnectionException( + prefix("Unexpected KeeperException creating base node"), e); + } + } + + @Override + public String toString() { + return this.identifier; + } + + /** + * Adds this instance's identifier as a prefix to the passed str + * @param str String to amend. + * @return A new string with this instance's identifier as prefix: e.g. + * if passed 'hello world', the returned string could be + */ + public String prefix(final String str) { + return this.toString() + " " + str; + } + + /** + * Set the local variable node names using the specified configuration. + */ + private void setNodeNames(Configuration conf) { + baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, + HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + rootServerZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.rootserver", "root-region-server")); + rsZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.rs", "rs")); + drainingZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.draining.rs", "draining")); + masterAddressZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.master", "master")); + backupMasterAddressesZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.backup.masters", "backup-masters")); + clusterStateZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.state", "shutdown")); + assignmentZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.unassigned", "unassigned")); + tableZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.tableEnableDisable", "table")); + clusterIdZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.clusterId", "hbaseid")); + splitLogZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.splitlog", HConstants.SPLIT_LOGDIR_NAME)); + balancerZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.balancer", "balancer")); + } + + /** + * Register the specified listener to receive ZooKeeper events. + * @param listener + */ + public void registerListener(ZooKeeperListener listener) { + listeners.add(listener); + } + + /** + * Register the specified listener to receive ZooKeeper events and add it as + * the first in the list of current listeners. + * @param listener + */ + public void registerListenerFirst(ZooKeeperListener listener) { + listeners.add(0, listener); + } + + /** + * Get the connection to ZooKeeper. 
+ * @return connection reference to zookeeper + */ + public RecoverableZooKeeper getRecoverableZooKeeper() { + return recoverableZooKeeper; + } + + public void reconnectAfterExpiration() throws IOException, InterruptedException { + recoverableZooKeeper.reconnectAfterExpiration(); + } + + /** + * Get the quorum address of this instance. + * @return quorum string of this zookeeper connection instance + */ + public String getQuorum() { + return quorum; + } + + /** + * Method called from ZooKeeper for events and connection status. + *
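A short lifecycle sketch for the watcher itself, assuming a Configuration and an Abortable supplied by the caller; the helper class and descriptor string are illustrative.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

// Hypothetical helper: open a watcher, report the quorum it connected to, and close it again.
public class WatcherLifecycleExample {
  public static String connectAndDescribe(Configuration conf, Abortable abortable)
      throws ZooKeeperConnectionException, IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "example-client", abortable);
    try {
      return zkw.getQuorum();
    } finally {
      zkw.close();
    }
  }
}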

      + * Valid events are passed along to listeners. Connection status changes + * are dealt with locally. + */ + @Override + public void process(WatchedEvent event) { + LOG.debug(prefix("Received ZooKeeper Event, " + + "type=" + event.getType() + ", " + + "state=" + event.getState() + ", " + + "path=" + event.getPath())); + + switch(event.getType()) { + + // If event type is NONE, this is a connection status change + case None: { + connectionEvent(event); + break; + } + + // Otherwise pass along to the listeners + + case NodeCreated: { + for(ZooKeeperListener listener : listeners) { + listener.nodeCreated(event.getPath()); + } + break; + } + + case NodeDeleted: { + for(ZooKeeperListener listener : listeners) { + listener.nodeDeleted(event.getPath()); + } + break; + } + + case NodeDataChanged: { + for(ZooKeeperListener listener : listeners) { + listener.nodeDataChanged(event.getPath()); + } + break; + } + + case NodeChildrenChanged: { + for(ZooKeeperListener listener : listeners) { + listener.nodeChildrenChanged(event.getPath()); + } + break; + } + } + } + + // Connection management + + /** + * Called when there is a connection-related event via the Watcher callback. + *

+ * If Disconnected or Expired, this should shut down the cluster. But, since + * we send a KeeperException.SessionExpiredException along with the abort + * call, it's possible for the Abortable to catch it and try to create a new + * session with ZooKeeper. This is what the client does in HCM. + *
      + * @param event + */ + private void connectionEvent(WatchedEvent event) { + switch(event.getState()) { + case SyncConnected: + // Now, this callback can be invoked before the this.zookeeper is set. + // Wait a little while. + long finished = System.currentTimeMillis() + + this.conf.getLong("hbase.zookeeper.watcher.sync.connected.wait", 2000); + while (System.currentTimeMillis() < finished) { + Threads.sleep(1); + if (this.recoverableZooKeeper != null) break; + } + if (this.recoverableZooKeeper == null) { + LOG.error("ZK is null on connection event -- see stack trace " + + "for the stack trace when constructor was called on this zkw", + this.constructorCaller); + throw new NullPointerException("ZK is null"); + } + this.identifier = this.identifier + "-0x" + + Long.toHexString(this.recoverableZooKeeper.getSessionId()); + // Update our identifier. Otherwise ignore. + LOG.debug(this.identifier + " connected"); + break; + + case SaslAuthenticated: + if (ZKUtil.isSecureZooKeeper(this.conf)) { + // We are authenticated, clients can proceed. + saslLatch.countDown(); + } + break; + + case AuthFailed: + if (ZKUtil.isSecureZooKeeper(this.conf)) { + // We could not be authenticated, but clients should proceed anyway. + // Only access to znodes that require SASL authentication will be + // denied. The client may never need to access them. + saslLatch.countDown(); + } + break; + + // Abort the server if Disconnected or Expired + case Disconnected: + LOG.debug(prefix("Received Disconnected from ZooKeeper, ignoring")); + break; + + case Expired: + if (ZKUtil.isSecureZooKeeper(this.conf)) { + // We consider Expired equivalent to AuthFailed for this + // connection. Authentication is never going to complete. The + // client should proceed to do cleanup. + saslLatch.countDown(); + } + String msg = prefix(this.identifier + " received expired from " + + "ZooKeeper, aborting"); + // TODO: One thought is to add call to ZooKeeperListener so say, + // ZooKeeperNodeTracker can zero out its data values. + if (this.abortable != null) this.abortable.abort(msg, + new KeeperException.SessionExpiredException()); + break; + + case ConnectedReadOnly: + break; + + default: + throw new IllegalStateException("Received event is not valid."); + } + } + + /** + * Forces a synchronization of this ZooKeeper client connection. + *

+ * Executing this method before running other methods will ensure that the + * subsequent operations are up-to-date and consistent as of the time that + * the sync is complete. + *
      + * This is used for compareAndSwap type operations where we need to read the + * data of an existing node and delete or transition that node, utilizing the + * previously read version and data. We want to ensure that the version read + * is up-to-date from when we begin the operation. + */ + public void sync(String path) { + this.recoverableZooKeeper.sync(path, null, null); + } + + /** + * Handles KeeperExceptions in client calls. + *
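The compare-and-swap pattern that the sync javadoc above describes can be sketched with the ZKUtil helpers from earlier in this patch; the helper class below is hypothetical.

import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.Stat;

// Hypothetical versioned update: sync first, read data plus version, then write against that version.
public class VersionedUpdateExample {
  public static boolean replace(ZooKeeperWatcher zkw, String znode, byte [] newData)
      throws KeeperException {
    zkw.sync(znode);                                  // catch this client up with the ensemble
    Stat stat = new Stat();
    byte [] current = ZKUtil.getDataNoWatch(zkw, znode, stat);
    if (current == null) {
      return false;                                   // node missing or unreadable
    }
    return ZKUtil.setData(zkw, znode, newData, stat.getVersion());
  }
}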

+ * This may be temporary but for now this gives one place to deal with these. + * + * TODO: Currently this method rethrows the exception to let the caller handle it. + *
      + * @param ke + * @throws KeeperException + */ + public void keeperException(KeeperException ke) + throws KeeperException { + LOG.error(prefix("Received unexpected KeeperException, re-throwing exception"), ke); + throw ke; + } + + /** + * Handles InterruptedExceptions in client calls. + *

+ * This may be temporary but for now this gives one place to deal with these. + * + * TODO: Currently, this method does nothing. + * Is this ever expected to happen? Do we abort or can we let it run? + * Maybe this should be logged as WARN? It shouldn't happen? + *
      + * @param ie + */ + public void interruptedException(InterruptedException ie) { + LOG.debug(prefix("Received InterruptedException, doing nothing here"), ie); + // At least preserver interrupt. + Thread.currentThread().interrupt(); + // no-op + } + + /** + * Close the connection to ZooKeeper. + * + * @throws InterruptedException + */ + public void close() { + try { + if (recoverableZooKeeper != null) { + recoverableZooKeeper.close(); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + public Configuration getConfiguration() { + return conf; + } + + @Override + public void abort(String why, Throwable e) { + this.abortable.abort(why, e); + } + + @Override + public boolean isAborted() { + return this.abortable.isAborted(); + } + + /** + * @return Path to the currently active master. + */ + public String getMasterAddressZNode() { + return this.masterAddressZNode; + } +} diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java new file mode 100644 index 0000000..2ffd60a --- /dev/null +++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java @@ -0,0 +1,165 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.client; + +import java.util.Arrays; + +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Assert; +import org.junit.Test; + +public class TestAttributes { + @Test + public void testPutAttributes() { + Put put = new Put(new byte [] {}); + Assert.assertTrue(put.getAttributesMap().isEmpty()); + Assert.assertNull(put.getAttribute("absent")); + + put.setAttribute("absent", null); + Assert.assertTrue(put.getAttributesMap().isEmpty()); + Assert.assertNull(put.getAttribute("absent")); + + // adding attribute + put.setAttribute("attribute1", Bytes.toBytes("value1")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), put.getAttribute("attribute1"))); + Assert.assertEquals(1, put.getAttributesMap().size()); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), put.getAttributesMap().get("attribute1"))); + + // overriding attribute value + put.setAttribute("attribute1", Bytes.toBytes("value12")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), put.getAttribute("attribute1"))); + Assert.assertEquals(1, put.getAttributesMap().size()); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), put.getAttributesMap().get("attribute1"))); + + // adding another attribute + put.setAttribute("attribute2", Bytes.toBytes("value2")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), put.getAttribute("attribute2"))); + Assert.assertEquals(2, put.getAttributesMap().size()); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), put.getAttributesMap().get("attribute2"))); + + // removing attribute + put.setAttribute("attribute2", null); + Assert.assertNull(put.getAttribute("attribute2")); + Assert.assertEquals(1, put.getAttributesMap().size()); + Assert.assertNull(put.getAttributesMap().get("attribute2")); + + // removing non-existed attribute + put.setAttribute("attribute2", null); + Assert.assertNull(put.getAttribute("attribute2")); + Assert.assertEquals(1, put.getAttributesMap().size()); + Assert.assertNull(put.getAttributesMap().get("attribute2")); + + // removing another attribute + put.setAttribute("attribute1", null); + Assert.assertNull(put.getAttribute("attribute1")); + Assert.assertTrue(put.getAttributesMap().isEmpty()); + Assert.assertNull(put.getAttributesMap().get("attribute1")); + } + + + @Test + public void testDeleteAttributes() { + Delete del = new Delete(new byte [] {}); + Assert.assertTrue(del.getAttributesMap().isEmpty()); + Assert.assertNull(del.getAttribute("absent")); + + del.setAttribute("absent", null); + Assert.assertTrue(del.getAttributesMap().isEmpty()); + Assert.assertNull(del.getAttribute("absent")); + + // adding attribute + del.setAttribute("attribute1", Bytes.toBytes("value1")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), del.getAttribute("attribute1"))); + Assert.assertEquals(1, del.getAttributesMap().size()); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), del.getAttributesMap().get("attribute1"))); + + // overriding attribute value + del.setAttribute("attribute1", Bytes.toBytes("value12")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), del.getAttribute("attribute1"))); + Assert.assertEquals(1, del.getAttributesMap().size()); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), del.getAttributesMap().get("attribute1"))); + + // adding another attribute + del.setAttribute("attribute2", Bytes.toBytes("value2")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), del.getAttribute("attribute2"))); + Assert.assertEquals(2, 
del.getAttributesMap().size()); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), del.getAttributesMap().get("attribute2"))); + + // removing attribute + del.setAttribute("attribute2", null); + Assert.assertNull(del.getAttribute("attribute2")); + Assert.assertEquals(1, del.getAttributesMap().size()); + Assert.assertNull(del.getAttributesMap().get("attribute2")); + + // removing non-existed attribute + del.setAttribute("attribute2", null); + Assert.assertNull(del.getAttribute("attribute2")); + Assert.assertEquals(1, del.getAttributesMap().size()); + Assert.assertNull(del.getAttributesMap().get("attribute2")); + + // removing another attribute + del.setAttribute("attribute1", null); + Assert.assertNull(del.getAttribute("attribute1")); + Assert.assertTrue(del.getAttributesMap().isEmpty()); + Assert.assertNull(del.getAttributesMap().get("attribute1")); + } + + @Test + public void testGetId() { + Get get = new Get(null); + Assert.assertNull("Make sure id is null if unset", get.toMap().get("id")); + get.setId("myId"); + Assert.assertEquals("myId", get.toMap().get("id")); + } + + @Test + public void testAppendId() { + Append append = new Append(Bytes.toBytes("testRow")); + Assert.assertNull("Make sure id is null if unset", append.toMap().get("id")); + append.setId("myId"); + Assert.assertEquals("myId", append.toMap().get("id")); + } + + @Test + public void testDeleteId() { + Delete delete = new Delete(new byte [] {}); + Assert.assertNull("Make sure id is null if unset", delete.toMap().get("id")); + delete.setId("myId"); + Assert.assertEquals("myId", delete.toMap().get("id")); + } + + @Test + public void testPutId() { + Put put = new Put(new byte [] {}); + Assert.assertNull("Make sure id is null if unset", put.toMap().get("id")); + put.setId("myId"); + Assert.assertEquals("myId", put.toMap().get("id")); + } + + @Test + public void testScanId() { + Scan scan = new Scan(); + Assert.assertNull("Make sure id is null if unset", scan.toMap().get("id")); + scan.setId("myId"); + Assert.assertEquals("myId", scan.toMap().get("id")); + } + +} + diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java new file mode 100644 index 0000000..aded6ff --- /dev/null +++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java @@ -0,0 +1,107 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Set; + +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Assert; +import org.junit.Test; + +// TODO: cover more test cases +public class TestGet { + @Test + public void testAttributesSerialization() throws IOException { + Get get = new Get(Bytes.toBytes("row")); + get.setAttribute("attribute1", Bytes.toBytes("value1")); + get.setAttribute("attribute2", Bytes.toBytes("value2")); + get.setAttribute("attribute3", Bytes.toBytes("value3")); + + ClientProtos.Get getProto = ProtobufUtil.toGet(get); + + Get get2 = ProtobufUtil.toGet(getProto); + Assert.assertNull(get2.getAttribute("absent")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get2.getAttribute("attribute1"))); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get2.getAttribute("attribute2"))); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value3"), get2.getAttribute("attribute3"))); + Assert.assertEquals(3, get2.getAttributesMap().size()); + } + + @Test + public void testGetAttributes() { + Get get = new Get(null); + Assert.assertTrue(get.getAttributesMap().isEmpty()); + Assert.assertNull(get.getAttribute("absent")); + + get.setAttribute("absent", null); + Assert.assertTrue(get.getAttributesMap().isEmpty()); + Assert.assertNull(get.getAttribute("absent")); + + // adding attribute + get.setAttribute("attribute1", Bytes.toBytes("value1")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get.getAttribute("attribute1"))); + Assert.assertEquals(1, get.getAttributesMap().size()); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get.getAttributesMap().get("attribute1"))); + + // overriding attribute value + get.setAttribute("attribute1", Bytes.toBytes("value12")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), get.getAttribute("attribute1"))); + Assert.assertEquals(1, get.getAttributesMap().size()); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), get.getAttributesMap().get("attribute1"))); + + // adding another attribute + get.setAttribute("attribute2", Bytes.toBytes("value2")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get.getAttribute("attribute2"))); + Assert.assertEquals(2, get.getAttributesMap().size()); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get.getAttributesMap().get("attribute2"))); + + // removing attribute + get.setAttribute("attribute2", null); + Assert.assertNull(get.getAttribute("attribute2")); + Assert.assertEquals(1, get.getAttributesMap().size()); + Assert.assertNull(get.getAttributesMap().get("attribute2")); + + // removing non-existed attribute + get.setAttribute("attribute2", null); + Assert.assertNull(get.getAttribute("attribute2")); + Assert.assertEquals(1, get.getAttributesMap().size()); + Assert.assertNull(get.getAttributesMap().get("attribute2")); + + // removing another attribute + get.setAttribute("attribute1", null); + Assert.assertNull(get.getAttribute("attribute1")); + Assert.assertTrue(get.getAttributesMap().isEmpty()); + Assert.assertNull(get.getAttributesMap().get("attribute1")); + } + + @Test + public void testNullQualifier() { + Get get = new Get(null); + byte[] family = Bytes.toBytes("family"); + get.addColumn(family, null); + Set qualifiers = get.getFamilyMap().get(family); + Assert.assertEquals(1, qualifiers.size()); + } +} + diff --git 
hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java new file mode 100644 index 0000000..3f383ef --- /dev/null +++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java @@ -0,0 +1,369 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import org.junit.Test; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hbase.filter.BinaryComparator; +import org.apache.hadoop.hbase.filter.ColumnCountGetFilter; +import org.apache.hadoop.hbase.filter.ColumnPaginationFilter; +import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; +import org.apache.hadoop.hbase.filter.ColumnRangeFilter; +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import org.apache.hadoop.hbase.filter.DependentColumnFilter; +import org.apache.hadoop.hbase.filter.FamilyFilter; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.FilterList; +import org.apache.hadoop.hbase.filter.FilterList.Operator; +import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; +import org.apache.hadoop.hbase.filter.InclusiveStopFilter; +import org.apache.hadoop.hbase.filter.KeyOnlyFilter; +import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter; +import org.apache.hadoop.hbase.filter.PageFilter; +import org.apache.hadoop.hbase.filter.PrefixFilter; +import org.apache.hadoop.hbase.filter.QualifierFilter; +import org.apache.hadoop.hbase.filter.RowFilter; +import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; +import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; +import org.apache.hadoop.hbase.filter.SkipFilter; +import org.apache.hadoop.hbase.filter.TimestampsFilter; +import org.apache.hadoop.hbase.filter.ValueFilter; +import org.apache.hadoop.hbase.filter.WhileMatchFilter; +import org.apache.hadoop.hbase.util.Bytes; + +import org.codehaus.jackson.map.ObjectMapper; + +/** + * Run tests that use the functionality of the Operation superclass for + * Puts, Gets, Deletes, Scans, and MultiPuts. 
+ */ +public class TestOperation { + private static byte [] ROW = Bytes.toBytes("testRow"); + private static byte [] FAMILY = Bytes.toBytes("testFamily"); + private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); + private static byte [] VALUE = Bytes.toBytes("testValue"); + + private static ObjectMapper mapper = new ObjectMapper(); + + private static List TS_LIST = Arrays.asList(2L, 3L, 5L); + private static TimestampsFilter TS_FILTER = new TimestampsFilter(TS_LIST); + private static String STR_TS_FILTER = + TS_FILTER.getClass().getSimpleName() + " (3/3): [2, 3, 5]"; + + private static List L_TS_LIST = + Arrays.asList(0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L); + private static TimestampsFilter L_TS_FILTER = + new TimestampsFilter(L_TS_LIST); + private static String STR_L_TS_FILTER = + L_TS_FILTER.getClass().getSimpleName() + " (5/11): [0, 1, 2, 3, 4]"; + + private static String COL_NAME_1 = "col1"; + private static ColumnPrefixFilter COL_PRE_FILTER = + new ColumnPrefixFilter(COL_NAME_1.getBytes()); + private static String STR_COL_PRE_FILTER = + COL_PRE_FILTER.getClass().getSimpleName() + " " + COL_NAME_1; + + private static String COL_NAME_2 = "col2"; + private static ColumnRangeFilter CR_FILTER = new ColumnRangeFilter( + COL_NAME_1.getBytes(), true, COL_NAME_2.getBytes(), false); + private static String STR_CR_FILTER = CR_FILTER.getClass().getSimpleName() + + " [" + COL_NAME_1 + ", " + COL_NAME_2 + ")"; + + private static int COL_COUNT = 9; + private static ColumnCountGetFilter CCG_FILTER = + new ColumnCountGetFilter(COL_COUNT); + private static String STR_CCG_FILTER = + CCG_FILTER.getClass().getSimpleName() + " " + COL_COUNT; + + private static int LIMIT = 3; + private static int OFFSET = 4; + private static ColumnPaginationFilter CP_FILTER = + new ColumnPaginationFilter(LIMIT, OFFSET); + private static String STR_CP_FILTER = CP_FILTER.getClass().getSimpleName() + + " (" + LIMIT + ", " + OFFSET + ")"; + + private static String STOP_ROW_KEY = "stop"; + private static InclusiveStopFilter IS_FILTER = + new InclusiveStopFilter(STOP_ROW_KEY.getBytes()); + private static String STR_IS_FILTER = + IS_FILTER.getClass().getSimpleName() + " " + STOP_ROW_KEY; + + private static String PREFIX = "prefix"; + private static PrefixFilter PREFIX_FILTER = + new PrefixFilter(PREFIX.getBytes()); + private static String STR_PREFIX_FILTER = "PrefixFilter " + PREFIX; + + private static byte[][] PREFIXES = { + "0".getBytes(), "1".getBytes(), "2".getBytes()}; + private static MultipleColumnPrefixFilter MCP_FILTER = + new MultipleColumnPrefixFilter(PREFIXES); + private static String STR_MCP_FILTER = + MCP_FILTER.getClass().getSimpleName() + " (3/3): [0, 1, 2]"; + + private static byte[][] L_PREFIXES = { + "0".getBytes(), "1".getBytes(), "2".getBytes(), "3".getBytes(), + "4".getBytes(), "5".getBytes(), "6".getBytes(), "7".getBytes()}; + private static MultipleColumnPrefixFilter L_MCP_FILTER = + new MultipleColumnPrefixFilter(L_PREFIXES); + private static String STR_L_MCP_FILTER = + L_MCP_FILTER.getClass().getSimpleName() + " (5/8): [0, 1, 2, 3, 4]"; + + private static int PAGE_SIZE = 9; + private static PageFilter PAGE_FILTER = new PageFilter(PAGE_SIZE); + private static String STR_PAGE_FILTER = + PAGE_FILTER.getClass().getSimpleName() + " " + PAGE_SIZE; + + private static SkipFilter SKIP_FILTER = new SkipFilter(L_TS_FILTER); + private static String STR_SKIP_FILTER = + SKIP_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER; + + private static WhileMatchFilter WHILE_FILTER = + new 
WhileMatchFilter(L_TS_FILTER); + private static String STR_WHILE_FILTER = + WHILE_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER; + + private static KeyOnlyFilter KEY_ONLY_FILTER = new KeyOnlyFilter(); + private static String STR_KEY_ONLY_FILTER = + KEY_ONLY_FILTER.getClass().getSimpleName(); + + private static FirstKeyOnlyFilter FIRST_KEY_ONLY_FILTER = + new FirstKeyOnlyFilter(); + private static String STR_FIRST_KEY_ONLY_FILTER = + FIRST_KEY_ONLY_FILTER.getClass().getSimpleName(); + + private static CompareOp CMP_OP = CompareOp.EQUAL; + private static byte[] CMP_VALUE = "value".getBytes(); + private static BinaryComparator BC = new BinaryComparator(CMP_VALUE); + private static DependentColumnFilter DC_FILTER = + new DependentColumnFilter(FAMILY, QUALIFIER, true, CMP_OP, BC); + private static String STR_DC_FILTER = String.format( + "%s (%s, %s, %s, %s, %s)", DC_FILTER.getClass().getSimpleName(), + Bytes.toStringBinary(FAMILY), Bytes.toStringBinary(QUALIFIER), true, + CMP_OP.name(), Bytes.toStringBinary(BC.getValue())); + + private static FamilyFilter FAMILY_FILTER = new FamilyFilter(CMP_OP, BC); + private static String STR_FAMILY_FILTER = + FAMILY_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; + + private static QualifierFilter QUALIFIER_FILTER = + new QualifierFilter(CMP_OP, BC); + private static String STR_QUALIFIER_FILTER = + QUALIFIER_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; + + private static RowFilter ROW_FILTER = new RowFilter(CMP_OP, BC); + private static String STR_ROW_FILTER = + ROW_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; + + private static ValueFilter VALUE_FILTER = new ValueFilter(CMP_OP, BC); + private static String STR_VALUE_FILTER = + VALUE_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; + + private static SingleColumnValueFilter SCV_FILTER = + new SingleColumnValueFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); + private static String STR_SCV_FILTER = String.format("%s (%s, %s, %s, %s)", + SCV_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), + Bytes.toStringBinary(CMP_VALUE)); + + private static SingleColumnValueExcludeFilter SCVE_FILTER = + new SingleColumnValueExcludeFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); + private static String STR_SCVE_FILTER = String.format("%s (%s, %s, %s, %s)", + SCVE_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), + Bytes.toStringBinary(CMP_VALUE)); + + private static FilterList AND_FILTER_LIST = new FilterList( + Operator.MUST_PASS_ALL, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, + CR_FILTER)); + private static String STR_AND_FILTER_LIST = String.format( + "%s AND (3/3): [%s, %s, %s]", AND_FILTER_LIST.getClass().getSimpleName(), + STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); + + private static FilterList OR_FILTER_LIST = new FilterList( + Operator.MUST_PASS_ONE, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, + CR_FILTER)); + private static String STR_OR_FILTER_LIST = String.format( + "%s OR (3/3): [%s, %s, %s]", AND_FILTER_LIST.getClass().getSimpleName(), + STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); + + private static FilterList L_FILTER_LIST = new FilterList( + Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER, COL_PRE_FILTER, + CCG_FILTER, CP_FILTER, PREFIX_FILTER, PAGE_FILTER)); + private static String STR_L_FILTER_LIST = String.format( + "%s AND (5/8): [%s, %s, %s, %s, %s]", + L_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, 
STR_L_TS_FILTER, + STR_CR_FILTER, STR_COL_PRE_FILTER, STR_CCG_FILTER, STR_CP_FILTER); + + private static Filter[] FILTERS = { + TS_FILTER, // TimestampsFilter + L_TS_FILTER, // TimestampsFilter + COL_PRE_FILTER, // ColumnPrefixFilter + CP_FILTER, // ColumnPaginationFilter + CR_FILTER, // ColumnRangeFilter + CCG_FILTER, // ColumnCountGetFilter + IS_FILTER, // InclusiveStopFilter + PREFIX_FILTER, // PrefixFilter + PAGE_FILTER, // PageFilter + SKIP_FILTER, // SkipFilter + WHILE_FILTER, // WhileMatchFilter + KEY_ONLY_FILTER, // KeyOnlyFilter + FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter + MCP_FILTER, // MultipleColumnPrefixFilter + L_MCP_FILTER, // MultipleColumnPrefixFilter + DC_FILTER, // DependentColumnFilter + FAMILY_FILTER, // FamilyFilter + QUALIFIER_FILTER, // QualifierFilter + ROW_FILTER, // RowFilter + VALUE_FILTER, // ValueFilter + SCV_FILTER, // SingleColumnValueFilter + SCVE_FILTER, // SingleColumnValueExcludeFilter + AND_FILTER_LIST, // FilterList + OR_FILTER_LIST, // FilterList + L_FILTER_LIST, // FilterList + }; + + private static String[] FILTERS_INFO = { + STR_TS_FILTER, // TimestampsFilter + STR_L_TS_FILTER, // TimestampsFilter + STR_COL_PRE_FILTER, // ColumnPrefixFilter + STR_CP_FILTER, // ColumnPaginationFilter + STR_CR_FILTER, // ColumnRangeFilter + STR_CCG_FILTER, // ColumnCountGetFilter + STR_IS_FILTER, // InclusiveStopFilter + STR_PREFIX_FILTER, // PrefixFilter + STR_PAGE_FILTER, // PageFilter + STR_SKIP_FILTER, // SkipFilter + STR_WHILE_FILTER, // WhileMatchFilter + STR_KEY_ONLY_FILTER, // KeyOnlyFilter + STR_FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter + STR_MCP_FILTER, // MultipleColumnPrefixFilter + STR_L_MCP_FILTER, // MultipleColumnPrefixFilter + STR_DC_FILTER, // DependentColumnFilter + STR_FAMILY_FILTER, // FamilyFilter + STR_QUALIFIER_FILTER, // QualifierFilter + STR_ROW_FILTER, // RowFilter + STR_VALUE_FILTER, // ValueFilter + STR_SCV_FILTER, // SingleColumnValueFilter + STR_SCVE_FILTER, // SingleColumnValueExcludeFilter + STR_AND_FILTER_LIST, // FilterList + STR_OR_FILTER_LIST, // FilterList + STR_L_FILTER_LIST, // FilterList + }; + + static { + assertEquals("The sizes of static arrays do not match: " + + "[FILTERS: %d <=> FILTERS_INFO: %d]", + FILTERS.length, FILTERS_INFO.length); + } + + /** + * Test the client Operations' JSON encoding to ensure that produced JSON is + * parseable and that the details are present and not corrupted. + * @throws IOException + */ + @Test + public void testOperationJSON() + throws IOException { + // produce a Scan Operation + Scan scan = new Scan(ROW); + scan.addColumn(FAMILY, QUALIFIER); + // get its JSON representation, and parse it + String json = scan.toJSON(); + Map parsedJSON = mapper.readValue(json, HashMap.class); + // check for the row + assertEquals("startRow incorrect in Scan.toJSON()", + Bytes.toStringBinary(ROW), parsedJSON.get("startRow")); + // check for the family and the qualifier. 
+ List familyInfo = (List) ((Map) parsedJSON.get("families")).get( + Bytes.toStringBinary(FAMILY)); + assertNotNull("Family absent in Scan.toJSON()", familyInfo); + assertEquals("Qualifier absent in Scan.toJSON()", 1, familyInfo.size()); + assertEquals("Qualifier incorrect in Scan.toJSON()", + Bytes.toStringBinary(QUALIFIER), + familyInfo.get(0)); + + // produce a Get Operation + Get get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + // get its JSON representation, and parse it + json = get.toJSON(); + parsedJSON = mapper.readValue(json, HashMap.class); + // check for the row + assertEquals("row incorrect in Get.toJSON()", + Bytes.toStringBinary(ROW), parsedJSON.get("row")); + // check for the family and the qualifier. + familyInfo = (List) ((Map) parsedJSON.get("families")).get( + Bytes.toStringBinary(FAMILY)); + assertNotNull("Family absent in Get.toJSON()", familyInfo); + assertEquals("Qualifier absent in Get.toJSON()", 1, familyInfo.size()); + assertEquals("Qualifier incorrect in Get.toJSON()", + Bytes.toStringBinary(QUALIFIER), + familyInfo.get(0)); + + // produce a Put operation + Put put = new Put(ROW); + put.add(FAMILY, QUALIFIER, VALUE); + // get its JSON representation, and parse it + json = put.toJSON(); + parsedJSON = mapper.readValue(json, HashMap.class); + // check for the row + assertEquals("row absent in Put.toJSON()", + Bytes.toStringBinary(ROW), parsedJSON.get("row")); + // check for the family and the qualifier. + familyInfo = (List) ((Map) parsedJSON.get("families")).get( + Bytes.toStringBinary(FAMILY)); + assertNotNull("Family absent in Put.toJSON()", familyInfo); + assertEquals("KeyValue absent in Put.toJSON()", 1, familyInfo.size()); + Map kvMap = (Map) familyInfo.get(0); + assertEquals("Qualifier incorrect in Put.toJSON()", + Bytes.toStringBinary(QUALIFIER), + kvMap.get("qualifier")); + assertEquals("Value length incorrect in Put.toJSON()", + VALUE.length, kvMap.get("vlen")); + + // produce a Delete operation + Delete delete = new Delete(ROW); + delete.deleteColumn(FAMILY, QUALIFIER); + // get its JSON representation, and parse it + json = delete.toJSON(); + parsedJSON = mapper.readValue(json, HashMap.class); + // check for the row + assertEquals("row absent in Delete.toJSON()", + Bytes.toStringBinary(ROW), parsedJSON.get("row")); + // check for the family and the qualifier. + familyInfo = (List) ((Map) parsedJSON.get("families")).get( + Bytes.toStringBinary(FAMILY)); + assertNotNull("Family absent in Delete.toJSON()", familyInfo); + assertEquals("KeyValue absent in Delete.toJSON()", 1, familyInfo.size()); + kvMap = (Map) familyInfo.get(0); + assertEquals("Qualifier incorrect in Delete.toJSON()", + Bytes.toStringBinary(QUALIFIER), kvMap.get("qualifier")); + } + +} + diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java new file mode 100644 index 0000000..5a4edfb --- /dev/null +++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Addresses HBASE-6047 + * We test put.has call with all of its polymorphic magic + */ +public class TestPutDotHas { + + public static final byte[] ROW_01 = Bytes.toBytes("row-01"); + public static final byte[] QUALIFIER_01 = Bytes.toBytes("qualifier-01"); + public static final byte[] VALUE_01 = Bytes.toBytes("value-01"); + public static final byte[] FAMILY_01 = Bytes.toBytes("family-01"); + public static final long TS = 1234567L; + public Put put = new Put(ROW_01); + + @Before + public void setUp() { + put.add(FAMILY_01, QUALIFIER_01, TS, VALUE_01); + } + + @Test + public void testHasIgnoreValueIgnoreTS() { + Assert.assertTrue(put.has(FAMILY_01, QUALIFIER_01)); + Assert.assertFalse(put.has(QUALIFIER_01, FAMILY_01)); + } + + @Test + public void testHasIgnoreValue() { + Assert.assertTrue(put.has(FAMILY_01, QUALIFIER_01, TS)); + Assert.assertFalse(put.has(FAMILY_01, QUALIFIER_01, TS + 1)); + } + + @Test + public void testHasIgnoreTS() { + Assert.assertTrue(put.has(FAMILY_01, QUALIFIER_01, VALUE_01)); + Assert.assertFalse(put.has(FAMILY_01, VALUE_01, QUALIFIER_01)); + } + + @Test + public void testHas() { + Assert.assertTrue(put.has(FAMILY_01, QUALIFIER_01, TS, VALUE_01)); + // Bad TS + Assert.assertFalse(put.has(FAMILY_01, QUALIFIER_01, TS + 1, VALUE_01)); + // Bad Value + Assert.assertFalse(put.has(FAMILY_01, QUALIFIER_01, TS, QUALIFIER_01)); + // Bad Family + Assert.assertFalse(put.has(QUALIFIER_01, QUALIFIER_01, TS, VALUE_01)); + // Bad Qual + Assert.assertFalse(put.has(FAMILY_01, FAMILY_01, TS, VALUE_01)); + } +} diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java new file mode 100644 index 0000000..68a0d25 --- /dev/null +++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java @@ -0,0 +1,108 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
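TestOperation above leans on Operation.toJSON() producing JSON that Jackson can parse back; the same call is handy for client-side debug logging. A minimal sketch under that assumption (class, row, and family names are invented for illustration):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.codehaus.jackson.map.ObjectMapper;

// Hypothetical helper: dumps a Scan as JSON and reads it back, mirroring what
// TestOperation asserts about the toJSON() output.
public class ScanJsonDebugExample {
  public static void main(String[] args) throws IOException {
    Scan scan = new Scan(Bytes.toBytes("startRow"));
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual"));

    // toJSON() is the call TestOperation verifies to be parseable.
    String json = scan.toJSON();
    System.out.println("Scan as JSON: " + json);

    // Parse it back with Jackson, just as the test does.
    ObjectMapper mapper = new ObjectMapper();
    Map parsedJSON = mapper.readValue(json, HashMap.class);
    System.out.println("startRow = " + parsedJSON.get("startRow"));
  }
}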
+ */ + +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Set; + +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Assert; +import org.junit.Test; + +// TODO: cover more test cases +public class TestScan { + @Test + public void testAttributesSerialization() throws IOException { + Scan scan = new Scan(); + scan.setAttribute("attribute1", Bytes.toBytes("value1")); + scan.setAttribute("attribute2", Bytes.toBytes("value2")); + scan.setAttribute("attribute3", Bytes.toBytes("value3")); + + ClientProtos.Scan scanProto = ProtobufUtil.toScan(scan); + + Scan scan2 = ProtobufUtil.toScan(scanProto); + + Assert.assertNull(scan2.getAttribute("absent")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan2.getAttribute("attribute1"))); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan2.getAttribute("attribute2"))); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value3"), scan2.getAttribute("attribute3"))); + Assert.assertEquals(3, scan2.getAttributesMap().size()); + } + + @Test + public void testScanAttributes() { + Scan scan = new Scan(); + Assert.assertTrue(scan.getAttributesMap().isEmpty()); + Assert.assertNull(scan.getAttribute("absent")); + + scan.setAttribute("absent", null); + Assert.assertTrue(scan.getAttributesMap().isEmpty()); + Assert.assertNull(scan.getAttribute("absent")); + + // adding attribute + scan.setAttribute("attribute1", Bytes.toBytes("value1")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan.getAttribute("attribute1"))); + Assert.assertEquals(1, scan.getAttributesMap().size()); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan.getAttributesMap().get("attribute1"))); + + // overriding attribute value + scan.setAttribute("attribute1", Bytes.toBytes("value12")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), scan.getAttribute("attribute1"))); + Assert.assertEquals(1, scan.getAttributesMap().size()); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), scan.getAttributesMap().get("attribute1"))); + + // adding another attribute + scan.setAttribute("attribute2", Bytes.toBytes("value2")); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan.getAttribute("attribute2"))); + Assert.assertEquals(2, scan.getAttributesMap().size()); + Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan.getAttributesMap().get("attribute2"))); + + // removing attribute + scan.setAttribute("attribute2", null); + Assert.assertNull(scan.getAttribute("attribute2")); + Assert.assertEquals(1, scan.getAttributesMap().size()); + Assert.assertNull(scan.getAttributesMap().get("attribute2")); + + // removing non-existed attribute + scan.setAttribute("attribute2", null); + Assert.assertNull(scan.getAttribute("attribute2")); + Assert.assertEquals(1, scan.getAttributesMap().size()); + Assert.assertNull(scan.getAttributesMap().get("attribute2")); + + // removing another attribute + scan.setAttribute("attribute1", null); + Assert.assertNull(scan.getAttribute("attribute1")); + Assert.assertTrue(scan.getAttributesMap().isEmpty()); + Assert.assertNull(scan.getAttributesMap().get("attribute1")); + } + + @Test + public void testNullQualifier() { + Scan scan = new Scan(); + byte[] family = Bytes.toBytes("family"); + scan.addColumn(family, null); + Set qualifiers = scan.getFamilyMap().get(family); + Assert.assertEquals(1, qualifiers.size()); + 
} +} + diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/Abortable.java hbase-common/src/main/java/org/apache/hadoop/hbase/Abortable.java new file mode 100644 index 0000000..a88cf31 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/Abortable.java @@ -0,0 +1,45 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Interface to support the aborting of a given server or client. + *
<p>
+ * This is used primarily for ZooKeeper usage when we could get an unexpected + * and fatal exception, requiring an abort. + * <p>
      + * Implemented by the Master, RegionServer, and TableServers (client). + */ +@InterfaceAudience.Private +public interface Abortable { + /** + * Abort the server or client. + * @param why Why we're aborting. + * @param e Throwable that caused abort. Can be null. + */ + public void abort(String why, Throwable e); + + /** + * Check if the server or client was aborted. + * @return true if the server or client was aborted, false otherwise + */ + public boolean isAborted(); +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/Chore.java hbase-common/src/main/java/org/apache/hadoop/hbase/Chore.java new file mode 100644 index 0000000..692e3fc --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/Chore.java @@ -0,0 +1,122 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.HasThread; +import org.apache.hadoop.hbase.util.Sleeper; + +/** + * Chore is a task performed on a period in hbase. The chore is run in its own + * thread. This base abstract class provides while loop and sleeping facility. + * If an unhandled exception, the threads exit is logged. + * Implementers just need to add checking if there is work to be done and if + * so, do it. Its the base of most of the chore threads in hbase. + * + *
<p>
      Don't subclass Chore if the task relies on being woken up for something to + * do, such as an entry being added to a queue, etc. + */ +@InterfaceAudience.Private +public abstract class Chore extends HasThread { + private final Log LOG = LogFactory.getLog(this.getClass()); + private final Sleeper sleeper; + protected final Stoppable stopper; + + /** + * @param p Period at which we should run. Will be adjusted appropriately + * should we find work and it takes time to complete. + * @param stopper When {@link Stoppable#isStopped()} is true, this thread will + * cleanup and exit cleanly. + */ + public Chore(String name, final int p, final Stoppable stopper) { + super(name); + this.sleeper = new Sleeper(p, stopper); + this.stopper = stopper; + } + + /** + * @see java.lang.Thread#run() + */ + @Override + public void run() { + try { + boolean initialChoreComplete = false; + while (!this.stopper.isStopped()) { + long startTime = System.currentTimeMillis(); + try { + if (!initialChoreComplete) { + initialChoreComplete = initialChore(); + } else { + chore(); + } + } catch (Exception e) { + LOG.error("Caught exception", e); + if (this.stopper.isStopped()) { + continue; + } + } + this.sleeper.sleep(startTime); + } + } catch (Throwable t) { + LOG.fatal(getName() + "error", t); + } finally { + LOG.info(getName() + " exiting"); + cleanup(); + } + } + + /** + * If the thread is currently sleeping, trigger the core to happen immediately. + * If it's in the middle of its operation, will begin another operation + * immediately after finishing this one. + */ + public void triggerNow() { + this.sleeper.skipSleepCycle(); + } + + /** + * Override to run a task before we start looping. + * @return true if initial chore was successful + */ + protected boolean initialChore() { + // Default does nothing. + return true; + } + + /** + * Look for chores. If any found, do them else just return. + */ + protected abstract void chore(); + + /** + * Sleep for period. + */ + protected void sleep() { + this.sleeper.sleep(); + } + + /** + * Called when the chore has completed, allowing subclasses to cleanup any + * extra overhead + */ + protected void cleanup() { + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java hbase-common/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java new file mode 100644 index 0000000..1998593 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java @@ -0,0 +1,37 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
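To make the Chore contract above concrete: a minimal, hypothetical subclass plus a throwaway Stoppable owner (all names invented; in the real code the Master and RegionServer play the stopper role, and the Stoppable interface itself is added later in this patch):

import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.Stoppable;

// Hypothetical periodic task: wakes up every 10 seconds until its owner stops it.
public class CacheFlushChoreExample {

  // Minimal Stoppable owner; real servers implement this themselves.
  static class SimpleStopper implements Stoppable {
    private final AtomicBoolean stopped = new AtomicBoolean(false);
    @Override
    public void stop(String why) { stopped.set(true); }
    @Override
    public boolean isStopped() { return stopped.get(); }
  }

  static class CacheFlushChore extends Chore {
    CacheFlushChore(Stoppable stopper) {
      super("CacheFlushChore", 10 * 1000, stopper); // name, period in ms, stopper
    }
    @Override
    protected void chore() {
      // Work check goes here; Chore supplies the loop, sleep and error plumbing.
    }
  }

  public static void main(String[] args) throws InterruptedException {
    SimpleStopper stopper = new SimpleStopper();
    CacheFlushChore chore = new CacheFlushChore(stopper);
    chore.start();                  // Chore extends HasThread, so start() spawns the loop
    Thread.sleep(1000);
    stopper.stop("example done");   // loop exits the next time it checks the stopper
  }
}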
+ */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * This exception is thrown by the master when a region server clock skew is + * too high. + */ +@SuppressWarnings("serial") +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ClockOutOfSyncException extends IOException { + public ClockOutOfSyncException(String message) { + super(message); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/DeserializationException.java hbase-common/src/main/java/org/apache/hadoop/hbase/DeserializationException.java new file mode 100644 index 0000000..fa69f26 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/DeserializationException.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Failed deserialization. + */ +@InterfaceAudience.Private +@SuppressWarnings("serial") +public class DeserializationException extends HBaseException { + public DeserializationException() { + super(); + } + + public DeserializationException(final String message) { + super(message); + } + + public DeserializationException(final String message, final Throwable t) { + super(message, t); + } + + public DeserializationException(final Throwable t) { + super(t); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java hbase-common/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java new file mode 100644 index 0000000..5d6be07 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java @@ -0,0 +1,55 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
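DeserializationException above is the checked exception that parseFrom(byte[]) methods elsewhere in this patch (for example on ClusterId and HRegionInfo) throw when bytes cannot be decoded. A hypothetical parser in that style, with an invented format, just to show the intended wrapping:

import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical parseFrom(): reject malformed input and wrap any decoding
// failure so callers only have to handle DeserializationException.
public class VersionedStringParserExample {

  private static final byte EXPECTED_VERSION = 1;

  public static String parseFrom(final byte[] bytes) throws DeserializationException {
    if (bytes == null || bytes.length < 2) {
      throw new DeserializationException("Too few bytes to parse: "
          + (bytes == null ? "null" : bytes.length));
    }
    if (bytes[0] != EXPECTED_VERSION) {
      throw new DeserializationException("Unexpected version " + bytes[0]);
    }
    try {
      return Bytes.toString(bytes, 1, bytes.length - 1);
    } catch (RuntimeException e) {
      // Preserve the original cause for the caller.
      throw new DeserializationException("Failed to decode payload", e);
    }
  }
}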
+ */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Subclass if exception is not meant to be retried: e.g. + * {@link UnknownScannerException} + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class DoNotRetryIOException extends HBaseIOException { + + private static final long serialVersionUID = 1197446454511704139L; + + /** + * default constructor + */ + public DoNotRetryIOException() { + super(); + } + + /** + * @param message + */ + public DoNotRetryIOException(String message) { + super(message); + } + + /** + * @param message + * @param cause + */ + public DoNotRetryIOException(String message, Throwable cause) { + super(message, cause); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java hbase-common/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java new file mode 100644 index 0000000..76aae2a --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java @@ -0,0 +1,45 @@ +/** + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + + +/** + * Thrown during flush if the possibility snapshot content was not properly + * persisted into store files. Response should include replay of hlog content. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class DroppedSnapshotException extends IOException { + + private static final long serialVersionUID = -5463156580831677374L; + + /** + * @param msg + */ + public DroppedSnapshotException(String msg) { + super(msg); + } + + /** + * default constructor + */ + public DroppedSnapshotException() { + super(); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/FailedSanityCheckException.java hbase-common/src/main/java/org/apache/hadoop/hbase/FailedSanityCheckException.java new file mode 100644 index 0000000..b62d466 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/FailedSanityCheckException.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +/** + * Exception thrown if a mutation fails sanity checks. + */ +public class FailedSanityCheckException extends DoNotRetryIOException { + + private static final long serialVersionUID = 1788783640409186240L; + + /** + * default constructor + */ + public FailedSanityCheckException() { + super(); + } + + /** + * @param message + */ + public FailedSanityCheckException(String message) { + super(message); + } + + /** + * @param message + * @param cause + */ + public FailedSanityCheckException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseException.java hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseException.java new file mode 100644 index 0000000..28fe337 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseException.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Base checked exception in HBase. + * @see HBASE-5796 + */ +@SuppressWarnings("serial") +@InterfaceAudience.Private +public class HBaseException extends Exception { + public HBaseException() { + super(); + } + + public HBaseException(final String message) { + super(message); + } + + public HBaseException(final String message, final Throwable t) { + super(message, t); + } + + public HBaseException(final Throwable t) { + super(t); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseIOException.java hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseIOException.java new file mode 100644 index 0000000..193dc91 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseIOException.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
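DoNotRetryIOException and its subclasses (FailedSanityCheckException above is one) exist so callers can fail fast instead of retrying. A hedged sketch of a retry wrapper that honors that split; this is only the idea, not HBase's actual ServerCallable retry logic:

import java.io.IOException;
import java.util.concurrent.Callable;

import org.apache.hadoop.hbase.DoNotRetryIOException;

// Hypothetical retry helper: DoNotRetryIOException aborts immediately,
// any other IOException is treated as transient and retried.
public class RetryHelperExample {

  public static <T> T callWithRetries(Callable<T> call, int maxAttempts) throws IOException {
    IOException last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return call.call();
      } catch (DoNotRetryIOException e) {
        throw e;                       // the server told us retrying is pointless
      } catch (IOException e) {
        last = e;                      // transient; try again
      } catch (Exception e) {
        throw new IOException(e);      // Callable's checked Exception, wrapped
      }
    }
    throw last != null ? last : new IOException("retries exhausted");
  }
}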
+ */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * All hbase specific IOExceptions should be subclasses of HBaseIOException + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class HBaseIOException extends IOException { + + private static final long serialVersionUID = 1L; + + public HBaseIOException() { + super(); + } + + public HBaseIOException(String message) { + super(message); + } + + public HBaseIOException(String message, Throwable cause) { + super(message, cause); + } + + public HBaseIOException(Throwable cause) { + super(cause); + }} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 1e24b8c..7d5aec9 100644 --- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -39,6 +39,11 @@ public final class HConstants { /** When we encode strings, we always specify UTF8 encoding */ public static final Charset UTF8_CHARSET = Charset.forName(UTF8_ENCODING); + /** + * Default block size for an HFile. + */ + public final static int DEFAULT_BLOCKSIZE = 64 * 1024; + private static byte[] toBytes(String target) { return target.getBytes(UTF8_CHARSET); } diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java hbase-common/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java new file mode 100644 index 0000000..c3db943 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java @@ -0,0 +1,54 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Thrown if a request is table schema modification is requested but + * made for an invalid family name. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class InvalidFamilyOperationException extends IOException { + private static final long serialVersionUID = 1L << 22 - 1L; + /** default constructor */ + public InvalidFamilyOperationException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public InvalidFamilyOperationException(String s) { + super(s); + } + + /** + * Constructor taking another exception. + * @param e Exception to grab data from. 
+ */ + public InvalidFamilyOperationException(Exception e) { + super(e); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java hbase-common/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java new file mode 100644 index 0000000..8c0a4aa --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java @@ -0,0 +1,57 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Thrown if the master is not running + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class MasterNotRunningException extends IOException { + private static final long serialVersionUID = 1L << 23 - 1L; + /** default constructor */ + public MasterNotRunningException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public MasterNotRunningException(String s) { + super(s); + } + + /** + * Constructor taking another exception. + * @param e Exception to grab data from. + */ + public MasterNotRunningException(Exception e) { + super(e); + } + + public MasterNotRunningException(String s, Exception e) { + super(s, e); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java hbase-common/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java new file mode 100644 index 0000000..49bc935 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java @@ -0,0 +1,46 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DoNotRetryIOException; + +/** + * Thrown when an operation requires the root and all meta regions to be online + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException { + private static final long serialVersionUID = 6439786157874827523L; + /** + * default constructor + */ + public NotAllMetaRegionsOnlineException() { + super(); + } + + /** + * @param message + */ + public NotAllMetaRegionsOnlineException(String message) { + super(message); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java hbase-common/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java new file mode 100644 index 0000000..47d0a26 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java @@ -0,0 +1,56 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Thrown by a region server if it is sent a request for a region it is not + * serving. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class NotServingRegionException extends IOException { + private static final long serialVersionUID = 1L << 17 - 1L; + + /** default constructor */ + public NotServingRegionException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public NotServingRegionException(String s) { + super(s); + } + + /** + * Constructor + * @param s message + */ + public NotServingRegionException(final byte [] s) { + super(Bytes.toString(s)); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/OutOfOrderScannerNextException.java hbase-common/src/main/java/org/apache/hadoop/hbase/OutOfOrderScannerNextException.java new file mode 100644 index 0000000..b84e705 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/OutOfOrderScannerNextException.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Thrown by a RegionServer while doing next() calls on a ResultScanner. Both client and server + * maintain a nextCallSeq and if they do not match, RS will throw this exception. + */ +@InterfaceAudience.Private +public class OutOfOrderScannerNextException extends DoNotRetryIOException { + + private static final long serialVersionUID = 4595751007554273567L; + + public OutOfOrderScannerNextException() { + super(); + } + + public OutOfOrderScannerNextException(String msg) { + super(msg); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java hbase-common/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java new file mode 100644 index 0000000..88e436c --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java @@ -0,0 +1,38 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * This exception is thrown by the master when a region server was shut down and + * restarted so fast that the master still hasn't processed the server shutdown + * of the first instance, or when master is initializing and client call admin + * operations + */ +@SuppressWarnings("serial") +@InterfaceAudience.Public +@InterfaceStability.Stable +public class PleaseHoldException extends IOException { + public PleaseHoldException(String message) { + super(message); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/RegionException.java hbase-common/src/main/java/org/apache/hadoop/hbase/RegionException.java new file mode 100644 index 0000000..8c1d365 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/RegionException.java @@ -0,0 +1,47 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +/** + * Thrown when something happens related to region handling. + * Subclasses have to be more specific. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class RegionException extends IOException { + private static final long serialVersionUID = 1473510258071111371L; + + /** default constructor */ + public RegionException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public RegionException(String s) { + super(s); + } + +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java hbase-common/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java new file mode 100644 index 0000000..5436c9e --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java @@ -0,0 +1,126 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.ipc.RemoteException; + +/** + * Subclass if the server knows the region is now on another server. + * This allows the client to call the new region server without calling the master. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class RegionMovedException extends NotServingRegionException { + private static final Log LOG = LogFactory.getLog(RegionMovedException.class); + private static final long serialVersionUID = -7232903522310558397L; + + private final String hostname; + private final int port; + + private static final String HOST_FIELD = "hostname="; + private static final String PORT_FIELD = "port="; + + public RegionMovedException(final String hostname, final int port) { + super(); + this.hostname = hostname; + this.port = port; + } + + public String getHostname() { + return hostname; + } + + public int getPort() { + return port; + } + + /** + * For hadoop.ipc internal call. Do NOT use. + * We have to parse the hostname to recreate the exception. 
+ * The input is the one generated by {@link #getMessage()} + */ + public RegionMovedException(String s) { + int posHostname = s.indexOf(HOST_FIELD) + HOST_FIELD.length(); + int posPort = s.indexOf(PORT_FIELD) + PORT_FIELD.length(); + + String tmpHostname = null; + int tmpPort = -1; + try { + tmpHostname = s.substring(posHostname, s.indexOf(' ', posHostname)); + tmpPort = Integer.parseInt(s.substring(posPort, s.indexOf('.', posPort))); + } catch (Exception ignored) { + LOG.warn("Can't parse the hostname and the port from this string: " + s + ", "+ + "Continuing"); + } + + hostname = tmpHostname; + port = tmpPort; + } + + @Override + public String getMessage() { + return "Region moved to: " + HOST_FIELD + hostname + " " + PORT_FIELD + port + "."; + } + + /** + * Look for a RegionMovedException in the exception: + * - hadoop.ipc wrapped exceptions + * - nested exceptions + * Returns null if we didn't find the exception or if it was not readable. + */ + public static RegionMovedException find(Object exception) { + if (exception == null || !(exception instanceof Throwable)){ + return null; + } + + Throwable cur = (Throwable)exception; + RegionMovedException res = null; + + while (res == null && cur != null) { + if (cur instanceof RegionMovedException) { + res = (RegionMovedException) cur; + } else { + if (cur instanceof RemoteException) { + RemoteException re = (RemoteException) cur; + Exception e = re.unwrapRemoteException(RegionMovedException.class); + if (e == null){ + e = re.unwrapRemoteException(); + } + // unwrapRemoteException can return the exception given as a parameter when it cannot + // unwrap it. In this case, there is no need to look further + // noinspection ObjectEquality + if (e != re){ + res = find(e); + } + } + cur = cur.getCause(); + } + } + + if (res != null && (res.getPort() < 0 || res.getHostname() == null)){ + // We failed to parse the exception. Let's act as we don't find the exception. + return null; + } else { + return res; + } + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java hbase-common/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java new file mode 100644 index 0000000..f5217bc --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Thrown by a region server if it will block and wait to serve a request. + * For example, the client wants to insert something to a region while the + * region is compacting. 
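RegionMovedException.find() above is written so a client can recognize a relocation even when hadoop.ipc has wrapped or nested the exception. A hypothetical call site (the cache update is a stand-in):

import org.apache.hadoop.hbase.RegionMovedException;

// Hypothetical client-side handler: when a call fails, ask find() whether the
// failure says the region simply moved, and if so repoint the cached location
// instead of going back to the catalog.
public class RegionMovedHandlerExample {

  public static boolean handleFailure(Throwable failure) {
    RegionMovedException moved = RegionMovedException.find(failure);
    if (moved == null) {
      return false; // not a "region moved" failure; let normal error handling run
    }
    System.out.println("Region moved to " + moved.getHostname() + ":" + moved.getPort()
        + "; updating cached location");
    // updateCachedLocation(moved.getHostname(), moved.getPort()); // hypothetical cache update
    return true;
  }
}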
+ */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RegionTooBusyException extends IOException { + private static final long serialVersionUID = 1728345723728342L; + + /** default constructor */ + public RegionTooBusyException() { + super(); + } + + /** + * Constructor + * @param msg message + */ + public RegionTooBusyException(final String msg) { + super(msg); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java hbase-common/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java new file mode 100644 index 0000000..a7e6dd2 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java @@ -0,0 +1,121 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.ipc.RemoteException; + +/** + * An immutable class which contains a static method for handling + * org.apache.hadoop.ipc.RemoteException exceptions. + */ +@InterfaceAudience.Private +public class RemoteExceptionHandler { + /* Not instantiable */ + private RemoteExceptionHandler() {super();} + + /** + * Examine passed Throwable. See if its carrying a RemoteException. If so, + * run {@link #decodeRemoteException(RemoteException)} on it. Otherwise, + * pass back t unaltered. + * @param t Throwable to examine. + * @return Decoded RemoteException carried by t or + * t unaltered. + */ + public static Throwable checkThrowable(final Throwable t) { + Throwable result = t; + if (t instanceof RemoteException) { + try { + result = + RemoteExceptionHandler.decodeRemoteException((RemoteException)t); + } catch (Throwable tt) { + result = tt; + } + } + return result; + } + + /** + * Examine passed IOException. See if its carrying a RemoteException. If so, + * run {@link #decodeRemoteException(RemoteException)} on it. Otherwise, + * pass back e unaltered. + * @param e Exception to examine. + * @return Decoded RemoteException carried by e or + * e unaltered. + */ + public static IOException checkIOException(final IOException e) { + Throwable t = checkThrowable(e); + return t instanceof IOException? (IOException)t: new IOException(t); + } + + /** + * Converts org.apache.hadoop.ipc.RemoteException into original exception, + * if possible. If the original exception is an Error or a RuntimeException, + * throws the original exception. + * + * @param re original exception + * @return decoded RemoteException if it is an instance of or a subclass of + * IOException, or the original RemoteException if it cannot be decoded. 
+ * + * @throws IOException indicating a server error ocurred if the decoded + * exception is not an IOException. The decoded exception is set as + * the cause. + * @deprecated Use {@link RemoteException#unwrapRemoteException()} instead. + * In fact we should look into deprecating this whole class - St.Ack 2010929 + */ + public static IOException decodeRemoteException(final RemoteException re) + throws IOException { + IOException i = re; + + try { + Class c = Class.forName(re.getClassName()); + + Class[] parameterTypes = { String.class }; + Constructor ctor = c.getConstructor(parameterTypes); + + Object[] arguments = { re.getMessage() }; + Throwable t = (Throwable) ctor.newInstance(arguments); + + if (t instanceof IOException) { + i = (IOException) t; + + } else { + i = new IOException("server error"); + i.initCause(t); + throw i; + } + + } catch (ClassNotFoundException x) { + // continue + } catch (NoSuchMethodException x) { + // continue + } catch (IllegalAccessException x) { + // continue + } catch (InvocationTargetException x) { + // continue + } catch (InstantiationException x) { + // continue + } + return i; + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/Stoppable.java hbase-common/src/main/java/org/apache/hadoop/hbase/Stoppable.java new file mode 100644 index 0000000..93ccc13 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/Stoppable.java @@ -0,0 +1,38 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Implementers are Stoppable. + */ +@InterfaceAudience.Private +public interface Stoppable { + /** + * Stop this service. + * @param why Why we're stopping. + */ + public void stop(String why); + + /** + * @return True if {@link #stop(String)} has been closed. + */ + public boolean isStopped(); +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/TableExistsException.java hbase-common/src/main/java/org/apache/hadoop/hbase/TableExistsException.java new file mode 100644 index 0000000..5c94dbd --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/TableExistsException.java @@ -0,0 +1,42 @@ +/** + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
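[Aside on the RemoteExceptionHandler added above: a minimal, illustrative sketch of the intended call pattern for checkIOException(), which turns a hadoop.ipc RemoteException back into the original server-side IOException subclass when possible. The wrapper class below is invented for the example.]

import java.io.IOException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;

public class RemoteExceptionSketch {
  /** Rethrows the decoded server-side exception instead of the opaque RemoteException. */
  public static void rethrowDecoded(IOException fromRpc) throws IOException {
    // If fromRpc carries a RemoteException this decodes it (e.g. back into a
    // TableNotFoundException); otherwise fromRpc is returned unchanged.
    throw RemoteExceptionHandler.checkIOException(fromRpc);
  }
}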
+ */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Thrown when a table exists but should not + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class TableExistsException extends IOException { + private static final long serialVersionUID = 1L << 7 - 1L; + /** default constructor */ + public TableExistsException() { + super(); + } + + /** + * Constructor + * + * @param s message + */ + public TableExistsException(String s) { + super(s); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java hbase-common/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java new file mode 100644 index 0000000..bc8bc7f --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * + * Failed to find .tableinfo file under table dir + * + */ +@InterfaceAudience.Private +@SuppressWarnings("serial") +public class TableInfoMissingException extends HBaseIOException { + + public TableInfoMissingException() { + super(); + } + + public TableInfoMissingException( String message ) { + super(message); + } + + public TableInfoMissingException( String message, Throwable t ) { + super(message, t); + } + + public TableInfoMissingException( Throwable t ) { + super(t); + } + +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java hbase-common/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java new file mode 100644 index 0000000..1273bd8 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java @@ -0,0 +1,53 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Thrown if a table should be offline but is not + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class TableNotDisabledException extends IOException { + private static final long serialVersionUID = 1L << 19 - 1L; + /** default constructor */ + public TableNotDisabledException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public TableNotDisabledException(String s) { + super(s); + } + + /** + * @param tableName Name of table that is not disabled + */ + public TableNotDisabledException(byte[] tableName) { + this(Bytes.toString(tableName)); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java hbase-common/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java new file mode 100644 index 0000000..b3c31b4 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java @@ -0,0 +1,53 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Thrown if a table should be enabled but is not + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class TableNotEnabledException extends IOException { + private static final long serialVersionUID = 262144L; + /** default constructor */ + public TableNotEnabledException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public TableNotEnabledException(String s) { + super(s); + } + + /** + * @param tableName Name of table that is not enabled + */ + public TableNotEnabledException(byte[] tableName) { + this(Bytes.toString(tableName)); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java hbase-common/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java new file mode 100644 index 0000000..fbd3db4 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java @@ -0,0 +1,39 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** Thrown when a table can not be located */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class TableNotFoundException extends RegionException { + private static final long serialVersionUID = 993179627856392526L; + + /** default constructor */ + public TableNotFoundException() { + super(); + } + + /** @param s message */ + public TableNotFoundException(String s) { + super(s); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java hbase-common/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java new file mode 100644 index 0000000..046670f --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java @@ -0,0 +1,35 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Thrown when we are asked to operate on a region we know nothing about. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class UnknownRegionException extends RegionException { + private static final long serialVersionUID = 1968858760475205392L; + + public UnknownRegionException(String regionName) { + super(regionName); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java hbase-common/src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java new file mode 100644 index 0000000..e42f3a9 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java @@ -0,0 +1,45 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + + +/** + * Thrown if a region server is passed an unknown row lock id + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class UnknownRowLockException extends DoNotRetryIOException { + private static final long serialVersionUID = 993179627856392526L; + + /** constructor */ + public UnknownRowLockException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public UnknownRowLockException(String s) { + super(s); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java hbase-common/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java new file mode 100644 index 0000000..856c029 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java @@ -0,0 +1,48 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + + +/** + * Thrown if a region server is passed an unknown scanner id. + * Usually means the client has take too long between checkins and so the + * scanner lease on the serverside has expired OR the serverside is closing + * down and has cancelled all leases. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class UnknownScannerException extends DoNotRetryIOException { + private static final long serialVersionUID = 993179627856392526L; + + /** constructor */ + public UnknownScannerException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public UnknownScannerException(String s) { + super(s); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java hbase-common/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java new file mode 100644 index 0000000..76fc841 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java @@ -0,0 +1,38 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * This exception is thrown by the master when a region server reports and is + * already being processed as dead. This can happen when a region server loses + * its session but didn't figure it yet. + */ +@SuppressWarnings("serial") +@InterfaceAudience.Public +@InterfaceStability.Stable +public class YouAreDeadException extends IOException { + public YouAreDeadException(String message) { + super(message); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java hbase-common/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java new file mode 100644 index 0000000..feedff3 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java @@ -0,0 +1,53 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Thrown if the client can't connect to zookeeper + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ZooKeeperConnectionException extends IOException { + private static final long serialVersionUID = 1L << 23 - 1L; + /** default constructor */ + public ZooKeeperConnectionException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public ZooKeeperConnectionException(String s) { + super(s); + } + + /** + * Constructor taking another exception. + * @param e Exception to grab data from. 
+ */ + public ZooKeeperConnectionException(String message, Exception e) { + super(message, e); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/io/DataInputInputStream.java hbase-common/src/main/java/org/apache/hadoop/hbase/io/DataInputInputStream.java new file mode 100644 index 0000000..b54c3ad --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/DataInputInputStream.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.io; + +import java.io.DataInput; +import java.io.IOException; +import java.io.InputStream; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * An InputStream that wraps a DataInput. + * @see DataOutputOutputStream + */ +@InterfaceAudience.Private +public class DataInputInputStream extends InputStream { + + private DataInput in; + + /** + * Construct an InputStream from the given DataInput. If 'in' + * is already an InputStream, simply returns it. Otherwise, wraps + * it in an InputStream. + * @param in the DataInput to wrap + * @return an InputStream instance that reads from 'in' + */ + public static InputStream constructInputStream(DataInput in) { + if (in instanceof InputStream) { + return (InputStream)in; + } else { + return new DataInputInputStream(in); + } + } + + + public DataInputInputStream(DataInput in) { + this.in = in; + } + + @Override + public int read() throws IOException { + return in.readUnsignedByte(); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/io/DataOutputOutputStream.java hbase-common/src/main/java/org/apache/hadoop/hbase/io/DataOutputOutputStream.java new file mode 100644 index 0000000..fe7044d --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/DataOutputOutputStream.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
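[Aside on the DataInputInputStream added above: a minimal, illustrative sketch of the adapter in use. A DataInputStream is already an InputStream, so constructInputStream() hands it back unchanged; a bare DataInput implementation would be wrapped instead. The class name below is invented for the example.]

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.hbase.io.DataInputInputStream;

public class DataInputAdapterSketch {
  public static void main(String[] args) throws IOException {
    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(new byte[] { 1, 2, 3 }));
    // DataInputStream is an InputStream, so it is returned as-is.
    InputStream wrapped = DataInputInputStream.constructInputStream(in);
    System.out.println(wrapped.read()); // prints 1
  }
}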
+ */ +package org.apache.hadoop.hbase.io; + +import java.io.DataOutput; +import java.io.IOException; +import java.io.OutputStream; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * OutputStream implementation that wraps a DataOutput. + */ +@InterfaceAudience.Private +public class DataOutputOutputStream extends OutputStream { + + private final DataOutput out; + + /** + * Construct an OutputStream from the given DataOutput. If 'out' + * is already an OutputStream, simply returns it. Otherwise, wraps + * it in an OutputStream. + * @param out the DataOutput to wrap + * @return an OutputStream instance that outputs to 'out' + */ + public static OutputStream constructOutputStream(DataOutput out) { + if (out instanceof OutputStream) { + return (OutputStream)out; + } else { + return new DataOutputOutputStream(out); + } + } + + private DataOutputOutputStream(DataOutput out) { + this.out = out; + } + + @Override + public void write(int b) throws IOException { + out.writeByte(b); + } + + @Override + public void write(byte[] b, int off, int len) throws IOException { + out.write(b, off, len); + } + + @Override + public void write(byte[] b) throws IOException { + out.write(b); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java new file mode 100644 index 0000000..2320582 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java @@ -0,0 +1,200 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.io; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.io.Writable; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Represents an interval of version timestamps. + *
<p>
      + * Evaluated according to minStamp <= timestamp < maxStamp + * or [minStamp,maxStamp) in interval notation. + *
<p>
      + * Only used internally; should not be accessed directly by clients. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class TimeRange implements Writable { + private long minStamp = 0L; + private long maxStamp = Long.MAX_VALUE; + private boolean allTime = false; + + /** + * Default constructor. + * Represents interval [0, Long.MAX_VALUE) (allTime) + */ + public TimeRange() { + allTime = true; + } + + /** + * Represents interval [minStamp, Long.MAX_VALUE) + * @param minStamp the minimum timestamp value, inclusive + */ + public TimeRange(long minStamp) { + this.minStamp = minStamp; + } + + /** + * Represents interval [minStamp, Long.MAX_VALUE) + * @param minStamp the minimum timestamp value, inclusive + */ + public TimeRange(byte [] minStamp) { + this.minStamp = Bytes.toLong(minStamp); + } + + /** + * Represents interval [minStamp, maxStamp) + * @param minStamp the minimum timestamp, inclusive + * @param maxStamp the maximum timestamp, exclusive + * @throws IOException + */ + public TimeRange(long minStamp, long maxStamp) + throws IOException { + if(maxStamp < minStamp) { + throw new IOException("maxStamp is smaller than minStamp"); + } + this.minStamp = minStamp; + this.maxStamp = maxStamp; + } + + /** + * Represents interval [minStamp, maxStamp) + * @param minStamp the minimum timestamp, inclusive + * @param maxStamp the maximum timestamp, exclusive + * @throws IOException + */ + public TimeRange(byte [] minStamp, byte [] maxStamp) + throws IOException { + this(Bytes.toLong(minStamp), Bytes.toLong(maxStamp)); + } + + /** + * @return the smallest timestamp that should be considered + */ + public long getMin() { + return minStamp; + } + + /** + * @return the biggest timestamp that should be considered + */ + public long getMax() { + return maxStamp; + } + + /** + * Check if it is for all time + * @return true if it is for all time + */ + public boolean isAllTime() { + return allTime; + } + + /** + * Check if the specified timestamp is within this TimeRange. + *
<p>
      + * Returns true if within interval [minStamp, maxStamp), false + * if not. + * @param bytes timestamp to check + * @param offset offset into the bytes + * @return true if within TimeRange, false if not + */ + public boolean withinTimeRange(byte [] bytes, int offset) { + if(allTime) return true; + return withinTimeRange(Bytes.toLong(bytes, offset)); + } + + /** + * Check if the specified timestamp is within this TimeRange. + *
<p>
      + * Returns true if within interval [minStamp, maxStamp), false + * if not. + * @param timestamp timestamp to check + * @return true if within TimeRange, false if not + */ + public boolean withinTimeRange(long timestamp) { + if(allTime) return true; + // check if >= minStamp + return (minStamp <= timestamp && timestamp < maxStamp); + } + + /** + * Check if the specified timestamp is within this TimeRange. + *
<p>
      + * Returns true if within interval [minStamp, maxStamp), false + * if not. + * @param timestamp timestamp to check + * @return true if within TimeRange, false if not + */ + public boolean withinOrAfterTimeRange(long timestamp) { + if(allTime) return true; + // check if >= minStamp + return (timestamp >= minStamp); + } + + /** + * Compare the timestamp to timerange + * @param timestamp + * @return -1 if timestamp is less than timerange, + * 0 if timestamp is within timerange, + * 1 if timestamp is greater than timerange + */ + public int compare(long timestamp) { + if (timestamp < minStamp) { + return -1; + } else if (timestamp >= maxStamp) { + return 1; + } else { + return 0; + } + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("maxStamp="); + sb.append(this.maxStamp); + sb.append(", minStamp="); + sb.append(this.minStamp); + return sb.toString(); + } + + //Writable + public void readFields(final DataInput in) throws IOException { + this.minStamp = in.readLong(); + this.maxStamp = in.readLong(); + this.allTime = in.readBoolean(); + } + + public void write(final DataOutput out) throws IOException { + out.writeLong(minStamp); + out.writeLong(maxStamp); + out.writeBoolean(this.allTime); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java new file mode 100644 index 0000000..b3d50c9 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java @@ -0,0 +1,35 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +public enum BloomType { + /** + * Bloomfilters disabled + */ + NONE, + /** + * Bloom enabled with Table row as Key + */ + ROW, + /** + * Bloom enabled with Table row & column (family+qualifier) as Key + */ + ROWCOL +} \ No newline at end of file diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java new file mode 100644 index 0000000..3fc7e57 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java @@ -0,0 +1,42 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
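[Aside on the TimeRange added above: a minimal, illustrative sketch of its half-open [minStamp, maxStamp) semantics, showing that the minimum is inclusive and the maximum exclusive. The class name below is invented for the example.]

import java.io.IOException;
import org.apache.hadoop.hbase.io.TimeRange;

public class TimeRangeSketch {
  public static void main(String[] args) throws IOException {
    TimeRange tr = new TimeRange(100L, 200L);
    System.out.println(tr.withinTimeRange(100L)); // true  -- minStamp is inclusive
    System.out.println(tr.withinTimeRange(200L)); // false -- maxStamp is exclusive
    System.out.println(tr.compare(250L));         // 1 -- timestamp is above the range
  }
}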
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.DoNotRetryIOException; + +/** + * Thrown if request for nonexistent column family. + */ +@InterfaceAudience.Private +public class NoSuchColumnFamilyException extends DoNotRetryIOException { + private static final long serialVersionUID = -6569952730832331274L; + + /** default constructor */ + public NoSuchColumnFamilyException() { + super(); + } + + /** + * @param message exception message + */ + public NoSuchColumnFamilyException(String message) { + super(message); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java new file mode 100644 index 0000000..e8bce88 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java @@ -0,0 +1,46 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Thrown if the region server log directory exists (which indicates another + * region server is running at the same address) + */ +@InterfaceAudience.Private +public class RegionServerRunningException extends IOException { + private static final long serialVersionUID = 1L << 31 - 1L; + + /** Default Constructor */ + public RegionServerRunningException() { + super(); + } + + /** + * Constructs the exception and supplies a string as the message + * @param s - message + */ + public RegionServerRunningException(String s) { + super(s); + } + +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerStoppedException.java hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerStoppedException.java new file mode 100644 index 0000000..11ec072 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerStoppedException.java @@ -0,0 +1,35 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Thrown by the region server when it is in shutting down state. + */ +@SuppressWarnings("serial") +@InterfaceAudience.Private +public class RegionServerStoppedException extends IOException { + + public RegionServerStoppedException(String s) { + super(s); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java new file mode 100644 index 0000000..dfc6aab --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java @@ -0,0 +1,46 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Thrown when a request contains a key which is not part of this region + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class WrongRegionException extends IOException { + private static final long serialVersionUID = 993179627856392526L; + + /** constructor */ + public WrongRegionException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public WrongRegionException(String s) { + super(s); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java new file mode 100644 index 0000000..8fb8f08 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java @@ -0,0 +1,46 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver.wal; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Thrown when we fail close of the write-ahead-log file. + * Package private. Only used inside this package. + */ +@InterfaceAudience.Private +public class FailedLogCloseException extends IOException { + private static final long serialVersionUID = 1759152841462990925L; + + /** + * + */ + public FailedLogCloseException() { + super(); + } + + /** + * @param arg0 + */ + public FailedLogCloseException(String arg0) { + super(arg0); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/wal/OrphanHLogAfterSplitException.java hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/wal/OrphanHLogAfterSplitException.java new file mode 100644 index 0000000..640bffa --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/regionserver/wal/OrphanHLogAfterSplitException.java @@ -0,0 +1,42 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver.wal; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; + +@InterfaceAudience.Private +public class OrphanHLogAfterSplitException extends IOException { + + /** + * Create this exception without a message + */ + public OrphanHLogAfterSplitException() { + super(); + } + + /** + * Create this exception with a message + * @param message why it failed + */ + public OrphanHLogAfterSplitException(String message) { + super(message); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java hbase-common/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java new file mode 100644 index 0000000..b8c5d3b --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.security; + +import org.apache.hadoop.hbase.DoNotRetryIOException; + +/** + * Exception thrown by access-related methods. + */ +public class AccessDeniedException extends DoNotRetryIOException { + private static final long serialVersionUID = 1913879564363001780L; + + public AccessDeniedException() { + super(); + } + + public AccessDeniedException(Class clazz, String s) { + super( "AccessDenied [" + clazz.getName() + "]: " + s); + } + + public AccessDeniedException(String s) { + super(s); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java hbase-common/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java new file mode 100644 index 0000000..26ab35d --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java @@ -0,0 +1,68 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.security; + +import org.apache.hadoop.security.UserGroupInformation; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** Authentication method */ +public enum AuthMethod { + SIMPLE((byte) 80, "", UserGroupInformation.AuthenticationMethod.SIMPLE), + KERBEROS((byte) 81, "GSSAPI", UserGroupInformation.AuthenticationMethod.KERBEROS), + DIGEST((byte) 82, "DIGEST-MD5", UserGroupInformation.AuthenticationMethod.TOKEN); + + /** The code for this method. */ + public final byte code; + public final String mechanismName; + public final UserGroupInformation.AuthenticationMethod authenticationMethod; + + AuthMethod(byte code, String mechanismName, + UserGroupInformation.AuthenticationMethod authMethod) { + this.code = code; + this.mechanismName = mechanismName; + this.authenticationMethod = authMethod; + } + + private static final int FIRST_CODE = values()[0].code; + + /** Return the object represented by the code. */ + private static AuthMethod valueOf(byte code) { + final int i = (code & 0xff) - FIRST_CODE; + return i < 0 || i >= values().length ? 
null : values()[i]; + } + + /** Return the SASL mechanism name */ + public String getMechanismName() { + return mechanismName; + } + + /** Read from in */ + public static AuthMethod read(DataInput in) throws IOException { + return valueOf(in.readByte()); + } + + /** Write to out */ + public void write(DataOutput out) throws IOException { + out.write(code); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/security/KerberosInfo.java hbase-common/src/main/java/org/apache/hadoop/hbase/security/KerberosInfo.java new file mode 100644 index 0000000..c3439d0 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/security/KerberosInfo.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.security; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Indicates Kerberos related information to be used for authorizing connections + * over a given RPC protocol interface. + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +@InterfaceAudience.Private +public @interface KerberosInfo { + /** Key for getting server's Kerberos principal name from Configuration */ + String serverPrincipal(); + String clientPrincipal() default ""; +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/security/QualityOfProtection.java hbase-common/src/main/java/org/apache/hadoop/hbase/security/QualityOfProtection.java new file mode 100644 index 0000000..198e57f --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/security/QualityOfProtection.java @@ -0,0 +1,37 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
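[Aside on the AuthMethod enum added above: a minimal, illustrative sketch of its one-byte wire encoding (codes 80 to 82) round-tripping through write() and read(). The class name below is invented for the example.]

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.security.AuthMethod;

public class AuthMethodSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    AuthMethod.KERBEROS.write(new DataOutputStream(bytes)); // writes the single byte 81
    AuthMethod decoded = AuthMethod.read(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(decoded); // KERBEROS
  }
}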
+ */ + +package org.apache.hadoop.hbase.security; + +/** Quality of Protection */ +public enum QualityOfProtection { + AUTHENTICATION("auth"), + INTEGRITY("auth-int"), + PRIVACY("auth-conf"); + + public final String saslQop; + + QualityOfProtection(String saslQop) { + this.saslQop = saslQop; + } + + public String getSaslQop() { + return saslQop; + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java hbase-common/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java new file mode 100644 index 0000000..129cee6 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java @@ -0,0 +1,31 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.security; + +/** Status of sasl connection */ +public enum SaslStatus { + SUCCESS (0), + ERROR (1); + + public final int state; + SaslStatus(int state) { + this.state = state; + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/security/SaslUtils.java hbase-common/src/main/java/org/apache/hadoop/hbase/security/SaslUtils.java new file mode 100644 index 0000000..1bcfa8b --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/security/SaslUtils.java @@ -0,0 +1,87 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.security; + +import org.apache.commons.codec.binary.Base64; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.token.SecretManager; +import org.apache.hadoop.security.token.TokenIdentifier; + +import javax.security.sasl.Sasl; +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.util.Map; +import java.util.TreeMap; + +/** Utility methods for sasl */ +public class SaslUtils { + public static final Map SASL_PROPS = + new TreeMap(); + public static final int SWITCH_TO_SIMPLE_AUTH = -88; + public static final String SASL_DEFAULT_REALM = "default"; + + /** Splitting fully qualified Kerberos name into parts */ + public static String[] splitKerberosName(String fullName) { + return fullName.split("[/@]"); + } + + public static void init(Configuration conf) { + QualityOfProtection saslQOP = QualityOfProtection.AUTHENTICATION; + String rpcProtection = conf.get("hbase.rpc.protection", + QualityOfProtection.AUTHENTICATION.name().toLowerCase()); + if (QualityOfProtection.INTEGRITY.name().toLowerCase() + .equals(rpcProtection)) { + saslQOP = QualityOfProtection.INTEGRITY; + } else if (QualityOfProtection.PRIVACY.name().toLowerCase().equals( + rpcProtection)) { + saslQOP = QualityOfProtection.PRIVACY; + } + + SASL_PROPS.put(Sasl.QOP, saslQOP.getSaslQop()); + SASL_PROPS.put(Sasl.SERVER_AUTH, "true"); + } + + static String encodeIdentifier(byte[] identifier) { + return new String(Base64.encodeBase64(identifier)); + } + + static byte[] decodeIdentifier(String identifier) { + return Base64.decodeBase64(identifier.getBytes()); + } + + public static T getIdentifier(String id, + SecretManager secretManager) throws SecretManager.InvalidToken { + byte[] tokenId = decodeIdentifier(id); + T tokenIdentifier = secretManager.createIdentifier(); + try { + tokenIdentifier.readFields(new DataInputStream(new ByteArrayInputStream( + tokenId))); + } catch (IOException e) { + throw (SecretManager.InvalidToken) new SecretManager.InvalidToken( + "Can't de-serialize tokenIdentifier").initCause(e); + } + return tokenIdentifier; + } + + static char[] encodePassword(byte[] password) { + return new String(Base64.encodeBase64(password)).toCharArray(); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/security/TokenInfo.java hbase-common/src/main/java/org/apache/hadoop/hbase/security/TokenInfo.java new file mode 100644 index 0000000..e27d075 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/security/TokenInfo.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
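[Aside on the SaslUtils class added above: a minimal, illustrative sketch of how init() maps the "hbase.rpc.protection" setting onto the SASL QOP property, and of splitKerberosName(). The class name and principal string below are invented for the example.]

import javax.security.sasl.Sasl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.security.SaslUtils;

public class SaslUtilsSketch {
  public static void main(String[] args) {
    // "hbase/host@REALM" splits on '/' and '@' into primary, instance and realm.
    String[] parts = SaslUtils.splitKerberosName("hbase/rs.example.com@EXAMPLE.COM");
    System.out.println(parts.length); // 3

    Configuration conf = new Configuration();
    conf.set("hbase.rpc.protection", "privacy"); // authentication | integrity | privacy
    SaslUtils.init(conf);
    System.out.println(SaslUtils.SASL_PROPS.get(Sasl.QOP)); // auth-conf
  }
}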
+ */ + +package org.apache.hadoop.hbase.security; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Indicates Token related information to be used in authorizing connections + * over a given RPC protocol interface. + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +@InterfaceAudience.Private +public @interface TokenInfo { + /** The type of Token.getKind() to be handled */ + String value(); +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java new file mode 100644 index 0000000..0c670db --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java @@ -0,0 +1,407 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.security; + +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.hbase.util.Methods; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.security.UserGroupInformation; + +import java.io.IOException; +import java.lang.reflect.UndeclaredThrowableException; +import java.security.PrivilegedAction; +import java.security.PrivilegedExceptionAction; + +import org.apache.commons.logging.Log; + +/** + * Wrapper to abstract out usage of user and group information in HBase. + * + *

      + * This class provides a common interface for interacting with user and group + * information across changing APIs in different versions of Hadoop. It only + * provides access to the common set of functionality in + * {@link org.apache.hadoop.security.UserGroupInformation} currently needed by + * HBase, but can be extended as needs change. + *

      + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class User { + public static final String HBASE_SECURITY_CONF_KEY = + "hbase.security.authentication"; + + private static Log LOG = LogFactory.getLog(User.class); + + protected UserGroupInformation ugi; + + public UserGroupInformation getUGI() { + return ugi; + } + + /** + * Returns the full user name. For Kerberos principals this will include + * the host and realm portions of the principal name. + * @return User full name. + */ + public String getName() { + return ugi.getUserName(); + } + + /** + * Returns the list of groups of which this user is a member. On secure + * Hadoop this returns the group information for the user as resolved on the + * server. For 0.20 based Hadoop, the group names are passed from the client. + */ + public String[] getGroupNames() { + return ugi.getGroupNames(); + } + + /** + * Returns the shortened version of the user name -- the portion that maps + * to an operating system user name. + * @return Short name + */ + public abstract String getShortName(); + + /** + * Executes the given action within the context of this user. + */ + public abstract T runAs(PrivilegedAction action); + + /** + * Executes the given action within the context of this user. + */ + public abstract T runAs(PrivilegedExceptionAction action) + throws IOException, InterruptedException; + + /** + * Requests an authentication token for this user and stores it in the + * user's credentials. + * + * @throws IOException + */ + public abstract void obtainAuthTokenForJob(Configuration conf, Job job) + throws IOException, InterruptedException; + + /** + * Requests an authentication token for this user and stores it in the + * user's credentials. + * + * @throws IOException + */ + public abstract void obtainAuthTokenForJob(JobConf job) + throws IOException, InterruptedException; + + public String toString() { + return ugi.toString(); + } + + /** + * Returns the {@code User} instance within current execution context. + */ + public static User getCurrent() throws IOException { + User user = new SecureHadoopUser(); + if (user.getUGI() == null) { + return null; + } + return user; + } + + /** + * Wraps an underlying {@code UserGroupInformation} instance. + * @param ugi The base Hadoop user + * @return User + */ + public static User create(UserGroupInformation ugi) { + if (ugi == null) { + return null; + } + return new SecureHadoopUser(ugi); + } + + /** + * Generates a new {@code User} instance specifically for use in test code. + * @param name the full username + * @param groups the group names to which the test user will belong + * @return a new User instance + */ + public static User createUserForTesting(Configuration conf, + String name, String[] groups) { + return SecureHadoopUser.createUserForTesting(conf, name, groups); + } + + /** + * Log in the current process using the given configuration keys for the + * credential file and login principal. + * + *

+ * <p>This is only applicable when
+ * running on secure Hadoop -- see
+ * org.apache.hadoop.security.SecurityUtil#login(Configuration,String,String,String).
+ * On regular Hadoop (without security features), this will safely be ignored.
+ *

      + * + * @param conf The configuration data to use + * @param fileConfKey Property key used to configure path to the credential file + * @param principalConfKey Property key used to configure login principal + * @param localhost Current hostname to use in any credentials + * @throws IOException underlying exception from SecurityUtil.login() call + */ + public static void login(Configuration conf, String fileConfKey, + String principalConfKey, String localhost) throws IOException { + SecureHadoopUser.login(conf, fileConfKey, principalConfKey, localhost); + } + + /** + * Returns whether or not Kerberos authentication is configured for Hadoop. + * For non-secure Hadoop, this always returns false. + * For secure Hadoop, it will return the value from + * {@code UserGroupInformation.isSecurityEnabled()}. + */ + public static boolean isSecurityEnabled() { + return SecureHadoopUser.isSecurityEnabled(); + } + + /** + * Returns whether or not secure authentication is enabled for HBase. Note that + * HBase security requires HDFS security to provide any guarantees, so this requires that + * both hbase.security.authentication and hadoop.security.authentication + * are set to kerberos. + */ + public static boolean isHBaseSecurityEnabled(Configuration conf) { + return "kerberos".equalsIgnoreCase(conf.get(HBASE_SECURITY_CONF_KEY)) && + "kerberos".equalsIgnoreCase( + conf.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION)); + } + + /* Concrete implementations */ + + /** + * Bridges {@code User} invocations to underlying calls to + * {@link org.apache.hadoop.security.UserGroupInformation} for secure Hadoop + * 0.20 and versions 0.21 and above. + */ + private static class SecureHadoopUser extends User { + private String shortName; + + private SecureHadoopUser() throws IOException { + try { + ugi = (UserGroupInformation) callStatic("getCurrentUser"); + } catch (IOException ioe) { + throw ioe; + } catch (RuntimeException re) { + throw re; + } catch (Exception e) { + throw new UndeclaredThrowableException(e, + "Unexpected exception getting current secure user"); + } + } + + private SecureHadoopUser(UserGroupInformation ugi) { + this.ugi = ugi; + } + + @Override + public String getShortName() { + if (shortName != null) return shortName; + + try { + shortName = (String)call(ugi, "getShortUserName", null, null); + return shortName; + } catch (RuntimeException re) { + throw re; + } catch (Exception e) { + throw new UndeclaredThrowableException(e, + "Unexpected error getting user short name"); + } + } + + @Override + public T runAs(PrivilegedAction action) { + try { + return (T) call(ugi, "doAs", new Class[]{PrivilegedAction.class}, + new Object[]{action}); + } catch (RuntimeException re) { + throw re; + } catch (Exception e) { + throw new UndeclaredThrowableException(e, + "Unexpected exception in runAs()"); + } + } + + @Override + public T runAs(PrivilegedExceptionAction action) + throws IOException, InterruptedException { + try { + return (T) call(ugi, "doAs", + new Class[]{PrivilegedExceptionAction.class}, + new Object[]{action}); + } catch (IOException ioe) { + throw ioe; + } catch (InterruptedException ie) { + throw ie; + } catch (RuntimeException re) { + throw re; + } catch (Exception e) { + throw new UndeclaredThrowableException(e, + "Unexpected exception in runAs(PrivilegedExceptionAction)"); + } + } + + @Override + public void obtainAuthTokenForJob(Configuration conf, Job job) + throws IOException, InterruptedException { + try { + Class c = Class.forName( + 
"org.apache.hadoop.hbase.security.token.TokenUtil"); + Methods.call(c, null, "obtainTokenForJob", + new Class[]{Configuration.class, UserGroupInformation.class, + Job.class}, + new Object[]{conf, ugi, job}); + } catch (ClassNotFoundException cnfe) { + throw new RuntimeException("Failure loading TokenUtil class, " + +"is secure RPC available?", cnfe); + } catch (IOException ioe) { + throw ioe; + } catch (InterruptedException ie) { + throw ie; + } catch (RuntimeException re) { + throw re; + } catch (Exception e) { + throw new UndeclaredThrowableException(e, + "Unexpected error calling TokenUtil.obtainAndCacheToken()"); + } + } + + @Override + public void obtainAuthTokenForJob(JobConf job) + throws IOException, InterruptedException { + try { + Class c = Class.forName( + "org.apache.hadoop.hbase.security.token.TokenUtil"); + Methods.call(c, null, "obtainTokenForJob", + new Class[]{JobConf.class, UserGroupInformation.class}, + new Object[]{job, ugi}); + } catch (ClassNotFoundException cnfe) { + throw new RuntimeException("Failure loading TokenUtil class, " + +"is secure RPC available?", cnfe); + } catch (IOException ioe) { + throw ioe; + } catch (InterruptedException ie) { + throw ie; + } catch (RuntimeException re) { + throw re; + } catch (Exception e) { + throw new UndeclaredThrowableException(e, + "Unexpected error calling TokenUtil.obtainAndCacheToken()"); + } + } + + /** @see User#createUserForTesting(org.apache.hadoop.conf.Configuration, String, String[]) */ + public static User createUserForTesting(Configuration conf, + String name, String[] groups) { + try { + return new SecureHadoopUser( + (UserGroupInformation)callStatic("createUserForTesting", + new Class[]{String.class, String[].class}, + new Object[]{name, groups}) + ); + } catch (RuntimeException re) { + throw re; + } catch (Exception e) { + throw new UndeclaredThrowableException(e, + "Error creating secure test user"); + } + } + + /** + * Obtain credentials for the current process using the configured + * Kerberos keytab file and principal. + * @see User#login(org.apache.hadoop.conf.Configuration, String, String, String) + * + * @param conf the Configuration to use + * @param fileConfKey Configuration property key used to store the path + * to the keytab file + * @param principalConfKey Configuration property key used to store the + * principal name to login as + * @param localhost the local hostname + */ + public static void login(Configuration conf, String fileConfKey, + String principalConfKey, String localhost) throws IOException { + if (isSecurityEnabled()) { + // check for SecurityUtil class + try { + Class c = Class.forName("org.apache.hadoop.security.SecurityUtil"); + Class[] types = new Class[]{ + Configuration.class, String.class, String.class, String.class }; + Object[] args = new Object[]{ + conf, fileConfKey, principalConfKey, localhost }; + Methods.call(c, null, "login", types, args); + } catch (ClassNotFoundException cnfe) { + throw new RuntimeException("Unable to login using " + + "org.apache.hadoop.security.SecurityUtil.login(). SecurityUtil class " + + "was not found! Is this a version of secure Hadoop?", cnfe); + } catch (IOException ioe) { + throw ioe; + } catch (RuntimeException re) { + throw re; + } catch (Exception e) { + throw new UndeclaredThrowableException(e, + "Unhandled exception in User.login()"); + } + } + } + + /** + * Returns the result of {@code UserGroupInformation.isSecurityEnabled()}. 
+ */ + public static boolean isSecurityEnabled() { + try { + return (Boolean)callStatic("isSecurityEnabled"); + } catch (RuntimeException re) { + throw re; + } catch (Exception e) { + throw new UndeclaredThrowableException(e, + "Unexpected exception calling UserGroupInformation.isSecurityEnabled()"); + } + } + } + + /* Reflection helper methods */ + private static Object callStatic(String methodName) throws Exception { + return call(null, methodName, null, null); + } + + private static Object callStatic(String methodName, Class[] types, + Object[] args) throws Exception { + return call(null, methodName, types, args); + } + + private static Object call(UserGroupInformation instance, String methodName, + Class[] types, Object[] args) throws Exception { + return Methods.call(UserGroupInformation.class, instance, methodName, types, + args); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java new file mode 100644 index 0000000..caf78f3 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java @@ -0,0 +1,80 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.net.InetSocketAddress; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Utility for network addresses, resolving and naming. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class Addressing { + public static final String VALID_PORT_REGEX = "[\\d]+"; + public static final String HOSTNAME_PORT_SEPARATOR = ":"; + + /** + * @param hostAndPort Formatted as <hostname> ':' <port> + * @return An InetSocketInstance + */ + public static InetSocketAddress createInetSocketAddressFromHostAndPortStr( + final String hostAndPort) { + return new InetSocketAddress(parseHostname(hostAndPort), parsePort(hostAndPort)); + } + + /** + * @param hostname Server hostname + * @param port Server port + * @return Returns a concatenation of hostname and + * port in following + * form: <hostname> ':' <port>. 
For example, if hostname + * is example.org and port is 1234, this method will return + * example.org:1234 + */ + public static String createHostAndPortStr(final String hostname, final int port) { + return hostname + HOSTNAME_PORT_SEPARATOR + port; + } + + /** + * @param hostAndPort Formatted as <hostname> ':' <port> + * @return The hostname portion of hostAndPort + */ + public static String parseHostname(final String hostAndPort) { + int colonIndex = hostAndPort.lastIndexOf(HOSTNAME_PORT_SEPARATOR); + if (colonIndex < 0) { + throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort); + } + return hostAndPort.substring(0, colonIndex); + } + + /** + * @param hostAndPort Formatted as <hostname> ':' <port> + * @return The port portion of hostAndPort + */ + public static int parsePort(final String hostAndPort) { + int colonIndex = hostAndPort.lastIndexOf(HOSTNAME_PORT_SEPARATOR); + if (colonIndex < 0) { + throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort); + } + return Integer.parseInt(hostAndPort.substring(colonIndex + 1)); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java new file mode 100644 index 0000000..76551d9 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java @@ -0,0 +1,83 @@ +/* + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.util; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Utilities for class manipulation. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class Classes { + + /** + * Equivalent of {@link Class#forName(String)} which also returns classes for + * primitives like boolean, etc. + * + * @param className + * The name of the class to retrieve. Can be either a normal class or + * a primitive class. + * @return The class specified by className + * @throws ClassNotFoundException + * If the requested class can not be found. 
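The Addressing helpers above are symmetric, so a round trip is the simplest way to see them together; a small sketch (host and port values are arbitrary, and the snippet is illustration only):

import java.net.InetSocketAddress;

import org.apache.hadoop.hbase.util.Addressing;

public class AddressingSketch {
  public static void main(String[] args) {
    // Build the canonical "<hostname>:<port>" form, then take it apart again.
    String hostAndPort = Addressing.createHostAndPortStr("example.org", 1234);
    String host = Addressing.parseHostname(hostAndPort);   // "example.org"
    int port = Addressing.parsePort(hostAndPort);           // 1234
    InetSocketAddress isa =
        Addressing.createInetSocketAddressFromHostAndPortStr(hostAndPort);
    System.out.println(host + " / " + port + " / " + isa);
    // A string without the ':' separator fails fast with an
    // IllegalArgumentException from parseHostname()/parsePort().
  }
}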
+ */ + public static Class extendedForName(String className) + throws ClassNotFoundException { + Class valueType; + if (className.equals("boolean")) { + valueType = boolean.class; + } else if (className.equals("byte")) { + valueType = byte.class; + } else if (className.equals("short")) { + valueType = short.class; + } else if (className.equals("int")) { + valueType = int.class; + } else if (className.equals("long")) { + valueType = long.class; + } else if (className.equals("float")) { + valueType = float.class; + } else if (className.equals("double")) { + valueType = double.class; + } else if (className.equals("char")) { + valueType = char.class; + } else { + valueType = Class.forName(className); + } + return valueType; + } + + public static String stringify(Class[] classes) { + StringBuilder buf = new StringBuilder(); + if (classes != null) { + for (Class c : classes) { + if (buf.length() > 0) { + buf.append(","); + } + buf.append(c.getName()); + } + } else { + buf.append("NULL"); + } + return buf.toString(); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/HasThread.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/HasThread.java new file mode 100644 index 0000000..dbf9164 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/HasThread.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.lang.Thread.UncaughtExceptionHandler; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Abstract class which contains a Thread and delegates the common Thread + * methods to that instance. + * + * The purpose of this class is to workaround Sun JVM bug #6915621, in which + * something internal to the JDK uses Thread.currentThread() as a monitor + * lock. This can produce deadlocks like HBASE-4367, HBASE-4101, etc. 
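A short sketch of the class utilities above, not part of the patch itself: extendedForName() resolves primitive names that plain Class.forName() rejects, and stringify() is the formatting helper used in error messages elsewhere in this change.

import org.apache.hadoop.hbase.util.Classes;

public class ClassesSketch {
  public static void main(String[] args) throws ClassNotFoundException {
    // Primitive names resolve to the primitive type tokens...
    Class longType = Classes.extendedForName("long");          // long.class
    // ...while anything else falls through to Class.forName().
    Class listType = Classes.extendedForName("java.util.ArrayList");
    System.out.println(longType + " and " + listType);
    // Prints "long,java.util.ArrayList"
    System.out.println(Classes.stringify(new Class[] { longType, listType }));
  }
}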
+ */ +@InterfaceAudience.Private +public abstract class HasThread implements Runnable { + private final Thread thread; + + public HasThread() { + this.thread = new Thread(this); + } + + public HasThread(String name) { + this.thread = new Thread(this, name); + } + + public Thread getThread() { + return thread; + } + + public abstract void run(); + + //// Begin delegation to Thread + + public final String getName() { + return thread.getName(); + } + + public void interrupt() { + thread.interrupt(); + } + + public final boolean isAlive() { + return thread.isAlive(); + } + + public boolean isInterrupted() { + return thread.isInterrupted(); + } + + public final void setDaemon(boolean on) { + thread.setDaemon(on); + } + + public final void setName(String name) { + thread.setName(name); + } + + public final void setPriority(int newPriority) { + thread.setPriority(newPriority); + } + + public void setUncaughtExceptionHandler(UncaughtExceptionHandler eh) { + thread.setUncaughtExceptionHandler(eh); + } + + public void start() { + thread.start(); + } + + public final void join() throws InterruptedException { + thread.join(); + } + + public final void join(long millis, int nanos) throws InterruptedException { + thread.join(millis, nanos); + } + + public final void join(long millis) throws InterruptedException { + thread.join(millis); + } + //// End delegation to Thread +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/Hash.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/Hash.java new file mode 100644 index 0000000..b5addf2 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/Hash.java @@ -0,0 +1,137 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.util; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; + +/** + * This class represents a common API for hashing functions. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public abstract class Hash { + /** Constant to denote invalid hash type. */ + public static final int INVALID_HASH = -1; + /** Constant to denote {@link JenkinsHash}. */ + public static final int JENKINS_HASH = 0; + /** Constant to denote {@link MurmurHash}. */ + public static final int MURMUR_HASH = 1; + + /** + * This utility method converts String representation of hash function name + * to a symbolic constant. Currently two function types are supported, + * "jenkins" and "murmur". 
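For illustration only: a subclass is the intended way to consume HasThread. Callers implement run() and keep using the familiar lifecycle methods, which simply delegate to the wrapped Thread instead of inheriting from it.

import org.apache.hadoop.hbase.util.HasThread;

public class WorkerSketch extends HasThread {
  @Override
  public void run() {
    System.out.println(getName() + " doing work off the main thread");
  }

  public static void main(String[] args) throws InterruptedException {
    WorkerSketch worker = new WorkerSketch();
    worker.setName("example-worker");
    worker.setDaemon(false);
    worker.start();   // delegates to the wrapped Thread's start()
    worker.join();    // likewise delegates to Thread.join()
  }
}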
+ * @param name hash function name + * @return one of the predefined constants + */ + public static int parseHashType(String name) { + if ("jenkins".equalsIgnoreCase(name)) { + return JENKINS_HASH; + } else if ("murmur".equalsIgnoreCase(name)) { + return MURMUR_HASH; + } else { + return INVALID_HASH; + } + } + + /** + * This utility method converts the name of the configured + * hash type to a symbolic constant. + * @param conf configuration + * @return one of the predefined constants + */ + public static int getHashType(Configuration conf) { + String name = conf.get("hbase.hash.type", "murmur"); + return parseHashType(name); + } + + /** + * Get a singleton instance of hash function of a given type. + * @param type predefined hash type + * @return hash function instance, or null if type is invalid + */ + public static Hash getInstance(int type) { + switch(type) { + case JENKINS_HASH: + return JenkinsHash.getInstance(); + case MURMUR_HASH: + return MurmurHash.getInstance(); + default: + return null; + } + } + + /** + * Get a singleton instance of hash function of a type + * defined in the configuration. + * @param conf current configuration + * @return defined hash type, or null if type is invalid + */ + public static Hash getInstance(Configuration conf) { + int type = getHashType(conf); + return getInstance(type); + } + + /** + * Calculate a hash using all bytes from the input argument, and + * a seed of -1. + * @param bytes input bytes + * @return hash value + */ + public int hash(byte[] bytes) { + return hash(bytes, bytes.length, -1); + } + + /** + * Calculate a hash using all bytes from the input argument, + * and a provided seed value. + * @param bytes input bytes + * @param initval seed value + * @return hash value + */ + public int hash(byte[] bytes, int initval) { + return hash(bytes, 0, bytes.length, initval); + } + + /** + * Calculate a hash using bytes from 0 to length, and + * the provided seed value + * @param bytes input bytes + * @param length length of the valid bytes after offset to consider + * @param initval seed value + * @return hash value + */ + public int hash(byte[] bytes, int length, int initval) { + return hash(bytes, 0, length, initval); + } + + /** + * Calculate a hash using bytes from offset to offset + + * length, and the provided seed value. + * @param bytes input bytes + * @param offset the offset into the array to start consideration + * @param length length of the valid bytes after offset to consider + * @param initval seed value + * @return hash value + */ + public abstract int hash(byte[] bytes, int offset, int length, int initval); +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java new file mode 100644 index 0000000..2e768ed --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java @@ -0,0 +1,65 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
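A small usage sketch for the Hash factory above (illustrative only; per getHashType() the hbase.hash.type key already defaults to "murmur"): resolve the configured implementation once, and note that the convenience overloads all funnel into the abstract four-argument hash().

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.Hash;

public class HashSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("hbase.hash.type", "jenkins");    // or "murmur", the default
    Hash hash = Hash.getInstance(conf);

    byte[] key = "row-0001".getBytes();
    // hash(byte[]) uses a seed of -1 and the whole array, so it is
    // equivalent to the fully spelled-out form below.
    int h1 = hash.hash(key);
    int h2 = hash.hash(key, 0, key.length, -1);
    System.out.println(h1 == h2);   // true
  }
}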
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.util.Arrays; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * This class encapsulates a byte array and overrides hashCode and equals so + * that it's identity is based on the data rather than the array instance. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class HashedBytes { + + private final byte[] bytes; + private final int hashCode; + + public HashedBytes(byte[] bytes) { + this.bytes = bytes; + hashCode = Bytes.hashCode(bytes); + } + + public byte[] getBytes() { + return bytes; + } + + @Override + public int hashCode() { + return hashCode; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null || getClass() != obj.getClass()) + return false; + HashedBytes other = (HashedBytes) obj; + return Arrays.equals(bytes, other.bytes); + } + + @Override + public String toString() { + return Bytes.toStringBinary(bytes); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java new file mode 100644 index 0000000..26fda22 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java @@ -0,0 +1,261 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.util; + +import static java.lang.Integer.rotateLeft; + +import java.io.FileInputStream; +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Produces 32-bit hash for hash table lookup. + * + *
+ * <pre>lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
+ * You can use this free for any purpose.  It's in the public domain.
+ * It has no warranty.
+ * </pre>
      + * + * @see lookup3.c + * @see Hash Functions (and how this + * function compares to others such as CRC, MD?, etc + * @see Has update on the + * Dr. Dobbs Article + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class JenkinsHash extends Hash { + private static final int BYTE_MASK = 0xff; + + private static JenkinsHash _instance = new JenkinsHash(); + + public static Hash getInstance() { + return _instance; + } + + /** + * taken from hashlittle() -- hash a variable-length key into a 32-bit value + * + * @param key the key (the unaligned variable-length array of bytes) + * @param nbytes number of bytes to include in hash + * @param initval can be any integer value + * @return a 32-bit value. Every bit of the key affects every bit of the + * return value. Two keys differing by one or two bits will have totally + * different hash values. + * + *

+ * <p>The best hash table sizes are powers of 2. There is no need to do mod
+ * a prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask.
+ * For example, if you need only 10 bits, do
+ * h = (h & hashmask(10));
+ * In which case, the hash table should have hashsize(10) elements.
+ *
+ * <p>If you are hashing n strings byte[][] k, do it like this:
+ * for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h);
+ *
+ * <p>By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
+ * code any way you wish, private, educational, or commercial. It's free.
+ *

      Use for hash table lookup, or anything where one collision in 2^^32 is + * acceptable. Do NOT use for cryptographic purposes. + */ + @Override + @SuppressWarnings("fallthrough") + public int hash(byte[] key, int off, int nbytes, int initval) { + int length = nbytes; + int a, b, c; + a = b = c = 0xdeadbeef + length + initval; + int offset = off; + for (; length > 12; offset += 12, length -= 12) { + a += (key[offset] & BYTE_MASK); + a += ((key[offset + 1] & BYTE_MASK) << 8); + a += ((key[offset + 2] & BYTE_MASK) << 16); + a += ((key[offset + 3] & BYTE_MASK) << 24); + b += (key[offset + 4] & BYTE_MASK); + b += ((key[offset + 5] & BYTE_MASK) << 8); + b += ((key[offset + 6] & BYTE_MASK) << 16); + b += ((key[offset + 7] & BYTE_MASK) << 24); + c += (key[offset + 8] & BYTE_MASK); + c += ((key[offset + 9] & BYTE_MASK) << 8); + c += ((key[offset + 10] & BYTE_MASK) << 16); + c += ((key[offset + 11] & BYTE_MASK) << 24); + + /* + * mix -- mix 3 32-bit values reversibly. + * This is reversible, so any information in (a,b,c) before mix() is + * still in (a,b,c) after mix(). + * + * If four pairs of (a,b,c) inputs are run through mix(), or through + * mix() in reverse, there are at least 32 bits of the output that + * are sometimes the same for one pair and different for another pair. + * + * This was tested for: + * - pairs that differed by one bit, by two bits, in any combination + * of top bits of (a,b,c), or in any combination of bottom bits of + * (a,b,c). + * - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + * is commonly produced by subtraction) look like a single 1-bit + * difference. + * - the base values were pseudorandom, all zero but one bit set, or + * all zero plus a counter that starts at zero. + * + * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that + * satisfy this are + * 4 6 8 16 19 4 + * 9 15 3 18 27 15 + * 14 9 3 7 17 3 + * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for + * "differ" defined as + with a one-bit base and a two-bit delta. I + * used http://burtleburtle.net/bob/hash/avalanche.html to choose + * the operations, constants, and arrangements of the variables. + * + * This does not achieve avalanche. There are input bits of (a,b,c) + * that fail to affect some output bits of (a,b,c), especially of a. + * The most thoroughly mixed value is c, but it doesn't really even + * achieve avalanche in c. + * + * This allows some parallelism. Read-after-writes are good at doubling + * the number of bits affected, so the goal of mixing pulls in the + * opposite direction as the goal of parallelism. I did what I could. + * Rotates seem to cost as much as shifts on every machine I could lay + * my hands on, and rotates are much kinder to the top and bottom bits, + * so I used rotates. 
+ * + * #define mix(a,b,c) \ + * { \ + * a -= c; a ^= rot(c, 4); c += b; \ + * b -= a; b ^= rot(a, 6); a += c; \ + * c -= b; c ^= rot(b, 8); b += a; \ + * a -= c; a ^= rot(c,16); c += b; \ + * b -= a; b ^= rot(a,19); a += c; \ + * c -= b; c ^= rot(b, 4); b += a; \ + * } + * + * mix(a,b,c); + */ + a -= c; a ^= rotateLeft(c, 4); c += b; + b -= a; b ^= rotateLeft(a, 6); a += c; + c -= b; c ^= rotateLeft(b, 8); b += a; + a -= c; a ^= rotateLeft(c, 16); c += b; + b -= a; b ^= rotateLeft(a, 19); a += c; + c -= b; c ^= rotateLeft(b, 4); b += a; + } + + //-------------------------------- last block: affect all 32 bits of (c) + switch (length) { // all the case statements fall through + case 12: + c += ((key[offset + 11] & BYTE_MASK) << 24); + case 11: + c += ((key[offset + 10] & BYTE_MASK) << 16); + case 10: + c += ((key[offset + 9] & BYTE_MASK) << 8); + case 9: + c += (key[offset + 8] & BYTE_MASK); + case 8: + b += ((key[offset + 7] & BYTE_MASK) << 24); + case 7: + b += ((key[offset + 6] & BYTE_MASK) << 16); + case 6: + b += ((key[offset + 5] & BYTE_MASK) << 8); + case 5: + b += (key[offset + 4] & BYTE_MASK); + case 4: + a += ((key[offset + 3] & BYTE_MASK) << 24); + case 3: + a += ((key[offset + 2] & BYTE_MASK) << 16); + case 2: + a += ((key[offset + 1] & BYTE_MASK) << 8); + case 1: + //noinspection PointlessArithmeticExpression + a += (key[offset + 0] & BYTE_MASK); + break; + case 0: + return c; + } + /* + * final -- final mixing of 3 32-bit values (a,b,c) into c + * + * Pairs of (a,b,c) values differing in only a few bits will usually + * produce values of c that look totally different. This was tested for + * - pairs that differed by one bit, by two bits, in any combination + * of top bits of (a,b,c), or in any combination of bottom bits of + * (a,b,c). + * + * - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + * is commonly produced by subtraction) look like a single 1-bit + * difference. + * + * - the base values were pseudorandom, all zero but one bit set, or + * all zero plus a counter that starts at zero. + * + * These constants passed: + * 14 11 25 16 4 14 24 + * 12 14 25 16 4 14 24 + * and these came close: + * 4 8 15 26 3 22 24 + * 10 8 15 26 3 22 24 + * 11 8 15 26 3 22 24 + * + * #define final(a,b,c) \ + * { + * c ^= b; c -= rot(b,14); \ + * a ^= c; a -= rot(c,11); \ + * b ^= a; b -= rot(a,25); \ + * c ^= b; c -= rot(b,16); \ + * a ^= c; a -= rot(c,4); \ + * b ^= a; b -= rot(a,14); \ + * c ^= b; c -= rot(b,24); \ + * } + * + */ + c ^= b; c -= rotateLeft(b, 14); + a ^= c; a -= rotateLeft(c, 11); + b ^= a; b -= rotateLeft(a, 25); + c ^= b; c -= rotateLeft(b, 16); + a ^= c; a -= rotateLeft(c, 4); + b ^= a; b -= rotateLeft(a, 14); + c ^= b; c -= rotateLeft(b, 24); + return c; + } + + /** + * Compute the hash of the specified file + * @param args name of file to compute hash of. 
+ * @throws IOException e + */ + public static void main(String[] args) throws IOException { + if (args.length != 1) { + System.err.println("Usage: JenkinsHash filename"); + System.exit(-1); + } + FileInputStream in = new FileInputStream(args[0]); + byte[] bytes = new byte[512]; + int value = 0; + JenkinsHash hash = new JenkinsHash(); + for (int length = in.read(bytes); length > 0; length = in.read(bytes)) { + value = hash.hash(bytes, length, value); + } + System.out.println(Math.abs(value)); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/Methods.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/Methods.java new file mode 100644 index 0000000..8f0a6e3 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/Methods.java @@ -0,0 +1,69 @@ +/* + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.util; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.UndeclaredThrowableException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class Methods { + private static Log LOG = LogFactory.getLog(Methods.class); + + public static Object call(Class clazz, T instance, String methodName, + Class[] types, Object[] args) throws Exception { + try { + Method m = clazz.getMethod(methodName, types); + return m.invoke(instance, args); + } catch (IllegalArgumentException arge) { + LOG.fatal("Constructed invalid call. class="+clazz.getName()+ + " method=" + methodName + " types=" + Classes.stringify(types), arge); + throw arge; + } catch (NoSuchMethodException nsme) { + throw new IllegalArgumentException( + "Can't find method "+methodName+" in "+clazz.getName()+"!", nsme); + } catch (InvocationTargetException ite) { + // unwrap the underlying exception and rethrow + if (ite.getTargetException() != null) { + if (ite.getTargetException() instanceof Exception) { + throw (Exception)ite.getTargetException(); + } else if (ite.getTargetException() instanceof Error) { + throw (Error)ite.getTargetException(); + } + } + throw new UndeclaredThrowableException(ite, + "Unknown exception invoking "+clazz.getName()+"."+methodName+"()"); + } catch (IllegalAccessException iae) { + throw new IllegalArgumentException( + "Denied access calling "+clazz.getName()+"."+methodName+"()", iae); + } catch (SecurityException se) { + LOG.fatal("SecurityException calling method. 
class="+clazz.getName()+ + " method=" + methodName + " types=" + Classes.stringify(types), se); + throw se; + } + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java new file mode 100644 index 0000000..9b498d1 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java @@ -0,0 +1,92 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.util; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * This is a very fast, non-cryptographic hash suitable for general hash-based + * lookup. See http://murmurhash.googlepages.com/ for more details. + * + *

+ * <p>The C version of MurmurHash 2.0 found at that site was ported
+ * to Java by Andrzej Bialecki (ab at getopt org).
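One detail worth a quick sketch (illustrative, not part of the patch): both JenkinsHash above and this MurmurHash take an explicit seed, so several byte ranges can be folded into one value by feeding each result back in as the next seed, which is exactly what JenkinsHash.main() does block by block over a file.

import org.apache.hadoop.hbase.util.Hash;
import org.apache.hadoop.hbase.util.JenkinsHash;

public class SeedChainingSketch {
  public static void main(String[] args) {
    byte[][] chunks = {
        "first".getBytes(), "second".getBytes(), "third".getBytes() };

    // Chain the seed through JenkinsHash, as its javadoc suggests for
    // hashing n strings.
    int jenkins = 0;
    for (byte[] chunk : chunks) {
      jenkins = JenkinsHash.getInstance().hash(chunk, jenkins);
    }

    // The same pattern works for any Hash implementation via the factory.
    Hash murmur = Hash.getInstance(Hash.MURMUR_HASH);
    int m = -1;
    for (byte[] chunk : chunks) {
      m = murmur.hash(chunk, m);
    }
    System.out.println(jenkins + " " + m);
  }
}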

      + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class MurmurHash extends Hash { + private static MurmurHash _instance = new MurmurHash(); + + public static Hash getInstance() { + return _instance; + } + + @Override + public int hash(byte[] data, int offset, int length, int seed) { + int m = 0x5bd1e995; + int r = 24; + + int h = seed ^ length; + + int len_4 = length >> 2; + + for (int i = 0; i < len_4; i++) { + int i_4 = (i << 2) + offset; + int k = data[i_4 + 3]; + k = k << 8; + k = k | (data[i_4 + 2] & 0xff); + k = k << 8; + k = k | (data[i_4 + 1] & 0xff); + k = k << 8; + //noinspection PointlessArithmeticExpression + k = k | (data[i_4 + 0] & 0xff); + k *= m; + k ^= k >>> r; + k *= m; + h *= m; + h ^= k; + } + + // avoid calculating modulo + int len_m = len_4 << 2; + int left = length - len_m; + int i_m = len_m + offset; + + if (left != 0) { + if (left >= 3) { + h ^= data[i_m + 2] << 16; + } + if (left >= 2) { + h ^= data[i_m + 1] << 8; + } + if (left >= 1) { + h ^= data[i_m]; + } + + h *= m; + } + + h ^= h >>> 13; + h *= m; + h ^= h >>> 15; + + return h; + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java new file mode 100644 index 0000000..ecfc308 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java @@ -0,0 +1,135 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.util; + +import java.io.Serializable; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A generic class for pairs. + * @param + * @param + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class Pair implements Serializable +{ + private static final long serialVersionUID = -3986244606585552569L; + protected T1 first = null; + protected T2 second = null; + + /** + * Default constructor. + */ + public Pair() + { + } + + /** + * Constructor + * @param a operand + * @param b operand + */ + public Pair(T1 a, T2 b) + { + this.first = a; + this.second = b; + } + + /** + * Constructs a new pair, inferring the type via the passed arguments + * @param type for first + * @param type for second + * @param a first element + * @param b second element + * @return a new pair containing the passed arguments + */ + public static Pair newPair(T1 a, T2 b) { + return new Pair(a, b); + } + + /** + * Replace the first element of the pair. + * @param a operand + */ + public void setFirst(T1 a) + { + this.first = a; + } + + /** + * Replace the second element of the pair. + * @param b operand + */ + public void setSecond(T2 b) + { + this.second = b; + } + + /** + * Return the first element stored in the pair. 
+ * @return T1 + */ + public T1 getFirst() + { + return first; + } + + /** + * Return the second element stored in the pair. + * @return T2 + */ + public T2 getSecond() + { + return second; + } + + private static boolean equals(Object x, Object y) + { + return (x == null && y == null) || (x != null && x.equals(y)); + } + + @Override + @SuppressWarnings("unchecked") + public boolean equals(Object other) + { + return other instanceof Pair && equals(first, ((Pair)other).first) && + equals(second, ((Pair)other).second); + } + + @Override + public int hashCode() + { + if (first == null) + return (second == null) ? 0 : second.hashCode() + 1; + else if (second == null) + return first.hashCode() + 2; + else + return first.hashCode() * 17 + second.hashCode(); + } + + @Override + public String toString() + { + return "{" + getFirst() + "," + getSecond() + "}"; + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java new file mode 100644 index 0000000..2da2a3a --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java @@ -0,0 +1,115 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.util; + +import java.util.Iterator; + +import org.apache.commons.lang.NotImplementedException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A generic, immutable class for pairs of objects both of type T. + * @param + * @see Pair if Types differ. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class PairOfSameType implements Iterable { + private final T first; + private final T second; + + /** + * Constructor + * @param a operand + * @param b operand + */ + public PairOfSameType(T a, T b) { + this.first = a; + this.second = b; + } + + /** + * Return the first element stored in the pair. + * @return T + */ + public T getFirst() { + return first; + } + + /** + * Return the second element stored in the pair. + * @return T + */ + public T getSecond() { + return second; + } + + private static boolean equals(Object x, Object y) { + return (x == null && y == null) || (x != null && x.equals(y)); + } + + @Override + @SuppressWarnings("unchecked") + public boolean equals(Object other) { + return other instanceof PairOfSameType && + equals(first, ((PairOfSameType)other).first) && + equals(second, ((PairOfSameType)other).second); + } + + @Override + public int hashCode() { + if (first == null) + return (second == null) ? 
0 : second.hashCode() + 1; + else if (second == null) + return first.hashCode() + 2; + else + return first.hashCode() * 17 + second.hashCode(); + } + + @Override + public String toString() { + return "{" + getFirst() + "," + getSecond() + "}"; + } + + @Override + public Iterator iterator() { + return new Iterator() { + private int returned = 0; + + @Override + public boolean hasNext() { + return this.returned < 2; + } + + @Override + public T next() { + if (++this.returned == 1) return getFirst(); + else if (this.returned == 2) return getSecond(); + else throw new IllegalAccessError("this.returned=" + this.returned); + } + + @Override + public void remove() { + throw new NotImplementedException(); + } + }; + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java new file mode 100644 index 0000000..364be66 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java @@ -0,0 +1,451 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * + * The PoolMap maps a key to a collection of values, the elements + * of which are managed by a pool. In effect, that collection acts as a shared + * pool of resources, access to which is closely controlled as per the semantics + * of the pool. + * + *
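A quick sketch of the two pair classes above, before moving on to PoolMap (generic parameters are assumed; the flattened diff has dropped them from the declarations):

import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;

public class PairSketch {
  public static void main(String[] args) {
    // Pair holds two values of possibly different types.
    Pair<String, Integer> p = Pair.newPair("regions", 42);
    System.out.println(p.getFirst() + "=" + p.getSecond() + " " + p);

    // PairOfSameType is immutable and iterable over exactly two elements,
    // convenient for start/end style results.
    PairOfSameType<String> range = new PairOfSameType<String>("aaa", "zzz");
    for (String k : range) {
      System.out.println(k);
    }
  }
}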

      + * In case the size of the pool is set to a non-zero positive number, that is + * used to cap the number of resources that a pool may contain for any given + * key. A size of {@link Integer#MAX_VALUE} is interpreted as an unbounded pool. + *

      + * + * @param + * the type of the key to the resource + * @param + * the type of the resource being pooled + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class PoolMap implements Map { + private PoolType poolType; + + private int poolMaxSize; + + private Map> pools = new ConcurrentHashMap>(); + + public PoolMap(PoolType poolType) { + this.poolType = poolType; + } + + public PoolMap(PoolType poolType, int poolMaxSize) { + this.poolType = poolType; + this.poolMaxSize = poolMaxSize; + } + + @Override + public V get(Object key) { + Pool pool = pools.get(key); + return pool != null ? pool.get() : null; + } + + @Override + public V put(K key, V value) { + Pool pool = pools.get(key); + if (pool == null) { + pools.put(key, pool = createPool()); + } + return pool != null ? pool.put(value) : null; + } + + @SuppressWarnings("unchecked") + @Override + public V remove(Object key) { + Pool pool = pools.remove(key); + if (pool != null) { + remove((K) key, pool.get()); + } + return null; + } + + public boolean remove(K key, V value) { + Pool pool = pools.get(key); + boolean res = false; + if (pool != null) { + res = pool.remove(value); + if (res && pool.size() == 0) { + pools.remove(key); + } + } + return res; + } + + @Override + public Collection values() { + Collection values = new ArrayList(); + for (Pool pool : pools.values()) { + Collection poolValues = pool.values(); + if (poolValues != null) { + values.addAll(poolValues); + } + } + return values; + } + + public Collection values(K key) { + Collection values = new ArrayList(); + Pool pool = pools.get(key); + if (pool != null) { + Collection poolValues = pool.values(); + if (poolValues != null) { + values.addAll(poolValues); + } + } + return values; + } + + + @Override + public boolean isEmpty() { + return pools.isEmpty(); + } + + @Override + public int size() { + return pools.size(); + } + + public int size(K key) { + Pool pool = pools.get(key); + return pool != null ? pool.size() : 0; + } + + @Override + public boolean containsKey(Object key) { + return pools.containsKey(key); + } + + @Override + public boolean containsValue(Object value) { + if (value == null) { + return false; + } + for (Pool pool : pools.values()) { + if (value.equals(pool.get())) { + return true; + } + } + return false; + } + + @Override + public void putAll(Map map) { + for (Map.Entry entry : map.entrySet()) { + put(entry.getKey(), entry.getValue()); + } + } + + @Override + public void clear() { + for (Pool pool : pools.values()) { + pool.clear(); + } + pools.clear(); + } + + @Override + public Set keySet() { + return pools.keySet(); + } + + @Override + public Set> entrySet() { + Set> entries = new HashSet>(); + for (Map.Entry> poolEntry : pools.entrySet()) { + final K poolKey = poolEntry.getKey(); + final Pool pool = poolEntry.getValue(); + if (pool != null) { + for (final V poolValue : pool.values()) { + entries.add(new Map.Entry() { + @Override + public K getKey() { + return poolKey; + } + + @Override + public V getValue() { + return poolValue; + } + + @Override + public V setValue(V value) { + return pool.put(value); + } + }); + } + } + } + return null; + } + + protected interface Pool { + public R get(); + + public R put(R resource); + + public boolean remove(R resource); + + public void clear(); + + public Collection values(); + + public int size(); + } + + public enum PoolType { + Reusable, ThreadLocal, RoundRobin; + + public static PoolType valueOf(String poolTypeName, + PoolType defaultPoolType, PoolType... 
allowedPoolTypes) { + PoolType poolType = PoolType.fuzzyMatch(poolTypeName); + if (poolType != null) { + boolean allowedType = false; + if (poolType.equals(defaultPoolType)) { + allowedType = true; + } else { + if (allowedPoolTypes != null) { + for (PoolType allowedPoolType : allowedPoolTypes) { + if (poolType.equals(allowedPoolType)) { + allowedType = true; + break; + } + } + } + } + if (!allowedType) { + poolType = null; + } + } + return (poolType != null) ? poolType : defaultPoolType; + } + + public static String fuzzyNormalize(String name) { + return name != null ? name.replaceAll("-", "").trim().toLowerCase() : ""; + } + + public static PoolType fuzzyMatch(String name) { + for (PoolType poolType : values()) { + if (fuzzyNormalize(name).equals(fuzzyNormalize(poolType.name()))) { + return poolType; + } + } + return null; + } + } + + protected Pool createPool() { + switch (poolType) { + case Reusable: + return new ReusablePool(poolMaxSize); + case RoundRobin: + return new RoundRobinPool(poolMaxSize); + case ThreadLocal: + return new ThreadLocalPool(); + } + return null; + } + + /** + * The ReusablePool represents a {@link PoolMap.Pool} that builds + * on the {@link LinkedList} class. It essentially allows resources to be + * checked out, at which point it is removed from this pool. When the resource + * is no longer required, it should be returned to the pool in order to be + * reused. + * + *
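A small sketch (not part of the patch) of the PoolType.valueOf/fuzzyMatch helpers defined just above; the dash-stripping, case-insensitive match means a configuration value like "round-robin" resolves to RoundRobin when it is in the allowed list:

import org.apache.hadoop.hbase.util.PoolMap.PoolType;

public class PoolTypeExample {
  public static void main(String[] args) {
    // "round-robin" fuzzy-matches RoundRobin (dashes stripped, case ignored),
    // and RoundRobin is among the allowed types, so it is returned.
    PoolType type = PoolType.valueOf("round-robin",
        PoolType.Reusable, PoolType.RoundRobin, PoolType.ThreadLocal);
    System.out.println(type);  // RoundRobin
  }
}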

+ * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of
+ * the pool is unbounded. Otherwise, it caps the number of consumers that can
+ * check out a resource from this pool to the (non-zero positive) value
+ * specified in {@link #maxSize}.
+ *

      + * + * @param + * the type of the resource + */ + @SuppressWarnings("serial") + public class ReusablePool extends ConcurrentLinkedQueue implements Pool { + private int maxSize; + + public ReusablePool(int maxSize) { + this.maxSize = maxSize; + + } + + @Override + public R get() { + return poll(); + } + + @Override + public R put(R resource) { + if (super.size() < maxSize) { + add(resource); + } + return null; + } + + @Override + public Collection values() { + return this; + } + } + + /** + * The RoundRobinPool represents a {@link PoolMap.Pool}, which + * stores its resources in an {@link ArrayList}. It load-balances access to + * its resources by returning a different resource every time a given key is + * looked up. + * + *

+ * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of
+ * the pool is unbounded. Otherwise, it caps the number of resources in this
+ * pool to the (non-zero positive) value specified in {@link #maxSize}.
+ *
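One behavioural detail worth calling out, visible in RoundRobinPool.get() below: lookups return null until the pool for a key has been filled to maxSize, so callers are expected to create and put resources until the cap is reached. A hedged sketch (not part of the patch), using the same PoolMap imports as the earlier example:

import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.hbase.util.PoolMap.PoolType;

public class RoundRobinExample {
  public static void main(String[] args) {
    PoolMap<String, String> pool =
        new PoolMap<String, String>(PoolType.RoundRobin, 2);
    System.out.println(pool.get("rs-1"));  // null: nothing pooled for this key yet
    pool.put("rs-1", "connection-A");
    pool.put("rs-1", "connection-B");      // the pool for "rs-1" is now at maxSize
    System.out.println(pool.get("rs-1"));  // connection-A
    System.out.println(pool.get("rs-1"));  // connection-B: lookups rotate through the pool
  }
}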

      + * + * @param + * the type of the resource + * + */ + @SuppressWarnings("serial") + class RoundRobinPool extends CopyOnWriteArrayList implements Pool { + private int maxSize; + private int nextResource = 0; + + public RoundRobinPool(int maxSize) { + this.maxSize = maxSize; + } + + @Override + public R put(R resource) { + if (super.size() < maxSize) { + add(resource); + } + return null; + } + + @Override + public R get() { + if (super.size() < maxSize) { + return null; + } + nextResource %= super.size(); + R resource = get(nextResource++); + return resource; + } + + @Override + public Collection values() { + return this; + } + + } + + /** + * The ThreadLocalPool represents a {@link PoolMap.Pool} that + * builds on the {@link ThreadLocal} class. It essentially binds the resource + * to the thread from which it is accessed. + * + *

+ * Note that the size of the pool is essentially bounded by the number of threads
+ * that add resources to this pool.
+ *
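An illustrative sketch (not part of the patch) of the per-thread binding described above: each thread only ever sees the resource it bound itself.

import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.hbase.util.PoolMap.PoolType;

public class ThreadLocalPoolExample {
  public static void main(String[] args) {
    final PoolMap<String, String> pool =
        new PoolMap<String, String>(PoolType.ThreadLocal);
    pool.put("rs-1", "bound to main");
    new Thread(new Runnable() {
      @Override
      public void run() {
        System.out.println(pool.get("rs-1"));  // null: this thread has not bound a resource yet
        pool.put("rs-1", "bound to worker");
        System.out.println(pool.get("rs-1"));  // bound to worker
      }
    }).start();
    System.out.println(pool.get("rs-1"));      // bound to main
  }
}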

      + * + * @param + * the type of the resource + */ + static class ThreadLocalPool extends ThreadLocal implements Pool { + private static final Map, AtomicInteger> poolSizes = new HashMap, AtomicInteger>(); + + public ThreadLocalPool() { + } + + @Override + public R put(R resource) { + R previousResource = get(); + if (previousResource == null) { + AtomicInteger poolSize = poolSizes.get(this); + if (poolSize == null) { + poolSizes.put(this, poolSize = new AtomicInteger(0)); + } + poolSize.incrementAndGet(); + } + this.set(resource); + return previousResource; + } + + @Override + public void remove() { + super.remove(); + AtomicInteger poolSize = poolSizes.get(this); + if (poolSize != null) { + poolSize.decrementAndGet(); + } + } + + @Override + public int size() { + AtomicInteger poolSize = poolSizes.get(this); + return poolSize != null ? poolSize.get() : 0; + } + + @Override + public boolean remove(R resource) { + R previousResource = super.get(); + if (resource != null && resource.equals(previousResource)) { + remove(); + return true; + } else { + return false; + } + } + + @Override + public void clear() { + super.remove(); + } + + @Override + public Collection values() { + List values = new ArrayList(); + values.add(get()); + return values; + } + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/ProtoUtil.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/ProtoUtil.java new file mode 100644 index 0000000..bdbe7b7 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/ProtoUtil.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.util; + +import java.io.DataInput; +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; + +@InterfaceAudience.Private +public abstract class ProtoUtil { + + /** + * Read a variable length integer in the same format that ProtoBufs encodes. + * @param in the input stream to read from + * @return the integer + * @throws IOException if it is malformed or EOF. + */ + public static int readRawVarint32(DataInput in) throws IOException { + byte tmp = in.readByte(); + if (tmp >= 0) { + return tmp; + } + int result = tmp & 0x7f; + if ((tmp = in.readByte()) >= 0) { + result |= tmp << 7; + } else { + result |= (tmp & 0x7f) << 7; + if ((tmp = in.readByte()) >= 0) { + result |= tmp << 14; + } else { + result |= (tmp & 0x7f) << 14; + if ((tmp = in.readByte()) >= 0) { + result |= tmp << 21; + } else { + result |= (tmp & 0x7f) << 21; + result |= (tmp = in.readByte()) << 28; + if (tmp < 0) { + // Discard upper 32 bits. 
+ for (int i = 0; i < 5; i++) { + if (in.readByte() >= 0) { + return result; + } + } + throw new IOException("Malformed varint"); + } + } + } + } + return result; + } + +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java new file mode 100644 index 0000000..7790362 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java @@ -0,0 +1,69 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; + +@InterfaceAudience.Private +public class RetryCounter { + private static final Log LOG = LogFactory.getLog(RetryCounter.class); + private final int maxRetries; + private int retriesRemaining; + private final int retryIntervalMillis; + private final TimeUnit timeUnit; + + public RetryCounter(int maxRetries, + int retryIntervalMillis, TimeUnit timeUnit) { + this.maxRetries = maxRetries; + this.retriesRemaining = maxRetries; + this.retryIntervalMillis = retryIntervalMillis; + this.timeUnit = timeUnit; + } + + public int getMaxRetries() { + return maxRetries; + } + + /** + * Sleep for a exponentially back off time + * @throws InterruptedException + */ + public void sleepUntilNextRetry() throws InterruptedException { + int attempts = getAttemptTimes(); + long sleepTime = (long) (retryIntervalMillis * Math.pow(2, attempts)); + LOG.info("Sleeping " + sleepTime + "ms before retry #" + attempts + "..."); + timeUnit.sleep(sleepTime); + } + + public boolean shouldRetry() { + return retriesRemaining > 0; + } + + public void useRetry() { + retriesRemaining--; + } + + public int getAttemptTimes() { + return maxRetries-retriesRemaining+1; + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounterFactory.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounterFactory.java new file mode 100644 index 0000000..59edf96 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounterFactory.java @@ -0,0 +1,40 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
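For readers unfamiliar with the wire format that ProtoUtil.readRawVarint32 (added above) decodes: each byte carries seven payload bits, low-order group first, and the high bit flags a continuation. A minimal decoding sketch, not part of the patch:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.util.ProtoUtil;

public class VarintExample {
  public static void main(String[] args) throws IOException {
    // Protobuf varint encoding of 300: 0xAC (low 7 bits, continuation set), then 0x02.
    byte[] encoded = new byte[] { (byte) 0xAC, 0x02 };
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(encoded));
    System.out.println(ProtoUtil.readRawVarint32(in)); // 300
  }
}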
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.classification.InterfaceAudience; + +@InterfaceAudience.Private +public class RetryCounterFactory { + private final int maxRetries; + private final int retryIntervalMillis; + + public RetryCounterFactory(int maxRetries, int retryIntervalMillis) { + this.maxRetries = maxRetries; + this.retryIntervalMillis = retryIntervalMillis; + } + + public RetryCounter create() { + return new RetryCounter( + maxRetries, retryIntervalMillis, TimeUnit.MILLISECONDS + ); + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java new file mode 100644 index 0000000..de84646 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java @@ -0,0 +1,117 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.Stoppable; + +/** + * Sleeper for current thread. + * Sleeps for passed period. Also checks passed boolean and if interrupted, + * will return if the flag is set (rather than go back to sleep until its + * sleep time is up). + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class Sleeper { + private final Log LOG = LogFactory.getLog(this.getClass().getName()); + private final int period; + private final Stoppable stopper; + private static final long MINIMAL_DELTA_FOR_LOGGING = 10000; + + private final Object sleepLock = new Object(); + private boolean triggerWake = false; + + /** + * @param sleep sleep time in milliseconds + * @param stopper When {@link Stoppable#isStopped()} is true, this thread will + * cleanup and exit cleanly. + */ + public Sleeper(final int sleep, final Stoppable stopper) { + this.period = sleep; + this.stopper = stopper; + } + + /** + * Sleep for period. + */ + public void sleep() { + sleep(System.currentTimeMillis()); + } + + /** + * If currently asleep, stops sleeping; if not asleep, will skip the next + * sleep cycle. 
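A retry-loop sketch (illustrative only, not part of the patch) wiring RetryCounterFactory and RetryCounter together; fetchSomething() is a hypothetical flaky operation, and sleepUntilNextRetry() backs off exponentially from the configured base interval:

import java.io.IOException;

import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;

public class RetryExample {
  // Hypothetical operation that may fail transiently.
  static void fetchSomething() throws IOException {
    throw new IOException("not ready yet");
  }

  public static void main(String[] args) throws InterruptedException {
    RetryCounterFactory factory = new RetryCounterFactory(5, 100); // 5 retries, 100ms base interval
    RetryCounter retries = factory.create();
    while (retries.shouldRetry()) {
      try {
        fetchSomething();
        break;                          // success, stop retrying
      } catch (IOException e) {
        retries.useRetry();             // consume one retry
        retries.sleepUntilNextRetry();  // exponential backoff based on the attempt count
      }
    }
  }
}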
+ */ + public void skipSleepCycle() { + synchronized (sleepLock) { + triggerWake = true; + sleepLock.notifyAll(); + } + } + + /** + * Sleep for period adjusted by passed startTime + * @param startTime Time some task started previous to now. Time to sleep + * will be docked current time minus passed startTime. + */ + public void sleep(final long startTime) { + if (this.stopper.isStopped()) { + return; + } + long now = System.currentTimeMillis(); + long waitTime = this.period - (now - startTime); + if (waitTime > this.period) { + LOG.warn("Calculated wait time > " + this.period + + "; setting to this.period: " + System.currentTimeMillis() + ", " + + startTime); + waitTime = this.period; + } + while (waitTime > 0) { + long woke = -1; + try { + synchronized (sleepLock) { + if (triggerWake) break; + sleepLock.wait(waitTime); + } + woke = System.currentTimeMillis(); + long slept = woke - now; + if (slept - this.period > MINIMAL_DELTA_FOR_LOGGING) { + LOG.warn("We slept " + slept + "ms instead of " + this.period + + "ms, this is likely due to a long " + + "garbage collecting pause and it's usually bad, see " + + "http://hbase.apache.org/book.html#trouble.rs.runtime.zkexpired"); + } + } catch(InterruptedException iex) { + // We we interrupted because we're meant to stop? If not, just + // continue ignoring the interruption + if (this.stopper.isStopped()) { + return; + } + } + // Recalculate waitTime. + woke = (woke == -1)? System.currentTimeMillis(): woke; + waitTime = this.period - (woke - startTime); + } + triggerWake = false; + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java new file mode 100644 index 0000000..6e77cb6 --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java @@ -0,0 +1,289 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.lang.ref.Reference; +import java.lang.ref.ReferenceQueue; +import java.lang.ref.SoftReference; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A SortedMap implementation that uses Soft Reference values + * internally to make it play well with the GC when in a low-memory + * situation. Use as a cache where you also need SortedMap functionality. 
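A usage sketch (not part of the patch) for the soft-reference cache described above; because values are held through SoftReferences, a get() may legitimately return null after the GC has cleared entries under memory pressure. The generic signature SoftValueSortedMap<K, V> is assumed, since the paste dropped it:

import org.apache.hadoop.hbase.util.SoftValueSortedMap;

public class SoftCacheExample {
  public static void main(String[] args) {
    SoftValueSortedMap<String, byte[]> cache = new SoftValueSortedMap<String, byte[]>();
    cache.put("row-0001", new byte[1024]);
    byte[] cached = cache.get("row-0001");  // may be null if the GC cleared the soft reference
    System.out.println(cached == null ? "evicted" : "hit: " + cached.length + " bytes");
  }
}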
+ * + * @param key class + * @param value class + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class SoftValueSortedMap implements SortedMap { + private final SortedMap> internalMap; + private final ReferenceQueue rq = new ReferenceQueue(); + private final Object sync; + + /** Constructor */ + public SoftValueSortedMap() { + this(new TreeMap>()); + } + + /** + * Constructor + * @param c comparator + */ + public SoftValueSortedMap(final Comparator c) { + this(new TreeMap>(c)); + } + + /** Internal constructor + * @param original object to wrap and synchronize on + */ + private SoftValueSortedMap(SortedMap> original) { + this(original, original); + } + + /** Internal constructor + * For headMap, tailMap, and subMap support + * @param original object to wrap + * @param sync object to synchronize on + */ + private SoftValueSortedMap(SortedMap> original, Object sync) { + this.internalMap = original; + this.sync = sync; + } + + /** + * Checks soft references and cleans any that have been placed on + * ReferenceQueue. Call if get/put etc. are not called regularly. + * Internally these call checkReferences on each access. + * @return How many references cleared. + */ + @SuppressWarnings("unchecked") + private int checkReferences() { + int i = 0; + for (Reference ref; (ref = this.rq.poll()) != null;) { + i++; + this.internalMap.remove(((SoftValue)ref).key); + } + return i; + } + + public V put(K key, V value) { + synchronized(sync) { + checkReferences(); + SoftValue oldValue = this.internalMap.put(key, + new SoftValue(key, value, this.rq)); + return oldValue == null ? null : oldValue.get(); + } + } + + @Override + public void putAll(Map m) { + throw new RuntimeException("Not implemented"); + } + + public V get(Object key) { + synchronized(sync) { + checkReferences(); + SoftValue value = this.internalMap.get(key); + if (value == null) { + return null; + } + if (value.get() == null) { + this.internalMap.remove(key); + return null; + } + return value.get(); + } + } + + public V remove(Object key) { + synchronized(sync) { + checkReferences(); + SoftValue value = this.internalMap.remove(key); + return value == null ? 
null : value.get(); + } + } + + public boolean containsKey(Object key) { + synchronized(sync) { + checkReferences(); + return this.internalMap.containsKey(key); + } + } + + public boolean containsValue(Object value) { + throw new UnsupportedOperationException("Don't support containsValue!"); + } + + public K firstKey() { + synchronized(sync) { + checkReferences(); + return internalMap.firstKey(); + } + } + + public K lastKey() { + synchronized(sync) { + checkReferences(); + return internalMap.lastKey(); + } + } + + public SoftValueSortedMap headMap(K key) { + synchronized(sync) { + checkReferences(); + return new SoftValueSortedMap(this.internalMap.headMap(key), sync); + } + } + + public SoftValueSortedMap tailMap(K key) { + synchronized(sync) { + checkReferences(); + return new SoftValueSortedMap(this.internalMap.tailMap(key), sync); + } + } + + public SoftValueSortedMap subMap(K fromKey, K toKey) { + synchronized(sync) { + checkReferences(); + return new SoftValueSortedMap(this.internalMap.subMap(fromKey, + toKey), sync); + } + } + + /* + * retrieves the value associated with the greatest key strictly less than + * the given key, or null if there is no such key + * @param key the key we're interested in + */ + public synchronized V lowerValueByKey(K key) { + synchronized(sync) { + checkReferences(); + + Map.Entry> entry = + ((NavigableMap>) this.internalMap).lowerEntry(key); + if (entry==null) { + return null; + } + SoftValue value=entry.getValue(); + if (value==null) { + return null; + } + if (value.get() == null) { + this.internalMap.remove(key); + return null; + } + return value.get(); + } + } + + public boolean isEmpty() { + synchronized(sync) { + checkReferences(); + return this.internalMap.isEmpty(); + } + } + + public int size() { + synchronized(sync) { + checkReferences(); + return this.internalMap.size(); + } + } + + public void clear() { + synchronized(sync) { + checkReferences(); + this.internalMap.clear(); + } + } + + public Set keySet() { + synchronized(sync) { + checkReferences(); + // this is not correct as per SortedMap contract (keySet should be + // modifiable) + // needed here so that another thread cannot modify the keyset + // without locking + return Collections.unmodifiableSet(this.internalMap.keySet()); + } + } + + public Comparator comparator() { + return this.internalMap.comparator(); + } + + public Set> entrySet() { + synchronized(sync) { + checkReferences(); + // this is not correct as per SortedMap contract (entrySet should be + // backed by map) + Set> realEntries = new LinkedHashSet>(); + for (Map.Entry> entry : this.internalMap.entrySet()) { + realEntries.add(entry.getValue()); + } + return realEntries; + } + } + + public Collection values() { + synchronized(sync) { + checkReferences(); + ArrayList hardValues = new ArrayList(); + for (SoftValue softValue : this.internalMap.values()) { + hardValues.add(softValue.get()); + } + return hardValues; + } + } + + private static class SoftValue extends SoftReference implements Map.Entry { + final K key; + + SoftValue(K key, V value, ReferenceQueue q) { + super(value, q); + this.key = key; + } + + public K getKey() { + return this.key; + } + + public V getValue() { + return get(); + } + + public V setValue(V value) { + throw new RuntimeException("Not implemented"); + } + } +} diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java new file mode 100644 index 0000000..bb75553 --- /dev/null +++ 
hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.util; + +/** + * Utility class to manage a triple. + */ +public class Triple { + private A first; + private B second; + private C third; + + public Triple(A first, B second, C third) { + this.first = first; + this.second = second; + this.third = third; + } + + public int hashCode() { + int hashFirst = (first != null ? first.hashCode() : 0); + int hashSecond = (second != null ? second.hashCode() : 0); + int hashThird = (third != null ? third.hashCode() : 0); + + return (hashFirst >> 1) ^ hashSecond ^ (hashThird << 1); + } + + public boolean equals(Object obj) { + if (!(obj instanceof Triple)) { + return false; + } + + Triple otherTriple = (Triple) obj; + + if (first != otherTriple.first && (first != null && !(first.equals(otherTriple.first)))) + return false; + if (second != otherTriple.second && (second != null && !(second.equals(otherTriple.second)))) + return false; + if (third != otherTriple.third && (third != null && !(third.equals(otherTriple.third)))) + return false; + + return true; + } + + public String toString() { + return "(" + first + ", " + second + "," + third + " )"; + } + + public A getFirst() { + return first; + } + + public void setFirst(A first) { + this.first = first; + } + + public B getSecond() { + return second; + } + + public void setSecond(B second) { + this.second = second; + } + + public C getThird() { + return third; + } + + public void setThird(C third) { + this.third = third; + } +} + + + diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/Writables.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/Writables.java new file mode 100644 index 0000000..13157eb --- /dev/null +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/Writables.java @@ -0,0 +1,167 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.Writable; + +/** + * Utility class with methods for manipulating Writable objects + */ +@InterfaceAudience.Private +public class Writables { + /** + * @param w writable + * @return The bytes of w gotten by running its + * {@link Writable#write(java.io.DataOutput)} method. + * @throws IOException e + * @see #getWritable(byte[], Writable) + */ + public static byte [] getBytes(final Writable w) throws IOException { + if (w == null) { + throw new IllegalArgumentException("Writable cannot be null"); + } + ByteArrayOutputStream byteStream = new ByteArrayOutputStream(); + DataOutputStream out = new DataOutputStream(byteStream); + try { + w.write(out); + out.close(); + out = null; + return byteStream.toByteArray(); + } finally { + if (out != null) { + out.close(); + } + } + } + + /** + * Put a bunch of Writables as bytes all into the one byte array. + * @param ws writable + * @return The bytes of w gotten by running its + * {@link Writable#write(java.io.DataOutput)} method. + * @throws IOException e + */ + public static byte [] getBytes(final Writable... ws) throws IOException { + List bytes = new ArrayList(); + int size = 0; + for (Writable w: ws) { + byte [] b = getBytes(w); + size += b.length; + bytes.add(b); + } + byte [] result = new byte[size]; + int offset = 0; + for (byte [] b: bytes) { + System.arraycopy(b, 0, result, offset, b.length); + offset += b.length; + } + return result; + } + + /** + * Set bytes into the passed Writable by calling its + * {@link Writable#readFields(java.io.DataInput)}. + * @param bytes serialized bytes + * @param w An empty Writable (usually made by calling the null-arg + * constructor). + * @return The passed Writable after its readFields has been called fed + * by the passed bytes array or IllegalArgumentException + * if passed null or an empty bytes array. + * @throws IOException e + * @throws IllegalArgumentException + */ + public static Writable getWritable(final byte [] bytes, final Writable w) + throws IOException { + return getWritable(bytes, 0, bytes.length, w); + } + + /** + * Set bytes into the passed Writable by calling its + * {@link Writable#readFields(java.io.DataInput)}. + * @param bytes serialized bytes + * @param offset offset into array + * @param length length of data + * @param w An empty Writable (usually made by calling the null-arg + * constructor). + * @return The passed Writable after its readFields has been called fed + * by the passed bytes array or IllegalArgumentException + * if passed null or an empty bytes array. + * @throws IOException e + * @throws IllegalArgumentException + */ + public static Writable getWritable(final byte [] bytes, final int offset, + final int length, final Writable w) + throws IOException { + if (bytes == null || length <=0) { + throw new IllegalArgumentException("Can't build a writable with empty " + + "bytes array"); + } + if (w == null) { + throw new IllegalArgumentException("Writable cannot be null"); + } + DataInputBuffer in = new DataInputBuffer(); + try { + in.reset(bytes, offset, length); + w.readFields(in); + return w; + } finally { + in.close(); + } + } + + /** + * Copy one Writable to another. 
Copies bytes using data streams. + * @param src Source Writable + * @param tgt Target Writable + * @return The target Writable. + * @throws IOException e + */ + public static Writable copyWritable(final Writable src, final Writable tgt) + throws IOException { + return copyWritable(getBytes(src), tgt); + } + + /** + * Copy one Writable to another. Copies bytes using data streams. + * @param bytes Source Writable + * @param tgt Target Writable + * @return The target Writable. + * @throws IOException e + */ + public static Writable copyWritable(final byte [] bytes, final Writable tgt) + throws IOException { + DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes)); + try { + tgt.readFields(dis); + } finally { + dis.close(); + } + return tgt; + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/Abortable.java hbase-server/src/main/java/org/apache/hadoop/hbase/Abortable.java deleted file mode 100644 index a88cf31..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/Abortable.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * Interface to support the aborting of a given server or client. - *

- * This is used primarily for ZooKeeper usage when we could get an unexpected
- * and fatal exception, requiring an abort.
- *

      - * Implemented by the Master, RegionServer, and TableServers (client). - */ -@InterfaceAudience.Private -public interface Abortable { - /** - * Abort the server or client. - * @param why Why we're aborting. - * @param e Throwable that caused abort. Can be null. - */ - public void abort(String why, Throwable e); - - /** - * Check if the server or client was aborted. - * @return true if the server or client was aborted, false otherwise - */ - public boolean isAborted(); -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java deleted file mode 100644 index 692e3fc..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java +++ /dev/null @@ -1,122 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.util.HasThread; -import org.apache.hadoop.hbase.util.Sleeper; - -/** - * Chore is a task performed on a period in hbase. The chore is run in its own - * thread. This base abstract class provides while loop and sleeping facility. - * If an unhandled exception, the threads exit is logged. - * Implementers just need to add checking if there is work to be done and if - * so, do it. Its the base of most of the chore threads in hbase. - * - *

      Don't subclass Chore if the task relies on being woken up for something to - * do, such as an entry being added to a queue, etc. - */ -@InterfaceAudience.Private -public abstract class Chore extends HasThread { - private final Log LOG = LogFactory.getLog(this.getClass()); - private final Sleeper sleeper; - protected final Stoppable stopper; - - /** - * @param p Period at which we should run. Will be adjusted appropriately - * should we find work and it takes time to complete. - * @param stopper When {@link Stoppable#isStopped()} is true, this thread will - * cleanup and exit cleanly. - */ - public Chore(String name, final int p, final Stoppable stopper) { - super(name); - this.sleeper = new Sleeper(p, stopper); - this.stopper = stopper; - } - - /** - * @see java.lang.Thread#run() - */ - @Override - public void run() { - try { - boolean initialChoreComplete = false; - while (!this.stopper.isStopped()) { - long startTime = System.currentTimeMillis(); - try { - if (!initialChoreComplete) { - initialChoreComplete = initialChore(); - } else { - chore(); - } - } catch (Exception e) { - LOG.error("Caught exception", e); - if (this.stopper.isStopped()) { - continue; - } - } - this.sleeper.sleep(startTime); - } - } catch (Throwable t) { - LOG.fatal(getName() + "error", t); - } finally { - LOG.info(getName() + " exiting"); - cleanup(); - } - } - - /** - * If the thread is currently sleeping, trigger the core to happen immediately. - * If it's in the middle of its operation, will begin another operation - * immediately after finishing this one. - */ - public void triggerNow() { - this.sleeper.skipSleepCycle(); - } - - /** - * Override to run a task before we start looping. - * @return true if initial chore was successful - */ - protected boolean initialChore() { - // Default does nothing. - return true; - } - - /** - * Look for chores. If any found, do them else just return. - */ - protected abstract void chore(); - - /** - * Sleep for period. - */ - protected void sleep() { - this.sleeper.sleep(); - } - - /** - * Called when the chore has completed, allowing subclasses to cleanup any - * extra overhead - */ - protected void cleanup() { - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java hbase-server/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java deleted file mode 100644 index 1998593..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
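An illustrative subclass of the Chore base class shown in the removed file above (the patch only moves it out of hbase-server); FlushChore is a made-up name, and implementers simply override chore() with the periodic work:

import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.Stoppable;

public class FlushChore extends Chore {
  public FlushChore(int periodMillis, Stoppable stopper) {
    super("FlushChore", periodMillis, stopper);
  }

  @Override
  protected void chore() {
    // Periodic work goes here; runs once per period until the stopper reports stopped.
    System.out.println("flushing...");
  }
}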
- */ -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * This exception is thrown by the master when a region server clock skew is - * too high. - */ -@SuppressWarnings("serial") -@InterfaceAudience.Public -@InterfaceStability.Stable -public class ClockOutOfSyncException extends IOException { - public ClockOutOfSyncException(String message) { - super(message); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ClusterId.java hbase-server/src/main/java/org/apache/hadoop/hbase/ClusterId.java deleted file mode 100644 index a8e8560..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ClusterId.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import java.util.UUID; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos; -import org.apache.hadoop.hbase.util.Bytes; - -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * The identifier for this cluster. - * It is serialized to the filesystem and up into zookeeper. This is a container for the id. - * Also knows how to serialize and deserialize the cluster id. - */ -@InterfaceAudience.Private -public class ClusterId { - private final String id; - - /** - * New ClusterID. Generates a uniqueid. - */ - public ClusterId() { - this(UUID.randomUUID().toString()); - } - - ClusterId(final String uuid) { - this.id = uuid; - } - - /** - * @return The clusterid serialized using pb w/ pb magic prefix - */ - public byte [] toByteArray() { - return ProtobufUtil.prependPBMagic(convert().toByteArray()); - } - - /** - * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix - * @return An instance of {@link ClusterId} made from bytes - * @throws DeserializationException - * @see #toByteArray() - */ - public static ClusterId parseFrom(final byte [] bytes) throws DeserializationException { - if (ProtobufUtil.isPBMagicPrefix(bytes)) { - int pblen = ProtobufUtil.lengthOfPBMagic(); - ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder(); - ClusterIdProtos.ClusterId cid = null; - try { - cid = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return convert(cid); - } else { - // Presume it was written out this way, the old way. - return new ClusterId(Bytes.toString(bytes)); - } - } - - /** - * @return A pb instance to represent this instance. 
- */ - ClusterIdProtos.ClusterId convert() { - ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder(); - return builder.setClusterId(this.id).build(); - } - - /** - * @param cid - * @return A {@link ClusterId} made from the passed in cid - */ - static ClusterId convert(final ClusterIdProtos.ClusterId cid) { - return new ClusterId(cid.getClusterId()); - } - - /** - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - return this.id; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java hbase-server/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java deleted file mode 100644 index d9dfac0..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ /dev/null @@ -1,350 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.Map; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; -import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo; -import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition; -import org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.VersionedWritable; - -import com.google.protobuf.ByteString; - -/** - * Status information on the HBase cluster. - *

- * ClusterStatus provides clients with information such as:
- * <ul>
- * <li>The count and names of region servers in the cluster.</li>
- * <li>The count and names of dead region servers in the cluster.</li>
- * <li>The name of the active master for the cluster.</li>
- * <li>The name(s) of the backup master(s) for the cluster, if they exist.</li>
- * <li>The average cluster load.</li>
- * <li>The number of regions deployed on the cluster.</li>
- * <li>The number of requests since last report.</li>
- * <li>Detailed region server loading and resource usage information,
- *  per server and per region.</li>
- * <li>Regions in transition at master</li>
- * <li>The unique cluster ID</li>
- * </ul>
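A typical client-side sketch (illustrative only, not part of the patch) that reads the fields listed above; HBaseAdmin#getClusterStatus() is the usual way to obtain an instance:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class ClusterStatusExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    ClusterStatus status = admin.getClusterStatus();
    System.out.println("HBase version: " + status.getHBaseVersion());
    System.out.println("Live servers:  " + status.getServersSize());
    System.out.println("Dead servers:  " + status.getDeadServers());
    System.out.println("Average load:  " + status.getAverageLoad());
    System.out.println("Cluster id:    " + status.getClusterId());
    admin.close();
  }
}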
      - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class ClusterStatus extends VersionedWritable { - /** - * Version for object serialization. Incremented for changes in serialized - * representation. - *
- * <dl>
- *   <dt>0</dt> <dd>Initial version</dd>
- *   <dt>1</dt> <dd>Added cluster ID</dd>
- *   <dt>2</dt> <dd>Added Map of ServerName to ServerLoad</dd>
- *   <dt>3</dt> <dd>Added master and backupMasters</dd>
- * </dl>
      - */ - private static final byte VERSION = 2; - - private String hbaseVersion; - private Map liveServers; - private Collection deadServers; - private ServerName master; - private Collection backupMasters; - private Map intransition; - private String clusterId; - private String[] masterCoprocessors; - private boolean balancerOn; - - /** - * Constructor, for Writable - * @deprecated Used by Writables and Writables are going away. - */ - @Deprecated - public ClusterStatus() { - super(); - } - - public ClusterStatus(final String hbaseVersion, final String clusterid, - final Map servers, - final Collection deadServers, - final ServerName master, - final Collection backupMasters, - final Map rit, - final String[] masterCoprocessors, - final boolean balancerOn) { - this.hbaseVersion = hbaseVersion; - - this.liveServers = servers; - this.deadServers = deadServers; - this.master = master; - this.backupMasters = backupMasters; - this.intransition = rit; - this.clusterId = clusterid; - this.masterCoprocessors = masterCoprocessors; - this.balancerOn = balancerOn; - } - - /** - * @return the names of region servers on the dead list - */ - public Collection getDeadServerNames() { - return Collections.unmodifiableCollection(deadServers); - } - - /** - * @return the number of region servers in the cluster - */ - public int getServersSize() { - return liveServers.size(); - } - - /** - * @return the number of dead region servers in the cluster - */ - public int getDeadServers() { - return deadServers.size(); - } - - /** - * @return the average cluster load - */ - public double getAverageLoad() { - int load = getRegionsCount(); - return (double)load / (double)getServersSize(); - } - - /** - * @return the number of regions deployed on the cluster - */ - public int getRegionsCount() { - int count = 0; - for (Map.Entry e: this.liveServers.entrySet()) { - count += e.getValue().getNumberOfRegions(); - } - return count; - } - - /** - * @return the number of requests since last report - */ - public int getRequestsCount() { - int count = 0; - for (Map.Entry e: this.liveServers.entrySet()) { - count += e.getValue().getTotalNumberOfRequests(); - } - return count; - } - - /** - * @return the HBase version string as reported by the HMaster - */ - public String getHBaseVersion() { - return hbaseVersion; - } - - /** - * @see java.lang.Object#equals(java.lang.Object) - */ - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof ClusterStatus)) { - return false; - } - return (getVersion() == ((ClusterStatus)o).getVersion()) && - getHBaseVersion().equals(((ClusterStatus)o).getHBaseVersion()) && - this.liveServers.equals(((ClusterStatus)o).liveServers) && - this.deadServers.containsAll(((ClusterStatus)o).deadServers) && - Arrays.equals(this.masterCoprocessors, - ((ClusterStatus)o).masterCoprocessors) && - this.master.equals(((ClusterStatus)o).master) && - this.backupMasters.containsAll(((ClusterStatus)o).backupMasters); - } - - /** - * @see java.lang.Object#hashCode() - */ - public int hashCode() { - return VERSION + hbaseVersion.hashCode() + this.liveServers.hashCode() + - this.deadServers.hashCode() + this.master.hashCode() + - this.backupMasters.hashCode(); - } - - /** @return the object version number */ - public byte getVersion() { - return VERSION; - } - - // - // Getters - // - - /** - * Returns detailed region server information: A list of - * {@link ServerName}. 
- * @return region server information - * @deprecated Use {@link #getServers()} - */ - public Collection getServerInfo() { - return getServers(); - } - - public Collection getServers() { - return Collections.unmodifiableCollection(this.liveServers.keySet()); - } - - /** - * Returns detailed information about the current master {@link ServerName}. - * @return current master information if it exists - */ - public ServerName getMaster() { - return this.master; - } - - /** - * @return the number of backup masters in the cluster - */ - public int getBackupMastersSize() { - return this.backupMasters.size(); - } - - /** - * @return the names of backup masters - */ - public Collection getBackupMasters() { - return Collections.unmodifiableCollection(this.backupMasters); - } - - /** - * @param sn - * @return Server's load or null if not found. - */ - public ServerLoad getLoad(final ServerName sn) { - return this.liveServers.get(sn); - } - - public Map getRegionsInTransition() { - return this.intransition; - } - - public String getClusterId() { - return clusterId; - } - - public String[] getMasterCoprocessors() { - return masterCoprocessors; - } - - - public boolean isBalancerOn() { - return balancerOn; - } - - /** - * Convert a ClutserStatus to a protobuf ClusterStatus - * - * @return the protobuf ClusterStatus - */ - public ClusterStatusProtos.ClusterStatus convert() { - ClusterStatusProtos.ClusterStatus.Builder builder = ClusterStatusProtos.ClusterStatus.newBuilder(); - builder.setHbaseVersion(HBaseVersionFileContent.newBuilder().setVersion(getHBaseVersion())); - - for (Map.Entry entry : liveServers.entrySet()) { - LiveServerInfo.Builder lsi = - LiveServerInfo.newBuilder().setServer(ProtobufUtil.toServerName(entry.getKey())); - lsi.setServerLoad(entry.getValue().obtainServerLoadPB()); - builder.addLiveServers(lsi.build()); - } - for (ServerName deadServer : getDeadServerNames()) { - builder.addDeadServers(ProtobufUtil.toServerName(deadServer)); - } - for (Map.Entry rit : getRegionsInTransition().entrySet()) { - ClusterStatusProtos.RegionState rs = rit.getValue().convert(); - RegionSpecifier.Builder spec = - RegionSpecifier.newBuilder().setType(RegionSpecifierType.REGION_NAME); - spec.setValue(ByteString.copyFrom(Bytes.toBytes(rit.getKey()))); - - RegionInTransition pbRIT = - RegionInTransition.newBuilder().setSpec(spec.build()).setRegionState(rs).build(); - builder.addRegionsInTransition(pbRIT); - } - builder.setClusterId(new ClusterId(getClusterId()).convert()); - for (String coprocessor : getMasterCoprocessors()) { - builder.addMasterCoprocessors(HBaseProtos.Coprocessor.newBuilder().setName(coprocessor)); - } - builder.setMaster( - ProtobufUtil.toServerName(getMaster())); - for (ServerName backup : getBackupMasters()) { - builder.addBackupMasters(ProtobufUtil.toServerName(backup)); - } - builder.setBalancerOn(balancerOn); - return builder.build(); - } - - /** - * Convert a protobuf ClusterStatus to a ClusterStatus - * - * @param proto the protobuf ClusterStatus - * @return the converted ClusterStatus - */ - public static ClusterStatus convert(ClusterStatusProtos.ClusterStatus proto) { - Map servers = new HashMap(); - for (LiveServerInfo lsi : proto.getLiveServersList()) { - servers.put(ProtobufUtil.toServerName(lsi.getServer()), new ServerLoad(lsi.getServerLoad())); - } - Collection deadServers = new LinkedList(); - for (HBaseProtos.ServerName sn : proto.getDeadServersList()) { - deadServers.add(ProtobufUtil.toServerName(sn)); - } - Collection backupMasters = new LinkedList(); - for 
(HBaseProtos.ServerName sn : proto.getBackupMastersList()) { - backupMasters.add(ProtobufUtil.toServerName(sn)); - } - final Map rit = new HashMap(); - for (RegionInTransition region : proto.getRegionsInTransitionList()) { - String key = new String(region.getSpec().getValue().toByteArray()); - RegionState value = RegionState.convert(region.getRegionState()); - rit.put(key,value); - } - final int numMasterCoprocessors = proto.getMasterCoprocessorsCount(); - final String[] masterCoprocessors = new String[numMasterCoprocessors]; - for (int i = 0; i < numMasterCoprocessors; i++) { - masterCoprocessors[i] = proto.getMasterCoprocessors(i).getName(); - } - return new ClusterStatus(proto.getHbaseVersion().getVersion(), - ClusterId.convert(proto.getClusterId()).toString(),servers,deadServers, - ProtobufUtil.toServerName(proto.getMaster()),backupMasters,rit,masterCoprocessors, - proto.getBalancerOn()); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/Coprocessor.java hbase-server/src/main/java/org/apache/hadoop/hbase/Coprocessor.java deleted file mode 100644 index 88ecd2f..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/Coprocessor.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Coprocess interface. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public interface Coprocessor { - static final int VERSION = 1; - - /** Highest installation priority */ - static final int PRIORITY_HIGHEST = 0; - /** High (system) installation priority */ - static final int PRIORITY_SYSTEM = Integer.MAX_VALUE / 4; - /** Default installation priority for user coprocessors */ - static final int PRIORITY_USER = Integer.MAX_VALUE / 2; - /** Lowest installation priority */ - static final int PRIORITY_LOWEST = Integer.MAX_VALUE; - - /** - * Lifecycle state of a given coprocessor instance. - */ - public enum State { - UNINSTALLED, - INSTALLED, - STARTING, - ACTIVE, - STOPPING, - STOPPED - } - - // Interface - void start(CoprocessorEnvironment env) throws IOException; - - void stop(CoprocessorEnvironment env) throws IOException; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java hbase-server/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java deleted file mode 100644 index 3806426..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.client.HTableInterface; - -/** - * Coprocessor environment state. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public interface CoprocessorEnvironment { - - /** @return the Coprocessor interface version */ - public int getVersion(); - - /** @return the HBase version as a string (e.g. "0.21.0") */ - public String getHBaseVersion(); - - /** @return the loaded coprocessor instance */ - public Coprocessor getInstance(); - - /** @return the priority assigned to the loaded coprocessor */ - public int getPriority(); - - /** @return the load sequence number */ - public int getLoadSequence(); - - /** @return the configuration */ - public Configuration getConfiguration(); - - /** - * @return an interface for accessing the given table - * @throws IOException - */ - public HTableInterface getTable(byte[] tableName) throws IOException; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/DeserializationException.java hbase-server/src/main/java/org/apache/hadoop/hbase/DeserializationException.java deleted file mode 100644 index fa69f26..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/DeserializationException.java +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * Failed deserialization. 
- */ -@InterfaceAudience.Private -@SuppressWarnings("serial") -public class DeserializationException extends HBaseException { - public DeserializationException() { - super(); - } - - public DeserializationException(final String message) { - super(message); - } - - public DeserializationException(final String message, final Throwable t) { - super(message, t); - } - - public DeserializationException(final Throwable t) { - super(t); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java hbase-server/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java deleted file mode 100644 index 5d6be07..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Subclass if exception is not meant to be retried: e.g. - * {@link UnknownScannerException} - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class DoNotRetryIOException extends HBaseIOException { - - private static final long serialVersionUID = 1197446454511704139L; - - /** - * default constructor - */ - public DoNotRetryIOException() { - super(); - } - - /** - * @param message - */ - public DoNotRetryIOException(String message) { - super(message); - } - - /** - * @param message - * @param cause - */ - public DoNotRetryIOException(String message, Throwable cause) { - super(message, cause); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java hbase-server/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java deleted file mode 100644 index 76aae2a..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase; -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - - -/** - * Thrown during flush if the possibility snapshot content was not properly - * persisted into store files. Response should include replay of hlog content. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class DroppedSnapshotException extends IOException { - - private static final long serialVersionUID = -5463156580831677374L; - - /** - * @param msg - */ - public DroppedSnapshotException(String msg) { - super(msg); - } - - /** - * default constructor - */ - public DroppedSnapshotException() { - super(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/FailedSanityCheckException.java hbase-server/src/main/java/org/apache/hadoop/hbase/FailedSanityCheckException.java deleted file mode 100644 index b62d466..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/FailedSanityCheckException.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -/** - * Exception thrown if a mutation fails sanity checks. - */ -public class FailedSanityCheckException extends DoNotRetryIOException { - - private static final long serialVersionUID = 1788783640409186240L; - - /** - * default constructor - */ - public FailedSanityCheckException() { - super(); - } - - /** - * @param message - */ - public FailedSanityCheckException(String message) { - super(message); - } - - /** - * @param message - * @param cause - */ - public FailedSanityCheckException(String message, Throwable cause) { - super(message, cause); - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseException.java hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseException.java deleted file mode 100644 index 28fe337..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseException.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * Base checked exception in HBase. - * @see HBASE-5796 - */ -@SuppressWarnings("serial") -@InterfaceAudience.Private -public class HBaseException extends Exception { - public HBaseException() { - super(); - } - - public HBaseException(final String message) { - super(message); - } - - public HBaseException(final String message, final Throwable t) { - super(message, t); - } - - public HBaseException(final Throwable t) { - super(t); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseIOException.java hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseIOException.java deleted file mode 100644 index 193dc91..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseIOException.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * All hbase specific IOExceptions should be subclasses of HBaseIOException - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class HBaseIOException extends IOException { - - private static final long serialVersionUID = 1L; - - public HBaseIOException() { - super(); - } - - public HBaseIOException(String message) { - super(message); - } - - public HBaseIOException(String message, Throwable cause) { - super(message, cause); - } - - public HBaseIOException(Throwable cause) { - super(cause); - }} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java hbase-server/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java deleted file mode 100644 index 91a7f01..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ /dev/null @@ -1,1143 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema; -import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.WritableComparable; - -import com.google.common.base.Preconditions; -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * An HColumnDescriptor contains information about a column family such as the - * number of versions, compression settings, etc. - * - * It is used as input when creating a table or adding a column. Once set, the - * parameters that specify a column cannot be changed without deleting the - * column and recreating it. If there is data stored in the column, it will be - * deleted when the column is deleted. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class HColumnDescriptor implements WritableComparable { - // For future backward compatibility - - // Version 3 was when column names become byte arrays and when we picked up - // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82. - // Version 5 was when bloom filter descriptors were removed. - // Version 6 adds metadata as a map where keys and values are byte[]. - // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217) - // Version 8 -- reintroduction of bloom filters, changed from boolean to enum - // Version 9 -- add data block encoding - private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 9; - - // These constants are used as FileInfo keys - public static final String COMPRESSION = "COMPRESSION"; - public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT"; - public static final String ENCODE_ON_DISK = - "ENCODE_ON_DISK"; - public static final String DATA_BLOCK_ENCODING = - "DATA_BLOCK_ENCODING"; - public static final String BLOCKCACHE = "BLOCKCACHE"; - public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE"; - public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE"; - public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE"; - public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE"; - - /** - * Size of storefile/hfile 'blocks'. Default is {@link #DEFAULT_BLOCKSIZE}. - * Use smaller block sizes for faster random-access at expense of larger - * indices (more memory consumption). 
- */ - public static final String BLOCKSIZE = "BLOCKSIZE"; - - public static final String LENGTH = "LENGTH"; - public static final String TTL = "TTL"; - public static final String BLOOMFILTER = "BLOOMFILTER"; - public static final String FOREVER = "FOREVER"; - public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE"; - public static final String MIN_VERSIONS = "MIN_VERSIONS"; - public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS"; - - /** - * Default compression type. - */ - public static final String DEFAULT_COMPRESSION = - Compression.Algorithm.NONE.getName(); - - /** - * Default value of the flag that enables data block encoding on disk, as - * opposed to encoding in cache only. We encode blocks everywhere by default, - * as long as {@link #DATA_BLOCK_ENCODING} is not NONE. - */ - public static final boolean DEFAULT_ENCODE_ON_DISK = true; - - /** Default data block encoding algorithm. */ - public static final String DEFAULT_DATA_BLOCK_ENCODING = - DataBlockEncoding.NONE.toString(); - - /** - * Default number of versions of a record to keep. - */ - public static final int DEFAULT_VERSIONS = 3; - - /** - * Default is not to keep a minimum of versions. - */ - public static final int DEFAULT_MIN_VERSIONS = 0; - - /* - * Cache here the HCD value. - * Question: its OK to cache since when we're reenable, we create a new HCD? - */ - private volatile Integer blocksize = null; - - /** - * Default setting for whether to serve from memory or not. - */ - public static final boolean DEFAULT_IN_MEMORY = false; - - /** - * Default setting for preventing deleted from being collected immediately. - */ - public static final boolean DEFAULT_KEEP_DELETED = false; - - /** - * Default setting for whether to use a block cache or not. - */ - public static final boolean DEFAULT_BLOCKCACHE = true; - - /** - * Default setting for whether to cache data blocks on write if block caching - * is enabled. - */ - public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false; - - /** - * Default setting for whether to cache index blocks on write if block - * caching is enabled. - */ - public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false; - - /** - * Default size of blocks in files stored to the filesytem (hfiles). - */ - public static final int DEFAULT_BLOCKSIZE = HFile.DEFAULT_BLOCKSIZE; - - /** - * Default setting for whether or not to use bloomfilters. - */ - public static final String DEFAULT_BLOOMFILTER = BloomType.NONE.toString(); - - /** - * Default setting for whether to cache bloom filter blocks on write if block - * caching is enabled. - */ - public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false; - - /** - * Default time to live of cell contents. - */ - public static final int DEFAULT_TTL = HConstants.FOREVER; - - /** - * Default scope. - */ - public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL; - - /** - * Default setting for whether to evict cached blocks from the blockcache on - * close. 
- */ - public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false; - - private final static Map DEFAULT_VALUES - = new HashMap(); - private final static Set RESERVED_KEYWORDS - = new HashSet(); - static { - DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER); - DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE)); - DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS)); - DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS)); - DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION); - DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL)); - DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE)); - DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY)); - DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE)); - DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED)); - DEFAULT_VALUES.put(ENCODE_ON_DISK, String.valueOf(DEFAULT_ENCODE_ON_DISK)); - DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING)); - DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE)); - DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE)); - DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE)); - DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE)); - for (String s : DEFAULT_VALUES.keySet()) { - RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s))); - } - } - - private static final int UNINITIALIZED = -1; - - // Column family name - private byte [] name; - - // Column metadata - protected final Map values = - new HashMap(); - - /* - * Cache the max versions rather than calculate it every time. - */ - private int cachedMaxVersions = UNINITIALIZED; - - /** - * Default constructor. Must be present for Writable. - * @deprecated Used by Writables and Writables are going away. - */ - @Deprecated - // Make this private rather than remove after deprecation period elapses. Its needed by pb - // deserializations. - public HColumnDescriptor() { - this.name = null; - } - - /** - * Construct a column descriptor specifying only the family name - * The other attributes are defaulted. - * - * @param familyName Column family name. Must be 'printable' -- digit or - * letter -- and may not contain a : - */ - public HColumnDescriptor(final String familyName) { - this(Bytes.toBytes(familyName)); - } - - /** - * Construct a column descriptor specifying only the family name - * The other attributes are defaulted. - * - * @param familyName Column family name. Must be 'printable' -- digit or - * letter -- and may not contain a : - */ - public HColumnDescriptor(final byte [] familyName) { - this (familyName == null || familyName.length <= 0? - HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS, - DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE, - DEFAULT_TTL, DEFAULT_BLOOMFILTER); - } - - /** - * Constructor. - * Makes a deep copy of the supplied descriptor. - * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor. - * @param desc The descriptor. - */ - public HColumnDescriptor(HColumnDescriptor desc) { - super(); - this.name = desc.name.clone(); - for (Map.Entry e: - desc.values.entrySet()) { - this.values.put(e.getKey(), e.getValue()); - } - setMaxVersions(desc.getMaxVersions()); - } - - /** - * Constructor - * @param familyName Column family name. 
Must be 'printable' -- digit or - * letter -- and may not contain a : - * @param maxVersions Maximum number of versions to keep - * @param compression Compression type - * @param inMemory If true, column data should be kept in an HRegionServer's - * cache - * @param blockCacheEnabled If true, MapFile blocks should be cached - * @param timeToLive Time-to-live of cell contents, in seconds - * (use HConstants.FOREVER for unlimited TTL) - * @param bloomFilter Bloom filter type for this column - * - * @throws IllegalArgumentException if passed a family name that is made of - * other than 'word' characters: i.e. [a-zA-Z_0-9] or contains - * a : - * @throws IllegalArgumentException if the number of versions is <= 0 - * @deprecated use {@link #HColumnDescriptor(String)} and setters - */ - @Deprecated - public HColumnDescriptor(final byte [] familyName, final int maxVersions, - final String compression, final boolean inMemory, - final boolean blockCacheEnabled, - final int timeToLive, final String bloomFilter) { - this(familyName, maxVersions, compression, inMemory, blockCacheEnabled, - DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE); - } - - /** - * Constructor - * @param familyName Column family name. Must be 'printable' -- digit or - * letter -- and may not contain a : - * @param maxVersions Maximum number of versions to keep - * @param compression Compression type - * @param inMemory If true, column data should be kept in an HRegionServer's - * cache - * @param blockCacheEnabled If true, MapFile blocks should be cached - * @param blocksize Block size to use when writing out storefiles. Use - * smaller block sizes for faster random-access at expense of larger indices - * (more memory consumption). Default is usually 64k. - * @param timeToLive Time-to-live of cell contents, in seconds - * (use HConstants.FOREVER for unlimited TTL) - * @param bloomFilter Bloom filter type for this column - * @param scope The scope tag for this column - * - * @throws IllegalArgumentException if passed a family name that is made of - * other than 'word' characters: i.e. [a-zA-Z_0-9] or contains - * a : - * @throws IllegalArgumentException if the number of versions is <= 0 - * @deprecated use {@link #HColumnDescriptor(String)} and setters - */ - @Deprecated - public HColumnDescriptor(final byte [] familyName, final int maxVersions, - final String compression, final boolean inMemory, - final boolean blockCacheEnabled, final int blocksize, - final int timeToLive, final String bloomFilter, final int scope) { - this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED, - compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING, - inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter, - scope); - } - - /** - * Constructor - * @param familyName Column family name. Must be 'printable' -- digit or - * letter -- and may not contain a : - * @param minVersions Minimum number of versions to keep - * @param maxVersions Maximum number of versions to keep - * @param keepDeletedCells Whether to retain deleted cells until they expire - * up to maxVersions versions. - * @param compression Compression type - * @param encodeOnDisk whether to use the specified data block encoding - * on disk. If false, the encoding will be used in cache only. 
- * @param dataBlockEncoding data block encoding - * @param inMemory If true, column data should be kept in an HRegionServer's - * cache - * @param blockCacheEnabled If true, MapFile blocks should be cached - * @param blocksize Block size to use when writing out storefiles. Use - * smaller blocksizes for faster random-access at expense of larger indices - * (more memory consumption). Default is usually 64k. - * @param timeToLive Time-to-live of cell contents, in seconds - * (use HConstants.FOREVER for unlimited TTL) - * @param bloomFilter Bloom filter type for this column - * @param scope The scope tag for this column - * - * @throws IllegalArgumentException if passed a family name that is made of - * other than 'word' characters: i.e. [a-zA-Z_0-9] or contains - * a : - * @throws IllegalArgumentException if the number of versions is <= 0 - * @deprecated use {@link #HColumnDescriptor(String)} and setters - */ - @Deprecated - public HColumnDescriptor(final byte[] familyName, final int minVersions, - final int maxVersions, final boolean keepDeletedCells, - final String compression, final boolean encodeOnDisk, - final String dataBlockEncoding, final boolean inMemory, - final boolean blockCacheEnabled, final int blocksize, - final int timeToLive, final String bloomFilter, final int scope) { - isLegalFamilyName(familyName); - this.name = familyName; - - if (maxVersions <= 0) { - // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions". - // Until there is support, consider 0 or < 0 -- a configuration error. - throw new IllegalArgumentException("Maximum versions must be positive"); - } - - if (minVersions > 0) { - if (timeToLive == HConstants.FOREVER) { - throw new IllegalArgumentException("Minimum versions requires TTL."); - } - if (minVersions >= maxVersions) { - throw new IllegalArgumentException("Minimum versions must be < " - + "maximum versions."); - } - } - - setMaxVersions(maxVersions); - setMinVersions(minVersions); - setKeepDeletedCells(keepDeletedCells); - setInMemory(inMemory); - setBlockCacheEnabled(blockCacheEnabled); - setTimeToLive(timeToLive); - setCompressionType(Compression.Algorithm. - valueOf(compression.toUpperCase())); - setEncodeOnDisk(encodeOnDisk); - setDataBlockEncoding(DataBlockEncoding. - valueOf(dataBlockEncoding.toUpperCase())); - setBloomFilterType(BloomType. - valueOf(bloomFilter.toUpperCase())); - setBlocksize(blocksize); - setScope(scope); - } - - /** - * @param b Family name. - * @return b - * @throws IllegalArgumentException If not null and not a legitimate family - * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because - * b can be null when deserializing). Cannot start with a '.' - * either. Also Family can not be an empty value. - */ - public static byte [] isLegalFamilyName(final byte [] b) { - if (b == null) { - return b; - } - Preconditions.checkArgument(b.length != 0, "Family name can not be empty"); - if (b[0] == '.') { - throw new IllegalArgumentException("Family names cannot start with a " + - "period: " + Bytes.toString(b)); - } - for (int i = 0; i < b.length; i++) { - if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') { - throw new IllegalArgumentException("Illegal character <" + b[i] + - ">. 
Family names cannot contain control characters or colons: " + - Bytes.toString(b)); - } - } - return b; - } - - /** - * @return Name of this column family - */ - public byte [] getName() { - return name; - } - - /** - * @return Name of this column family - */ - public String getNameAsString() { - return Bytes.toString(this.name); - } - - /** - * @param key The key. - * @return The value. - */ - public byte[] getValue(byte[] key) { - ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key)); - if (ibw == null) - return null; - return ibw.get(); - } - - /** - * @param key The key. - * @return The value as a string. - */ - public String getValue(String key) { - byte[] value = getValue(Bytes.toBytes(key)); - if (value == null) - return null; - return Bytes.toString(value); - } - - /** - * @return All values. - */ - public Map getValues() { - // shallow pointer copy - return Collections.unmodifiableMap(values); - } - - /** - * @param key The key. - * @param value The value. - * @return this (for chained invocation) - */ - public HColumnDescriptor setValue(byte[] key, byte[] value) { - values.put(new ImmutableBytesWritable(key), - new ImmutableBytesWritable(value)); - return this; - } - - /** - * @param key Key whose key and value we're to remove from HCD parameters. - */ - public void remove(final byte [] key) { - values.remove(new ImmutableBytesWritable(key)); - } - - /** - * @param key The key. - * @param value The value. - * @return this (for chained invocation) - */ - public HColumnDescriptor setValue(String key, String value) { - if (value == null) { - remove(Bytes.toBytes(key)); - } else { - setValue(Bytes.toBytes(key), Bytes.toBytes(value)); - } - return this; - } - - /** @return compression type being used for the column family */ - public Compression.Algorithm getCompression() { - String n = getValue(COMPRESSION); - if (n == null) { - return Compression.Algorithm.NONE; - } - return Compression.Algorithm.valueOf(n.toUpperCase()); - } - - /** @return compression type being used for the column family for major - compression */ - public Compression.Algorithm getCompactionCompression() { - String n = getValue(COMPRESSION_COMPACT); - if (n == null) { - return getCompression(); - } - return Compression.Algorithm.valueOf(n.toUpperCase()); - } - - /** @return maximum number of versions */ - public int getMaxVersions() { - if (this.cachedMaxVersions == UNINITIALIZED) { - String v = getValue(HConstants.VERSIONS); - this.cachedMaxVersions = Integer.parseInt(v); - } - return this.cachedMaxVersions; - } - - /** - * @param maxVersions maximum number of versions - * @return this (for chained invocation) - */ - public HColumnDescriptor setMaxVersions(int maxVersions) { - setValue(HConstants.VERSIONS, Integer.toString(maxVersions)); - cachedMaxVersions = maxVersions; - return this; - } - - /** - * @return The storefile/hfile blocksize for this column family. - */ - public synchronized int getBlocksize() { - if (this.blocksize == null) { - String value = getValue(BLOCKSIZE); - this.blocksize = (value != null)? - Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE); - } - return this.blocksize.intValue(); - } - - /** - * @param s Blocksize to use when writing out storefiles/hfiles on this - * column family. - * @return this (for chained invocation) - */ - public HColumnDescriptor setBlocksize(int s) { - setValue(BLOCKSIZE, Integer.toString(s)); - this.blocksize = null; - return this; - } - - /** - * @return Compression type setting. 
- */ - public Compression.Algorithm getCompressionType() { - return getCompression(); - } - - /** - * Compression types supported in hbase. - * LZO is not bundled as part of the hbase distribution. - * See LZO Compression - * for how to enable it. - * @param type Compression type setting. - * @return this (for chained invocation) - */ - public HColumnDescriptor setCompressionType(Compression.Algorithm type) { - return setValue(COMPRESSION, type.getName().toUpperCase()); - } - - /** @return data block encoding algorithm used on disk */ - public DataBlockEncoding getDataBlockEncodingOnDisk() { - String encodeOnDiskStr = getValue(ENCODE_ON_DISK); - boolean encodeOnDisk; - if (encodeOnDiskStr == null) { - encodeOnDisk = DEFAULT_ENCODE_ON_DISK; - } else { - encodeOnDisk = Boolean.valueOf(encodeOnDiskStr); - } - - if (!encodeOnDisk) { - // No encoding on disk. - return DataBlockEncoding.NONE; - } - return getDataBlockEncoding(); - } - - /** - * Set the flag indicating that we only want to encode data block in cache - * but not on disk. - * @return this (for chained invocation) - */ - public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) { - return setValue(ENCODE_ON_DISK, String.valueOf(encodeOnDisk)); - } - - /** - * @return the data block encoding algorithm used in block cache and - * optionally on disk - */ - public DataBlockEncoding getDataBlockEncoding() { - String type = getValue(DATA_BLOCK_ENCODING); - if (type == null) { - type = DEFAULT_DATA_BLOCK_ENCODING; - } - return DataBlockEncoding.valueOf(type); - } - - /** - * Set data block encoding algorithm used in block cache. - * @param type What kind of data block encoding will be used. - * @return this (for chained invocation) - */ - public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) { - String name; - if (type != null) { - name = type.toString(); - } else { - name = DataBlockEncoding.NONE.toString(); - } - return setValue(DATA_BLOCK_ENCODING, name); - } - - /** - * @return Compression type setting. - */ - public Compression.Algorithm getCompactionCompressionType() { - return getCompactionCompression(); - } - - /** - * Compression types supported in hbase. - * LZO is not bundled as part of the hbase distribution. - * See LZO Compression - * for how to enable it. - * @param type Compression type setting. - * @return this (for chained invocation) - */ - public HColumnDescriptor setCompactionCompressionType( - Compression.Algorithm type) { - return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase()); - } - - /** - * @return True if we are to keep all in use HRegionServer cache. - */ - public boolean isInMemory() { - String value = getValue(HConstants.IN_MEMORY); - if (value != null) - return Boolean.valueOf(value).booleanValue(); - return DEFAULT_IN_MEMORY; - } - - /** - * @param inMemory True if we are to keep all values in the HRegionServer - * cache - * @return this (for chained invocation) - */ - public HColumnDescriptor setInMemory(boolean inMemory) { - return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory)); - } - - public boolean getKeepDeletedCells() { - String value = getValue(KEEP_DELETED_CELLS); - if (value != null) { - return Boolean.valueOf(value).booleanValue(); - } - return DEFAULT_KEEP_DELETED; - } - - /** - * @param keepDeletedCells True if deleted rows should not be collected - * immediately. 
- * @return this (for chained invocation) - */ - public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) { - return setValue(KEEP_DELETED_CELLS, Boolean.toString(keepDeletedCells)); - } - - /** - * @return Time-to-live of cell contents, in seconds. - */ - public int getTimeToLive() { - String value = getValue(TTL); - return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL; - } - - /** - * @param timeToLive Time-to-live of cell contents, in seconds. - * @return this (for chained invocation) - */ - public HColumnDescriptor setTimeToLive(int timeToLive) { - return setValue(TTL, Integer.toString(timeToLive)); - } - - /** - * @return The minimum number of versions to keep. - */ - public int getMinVersions() { - String value = getValue(MIN_VERSIONS); - return (value != null)? Integer.valueOf(value).intValue(): 0; - } - - /** - * @param minVersions The minimum number of versions to keep. - * (used when timeToLive is set) - * @return this (for chained invocation) - */ - public HColumnDescriptor setMinVersions(int minVersions) { - return setValue(MIN_VERSIONS, Integer.toString(minVersions)); - } - - /** - * @return True if MapFile blocks should be cached. - */ - public boolean isBlockCacheEnabled() { - String value = getValue(BLOCKCACHE); - if (value != null) - return Boolean.valueOf(value).booleanValue(); - return DEFAULT_BLOCKCACHE; - } - - /** - * @param blockCacheEnabled True if MapFile blocks should be cached. - * @return this (for chained invocation) - */ - public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { - return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled)); - } - - /** - * @return bloom filter type used for new StoreFiles in ColumnFamily - */ - public BloomType getBloomFilterType() { - String n = getValue(BLOOMFILTER); - if (n == null) { - n = DEFAULT_BLOOMFILTER; - } - return BloomType.valueOf(n.toUpperCase()); - } - - /** - * @param bt bloom filter type - * @return this (for chained invocation) - */ - public HColumnDescriptor setBloomFilterType(final BloomType bt) { - return setValue(BLOOMFILTER, bt.toString()); - } - - /** - * @return the scope tag - */ - public int getScope() { - String value = getValue(REPLICATION_SCOPE); - if (value != null) { - return Integer.valueOf(value).intValue(); - } - return DEFAULT_REPLICATION_SCOPE; - } - - /** - * @param scope the scope tag - * @return this (for chained invocation) - */ - public HColumnDescriptor setScope(int scope) { - return setValue(REPLICATION_SCOPE, Integer.toString(scope)); - } - - /** - * @return true if we should cache data blocks on write - */ - public boolean shouldCacheDataOnWrite() { - String value = getValue(CACHE_DATA_ON_WRITE); - if (value != null) { - return Boolean.valueOf(value).booleanValue(); - } - return DEFAULT_CACHE_DATA_ON_WRITE; - } - - /** - * @param value true if we should cache data blocks on write - * @return this (for chained invocation) - */ - public HColumnDescriptor setCacheDataOnWrite(boolean value) { - return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value)); - } - - /** - * @return true if we should cache index blocks on write - */ - public boolean shouldCacheIndexesOnWrite() { - String value = getValue(CACHE_INDEX_ON_WRITE); - if (value != null) { - return Boolean.valueOf(value).booleanValue(); - } - return DEFAULT_CACHE_INDEX_ON_WRITE; - } - - /** - * @param value true if we should cache index blocks on write - * @return this (for chained invocation) - */ - public HColumnDescriptor setCacheIndexesOnWrite(boolean value) { - return 
setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value)); - } - - /** - * @return true if we should cache bloomfilter blocks on write - */ - public boolean shouldCacheBloomsOnWrite() { - String value = getValue(CACHE_BLOOMS_ON_WRITE); - if (value != null) { - return Boolean.valueOf(value).booleanValue(); - } - return DEFAULT_CACHE_BLOOMS_ON_WRITE; - } - - /** - * @param value true if we should cache bloomfilter blocks on write - * @return this (for chained invocation) - */ - public HColumnDescriptor setCacheBloomsOnWrite(boolean value) { - return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value)); - } - - /** - * @return true if we should evict cached blocks from the blockcache on - * close - */ - public boolean shouldEvictBlocksOnClose() { - String value = getValue(EVICT_BLOCKS_ON_CLOSE); - if (value != null) { - return Boolean.valueOf(value).booleanValue(); - } - return DEFAULT_EVICT_BLOCKS_ON_CLOSE; - } - - /** - * @param value true if we should evict cached blocks from the blockcache on - * close - * @return this (for chained invocation) - */ - public HColumnDescriptor setEvictBlocksOnClose(boolean value) { - return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value)); - } - - /** - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - StringBuilder s = new StringBuilder(); - s.append('{'); - s.append(HConstants.NAME); - s.append(" => '"); - s.append(Bytes.toString(name)); - s.append("'"); - s.append(getValues(true)); - s.append('}'); - return s.toString(); - } - - /** - * @return Column family descriptor with only the customized attributes. - */ - public String toStringCustomizedValues() { - StringBuilder s = new StringBuilder(); - s.append('{'); - s.append(HConstants.NAME); - s.append(" => '"); - s.append(Bytes.toString(name)); - s.append("'"); - s.append(getValues(false)); - s.append('}'); - return s.toString(); - } - - private StringBuilder getValues(boolean printDefaults) { - StringBuilder s = new StringBuilder(); - - boolean hasConfigKeys = false; - - // print all reserved keys first - for (ImmutableBytesWritable k : values.keySet()) { - if (!RESERVED_KEYWORDS.contains(k)) { - hasConfigKeys = true; - continue; - } - String key = Bytes.toString(k.get()); - String value = Bytes.toString(values.get(k).get()); - if (printDefaults - || !DEFAULT_VALUES.containsKey(key) - || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { - s.append(", "); - s.append(key); - s.append(" => "); - s.append('\'').append(value).append('\''); - } - } - - // print all non-reserved, advanced config keys as a separate subset - if (hasConfigKeys) { - s.append(", "); - s.append(HConstants.CONFIG).append(" => "); - s.append('{'); - boolean printComma = false; - for (ImmutableBytesWritable k : values.keySet()) { - if (RESERVED_KEYWORDS.contains(k)) { - continue; - } - String key = Bytes.toString(k.get()); - String value = Bytes.toString(values.get(k).get()); - if (printComma) { - s.append(", "); - } - printComma = true; - s.append('\'').append(key).append('\''); - s.append(" => "); - s.append('\'').append(value).append('\''); - } - s.append('}'); - } - return s; - } - - public static Map getDefaultValues() { - return Collections.unmodifiableMap(DEFAULT_VALUES); - } - - /** - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (!(obj instanceof HColumnDescriptor)) { - return false; - } - return compareTo((HColumnDescriptor)obj) == 0; - } - 
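
(For reference while reviewing: a minimal, illustrative sketch of how the HColumnDescriptor API being moved out of hbase-server is typically configured. This snippet is not part of the patch; the class name ColumnFamilyExample, the family name "cf", and the chosen values are arbitrary examples, and only constructors and setters declared in the file above are used.)

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.regionserver.BloomType;

    public class ColumnFamilyExample {
      public static HColumnDescriptor exampleFamily() {
        // Family names must be 'printable' and may not contain ':' (see isLegalFamilyName above).
        HColumnDescriptor family = new HColumnDescriptor("cf");
        family.setMaxVersions(1);                            // keep a single version per cell
        family.setCompressionType(Compression.Algorithm.GZ); // gzip-compress store files
        family.setBloomFilterType(BloomType.ROW);            // row-level bloom filter
        family.setBlocksize(64 * 1024);                      // 64k hfile blocks (the default)
        family.setTimeToLive(7 * 24 * 60 * 60);              // expire cells after one week
        family.setInMemory(true);                            // keep this family in the regionserver cache
        return family;
      }
    }

As the class javadoc above notes, these parameters cannot be changed once the column family holds data without deleting and recreating the family.
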
- /** - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - int result = Bytes.hashCode(this.name); - result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode(); - result ^= values.hashCode(); - return result; - } - - /** - * @deprecated Writables are going away. Use pb {@link #parseFrom(byte[])} instead. - */ - @Deprecated - public void readFields(DataInput in) throws IOException { - int version = in.readByte(); - if (version < 6) { - if (version <= 2) { - Text t = new Text(); - t.readFields(in); - this.name = t.getBytes(); -// if(KeyValue.getFamilyDelimiterIndex(this.name, 0, this.name.length) -// > 0) { -// this.name = stripColon(this.name); -// } - } else { - this.name = Bytes.readByteArray(in); - } - this.values.clear(); - setMaxVersions(in.readInt()); - int ordinal = in.readInt(); - setCompressionType(Compression.Algorithm.values()[ordinal]); - setInMemory(in.readBoolean()); - setBloomFilterType(in.readBoolean() ? BloomType.ROW : BloomType.NONE); - if (getBloomFilterType() != BloomType.NONE && version < 5) { - // If a bloomFilter is enabled and the column descriptor is less than - // version 5, we need to skip over it to read the rest of the column - // descriptor. There are no BloomFilterDescriptors written to disk for - // column descriptors with a version number >= 5 - throw new UnsupportedClassVersionError(this.getClass().getName() + - " does not support backward compatibility with versions older " + - "than version 5"); - } - if (version > 1) { - setBlockCacheEnabled(in.readBoolean()); - } - if (version > 2) { - setTimeToLive(in.readInt()); - } - } else { - // version 6+ - this.name = Bytes.readByteArray(in); - this.values.clear(); - int numValues = in.readInt(); - for (int i = 0; i < numValues; i++) { - ImmutableBytesWritable key = new ImmutableBytesWritable(); - ImmutableBytesWritable value = new ImmutableBytesWritable(); - key.readFields(in); - value.readFields(in); - - // in version 8, the BloomFilter setting changed from bool to enum - if (version < 8 && Bytes.toString(key.get()).equals(BLOOMFILTER)) { - value.set(Bytes.toBytes( - Boolean.getBoolean(Bytes.toString(value.get())) - ? BloomType.ROW.toString() - : BloomType.NONE.toString())); - } - - values.put(key, value); - } - if (version == 6) { - // Convert old values. - setValue(COMPRESSION, Compression.Algorithm.NONE.getName()); - } - String value = getValue(HConstants.VERSIONS); - this.cachedMaxVersions = (value != null)? - Integer.valueOf(value).intValue(): DEFAULT_VERSIONS; - } - } - - /** - * @deprecated Writables are going away. Use {@link #toByteArray()} instead. 
- */ - @Deprecated - public void write(DataOutput out) throws IOException { - out.writeByte(COLUMN_DESCRIPTOR_VERSION); - Bytes.writeByteArray(out, this.name); - out.writeInt(values.size()); - for (Map.Entry e: - values.entrySet()) { - e.getKey().write(out); - e.getValue().write(out); - } - } - - // Comparable - - public int compareTo(HColumnDescriptor o) { - int result = Bytes.compareTo(this.name, o.getName()); - if (result == 0) { - // punt on comparison for ordering, just calculate difference - result = this.values.hashCode() - o.values.hashCode(); - if (result < 0) - result = -1; - else if (result > 0) - result = 1; - } - return result; - } - - /** - * @return This instance serialized with pb with pb magic prefix - * @see #parseFrom(byte[]) - */ - public byte [] toByteArray() { - return ProtobufUtil.prependPBMagic(convert().toByteArray()); - } - - /** - * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix - * @return An instance of {@link HColumnDescriptor} made from bytes - * @throws DeserializationException - * @see #toByteArray() - */ - public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException { - if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic"); - int pblen = ProtobufUtil.lengthOfPBMagic(); - ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder(); - ColumnFamilySchema cfs = null; - try { - cfs = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return convert(cfs); - } - - /** - * @param cfs - * @return An {@link HColumnDescriptor} made from the passed in cfs - */ - public static HColumnDescriptor convert(final ColumnFamilySchema cfs) { - // Use the empty constructor so we preserve the initial values set on construction for things - // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for - // unrelated-looking test failures that are hard to trace back to here. - HColumnDescriptor hcd = new HColumnDescriptor(); - hcd.name = cfs.getName().toByteArray(); - for (ColumnFamilySchema.Attribute a: cfs.getAttributesList()) { - hcd.setValue(a.getName().toByteArray(), a.getValue().toByteArray()); - } - return hcd; - } - - /** - * @return Convert this instance to a the pb column family type - */ - public ColumnFamilySchema convert() { - ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder(); - builder.setName(ByteString.copyFrom(getName())); - for (Map.Entry e: this.values.entrySet()) { - ColumnFamilySchema.Attribute.Builder aBuilder = ColumnFamilySchema.Attribute.newBuilder(); - aBuilder.setName(ByteString.copyFrom(e.getKey().get())); - aBuilder.setValue(ByteString.copyFrom(e.getValue().get())); - builder.addAttributes(aBuilder.build()); - } - return builder.build(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java deleted file mode 100644 index 8bdb603..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ /dev/null @@ -1,1094 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.ByteArrayInputStream; -import java.io.DataInput; -import java.io.DataInputStream; -import java.io.DataOutput; -import java.io.EOFException; -import java.io.IOException; -import java.io.SequenceInputStream; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.JenkinsHash; -import org.apache.hadoop.hbase.util.MD5Hash; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.PairOfSameType; -import org.apache.hadoop.io.DataInputBuffer; - -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * HRegion information. - * Contains HRegion id, start and end keys, a reference to this HRegions' table descriptor, etc. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class HRegionInfo implements Comparable { - /* - * There are two versions associated with HRegionInfo: HRegionInfo.VERSION and - * HConstants.META_VERSION. HRegionInfo.VERSION indicates the data structure's versioning - * while HConstants.META_VERSION indicates the versioning of the serialized HRIs stored in - * the META table. - * - * Pre-0.92: - * HRI.VERSION == 0 and HConstants.META_VERSION does not exist (is not stored at META table) - * HRegionInfo had an HTableDescriptor reference inside it. - * HRegionInfo is serialized as Writable to META table. - * For 0.92.x and 0.94.x: - * HRI.VERSION == 1 and HConstants.META_VERSION == 0 - * HRI no longer has HTableDescriptor in it. - * HRI is serialized as Writable to META table. - * For 0.96.x: - * HRI.VERSION == 1 and HConstants.META_VERSION == 1 - * HRI data structure is the same as 0.92 and 0.94 - * HRI is serialized as PB to META table. - * - * Versioning of HRegionInfo is deprecated. HRegionInfo does protobuf - * serialization using RegionInfo class, which has it's own versioning. - */ - @Deprecated - public static final byte VERSION = 1; - private static final Log LOG = LogFactory.getLog(HRegionInfo.class); - - /** - * The new format for a region name contains its encodedName at the end. - * The encoded name also serves as the directory name for the region - * in the filesystem. - * - * New region name format: - * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. 
- * where, - * <encodedName> is a hex version of the MD5 hash of - * <tablename>,<startkey>,<regionIdTimestamp> - * - * The old region name format: - * <tablename>,<startkey>,<regionIdTimestamp> - * For region names in the old format, the encoded name is a 32-bit - * JenkinsHash integer value (in its decimal notation, string form). - *
      - * **NOTE** - * - * ROOT, the first META region, and regions created by an older - * version of HBase (0.20 or prior) will continue to use the - * old region name format. - */ - - /** Separator used to demarcate the encodedName in a region name - * in the new format. See description on new format above. - */ - private static final int ENC_SEPARATOR = '.'; - public static final int MD5_HEX_LENGTH = 32; - - /** - * Does region name contain its encoded name? - * @param regionName region name - * @return boolean indicating if this a new format region - * name which contains its encoded name. - */ - private static boolean hasEncodedName(final byte[] regionName) { - // check if region name ends in ENC_SEPARATOR - if ((regionName.length >= 1) - && (regionName[regionName.length - 1] == ENC_SEPARATOR)) { - // region name is new format. it contains the encoded name. - return true; - } - return false; - } - - /** - * @param regionName - * @return the encodedName - */ - public static String encodeRegionName(final byte [] regionName) { - String encodedName; - if (hasEncodedName(regionName)) { - // region is in new format: - // ,,/encodedName/ - encodedName = Bytes.toString(regionName, - regionName.length - MD5_HEX_LENGTH - 1, - MD5_HEX_LENGTH); - } else { - // old format region name. ROOT and first META region also - // use this format.EncodedName is the JenkinsHash value. - int hashVal = Math.abs(JenkinsHash.getInstance().hash(regionName, - regionName.length, 0)); - encodedName = String.valueOf(hashVal); - } - return encodedName; - } - - /** - * Use logging. - * @param encodedRegionName The encoded regionname. - * @return -ROOT- if passed 70236052 or - * .META. if passed 1028785192 else returns - * encodedRegionName - */ - public static String prettyPrint(final String encodedRegionName) { - if (encodedRegionName.equals("70236052")) { - return encodedRegionName + "/-ROOT-"; - } else if (encodedRegionName.equals("1028785192")) { - return encodedRegionName + "/.META."; - } - return encodedRegionName; - } - - /** HRegionInfo for root region */ - public static final HRegionInfo ROOT_REGIONINFO = - new HRegionInfo(0L, Bytes.toBytes("-ROOT-")); - - /** HRegionInfo for first meta region */ - public static final HRegionInfo FIRST_META_REGIONINFO = - new HRegionInfo(1L, Bytes.toBytes(".META.")); - - private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY; - // This flag is in the parent of a split while the parent is still referenced - // by daughter regions. We USED to set this flag when we disabled a table - // but now table state is kept up in zookeeper as of 0.90.0 HBase. - private boolean offLine = false; - private long regionId = -1; - private transient byte [] regionName = HConstants.EMPTY_BYTE_ARRAY; - private String regionNameStr = ""; - private boolean split = false; - private byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; - private int hashCode = -1; - //TODO: Move NO_HASH to HStoreFile which is really the only place it is used. 
- public static final String NO_HASH = null; - private volatile String encodedName = NO_HASH; - private byte [] encodedNameAsBytes = null; - - // Current TableName - private byte[] tableName = null; - - private void setHashCode() { - int result = Arrays.hashCode(this.regionName); - result ^= this.regionId; - result ^= Arrays.hashCode(this.startKey); - result ^= Arrays.hashCode(this.endKey); - result ^= Boolean.valueOf(this.offLine).hashCode(); - result ^= Arrays.hashCode(this.tableName); - this.hashCode = result; - } - - - /** - * Private constructor used constructing HRegionInfo for the catalog root and - * first meta regions - */ - private HRegionInfo(long regionId, byte[] tableName) { - super(); - this.regionId = regionId; - this.tableName = tableName.clone(); - // Note: Root & First Meta regions names are still in old format - this.regionName = createRegionName(tableName, null, - regionId, false); - this.regionNameStr = Bytes.toStringBinary(this.regionName); - setHashCode(); - } - - /** Default constructor - creates empty object - * @deprecated Used by Writables and Writables are going away. - */ - @Deprecated - public HRegionInfo() { - super(); - } - - public HRegionInfo(final byte[] tableName) { - this(tableName, null, null); - } - - /** - * Construct HRegionInfo with explicit parameters - * - * @param tableName the table name - * @param startKey first key in region - * @param endKey end of key range - * @throws IllegalArgumentException - */ - public HRegionInfo(final byte[] tableName, final byte[] startKey, final byte[] endKey) - throws IllegalArgumentException { - this(tableName, startKey, endKey, false); - } - - - /** - * Construct HRegionInfo with explicit parameters - * - * @param tableName the table descriptor - * @param startKey first key in region - * @param endKey end of key range - * @param split true if this region has split and we have daughter regions - * regions that may or may not hold references to this region. - * @throws IllegalArgumentException - */ - public HRegionInfo(final byte[] tableName, final byte[] startKey, final byte[] endKey, - final boolean split) - throws IllegalArgumentException { - this(tableName, startKey, endKey, split, System.currentTimeMillis()); - } - - - /** - * Construct HRegionInfo with explicit parameters - * - * @param tableName the table descriptor - * @param startKey first key in region - * @param endKey end of key range - * @param split true if this region has split and we have daughter regions - * regions that may or may not hold references to this region. - * @param regionid Region id to use. - * @throws IllegalArgumentException - */ - public HRegionInfo(final byte[] tableName, final byte[] startKey, - final byte[] endKey, final boolean split, final long regionid) - throws IllegalArgumentException { - - super(); - if (tableName == null) { - throw new IllegalArgumentException("tableName cannot be null"); - } - this.tableName = tableName.clone(); - this.offLine = false; - this.regionId = regionid; - - this.regionName = createRegionName(this.tableName, startKey, regionId, true); - - this.regionNameStr = Bytes.toStringBinary(this.regionName); - this.split = split; - this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone(); - this.startKey = startKey == null? 
- HConstants.EMPTY_START_ROW: startKey.clone(); - this.tableName = tableName.clone(); - setHashCode(); - } - - /** - * Costruct a copy of another HRegionInfo - * - * @param other - */ - public HRegionInfo(HRegionInfo other) { - super(); - this.endKey = other.getEndKey(); - this.offLine = other.isOffline(); - this.regionId = other.getRegionId(); - this.regionName = other.getRegionName(); - this.regionNameStr = Bytes.toStringBinary(this.regionName); - this.split = other.isSplit(); - this.startKey = other.getStartKey(); - this.hashCode = other.hashCode(); - this.encodedName = other.getEncodedName(); - this.tableName = other.tableName; - } - - - /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param regionid Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). - * @return Region name made of passed tableName, startKey and id - */ - public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final long regionid, boolean newFormat) { - return createRegionName(tableName, startKey, Long.toString(regionid), newFormat); - } - - /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). - * @return Region name made of passed tableName, startKey and id - */ - public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final String id, boolean newFormat) { - return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat); - } - - /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). - * @return Region name made of passed tableName, startKey and id - */ - public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final byte [] id, boolean newFormat) { - byte [] b = new byte [tableName.length + 2 + id.length + - (startKey == null? 0: startKey.length) + - (newFormat ? (MD5_HEX_LENGTH + 2) : 0)]; - - int offset = tableName.length; - System.arraycopy(tableName, 0, b, 0, offset); - b[offset++] = HConstants.DELIMITER; - if (startKey != null && startKey.length > 0) { - System.arraycopy(startKey, 0, b, offset, startKey.length); - offset += startKey.length; - } - b[offset++] = HConstants.DELIMITER; - System.arraycopy(id, 0, b, offset, id.length); - offset += id.length; - - if (newFormat) { - // - // Encoded name should be built into the region name. - // - // Use the region name thus far (namely, ,,) - // to compute a MD5 hash to be used as the encoded name, and append - // it to the byte buffer. - // - String md5Hash = MD5Hash.getMD5AsHex(b, 0, offset); - byte [] md5HashBytes = Bytes.toBytes(md5Hash); - - if (md5HashBytes.length != MD5_HEX_LENGTH) { - LOG.error("MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH + - "; Got=" + md5HashBytes.length); - } - - // now append the bytes '..' 
to the end - b[offset++] = ENC_SEPARATOR; - System.arraycopy(md5HashBytes, 0, b, offset, MD5_HEX_LENGTH); - offset += MD5_HEX_LENGTH; - b[offset++] = ENC_SEPARATOR; - } - - return b; - } - - /** - * Gets the table name from the specified region name. - * @param regionName - * @return Table name. - */ - public static byte [] getTableName(byte [] regionName) { - int offset = -1; - for (int i = 0; i < regionName.length; i++) { - if (regionName[i] == HConstants.DELIMITER) { - offset = i; - break; - } - } - byte [] tableName = new byte[offset]; - System.arraycopy(regionName, 0, tableName, 0, offset); - return tableName; - } - - /** - * Separate elements of a regionName. - * @param regionName - * @return Array of byte[] containing tableName, startKey and id - * @throws IOException - */ - public static byte [][] parseRegionName(final byte [] regionName) - throws IOException { - int offset = -1; - for (int i = 0; i < regionName.length; i++) { - if (regionName[i] == HConstants.DELIMITER) { - offset = i; - break; - } - } - if(offset == -1) throw new IOException("Invalid regionName format"); - byte [] tableName = new byte[offset]; - System.arraycopy(regionName, 0, tableName, 0, offset); - offset = -1; - for (int i = regionName.length - 1; i > 0; i--) { - if(regionName[i] == HConstants.DELIMITER) { - offset = i; - break; - } - } - if(offset == -1) throw new IOException("Invalid regionName format"); - byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; - if(offset != tableName.length + 1) { - startKey = new byte[offset - tableName.length - 1]; - System.arraycopy(regionName, tableName.length + 1, startKey, 0, - offset - tableName.length - 1); - } - byte [] id = new byte[regionName.length - offset - 1]; - System.arraycopy(regionName, offset + 1, id, 0, - regionName.length - offset - 1); - byte [][] elements = new byte[3][]; - elements[0] = tableName; - elements[1] = startKey; - elements[2] = id; - return elements; - } - - /** @return the regionId */ - public long getRegionId(){ - return regionId; - } - - /** - * @return the regionName as an array of bytes. - * @see #getRegionNameAsString() - */ - public byte [] getRegionName(){ - return regionName; - } - - /** - * @return Region name as a String for use in logging, etc. - */ - public String getRegionNameAsString() { - if (hasEncodedName(this.regionName)) { - // new format region names already have their encoded name. - return this.regionNameStr; - } - - // old format. regionNameStr doesn't have the region name. - // - // - return this.regionNameStr + "." 
+ this.getEncodedName(); - } - - /** @return the encoded region name */ - public synchronized String getEncodedName() { - if (this.encodedName == NO_HASH) { - this.encodedName = encodeRegionName(this.regionName); - } - return this.encodedName; - } - - public synchronized byte [] getEncodedNameAsBytes() { - if (this.encodedNameAsBytes == null) { - this.encodedNameAsBytes = Bytes.toBytes(getEncodedName()); - } - return this.encodedNameAsBytes; - } - - /** @return the startKey */ - public byte [] getStartKey(){ - return startKey; - } - - /** @return the endKey */ - public byte [] getEndKey(){ - return endKey; - } - - /** - * Get current table name of the region - * @return byte array of table name - */ - public byte[] getTableName() { - if (tableName == null || tableName.length == 0) { - tableName = getTableName(getRegionName()); - } - return tableName; - } - - /** - * Get current table name as string - * @return string representation of current table - */ - public String getTableNameAsString() { - return Bytes.toString(tableName); - } - - /** - * Returns true if the given inclusive range of rows is fully contained - * by this region. For example, if the region is foo,a,g and this is - * passed ["b","c"] or ["a","c"] it will return true, but if this is passed - * ["b","z"] it will return false. - * @throws IllegalArgumentException if the range passed is invalid (ie end < start) - */ - public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { - if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) { - throw new IllegalArgumentException( - "Invalid range: " + Bytes.toStringBinary(rangeStartKey) + - " > " + Bytes.toStringBinary(rangeEndKey)); - } - - boolean firstKeyInRange = Bytes.compareTo(rangeStartKey, startKey) >= 0; - boolean lastKeyInRange = - Bytes.compareTo(rangeEndKey, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); - return firstKeyInRange && lastKeyInRange; - } - - /** - * Return true if the given row falls in this region. - */ - public boolean containsRow(byte[] row) { - return Bytes.compareTo(row, startKey) >= 0 && - (Bytes.compareTo(row, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); - } - - /** @return true if this is the root region */ - public boolean isRootRegion() { - return Bytes.equals(tableName, HRegionInfo.ROOT_REGIONINFO.getTableName()); - } - - /** @return true if this region is from a table that is a meta table, - * either .META. or -ROOT- - */ - public boolean isMetaTable() { - return isRootRegion() || isMetaRegion(); - } - - /** @return true if this region is a meta region */ - public boolean isMetaRegion() { - return Bytes.equals(tableName, HRegionInfo.FIRST_META_REGIONINFO.getTableName()); - } - - /** - * @return True if has been split and has daughters. - */ - public boolean isSplit() { - return this.split; - } - - /** - * @param split set split status - */ - public void setSplit(boolean split) { - this.split = split; - } - - /** - * @return True if this region is offline. - */ - public boolean isOffline() { - return this.offLine; - } - - /** - * The parent of a region split is offline while split daughters hold - * references to the parent. Offlined regions are closed. - * @param offLine Set online/offline status. - */ - public void setOffline(boolean offLine) { - this.offLine = offLine; - } - - - /** - * @return True if this is a split parent region. 
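The constructors and the containsRow/containsRange checks above are easiest to read with a concrete case. A minimal sketch, assuming a hypothetical table and row keys, for a region covering [b, f): the start key is inclusive and the end key exclusive.

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    final class ContainsRowSketch {
      static void demo() {
        // Region of a hypothetical table covering start key "b" (inclusive) up to end key "f" (exclusive).
        HRegionInfo hri = new HRegionInfo(Bytes.toBytes("example_table"),
            Bytes.toBytes("b"), Bytes.toBytes("f"));

        boolean hasStart = hri.containsRow(Bytes.toBytes("b"));  // true: start key is inclusive
        boolean hasEnd   = hri.containsRow(Bytes.toBytes("f"));  // false: end key is exclusive
        // containsRange treats the passed range as inclusive on both ends.
        boolean inRange = hri.containsRange(Bytes.toBytes("b"), Bytes.toBytes("e"));  // true
        boolean tooWide = hri.containsRange(Bytes.toBytes("c"), Bytes.toBytes("z"));  // false: runs past the end key
        System.out.println(hasStart + " " + hasEnd + " " + inRange + " " + tooWide);
      }
    }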
- */ - public boolean isSplitParent() { - if (!isSplit()) return false; - if (!isOffline()) { - LOG.warn("Region is split but NOT offline: " + getRegionNameAsString()); - } - return true; - } - - /** - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - return "{" + HConstants.NAME + " => '" + - this.regionNameStr - + "', STARTKEY => '" + - Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + - Bytes.toStringBinary(this.endKey) + - "', ENCODED => " + getEncodedName() + "," + - (isOffline()? " OFFLINE => true,": "") + - (isSplit()? " SPLIT => true,": "") + "}"; - } - - /** - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null) { - return false; - } - if (!(o instanceof HRegionInfo)) { - return false; - } - return this.compareTo((HRegionInfo)o) == 0; - } - - /** - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - return this.hashCode; - } - - /** @return the object version number - * @deprecated HRI is no longer a VersionedWritable */ - @Deprecated - public byte getVersion() { - return VERSION; - } - - /** - * @deprecated Use protobuf serialization instead. See {@link #toByteArray()} and - * {@link #toDelimitedByteArray()} - */ - @Deprecated - public void write(DataOutput out) throws IOException { - out.writeByte(getVersion()); - Bytes.writeByteArray(out, endKey); - out.writeBoolean(offLine); - out.writeLong(regionId); - Bytes.writeByteArray(out, regionName); - out.writeBoolean(split); - Bytes.writeByteArray(out, startKey); - Bytes.writeByteArray(out, tableName); - out.writeInt(hashCode); - } - - /** - * @deprecated Use protobuf deserialization instead. - * @see #parseFrom(byte[]) - */ - @Deprecated - public void readFields(DataInput in) throws IOException { - // Read the single version byte. We don't ask the super class do it - // because freaks out if its not the current classes' version. This method - // can deserialize version 0 and version 1 of HRI. - byte version = in.readByte(); - if (version == 0) { - // This is the old HRI that carried an HTD. Migrate it. The below - // was copied from the old 0.90 HRI readFields. 
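To make the region-name layout used by createRegionName and parseRegionName earlier in this class concrete, the following sketch (hypothetical table name and start key) builds an old-format name, table,startKey,id, and splits it back into its three elements.

    import java.io.IOException;

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    final class RegionNameSketch {
      static void demo() throws IOException {
        byte[] table = Bytes.toBytes("example_table");
        byte[] startKey = Bytes.toBytes("row-0001");
        long regionId = System.currentTimeMillis();

        // Old format (newFormat = false): table,startKey,regionId with no ".<md5>." suffix appended.
        byte[] regionName = HRegionInfo.createRegionName(table, startKey, regionId, false);

        byte[][] parts = HRegionInfo.parseRegionName(regionName); // throws IOException on a malformed name
        // parts[0], parts[1], parts[2] hold the table, start key and id (the millis as a string) respectively.
        byte[] tableOnly = HRegionInfo.getTableName(regionName);  // everything before the first delimiter
        assert Bytes.equals(tableOnly, table) && Bytes.equals(parts[1], startKey);
      }
    }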
- this.endKey = Bytes.readByteArray(in); - this.offLine = in.readBoolean(); - this.regionId = in.readLong(); - this.regionName = Bytes.readByteArray(in); - this.regionNameStr = Bytes.toStringBinary(this.regionName); - this.split = in.readBoolean(); - this.startKey = Bytes.readByteArray(in); - try { - HTableDescriptor htd = new HTableDescriptor(); - htd.readFields(in); - this.tableName = htd.getName(); - } catch(EOFException eofe) { - throw new IOException("HTD not found in input buffer", eofe); - } - this.hashCode = in.readInt(); - } else if (getVersion() == version) { - this.endKey = Bytes.readByteArray(in); - this.offLine = in.readBoolean(); - this.regionId = in.readLong(); - this.regionName = Bytes.readByteArray(in); - this.regionNameStr = Bytes.toStringBinary(this.regionName); - this.split = in.readBoolean(); - this.startKey = Bytes.readByteArray(in); - this.tableName = Bytes.readByteArray(in); - this.hashCode = in.readInt(); - } else { - throw new IOException("Non-migratable/unknown version=" + getVersion()); - } - } - - @Deprecated - private void readFields(byte[] bytes) throws IOException { - if (bytes == null || bytes.length <= 0) { - throw new IllegalArgumentException("Can't build a writable with empty " + - "bytes array"); - } - DataInputBuffer in = new DataInputBuffer(); - try { - in.reset(bytes, 0, bytes.length); - this.readFields(in); - } finally { - in.close(); - } - } - - // - // Comparable - // - - public int compareTo(HRegionInfo o) { - if (o == null) { - return 1; - } - - // Are regions of same table? - int result = Bytes.compareTo(this.tableName, o.tableName); - if (result != 0) { - return result; - } - - // Compare start keys. - result = Bytes.compareTo(this.startKey, o.startKey); - if (result != 0) { - return result; - } - - // Compare end keys. - result = Bytes.compareTo(this.endKey, o.endKey); - - if (result != 0) { - if (this.getStartKey().length != 0 - && this.getEndKey().length == 0) { - return 1; // this is last region - } - if (o.getStartKey().length != 0 - && o.getEndKey().length == 0) { - return -1; // o is the last region - } - return result; - } - - // regionId is usually milli timestamp -- this defines older stamps - // to be "smaller" than newer stamps in sort order. - if (this.regionId > o.regionId) { - return 1; - } else if (this.regionId < o.regionId) { - return -1; - } - - if (this.offLine == o.offLine) - return 0; - if (this.offLine == true) return -1; - - return 1; - } - - /** - * @return Comparator to use comparing {@link KeyValue}s. - */ - public KVComparator getComparator() { - return isRootRegion()? KeyValue.ROOT_COMPARATOR: isMetaRegion()? 
- KeyValue.META_COMPARATOR: KeyValue.COMPARATOR; - } - - /** - * Convert a HRegionInfo to a RegionInfo - * - * @return the converted RegionInfo - */ - RegionInfo convert() { - return convert(this); - } - - /** - * Convert a HRegionInfo to a RegionInfo - * - * @param info the HRegionInfo to convert - * @return the converted RegionInfo - */ - public static RegionInfo convert(final HRegionInfo info) { - if (info == null) return null; - RegionInfo.Builder builder = RegionInfo.newBuilder(); - builder.setTableName(ByteString.copyFrom(info.getTableName())); - builder.setRegionId(info.getRegionId()); - if (info.getStartKey() != null) { - builder.setStartKey(ByteString.copyFrom(info.getStartKey())); - } - if (info.getEndKey() != null) { - builder.setEndKey(ByteString.copyFrom(info.getEndKey())); - } - builder.setOffline(info.isOffline()); - builder.setSplit(info.isSplit()); - return builder.build(); - } - - /** - * Convert a RegionInfo to a HRegionInfo - * - * @param proto the RegionInfo to convert - * @return the converted HRegionInfo - */ - public static HRegionInfo convert(final RegionInfo proto) { - if (proto == null) return null; - byte [] tableName = proto.getTableName().toByteArray(); - if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { - return ROOT_REGIONINFO; - } else if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { - return FIRST_META_REGIONINFO; - } - long regionId = proto.getRegionId(); - byte[] startKey = null; - byte[] endKey = null; - if (proto.hasStartKey()) { - startKey = proto.getStartKey().toByteArray(); - } - if (proto.hasEndKey()) { - endKey = proto.getEndKey().toByteArray(); - } - boolean split = false; - if (proto.hasSplit()) { - split = proto.getSplit(); - } - HRegionInfo hri = new HRegionInfo(tableName, startKey, endKey, split, regionId); - if (proto.hasOffline()) { - hri.setOffline(proto.getOffline()); - } - return hri; - } - - /** - * @return This instance serialized as protobuf w/ a magic pb prefix. - * @see #parseFrom(byte[]) - */ - public byte [] toByteArray() { - byte [] bytes = convert().toByteArray(); - return ProtobufUtil.prependPBMagic(bytes); - } - - /** - * @param bytes - * @return A deserialized {@link HRegionInfo} or null if we failed deserialize or passed bytes null - * @see #toByteArray() - */ - public static HRegionInfo parseFromOrNull(final byte [] bytes) { - if (bytes == null || bytes.length <= 0) return null; - try { - return parseFrom(bytes); - } catch (DeserializationException e) { - return null; - } - } - - /** - * @param bytes A pb RegionInfo serialized with a pb magic prefix. - * @return A deserialized {@link HRegionInfo} - * @throws DeserializationException - * @see #toByteArray() - */ - public static HRegionInfo parseFrom(final byte [] bytes) throws DeserializationException { - if (ProtobufUtil.isPBMagicPrefix(bytes)) { - int pblen = ProtobufUtil.lengthOfPBMagic(); - try { - HBaseProtos.RegionInfo ri = - HBaseProtos.RegionInfo.newBuilder().mergeFrom(bytes, pblen, bytes.length - pblen).build(); - return convert(ri); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - } else { - try { - HRegionInfo hri = new HRegionInfo(); - hri.readFields(bytes); - return hri; - } catch (IOException e) { - throw new DeserializationException(e); - } - } - } - - /** - * Use this instead of {@link #toByteArray()} when writing to a stream and you want to use - * the pb mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want). 
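The protobuf conversion and parse methods above round-trip an HRegionInfo through a magic-prefixed byte array. A minimal sketch with a hypothetical table name; parseFromOrNull swallows deserialization failures, while parseFrom(byte[]) would raise a DeserializationException.

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    final class PbRoundTripSketch {
      static void demo() {
        HRegionInfo hri = new HRegionInfo(Bytes.toBytes("example_table"),
            Bytes.toBytes("aaa"), Bytes.toBytes("zzz"));

        // Serialized with the pb magic prefix; parseFromOrNull returns null on bad input.
        byte[] bytes = hri.toByteArray();
        HRegionInfo back = HRegionInfo.parseFromOrNull(bytes);

        // Equality is defined by compareTo: same table, keys, region id and offline flag.
        boolean sameRegion = hri.equals(back);
        System.out.println("round-tripped ok: " + sameRegion);
      }
    }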
- * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. - * @throws IOException - * @see #toByteArray() - */ - public byte [] toDelimitedByteArray() throws IOException { - return ProtobufUtil.toDelimitedByteArray(convert()); - } - - /** - * Extract a HRegionInfo and ServerName from catalog table {@link Result}. - * @param r Result to pull from - * @return A pair of the {@link HRegionInfo} and the {@link ServerName} - * (or null for server address if no address set in .META.). - * @throws IOException - */ - public static Pair getHRegionInfoAndServerName(final Result r) { - HRegionInfo info = - getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER); - ServerName sn = getServerName(r); - return new Pair(info, sn); - } - - /** - * Returns HRegionInfo object from the column - * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog - * table Result. - * @param data a Result object from the catalog table scan - * @return HRegionInfo or null - */ - public static HRegionInfo getHRegionInfo(Result data) { - byte [] bytes = - data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - if (bytes == null) return null; - HRegionInfo info = parseFromOrNull(bytes); - if (LOG.isDebugEnabled()) { - LOG.debug("Current INFO from scan results = " + info); - } - return info; - } - - /** - * Returns the daughter regions by reading the corresponding columns of the catalog table - * Result. - * @param data a Result object from the catalog table scan - * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split - * parent - */ - public static PairOfSameType getDaughterRegions(Result data) throws IOException { - HRegionInfo splitA = getHRegionInfo(data, HConstants.SPLITA_QUALIFIER); - HRegionInfo splitB = getHRegionInfo(data, HConstants.SPLITB_QUALIFIER); - - return new PairOfSameType(splitA, splitB); - } - - /** - * Returns the HRegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and - * qualifier of the catalog table result. - * @param r a Result object from the catalog table scan - * @param qualifier Column family qualifier -- either - * {@link HConstants#SPLITA_QUALIFIER}, {@link HConstants#SPLITB_QUALIFIER} or - * {@link HConstants#REGIONINFO_QUALIFIER}. - * @return An HRegionInfo instance or null. - * @throws IOException - */ - public static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier) { - byte [] bytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier); - if (bytes == null || bytes.length <= 0) return null; - return parseFromOrNull(bytes); - } - - /** - * Returns a {@link ServerName} from catalog table {@link Result}. - * @param r Result to pull from - * @return A ServerName instance or null if necessary fields not found or empty. - */ - public static ServerName getServerName(final Result r) { - byte[] value = r.getValue(HConstants.CATALOG_FAMILY, - HConstants.SERVER_QUALIFIER); - if (value == null || value.length == 0) return null; - String hostAndPort = Bytes.toString(value); - value = r.getValue(HConstants.CATALOG_FAMILY, - HConstants.STARTCODE_QUALIFIER); - if (value == null || value.length == 0) return null; - return new ServerName(hostAndPort, Bytes.toLong(value)); - } - - /** - * Parses an HRegionInfo instance from the passed in stream. Presumes the HRegionInfo was - * serialized to the stream with {@link #toDelimitedByteArray()} - * @param in - * @return An instance of HRegionInfo. 
- * @throws IOException - */ - public static HRegionInfo parseFrom(final DataInputStream in) throws IOException { - // I need to be able to move back in the stream if this is not a pb serialization so I can - // do the Writable decoding instead. - int pblen = ProtobufUtil.lengthOfPBMagic(); - byte [] pbuf = new byte[pblen]; - if (in.markSupported()) { //read it with mark() - in.mark(pblen); - } - int read = in.read(pbuf); //assumption: if Writable serialization, it should be longer than pblen. - if (read != pblen) throw new IOException("read=" + read + ", wanted=" + pblen); - if (ProtobufUtil.isPBMagicPrefix(pbuf)) { - return convert(HBaseProtos.RegionInfo.parseDelimitedFrom(in)); - } else { - // Presume Writables. Need to reset the stream since it didn't start w/ pb. - if (in.markSupported()) { - in.reset(); - HRegionInfo hri = new HRegionInfo(); - hri.readFields(in); - return hri; - } else { - //we cannot use BufferedInputStream, it consumes more than we read from the underlying IS - ByteArrayInputStream bais = new ByteArrayInputStream(pbuf); - SequenceInputStream sis = new SequenceInputStream(bais, in); //concatenate input streams - HRegionInfo hri = new HRegionInfo(); - hri.readFields(new DataInputStream(sis)); - return hri; - } - } - } - - /** - * Serializes given HRegionInfo's as a byte array. Use this instead of {@link #toByteArray()} when - * writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the delimiter, pb reads - * to EOF which may not be what you want). {@link #parseDelimitedFrom(byte[], int, int)} can - * be used to read back the instances. - * @param infos HRegionInfo objects to serialize - * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. - * @throws IOException - * @see #toByteArray() - */ - public static byte[] toDelimitedByteArray(HRegionInfo... infos) throws IOException { - byte[][] bytes = new byte[infos.length][]; - int size = 0; - for (int i = 0; i < infos.length; i++) { - bytes[i] = infos[i].toDelimitedByteArray(); - size += bytes[i].length; - } - - byte[] result = new byte[size]; - int offset = 0; - for (byte[] b : bytes) { - System.arraycopy(b, 0, result, offset, b.length); - offset += b.length; - } - return result; - } - - /** - * Parses all the HRegionInfo instances from the passed in stream until EOF. Presumes the - * HRegionInfo's were serialized to the stream with {@link #toDelimitedByteArray()} - * @param bytes serialized bytes - * @param offset the start offset into the byte[] buffer - * @param length how far we should read into the byte[] buffer - * @return All the hregioninfos that are in the byte array. Keeps reading till we hit the end. 
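The delimited form is intended for writing several regions back to back into one stream or buffer; the parseDelimitedFrom helper that follows reads them back until the buffer is exhausted. A small sketch under a hypothetical table name:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    final class DelimitedSketch {
      static void demo() throws IOException {
        byte[] table = Bytes.toBytes("example_table");
        HRegionInfo a = new HRegionInfo(table, null, Bytes.toBytes("m"));
        HRegionInfo b = new HRegionInfo(table, Bytes.toBytes("m"), null);

        // Each entry is written delimited (with the pb magic prefix), so they can be concatenated...
        byte[] blob = HRegionInfo.toDelimitedByteArray(a, b);

        // ...and read back in order until the buffer runs out.
        List<HRegionInfo> regions = HRegionInfo.parseDelimitedFrom(blob, 0, blob.length);
        assert regions.size() == 2 && regions.get(0).equals(a);
      }
    }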
- */ - public static List parseDelimitedFrom(final byte[] bytes, final int offset, - final int length) throws IOException { - if (bytes == null) { - throw new IllegalArgumentException("Can't build an object with empty bytes array"); - } - DataInputBuffer in = new DataInputBuffer(); - List hris = new ArrayList(); - try { - in.reset(bytes, offset, length); - while (in.available() > 0) { - HRegionInfo hri = parseFrom(in); - hris.add(hri); - } - } finally { - in.close(); - } - return hris; - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java deleted file mode 100644 index 7c8e9aa..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java +++ /dev/null @@ -1,128 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Addressing; - -/** - * Data structure to hold HRegionInfo and the address for the hosting - * HRegionServer. Immutable. Comparable, but we compare the 'location' only: - * i.e. the hostname and port, and *not* the regioninfo. This means two - * instances are the same if they refer to the same 'location' (the same - * hostname and port), though they may be carrying different regions. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class HRegionLocation implements Comparable { - private final HRegionInfo regionInfo; - private final String hostname; - private final int port; - // Cache of the 'toString' result. 
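As the HRegionLocation class comment above notes, equality and ordering consider only the hosting location (hostname and port), never the region carried. A brief sketch with hypothetical hosts and tables:

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.util.Bytes;

    final class LocationEqualitySketch {
      static void demo() {
        HRegionInfo r1 = new HRegionInfo(Bytes.toBytes("table_one"));
        HRegionInfo r2 = new HRegionInfo(Bytes.toBytes("table_two"));

        HRegionLocation a = new HRegionLocation(r1, "rs1.example.com", 60020);
        HRegionLocation b = new HRegionLocation(r2, "rs1.example.com", 60020);
        HRegionLocation c = new HRegionLocation(r1, "rs2.example.com", 60020);

        System.out.println(a.equals(b));          // true: same host and port, different regions
        System.out.println(a.equals(c));          // false: different host
        System.out.println(a.getHostnamePort());  // "rs1.example.com:60020" per Addressing
      }
    }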
- private String cachedString = null; - // Cache of the hostname + port - private String cachedHostnamePort; - - /** - * Constructor - * @param regionInfo the HRegionInfo for the region - * @param hostname Hostname - * @param port port - */ - public HRegionLocation(HRegionInfo regionInfo, final String hostname, - final int port) { - this.regionInfo = regionInfo; - this.hostname = hostname; - this.port = port; - } - - /** - * @see java.lang.Object#toString() - */ - @Override - public synchronized String toString() { - if (this.cachedString == null) { - this.cachedString = "region=" + this.regionInfo.getRegionNameAsString() + - ", hostname=" + this.hostname + ", port=" + this.port; - } - return this.cachedString; - } - - /** - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null) { - return false; - } - if (!(o instanceof HRegionLocation)) { - return false; - } - return this.compareTo((HRegionLocation)o) == 0; - } - - /** - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - int result = this.hostname.hashCode(); - result ^= this.port; - return result; - } - - /** @return HRegionInfo */ - public HRegionInfo getRegionInfo(){ - return regionInfo; - } - - public String getHostname() { - return this.hostname; - } - - public int getPort() { - return this.port; - } - - /** - * @return String made of hostname and port formatted as per {@link Addressing#createHostAndPortStr(String, int)} - */ - public synchronized String getHostnamePort() { - if (this.cachedHostnamePort == null) { - this.cachedHostnamePort = - Addressing.createHostAndPortStr(this.hostname, this.port); - } - return this.cachedHostnamePort; - } - - // - // Comparable - // - - public int compareTo(HRegionLocation o) { - int result = this.hostname.compareTo(o.getHostname()); - if (result != 0) return result; - return this.port - o.getPort(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java hbase-server/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java deleted file mode 100644 index b697e26..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ /dev/null @@ -1,1301 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; -import java.util.TreeSet; -import java.util.regex.Matcher; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Writables; -import org.apache.hadoop.io.WritableComparable; - -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * HTableDescriptor contains the details about an HBase table such as the descriptors of - * all the column families, is the table a catalog table, -ROOT- or - * .META. , is the table is read only, the maximum size of the memstore, - * when the region split should occur, coprocessors associated with it etc... - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class HTableDescriptor implements WritableComparable { - - /** - * Changes prior to version 3 were not recorded here. - * Version 3 adds metadata as a map where keys and values are byte[]. - * Version 4 adds indexes - * Version 5 removed transactional pollution -- e.g. indexes - */ - private static final byte TABLE_DESCRIPTOR_VERSION = 5; - - private byte [] name = HConstants.EMPTY_BYTE_ARRAY; - - private String nameAsString = ""; - - /** - * A map which holds the metadata information of the table. This metadata - * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY, - * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc... 
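The values map described above backs both the reserved attributes and arbitrary application metadata, via the string-keyed getValue/setValue accessors defined later in this class. A minimal sketch with a hypothetical table name and a hypothetical user key:

    import org.apache.hadoop.hbase.HTableDescriptor;

    final class TableMetadataSketch {
      static void demo() {
        HTableDescriptor htd = new HTableDescriptor("example_table");

        // Reserved keys such as MAX_FILESIZE or READONLY live in the same map as
        // arbitrary (key, value) metadata set by applications.
        htd.setValue(HTableDescriptor.OWNER, "alice");       // hypothetical owner string
        htd.setValue("custom.application.tag", "billing");   // hypothetical user key

        String owner = htd.getValue(HTableDescriptor.OWNER); // "alice"
        htd.setValue("custom.application.tag", null);        // a null value removes the key
        boolean gone = htd.getValue("custom.application.tag") == null;  // true
        System.out.println(owner + " " + gone);
      }
    }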
- */ - protected final Map values = - new HashMap(); - - public static final String SPLIT_POLICY = "SPLIT_POLICY"; - - /** - * INTERNAL Used by HBase Shell interface to access this metadata - * attribute which denotes the maximum size of the store file after which - * a region split occurs - * - * @see #getMaxFileSize() - */ - public static final String MAX_FILESIZE = "MAX_FILESIZE"; - private static final ImmutableBytesWritable MAX_FILESIZE_KEY = - new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE)); - - public static final String OWNER = "OWNER"; - public static final ImmutableBytesWritable OWNER_KEY = - new ImmutableBytesWritable(Bytes.toBytes(OWNER)); - - /** - * INTERNAL Used by rest interface to access this metadata - * attribute which denotes if the table is Read Only - * - * @see #isReadOnly() - */ - public static final String READONLY = "READONLY"; - private static final ImmutableBytesWritable READONLY_KEY = - new ImmutableBytesWritable(Bytes.toBytes(READONLY)); - - /** - * INTERNAL Used by HBase Shell interface to access this metadata - * attribute which represents the maximum size of the memstore after which - * its contents are flushed onto the disk - * - * @see #getMemStoreFlushSize() - */ - public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE"; - private static final ImmutableBytesWritable MEMSTORE_FLUSHSIZE_KEY = - new ImmutableBytesWritable(Bytes.toBytes(MEMSTORE_FLUSHSIZE)); - - /** - * INTERNAL Used by rest interface to access this metadata - * attribute which denotes if the table is a -ROOT- region or not - * - * @see #isRootRegion() - */ - public static final String IS_ROOT = "IS_ROOT"; - private static final ImmutableBytesWritable IS_ROOT_KEY = - new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT)); - - /** - * INTERNAL Used by rest interface to access this metadata - * attribute which denotes if it is a catalog table, either - * .META. or -ROOT- - * - * @see #isMetaRegion() - */ - public static final String IS_META = "IS_META"; - private static final ImmutableBytesWritable IS_META_KEY = - new ImmutableBytesWritable(Bytes.toBytes(IS_META)); - - /** - * INTERNAL Used by HBase Shell interface to access this metadata - * attribute which denotes if the deferred log flush option is enabled - */ - public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH"; - private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY = - new ImmutableBytesWritable(Bytes.toBytes(DEFERRED_LOG_FLUSH)); - - /* - * The below are ugly but better than creating them each time till we - * replace booleans being saved as Strings with plain booleans. Need a - * migration script to do this. TODO. 
- */ - private static final ImmutableBytesWritable FALSE = - new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString())); - - private static final ImmutableBytesWritable TRUE = - new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString())); - - private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false; - - /** - * Constant that denotes whether the table is READONLY by default and is false - */ - public static final boolean DEFAULT_READONLY = false; - - /** - * Constant that denotes the maximum default size of the memstore after which - * the contents are flushed to the store files - */ - public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L; - - private final static Map DEFAULT_VALUES - = new HashMap(); - private final static Set RESERVED_KEYWORDS - = new HashSet(); - static { - DEFAULT_VALUES.put(MAX_FILESIZE, - String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE)); - DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY)); - DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE, - String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE)); - DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH, - String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH)); - for (String s : DEFAULT_VALUES.keySet()) { - RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s))); - } - RESERVED_KEYWORDS.add(IS_ROOT_KEY); - RESERVED_KEYWORDS.add(IS_META_KEY); - } - - /** - * Cache of whether this is a meta table or not. - */ - private volatile Boolean meta = null; - /** - * Cache of whether this is root table or not. - */ - private volatile Boolean root = null; - /** - * Cache of whether deferred logging set. - */ - private Boolean deferredLog = null; - - /** - * Maps column family name to the respective HColumnDescriptors - */ - private final Map families = - new TreeMap(Bytes.BYTES_RAWCOMPARATOR); - - /** - * INTERNAL Private constructor used internally creating table descriptors for - * catalog tables, .META. and -ROOT-. - */ - protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families) { - this.name = name.clone(); - this.nameAsString = Bytes.toString(this.name); - setMetaFlags(name); - for(HColumnDescriptor descriptor : families) { - this.families.put(descriptor.getName(), descriptor); - } - } - - /** - * INTERNAL Private constructor used internally creating table descriptors for - * catalog tables, .META. and -ROOT-. - */ - protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families, - Map values) { - this.name = name.clone(); - this.nameAsString = Bytes.toString(this.name); - setMetaFlags(name); - for(HColumnDescriptor descriptor : families) { - this.families.put(descriptor.getName(), descriptor); - } - for (Map.Entry entry: - values.entrySet()) { - this.values.put(entry.getKey(), entry.getValue()); - } - } - - /** - * Default constructor which constructs an empty object. - * For deserializing an HTableDescriptor instance only. - * @see #HTableDescriptor(byte[]) - * @deprecated Used by Writables and Writables are going away. - */ - @Deprecated - public HTableDescriptor() { - super(); - } - - /** - * Construct a table descriptor specifying table name. - * @param name Table name. - * @throws IllegalArgumentException if passed a table name - * that is made of other than 'word' characters, underscore or period: i.e. - * [a-zA-Z_0-9.]. 
- * @see HADOOP-1581 HBASE: Un-openable tablename bug - */ - public HTableDescriptor(final String name) { - this(Bytes.toBytes(name)); - } - - /** - * Construct a table descriptor specifying a byte array table name - * @param name - Table name as a byte array. - * @throws IllegalArgumentException if passed a table name - * that is made of other than 'word' characters, underscore or period: i.e. - * [a-zA-Z_0-9-.]. - * @see HADOOP-1581 HBASE: Un-openable tablename bug - */ - public HTableDescriptor(final byte [] name) { - super(); - setMetaFlags(this.name); - this.name = this.isMetaRegion()? name: isLegalTableName(name); - this.nameAsString = Bytes.toString(this.name); - } - - /** - * Construct a table descriptor by cloning the descriptor passed as a parameter. - *
<p>
      - * Makes a deep copy of the supplied descriptor. - * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor. - * @param desc The descriptor. - */ - public HTableDescriptor(final HTableDescriptor desc) { - super(); - this.name = desc.name.clone(); - this.nameAsString = Bytes.toString(this.name); - setMetaFlags(this.name); - for (HColumnDescriptor c: desc.families.values()) { - this.families.put(c.getName(), new HColumnDescriptor(c)); - } - for (Map.Entry e: - desc.values.entrySet()) { - this.values.put(e.getKey(), e.getValue()); - } - } - - /* - * Set meta flags on this table. - * IS_ROOT_KEY is set if its a -ROOT- table - * IS_META_KEY is set either if its a -ROOT- or a .META. table - * Called by constructors. - * @param name - */ - private void setMetaFlags(final byte [] name) { - setRootRegion(Bytes.equals(name, HConstants.ROOT_TABLE_NAME)); - setMetaRegion(isRootRegion() || - Bytes.equals(name, HConstants.META_TABLE_NAME)); - } - - /** - * Check if the descriptor represents a -ROOT- region. - * - * @return true if this is a -ROOT- region - */ - public boolean isRootRegion() { - if (this.root == null) { - this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE; - } - return this.root.booleanValue(); - } - - /** - * INTERNAL Used to denote if the current table represents - * -ROOT- region. This is used internally by the - * HTableDescriptor constructors - * - * @param isRoot true if this is the -ROOT- region - */ - protected void setRootRegion(boolean isRoot) { - // TODO: Make the value a boolean rather than String of boolean. - values.put(IS_ROOT_KEY, isRoot? TRUE: FALSE); - } - - /** - * Checks if this table is either -ROOT- or .META. - * region. - * - * @return true if this is either a -ROOT- or .META. - * region - */ - public boolean isMetaRegion() { - if (this.meta == null) { - this.meta = calculateIsMetaRegion(); - } - return this.meta.booleanValue(); - } - - private synchronized Boolean calculateIsMetaRegion() { - byte [] value = getValue(IS_META_KEY); - return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE; - } - - private boolean isSomething(final ImmutableBytesWritable key, - final boolean valueIfNull) { - byte [] value = getValue(key); - if (value != null) { - // TODO: Make value be a boolean rather than String of boolean. - return Boolean.valueOf(Bytes.toString(value)).booleanValue(); - } - return valueIfNull; - } - - /** - * INTERNAL Used to denote if the current table represents - * -ROOT- or .META. region. This is used - * internally by the HTableDescriptor constructors - * - * @param isMeta true if its either -ROOT- or - * .META. region - */ - protected void setMetaRegion(boolean isMeta) { - values.put(IS_META_KEY, isMeta? TRUE: FALSE); - } - - /** - * Checks if the table is a .META. table - * - * @return true if table is .META. region. - */ - public boolean isMetaTable() { - return isMetaRegion() && !isRootRegion(); - } - - /** - * Checks of the tableName being passed represents either - * -ROOT- or .META. - * - * @return true if a tablesName is either -ROOT- - * or .META. - */ - public static boolean isMetaTable(final byte [] tableName) { - return Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME) || - Bytes.equals(tableName, HConstants.META_TABLE_NAME); - } - - /** - * Check passed byte buffer, "tableName", is legal user-space table name. 
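isLegalTableName, implemented just below, rejects names that start with '.' or '-' or that collide with reserved file names, and otherwise returns the name unchanged. A short sketch with hypothetical names:

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    final class TableNameCheckSketch {
      static void demo() {
        // Word characters, '-', '_' and '.' are fine as long as the first character is not '.' or '-'.
        byte[] ok = HTableDescriptor.isLegalTableName(Bytes.toBytes("web-logs_2012.raw"));

        try {
          HTableDescriptor.isLegalTableName(Bytes.toBytes("-ROOT-"));   // leading '-' is rejected
        } catch (IllegalArgumentException expected) {
          System.out.println("rejected: " + expected.getMessage());
        }
        System.out.println("accepted: " + Bytes.toString(ok));
      }
    }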
- * @return Returns passed tableName param - * @throws NullPointerException If passed tableName is null - * @throws IllegalArgumentException if passed a tableName - * that is made of other than 'word' characters or underscores: i.e. - * [a-zA-Z_0-9]. - */ - public static byte [] isLegalTableName(final byte [] tableName) { - if (tableName == null || tableName.length <= 0) { - throw new IllegalArgumentException("Name is null or empty"); - } - if (tableName[0] == '.' || tableName[0] == '-') { - throw new IllegalArgumentException("Illegal first character <" + tableName[0] + - "> at 0. User-space table names can only start with 'word " + - "characters': i.e. [a-zA-Z_0-9]: " + Bytes.toString(tableName)); - } - if (HConstants.CLUSTER_ID_FILE_NAME.equalsIgnoreCase(Bytes - .toString(tableName)) - || HConstants.SPLIT_LOGDIR_NAME.equalsIgnoreCase(Bytes - .toString(tableName)) - || HConstants.VERSION_FILE_NAME.equalsIgnoreCase(Bytes - .toString(tableName))) { - throw new IllegalArgumentException(Bytes.toString(tableName) - + " conflicted with system reserved words"); - } - for (int i = 0; i < tableName.length; i++) { - if (Character.isLetterOrDigit(tableName[i]) || tableName[i] == '_' || - tableName[i] == '-' || tableName[i] == '.') { - continue; - } - throw new IllegalArgumentException("Illegal character <" + tableName[i] + - "> at " + i + ". User-space table names can only contain " + - "'word characters': i.e. [a-zA-Z_0-9-.]: " + Bytes.toString(tableName)); - } - return tableName; - } - - /** - * Getter for accessing the metadata associated with the key - * - * @param key The key. - * @return The value. - * @see #values - */ - public byte[] getValue(byte[] key) { - return getValue(new ImmutableBytesWritable(key)); - } - - private byte[] getValue(final ImmutableBytesWritable key) { - ImmutableBytesWritable ibw = values.get(key); - if (ibw == null) - return null; - return ibw.get(); - } - - /** - * Getter for accessing the metadata associated with the key - * - * @param key The key. - * @return The value. - * @see #values - */ - public String getValue(String key) { - byte[] value = getValue(Bytes.toBytes(key)); - if (value == null) - return null; - return Bytes.toString(value); - } - - /** - * Getter for fetching an unmodifiable {@link #values} map. - * - * @return unmodifiable map {@link #values}. - * @see #values - */ - public Map getValues() { - // shallow pointer copy - return Collections.unmodifiableMap(values); - } - - /** - * Setter for storing metadata as a (key, value) pair in {@link #values} map - * - * @param key The key. - * @param value The value. - * @see #values - */ - public void setValue(byte[] key, byte[] value) { - setValue(new ImmutableBytesWritable(key), value); - } - - /* - * @param key The key. - * @param value The value. - */ - private void setValue(final ImmutableBytesWritable key, - final byte[] value) { - values.put(key, new ImmutableBytesWritable(value)); - } - - /* - * @param key The key. - * @param value The value. - */ - private void setValue(final ImmutableBytesWritable key, - final ImmutableBytesWritable value) { - values.put(key, value); - } - - /** - * Setter for storing metadata as a (key, value) pair in {@link #values} map - * - * @param key The key. - * @param value The value. 
- * @see #values - */ - public void setValue(String key, String value) { - if (value == null) { - remove(Bytes.toBytes(key)); - } else { - setValue(Bytes.toBytes(key), Bytes.toBytes(value)); - } - } - - /** - * Remove metadata represented by the key from the {@link #values} map - * - * @param key Key whose key and value we're to remove from HTableDescriptor - * parameters. - */ - public void remove(final byte [] key) { - values.remove(new ImmutableBytesWritable(key)); - } - - /** - * Remove metadata represented by the key from the {@link #values} map - * - * @param key Key whose key and value we're to remove from HTableDescriptor - * parameters. - */ - public void remove(final String key) { - remove(Bytes.toBytes(key)); - } - - /** - * Check if the readOnly flag of the table is set. If the readOnly flag is - * set then the contents of the table can only be read from but not modified. - * - * @return true if all columns in the table should be read only - */ - public boolean isReadOnly() { - return isSomething(READONLY_KEY, DEFAULT_READONLY); - } - - /** - * Setting the table as read only sets all the columns in the table as read - * only. By default all tables are modifiable, but if the readOnly flag is - * set to true then the contents of the table can only be read but not modified. - * - * @param readOnly True if all of the columns in the table should be read - * only. - */ - public void setReadOnly(final boolean readOnly) { - setValue(READONLY_KEY, readOnly? TRUE: FALSE); - } - - /** - * Check if deferred log edits are enabled on the table. - * - * @return true if that deferred log flush is enabled on the table - * - * @see #setDeferredLogFlush(boolean) - */ - public synchronized boolean isDeferredLogFlush() { - if(this.deferredLog == null) { - this.deferredLog = - isSomething(DEFERRED_LOG_FLUSH_KEY, DEFAULT_DEFERRED_LOG_FLUSH); - } - return this.deferredLog; - } - - /** - * This is used to defer the log edits syncing to the file system. Everytime - * an edit is sent to the server it is first sync'd to the file system by the - * log writer. This sync is an expensive operation and thus can be deferred so - * that the edits are kept in memory for a specified period of time as represented - * by hbase.regionserver.optionallogflushinterval and not flushed - * for every edit. - *
<p>
      - * NOTE:- This option might result in data loss if the region server crashes - * before these deferred edits in memory are flushed onto the filesystem. - *
</p>
      - * - * @param isDeferredLogFlush - */ - public void setDeferredLogFlush(final boolean isDeferredLogFlush) { - setValue(DEFERRED_LOG_FLUSH_KEY, isDeferredLogFlush? TRUE: FALSE); - this.deferredLog = isDeferredLogFlush; - } - - /** - * Get the name of the table as a byte array. - * - * @return name of table - */ - public byte [] getName() { - return name; - } - - /** - * Get the name of the table as a String - * - * @return name of table as a String - */ - public String getNameAsString() { - return this.nameAsString; - } - - /** - * This get the class associated with the region split policy which - * determines when a region split should occur. The class used by - * default is {@link org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy} - * which split the region base on a constant {@link #getMaxFileSize()} - * - * @return the class name of the region split policy for this table. - * If this returns null, the default constant size based split policy - * is used. - */ - public String getRegionSplitPolicyClassName() { - return getValue(SPLIT_POLICY); - } - - /** - * Set the name of the table. - * - * @param name name of table - */ - public void setName(byte[] name) { - this.name = name; - this.nameAsString = Bytes.toString(this.name); - setMetaFlags(this.name); - } - - /** - * Returns the maximum size upto which a region can grow to after which a region - * split is triggered. The region size is represented by the size of the biggest - * store file in that region. - * - * @return max hregion size for table - * - * @see #setMaxFileSize(long) - */ - public long getMaxFileSize() { - byte [] value = getValue(MAX_FILESIZE_KEY); - if (value != null) - return Long.valueOf(Bytes.toString(value)).longValue(); - return HConstants.DEFAULT_MAX_FILE_SIZE; - } - - /** - * Sets the maximum size upto which a region can grow to after which a region - * split is triggered. The region size is represented by the size of the biggest - * store file in that region, i.e. If the biggest store file grows beyond the - * maxFileSize, then the region split is triggered. This defaults to a value of - * 256 MB. - *
<p>
      - * This is not an absolute value and might vary. Assume that a single row exceeds - * the maxFileSize then the storeFileSize will be greater than maxFileSize since - * a single row cannot be split across multiple regions - *
</p>
      - * - * @param maxFileSize The maximum file size that a store file can grow to - * before a split is triggered. - */ - public void setMaxFileSize(long maxFileSize) { - setValue(MAX_FILESIZE_KEY, Bytes.toBytes(Long.toString(maxFileSize))); - } - - /** - * Returns the size of the memstore after which a flush to filesystem is triggered. - * - * @return memory cache flush size for each hregion - * - * @see #setMemStoreFlushSize(long) - */ - public long getMemStoreFlushSize() { - byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY); - if (value != null) - return Long.valueOf(Bytes.toString(value)).longValue(); - return DEFAULT_MEMSTORE_FLUSH_SIZE; - } - - /** - * Represents the maximum size of the memstore after which the contents of the - * memstore are flushed to the filesystem. This defaults to a size of 64 MB. - * - * @param memstoreFlushSize memory cache flush size for each hregion - */ - public void setMemStoreFlushSize(long memstoreFlushSize) { - setValue(MEMSTORE_FLUSHSIZE_KEY, - Bytes.toBytes(Long.toString(memstoreFlushSize))); - } - - /** - * Adds a column family. - * @param family HColumnDescriptor of family to add. - */ - public void addFamily(final HColumnDescriptor family) { - if (family.getName() == null || family.getName().length <= 0) { - throw new NullPointerException("Family name cannot be null or empty"); - } - this.families.put(family.getName(), family); - } - - /** - * Checks to see if this table contains the given column family - * @param familyName Family name or column name. - * @return true if the table contains the specified family name - */ - public boolean hasFamily(final byte [] familyName) { - return families.containsKey(familyName); - } - - /** - * @return Name of this table and then a map of all of the column family - * descriptors. - * @see #getNameAsString() - */ - @Override - public String toString() { - StringBuilder s = new StringBuilder(); - s.append('\'').append(Bytes.toString(name)).append('\''); - s.append(getValues(true)); - for (HColumnDescriptor f : families.values()) { - s.append(", ").append(f); - } - return s.toString(); - } - - /** - * @return Name of this table and then a map of all of the column family - * descriptors (with only the non-default column family attributes) - */ - public String toStringCustomizedValues() { - StringBuilder s = new StringBuilder(); - s.append('\'').append(Bytes.toString(name)).append('\''); - s.append(getValues(false)); - for(HColumnDescriptor hcd : families.values()) { - s.append(", ").append(hcd.toStringCustomizedValues()); - } - return s.toString(); - } - - private StringBuilder getValues(boolean printDefaults) { - StringBuilder s = new StringBuilder(); - - // step 1: set partitioning and pruning - Set reservedKeys = new TreeSet(); - Set configKeys = new TreeSet(); - for (ImmutableBytesWritable k : values.keySet()) { - if (k == null || k.get() == null) continue; - String key = Bytes.toString(k.get()); - // in this section, print out reserved keywords + coprocessor info - if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) { - configKeys.add(k); - continue; - } - // only print out IS_ROOT/IS_META if true - String value = Bytes.toString(values.get(k).get()); - if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) { - if (Boolean.valueOf(value) == false) continue; - } - // see if a reserved key is a default value. 
may not want to print it out - if (printDefaults - || !DEFAULT_VALUES.containsKey(key) - || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { - reservedKeys.add(k); - } - } - - // early exit optimization - if (reservedKeys.isEmpty() && configKeys.isEmpty()) return s; - - // step 2: printing - s.append(", {TABLE_ATTRIBUTES => {"); - - // print all reserved keys first - boolean printCommaForAttr = false; - for (ImmutableBytesWritable k : reservedKeys) { - String key = Bytes.toString(k.get()); - String value = Bytes.toString(values.get(k).get()); - if (printCommaForAttr) s.append(", "); - printCommaForAttr = true; - s.append(key); - s.append(" => "); - s.append('\'').append(value).append('\''); - } - - if (!configKeys.isEmpty()) { - // print all non-reserved, advanced config keys as a separate subset - if (printCommaForAttr) s.append(", "); - printCommaForAttr = true; - s.append(HConstants.CONFIG).append(" => "); - s.append("{"); - boolean printCommaForCfg = false; - for (ImmutableBytesWritable k : configKeys) { - String key = Bytes.toString(k.get()); - String value = Bytes.toString(values.get(k).get()); - if (printCommaForCfg) s.append(", "); - printCommaForCfg = true; - s.append('\'').append(key).append('\''); - s.append(" => "); - s.append('\'').append(value).append('\''); - } - s.append("}"); - } - - s.append("}}"); // end METHOD - return s; - } - - public static Map getDefaultValues() { - return Collections.unmodifiableMap(DEFAULT_VALUES); - } - - /** - * Compare the contents of the descriptor with another one passed as a parameter. - * Checks if the obj passed is an instance of HTableDescriptor, if yes then the - * contents of the descriptors are compared. - * - * @return true if the contents of the the two descriptors exactly match - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (!(obj instanceof HTableDescriptor)) { - return false; - } - return compareTo((HTableDescriptor)obj) == 0; - } - - /** - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - int result = Bytes.hashCode(this.name); - result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode(); - if (this.families != null && this.families.size() > 0) { - for (HColumnDescriptor e: this.families.values()) { - result ^= e.hashCode(); - } - } - result ^= values.hashCode(); - return result; - } - - /** - * INTERNAL This method is a part of {@link WritableComparable} interface - * and is used for de-serialization of the HTableDescriptor over RPC - * @deprecated Writables are going away. Use pb {@link #parseFrom(byte[])} instead. 
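Pulling the family map and the tuning attributes above together, a typical descriptor setup might look like the sketch below. Table and family names are hypothetical, and the HColumnDescriptor(String) constructor is assumed from the companion class in this module.

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    final class DescriptorSetupSketch {
      static void demo() {
        HTableDescriptor htd = new HTableDescriptor("example_table");
        htd.addFamily(new HColumnDescriptor("d"));       // at least one column family
        htd.setMaxFileSize(512 * 1024 * 1024L);          // split once the biggest store file passes 512 MB
        htd.setMemStoreFlushSize(HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
        htd.setDeferredLogFlush(false);                  // keep per-edit WAL sync
        htd.setReadOnly(false);

        boolean hasFamily = htd.hasFamily(Bytes.toBytes("d"));  // true: keyed by family name bytes
        System.out.println(htd.toStringCustomizedValues() + " " + hasFamily);
      }
    }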
- */ - @Deprecated - @Override - public void readFields(DataInput in) throws IOException { - int version = in.readInt(); - if (version < 3) - throw new IOException("versions < 3 are not supported (and never existed!?)"); - // version 3+ - name = Bytes.readByteArray(in); - nameAsString = Bytes.toString(this.name); - setRootRegion(in.readBoolean()); - setMetaRegion(in.readBoolean()); - values.clear(); - int numVals = in.readInt(); - for (int i = 0; i < numVals; i++) { - ImmutableBytesWritable key = new ImmutableBytesWritable(); - ImmutableBytesWritable value = new ImmutableBytesWritable(); - key.readFields(in); - value.readFields(in); - values.put(key, value); - } - families.clear(); - int numFamilies = in.readInt(); - for (int i = 0; i < numFamilies; i++) { - HColumnDescriptor c = new HColumnDescriptor(); - c.readFields(in); - families.put(c.getName(), c); - } - if (version < 4) { - return; - } - } - - /** - * INTERNAL This method is a part of {@link WritableComparable} interface - * and is used for serialization of the HTableDescriptor over RPC - * @deprecated Writables are going away. - * Use {@link com.google.protobuf.MessageLite#toByteArray} instead. - */ - @Deprecated - @Override - public void write(DataOutput out) throws IOException { - out.writeInt(TABLE_DESCRIPTOR_VERSION); - Bytes.writeByteArray(out, name); - out.writeBoolean(isRootRegion()); - out.writeBoolean(isMetaRegion()); - out.writeInt(values.size()); - for (Map.Entry e: - values.entrySet()) { - e.getKey().write(out); - e.getValue().write(out); - } - out.writeInt(families.size()); - for(Iterator it = families.values().iterator(); - it.hasNext(); ) { - HColumnDescriptor family = it.next(); - family.write(out); - } - } - - // Comparable - - /** - * Compares the descriptor with another descriptor which is passed as a parameter. - * This compares the content of the two descriptors and not the reference. - * - * @return 0 if the contents of the descriptors are exactly matching, - * 1 if there is a mismatch in the contents - */ - @Override - public int compareTo(final HTableDescriptor other) { - int result = Bytes.compareTo(this.name, other.name); - if (result == 0) { - result = families.size() - other.families.size(); - } - if (result == 0 && families.size() != other.families.size()) { - result = Integer.valueOf(families.size()).compareTo( - Integer.valueOf(other.families.size())); - } - if (result == 0) { - for (Iterator it = families.values().iterator(), - it2 = other.families.values().iterator(); it.hasNext(); ) { - result = it.next().compareTo(it2.next()); - if (result != 0) { - break; - } - } - } - if (result == 0) { - // punt on comparison for ordering, just calculate difference - result = this.values.hashCode() - other.values.hashCode(); - if (result < 0) - result = -1; - else if (result > 0) - result = 1; - } - return result; - } - - /** - * Returns an unmodifiable collection of all the {@link HColumnDescriptor} - * of all the column families of the table. - * - * @return Immutable collection of {@link HColumnDescriptor} of all the - * column families. - */ - public Collection getFamilies() { - return Collections.unmodifiableCollection(this.families.values()); - } - - /** - * Returns all the column family names of the current table. The map of - * HTableDescriptor contains mapping of family name to HColumnDescriptors. - * This returns all the keys of the family map which represents the column - * family names of the table. - * - * @return Immutable sorted set of the keys of the families. 
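The deprecated write/readFields pair above still round-trips a descriptor through plain DataOutput/DataInput, which is exactly the path the protobuf serialization in this patch is meant to replace. A minimal sketch, assuming a populated descriptor is passed in:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.hbase.HTableDescriptor;

    final class WritableRoundTripSketch {
      static HTableDescriptor roundTrip(HTableDescriptor htd) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        htd.write(new DataOutputStream(baos));           // version, name, flags, values, families

        HTableDescriptor copy = new HTableDescriptor();  // deprecated no-arg ctor, for deserialization only
        copy.readFields(new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));

        assert copy.compareTo(htd) == 0;                 // contents match: name, families, values
        return copy;
      }
    }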
- */ - public Set getFamiliesKeys() { - return Collections.unmodifiableSet(this.families.keySet()); - } - - /** - * Returns an array all the {@link HColumnDescriptor} of the column families - * of the table. - * - * @return Array of all the HColumnDescriptors of the current table - * - * @see #getFamilies() - */ - public HColumnDescriptor[] getColumnFamilies() { - return getFamilies().toArray(new HColumnDescriptor[0]); - } - - - /** - * Returns the HColumnDescriptor for a specific column family with name as - * specified by the parameter column. - * - * @param column Column family name - * @return Column descriptor for the passed family name or the family on - * passed in column. - */ - public HColumnDescriptor getFamily(final byte [] column) { - return this.families.get(column); - } - - - /** - * Removes the HColumnDescriptor with name specified by the parameter column - * from the table descriptor - * - * @param column Name of the column family to be removed. - * @return Column descriptor for the passed family name or the family on - * passed in column. - */ - public HColumnDescriptor removeFamily(final byte [] column) { - return this.families.remove(column); - } - - - /** - * Add a table coprocessor to this table. The coprocessor - * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} - * or Endpoint. - * It won't check if the class can be loaded or not. - * Whether a coprocessor is loadable or not will be determined when - * a region is opened. - * @param className Full class name. - * @throws IOException - */ - public void addCoprocessor(String className) throws IOException { - addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null); - } - - - /** - * Add a table coprocessor to this table. The coprocessor - * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} - * or Endpoint. - * It won't check if the class can be loaded or not. - * Whether a coprocessor is loadable or not will be determined when - * a region is opened. - * @param jarFilePath Path of the jar file. If it's null, the class will be - * loaded from default classloader. - * @param className Full class name. - * @param priority Priority - * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor. 
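The coprocessor helpers that follow store each entry under a generated coprocessor$N key whose value packs jar path, class name, priority and optional parameters. A brief sketch; the observer class name is hypothetical and is not checked for loadability at this point:

    import java.io.IOException;

    import org.apache.hadoop.hbase.Coprocessor;
    import org.apache.hadoop.hbase.HTableDescriptor;

    final class CoprocessorAttrSketch {
      static void demo() throws IOException {
        HTableDescriptor htd = new HTableDescriptor("example_table");

        // Null jar path means the default classloader; stored in the values map as
        // "coprocessor$1" => "|com.example.AuditObserver|<priority>|".
        htd.addCoprocessor("com.example.AuditObserver", null, Coprocessor.PRIORITY_USER, null);

        boolean present = htd.hasCoprocessor("com.example.AuditObserver");  // true
        htd.removeCoprocessor("com.example.AuditObserver");                 // drops the matching coprocessor$N entry
        System.out.println(present + " " + htd.hasCoprocessor("com.example.AuditObserver"));
      }
    }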
- * @throws IOException - */ - public void addCoprocessor(String className, Path jarFilePath, - int priority, final Map kvs) - throws IOException { - if (hasCoprocessor(className)) { - throw new IOException("Coprocessor " + className + " already exists."); - } - // validate parameter kvs - StringBuilder kvString = new StringBuilder(); - if (kvs != null) { - for (Map.Entry e: kvs.entrySet()) { - if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) { - throw new IOException("Illegal parameter key = " + e.getKey()); - } - if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) { - throw new IOException("Illegal parameter (" + e.getKey() + - ") value = " + e.getValue()); - } - if (kvString.length() != 0) { - kvString.append(','); - } - kvString.append(e.getKey()); - kvString.append('='); - kvString.append(e.getValue()); - } - } - - // generate a coprocessor key - int maxCoprocessorNumber = 0; - Matcher keyMatcher; - for (Map.Entry e: - this.values.entrySet()) { - keyMatcher = - HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher( - Bytes.toString(e.getKey().get())); - if (!keyMatcher.matches()) { - continue; - } - maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), - maxCoprocessorNumber); - } - maxCoprocessorNumber++; - - String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber); - String value = ((jarFilePath == null)? "" : jarFilePath.toString()) + - "|" + className + "|" + Integer.toString(priority) + "|" + - kvString.toString(); - setValue(key, value); - } - - - /** - * Check if the table has an attached co-processor represented by the name className - * - * @param className - Class name of the co-processor - * @return true of the table has a co-processor className - */ - public boolean hasCoprocessor(String className) { - Matcher keyMatcher; - Matcher valueMatcher; - for (Map.Entry e: - this.values.entrySet()) { - keyMatcher = - HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher( - Bytes.toString(e.getKey().get())); - if (!keyMatcher.matches()) { - continue; - } - valueMatcher = - HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher( - Bytes.toString(e.getValue().get())); - if (!valueMatcher.matches()) { - continue; - } - // get className and compare - String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field - if (clazz.equals(className.trim())) { - return true; - } - } - return false; - } - - /** - * Remove a coprocessor from those set on the table - * @param className Class name of the co-processor - */ - public void removeCoprocessor(String className) { - ImmutableBytesWritable match = null; - Matcher keyMatcher; - Matcher valueMatcher; - for (Map.Entry e : this.values - .entrySet()) { - keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e - .getKey().get())); - if (!keyMatcher.matches()) { - continue; - } - valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes - .toString(e.getValue().get())); - if (!valueMatcher.matches()) { - continue; - } - // get className and compare - String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field - // remove the CP if it is present - if (clazz.equals(className.trim())) { - match = e.getKey(); - break; - } - } - // if we found a match, remove it - if (match != null) - this.values.remove(match); - } - - /** - * Returns the {@link Path} object representing the table directory under - * path rootdir - * - * @param rootdir qualified path of HBase root directory - * @param tableName name of table - * @return {@link Path} for table - */ - public 
static Path getTableDir(Path rootdir, final byte [] tableName) { - return new Path(rootdir, Bytes.toString(tableName)); - } - - /** Table descriptor for -ROOT-
      catalog table */ - public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor( - HConstants.ROOT_TABLE_NAME, - new HColumnDescriptor[] { - new HColumnDescriptor(HConstants.CATALOG_FAMILY) - // Ten is arbitrary number. Keep versions to help debugging. - .setMaxVersions(10) - .setInMemory(true) - .setBlocksize(8 * 1024) - .setTimeToLive(HConstants.FOREVER) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - }); - - /** Table descriptor for .META. catalog table */ - public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor( - HConstants.META_TABLE_NAME, new HColumnDescriptor[] { - new HColumnDescriptor(HConstants.CATALOG_FAMILY) - // Ten is arbitrary number. Keep versions to help debugging. - .setMaxVersions(10) - .setInMemory(true) - .setBlocksize(8 * 1024) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - }); - - @Deprecated - public void setOwner(User owner) { - setOwnerString(owner != null ? owner.getShortName() : null); - } - - // used by admin.rb:alter(table_name,*args) to update owner. - @Deprecated - public void setOwnerString(String ownerString) { - if (ownerString != null) { - setValue(OWNER_KEY, Bytes.toBytes(ownerString)); - } else { - values.remove(OWNER_KEY); - } - } - - @Deprecated - public String getOwnerString() { - if (getValue(OWNER_KEY) != null) { - return Bytes.toString(getValue(OWNER_KEY)); - } - // Note that every table should have an owner (i.e. should have OWNER_KEY set). - // .META. and -ROOT- should return system user as owner, not null (see - // MasterFileSystem.java:bootstrap()). - return null; - } - - /** - * @return This instance serialized with pb with pb magic prefix - * @see #parseFrom(byte[]) - */ - public byte [] toByteArray() { - return ProtobufUtil.prependPBMagic(convert().toByteArray()); - } - - /** - * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix - * @return An instance of {@link HTableDescriptor} made from bytes - * @throws DeserializationException - * @throws IOException - * @see #toByteArray() - */ - public static HTableDescriptor parseFrom(final byte [] bytes) - throws DeserializationException, IOException { - if (!ProtobufUtil.isPBMagicPrefix(bytes)) { - return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor()); - } - int pblen = ProtobufUtil.lengthOfPBMagic(); - TableSchema.Builder builder = TableSchema.newBuilder(); - TableSchema ts = null; - try { - ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return convert(ts); - } - - /** - * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance. - */ - public TableSchema convert() { - TableSchema.Builder builder = TableSchema.newBuilder(); - builder.setName(ByteString.copyFrom(getName())); - for (Map.Entry e: this.values.entrySet()) { - TableSchema.Attribute.Builder aBuilder = TableSchema.Attribute.newBuilder(); - aBuilder.setName(ByteString.copyFrom(e.getKey().get())); - aBuilder.setValue(ByteString.copyFrom(e.getValue().get())); - builder.addAttributes(aBuilder.build()); - } - for (HColumnDescriptor hcd: getColumnFamilies()) { - builder.addColumnFamilies(hcd.convert()); - } - return builder.build(); - } - - /** - * @param ts A pb TableSchema instance. - * @return An {@link HTableDescriptor} made from the passed in pb ts. 
- */ - public static HTableDescriptor convert(final TableSchema ts) { - List list = ts.getColumnFamiliesList(); - HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()]; - int index = 0; - for (ColumnFamilySchema cfs: list) { - hcds[index++] = HColumnDescriptor.convert(cfs); - } - HTableDescriptor htd = new HTableDescriptor(ts.getName().toByteArray(), hcds); - for (TableSchema.Attribute a: ts.getAttributesList()) { - htd.setValue(a.getName().toByteArray(), a.getValue().toByteArray()); - } - return htd; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java hbase-server/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java deleted file mode 100644 index c3db943..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Thrown if a request is table schema modification is requested but - * made for an invalid family name. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class InvalidFamilyOperationException extends IOException { - private static final long serialVersionUID = 1L << 22 - 1L; - /** default constructor */ - public InvalidFamilyOperationException() { - super(); - } - - /** - * Constructor - * @param s message - */ - public InvalidFamilyOperationException(String s) { - super(s); - } - - /** - * Constructor taking another exception. - * @param e Exception to grab data from. - */ - public InvalidFamilyOperationException(Exception e) { - super(e); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java deleted file mode 100644 index 57d83ef..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java +++ /dev/null @@ -1,349 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService; -import org.apache.hadoop.hbase.security.TokenInfo; -import org.apache.hadoop.hbase.security.KerberosInfo; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse; -import 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse; - - -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; - -/** - * Protocol that a client uses to communicate with the Master (for admin purposes). - */ -@KerberosInfo( - serverPrincipal = "hbase.master.kerberos.principal") -@TokenInfo("HBASE_AUTH_TOKEN") -@InterfaceAudience.Private -@InterfaceStability.Evolving -public interface MasterAdminProtocol extends - MasterAdminService.BlockingInterface, MasterProtocol { - public static final long VERSION = 1L; - - /* Column-level */ - - /** - * Adds a column to the specified table - * @param controller Unused (set to null). - * @param req AddColumnRequest that contains:
      - * - tableName: table to modify
      - * - column: column descriptor - * @throws ServiceException - */ - @Override - public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req) - throws ServiceException; - - /** - * Deletes a column from the specified table. Table must be disabled. - * @param controller Unused (set to null). - * @param req DeleteColumnRequest that contains:
      - * - tableName: table to alter
      - * - columnName: column family to remove - * @throws ServiceException - */ - @Override - public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req) - throws ServiceException; - - /** - * Modifies an existing column on the specified table - * @param controller Unused (set to null). - * @param req ModifyColumnRequest that contains:
      - * - tableName: table name
      - * - descriptor: new column descriptor - * @throws ServiceException e - */ - @Override - public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req) - throws ServiceException; - - /* Region-level */ - - /** - * Move a region to a specified destination server. - * @param controller Unused (set to null). - * @param req The request that contains:
- * - region: The encoded region name; i.e. the hash that makes
- * up the region name suffix: e.g. if the region name is
- * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.,
- * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396.
      - * - destServerName: The servername of the destination regionserver. If - * passed the empty byte array we'll assign to a random server. A server name - * is made of host, port and startcode. Here is an example: - * host187.example.com,60020,1289493121758. - * @throws ServiceException that wraps a UnknownRegionException if we can't find a - * region named encodedRegionName - */ - @Override - public MoveRegionResponse moveRegion(RpcController controller, MoveRegionRequest req) - throws ServiceException; - - /** - * Assign a region to a server chosen at random. - * @param controller Unused (set to null). - * @param req contains the region to assign. Will use existing RegionPlan if one - * found. - * @throws ServiceException - */ - @Override - public AssignRegionResponse assignRegion(RpcController controller, AssignRegionRequest req) - throws ServiceException; - - /** - * Unassign a region from current hosting regionserver. Region will then be - * assigned to a regionserver chosen at random. Region could be reassigned - * back to the same server. Use {@link #moveRegion} if you want to - * control the region movement. - * @param controller Unused (set to null). - * @param req The request that contains:
      - * - region: Region to unassign. Will clear any existing RegionPlan - * if one found.
- * - force: If true, force the unassign (this will remove the region from
- * regions-in-transition, if present, as well as from the assigned regions --
- * radical!). If this results in a double assignment, use hbck -fix to resolve.
- * @throws ServiceException
- */
- @Override
- public UnassignRegionResponse unassignRegion(RpcController controller, UnassignRegionRequest req)
- throws ServiceException;
-
- /**
- * Offline a region from the assignment manager's in-memory state. The
- * region should be in a closed state and there will be no attempt to
- * automatically reassign the region as in unassign. This is a special
- * method, and should only be used by experts or hbck.
- * @param controller Unused (set to null).
- * @param request OfflineRegionRequest that contains:
      - * - region: Region to offline. Will clear any existing RegionPlan - * if one found. - * @throws ServiceException - */ - @Override - public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request) - throws ServiceException; - - /* Table-level */ - - /** - * Creates a new table asynchronously. If splitKeys are specified, then the - * table will be created with an initial set of multiple regions. - * If splitKeys is null, the table will be created with a single region. - * @param controller Unused (set to null). - * @param req CreateTableRequest that contains:
      - * - tablesSchema: table descriptor
      - * - splitKeys - * @throws ServiceException - */ - @Override - public CreateTableResponse createTable(RpcController controller, CreateTableRequest req) - throws ServiceException; - - /** - * Deletes a table - * @param controller Unused (set to null). - * @param req DeleteTableRequest that contains:
      - * - tableName: table to delete - * @throws ServiceException - */ - @Override - public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest req) - throws ServiceException; - - /** - * Puts the table on-line (only needed if table has been previously taken offline) - * @param controller Unused (set to null). - * @param req EnableTableRequest that contains:
      - * - tableName: table to enable - * @throws ServiceException - */ - @Override - public EnableTableResponse enableTable(RpcController controller, EnableTableRequest req) - throws ServiceException; - - /** - * Take table offline - * - * @param controller Unused (set to null). - * @param req DisableTableRequest that contains:
      - * - tableName: table to take offline - * @throws ServiceException - */ - @Override - public DisableTableResponse disableTable(RpcController controller, DisableTableRequest req) - throws ServiceException; - - /** - * Modify a table's metadata - * - * @param controller Unused (set to null). - * @param req ModifyTableRequest that contains:
      - * - tableName: table to modify
      - * - tableSchema: new descriptor for table - * @throws ServiceException - */ - @Override - public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req) - throws ServiceException; - - /* Cluster-level */ - - /** - * Shutdown an HBase cluster. - * @param controller Unused (set to null). - * @param request ShutdownRequest - * @return ShutdownResponse - * @throws ServiceException - */ - @Override - public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request) - throws ServiceException; - - /** - * Stop HBase Master only. - * Does not shutdown the cluster. - * @param controller Unused (set to null). - * @param request StopMasterRequest - * @return StopMasterResponse - * @throws ServiceException - */ - @Override - public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request) - throws ServiceException; - - /** - * Run the balancer. Will run the balancer and if regions to move, it will - * go ahead and do the reassignments. Can NOT run for various reasons. Check - * logs. - * @param c Unused (set to null). - * @param request BalanceRequest - * @return BalanceResponse that contains:
      - * - balancerRan: True if balancer ran and was able to tell the region servers to - * unassign all the regions to balance (the re-assignment itself is async), - * false otherwise. - */ - @Override - public BalanceResponse balance(RpcController c, BalanceRequest request) throws ServiceException; - - /** - * Turn the load balancer on or off. - * @param controller Unused (set to null). - * @param req SetBalancerRunningRequest that contains:
      - * - on: If true, enable balancer. If false, disable balancer.
- * - synchronous: if true, wait for the currently running balance() call, if any, to return before applying the change.
- * - prevBalanceValue: Previous balancer value
- * @throws ServiceException
- */
- @Override
- public SetBalancerRunningResponse setBalancerRunning(
- RpcController controller, SetBalancerRunningRequest req) throws ServiceException;
-
- /**
- * @param c Unused (set to null).
- * @param req IsMasterRunningRequest
- * @return IsMasterRunningResponse that contains:
      - * isMasterRunning: true if master is available - * @throws ServiceException - */ - @Override - public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req) - throws ServiceException; - - /** - * Run a scan of the catalog table - * @param c Unused (set to null). - * @param req CatalogScanRequest - * @return CatalogScanResponse that contains the int return code corresponding - * to the number of entries cleaned - * @throws ServiceException - */ - @Override - public CatalogScanResponse runCatalogScan(RpcController c, - CatalogScanRequest req) throws ServiceException; - - /** - * Enable/Disable the catalog janitor - * @param c Unused (set to null). - * @param req EnableCatalogJanitorRequest that contains:
      - * - enable: If true, enable catalog janitor. If false, disable janitor.
      - * @return EnableCatalogJanitorResponse that contains:
- * - prevValue: true, if it was enabled previously; false, otherwise
- * @throws ServiceException
- */
- @Override
- public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
- EnableCatalogJanitorRequest req) throws ServiceException;
-
- /**
- * Query whether the catalog janitor is enabled
- * @param c Unused (set to null).
- * @param req IsCatalogJanitorEnabledRequest
- * @return IsCatalogJanitorEnabledResponse that contains:
      - * - value: true, if it is enabled; false, otherwise - * @throws ServiceException - */ - @Override - public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c, - IsCatalogJanitorEnabledRequest req) throws ServiceException; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java hbase-server/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java deleted file mode 100644 index d8cff7d..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService; -import org.apache.hadoop.hbase.security.TokenInfo; -import org.apache.hadoop.hbase.security.KerberosInfo; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse; - -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; - -/** - * Protocol that a client uses to communicate with the Master (for monitoring purposes). - */ -@KerberosInfo( - serverPrincipal = "hbase.master.kerberos.principal") -@TokenInfo("HBASE_AUTH_TOKEN") -@InterfaceAudience.Public -@InterfaceStability.Evolving -public interface MasterMonitorProtocol extends - MasterMonitorService.BlockingInterface, MasterProtocol { - public static final long VERSION = 1L; - - /** - * Used by the client to get the number of regions that have received the - * updated schema - * - * @param controller Unused (set to null). - * @param req GetSchemaAlterStatusRequest that contains:
- * - tableName
- * @return GetSchemaAlterStatusResponse indicating the number of regions updated:
- * yetToUpdateRegions is the number of regions that are yet to be updated;
- * totalRegions is the total number of regions of the table
- * @throws ServiceException
- */
- @Override
- public GetSchemaAlterStatusResponse getSchemaAlterStatus(
- RpcController controller, GetSchemaAlterStatusRequest req) throws ServiceException;
-
- /**
- * Get list of TableDescriptors for requested tables.
- * @param controller Unused (set to null).
- * @param req GetTableDescriptorsRequest that contains:
      - * - tableNames: requested tables, or if empty, all are requested - * @return GetTableDescriptorsResponse - * @throws ServiceException - */ - @Override - public GetTableDescriptorsResponse getTableDescriptors( - RpcController controller, GetTableDescriptorsRequest req) throws ServiceException; - - /** - * Return cluster status. - * @param controller Unused (set to null). - * @param req GetClusterStatusRequest - * @return status object - * @throws ServiceException - */ - @Override - public GetClusterStatusResponse getClusterStatus(RpcController controller, GetClusterStatusRequest req) - throws ServiceException; - - /** - * @param c Unused (set to null). - * @param req IsMasterRunningRequest - * @return IsMasterRunningRequest that contains:
      - * isMasterRunning: true if master is available - * @throws ServiceException - */ - @Override - public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req) - throws ServiceException; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java hbase-server/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java deleted file mode 100644 index 8c0a4aa..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Thrown if the master is not running - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class MasterNotRunningException extends IOException { - private static final long serialVersionUID = 1L << 23 - 1L; - /** default constructor */ - public MasterNotRunningException() { - super(); - } - - /** - * Constructor - * @param s message - */ - public MasterNotRunningException(String s) { - super(s); - } - - /** - * Constructor taking another exception. - * @param e Exception to grab data from. - */ - public MasterNotRunningException(Exception e) { - super(e); - } - - public MasterNotRunningException(String s, Exception e) { - super(s, e); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java hbase-server/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java deleted file mode 100644 index 62b3b84..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Functions implemented by all the master protocols (e.g. MasterAdminProtocol, -// MasterMonitorProtocol). 
Currently, this is only isMasterRunning, which is used, -// on proxy creation, to check if the master has been stopped. If it has, -// a MasterNotRunningException is thrown back to the client, and the client retries. - -package org.apache.hadoop.hbase; - -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; -import org.apache.hadoop.hbase.ipc.VersionedProtocol; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; - -public interface MasterProtocol extends VersionedProtocol, MasterService.BlockingInterface { - - /** - * @param c Unused (set to null). - * @param req IsMasterRunningRequest - * @return IsMasterRunningRequest that contains:
      - * isMasterRunning: true if master is available - * @throws ServiceException - */ - public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req) - throws ServiceException; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java hbase-server/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java deleted file mode 100644 index 49bc935..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DoNotRetryIOException; - -/** - * Thrown when an operation requires the root and all meta regions to be online - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException { - private static final long serialVersionUID = 6439786157874827523L; - /** - * default constructor - */ - public NotAllMetaRegionsOnlineException() { - super(); - } - - /** - * @param message - */ - public NotAllMetaRegionsOnlineException(String message) { - super(message); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java hbase-server/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java deleted file mode 100644 index 47d0a26..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
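MasterProtocol's isMasterRunning() is consulted at proxy-creation time, so client code rarely calls it directly; a MasterNotRunningException surfaces on the client instead. A minimal sketch of probing master availability, assuming HBaseAdmin.checkHBaseAvailable(Configuration) behaves as in contemporaneous releases (it builds a throw-away connection and performs the isMasterRunning check under the hood):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class MasterAvailabilityCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try {
      // Creates a short-lived connection and asks the master if it is running;
      // throws if the master proxy cannot be created or reports not-running.
      HBaseAdmin.checkHBaseAvailable(conf);
      System.out.println("HBase master is running");
    } catch (MasterNotRunningException e) {
      System.out.println("Master is not running: " + e.getMessage());
    } catch (ZooKeeperConnectionException e) {
      System.out.println("Could not connect to ZooKeeper: " + e.getMessage());
    }
  }
}
```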
- */ -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * Thrown by a region server if it is sent a request for a region it is not - * serving. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class NotServingRegionException extends IOException { - private static final long serialVersionUID = 1L << 17 - 1L; - - /** default constructor */ - public NotServingRegionException() { - super(); - } - - /** - * Constructor - * @param s message - */ - public NotServingRegionException(String s) { - super(s); - } - - /** - * Constructor - * @param s message - */ - public NotServingRegionException(final byte [] s) { - super(Bytes.toString(s)); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/OutOfOrderScannerNextException.java hbase-server/src/main/java/org/apache/hadoop/hbase/OutOfOrderScannerNextException.java deleted file mode 100644 index b84e705..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/OutOfOrderScannerNextException.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * Thrown by a RegionServer while doing next() calls on a ResultScanner. Both client and server - * maintain a nextCallSeq and if they do not match, RS will throw this exception. - */ -@InterfaceAudience.Private -public class OutOfOrderScannerNextException extends DoNotRetryIOException { - - private static final long serialVersionUID = 4595751007554273567L; - - public OutOfOrderScannerNextException() { - super(); - } - - public OutOfOrderScannerNextException(String msg) { - super(msg); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java hbase-server/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java deleted file mode 100644 index 88e436c..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
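The exception types above feed the client's retry logic: subclasses of DoNotRetryIOException (such as OutOfOrderScannerNextException) abort the call, while a NotServingRegionException prompts the client to refresh its cached region location and retry. A rough sketch of that classification, assuming DoNotRetryIOException subclassing is the only signal consulted:

```java
import java.io.IOException;

import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.NotServingRegionException;

public final class RetryClassifier {
  private RetryClassifier() {}

  /**
   * Returns true when a client-side retry (possibly after refreshing the
   * cached region location) is worthwhile, false when the server has
   * signalled that retrying the same call cannot succeed.
   */
  public static boolean isRetriable(IOException e) {
    if (e instanceof DoNotRetryIOException) {
      return false; // e.g. OutOfOrderScannerNextException
    }
    if (e instanceof NotServingRegionException) {
      return true;  // region moved or is opening; re-locate and retry
    }
    return true;    // default: treat as transient and retry with backoff
  }
}
```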
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.IOException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * This exception is thrown by the master when a region server was shut down and - * restarted so fast that the master still hasn't processed the server shutdown - * of the first instance, or when master is initializing and client call admin - * operations - */ -@SuppressWarnings("serial") -@InterfaceAudience.Public -@InterfaceStability.Stable -public class PleaseHoldException extends IOException { - public PleaseHoldException(String message) { - super(message); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/RegionException.java hbase-server/src/main/java/org/apache/hadoop/hbase/RegionException.java deleted file mode 100644 index 8c1d365..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/RegionException.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -/** - * Thrown when something happens related to region handling. - * Subclasses have to be more specific. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class RegionException extends IOException { - private static final long serialVersionUID = 1473510258071111371L; - - /** default constructor */ - public RegionException() { - super(); - } - - /** - * Constructor - * @param s message - */ - public RegionException(String s) { - super(s); - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/RegionLoad.java hbase-server/src/main/java/org/apache/hadoop/hbase/RegionLoad.java deleted file mode 100644 index cdb3fba..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/RegionLoad.java +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
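PleaseHoldException tells callers that the master is temporarily unable to act (still initializing, or still processing a fast regionserver restart), so admin callers typically back off and retry. A sketch of that pattern, assuming the RPC layer surfaces the remote PleaseHoldException unchanged; the table name "example_table" and family "f" are made up for illustration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CreateTableWithRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical table and family names, used only for this sketch.
    HTableDescriptor desc = new HTableDescriptor("example_table");
    desc.addFamily(new HColumnDescriptor("f"));

    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      for (int attempt = 1; ; attempt++) {
        try {
          admin.createTable(desc);
          break;
        } catch (PleaseHoldException e) {
          // Master not ready yet; back off and retry instead of failing.
          if (attempt >= 10) throw e;
          Thread.sleep(1000L * attempt);
        }
      }
    } finally {
      admin.close();
    }
  }
}
```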
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.util.Bytes; -import java.util.TreeSet; -import java.util.Set; - -/** - * Encapsulates per-region load metrics. - */ -@InterfaceAudience.Private -public class RegionLoad { - - protected HBaseProtos.RegionLoad regionLoadPB; - - public RegionLoad(HBaseProtos.RegionLoad regionLoadPB) { - this.regionLoadPB = regionLoadPB; - } - - /** - * @return the region name - */ - public byte[] getName() { - return regionLoadPB.getRegionSpecifier().getValue().toByteArray(); - } - - /** - * @return the region name as a string - */ - public String getNameAsString() { - return Bytes.toString(getName()); - } - - /** - * @return the number of stores - */ - public int getStores() { - return regionLoadPB.getStores(); - } - - /** - * @return the number of storefiles - */ - public int getStorefiles() { - return regionLoadPB.getStorefiles(); - } - - /** - * @return the total size of the storefiles, in MB - */ - public int getStorefileSizeMB() { - return regionLoadPB.getStorefileSizeMB(); - } - - /** - * @return the memstore size, in MB - */ - public int getMemStoreSizeMB() { - return regionLoadPB.getMemstoreSizeMB(); - } - - /** - * @return the approximate size of storefile indexes on the heap, in MB - */ - public int getStorefileIndexSizeMB() { - return regionLoadPB.getStorefileIndexSizeMB(); - } - - /** - * @return the number of requests made to region - */ - public long getRequestsCount() { - return getReadRequestsCount() + getWriteRequestsCount(); - } - - /** - * @return the number of read requests made to region - */ - public long getReadRequestsCount() { - return regionLoadPB.getReadRequestsCount(); - } - - /** - * @return the number of write requests made to region - */ - public long getWriteRequestsCount() { - return regionLoadPB.getWriteRequestsCount(); - } - - /** - * @return The current total size of root-level indexes for the region, in KB. - */ - public int getRootIndexSizeKB() { - return regionLoadPB.getRootIndexSizeKB(); - } - - /** - * @return The total size of all index blocks, not just the root level, in KB. - */ - public int getTotalStaticIndexSizeKB() { - return regionLoadPB.getTotalStaticIndexSizeKB(); - } - - /** - * @return The total size of all Bloom filter blocks, not just loaded into the - * block cache, in KB. 
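The RegionLoad getters above are plain views over the protobuf RegionLoad message. A small helper that summarizes a region using only those getters might look like this; RegionLoad instances are normally obtained from the server load reported through ClusterStatus:

```java
import org.apache.hadoop.hbase.RegionLoad;

public final class RegionLoadSummary {
  private RegionLoadSummary() {}

  /** Formats a one-line summary using only the getters RegionLoad exposes. */
  public static String summarize(RegionLoad rl) {
    return String.format(
        "%s: stores=%d storefiles=%d storefileSizeMB=%d memstoreSizeMB=%d reads=%d writes=%d",
        rl.getNameAsString(),
        rl.getStores(),
        rl.getStorefiles(),
        rl.getStorefileSizeMB(),
        rl.getMemStoreSizeMB(),
        rl.getReadRequestsCount(),
        rl.getWriteRequestsCount());
  }
}
```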
- */ - public int getTotalStaticBloomSizeKB() { - return regionLoadPB.getTotalStaticBloomSizeKB(); - } - - /** - * @return the total number of kvs in current compaction - */ - public long getTotalCompactingKVs() { - return regionLoadPB.getTotalCompactingKVs(); - } - - /** - * @return the number of already compacted kvs in current compaction - */ - public long getCurrentCompactedKVs() { - return regionLoadPB.getCurrentCompactedKVs(); - } - - /** - * This does not really belong inside RegionLoad but its being done in the name of expediency. - * @return the completed sequence Id for the region - */ - public long getCompleteSequenceId() { - return regionLoadPB.getCompleteSequenceId(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java hbase-server/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java deleted file mode 100644 index 5436c9e..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java +++ /dev/null @@ -1,126 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.ipc.RemoteException; - -/** - * Subclass if the server knows the region is now on another server. - * This allows the client to call the new region server without calling the master. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class RegionMovedException extends NotServingRegionException { - private static final Log LOG = LogFactory.getLog(RegionMovedException.class); - private static final long serialVersionUID = -7232903522310558397L; - - private final String hostname; - private final int port; - - private static final String HOST_FIELD = "hostname="; - private static final String PORT_FIELD = "port="; - - public RegionMovedException(final String hostname, final int port) { - super(); - this.hostname = hostname; - this.port = port; - } - - public String getHostname() { - return hostname; - } - - public int getPort() { - return port; - } - - /** - * For hadoop.ipc internal call. Do NOT use. - * We have to parse the hostname to recreate the exception. 
- * The input is the one generated by {@link #getMessage()} - */ - public RegionMovedException(String s) { - int posHostname = s.indexOf(HOST_FIELD) + HOST_FIELD.length(); - int posPort = s.indexOf(PORT_FIELD) + PORT_FIELD.length(); - - String tmpHostname = null; - int tmpPort = -1; - try { - tmpHostname = s.substring(posHostname, s.indexOf(' ', posHostname)); - tmpPort = Integer.parseInt(s.substring(posPort, s.indexOf('.', posPort))); - } catch (Exception ignored) { - LOG.warn("Can't parse the hostname and the port from this string: " + s + ", "+ - "Continuing"); - } - - hostname = tmpHostname; - port = tmpPort; - } - - @Override - public String getMessage() { - return "Region moved to: " + HOST_FIELD + hostname + " " + PORT_FIELD + port + "."; - } - - /** - * Look for a RegionMovedException in the exception: - * - hadoop.ipc wrapped exceptions - * - nested exceptions - * Returns null if we didn't find the exception or if it was not readable. - */ - public static RegionMovedException find(Object exception) { - if (exception == null || !(exception instanceof Throwable)){ - return null; - } - - Throwable cur = (Throwable)exception; - RegionMovedException res = null; - - while (res == null && cur != null) { - if (cur instanceof RegionMovedException) { - res = (RegionMovedException) cur; - } else { - if (cur instanceof RemoteException) { - RemoteException re = (RemoteException) cur; - Exception e = re.unwrapRemoteException(RegionMovedException.class); - if (e == null){ - e = re.unwrapRemoteException(); - } - // unwrapRemoteException can return the exception given as a parameter when it cannot - // unwrap it. In this case, there is no need to look further - // noinspection ObjectEquality - if (e != re){ - res = find(e); - } - } - cur = cur.getCause(); - } - } - - if (res != null && (res.getPort() < 0 || res.getHostname() == null)){ - // We failed to parse the exception. Let's act as we don't find the exception. - return null; - } else { - return res; - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java deleted file mode 100644 index 069cea3..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
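RegionMovedException, shown above, encodes the region's new location into its message in getMessage() and re-parses the hostname= and port= fields in its String constructor, because hadoop.ipc only carries the message text across the wire. The round trip below is purely illustrative of that encode/decode logic; the class is @InterfaceAudience.Private and its String constructor is explicitly marked "Do NOT use":

```java
import org.apache.hadoop.hbase.RegionMovedException;

public class RegionMovedRoundTrip {
  public static void main(String[] args) {
    // Server side: the exception carries the new location in its message.
    RegionMovedException original =
        new RegionMovedException("host187.example.com", 60020);
    String wireMessage = original.getMessage();
    // => "Region moved to: hostname=host187.example.com port=60020."

    // Client side: only the message string survives the RPC, so the
    // String constructor recovers hostname and port from it.
    RegionMovedException reparsed = new RegionMovedException(wireMessage);
    System.out.println(reparsed.getHostname() + ":" + reparsed.getPort());
  }
}
```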
- */ - -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.ipc.VersionedProtocol; -import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; -import org.apache.hadoop.hbase.security.TokenInfo; -import org.apache.hadoop.hbase.security.KerberosInfo; - -/** - * Protocol that a RegionServer uses to communicate its status to the Master. - */ -@KerberosInfo( - serverPrincipal = "hbase.master.kerberos.principal") -@TokenInfo("HBASE_AUTH_TOKEN") -@InterfaceAudience.Private -@InterfaceStability.Evolving -public interface RegionServerStatusProtocol extends - RegionServerStatusService.BlockingInterface, VersionedProtocol { - public static final long VERSION = 1L; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java deleted file mode 100644 index f5217bc..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Thrown by a region server if it will block and wait to serve a request. - * For example, the client wants to insert something to a region while the - * region is compacting. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class RegionTooBusyException extends IOException { - private static final long serialVersionUID = 1728345723728342L; - - /** default constructor */ - public RegionTooBusyException() { - super(); - } - - /** - * Constructor - * @param msg message - */ - public RegionTooBusyException(final String msg) { - super(msg); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java deleted file mode 100644 index a7e6dd2..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java +++ /dev/null @@ -1,121 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ipc.RemoteException; - -/** - * An immutable class which contains a static method for handling - * org.apache.hadoop.ipc.RemoteException exceptions. - */ -@InterfaceAudience.Private -public class RemoteExceptionHandler { - /* Not instantiable */ - private RemoteExceptionHandler() {super();} - - /** - * Examine passed Throwable. See if its carrying a RemoteException. If so, - * run {@link #decodeRemoteException(RemoteException)} on it. Otherwise, - * pass back t unaltered. - * @param t Throwable to examine. - * @return Decoded RemoteException carried by t or - * t unaltered. - */ - public static Throwable checkThrowable(final Throwable t) { - Throwable result = t; - if (t instanceof RemoteException) { - try { - result = - RemoteExceptionHandler.decodeRemoteException((RemoteException)t); - } catch (Throwable tt) { - result = tt; - } - } - return result; - } - - /** - * Examine passed IOException. See if its carrying a RemoteException. If so, - * run {@link #decodeRemoteException(RemoteException)} on it. Otherwise, - * pass back e unaltered. - * @param e Exception to examine. - * @return Decoded RemoteException carried by e or - * e unaltered. - */ - public static IOException checkIOException(final IOException e) { - Throwable t = checkThrowable(e); - return t instanceof IOException? (IOException)t: new IOException(t); - } - - /** - * Converts org.apache.hadoop.ipc.RemoteException into original exception, - * if possible. If the original exception is an Error or a RuntimeException, - * throws the original exception. - * - * @param re original exception - * @return decoded RemoteException if it is an instance of or a subclass of - * IOException, or the original RemoteException if it cannot be decoded. - * - * @throws IOException indicating a server error ocurred if the decoded - * exception is not an IOException. The decoded exception is set as - * the cause. - * @deprecated Use {@link RemoteException#unwrapRemoteException()} instead. 
- * In fact we should look into deprecating this whole class - St.Ack 2010929 - */ - public static IOException decodeRemoteException(final RemoteException re) - throws IOException { - IOException i = re; - - try { - Class c = Class.forName(re.getClassName()); - - Class[] parameterTypes = { String.class }; - Constructor ctor = c.getConstructor(parameterTypes); - - Object[] arguments = { re.getMessage() }; - Throwable t = (Throwable) ctor.newInstance(arguments); - - if (t instanceof IOException) { - i = (IOException) t; - - } else { - i = new IOException("server error"); - i.initCause(t); - throw i; - } - - } catch (ClassNotFoundException x) { - // continue - } catch (NoSuchMethodException x) { - // continue - } catch (IllegalAccessException x) { - // continue - } catch (InvocationTargetException x) { - // continue - } catch (InstantiationException x) { - // continue - } - return i; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java deleted file mode 100644 index 2106710..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.catalog.CatalogTracker; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; - -/** - * Defines the set of shared functions implemented by HBase servers (Masters - * and RegionServers). - */ -@InterfaceAudience.Private -public interface Server extends Abortable, Stoppable { - /** - * Gets the configuration object for this server. - */ - public Configuration getConfiguration(); - - /** - * Gets the ZooKeeper instance for this server. - */ - public ZooKeeperWatcher getZooKeeper(); - - /** - * @return Master's instance of {@link CatalogTracker} - */ - public CatalogTracker getCatalogTracker(); - - /** - * @return The unique server name for this server. - */ - public ServerName getServerName(); -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ServerLoad.java hbase-server/src/main/java/org/apache/hadoop/hbase/ServerLoad.java deleted file mode 100644 index bd88b6a..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ServerLoad.java +++ /dev/null @@ -1,305 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.TreeSet; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor; -import org.apache.hadoop.hbase.RegionLoad; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Strings; - -/** - * This class is used for exporting current state of load on a RegionServer. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class ServerLoad { - private int stores = 0; - private int storefiles = 0; - private int storeUncompressedSizeMB = 0; - private int storefileSizeMB = 0; - private int memstoreSizeMB = 0; - private int storefileIndexSizeMB = 0; - private int readRequestsCount = 0; - private int writeRequestsCount = 0; - private int rootIndexSizeKB = 0; - private int totalStaticIndexSizeKB = 0; - private int totalStaticBloomSizeKB = 0; - private long totalCompactingKVs = 0; - private long currentCompactedKVs = 0; - - public ServerLoad(HBaseProtos.ServerLoad serverLoad) { - this.serverLoad = serverLoad; - for (HBaseProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) { - stores += rl.getStores(); - storefiles += rl.getStorefiles(); - storeUncompressedSizeMB += rl.getStoreUncompressedSizeMB(); - storefileSizeMB += rl.getStorefileSizeMB(); - memstoreSizeMB += rl.getMemstoreSizeMB(); - storefileIndexSizeMB += rl.getStorefileIndexSizeMB(); - readRequestsCount += rl.getReadRequestsCount(); - writeRequestsCount += rl.getWriteRequestsCount(); - rootIndexSizeKB += rl.getRootIndexSizeKB(); - totalStaticIndexSizeKB += rl.getTotalStaticIndexSizeKB(); - totalStaticBloomSizeKB += rl.getTotalStaticBloomSizeKB(); - totalCompactingKVs += rl.getTotalCompactingKVs(); - currentCompactedKVs += rl.getCurrentCompactedKVs(); - } - - } - - // NOTE: Function name cannot start with "get" because then an OpenDataException is thrown because - // HBaseProtos.ServerLoad cannot be converted to an open data type(see HBASE-5967). - /* @return the underlying ServerLoad protobuf object */ - public HBaseProtos.ServerLoad obtainServerLoadPB() { - return serverLoad; - } - - protected HBaseProtos.ServerLoad serverLoad; - - /* @return number of requests since last report. */ - public int getNumberOfRequests() { - return serverLoad.getNumberOfRequests(); - } - public boolean hasNumberOfRequests() { - return serverLoad.hasNumberOfRequests(); - } - - /* @return total Number of requests from the start of the region server. 
*/ - public int getTotalNumberOfRequests() { - return serverLoad.getTotalNumberOfRequests(); - } - public boolean hasTotalNumberOfRequests() { - return serverLoad.hasTotalNumberOfRequests(); - } - - /* @return the amount of used heap, in MB. */ - public int getUsedHeapMB() { - return serverLoad.getUsedHeapMB(); - } - public boolean hasUsedHeapMB() { - return serverLoad.hasUsedHeapMB(); - } - - /* @return the maximum allowable size of the heap, in MB. */ - public int getMaxHeapMB() { - return serverLoad.getMaxHeapMB(); - } - public boolean hasMaxHeapMB() { - return serverLoad.hasMaxHeapMB(); - } - - public int getStores() { - return stores; - } - - public int getStorefiles() { - return storefiles; - } - - public int getStoreUncompressedSizeMB() { - return storeUncompressedSizeMB; - } - - public int getStorefileSizeInMB() { - return storefileSizeMB; - } - - public int getMemstoreSizeInMB() { - return memstoreSizeMB; - } - - public int getStorefileIndexSizeInMB() { - return storefileIndexSizeMB; - } - - public int getReadRequestsCount() { - return readRequestsCount; - } - - public int getWriteRequestsCount() { - return writeRequestsCount; - } - - public int getRootIndexSizeKB() { - return rootIndexSizeKB; - } - - public int getTotalStaticIndexSizeKB() { - return totalStaticIndexSizeKB; - } - - public int getTotalStaticBloomSizeKB() { - return totalStaticBloomSizeKB; - } - - public long getTotalCompactingKVs() { - return totalCompactingKVs; - } - - public long getCurrentCompactedKVs() { - return currentCompactedKVs; - } - - /** - * @return the number of regions - */ - public int getNumberOfRegions() { - return serverLoad.getRegionLoadsCount(); - } - - public int getInfoServerPort() { - return serverLoad.getInfoServerPort(); - } - - /** - * Originally, this method factored in the effect of requests going to the - * server as well. However, this does not interact very well with the current - * region rebalancing code, which only factors number of regions. For the - * interim, until we can figure out how to make rebalancing use all the info - * available, we're just going to make load purely the number of regions. - * - * @return load factor for this server - */ - public int getLoad() { - // See above comment - // int load = numberOfRequests == 0 ? 1 : numberOfRequests; - // load *= numberOfRegions == 0 ? 
1 : numberOfRegions; - // return load; - return getNumberOfRegions(); - } - - /** - * @return region load metrics - */ - public Map getRegionsLoad() { - Map regionLoads = - new TreeMap(Bytes.BYTES_COMPARATOR); - for (HBaseProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) { - RegionLoad regionLoad = new RegionLoad(rl); - regionLoads.put(regionLoad.getName(), regionLoad); - } - return regionLoads; - } - - /** - * Return the RegionServer-level coprocessors - * @return string array of loaded RegionServer-level coprocessors - */ - public String[] getRegionServerCoprocessors() { - List list = obtainServerLoadPB().getCoprocessorsList(); - String [] ret = new String[list.size()]; - int i = 0; - for (Coprocessor elem : list) { - ret[i++] = elem.getName(); - } - - return ret; - } - - /** - * Return the RegionServer-level and Region-level coprocessors - * @return string array of loaded RegionServer-level and - * Region-level coprocessors - */ - public String[] getRsCoprocessors() { - // Need a set to remove duplicates, but since generated Coprocessor class - // is not Comparable, make it a Set instead of Set - TreeSet coprocessSet = new TreeSet(); - for (Coprocessor coprocessor : obtainServerLoadPB().getCoprocessorsList()) { - coprocessSet.add(coprocessor.getName()); - } - return coprocessSet.toArray(new String[0]); - } - - /** - * @return number of requests per second received since the last report - */ - public double getRequestsPerSecond() { - return getNumberOfRequests(); - } - - /** - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - StringBuilder sb = - Strings.appendKeyValue(new StringBuilder(), "requestsPerSecond", - Double.valueOf(getRequestsPerSecond())); - Strings.appendKeyValue(sb, "numberOfOnlineRegions", Integer.valueOf(getNumberOfRegions())); - sb = Strings.appendKeyValue(sb, "usedHeapMB", Integer.valueOf(this.getUsedHeapMB())); - sb = Strings.appendKeyValue(sb, "maxHeapMB", Integer.valueOf(getMaxHeapMB())); - sb = Strings.appendKeyValue(sb, "numberOfStores", Integer.valueOf(this.stores)); - sb = Strings.appendKeyValue(sb, "numberOfStorefiles", Integer.valueOf(this.storefiles)); - sb = - Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", - Integer.valueOf(this.storeUncompressedSizeMB)); - sb = Strings.appendKeyValue(sb, "storefileSizeMB", Integer.valueOf(this.storefileSizeMB)); - if (this.storeUncompressedSizeMB != 0) { - sb = - Strings.appendKeyValue( - sb, - "compressionRatio", - String.format("%.4f", (float) this.storefileSizeMB - / (float) this.storeUncompressedSizeMB)); - } - sb = Strings.appendKeyValue(sb, "memstoreSizeMB", Integer.valueOf(this.memstoreSizeMB)); - sb = - Strings.appendKeyValue(sb, "storefileIndexSizeMB", - Integer.valueOf(this.storefileIndexSizeMB)); - sb = Strings.appendKeyValue(sb, "readRequestsCount", Long.valueOf(this.readRequestsCount)); - sb = Strings.appendKeyValue(sb, "writeRequestsCount", Long.valueOf(this.writeRequestsCount)); - sb = Strings.appendKeyValue(sb, "rootIndexSizeKB", Integer.valueOf(this.rootIndexSizeKB)); - sb = - Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", - Integer.valueOf(this.totalStaticIndexSizeKB)); - sb = - Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", - Integer.valueOf(this.totalStaticBloomSizeKB)); - sb = Strings.appendKeyValue(sb, "totalCompactingKVs", Long.valueOf(this.totalCompactingKVs)); - sb = Strings.appendKeyValue(sb, "currentCompactedKVs", Long.valueOf(this.currentCompactedKVs)); - float compactionProgressPct = Float.NaN; - if (this.totalCompactingKVs > 0) { - 
compactionProgressPct = - Float.valueOf((float) this.currentCompactedKVs / this.totalCompactingKVs); - } - sb = Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); - - String[] coprocessorStrings = getRsCoprocessors(); - if (coprocessorStrings != null) { - sb = Strings.appendKeyValue(sb, "coprocessors", Arrays.toString(coprocessorStrings)); - } - return sb.toString(); - } - - public static final ServerLoad EMPTY_SERVERLOAD = - new ServerLoad(HBaseProtos.ServerLoad.newBuilder().build()); -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ServerName.java hbase-server/src/main/java/org/apache/hadoop/hbase/ServerName.java deleted file mode 100644 index 348ca2f..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ServerName.java +++ /dev/null @@ -1,355 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.util.Collection; -import java.util.regex.Pattern; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer; -import org.apache.hadoop.hbase.util.Addressing; -import org.apache.hadoop.hbase.util.Bytes; - -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * Instance of an HBase ServerName. - * A server name is used uniquely identifying a server instance and is made - * of the combination of hostname, port, and startcode. The startcode - * distingushes restarted servers on same hostname and port (startcode is - * usually timestamp of server startup). The {@link #toString()} format of - * ServerName is safe to use in the filesystem and as znode name up in - * ZooKeeper. Its format is: - * <hostname> '{@link #SERVERNAME_SEPARATOR}' <port> '{@link #SERVERNAME_SEPARATOR}' <startcode>. - * For example, if hostname is example.org, port is 1234, - * and the startcode for the regionserver is 1212121212, then - * the {@link #toString()} would be example.org,1234,1212121212. - * - *
- * You can obtain a versioned serialized form of this class by calling - * {@link #getVersionedBytes()}. To deserialize, call {@link #parseVersionedServerName(byte[])} - *
      Immutable. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class ServerName implements Comparable { - /** - * Version for this class. - * Its a short rather than a byte so I can for sure distinguish between this - * version of this class and the version previous to this which did not have - * a version. - */ - private static final short VERSION = 0; - static final byte [] VERSION_BYTES = Bytes.toBytes(VERSION); - - /** - * What to use if no startcode supplied. - */ - public static final int NON_STARTCODE = -1; - - /** - * This character is used as separator between server hostname, port and - * startcode. - */ - public static final String SERVERNAME_SEPARATOR = ","; - - public static Pattern SERVERNAME_PATTERN = - Pattern.compile("[^" + SERVERNAME_SEPARATOR + "]+" + - SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX + - SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX + "$"); - - /** - * What to use if server name is unknown. - */ - public static final String UNKNOWN_SERVERNAME = "#unknown#"; - - private final String servername; - private final String hostname; - private final int port; - private final long startcode; - - /** - * Cached versioned bytes of this ServerName instance. - * @see #getVersionedBytes() - */ - private byte [] bytes; - - public ServerName(final String hostname, final int port, final long startcode) { - this.hostname = hostname; - this.port = port; - this.startcode = startcode; - this.servername = getServerName(hostname, port, startcode); - } - - public ServerName(final String serverName) { - this(parseHostname(serverName), parsePort(serverName), - parseStartcode(serverName)); - } - - public ServerName(final String hostAndPort, final long startCode) { - this(Addressing.parseHostname(hostAndPort), - Addressing.parsePort(hostAndPort), startCode); - } - - public static String parseHostname(final String serverName) { - if (serverName == null || serverName.length() <= 0) { - throw new IllegalArgumentException("Passed hostname is null or empty"); - } - int index = serverName.indexOf(SERVERNAME_SEPARATOR); - return serverName.substring(0, index); - } - - public static int parsePort(final String serverName) { - String [] split = serverName.split(SERVERNAME_SEPARATOR); - return Integer.parseInt(split[1]); - } - - public static long parseStartcode(final String serverName) { - int index = serverName.lastIndexOf(SERVERNAME_SEPARATOR); - return Long.parseLong(serverName.substring(index + 1)); - } - - @Override - public String toString() { - return getServerName(); - } - - /** - * @return {@link #getServerName()} as bytes with a short-sized prefix with - * the ServerName#VERSION of this class. 
- */ - public synchronized byte [] getVersionedBytes() { - if (this.bytes == null) { - this.bytes = Bytes.add(VERSION_BYTES, Bytes.toBytes(getServerName())); - } - return this.bytes; - } - - public String getServerName() { - return servername; - } - - public String getHostname() { - return hostname; - } - - public int getPort() { - return port; - } - - public long getStartcode() { - return startcode; - } - - /** - * @param hostName - * @param port - * @param startcode - * @return Server name made of the concatenation of hostname, port and - * startcode formatted as <hostname> ',' <port> ',' <startcode> - */ - public static String getServerName(String hostName, int port, long startcode) { - final StringBuilder name = new StringBuilder(hostName.length() + 1 + 5 + 1 + 13); - name.append(hostName); - name.append(SERVERNAME_SEPARATOR); - name.append(port); - name.append(SERVERNAME_SEPARATOR); - name.append(startcode); - return name.toString(); - } - - /** - * @param hostAndPort String in form of <hostname> ':' <port> - * @param startcode - * @return Server name made of the concatenation of hostname, port and - * startcode formatted as <hostname> ',' <port> ',' <startcode> - */ - public static String getServerName(final String hostAndPort, - final long startcode) { - int index = hostAndPort.indexOf(":"); - if (index <= 0) throw new IllegalArgumentException("Expected ':' "); - return getServerName(hostAndPort.substring(0, index), - Integer.parseInt(hostAndPort.substring(index + 1)), startcode); - } - - /** - * @return Hostname and port formatted as described at - * {@link Addressing#createHostAndPortStr(String, int)} - */ - public String getHostAndPort() { - return Addressing.createHostAndPortStr(this.hostname, this.port); - } - - /** - * @param serverName ServerName in form specified by {@link #getServerName()} - * @return The server start code parsed from servername - */ - public static long getServerStartcodeFromServerName(final String serverName) { - int index = serverName.lastIndexOf(SERVERNAME_SEPARATOR); - return Long.parseLong(serverName.substring(index + 1)); - } - - /** - * Utility method to excise the start code from a server name - * @param inServerName full server name - * @return server name less its start code - */ - public static String getServerNameLessStartCode(String inServerName) { - if (inServerName != null && inServerName.length() > 0) { - int index = inServerName.lastIndexOf(SERVERNAME_SEPARATOR); - if (index > 0) { - return inServerName.substring(0, index); - } - } - return inServerName; - } - - @Override - public int compareTo(ServerName other) { - int compare = this.getHostname().toLowerCase(). - compareTo(other.getHostname().toLowerCase()); - if (compare != 0) return compare; - compare = this.getPort() - other.getPort(); - if (compare != 0) return compare; - return (int)(this.getStartcode() - other.getStartcode()); - } - - @Override - public int hashCode() { - return getServerName().hashCode(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null) return false; - if (!(o instanceof ServerName)) return false; - return this.compareTo((ServerName)o) == 0; - } - - - /** - * @return ServerName with matching hostname and port. 
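An illustrative round trip through the formats described above, using only constructors and helpers defined in this class; the host, port and startcode values are invented:

    import org.apache.hadoop.hbase.ServerName;

    public class ServerNameSketch {
      public static void main(String[] args) {
        // hostname, port and startcode render as "example.org,1234,1212121212".
        ServerName sn = new ServerName("example.org", 1234, 1212121212L);
        String asString = sn.toString();

        // Round trip through the string form and the versioned byte form.
        ServerName fromString = ServerName.parseServerName(asString);
        ServerName fromBytes = ServerName.parseVersionedServerName(sn.getVersionedBytes());

        System.out.println(sn.getHostAndPort());          // example.org:1234
        System.out.println(fromString.equals(fromBytes)); // true
      }
    }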
- */ - public static ServerName findServerWithSameHostnamePort(final Collection names, - final ServerName serverName) { - for (ServerName sn: names) { - if (isSameHostnameAndPort(serverName, sn)) return sn; - } - return null; - } - - /** - * @param left - * @param right - * @return True if other has same hostname and port. - */ - public static boolean isSameHostnameAndPort(final ServerName left, - final ServerName right) { - if (left == null) return false; - if (right == null) return false; - return left.getHostname().equals(right.getHostname()) && - left.getPort() == right.getPort(); - } - - /** - * Use this method instantiating a {@link ServerName} from bytes - * gotten from a call to {@link #getVersionedBytes()}. Will take care of the - * case where bytes were written by an earlier version of hbase. - * @param versionedBytes Pass bytes gotten from a call to {@link #getVersionedBytes()} - * @return A ServerName instance. - * @see #getVersionedBytes() - */ - public static ServerName parseVersionedServerName(final byte [] versionedBytes) { - // Version is a short. - short version = Bytes.toShort(versionedBytes); - if (version == VERSION) { - int length = versionedBytes.length - Bytes.SIZEOF_SHORT; - return new ServerName(Bytes.toString(versionedBytes, Bytes.SIZEOF_SHORT, length)); - } - // Presume the bytes were written with an old version of hbase and that the - // bytes are actually a String of the form "'' ':' ''". - return new ServerName(Bytes.toString(versionedBytes), NON_STARTCODE); - } - - /** - * @param str Either an instance of {@link ServerName#toString()} or a - * "'' ':' ''". - * @return A ServerName instance. - */ - public static ServerName parseServerName(final String str) { - return SERVERNAME_PATTERN.matcher(str).matches()? new ServerName(str): - new ServerName(str, NON_STARTCODE); - } - - - /** - * @return true if the String follows the pattern of {@link ServerName#toString()}, false - * otherwise. - */ - public static boolean isFullServerName(final String str){ - if (str == null ||str.isEmpty()) return false; - return SERVERNAME_PATTERN.matcher(str).matches(); - } - - /** - * Get a ServerName from the passed in data bytes. - * @param data Data with a serialize server name in it; can handle the old style - * servername where servername was host and port. Works too with data that - * begins w/ the pb 'PBUF' magic and that is then followed by a protobuf that - * has a serialized {@link ServerName} in it. - * @return Returns null if data is null else converts passed data - * to a ServerName instance. - * @throws DeserializationException - */ - public static ServerName parseFrom(final byte [] data) throws DeserializationException { - if (data == null || data.length <= 0) return null; - if (ProtobufUtil.isPBMagicPrefix(data)) { - int prefixLen = ProtobufUtil.lengthOfPBMagic(); - try { - RootRegionServer rss = - RootRegionServer.newBuilder().mergeFrom(data, prefixLen, data.length - prefixLen).build(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn = rss.getServer(); - return new ServerName(sn.getHostName(), sn.getPort(), sn.getStartCode()); - } catch (InvalidProtocolBufferException e) { - // A failed parse of the znode is pretty catastrophic. Rather than loop - // retrying hoping the bad bytes will changes, and rather than change - // the signature on this method to add an IOE which will send ripples all - // over the code base, throw a RuntimeException. This should "never" happen. - // Fail fast if it does. 
- throw new DeserializationException(e); - } - } - // The str returned could be old style -- pre hbase-1502 -- which was - // hostname and port seperated by a colon rather than hostname, port and - // startcode delimited by a ','. - String str = Bytes.toString(data); - int index = str.indexOf(ServerName.SERVERNAME_SEPARATOR); - if (index != -1) { - // Presume its ServerName serialized with versioned bytes. - return ServerName.parseVersionedServerName(data); - } - // Presume it a hostname:port format. - String hostname = Addressing.parseHostname(str); - int port = Addressing.parsePort(str); - return new ServerName(hostname, port, -1L); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/Stoppable.java hbase-server/src/main/java/org/apache/hadoop/hbase/Stoppable.java deleted file mode 100644 index 93ccc13..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/Stoppable.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * Implementers are Stoppable. - */ -@InterfaceAudience.Private -public interface Stoppable { - /** - * Stop this service. - * @param why Why we're stopping. - */ - public void stop(String why); - - /** - * @return True if {@link #stop(String)} has been closed. - */ - public boolean isStopped(); -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/TableExistsException.java hbase-server/src/main/java/org/apache/hadoop/hbase/TableExistsException.java deleted file mode 100644 index 5c94dbd..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/TableExistsException.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Thrown when a table exists but should not - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class TableExistsException extends IOException { - private static final long serialVersionUID = 1L << 7 - 1L; - /** default constructor */ - public TableExistsException() { - super(); - } - - /** - * Constructor - * - * @param s message - */ - public TableExistsException(String s) { - super(s); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java hbase-server/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java deleted file mode 100644 index bc8bc7f..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * - * Failed to find .tableinfo file under table dir - * - */ -@InterfaceAudience.Private -@SuppressWarnings("serial") -public class TableInfoMissingException extends HBaseIOException { - - public TableInfoMissingException() { - super(); - } - - public TableInfoMissingException( String message ) { - super(message); - } - - public TableInfoMissingException( String message, Throwable t ) { - super(message, t); - } - - public TableInfoMissingException( Throwable t ) { - super(t); - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java hbase-server/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java deleted file mode 100644 index 1273bd8..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
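TableExistsException is most commonly seen from table creation. A hedged sketch of the usual guard, assuming the familiar createTable and addFamily signatures on the HBaseAdmin, HTableDescriptor and HColumnDescriptor classes this patch moves; the table and family names are invented:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableExistsException;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class CreateIfMissingSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
          HTableDescriptor desc = new HTableDescriptor("example_table");
          desc.addFamily(new HColumnDescriptor("f"));
          admin.createTable(desc);
        } catch (TableExistsException e) {
          // Another client won the race; the table is already there, so carry on.
        } finally {
          admin.close();
        }
      }
    }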
- */ -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * Thrown if a table should be offline but is not - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class TableNotDisabledException extends IOException { - private static final long serialVersionUID = 1L << 19 - 1L; - /** default constructor */ - public TableNotDisabledException() { - super(); - } - - /** - * Constructor - * @param s message - */ - public TableNotDisabledException(String s) { - super(s); - } - - /** - * @param tableName Name of table that is not disabled - */ - public TableNotDisabledException(byte[] tableName) { - this(Bytes.toString(tableName)); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java hbase-server/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java deleted file mode 100644 index b3c31b4..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * Thrown if a table should be enabled but is not - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class TableNotEnabledException extends IOException { - private static final long serialVersionUID = 262144L; - /** default constructor */ - public TableNotEnabledException() { - super(); - } - - /** - * Constructor - * @param s message - */ - public TableNotEnabledException(String s) { - super(s); - } - - /** - * @param tableName Name of table that is not enabled - */ - public TableNotEnabledException(byte[] tableName) { - this(Bytes.toString(tableName)); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java hbase-server/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java deleted file mode 100644 index fbd3db4..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** Thrown when a table can not be located */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class TableNotFoundException extends RegionException { - private static final long serialVersionUID = 993179627856392526L; - - /** default constructor */ - public TableNotFoundException() { - super(); - } - - /** @param s message */ - public TableNotFoundException(String s) { - super(s); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java hbase-server/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java deleted file mode 100644 index 046670f..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Thrown when we are asked to operate on a region we know nothing about. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class UnknownRegionException extends RegionException { - private static final long serialVersionUID = 1968858760475205392L; - - public UnknownRegionException(String regionName) { - super(regionName); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java hbase-server/src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java deleted file mode 100644 index e42f3a9..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - - -/** - * Thrown if a region server is passed an unknown row lock id - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class UnknownRowLockException extends DoNotRetryIOException { - private static final long serialVersionUID = 993179627856392526L; - - /** constructor */ - public UnknownRowLockException() { - super(); - } - - /** - * Constructor - * @param s message - */ - public UnknownRowLockException(String s) { - super(s); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java hbase-server/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java deleted file mode 100644 index 856c029..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - - -/** - * Thrown if a region server is passed an unknown scanner id. - * Usually means the client has take too long between checkins and so the - * scanner lease on the serverside has expired OR the serverside is closing - * down and has cancelled all leases. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class UnknownScannerException extends DoNotRetryIOException { - private static final long serialVersionUID = 993179627856392526L; - - /** constructor */ - public UnknownScannerException() { - super(); - } - - /** - * Constructor - * @param s message - */ - public UnknownScannerException(String s) { - super(s); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java deleted file mode 100644 index 76fc841..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
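The lease expiry described for UnknownScannerException above is usually avoided by keeping the gap between next() calls short. An illustrative sketch, assuming the HTable, Scan and ResultScanner client classes moved by this patch; the table name and caching value are invented:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;

    public class ScannerLeaseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "example_table");
        Scan scan = new Scan();
        // Fetch rows in modest batches so each next() checks in with the
        // region server well inside the scanner lease period.
        scan.setCaching(100);
        ResultScanner scanner = table.getScanner(scan);
        try {
          for (Result row : scanner) {
            // Process the row; keep per-row work short relative to the lease.
          }
        } finally {
          scanner.close();
          table.close();
        }
      }
    }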
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * This exception is thrown by the master when a region server reports and is - * already being processed as dead. This can happen when a region server loses - * its session but didn't figure it yet. - */ -@SuppressWarnings("serial") -@InterfaceAudience.Public -@InterfaceStability.Stable -public class YouAreDeadException extends IOException { - public YouAreDeadException(String message) { - super(message); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java hbase-server/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java deleted file mode 100644 index feedff3..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Thrown if the client can't connect to zookeeper - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class ZooKeeperConnectionException extends IOException { - private static final long serialVersionUID = 1L << 23 - 1L; - /** default constructor */ - public ZooKeeperConnectionException() { - super(); - } - - /** - * Constructor - * @param s message - */ - public ZooKeeperConnectionException(String s) { - super(s); - } - - /** - * Constructor taking another exception. - * @param e Exception to grab data from. 
- */ - public ZooKeeperConnectionException(String message, Exception e) { - super(message, e); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java deleted file mode 100644 index 8a383e4..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java +++ /dev/null @@ -1,703 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.catalog; - -import java.io.EOFException; -import java.io.IOException; -import java.net.ConnectException; -import java.net.NoRouteToHostException; -import java.net.SocketException; -import java.net.SocketTimeoutException; -import java.net.UnknownHostException; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.AdminProtocol; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.RetriesExhaustedException; -import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.MetaNodeTracker; -import org.apache.hadoop.hbase.zookeeper.RootRegionTracker; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.hadoop.ipc.RemoteException; - -/** - * Tracks the availability of the catalog tables -ROOT- and - * .META.. - * - * This class is "read-only" in that the locations of the catalog tables cannot - * be explicitly set. Instead, ZooKeeper is used to learn of the availability - * and location of -ROOT-. -ROOT- is used to learn of - * the location of .META. If not available in -ROOT-, - * ZooKeeper is used to monitor for a new location of .META.. - * - *
      Call {@link #start()} to start up operation. Call {@link #stop()}} to - * interrupt waits and close up shop. - */ -@InterfaceAudience.Private -public class CatalogTracker { - // TODO: This class needs a rethink. The original intent was that it would be - // the one-stop-shop for root and meta locations and that it would get this - // info from reading and watching zk state. The class was to be used by - // servers when they needed to know of root and meta movement but also by - // client-side (inside in HTable) so rather than figure root and meta - // locations on fault, the client would instead get notifications out of zk. - // - // But this original intent is frustrated by the fact that this class has to - // read an hbase table, the -ROOT- table, to figure out the .META. region - // location which means we depend on an HConnection. HConnection will do - // retrying but also, it has its own mechanism for finding root and meta - // locations (and for 'verifying'; it tries the location and if it fails, does - // new lookup, etc.). So, at least for now, HConnection (or HTable) can't - // have a CT since CT needs a HConnection (Even then, do want HT to have a CT? - // For HT keep up a session with ZK? Rather, shouldn't we do like asynchbase - // where we'd open a connection to zk, read what we need then let the - // connection go?). The 'fix' is make it so both root and meta addresses - // are wholey up in zk -- not in zk (root) -- and in an hbase table (meta). - // - // But even then, this class does 'verification' of the location and it does - // this by making a call over an HConnection (which will do its own root - // and meta lookups). Isn't this verification 'useless' since when we - // return, whatever is dependent on the result of this call then needs to - // use HConnection; what we have verified may change in meantime (HConnection - // uses the CT primitives, the root and meta trackers finding root locations). - // - // When meta is moved to zk, this class may make more sense. In the - // meantime, it does not cohere. It should just watch meta and root and not - // NOT do verification -- let that be out in HConnection since its going to - // be done there ultimately anyways. - // - // This class has spread throughout the codebase. It needs to be reigned in. - // This class should be used server-side only, even if we move meta location - // up into zk. Currently its used over in the client package. Its used in - // MetaReader and MetaEditor classes usually just to get the Configuration - // its using (It does this indirectly by asking its HConnection for its - // Configuration and even then this is just used to get an HConnection out on - // the other end). I made https://issues.apache.org/jira/browse/HBASE-4495 for - // doing CT fixup. St.Ack 09/30/2011. - // - - // TODO: Timeouts have never been as advertised in here and its worse now - // with retries; i.e. the HConnection retries and pause goes ahead whatever - // the passed timeout is. Fix. - private static final Log LOG = LogFactory.getLog(CatalogTracker.class); - private final HConnection connection; - private final ZooKeeperWatcher zookeeper; - private final RootRegionTracker rootRegionTracker; - private final MetaNodeTracker metaNodeTracker; - private final AtomicBoolean metaAvailable = new AtomicBoolean(false); - private boolean instantiatedzkw = false; - private Abortable abortable; - - /* - * Do not clear this address once set. Its needed when we do - * server shutdown processing -- we need to know who had .META. 
last. If you - * want to know if the address is good, rely on {@link #metaAvailable} value. - */ - private ServerName metaLocation; - - /* - * Timeout waiting on root or meta to be set. - */ - private final int defaultTimeout; - - private boolean stopped = false; - - static final byte [] ROOT_REGION_NAME = - HRegionInfo.ROOT_REGIONINFO.getRegionName(); - static final byte [] META_REGION_NAME = - HRegionInfo.FIRST_META_REGIONINFO.getRegionName(); - - /** - * Constructs a catalog tracker. Find current state of catalog tables. - * Begin active tracking by executing {@link #start()} post construction. Does - * not timeout. - * - * @param conf - * the {@link Configuration} from which a {@link HConnection} will be - * obtained; if problem, this connections - * {@link HConnection#abort(String, Throwable)} will be called. - * @throws IOException - */ - public CatalogTracker(final Configuration conf) throws IOException { - this(null, conf, null); - } - - /** - * Constructs the catalog tracker. Find current state of catalog tables. - * Begin active tracking by executing {@link #start()} post construction. - * Does not timeout. - * @param zk If zk is null, we'll create an instance (and shut it down - * when {@link #stop()} is called) else we'll use what is passed. - * @param conf - * @param abortable If fatal exception we'll call abort on this. May be null. - * If it is we'll use the Connection associated with the passed - * {@link Configuration} as our Abortable. - * @throws IOException - */ - public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf, - final Abortable abortable) - throws IOException { - this(zk, conf, abortable, - conf.getInt("hbase.catalogtracker.default.timeout", 1000)); - } - - /** - * Constructs the catalog tracker. Find current state of catalog tables. - * Begin active tracking by executing {@link #start()} post construction. - * @param zk If zk is null, we'll create an instance (and shut it down - * when {@link #stop()} is called) else we'll use what is passed. - * @param conf - * @param abortable If fatal exception we'll call abort on this. May be null. - * If it is we'll use the Connection associated with the passed - * {@link Configuration} as our Abortable. - * @param defaultTimeout Timeout to use. Pass zero for no timeout - * ({@link Object#wait(long)} when passed a 0 waits for ever). - * @throws IOException - */ - public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf, - Abortable abortable, final int defaultTimeout) - throws IOException { - this(zk, conf, HConnectionManager.getConnection(conf), abortable, defaultTimeout); - } - - public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf, - HConnection connection, Abortable abortable, final int defaultTimeout) - throws IOException { - this.connection = connection; - if (abortable == null) { - // A connection is abortable. - this.abortable = this.connection; - } - Abortable throwableAborter = new Abortable() { - - @Override - public void abort(String why, Throwable e) { - throw new RuntimeException(why, e); - } - - @Override - public boolean isAborted() { - return true; - } - - }; - if (zk == null) { - // Create our own. Set flag so we tear it down on stop. 
- this.zookeeper = - new ZooKeeperWatcher(conf, "catalogtracker-on-" + connection.toString(), - abortable); - instantiatedzkw = true; - } else { - this.zookeeper = zk; - } - this.rootRegionTracker = new RootRegionTracker(zookeeper, throwableAborter); - final CatalogTracker ct = this; - // Override nodeDeleted so we get notified when meta node deleted - this.metaNodeTracker = new MetaNodeTracker(zookeeper, throwableAborter) { - public void nodeDeleted(String path) { - if (!path.equals(node)) return; - ct.resetMetaLocation(); - } - }; - this.defaultTimeout = defaultTimeout; - } - - /** - * Starts the catalog tracker. - * Determines current availability of catalog tables and ensures all further - * transitions of either region are tracked. - * @throws IOException - * @throws InterruptedException - */ - public void start() throws IOException, InterruptedException { - LOG.debug("Starting catalog tracker " + this); - try { - this.rootRegionTracker.start(); - this.metaNodeTracker.start(); - } catch (RuntimeException e) { - Throwable t = e.getCause(); - this.abortable.abort(e.getMessage(), t); - throw new IOException("Attempt to start root/meta tracker failed.", t); - } - } - - /** - * Stop working. - * Interrupts any ongoing waits. - */ - public void stop() { - if (!this.stopped) { - LOG.debug("Stopping catalog tracker " + this); - this.stopped = true; - this.rootRegionTracker.stop(); - this.metaNodeTracker.stop(); - try { - if (this.connection != null) { - this.connection.close(); - } - } catch (IOException e) { - // Although the {@link Closeable} interface throws an {@link - // IOException}, in reality, the implementation would never do that. - LOG.error("Attempt to close catalog tracker's connection failed.", e); - } - if (this.instantiatedzkw) { - this.zookeeper.close(); - } - // Call this and it will interrupt any ongoing waits on meta. - synchronized (this.metaAvailable) { - this.metaAvailable.notifyAll(); - } - } - } - - /** - * Gets the current location for -ROOT- or null if location is - * not currently available. - * @return {@link ServerName} for server hosting -ROOT- or null - * if none available - * @throws InterruptedException - */ - public ServerName getRootLocation() throws InterruptedException { - return this.rootRegionTracker.getRootRegionLocation(); - } - - /** - * @return {@link ServerName} for server hosting .META. or null - * if none available - */ - public ServerName getMetaLocation() { - return this.metaLocation; - } - - /** - * Method used by master on startup trying to figure state of cluster. - * Returns the current meta location unless its null. In this latter case, - * it has not yet been set so go check whats up in -ROOT- and - * return that. - * @return {@link ServerName} for server hosting .META. or if null, - * we'll read the location that is up in -ROOT- table (which - * could be null or just plain stale). - * @throws IOException - */ - public ServerName getMetaLocationOrReadLocationFromRoot() throws IOException { - ServerName sn = getMetaLocation(); - return sn != null? sn: MetaReader.getMetaRegionLocation(this); - } - - /** - * Gets the current location for -ROOT- if available and waits - * for up to the specified timeout if not immediately available. Returns null - * if the timeout elapses before root is available. 
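Putting the lifecycle above together: construct, start(), wait, then stop(). A minimal illustrative sketch using the single-argument constructor shown earlier and the standard HBaseConfiguration factory; the ten second timeout is an arbitrary choice for the example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.catalog.CatalogTracker;

    public class CatalogTrackerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        CatalogTracker tracker = new CatalogTracker(conf); // creates and owns its own ZooKeeper watcher
        tracker.start();                                   // begin tracking -ROOT- and .META.
        try {
          // Wait up to ten seconds for the -ROOT- location to appear in ZooKeeper.
          ServerName rootServer = tracker.waitForRoot(10000);
          System.out.println("-ROOT- is hosted on " + rootServer);
          System.out.println(".META. last seen on " + tracker.getMetaLocation());
        } finally {
          tracker.stop(); // interrupts any ongoing waits and releases the connection
        }
      }
    }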
- * @param timeout maximum time to wait for root availability, in milliseconds - * @return {@link ServerName} for server hosting -ROOT- or null - * if none available - * @throws InterruptedException if interrupted while waiting - * @throws NotAllMetaRegionsOnlineException if root not available before - * timeout - */ - public ServerName waitForRoot(final long timeout) - throws InterruptedException, NotAllMetaRegionsOnlineException { - ServerName sn = rootRegionTracker.waitRootRegionLocation(timeout); - if (sn == null) { - throw new NotAllMetaRegionsOnlineException("Timed out; " + timeout + "ms"); - } - return sn; - } - - /** - * Gets a connection to the server hosting root, as reported by ZooKeeper, - * waiting up to the specified timeout for availability. - * @param timeout How long to wait on root location - * @see #waitForRoot(long) for additional information - * @return connection to server hosting root - * @throws InterruptedException - * @throws NotAllMetaRegionsOnlineException if timed out waiting - * @throws IOException - * @deprecated Use #getRootServerConnection(long) - */ - public AdminProtocol waitForRootServerConnection(long timeout) - throws InterruptedException, NotAllMetaRegionsOnlineException, IOException { - return getRootServerConnection(timeout); - } - - /** - * Gets a connection to the server hosting root, as reported by ZooKeeper, - * waiting up to the specified timeout for availability. - *
      WARNING: Does not retry. Use an {@link HTable} instead. - * @param timeout How long to wait on root location - * @see #waitForRoot(long) for additional information - * @return connection to server hosting root - * @throws InterruptedException - * @throws NotAllMetaRegionsOnlineException if timed out waiting - * @throws IOException - */ - AdminProtocol getRootServerConnection(long timeout) - throws InterruptedException, NotAllMetaRegionsOnlineException, IOException { - return getCachedConnection(waitForRoot(timeout)); - } - - /** - * Gets a connection to the server hosting root, as reported by ZooKeeper, - * waiting for the default timeout specified on instantiation. - * @see #waitForRoot(long) for additional information - * @return connection to server hosting root - * @throws NotAllMetaRegionsOnlineException if timed out waiting - * @throws IOException - * @deprecated Use #getRootServerConnection(long) - */ - public AdminProtocol waitForRootServerConnectionDefault() - throws NotAllMetaRegionsOnlineException, IOException { - try { - return getRootServerConnection(this.defaultTimeout); - } catch (InterruptedException e) { - throw new NotAllMetaRegionsOnlineException("Interrupted"); - } - } - - /** - * Gets a connection to the server currently hosting .META. or - * null if location is not currently available. - *
- * If a location is known, a connection to the cached location is returned. - * If refresh is true, the cached connection is verified first before - * returning. If the connection is not valid, it is reset and rechecked. - *
      - * If no location for meta is currently known, method checks ROOT for a new - * location, verifies META is currently there, and returns a cached connection - * to the server hosting META. - * - * @return connection to server hosting meta, null if location not available - * @throws IOException - * @throws InterruptedException - */ - private AdminProtocol getMetaServerConnection() - throws IOException, InterruptedException { - synchronized (metaAvailable) { - if (metaAvailable.get()) { - AdminProtocol current = getCachedConnection(this.metaLocation); - // If we are to refresh, verify we have a good connection by making - // an invocation on it. - if (verifyRegionLocation(current, this.metaLocation, META_REGION_NAME)) { - return current; - } - resetMetaLocation(); - } - // We got here because there is no meta available or because whats - // available is bad. - - // Now read the current .META. content from -ROOT-. Note: This goes via - // an HConnection. It has its own way of figuring root and meta locations - // which we have to wait on. - ServerName newLocation = MetaReader.getMetaRegionLocation(this); - if (newLocation == null) return null; - - AdminProtocol newConnection = getCachedConnection(newLocation); - if (verifyRegionLocation(newConnection, newLocation, META_REGION_NAME)) { - setMetaLocation(newLocation); - return newConnection; - } else { - if (LOG.isTraceEnabled()) { - LOG.trace("New .META. server: " + newLocation + " isn't valid." + - " Cached .META. server: " + this.metaLocation); - } - } - return null; - } - } - - /** - * Waits indefinitely for availability of .META.. Used during - * cluster startup. Does not verify meta, just that something has been - * set up in zk. - * @see #waitForMeta(long) - * @throws InterruptedException if interrupted while waiting - */ - public void waitForMeta() throws InterruptedException { - while (!this.stopped) { - try { - if (waitForMeta(100) != null) break; - } catch (NotAllMetaRegionsOnlineException e) { - if (LOG.isTraceEnabled()) { - LOG.info(".META. still not available, sleeping and retrying." + - " Reason: " + e.getMessage()); - } - } catch (IOException e) { - LOG.info("Retrying", e); - } - } - } - - /** - * Gets the current location for .META. if available and waits - * for up to the specified timeout if not immediately available. Throws an - * exception if timed out waiting. This method differs from {@link #waitForMeta()} - * in that it will go ahead and verify the location gotten from ZooKeeper and - * -ROOT- region by trying to use returned connection. - * @param timeout maximum time to wait for meta availability, in milliseconds - * @return {@link ServerName} for server hosting .META. or null - * if none available - * @throws InterruptedException if interrupted while waiting - * @throws IOException unexpected exception connecting to meta server - * @throws NotAllMetaRegionsOnlineException if meta not available before - * timeout - */ - public ServerName waitForMeta(long timeout) - throws InterruptedException, IOException, NotAllMetaRegionsOnlineException { - long stop = System.currentTimeMillis() + timeout; - long waitTime = Math.min(50, timeout); - synchronized (metaAvailable) { - while(!stopped && (timeout == 0 || System.currentTimeMillis() < stop)) { - if (getMetaServerConnection() != null) { - return metaLocation; - } - // perhaps -ROOT- region isn't available, let us wait a bit and retry. 
- metaAvailable.wait(waitTime); - } - if (getMetaServerConnection() == null) { - throw new NotAllMetaRegionsOnlineException("Timed out (" + timeout + "ms)"); - } - return metaLocation; - } - } - - /** - * Gets a connection to the server hosting meta, as reported by ZooKeeper, - * waiting up to the specified timeout for availability. - * @see #waitForMeta(long) for additional information - * @return connection to server hosting meta - * @throws InterruptedException - * @throws NotAllMetaRegionsOnlineException if timed out waiting - * @throws IOException - * @deprecated Does not retry; use an HTable instance instead. - */ - public AdminProtocol waitForMetaServerConnection(long timeout) - throws InterruptedException, NotAllMetaRegionsOnlineException, IOException { - return getCachedConnection(waitForMeta(timeout)); - } - - /** - * Gets a connection to the server hosting meta, as reported by ZooKeeper, - * waiting up to the specified timeout for availability. - * Used in tests. - * @see #waitForMeta(long) for additional information - * @return connection to server hosting meta - * @throws NotAllMetaRegionsOnlineException if timed out or interrupted - * @throws IOException - * @deprecated Does not retry; use an HTable instance instead. - */ - public AdminProtocol waitForMetaServerConnectionDefault() - throws NotAllMetaRegionsOnlineException, IOException { - try { - return getCachedConnection(waitForMeta(defaultTimeout)); - } catch (InterruptedException e) { - throw new NotAllMetaRegionsOnlineException("Interrupted"); - } - } - - /** - * Called when we figure current meta is off (called from zk callback). - */ - public void resetMetaLocation() { - LOG.debug("Current cached META location, " + metaLocation + - ", is not valid, resetting"); - synchronized(this.metaAvailable) { - this.metaAvailable.set(false); - this.metaAvailable.notifyAll(); - } - } - - /** - * @param metaLocation - */ - void setMetaLocation(final ServerName metaLocation) { - LOG.debug("Set new cached META location: " + metaLocation); - synchronized (this.metaAvailable) { - this.metaLocation = metaLocation; - this.metaAvailable.set(true); - // no synchronization because these are private and already under lock - this.metaAvailable.notifyAll(); - } - } - - /** - * @param sn ServerName to get a connection against. - * @return The AdminProtocol we got when we connected to sn - * May have come from cache, may not be good, may have been setup by this - * invocation, or may be null. - * @throws IOException - */ - private AdminProtocol getCachedConnection(ServerName sn) - throws IOException { - if (sn == null) { - return null; - } - AdminProtocol protocol = null; - try { - protocol = connection.getAdmin(sn.getHostname(), sn.getPort()); - } catch (RetriesExhaustedException e) { - if (e.getCause() != null && e.getCause() instanceof ConnectException) { - // Catch this; presume it means the cached connection has gone bad. - } else { - throw e; - } - } catch (SocketTimeoutException e) { - LOG.debug("Timed out connecting to " + sn); - } catch (NoRouteToHostException e) { - LOG.debug("Connecting to " + sn, e); - } catch (SocketException e) { - LOG.debug("Exception connecting to " + sn); - } catch (UnknownHostException e) { - LOG.debug("Unknown host exception connecting to " + sn); - } catch (IOException ioe) { - Throwable cause = ioe.getCause(); - if (ioe instanceof ConnectException) { - // Catch. Connect refused. - } else if (cause != null && cause instanceof EOFException) { - // Catch. Other end disconnected us. 
- } else if (cause != null && cause.getMessage() != null && - cause.getMessage().toLowerCase().contains("connection reset")) { - // Catch. Connection reset. - } else { - throw ioe; - } - - } - return protocol; - } - - /** - * Verify we can connect to hostingServer and that its carrying - * regionName. - * @param hostingServer Interface to the server hosting regionName - * @param serverName The servername that goes with the metaServer - * Interface. Used logging. - * @param regionName The regionname we are interested in. - * @return True if we were able to verify the region located at other side of - * the Interface. - * @throws IOException - */ - // TODO: We should be able to get the ServerName from the AdminProtocol - // rather than have to pass it in. Its made awkward by the fact that the - // HRI is likely a proxy against remote server so the getServerName needs - // to be fixed to go to a local method or to a cache before we can do this. - private boolean verifyRegionLocation(AdminProtocol hostingServer, - final ServerName address, final byte [] regionName) - throws IOException { - if (hostingServer == null) { - LOG.info("Passed hostingServer is null"); - return false; - } - Throwable t = null; - try { - // Try and get regioninfo from the hosting server. - return ProtobufUtil.getRegionInfo(hostingServer, regionName) != null; - } catch (ConnectException e) { - t = e; - } catch (RetriesExhaustedException e) { - t = e; - } catch (RemoteException e) { - IOException ioe = e.unwrapRemoteException(); - t = ioe; - } catch (IOException e) { - Throwable cause = e.getCause(); - if (cause != null && cause instanceof EOFException) { - t = cause; - } else if (cause != null && cause.getMessage() != null - && cause.getMessage().contains("Connection reset")) { - t = cause; - } else { - t = e; - } - } - LOG.info("Failed verification of " + Bytes.toStringBinary(regionName) + - " at address=" + address + "; " + t); - return false; - } - - /** - * Verify -ROOT- is deployed and accessible. - * @param timeout How long to wait on zk for root address (passed through to - * the internal call to {@link #waitForRootServerConnection(long)}. - * @return True if the -ROOT- location is healthy. - * @throws IOException - * @throws InterruptedException - */ - public boolean verifyRootRegionLocation(final long timeout) - throws InterruptedException, IOException { - AdminProtocol connection = null; - try { - connection = waitForRootServerConnection(timeout); - } catch (NotAllMetaRegionsOnlineException e) { - // Pass - } catch (ServerNotRunningYetException e) { - // Pass -- remote server is not up so can't be carrying root - } catch (UnknownHostException e) { - // Pass -- server name doesn't resolve so it can't be assigned anything. - } - return (connection == null)? false: - verifyRegionLocation(connection, - this.rootRegionTracker.getRootRegionLocation(), ROOT_REGION_NAME); - } - - /** - * Verify .META. is deployed and accessible. - * @param timeout How long to wait on zk for .META. address - * (passed through to the internal call to {@link #waitForMetaServerConnection(long)}. - * @return True if the .META. location is healthy. - * @throws IOException Some unexpected IOE. 
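The verifyRootRegionLocation/verifyMetaRegionLocation pair in this hunk is what the master leans on to decide whether the catalog regions are actually serving. A small health-check sketch; the 10-second timeout and the class name are illustrative:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.catalog.CatalogTracker;

public class CatalogHealthCheck {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    CatalogTracker tracker = new CatalogTracker(conf);
    tracker.start();
    try {
      // Both checks return false (rather than throwing) when the region
      // cannot be reached before the timeout elapses.
      boolean rootOk = tracker.verifyRootRegionLocation(10000);
      boolean metaOk = tracker.verifyMetaRegionLocation(10000);
      System.out.println("-ROOT- healthy: " + rootOk + ", .META. healthy: " + metaOk);
    } finally {
      tracker.stop();
    }
  }
}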
- * @throws InterruptedException - */ - public boolean verifyMetaRegionLocation(final long timeout) - throws InterruptedException, IOException { - AdminProtocol connection = null; - try { - connection = waitForMetaServerConnection(timeout); - } catch (NotAllMetaRegionsOnlineException e) { - // Pass - } catch (ServerNotRunningYetException e) { - // Pass -- remote server is not up so can't be carrying .META. - } catch (UnknownHostException e) { - // Pass -- server name doesn't resolve so it can't be assigned anything. - } catch (RetriesExhaustedException e) { - // Pass -- failed after bunch of retries. - LOG.debug("Failed verify meta region location after retries", e); - } - return connection != null; - } - - // Used by tests. - MetaNodeTracker getMetaNodeTracker() { - return this.metaNodeTracker; - } - - public HConnection getConnection() { - return this.connection; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java deleted file mode 100644 index ea9da0c..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java +++ /dev/null @@ -1,643 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.catalog; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Set; -import java.util.TreeMap; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; - -/** - * Reads region and assignment information from .META.. - */ -@InterfaceAudience.Private -public class MetaReader { - // TODO: Strip CatalogTracker from this class. Its all over and in the end - // its only used to get its Configuration so we can get associated - // Connection. - private static final Log LOG = LogFactory.getLog(MetaReader.class); - - static final byte [] META_REGION_PREFIX; - static { - // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX. - // FIRST_META_REGIONINFO == '.META.,,1'. 
META_REGION_PREFIX == '.META.,' - int len = HRegionInfo.FIRST_META_REGIONINFO.getRegionName().length - 2; - META_REGION_PREFIX = new byte [len]; - System.arraycopy(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), 0, - META_REGION_PREFIX, 0, len); - } - - /** - * @param row - * @return True if row is row of -ROOT- table. - */ - private static boolean isRootTableRow(final byte [] row) { - if (row.length < META_REGION_PREFIX.length + 2 /* ',', + '1' */) { - // Can't be meta table region. - return false; - } - // Compare the prefix of row. If it matches META_REGION_PREFIX prefix, - // then this is row from -ROOT_ table. - return Bytes.equals(row, 0, META_REGION_PREFIX.length, - META_REGION_PREFIX, 0, META_REGION_PREFIX.length); - } - - /** - * Performs a full scan of .META., skipping regions from any - * tables in the specified set of disabled tables. - * @param catalogTracker - * @param disabledTables set of disabled tables that will not be returned - * @return Returns a map of every region to it's currently assigned server, - * according to META. If the region does not have an assignment it will have - * a null value in the map. - * @throws IOException - */ - public static Map fullScan( - CatalogTracker catalogTracker, final Set disabledTables) - throws IOException { - return fullScan(catalogTracker, disabledTables, false); - } - - /** - * Performs a full scan of .META., skipping regions from any - * tables in the specified set of disabled tables. - * @param catalogTracker - * @param disabledTables set of disabled tables that will not be returned - * @param excludeOfflinedSplitParents If true, do not include offlined split - * parents in the return. - * @return Returns a map of every region to it's currently assigned server, - * according to META. If the region does not have an assignment it will have - * a null value in the map. - * @throws IOException - */ - public static Map fullScan( - CatalogTracker catalogTracker, final Set disabledTables, - final boolean excludeOfflinedSplitParents) - throws IOException { - final Map regions = - new TreeMap(); - Visitor v = new Visitor() { - @Override - public boolean visit(Result r) throws IOException { - if (r == null || r.isEmpty()) return true; - Pair region = HRegionInfo.getHRegionInfoAndServerName(r); - if (region == null) return true; - HRegionInfo hri = region.getFirst(); - if (hri == null) return true; - if (hri.getTableNameAsString() == null) return true; - if (disabledTables.contains( - hri.getTableNameAsString())) return true; - // Are we to include split parents in the list? - if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; - regions.put(hri, region.getSecond()); - return true; - } - }; - fullScan(catalogTracker, v); - return regions; - } - - /** - * Performs a full scan of .META.. - * @return List of {@link Result} - * @throws IOException - */ - public static List fullScan(CatalogTracker catalogTracker) - throws IOException { - CollectAllVisitor v = new CollectAllVisitor(); - fullScan(catalogTracker, v, null); - return v.getResults(); - } - - /** - * Performs a full scan of a -ROOT- table. - * @return List of {@link Result} - * @throws IOException - */ - public static List fullScanOfRoot(CatalogTracker catalogTracker) - throws IOException { - CollectAllVisitor v = new CollectAllVisitor(); - fullScan(catalogTracker, v, null, true); - return v.getResults(); - } - - /** - * Performs a full scan of .META.. - * @param catalogTracker - * @param visitor Visitor invoked against each row. 
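The generic parameters of the fullScan overloads were lost in this hunk's rendering; going by the javadoc, the map form returns Map<HRegionInfo, ServerName> keyed by region, with null values for unassigned regions, and takes a Set<String> of disabled table names. A sketch under that assumption (the class name and the empty disabled set are illustrative):

import java.io.IOException;
import java.util.Collections;
import java.util.Map;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;

public class DumpAssignments {
  public static void main(String[] args) throws IOException, InterruptedException {
    CatalogTracker tracker = new CatalogTracker(HBaseConfiguration.create());
    tracker.start();
    try {
      // Empty set: exclude nothing; pass disabled table names to skip them.
      Map<HRegionInfo, ServerName> assignments =
          MetaReader.fullScan(tracker, Collections.<String>emptySet());
      for (Map.Entry<HRegionInfo, ServerName> entry : assignments.entrySet()) {
        // The value is null for a region that currently has no assignment.
        System.out.println(entry.getKey().getRegionNameAsString() + " -> " + entry.getValue());
      }
    } finally {
      tracker.stop();
    }
  }
}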
- * @throws IOException - */ - public static void fullScan(CatalogTracker catalogTracker, - final Visitor visitor) - throws IOException { - fullScan(catalogTracker, visitor, null); - } - - /** - * Performs a full scan of .META.. - * @param catalogTracker - * @param visitor Visitor invoked against each row. - * @param startrow Where to start the scan. Pass null if want to begin scan - * at first row (The visitor will stop the Scan when its done so no need to - * pass a stoprow). - * @throws IOException - */ - public static void fullScan(CatalogTracker catalogTracker, - final Visitor visitor, final byte [] startrow) - throws IOException { - fullScan(catalogTracker, visitor, startrow, false); - } - - /** - * Callers should call close on the returned {@link HTable} instance. - * @param catalogTracker We'll use this catalogtracker's connection - * @param tableName Table to get an {@link HTable} against. - * @return An {@link HTable} for tableName - * @throws IOException - */ - private static HTable getHTable(final CatalogTracker catalogTracker, - final byte [] tableName) - throws IOException { - // Passing the CatalogTracker's connection configuration ensures this - // HTable instance uses the CatalogTracker's connection. - org.apache.hadoop.hbase.client.HConnection c = catalogTracker.getConnection(); - if (c == null) throw new NullPointerException("No connection"); - return new HTable(catalogTracker.getConnection().getConfiguration(), tableName); - } - - /** - * Callers should call close on the returned {@link HTable} instance. - * @param catalogTracker - * @param row Row we are putting - * @return - * @throws IOException - */ - static HTable getCatalogHTable(final CatalogTracker catalogTracker, - final byte [] row) - throws IOException { - return isRootTableRow(row)? - getRootHTable(catalogTracker): - getMetaHTable(catalogTracker); - } - - /** - * Callers should call close on the returned {@link HTable} instance. - * @param ct - * @return An {@link HTable} for .META. - * @throws IOException - */ - static HTable getMetaHTable(final CatalogTracker ct) - throws IOException { - return getHTable(ct, HConstants.META_TABLE_NAME); - } - - /** - * Callers should call close on the returned {@link HTable} instance. - * @param ct - * @return An {@link HTable} for -ROOT- - * @throws IOException - */ - static HTable getRootHTable(final CatalogTracker ct) - throws IOException { - return getHTable(ct, HConstants.ROOT_TABLE_NAME); - } - - /** - * @param t Table to use (will be closed when done). - * @param g Get to run - * @throws IOException - */ - private static Result get(final HTable t, final Get g) throws IOException { - try { - return t.get(g); - } finally { - t.close(); - } - } - - /** - * Gets the location of .META. region by reading content of - * -ROOT-. - * @param ct - * @return location of .META. 
region as a {@link ServerName} or - * null if not found - * @throws IOException - */ - static ServerName getMetaRegionLocation(final CatalogTracker ct) - throws IOException { - return MetaReader.readRegionLocation(ct, CatalogTracker.META_REGION_NAME); - } - - /** - * Reads the location of the specified region - * @param catalogTracker - * @param regionName region whose location we are after - * @return location of region as a {@link ServerName} or null if not found - * @throws IOException - */ - static ServerName readRegionLocation(CatalogTracker catalogTracker, - byte [] regionName) - throws IOException { - Pair pair = getRegion(catalogTracker, regionName); - return (pair == null || pair.getSecond() == null)? null: pair.getSecond(); - } - - /** - * Gets the region info and assignment for the specified region. - * @param catalogTracker - * @param regionName Region to lookup. - * @return Location and HRegionInfo for regionName - * @throws IOException - */ - public static Pair getRegion( - CatalogTracker catalogTracker, byte [] regionName) - throws IOException { - Get get = new Get(regionName); - get.addFamily(HConstants.CATALOG_FAMILY); - Result r = get(getCatalogHTable(catalogTracker, regionName), get); - return (r == null || r.isEmpty())? null: HRegionInfo.getHRegionInfoAndServerName(r); - } - - /** - * Checks if the specified table exists. Looks at the META table hosted on - * the specified server. - * @param catalogTracker - * @param tableName table to check - * @return true if the table exists in meta, false if not - * @throws IOException - */ - public static boolean tableExists(CatalogTracker catalogTracker, - String tableName) - throws IOException { - if (tableName.equals(HTableDescriptor.ROOT_TABLEDESC.getNameAsString()) || - tableName.equals(HTableDescriptor.META_TABLEDESC.getNameAsString())) { - // Catalog tables always exist. - return true; - } - final byte [] tableNameBytes = Bytes.toBytes(tableName); - // Make a version of ResultCollectingVisitor that only collects the first - CollectingVisitor visitor = new CollectingVisitor() { - private HRegionInfo current = null; - - @Override - public boolean visit(Result r) throws IOException { - this.current = - HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER); - if (this.current == null) { - LOG.warn("No serialized HRegionInfo in " + r); - return true; - } - if (!isInsideTable(this.current, tableNameBytes)) return false; - // Else call super and add this Result to the collection. - super.visit(r); - // Stop collecting regions from table after we get one. - return false; - } - - @Override - void add(Result r) { - // Add the current HRI. - this.results.add(this.current); - } - }; - fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableNameBytes)); - // If visitor has results >= 1 then table exists. - return visitor.getResults().size() >= 1; - } - - /** - * Gets all of the regions of the specified table. - * @param catalogTracker - * @param tableName - * @return Ordered list of {@link HRegionInfo}. - * @throws IOException - */ - public static List getTableRegions(CatalogTracker catalogTracker, - byte [] tableName) - throws IOException { - return getTableRegions(catalogTracker, tableName, false); - } - - /** - * Gets all of the regions of the specified table. - * @param catalogTracker - * @param tableName - * @param excludeOfflinedSplitParents If true, do not include offlined split - * parents in the return. - * @return Ordered list of {@link HRegionInfo}. 
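tableExists and getTableRegions above cover the usual "does this table have rows in .META., and which regions" lookups. A sketch that lists one table's regions; "usertable" is a hypothetical name, and the List<HRegionInfo> element type is reconstructed from the javadoc:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.util.Bytes;

public class ListTableRegions {
  public static void main(String[] args) throws IOException, InterruptedException {
    String tableName = "usertable";  // hypothetical table name
    CatalogTracker tracker = new CatalogTracker(HBaseConfiguration.create());
    tracker.start();
    try {
      if (!MetaReader.tableExists(tracker, tableName)) {
        System.out.println(tableName + " has no rows in .META.");
        return;
      }
      // Ordered list of the table's regions, read straight from .META..
      List<HRegionInfo> regions =
          MetaReader.getTableRegions(tracker, Bytes.toBytes(tableName));
      for (HRegionInfo hri : regions) {
        System.out.println(hri.getRegionNameAsString());
      }
    } finally {
      tracker.stop();
    }
  }
}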
- * @throws IOException - */ - public static List getTableRegions(CatalogTracker catalogTracker, - byte [] tableName, final boolean excludeOfflinedSplitParents) - throws IOException { - List> result = null; - try { - result = getTableRegionsAndLocations(catalogTracker, tableName, - excludeOfflinedSplitParents); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - return getListOfHRegionInfos(result); - } - - static List getListOfHRegionInfos(final List> pairs) { - if (pairs == null || pairs.isEmpty()) return null; - List result = new ArrayList(pairs.size()); - for (Pair pair: pairs) { - result.add(pair.getFirst()); - } - return result; - } - - /** - * @param current - * @param tableName - * @return True if current tablename is equal to - * tableName - */ - static boolean isInsideTable(final HRegionInfo current, final byte [] tableName) { - return Bytes.equals(tableName, current.getTableName()); - } - - /** - * @param tableName - * @return Place to start Scan in .META. when passed a - * tableName; returns <tableName&rt; <,&rt; <,&rt; - */ - static byte [] getTableStartRowForMeta(final byte [] tableName) { - byte [] startRow = new byte[tableName.length + 2]; - System.arraycopy(tableName, 0, startRow, 0, tableName.length); - startRow[startRow.length - 2] = HConstants.DELIMITER; - startRow[startRow.length - 1] = HConstants.DELIMITER; - return startRow; - } - - /** - * This method creates a Scan object that will only scan catalog rows that - * belong to the specified table. It doesn't specify any columns. - * This is a better alternative to just using a start row and scan until - * it hits a new table since that requires parsing the HRI to get the table - * name. - * @param tableName bytes of table's name - * @return configured Scan object - */ - public static Scan getScanForTableName(byte[] tableName) { - String strName = Bytes.toString(tableName); - // Start key is just the table name with delimiters - byte[] startKey = Bytes.toBytes(strName + ",,"); - // Stop key appends the smallest possible char to the table name - byte[] stopKey = Bytes.toBytes(strName + " ,,"); - - Scan scan = new Scan(startKey); - scan.setStopRow(stopKey); - return scan; - } - - /** - * @param catalogTracker - * @param tableName - * @return Return list of regioninfos and server. - * @throws IOException - * @throws InterruptedException - */ - public static List> - getTableRegionsAndLocations(CatalogTracker catalogTracker, String tableName) - throws IOException, InterruptedException { - return getTableRegionsAndLocations(catalogTracker, Bytes.toBytes(tableName), - true); - } - - /** - * @param catalogTracker - * @param tableName - * @return Return list of regioninfos and server addresses. - * @throws IOException - * @throws InterruptedException - */ - public static List> - getTableRegionsAndLocations(final CatalogTracker catalogTracker, - final byte [] tableName, final boolean excludeOfflinedSplitParents) - throws IOException, InterruptedException { - if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { - // If root, do a bit of special handling. 
- ServerName serverName = catalogTracker.getRootLocation(); - List> list = - new ArrayList>(); - list.add(new Pair(HRegionInfo.ROOT_REGIONINFO, - serverName)); - return list; - } - // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress - CollectingVisitor> visitor = - new CollectingVisitor>() { - private Pair current = null; - - @Override - public boolean visit(Result r) throws IOException { - HRegionInfo hri = - HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER); - if (hri == null) { - LOG.warn("No serialized HRegionInfo in " + r); - return true; - } - if (!isInsideTable(hri, tableName)) return false; - if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; - ServerName sn = HRegionInfo.getServerName(r); - // Populate this.current so available when we call #add - this.current = new Pair(hri, sn); - // Else call super and add this Result to the collection. - return super.visit(r); - } - - @Override - void add(Result r) { - this.results.add(this.current); - } - }; - fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableName), - Bytes.equals(tableName, HConstants.META_TABLE_NAME)); - return visitor.getResults(); - } - - /** - * @param catalogTracker - * @param serverName - * @return List of user regions installed on this server (does not include - * catalog regions). - * @throws IOException - */ - public static NavigableMap - getServerUserRegions(CatalogTracker catalogTracker, final ServerName serverName) - throws IOException { - final NavigableMap hris = new TreeMap(); - // Fill the above hris map with entries from .META. that have the passed - // servername. - CollectingVisitor v = new CollectingVisitor() { - @Override - void add(Result r) { - if (r == null || r.isEmpty()) return; - ServerName sn = HRegionInfo.getServerName(r); - if (sn != null && sn.equals(serverName)) this.results.add(r); - } - }; - fullScan(catalogTracker, v); - List results = v.getResults(); - if (results != null && !results.isEmpty()) { - // Convert results to Map keyed by HRI - for (Result r: results) { - Pair p = HRegionInfo.getHRegionInfoAndServerName(r); - if (p != null && p.getFirst() != null) hris.put(p.getFirst(), r); - } - } - return hris; - } - - public static void fullScanMetaAndPrint(final CatalogTracker catalogTracker) - throws IOException { - Visitor v = new Visitor() { - @Override - public boolean visit(Result r) throws IOException { - if (r == null || r.isEmpty()) return true; - LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r); - HRegionInfo hrim = HRegionInfo.getHRegionInfo(r); - LOG.info("fullScanMetaAndPrint.HRI Print= " + hrim); - return true; - } - }; - fullScan(catalogTracker, v); - } - - /** - * Performs a full scan of a catalog table. - * @param catalogTracker - * @param visitor Visitor invoked against each row. - * @param startrow Where to start the scan. Pass null if want to begin scan - * at first row. - * @param scanRoot True if we are to scan -ROOT- rather than - * .META., the default (pass false to scan .META.) 
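All of the fullScan overloads funnel into the Visitor callback defined further down in this file. A sketch of a custom MetaReader.Visitor that counts region rows (the counting class is illustrative); returning false from visit() stops the scan early:

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;

public class CountCatalogRows {
  public static void main(String[] args) throws IOException, InterruptedException {
    CatalogTracker tracker = new CatalogTracker(HBaseConfiguration.create());
    tracker.start();
    try {
      final int[] count = {0};
      MetaReader.fullScan(tracker, new MetaReader.Visitor() {
        @Override
        public boolean visit(Result r) throws IOException {
          if (r == null || r.isEmpty()) return true;       // skip, keep scanning
          HRegionInfo hri = HRegionInfo.getHRegionInfo(r);  // null if the row carries no HRI
          if (hri != null) count[0]++;
          return true;                                      // returning false stops the scan
        }
      });
      System.out.println(count[0] + " region rows in .META.");
    } finally {
      tracker.stop();
    }
  }
}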
- * @throws IOException - */ - static void fullScan(CatalogTracker catalogTracker, - final Visitor visitor, final byte [] startrow, final boolean scanRoot) - throws IOException { - Scan scan = new Scan(); - if (startrow != null) scan.setStartRow(startrow); - if (startrow == null && !scanRoot) { - int caching = catalogTracker.getConnection().getConfiguration() - .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100); - scan.setCaching(caching); - } - scan.addFamily(HConstants.CATALOG_FAMILY); - HTable metaTable = scanRoot? - getRootHTable(catalogTracker): getMetaHTable(catalogTracker); - ResultScanner scanner = metaTable.getScanner(scan); - try { - Result data; - while((data = scanner.next()) != null) { - if (data.isEmpty()) continue; - // Break if visit returns false. - if (!visitor.visit(data)) break; - } - } finally { - scanner.close(); - metaTable.close(); - } - return; - } - - /** - * Implementations 'visit' a catalog table row. - */ - public interface Visitor { - /** - * Visit the catalog table row. - * @param r A row from catalog table - * @return True if we are to proceed scanning the table, else false if - * we are to stop now. - */ - public boolean visit(final Result r) throws IOException; - } - - /** - * A {@link Visitor} that collects content out of passed {@link Result}. - */ - static abstract class CollectingVisitor implements Visitor { - final List results = new ArrayList(); - @Override - public boolean visit(Result r) throws IOException { - if (r == null || r.isEmpty()) return true; - add(r); - return true; - } - - abstract void add(Result r); - - /** - * @return Collected results; wait till visits complete to collect all - * possible results - */ - List getResults() { - return this.results; - } - } - - /** - * Collects all returned. - */ - static class CollectAllVisitor extends CollectingVisitor { - @Override - void add(Result r) { - this.results.add(r); - } - } - - /** - * Count regions in .META. for passed table. - * @param c - * @param tableName - * @return Count or regions in table tableName - * @throws IOException - */ - public static int getRegionCount(final Configuration c, final String tableName) throws IOException { - HTable t = new HTable(c, tableName); - try { - return t.getRegionLocations().size(); - } finally { - t.close(); - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java deleted file mode 100644 index 2bb0687..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.util.Iterator; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Helper class for custom client scanners. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public abstract class AbstractClientScanner implements ResultScanner { - - @Override - public Iterator iterator() { - return new Iterator() { - // The next RowResult, possibly pre-read - Result next = null; - - // return true if there is another item pending, false if there isn't. - // this method is where the actual advancing takes place, but you need - // to call next() to consume it. hasNext() will only advance if there - // isn't a pending next(). - public boolean hasNext() { - if (next == null) { - try { - next = AbstractClientScanner.this.next(); - return next != null; - } catch (IOException e) { - throw new RuntimeException(e); - } - } - return true; - } - - // get the pending next item and advance the iterator. returns null if - // there is no next item. - public Result next() { - // since hasNext() does the real advancing, we call this to determine - // if there is a next before proceeding. - if (!hasNext()) { - return null; - } - - // if we get to here, then hasNext() has given us an item to return. - // we want to return the item and then null out the next pointer, so - // we use a temporary variable. - Result temp = next; - next = null; - return temp; - } - - public void remove() { - throw new UnsupportedOperationException(); - } - }; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/Action.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/Action.java deleted file mode 100644 index 06475d0..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/Action.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * A Get, Put or Delete associated with it's region. Used internally by - * {@link HTable#batch} to associate the action with it's region and maintain - * the index from the original request. 
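AbstractClientScanner exists so that every ResultScanner gets for-each iteration for free through the iterator() shown above. A sketch of how that surfaces to callers, using a hypothetical table name:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ForEachScan {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "usertable");           // hypothetical table
    ResultScanner scanner = table.getScanner(new Scan());   // concrete type is a ClientScanner
    try {
      // The for-each form is what AbstractClientScanner.iterator() enables;
      // each hasNext() pre-reads a single Result from the scanner.
      for (Result r : scanner) {
        System.out.println(Bytes.toStringBinary(r.getRow()));
      }
    } finally {
      scanner.close();
      table.close();
    }
  }
}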
- */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class Action implements Comparable { - - private Row action; - private int originalIndex; - private R result; - - /** - * This constructor is replaced by {@link #Action(Row, int)} - */ - @Deprecated - public Action(byte[] regionName, Row action, int originalIndex) { - this(action, originalIndex); - } - - public Action(Row action, int originalIndex) { - super(); - this.action = action; - this.originalIndex = originalIndex; - } - - @Deprecated - public byte[] getRegionName() { - return null; - } - - @Deprecated - public void setRegionName(byte[] regionName) { - } - - public R getResult() { - return result; - } - - public void setResult(R result) { - this.result = result; - } - - public Row getAction() { - return action; - } - - public int getOriginalIndex() { - return originalIndex; - } - - @Override - public int compareTo(Object o) { - return action.compareTo(((Action) o).getAction()); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java deleted file mode 100644 index 50f8b2a..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.ipc.VersionedProtocol; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.security.TokenInfo; -import org.apache.hadoop.hbase.security.KerberosInfo; - -/** - * Protocol that a HBase client uses to communicate with a region server. - */ -@KerberosInfo( - serverPrincipal = "hbase.regionserver.kerberos.principal") -@TokenInfo("HBASE_AUTH_TOKEN") -@InterfaceAudience.Private -public interface AdminProtocol extends - AdminService.BlockingInterface, VersionedProtocol { - public static final long VERSION = 1L; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/Append.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/Append.java deleted file mode 100644 index ba1e085..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/Append.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * Performs Append operations on a single row. - *
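For reference while reviewing the move, a minimal Append round trip against the methods declared below; the "logs" table, row key, and column are made up, and the operation is issued through HTable#append:

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendExample {
  public static void main(String[] args) throws IOException {
    HTable table = new HTable(HBaseConfiguration.create(), "logs");  // hypothetical table
    try {
      Append append = new Append(Bytes.toBytes("row-1"));
      append.add(Bytes.toBytes("f"), Bytes.toBytes("trace"), Bytes.toBytes("|step-42"));
      append.setReturnResults(false);  // don't ship the appended cell back to the client
      table.append(append);            // atomic for writers, not for concurrent readers
    } finally {
      table.close();
    }
  }
}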
- * Note that this operation does not appear atomic to readers. Appends are done - * under a single row lock, so write operations to a row are synchronized, but - * readers do not take row locks so get and scan operations can see this - * operation partially completed. - *
      - * To append to a set of columns of a row, instantiate an Append object with the - * row to append to. At least one column to append must be specified using the - * {@link #add(byte[], byte[], byte[])} method. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class Append extends Mutation { - private static final String RETURN_RESULTS = "_rr_"; - /** - * @param returnResults - * True (default) if the append operation should return the results. - * A client that is not interested in the result can save network - * bandwidth setting this to false. - */ - public void setReturnResults(boolean returnResults) { - setAttribute(RETURN_RESULTS, Bytes.toBytes(returnResults)); - } - - /** - * @return current setting for returnResults - */ - public boolean isReturnResults() { - byte[] v = getAttribute(RETURN_RESULTS); - return v == null ? true : Bytes.toBoolean(v); - } - - /** - * Create a Append operation for the specified row. - *
      - * At least one column must be appended to. - * @param row row key - */ - public Append(byte[] row) { - this.row = Arrays.copyOf(row, row.length); - } - - /** - * Add the specified column and value to this Append operation. - * @param family family name - * @param qualifier column qualifier - * @param value value to append to specified column - * @return this - */ - public Append add(byte [] family, byte [] qualifier, byte [] value) { - List list = familyMap.get(family); - if(list == null) { - list = new ArrayList(); - } - list.add(new KeyValue( - this.row, family, qualifier, this.ts, KeyValue.Type.Put, value)); - familyMap.put(family, list); - return this; - } -} \ No newline at end of file diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/Attributes.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/Attributes.java deleted file mode 100644 index f916ea6..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/Attributes.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import java.util.Map; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -@InterfaceAudience.Public -@InterfaceStability.Stable -public interface Attributes { - /** - * Sets an attribute. - * In case value = null attribute is removed from the attributes map. - * Attribute names starting with _ indicate system attributes. - * @param name attribute name - * @param value attribute value - */ - public void setAttribute(String name, byte[] value); - - /** - * Gets an attribute - * @param name attribute name - * @return attribute value if attribute is set, null otherwise - */ - public byte[] getAttribute(String name); - - /** - * Gets all attributes - * @return unmodifiable map of all attributes - */ - public Map getAttributesMap(); -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java deleted file mode 100644 index 57d84e5..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.ipc.VersionedProtocol; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; -import org.apache.hadoop.hbase.security.TokenInfo; -import org.apache.hadoop.hbase.security.KerberosInfo; - -/** - * Protocol that a HBase client uses to communicate with a region server. - */ -@KerberosInfo( - serverPrincipal = "hbase.regionserver.kerberos.principal") -@TokenInfo("HBASE_AUTH_TOKEN") -@InterfaceAudience.Public -@InterfaceStability.Evolving -public interface ClientProtocol extends - ClientService.BlockingInterface, VersionedProtocol { - public static final long VERSION = 1L; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java deleted file mode 100644 index 553346b..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ /dev/null @@ -1,405 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.LinkedList; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.NotServingRegionException; -import org.apache.hadoop.hbase.OutOfOrderScannerNextException; -import org.apache.hadoop.hbase.UnknownScannerException; -import org.apache.hadoop.hbase.client.metrics.ScanMetrics; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos; -import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.DataOutputBuffer; - -/** - * Implements the scanner interface for the HBase client. 
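ClientScanner is normally obtained through HTable#getScanner, but the public constructor shown below can also be used directly. A sketch with an illustrative table name and row-key range:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.ClientScanner;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class DirectClientScanner {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Scan scan = new Scan(Bytes.toBytes("row-000"), Bytes.toBytes("row-999"));
    scan.setCaching(100);  // rows per RPC; otherwise hbase.client.scanner.caching applies
    // An HConnection is looked up from the Configuration; the scanner then
    // walks every region between the start and stop rows.
    ClientScanner scanner = new ClientScanner(conf, scan, Bytes.toBytes("usertable"));
    try {
      for (Result r = scanner.next(); r != null; r = scanner.next()) {
        System.out.println(Bytes.toStringBinary(r.getRow()));
      }
    } finally {
      scanner.close();
    }
  }
}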
- * If there are multiple regions in a table, this scanner will iterate - * through them all. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class ClientScanner extends AbstractClientScanner { - private final Log LOG = LogFactory.getLog(this.getClass()); - private Scan scan; - private boolean closed = false; - // Current region scanner is against. Gets cleared if current region goes - // wonky: e.g. if it splits on us. - private HRegionInfo currentRegion = null; - private ScannerCallable callable = null; - private final LinkedList cache = new LinkedList(); - private final int caching; - private long lastNext; - // Keep lastResult returned successfully in case we have to reset scanner. - private Result lastResult = null; - private ScanMetrics scanMetrics = null; - private final long maxScannerResultSize; - private final HConnection connection; - private final byte[] tableName; - private final int scannerTimeout; - - /** - * Create a new ClientScanner for the specified table. An HConnection will be - * retrieved using the passed Configuration. - * Note that the passed {@link Scan}'s start row maybe changed changed. - * - * @param conf The {@link Configuration} to use. - * @param scan {@link Scan} to use in this scanner - * @param tableName The table that we wish to scan - * @throws IOException - */ - public ClientScanner(final Configuration conf, final Scan scan, - final byte[] tableName) throws IOException { - this(conf, scan, tableName, HConnectionManager.getConnection(conf)); - } - - /** - * Create a new ClientScanner for the specified table - * Note that the passed {@link Scan}'s start row maybe changed changed. - * - * @param conf The {@link Configuration} to use. - * @param scan {@link Scan} to use in this scanner - * @param tableName The table that we wish to scan - * @param connection Connection identifying the cluster - * @throws IOException - */ - public ClientScanner(final Configuration conf, final Scan scan, - final byte[] tableName, HConnection connection) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Creating scanner over " - + Bytes.toString(tableName) - + " starting at key '" + Bytes.toStringBinary(scan.getStartRow()) + "'"); - } - this.scan = scan; - this.tableName = tableName; - this.lastNext = System.currentTimeMillis(); - this.connection = connection; - if (scan.getMaxResultSize() > 0) { - this.maxScannerResultSize = scan.getMaxResultSize(); - } else { - this.maxScannerResultSize = conf.getLong( - HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, - HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); - } - this.scannerTimeout = conf.getInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, - HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD); - - // check if application wants to collect scan metrics - byte[] enableMetrics = scan.getAttribute( - Scan.SCAN_ATTRIBUTES_METRICS_ENABLE); - if (enableMetrics != null && Bytes.toBoolean(enableMetrics)) { - scanMetrics = new ScanMetrics(); - } - - // Use the caching from the Scan. If not set, use the default cache setting for this table. 
- if (this.scan.getCaching() > 0) { - this.caching = this.scan.getCaching(); - } else { - this.caching = conf.getInt( - HConstants.HBASE_CLIENT_SCANNER_CACHING, - HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); - } - - // initialize the scanner - nextScanner(this.caching, false); - } - - protected HConnection getConnection() { - return this.connection; - } - - protected byte[] getTableName() { - return this.tableName; - } - - protected Scan getScan() { - return scan; - } - - protected long getTimestamp() { - return lastNext; - } - - // returns true if the passed region endKey - private boolean checkScanStopRow(final byte [] endKey) { - if (this.scan.getStopRow().length > 0) { - // there is a stop row, check to see if we are past it. - byte [] stopRow = scan.getStopRow(); - int cmp = Bytes.compareTo(stopRow, 0, stopRow.length, - endKey, 0, endKey.length); - if (cmp <= 0) { - // stopRow <= endKey (endKey is equals to or larger than stopRow) - // This is a stop. - return true; - } - } - return false; //unlikely. - } - - /* - * Gets a scanner for the next region. If this.currentRegion != null, then - * we will move to the endrow of this.currentRegion. Else we will get - * scanner at the scan.getStartRow(). We will go no further, just tidy - * up outstanding scanners, if currentRegion != null and - * done is true. - * @param nbRows - * @param done Server-side says we're done scanning. - */ - private boolean nextScanner(int nbRows, final boolean done) - throws IOException { - // Close the previous scanner if it's open - if (this.callable != null) { - this.callable.setClose(); - callable.withRetries(); - this.callable = null; - } - - // Where to start the next scanner - byte [] localStartKey; - - // if we're at end of table, close and return false to stop iterating - if (this.currentRegion != null) { - byte [] endKey = this.currentRegion.getEndKey(); - if (endKey == null || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY) || - checkScanStopRow(endKey) || - done) { - close(); - if (LOG.isDebugEnabled()) { - LOG.debug("Finished with scanning at " + this.currentRegion); - } - return false; - } - localStartKey = endKey; - if (LOG.isDebugEnabled()) { - LOG.debug("Finished with region " + this.currentRegion); - } - } else { - localStartKey = this.scan.getStartRow(); - } - - if (LOG.isDebugEnabled()) { - LOG.debug("Advancing internal scanner to startKey at '" + - Bytes.toStringBinary(localStartKey) + "'"); - } - try { - callable = getScannerCallable(localStartKey, nbRows); - // Open a scanner on the region server starting at the - // beginning of the region - callable.withRetries(); - this.currentRegion = callable.getHRegionInfo(); - if (this.scanMetrics != null) { - this.scanMetrics.countOfRegions.incrementAndGet(); - } - } catch (IOException e) { - close(); - throw e; - } - return true; - } - - protected ScannerCallable getScannerCallable(byte [] localStartKey, - int nbRows) { - scan.setStartRow(localStartKey); - ScannerCallable s = new ScannerCallable(getConnection(), - getTableName(), scan, this.scanMetrics); - s.setCaching(nbRows); - return s; - } - - /** - * Publish the scan metrics. For now, we use scan.setAttribute to pass the metrics back to the - * application or TableInputFormat.Later, we could push it to other systems. We don't use metrics - * framework because it doesn't support multi-instances of the same metrics on the same machine; - * for scan/map reduce scenarios, we will have multiple scans running at the same time. 
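Scan metrics are opt-in, as the javadoc below describes: set SCAN_ATTRIBUTES_METRICS_ENABLE before scanning, drain or close the scanner, then read the serialized metrics back off the Scan. Parsing the attribute with the generated MapReduceProtos.ScanMetrics message is an assumption about how the serialized form is consumed; this hunk only shows it being written. The table name is hypothetical:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanWithMetrics {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "usertable");  // hypothetical table
    Scan scan = new Scan();
    scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        // Drain the scanner; metrics are written once it is exhausted or closed.
      }
    } finally {
      scanner.close();
      table.close();
    }
    byte[] serialized = scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
    if (serialized != null) {
      System.out.println(MapReduceProtos.ScanMetrics.parseFrom(serialized));
    }
  }
}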
- * - * By default, scan metrics are disabled; if the application wants to collect them, this behavior - * can be turned on by calling calling: - * - * scan.setAttribute(SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE)) - */ - private void writeScanMetrics() throws IOException { - if (this.scanMetrics == null) { - return; - } - final DataOutputBuffer d = new DataOutputBuffer(); - MapReduceProtos.ScanMetrics pScanMetrics = ProtobufUtil.toScanMetrics(scanMetrics); - scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA, pScanMetrics.toByteArray()); - } - - public Result next() throws IOException { - // If the scanner is closed and there's nothing left in the cache, next is a no-op. - if (cache.size() == 0 && this.closed) { - return null; - } - if (cache.size() == 0) { - Result [] values = null; - long remainingResultSize = maxScannerResultSize; - int countdown = this.caching; - // We need to reset it if it's a new callable that was created - // with a countdown in nextScanner - callable.setCaching(this.caching); - // This flag is set when we want to skip the result returned. We do - // this when we reset scanner because it split under us. - boolean skipFirst = false; - boolean retryAfterOutOfOrderException = true; - do { - try { - if (skipFirst) { - // Skip only the first row (which was the last row of the last - // already-processed batch). - callable.setCaching(1); - values = callable.withRetries(); - callable.setCaching(this.caching); - skipFirst = false; - } - // Server returns a null values if scanning is to stop. Else, - // returns an empty array if scanning is to go on and we've just - // exhausted current region. - values = callable.withRetries(); - retryAfterOutOfOrderException = true; - } catch (DoNotRetryIOException e) { - if (e instanceof UnknownScannerException) { - long timeout = lastNext + scannerTimeout; - // If we are over the timeout, throw this exception to the client - // Else, it's because the region moved and we used the old id - // against the new region server; reset the scanner. - if (timeout < System.currentTimeMillis()) { - long elapsed = System.currentTimeMillis() - lastNext; - ScannerTimeoutException ex = new ScannerTimeoutException( - elapsed + "ms passed since the last invocation, " + - "timeout is currently set to " + scannerTimeout); - ex.initCause(e); - throw ex; - } - } else { - Throwable cause = e.getCause(); - if ((cause == null || (!(cause instanceof NotServingRegionException) - && !(cause instanceof RegionServerStoppedException))) - && !(e instanceof OutOfOrderScannerNextException)) { - throw e; - } - } - // Else, its signal from depths of ScannerCallable that we got an - // NSRE on a next and that we need to reset the scanner. - if (this.lastResult != null) { - this.scan.setStartRow(this.lastResult.getRow()); - // Skip first row returned. We already let it out on previous - // invocation. 
- skipFirst = true; - } - if (e instanceof OutOfOrderScannerNextException) { - if (retryAfterOutOfOrderException) { - retryAfterOutOfOrderException = false; - } else { - throw new DoNotRetryIOException("Failed after retry" - + ", it could be cause by rpc timeout", e); - } - } - // Clear region - this.currentRegion = null; - callable = null; - continue; - } - long currentTime = System.currentTimeMillis(); - if (this.scanMetrics != null ) { - this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime-lastNext); - } - lastNext = currentTime; - if (values != null && values.length > 0) { - for (Result rs : values) { - cache.add(rs); - for (KeyValue kv : rs.raw()) { - remainingResultSize -= kv.heapSize(); - } - countdown--; - this.lastResult = rs; - } - } - // Values == null means server-side filter has determined we must STOP - } while (remainingResultSize > 0 && countdown > 0 && nextScanner(countdown, values == null)); - } - - if (cache.size() > 0) { - return cache.poll(); - } - - // if we exhausted this scanner before calling close, write out the scan metrics - writeScanMetrics(); - return null; - } - - /** - * Get nbRows rows. - * How many RPCs are made is determined by the {@link Scan#setCaching(int)} - * setting (or hbase.client.scanner.caching in hbase-site.xml). - * @param nbRows number of rows to return - * @return Between zero and nbRows RowResults. Scan is done - * if returned array is of zero-length (We never return null). - * @throws IOException - */ - public Result [] next(int nbRows) throws IOException { - // Collect values to be returned here - ArrayList resultSets = new ArrayList(nbRows); - for(int i = 0; i < nbRows; i++) { - Result next = next(); - if (next != null) { - resultSets.add(next); - } else { - break; - } - } - return resultSets.toArray(new Result[resultSets.size()]); - } - - public void close() { - if (callable != null) { - callable.setClose(); - try { - callable.withRetries(); - } catch (IOException e) { - // We used to catch this error, interpret, and rethrow. However, we - // have since decided that it's not nice for a scanner's close to - // throw exceptions. Chances are it was just an UnknownScanner - // exception due to lease time out. - } finally { - // we want to output the scan metrics even if an error occurred on close - try { - writeScanMetrics(); - } catch (IOException e) { - // As above, we still don't want the scanner close() method to throw. - } - } - callable = null; - } - closed = true; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java deleted file mode 100644 index d368b24..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HConstants; - -import java.util.Random; - -/** - * Utility used by client connections such as {@link HConnection} and - * {@link ServerCallable} - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class ConnectionUtils { - - private static final Random RANDOM = new Random(); - /** - * Calculate pause time. - * Built on {@link HConstants#RETRY_BACKOFF}. - * @param pause - * @param tries - * @return How long to wait after tries retries - */ - public static long getPauseTime(final long pause, final int tries) { - int ntries = tries; - if (ntries >= HConstants.RETRY_BACKOFF.length) { - ntries = HConstants.RETRY_BACKOFF.length - 1; - } - - long normalPause = pause * HConstants.RETRY_BACKOFF[ntries]; - long jitter = (long)(normalPause * RANDOM.nextFloat() * 0.01f); // 1% possible jitter - return normalPause + jitter; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java deleted file mode 100644 index 9a75546..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java +++ /dev/null @@ -1,256 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.util.Bytes; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -/** - * Used to perform Delete operations on a single row. - *
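ConnectionUtils.getPauseTime above multiplies the configured pause by a per-attempt backoff factor and adds roughly 1% random jitter so retrying clients do not wake up in lockstep. A small sketch, assuming a hypothetical idempotent operation, of how a retry loop might use it:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.ConnectionUtils;

    public class RetryWithBackoff {
      // retries a hypothetical operation, sleeping between failures using the
      // same pause/backoff table the HBase client uses internally
      static void callWithRetries(int maxRetries, long pauseMillis)
          throws IOException, InterruptedException {
        for (int tries = 0; tries < maxRetries; tries++) {
          try {
            doCall();              // hypothetical operation that may throw IOException
            return;
          } catch (IOException e) {
            if (tries == maxRetries - 1) {
              throw e;             // retries exhausted
            }
            Thread.sleep(ConnectionUtils.getPauseTime(pauseMillis, tries));
          }
        }
      }

      static void doCall() throws IOException {
        // placeholder for the real call
      }
    }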

- * To delete an entire row, instantiate a Delete object with the row - * to delete. To further define the scope of what to delete, call the - * additional methods outlined below. - *

      - * To delete specific families, execute {@link #deleteFamily(byte[]) deleteFamily} - * for each family to delete. - *

      - * To delete multiple versions of specific columns, execute - * {@link #deleteColumns(byte[], byte[]) deleteColumns} - * for each column to delete. - *

      - * To delete specific versions of specific columns, execute - * {@link #deleteColumn(byte[], byte[], long) deleteColumn} - * for each column version to delete. - *

- * Specifying timestamps, deleteFamily and deleteColumns will delete all - * versions with a timestamp less than or equal to that passed. If no - * timestamp is specified, an entry is added with a timestamp of 'now' - * where 'now' is the server's System.currentTimeMillis(). - * Specifying a timestamp to the deleteColumn method will - * delete versions only with a timestamp equal to that specified. - * If no timestamp is passed to deleteColumn, internally, it figures the - * most recent cell's timestamp and adds a delete at that timestamp; i.e. - * it deletes the most recently added cell. - *
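As a concrete illustration of the version semantics described above, a hypothetical caller (class, table, row, family, and qualifier names are made up) might combine the three delete flavours like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "testtable");                  // hypothetical table
        Delete d = new Delete(Bytes.toBytes("row1"));
        d.deleteFamily(Bytes.toBytes("cf1"));                          // all versions of all columns in cf1
        d.deleteColumns(Bytes.toBytes("cf2"), Bytes.toBytes("q1"));    // all versions of cf2:q1
        d.deleteColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q2"),
            1356500000000L);                                           // exactly this (hypothetical) version
        table.delete(d);
        table.close();
      }
    }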

The timestamp passed to the constructor is used ONLY for delete of - * rows. For anything less -- a deleteColumn, deleteColumns or - * deleteFamily -- you need to use the method overrides that take a - * timestamp. The constructor timestamp is not referenced. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class Delete extends Mutation implements Comparable { - /** - * Create a Delete operation for the specified row. - *

      - * If no further operations are done, this will delete everything - * associated with the specified row (all versions of all columns in all - * families). - * @param row row key - */ - public Delete(byte [] row) { - this(row, HConstants.LATEST_TIMESTAMP, null); - } - - /** - * Create a Delete operation for the specified row and timestamp, using - * an optional row lock.

      - * - * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the - * specified timestamp.

      - * - * This timestamp is ONLY used for a delete row operation. If specifying - * families or columns, you must specify each timestamp individually. - * @param row row key - * @param timestamp maximum version timestamp (only for delete row) - * @param rowLock previously acquired row lock, or null - */ - public Delete(byte [] row, long timestamp, RowLock rowLock) { - this.row = row; - this.ts = timestamp; - if (rowLock != null) { - this.lockId = rowLock.getLockId(); - } - } - - /** - * @param d Delete to clone. - */ - public Delete(final Delete d) { - this.row = d.getRow(); - this.ts = d.getTimeStamp(); - this.lockId = d.getLockId(); - this.familyMap.putAll(d.getFamilyMap()); - this.writeToWAL = d.writeToWAL; - } - - /** - * Advanced use only. - * Add an existing delete marker to this Delete object. - * @param kv An existing KeyValue of type "delete". - * @return this for invocation chaining - * @throws IOException - */ - public Delete addDeleteMarker(KeyValue kv) throws IOException { - if (!kv.isDelete()) { - throw new IOException("The recently added KeyValue is not of type " - + "delete. Rowkey: " + Bytes.toStringBinary(this.row)); - } - if (Bytes.compareTo(this.row, 0, row.length, kv.getBuffer(), - kv.getRowOffset(), kv.getRowLength()) != 0) { - throw new IOException("The row in the recently added KeyValue " - + Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(), - kv.getRowLength()) + " doesn't match the original one " - + Bytes.toStringBinary(this.row)); - } - byte [] family = kv.getFamily(); - List list = familyMap.get(family); - if (list == null) { - list = new ArrayList(); - } - list.add(kv); - familyMap.put(family, list); - return this; - } - - /** - * Delete all versions of all columns of the specified family. - *
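addDeleteMarker above is the advanced path: it accepts a pre-built delete-type KeyValue, provided its row matches the Delete's row. A hedged sketch with made-up class, row, family, and qualifier names:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteMarkerExample {
      public static Delete buildDelete() throws Exception {
        byte[] row = Bytes.toBytes("row1");
        Delete d = new Delete(row);
        // a delete marker for one specific cell version; its row must match d's row
        KeyValue marker = new KeyValue(row, Bytes.toBytes("cf"), Bytes.toBytes("q"),
            1356500000000L, KeyValue.Type.Delete);
        d.addDeleteMarker(marker);
        return d;
      }
    }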

      - * Overrides previous calls to deleteColumn and deleteColumns for the - * specified family. - * @param family family name - * @return this for invocation chaining - */ - public Delete deleteFamily(byte [] family) { - this.deleteFamily(family, HConstants.LATEST_TIMESTAMP); - return this; - } - - /** - * Delete all columns of the specified family with a timestamp less than - * or equal to the specified timestamp. - *

      - * Overrides previous calls to deleteColumn and deleteColumns for the - * specified family. - * @param family family name - * @param timestamp maximum version timestamp - * @return this for invocation chaining - */ - public Delete deleteFamily(byte [] family, long timestamp) { - List list = familyMap.get(family); - if(list == null) { - list = new ArrayList(); - } else if(!list.isEmpty()) { - list.clear(); - } - list.add(new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamily)); - familyMap.put(family, list); - return this; - } - - /** - * Delete all versions of the specified column. - * @param family family name - * @param qualifier column qualifier - * @return this for invocation chaining - */ - public Delete deleteColumns(byte [] family, byte [] qualifier) { - this.deleteColumns(family, qualifier, HConstants.LATEST_TIMESTAMP); - return this; - } - - /** - * Delete all versions of the specified column with a timestamp less than - * or equal to the specified timestamp. - * @param family family name - * @param qualifier column qualifier - * @param timestamp maximum version timestamp - * @return this for invocation chaining - */ - public Delete deleteColumns(byte [] family, byte [] qualifier, long timestamp) { - List list = familyMap.get(family); - if (list == null) { - list = new ArrayList(); - } - list.add(new KeyValue(this.row, family, qualifier, timestamp, - KeyValue.Type.DeleteColumn)); - familyMap.put(family, list); - return this; - } - - /** - * Delete the latest version of the specified column. - * This is an expensive call in that on the server-side, it first does a - * get to find the latest versions timestamp. Then it adds a delete using - * the fetched cells timestamp. - * @param family family name - * @param qualifier column qualifier - * @return this for invocation chaining - */ - public Delete deleteColumn(byte [] family, byte [] qualifier) { - this.deleteColumn(family, qualifier, HConstants.LATEST_TIMESTAMP); - return this; - } - - /** - * Delete the specified version of the specified column. - * @param family family name - * @param qualifier column qualifier - * @param timestamp version timestamp - * @return this for invocation chaining - */ - public Delete deleteColumn(byte [] family, byte [] qualifier, long timestamp) { - List list = familyMap.get(family); - if(list == null) { - list = new ArrayList(); - } - list.add(new KeyValue( - this.row, family, qualifier, timestamp, KeyValue.Type.Delete)); - familyMap.put(family, list); - return this; - } - - /** - * Set the timestamp of the delete. - * - * @param timestamp - */ - public void setTimestamp(long timestamp) { - this.ts = timestamp; - } - - @Override - public Map toMap(int maxCols) { - // we start with the fingerprint map and build on top of it. - Map map = super.toMap(maxCols); - // why is put not doing this? - map.put("ts", this.ts); - return map; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/Get.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/Get.java deleted file mode 100644 index 0ade4e9..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ /dev/null @@ -1,425 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.io.TimeRange; -import org.apache.hadoop.hbase.util.Bytes; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.NavigableSet; -import java.util.Set; -import java.util.TreeMap; -import java.util.TreeSet; - -/** - * Used to perform Get operations on a single row. - *

- * To get everything for a row, instantiate a Get object with the row to get. - * To further define the scope of what to get, call the additional methods - * outlined below. - *

      - * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} - * for each family to retrieve. - *

      - * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn} - * for each column to retrieve. - *

      - * To only retrieve columns within a specific range of version timestamps, - * execute {@link #setTimeRange(long, long) setTimeRange}. - *

      - * To only retrieve columns with a specific timestamp, execute - * {@link #setTimeStamp(long) setTimestamp}. - *

      - * To limit the number of versions of each column to be returned, execute - * {@link #setMaxVersions(int) setMaxVersions}. - *

      - * To add a filter, execute {@link #setFilter(Filter) setFilter}. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class Get extends OperationWithAttributes - implements Row, Comparable { - - private byte [] row = null; - private long lockId = -1L; - private int maxVersions = 1; - private boolean cacheBlocks = true; - private int storeLimit = -1; - private int storeOffset = 0; - private Filter filter = null; - private TimeRange tr = new TimeRange(); - private Map> familyMap = - new TreeMap>(Bytes.BYTES_COMPARATOR); - - /** - * Create a Get operation for the specified row. - *
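Putting the calls listed above together, a hypothetical read (class, table, family, and qualifier names are invented) that narrows the result to selected families and columns, a version limit, and a time range:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GetExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "testtable");              // hypothetical table
        Get get = new Get(Bytes.toBytes("row1"));
        get.addFamily(Bytes.toBytes("cf1"));                       // every column in cf1
        get.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q1"));  // just cf2:q1
        get.setMaxVersions(3);                                     // up to three versions per column
        get.setTimeRange(0L, System.currentTimeMillis());          // [minStamp, maxStamp)
        Result result = table.get(get);
        byte[] value = result.getValue(Bytes.toBytes("cf2"), Bytes.toBytes("q1"));
        System.out.println(value == null ? "no value" : Bytes.toString(value));
        table.close();
      }
    }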

      - * If no further operations are done, this will get the latest version of - * all columns in all families of the specified row. - * @param row row key - */ - public Get(byte [] row) { - this(row, null); - } - - /** - * Create a Get operation for the specified row, using an existing row lock. - *

      - * If no further operations are done, this will get the latest version of - * all columns in all families of the specified row. - * @param row row key - * @param rowLock previously acquired row lock, or null - */ - public Get(byte [] row, RowLock rowLock) { - this.row = row; - if(rowLock != null) { - this.lockId = rowLock.getLockId(); - } - } - - /** - * Get all columns from the specified family. - *

      - * Overrides previous calls to addColumn for this family. - * @param family family name - * @return the Get object - */ - public Get addFamily(byte [] family) { - familyMap.remove(family); - familyMap.put(family, null); - return this; - } - - /** - * Get the column from the specific family with the specified qualifier. - *

      - * Overrides previous calls to addFamily for this family. - * @param family family name - * @param qualifier column qualifier - * @return the Get objec - */ - public Get addColumn(byte [] family, byte [] qualifier) { - NavigableSet set = familyMap.get(family); - if(set == null) { - set = new TreeSet(Bytes.BYTES_COMPARATOR); - } - if (qualifier == null) { - qualifier = HConstants.EMPTY_BYTE_ARRAY; - } - set.add(qualifier); - familyMap.put(family, set); - return this; - } - - /** - * Get versions of columns only within the specified timestamp range, - * [minStamp, maxStamp). - * @param minStamp minimum timestamp value, inclusive - * @param maxStamp maximum timestamp value, exclusive - * @throws IOException if invalid time range - * @return this for invocation chaining - */ - public Get setTimeRange(long minStamp, long maxStamp) - throws IOException { - tr = new TimeRange(minStamp, maxStamp); - return this; - } - - /** - * Get versions of columns with the specified timestamp. - * @param timestamp version timestamp - * @return this for invocation chaining - */ - public Get setTimeStamp(long timestamp) { - try { - tr = new TimeRange(timestamp, timestamp+1); - } catch(IOException e) { - // Will never happen - } - return this; - } - - /** - * Get all available versions. - * @return this for invocation chaining - */ - public Get setMaxVersions() { - this.maxVersions = Integer.MAX_VALUE; - return this; - } - - /** - * Get up to the specified number of versions of each column. - * @param maxVersions maximum versions for each column - * @throws IOException if invalid number of versions - * @return this for invocation chaining - */ - public Get setMaxVersions(int maxVersions) throws IOException { - if(maxVersions <= 0) { - throw new IOException("maxVersions must be positive"); - } - this.maxVersions = maxVersions; - return this; - } - - /** - * Set the maximum number of values to return per row per Column Family - * @param limit the maximum number of values returned / row / CF - * @return this for invocation chaining - */ - public Get setMaxResultsPerColumnFamily(int limit) { - this.storeLimit = limit; - return this; - } - - /** - * Set offset for the row per Column Family. This offset is only within a particular row/CF - * combination. It gets reset back to zero when we move to the next row or CF. - * @param offset is the number of kvs that will be skipped. - * @return this for invocation chaining - */ - public Get setRowOffsetPerColumnFamily(int offset) { - this.storeOffset = offset; - return this; - } - - /** - * Apply the specified server-side filter when performing the Get. - * Only {@link Filter#filterKeyValue(KeyValue)} is called AFTER all tests - * for ttl, column match, deletes and max versions have been run. - * @param filter filter to run on the server - * @return this for invocation chaining - */ - public Get setFilter(Filter filter) { - this.filter = filter; - return this; - } - - /* Accessors */ - - /** - * @return Filter - */ - public Filter getFilter() { - return this.filter; - } - - /** - * Set whether blocks should be cached for this Get. - *
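The per-column-family limit and offset defined above can be combined to page through a very wide row with repeated Gets. A sketch under the assumption that the caller wants every cell, 100 per family per round trip; the class and parameter names are hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;

    public class WideRowPaging {
      // reads one very wide row in pages of 100 cells per column family
      public static void readWideRow(HTable table, byte[] row) throws IOException {
        final int pageSize = 100;
        int offset = 0;
        while (true) {
          Get get = new Get(row);
          get.setMaxResultsPerColumnFamily(pageSize);
          get.setRowOffsetPerColumnFamily(offset);
          Result page = table.get(get);
          if (page.isEmpty()) {
            break;                 // every family is exhausted
          }
          // process page.raw() here
          offset += pageSize;
        }
      }
    }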

      - * This is true by default. When true, default settings of the table and - * family are used (this will never override caching blocks if the block - * cache is disabled for that family or entirely). - * - * @param cacheBlocks if false, default settings are overridden and blocks - * will not be cached - */ - public void setCacheBlocks(boolean cacheBlocks) { - this.cacheBlocks = cacheBlocks; - } - - /** - * Get whether blocks should be cached for this Get. - * @return true if default caching should be used, false if blocks should not - * be cached - */ - public boolean getCacheBlocks() { - return cacheBlocks; - } - - /** - * Method for retrieving the get's row - * @return row - */ - public byte [] getRow() { - return this.row; - } - - /** - * Method for retrieving the get's RowLock - * @return RowLock - */ - public RowLock getRowLock() { - return new RowLock(this.row, this.lockId); - } - - /** - * Method for retrieving the get's lockId - * @return lockId - */ - public long getLockId() { - return this.lockId; - } - - /** - * Method for retrieving the get's maximum number of version - * @return the maximum number of version to fetch for this get - */ - public int getMaxVersions() { - return this.maxVersions; - } - - /** - * Method for retrieving the get's maximum number of values - * to return per Column Family - * @return the maximum number of values to fetch per CF - */ - public int getMaxResultsPerColumnFamily() { - return this.storeLimit; - } - - /** - * Method for retrieving the get's offset per row per column - * family (#kvs to be skipped) - * @return the row offset - */ - public int getRowOffsetPerColumnFamily() { - return this.storeOffset; - } - - /** - * Method for retrieving the get's TimeRange - * @return timeRange - */ - public TimeRange getTimeRange() { - return this.tr; - } - - /** - * Method for retrieving the keys in the familyMap - * @return keys in the current familyMap - */ - public Set familySet() { - return this.familyMap.keySet(); - } - - /** - * Method for retrieving the number of families to get from - * @return number of families - */ - public int numFamilies() { - return this.familyMap.size(); - } - - /** - * Method for checking if any families have been inserted into this Get - * @return true if familyMap is non empty false otherwise - */ - public boolean hasFamilies() { - return !this.familyMap.isEmpty(); - } - - /** - * Method for retrieving the get's familyMap - * @return familyMap - */ - public Map> getFamilyMap() { - return this.familyMap; - } - - /** - * Compile the table and column family (i.e. schema) information - * into a String. Useful for parsing and aggregation by debugging, - * logging, and administration tools. - * @return Map - */ - @Override - public Map getFingerprint() { - Map map = new HashMap(); - List families = new ArrayList(); - map.put("families", families); - for (Map.Entry> entry : - this.familyMap.entrySet()) { - families.add(Bytes.toStringBinary(entry.getKey())); - } - return map; - } - - /** - * Compile the details beyond the scope of getFingerprint (row, columns, - * timestamps, etc.) into a Map along with the fingerprinted information. - * Useful for debugging, logging, and administration tools. - * @param maxCols a limit on the number of columns output prior to truncation - * @return Map - */ - @Override - public Map toMap(int maxCols) { - // we start with the fingerprint map and build on top of it. 
- Map map = getFingerprint(); - // replace the fingerprint's simple list of families with a - // map from column families to lists of qualifiers and kv details - Map> columns = new HashMap>(); - map.put("families", columns); - // add scalar information first - map.put("row", Bytes.toStringBinary(this.row)); - map.put("maxVersions", this.maxVersions); - map.put("cacheBlocks", this.cacheBlocks); - List timeRange = new ArrayList(); - timeRange.add(this.tr.getMin()); - timeRange.add(this.tr.getMax()); - map.put("timeRange", timeRange); - int colCount = 0; - // iterate through affected families and add details - for (Map.Entry> entry : - this.familyMap.entrySet()) { - List familyList = new ArrayList(); - columns.put(Bytes.toStringBinary(entry.getKey()), familyList); - if(entry.getValue() == null) { - colCount++; - --maxCols; - familyList.add("ALL"); - } else { - colCount += entry.getValue().size(); - if (maxCols <= 0) { - continue; - } - for (byte [] column : entry.getValue()) { - if (--maxCols <= 0) { - continue; - } - familyList.add(Bytes.toStringBinary(column)); - } - } - } - map.put("totalColumns", colCount); - if (this.filter != null) { - map.put("filter", this.filter.toString()); - } - // add the id if set - if (getId() != null) { - map.put("id", getId()); - } - return map; - } - - //Row - public int compareTo(Row other) { - return Bytes.compareTo(this.getRow(), other.getRow()); - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java deleted file mode 100644 index 6f3ce22..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ /dev/null @@ -1,2171 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.client; - -import java.io.Closeable; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.net.SocketTimeoutException; -import java.util.Arrays; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.regex.Pattern; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.ClusterStatus; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MasterNotRunningException; -import org.apache.hadoop.hbase.NotServingRegionException; -import org.apache.hadoop.hbase.RegionException; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableExistsException; -import org.apache.hadoop.hbase.TableNotEnabledException; -import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.UnknownRegionException; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.catalog.CatalogTracker; -import org.apache.hadoop.hbase.catalog.MetaReader; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; -import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.RequestConverter; -import org.apache.hadoop.hbase.protobuf.ResponseConverter; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; -import 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse; -import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; -import org.apache.hadoop.hbase.util.Addressing; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.util.StringUtils; -import org.apache.zookeeper.KeeperException; - -import com.google.protobuf.ByteString; -import com.google.protobuf.ServiceException; - -/** - * Provides an interface to manage HBase database table metadata + general - * administrative functions. Use HBaseAdmin to create, drop, list, enable and - * disable tables. Use it also to add and drop table column families. - * - *

      See {@link HTable} to add, update, and delete data from an individual table. - *
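A hypothetical admin session exercising the operations described above: create a table pre-split into a fixed number of regions over a key range, then disable and drop it. The class, table, family, and key names are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdminExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
          HTableDescriptor desc = new HTableDescriptor(Bytes.toBytes("testtable")); // hypothetical
          desc.addFamily(new HColumnDescriptor(Bytes.toBytes("cf")));
          // synchronous create, pre-split into 10 regions across the key range
          admin.createTable(desc, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 10);

          if (admin.tableExists("testtable")) {
            admin.disableTable("testtable");   // a table must be disabled before it is dropped
            admin.deleteTable("testtable");
          }
        } finally {
          admin.close();
        }
      }
    }

Pre-splitting as above only makes sense when the key distribution over the start/end range is roughly uniform; otherwise explicit split keys give better balance.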

      Currently HBaseAdmin instances are not expected to be long-lived. For - * example, an HBaseAdmin instance will not ride over a Master restart. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class HBaseAdmin implements Abortable, Closeable { - private static final Log LOG = LogFactory.getLog(HBaseAdmin.class); - - // We use the implementation class rather then the interface because we - // need the package protected functions to get the connection to master - private HConnection connection; - - private volatile Configuration conf; - private final long pause; - private final int numRetries; - // Some operations can take a long time such as disable of big table. - // numRetries is for 'normal' stuff... Multiply by this factor when - // want to wait a long time. - private final int retryLongerMultiplier; - private boolean aborted; - - /** - * Constructor. - * See {@link #HBaseAdmin(HConnection connection)} - * - * @param c Configuration object. Copied internally. - */ - public HBaseAdmin(Configuration c) - throws MasterNotRunningException, ZooKeeperConnectionException { - // Will not leak connections, as the new implementation of the constructor - // does not throw exceptions anymore. - this(HConnectionManager.getConnection(new Configuration(c))); - } - - /** - * Constructor for externally managed HConnections. - * The connection to master will be created when required by admin functions. - * - * @param connection The HConnection instance to use - * @throws MasterNotRunningException, ZooKeeperConnectionException are not - * thrown anymore but kept into the interface for backward api compatibility - */ - public HBaseAdmin(HConnection connection) - throws MasterNotRunningException, ZooKeeperConnectionException { - this.conf = connection.getConfiguration(); - this.connection = connection; - - this.pause = this.conf.getLong("hbase.client.pause", 1000); - this.numRetries = this.conf.getInt("hbase.client.retries.number", 10); - this.retryLongerMultiplier = this.conf.getInt( - "hbase.client.retries.longer.multiplier", 10); - } - - /** - * @return A new CatalogTracker instance; call {@link #cleanupCatalogTracker(CatalogTracker)} - * to cleanup the returned catalog tracker. - * @throws ZooKeeperConnectionException - * @throws IOException - * @see #cleanupCatalogTracker(CatalogTracker) - */ - private synchronized CatalogTracker getCatalogTracker() - throws ZooKeeperConnectionException, IOException { - CatalogTracker ct = null; - try { - ct = new CatalogTracker(this.conf); - ct.start(); - } catch (InterruptedException e) { - // Let it out as an IOE for now until we redo all so tolerate IEs - Thread.currentThread().interrupt(); - throw new IOException("Interrupted", e); - } - return ct; - } - - private void cleanupCatalogTracker(final CatalogTracker ct) { - ct.stop(); - } - - @Override - public void abort(String why, Throwable e) { - // Currently does nothing but throw the passed message and exception - this.aborted = true; - throw new RuntimeException(why, e); - } - - @Override - public boolean isAborted(){ - return this.aborted; - } - - /** @return HConnection used by this object. */ - public HConnection getConnection() { - return connection; - } - - /** @return - true if the master server is running. Throws an exception - * otherwise. 
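When several short-lived HBaseAdmin instances are needed, the externally managed HConnection constructor above lets them share one connection. A minimal sketch, assuming the shared connection's own lifecycle is handled elsewhere:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.HConnectionManager;

    public class SharedConnectionAdmin {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // one shared connection; cleanup of the connection itself is elided in this sketch
        HConnection connection = HConnectionManager.getConnection(conf);
        HBaseAdmin admin = new HBaseAdmin(connection);
        try {
          System.out.println("tables visible: " + admin.listTables().length);
        } finally {
          admin.close();
        }
      }
    }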
- * @throws ZooKeeperConnectionException - * @throws MasterNotRunningException - */ - public boolean isMasterRunning() - throws MasterNotRunningException, ZooKeeperConnectionException { - return connection.isMasterRunning(); - } - - /** - * @param tableName Table to check. - * @return True if table exists already. - * @throws IOException - */ - public boolean tableExists(final String tableName) - throws IOException { - boolean b = false; - CatalogTracker ct = getCatalogTracker(); - try { - b = MetaReader.tableExists(ct, tableName); - } finally { - cleanupCatalogTracker(ct); - } - return b; - } - - /** - * @param tableName Table to check. - * @return True if table exists already. - * @throws IOException - */ - public boolean tableExists(final byte [] tableName) - throws IOException { - return tableExists(Bytes.toString(tableName)); - } - - /** - * List all the userspace tables. In other words, scan the META table. - * - * If we wanted this to be really fast, we could implement a special - * catalog table that just contains table names and their descriptors. - * Right now, it only exists as part of the META table's region info. - * - * @return - returns an array of HTableDescriptors - * @throws IOException if a remote or network exception occurs - */ - public HTableDescriptor[] listTables() throws IOException { - return this.connection.listTables(); - } - - /** - * List all the userspace tables matching the given pattern. - * - * @param pattern The compiled regular expression to match against - * @return - returns an array of HTableDescriptors - * @throws IOException if a remote or network exception occurs - * @see #listTables() - */ - public HTableDescriptor[] listTables(Pattern pattern) throws IOException { - List matched = new LinkedList(); - HTableDescriptor[] tables = listTables(); - for (HTableDescriptor table : tables) { - if (pattern.matcher(table.getNameAsString()).matches()) { - matched.add(table); - } - } - return matched.toArray(new HTableDescriptor[matched.size()]); - } - - /** - * List all the userspace tables matching the given regular expression. - * - * @param regex The regular expression to match against - * @return - returns an array of HTableDescriptors - * @throws IOException if a remote or network exception occurs - * @see #listTables(java.util.regex.Pattern) - */ - public HTableDescriptor[] listTables(String regex) throws IOException { - return listTables(Pattern.compile(regex)); - } - - - /** - * Method for getting the tableDescriptor - * @param tableName as a byte [] - * @return the tableDescriptor - * @throws TableNotFoundException - * @throws IOException if a remote or network exception occurs - */ - public HTableDescriptor getTableDescriptor(final byte [] tableName) - throws TableNotFoundException, IOException { - return this.connection.getHTableDescriptor(tableName); - } - - private long getPauseTime(int tries) { - int triesCount = tries; - if (triesCount >= HConstants.RETRY_BACKOFF.length) { - triesCount = HConstants.RETRY_BACKOFF.length - 1; - } - return this.pause * HConstants.RETRY_BACKOFF[triesCount]; - } - - /** - * Creates a new table. - * Synchronous operation. - * - * @param desc table descriptor for table - * - * @throws IllegalArgumentException if the table name is reserved - * @throws MasterNotRunningException if master is not running - * @throws TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence - * and attempt-at-creation). 
- * @throws IOException if a remote or network exception occurs - */ - public void createTable(HTableDescriptor desc) - throws IOException { - createTable(desc, null); - } - - /** - * Creates a new table with the specified number of regions. The start key - * specified will become the end key of the first region of the table, and - * the end key specified will become the start key of the last region of the - * table (the first region has a null start key and the last region has a - * null end key). - * - * BigInteger math will be used to divide the key range specified into - * enough segments to make the required number of total regions. - * - * Synchronous operation. - * - * @param desc table descriptor for table - * @param startKey beginning of key range - * @param endKey end of key range - * @param numRegions the total number of regions to create - * - * @throws IllegalArgumentException if the table name is reserved - * @throws MasterNotRunningException if master is not running - * @throws TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence - * and attempt-at-creation). - * @throws IOException - */ - public void createTable(HTableDescriptor desc, byte [] startKey, - byte [] endKey, int numRegions) - throws IOException { - HTableDescriptor.isLegalTableName(desc.getName()); - if(numRegions < 3) { - throw new IllegalArgumentException("Must create at least three regions"); - } else if(Bytes.compareTo(startKey, endKey) >= 0) { - throw new IllegalArgumentException("Start key must be smaller than end key"); - } - byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3); - if(splitKeys == null || splitKeys.length != numRegions - 1) { - throw new IllegalArgumentException("Unable to split key range into enough regions"); - } - createTable(desc, splitKeys); - } - - /** - * Creates a new table with an initial set of empty regions defined by the - * specified split keys. The total number of regions created will be the - * number of split keys plus one. Synchronous operation. - * Note : Avoid passing empty split key. - * - * @param desc table descriptor for table - * @param splitKeys array of split keys for the initial regions of the table - * - * @throws IllegalArgumentException if the table name is reserved, if the split keys - * are repeated and if the split key has empty byte array. - * @throws MasterNotRunningException if master is not running - * @throws TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence - * and attempt-at-creation). - * @throws IOException - */ - public void createTable(final HTableDescriptor desc, byte [][] splitKeys) - throws IOException { - HTableDescriptor.isLegalTableName(desc.getName()); - try { - createTableAsync(desc, splitKeys); - } catch (SocketTimeoutException ste) { - LOG.warn("Creating " + desc.getNameAsString() + " took too long", ste); - } - int numRegs = splitKeys == null ? 
1 : splitKeys.length + 1; - int prevRegCount = 0; - boolean doneWithMetaScan = false; - for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier; - ++tries) { - if (!doneWithMetaScan) { - // Wait for new table to come on-line - final AtomicInteger actualRegCount = new AtomicInteger(0); - MetaScannerVisitor visitor = new MetaScannerVisitorBase() { - @Override - public boolean processRow(Result rowResult) throws IOException { - HRegionInfo info = HRegionInfo.getHRegionInfo(rowResult); - if (info == null) { - LOG.warn("No serialized HRegionInfo in " + rowResult); - return true; - } - if (!(Bytes.equals(info.getTableName(), desc.getName()))) { - return false; - } - ServerName serverName = HRegionInfo.getServerName(rowResult); - // Make sure that regions are assigned to server - if (!(info.isOffline() || info.isSplit()) && serverName != null - && serverName.getHostAndPort() != null) { - actualRegCount.incrementAndGet(); - } - return true; - } - }; - MetaScanner.metaScan(conf, visitor, desc.getName()); - if (actualRegCount.get() != numRegs) { - if (tries == this.numRetries * this.retryLongerMultiplier - 1) { - throw new RegionOfflineException("Only " + actualRegCount.get() + - " of " + numRegs + " regions are online; retries exhausted."); - } - try { // Sleep - Thread.sleep(getPauseTime(tries)); - } catch (InterruptedException e) { - throw new InterruptedIOException("Interrupted when opening" + - " regions; " + actualRegCount.get() + " of " + numRegs + - " regions processed so far"); - } - if (actualRegCount.get() > prevRegCount) { // Making progress - prevRegCount = actualRegCount.get(); - tries = -1; - } - } else { - doneWithMetaScan = true; - tries = -1; - } - } else if (isTableEnabled(desc.getName())) { - return; - } else { - try { // Sleep - Thread.sleep(getPauseTime(tries)); - } catch (InterruptedException e) { - throw new InterruptedIOException("Interrupted when waiting" + - " for table to be enabled; meta scan was done"); - } - } - } - throw new TableNotEnabledException( - "Retries exhausted while still waiting for table: " - + desc.getNameAsString() + " to be enabled"); - } - - /** - * Creates a new table but does not block and wait for it to come online. - * Asynchronous operation. To check if the table exists, use - * {@link #isTableAvailable} -- it is not safe to create an HTable - * instance to this table before it is available. - * Note : Avoid passing empty split key. - * @param desc table descriptor for table - * - * @throws IllegalArgumentException Bad table name, if the split keys - * are repeated and if the split key has empty byte array. - * @throws MasterNotRunningException if master is not running - * @throws TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence - * and attempt-at-creation). 
- * @throws IOException - */ - public void createTableAsync( - final HTableDescriptor desc, final byte [][] splitKeys) - throws IOException { - HTableDescriptor.isLegalTableName(desc.getName()); - if(splitKeys != null && splitKeys.length > 0) { - Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR); - // Verify there are no duplicate split keys - byte [] lastKey = null; - for(byte [] splitKey : splitKeys) { - if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) { - throw new IllegalArgumentException( - "Empty split key must not be passed in the split keys."); - } - if(lastKey != null && Bytes.equals(splitKey, lastKey)) { - throw new IllegalArgumentException("All split keys must be unique, " + - "found duplicate: " + Bytes.toStringBinary(splitKey) + - ", " + Bytes.toStringBinary(lastKey)); - } - lastKey = splitKey; - } - } - - execute(new MasterAdminCallable() { - @Override - public Void call() throws ServiceException { - CreateTableRequest request = RequestConverter.buildCreateTableRequest(desc, splitKeys); - masterAdmin.createTable(null, request); - return null; - } - }); - } - - /** - * Deletes a table. - * Synchronous operation. - * - * @param tableName name of table to delete - * @throws IOException if a remote or network exception occurs - */ - public void deleteTable(final String tableName) throws IOException { - deleteTable(Bytes.toBytes(tableName)); - } - - /** - * Deletes a table. - * Synchronous operation. - * - * @param tableName name of table to delete - * @throws IOException if a remote or network exception occurs - */ - public void deleteTable(final byte [] tableName) throws IOException { - HTableDescriptor.isLegalTableName(tableName); - HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName); - boolean tableExists = true; - - execute(new MasterAdminCallable() { - @Override - public Void call() throws ServiceException { - DeleteTableRequest req = RequestConverter.buildDeleteTableRequest(tableName); - masterAdmin.deleteTable(null,req); - return null; - } - }); - - // Wait until all regions deleted - ClientProtocol server = - connection.getClient(firstMetaServer.getHostname(), firstMetaServer.getPort()); - for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) { - try { - - Scan scan = MetaReader.getScanForTableName(tableName); - scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - ScanRequest request = RequestConverter.buildScanRequest( - firstMetaServer.getRegionInfo().getRegionName(), scan, 1, true); - Result[] values = null; - // Get a batch at a time. - try { - ScanResponse response = server.scan(null, request); - values = ResponseConverter.getResults(response); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - - // let us wait until .META. 
table is updated and - // HMaster removes the table from its HTableDescriptors - if (values == null || values.length == 0) { - tableExists = false; - GetTableDescriptorsResponse htds; - MasterMonitorKeepAliveConnection master = connection.getKeepAliveMasterMonitor(); - try { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(null); - htds = master.getTableDescriptors(null, req); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - for (TableSchema ts : htds.getTableSchemaList()) { - if (Bytes.equals(tableName, ts.getName().toByteArray())) { - tableExists = true; - break; - } - } - if (!tableExists) { - break; - } - } - } catch (IOException ex) { - if(tries == numRetries - 1) { // no more tries left - if (ex instanceof RemoteException) { - throw ((RemoteException) ex).unwrapRemoteException(); - }else { - throw ex; - } - } - } - try { - Thread.sleep(getPauseTime(tries)); - } catch (InterruptedException e) { - // continue - } - } - - if (tableExists) { - throw new IOException("Retries exhausted, it took too long to wait"+ - " for the table " + Bytes.toString(tableName) + " to be deleted."); - } - // Delete cached information to prevent clients from using old locations - this.connection.clearRegionCache(tableName); - LOG.info("Deleted " + Bytes.toString(tableName)); - } - - /** - * Deletes tables matching the passed in pattern and wait on completion. - * - * Warning: Use this method carefully, there is no prompting and the effect is - * immediate. Consider using {@link #listTables(java.lang.String)} and - * {@link #deleteTable(byte[])} - * - * @param regex The regular expression to match table names against - * @return Table descriptors for tables that couldn't be deleted - * @throws IOException - * @see #deleteTables(java.util.regex.Pattern) - * @see #deleteTable(java.lang.String) - */ - public HTableDescriptor[] deleteTables(String regex) throws IOException { - return deleteTables(Pattern.compile(regex)); - } - - /** - * Delete tables matching the passed in pattern and wait on completion. - * - * Warning: Use this method carefully, there is no prompting and the effect is - * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and - * {@link #deleteTable(byte[])} - * - * @param pattern The pattern to match table names against - * @return Table descriptors for tables that couldn't be deleted - * @throws IOException - */ - public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException { - List failed = new LinkedList(); - for (HTableDescriptor table : listTables(pattern)) { - try { - deleteTable(table.getName()); - } catch (IOException ex) { - LOG.info("Failed to delete table " + table.getNameAsString(), ex); - failed.add(table); - } - } - return failed.toArray(new HTableDescriptor[failed.size()]); - } - - - public void enableTable(final String tableName) - throws IOException { - enableTable(Bytes.toBytes(tableName)); - } - - /** - * Enable a table. May timeout. Use {@link #enableTableAsync(byte[])} - * and {@link #isTableEnabled(byte[])} instead. - * The table has to be in disabled state for it to be enabled. - * @param tableName name of the table - * @throws IOException if a remote or network exception occurs - * There could be couple types of IOException - * TableNotFoundException means the table doesn't exist. - * TableNotDisabledException means the table isn't in disabled state. 
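Because enableTable above blocks until every region is online (or retries are exhausted), a common defensive pattern is to enable only when the table is actually disabled. A small sketch with a hypothetical helper name:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class EnableIfDisabled {
      // enableTable blocks until all regions are online, so only call it
      // when the table really is in the disabled state
      public static void enableIfDisabled(HBaseAdmin admin, String tableName) throws IOException {
        if (admin.isTableDisabled(tableName)) {
          admin.enableTable(tableName);
        }
      }
    }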
- * @see #isTableEnabled(byte[]) - * @see #disableTable(byte[]) - * @see #enableTableAsync(byte[]) - */ - public void enableTable(final byte [] tableName) - throws IOException { - enableTableAsync(tableName); - - // Wait until all regions are enabled - boolean enabled = false; - for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) { - enabled = isTableEnabled(tableName); - if (enabled) { - break; - } - long sleep = getPauseTime(tries); - if (LOG.isDebugEnabled()) { - LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " + - "enabled in " + Bytes.toString(tableName)); - } - try { - Thread.sleep(sleep); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - // Do this conversion rather than let it out because do not want to - // change the method signature. - throw new IOException("Interrupted", e); - } - } - if (!enabled) { - throw new IOException("Unable to enable table " + - Bytes.toString(tableName)); - } - LOG.info("Enabled table " + Bytes.toString(tableName)); - } - - public void enableTableAsync(final String tableName) - throws IOException { - enableTableAsync(Bytes.toBytes(tableName)); - } - - /** - * Brings a table on-line (enables it). Method returns immediately though - * enable of table may take some time to complete, especially if the table - * is large (All regions are opened as part of enabling process). Check - * {@link #isTableEnabled(byte[])} to learn when table is fully online. If - * table is taking too long to online, check server logs. - * @param tableName - * @throws IOException - * @since 0.90.0 - */ - public void enableTableAsync(final byte [] tableName) - throws IOException { - HTableDescriptor.isLegalTableName(tableName); - execute(new MasterAdminCallable() { - @Override - public Void call() throws ServiceException { - LOG.info("Started enable of " + Bytes.toString(tableName)); - EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName); - masterAdmin.enableTable(null,req); - return null; - } - }); - } - - /** - * Enable tables matching the passed in pattern and wait on completion. - * - * Warning: Use this method carefully, there is no prompting and the effect is - * immediate. Consider using {@link #listTables(java.lang.String)} and - * {@link #enableTable(byte[])} - * - * @param regex The regular expression to match table names against - * @throws IOException - * @see #enableTables(java.util.regex.Pattern) - * @see #enableTable(java.lang.String) - */ - public HTableDescriptor[] enableTables(String regex) throws IOException { - return enableTables(Pattern.compile(regex)); - } - - /** - * Enable tables matching the passed in pattern and wait on completion. - * - * Warning: Use this method carefully, there is no prompting and the effect is - * immediate. 
Consider using {@link #listTables(java.util.regex.Pattern) } and - * {@link #enableTable(byte[])} - * - * @param pattern The pattern to match table names against - * @throws IOException - */ - public HTableDescriptor[] enableTables(Pattern pattern) throws IOException { - List failed = new LinkedList(); - for (HTableDescriptor table : listTables(pattern)) { - if (isTableDisabled(table.getName())) { - try { - enableTable(table.getName()); - } catch (IOException ex) { - LOG.info("Failed to enable table " + table.getNameAsString(), ex); - failed.add(table); - } - } - } - return failed.toArray(new HTableDescriptor[failed.size()]); - } - - public void disableTableAsync(final String tableName) throws IOException { - disableTableAsync(Bytes.toBytes(tableName)); - } - - /** - * Starts the disable of a table. If it is being served, the master - * will tell the servers to stop serving it. This method returns immediately. - * The disable of a table can take some time if the table is large (all - * regions are closed as part of table disable operation). - * Call {@link #isTableDisabled(byte[])} to check for when disable completes. - * If table is taking too long to online, check server logs. - * @param tableName name of table - * @throws IOException if a remote or network exception occurs - * @see #isTableDisabled(byte[]) - * @see #isTableEnabled(byte[]) - * @since 0.90.0 - */ - public void disableTableAsync(final byte [] tableName) throws IOException { - HTableDescriptor.isLegalTableName(tableName); - execute(new MasterAdminCallable() { - @Override - public Void call() throws ServiceException { - LOG.info("Started disable of " + Bytes.toString(tableName)); - DisableTableRequest req = RequestConverter.buildDisableTableRequest(tableName); - masterAdmin.disableTable(null,req); - return null; - } - }); - } - - public void disableTable(final String tableName) - throws IOException { - disableTable(Bytes.toBytes(tableName)); - } - - /** - * Disable table and wait on completion. May timeout eventually. Use - * {@link #disableTableAsync(byte[])} and {@link #isTableDisabled(String)} - * instead. - * The table has to be in enabled state for it to be disabled. - * @param tableName - * @throws IOException - * There could be couple types of IOException - * TableNotFoundException means the table doesn't exist. - * TableNotEnabledException means the table isn't in enabled state. - */ - public void disableTable(final byte [] tableName) - throws IOException { - disableTableAsync(tableName); - // Wait until table is disabled - boolean disabled = false; - for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) { - disabled = isTableDisabled(tableName); - if (disabled) { - break; - } - long sleep = getPauseTime(tries); - if (LOG.isDebugEnabled()) { - LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " + - "disabled in " + Bytes.toString(tableName)); - } - try { - Thread.sleep(sleep); - } catch (InterruptedException e) { - // Do this conversion rather than let it out because do not want to - // change the method signature. - Thread.currentThread().interrupt(); - throw new IOException("Interrupted", e); - } - } - if (!disabled) { - throw new RegionException("Retries exhausted, it took too long to wait"+ - " for the table " + Bytes.toString(tableName) + " to be disabled."); - } - LOG.info("Disabled " + Bytes.toString(tableName)); - } - - /** - * Disable tables matching the passed in pattern and wait on completion. 
- * - * Warning: Use this method carefully, there is no prompting and the effect is - * immediate. Consider using {@link #listTables(java.lang.String)} and - * {@link #disableTable(byte[])} - * - * @param regex The regular expression to match table names against - * @return Table descriptors for tables that couldn't be disabled - * @throws IOException - * @see #disableTables(java.util.regex.Pattern) - * @see #disableTable(java.lang.String) - */ - public HTableDescriptor[] disableTables(String regex) throws IOException { - return disableTables(Pattern.compile(regex)); - } - - /** - * Disable tables matching the passed in pattern and wait on completion. - * - * Warning: Use this method carefully, there is no prompting and the effect is - * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and - * {@link #disableTable(byte[])} - * - * @param pattern The pattern to match table names against - * @return Table descriptors for tables that couldn't be disabled - * @throws IOException - */ - public HTableDescriptor[] disableTables(Pattern pattern) throws IOException { - List failed = new LinkedList(); - for (HTableDescriptor table : listTables(pattern)) { - if (isTableEnabled(table.getName())) { - try { - disableTable(table.getName()); - } catch (IOException ex) { - LOG.info("Failed to disable table " + table.getNameAsString(), ex); - failed.add(table); - } - } - } - return failed.toArray(new HTableDescriptor[failed.size()]); - } - - /** - * @param tableName name of table to check - * @return true if table is on-line - * @throws IOException if a remote or network exception occurs - */ - public boolean isTableEnabled(String tableName) throws IOException { - return isTableEnabled(Bytes.toBytes(tableName)); - } - /** - * @param tableName name of table to check - * @return true if table is on-line - * @throws IOException if a remote or network exception occurs - */ - public boolean isTableEnabled(byte[] tableName) throws IOException { - if (!HTableDescriptor.isMetaTable(tableName)) { - HTableDescriptor.isLegalTableName(tableName); - } - return connection.isTableEnabled(tableName); - } - - /** - * @param tableName name of table to check - * @return true if table is off-line - * @throws IOException if a remote or network exception occurs - */ - public boolean isTableDisabled(final String tableName) throws IOException { - return isTableDisabled(Bytes.toBytes(tableName)); - } - - /** - * @param tableName name of table to check - * @return true if table is off-line - * @throws IOException if a remote or network exception occurs - */ - public boolean isTableDisabled(byte[] tableName) throws IOException { - if (!HTableDescriptor.isMetaTable(tableName)) { - HTableDescriptor.isLegalTableName(tableName); - } - return connection.isTableDisabled(tableName); - } - - /** - * @param tableName name of table to check - * @return true if all regions of the table are available - * @throws IOException if a remote or network exception occurs - */ - public boolean isTableAvailable(byte[] tableName) throws IOException { - return connection.isTableAvailable(tableName); - } - - /** - * @param tableName name of table to check - * @return true if all regions of the table are available - * @throws IOException if a remote or network exception occurs - */ - public boolean isTableAvailable(String tableName) throws IOException { - return connection.isTableAvailable(Bytes.toBytes(tableName)); - } - - /** - * Get the status of alter command - indicates how many regions have received - * the updated schema 
Asynchronous operation. - * - * @param tableName - * name of the table to get the status of - * @return Pair indicating the number of regions updated Pair.getFirst() is the - * regions that are yet to be updated Pair.getSecond() is the total number - * of regions of the table - * @throws IOException - * if a remote or network exception occurs - */ - public Pair getAlterStatus(final byte[] tableName) - throws IOException { - HTableDescriptor.isLegalTableName(tableName); - return execute(new MasterMonitorCallable>() { - @Override - public Pair call() throws ServiceException { - GetSchemaAlterStatusRequest req = RequestConverter - .buildGetSchemaAlterStatusRequest(tableName); - GetSchemaAlterStatusResponse ret = masterMonitor.getSchemaAlterStatus(null, req); - Pair pair = new Pair(Integer.valueOf(ret - .getYetToUpdateRegions()), Integer.valueOf(ret.getTotalRegions())); - return pair; - } - }); - } - - /** - * Add a column to an existing table. - * Asynchronous operation. - * - * @param tableName name of the table to add column to - * @param column column descriptor of column to be added - * @throws IOException if a remote or network exception occurs - */ - public void addColumn(final String tableName, HColumnDescriptor column) - throws IOException { - addColumn(Bytes.toBytes(tableName), column); - } - - /** - * Add a column to an existing table. - * Asynchronous operation. - * - * @param tableName name of the table to add column to - * @param column column descriptor of column to be added - * @throws IOException if a remote or network exception occurs - */ - public void addColumn(final byte [] tableName, final HColumnDescriptor column) - throws IOException { - execute(new MasterAdminCallable() { - @Override - public Void call() throws ServiceException { - AddColumnRequest req = RequestConverter.buildAddColumnRequest(tableName, column); - masterAdmin.addColumn(null,req); - return null; - } - }); - } - - /** - * Delete a column from a table. - * Asynchronous operation. - * - * @param tableName name of table - * @param columnName name of column to be deleted - * @throws IOException if a remote or network exception occurs - */ - public void deleteColumn(final String tableName, final String columnName) - throws IOException { - deleteColumn(Bytes.toBytes(tableName), Bytes.toBytes(columnName)); - } - - /** - * Delete a column from a table. - * Asynchronous operation. - * - * @param tableName name of table - * @param columnName name of column to be deleted - * @throws IOException if a remote or network exception occurs - */ - public void deleteColumn(final byte [] tableName, final byte [] columnName) - throws IOException { - execute(new MasterAdminCallable() { - @Override - public Void call() throws ServiceException { - DeleteColumnRequest req = RequestConverter.buildDeleteColumnRequest(tableName, columnName); - masterAdmin.deleteColumn(null,req); - return null; - } - }); - } - - /** - * Modify an existing column family on a table. - * Asynchronous operation. - * - * @param tableName name of table - * @param descriptor new column descriptor to use - * @throws IOException if a remote or network exception occurs - */ - public void modifyColumn(final String tableName, HColumnDescriptor descriptor) - throws IOException { - modifyColumn(Bytes.toBytes(tableName), descriptor); - } - - - - /** - * Modify an existing column family on a table. - * Asynchronous operation. 
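
A sketch of a column-family change using the schema methods above; the table and family names are placeholders and a method declared throws Exception is assumed. Disabling first reflects the default offline schema-change path of this era:

    HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
    try {
      byte[] tableName = Bytes.toBytes("t1");
      admin.disableTable(tableName);                             // offline alter; online alter is off by default
      admin.addColumn(tableName, new HColumnDescriptor("cf2"));  // add a new family
      HColumnDescriptor cf1 = new HColumnDescriptor("cf1");
      cf1.setMaxVersions(5);
      admin.modifyColumn(tableName, cf1);                        // change an existing family
      admin.enableTable(tableName);
      // For online alters, progress can be polled: first = regions still to update, second = total regions.
      Pair<Integer, Integer> progress = admin.getAlterStatus(tableName);
      System.out.println(progress.getFirst() + "/" + progress.getSecond() + " regions still to update");
    } finally {
      admin.close();
    }
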
- * - * @param tableName name of table - * @param descriptor new column descriptor to use - * @throws IOException if a remote or network exception occurs - */ - public void modifyColumn(final byte [] tableName, final HColumnDescriptor descriptor) - throws IOException { - execute(new MasterAdminCallable() { - @Override - public Void call() throws ServiceException { - ModifyColumnRequest req = RequestConverter.buildModifyColumnRequest(tableName, descriptor); - masterAdmin.modifyColumn(null,req); - return null; - } - }); - } - - /** - * Close a region. For expert-admins. Runs close on the regionserver. The - * master will not be informed of the close. - * @param regionname region name to close - * @param serverName If supplied, we'll use this location rather than - * the one currently in .META. - * @throws IOException if a remote or network exception occurs - */ - public void closeRegion(final String regionname, final String serverName) - throws IOException { - closeRegion(Bytes.toBytes(regionname), serverName); - } - - /** - * Close a region. For expert-admins Runs close on the regionserver. The - * master will not be informed of the close. - * @param regionname region name to close - * @param serverName The servername of the regionserver. If passed null we - * will use servername found in the .META. table. A server name - * is made of host, port and startcode. Here is an example: - * host187.example.com,60020,1289493121758 - * @throws IOException if a remote or network exception occurs - */ - public void closeRegion(final byte [] regionname, final String serverName) - throws IOException { - CatalogTracker ct = getCatalogTracker(); - try { - if (serverName != null) { - Pair pair = MetaReader.getRegion(ct, regionname); - if (pair == null || pair.getFirst() == null) { - throw new UnknownRegionException(Bytes.toStringBinary(regionname)); - } else { - closeRegion(new ServerName(serverName), pair.getFirst()); - } - } else { - Pair pair = MetaReader.getRegion(ct, regionname); - if (pair == null) { - throw new UnknownRegionException(Bytes.toStringBinary(regionname)); - } else if (pair.getSecond() == null) { - throw new NoServerForRegionException(Bytes.toStringBinary(regionname)); - } else { - closeRegion(pair.getSecond(), pair.getFirst()); - } - } - } finally { - cleanupCatalogTracker(ct); - } - } - - /** - * For expert-admins. Runs close on the regionserver. Closes a region based on - * the encoded region name. The region server name is mandatory. If the - * servername is provided then based on the online regions in the specified - * regionserver the specified region will be closed. The master will not be - * informed of the close. Note that the regionname is the encoded regionname. - * - * @param encodedRegionName - * The encoded region name; i.e. the hash that makes up the region - * name suffix: e.g. if regionname is - * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396. - * , then the encoded region name is: - * 527db22f95c8a9e0116f0cc13c680396. - * @param serverName - * The servername of the regionserver. A server name is made of host, - * port and startcode. This is mandatory. Here is an example: - * host187.example.com,60020,1289493121758 - * @return true if the region was closed, false if not. 
- * @throws IOException - * if a remote or network exception occurs - */ - public boolean closeRegionWithEncodedRegionName(final String encodedRegionName, - final String serverName) throws IOException { - if (null == serverName || ("").equals(serverName.trim())) { - throw new IllegalArgumentException( - "The servername cannot be null or empty."); - } - ServerName sn = new ServerName(serverName); - AdminProtocol admin = this.connection.getAdmin( - sn.getHostname(), sn.getPort()); - // Close the region without updating zk state. - CloseRegionRequest request = - RequestConverter.buildCloseRegionRequest(encodedRegionName, false); - try { - CloseRegionResponse response = admin.closeRegion(null, request); - boolean isRegionClosed = response.getClosed(); - if (false == isRegionClosed) { - LOG.error("Not able to close the region " + encodedRegionName + "."); - } - return isRegionClosed; - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - - /** - * Close a region. For expert-admins Runs close on the regionserver. The - * master will not be informed of the close. - * @param sn - * @param hri - * @throws IOException - */ - public void closeRegion(final ServerName sn, final HRegionInfo hri) - throws IOException { - AdminProtocol admin = - this.connection.getAdmin(sn.getHostname(), sn.getPort()); - // Close the region without updating zk state. - ProtobufUtil.closeRegion(admin, hri.getRegionName(), false); - } - - /** - * Get all the online regions on a region server. - */ - public List getOnlineRegions( - final ServerName sn) throws IOException { - AdminProtocol admin = - this.connection.getAdmin(sn.getHostname(), sn.getPort()); - return ProtobufUtil.getOnlineRegions(admin); - } - - /** - * Flush a table or an individual region. - * Synchronous operation. - * - * @param tableNameOrRegionName table or region to flush - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - public void flush(final String tableNameOrRegionName) - throws IOException, InterruptedException { - flush(Bytes.toBytes(tableNameOrRegionName)); - } - - /** - * Flush a table or an individual region. - * Synchronous operation. 
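
A sketch of listing a region server's online regions and flushing a table with the methods above; the server name uses the placeholder host,port,startcode example from the javadoc and "t1" is a placeholder table:

    HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
    try {
      ServerName sn = new ServerName("host187.example.com,60020,1289493121758");
      for (HRegionInfo region : admin.getOnlineRegions(sn)) {
        System.out.println(region.getRegionNameAsString());
      }
      admin.flush("t1");   // synchronous: flushes every region of table t1
    } finally {
      admin.close();
    }
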
- * - * @param tableNameOrRegionName table or region to flush - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - public void flush(final byte [] tableNameOrRegionName) - throws IOException, InterruptedException { - CatalogTracker ct = getCatalogTracker(); - try { - Pair regionServerPair - = getRegion(tableNameOrRegionName, ct); - if (regionServerPair != null) { - if (regionServerPair.getSecond() == null) { - throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName)); - } else { - flush(regionServerPair.getSecond(), regionServerPair.getFirst()); - } - } else { - final String tableName = tableNameString(tableNameOrRegionName, ct); - List> pairs = - MetaReader.getTableRegionsAndLocations(ct, - tableName); - for (Pair pair: pairs) { - if (pair.getFirst().isOffline()) continue; - if (pair.getSecond() == null) continue; - try { - flush(pair.getSecond(), pair.getFirst()); - } catch (NotServingRegionException e) { - if (LOG.isDebugEnabled()) { - LOG.debug("Trying to flush " + pair.getFirst() + ": " + - StringUtils.stringifyException(e)); - } - } - } - } - } finally { - cleanupCatalogTracker(ct); - } - } - - private void flush(final ServerName sn, final HRegionInfo hri) - throws IOException { - AdminProtocol admin = - this.connection.getAdmin(sn.getHostname(), sn.getPort()); - FlushRegionRequest request = - RequestConverter.buildFlushRegionRequest(hri.getRegionName()); - try { - admin.flushRegion(null, request); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - - /** - * Compact a table or an individual region. - * Asynchronous operation. - * - * @param tableNameOrRegionName table or region to compact - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - public void compact(final String tableNameOrRegionName) - throws IOException, InterruptedException { - compact(Bytes.toBytes(tableNameOrRegionName)); - } - - /** - * Compact a table or an individual region. - * Asynchronous operation. - * - * @param tableNameOrRegionName table or region to compact - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - public void compact(final byte [] tableNameOrRegionName) - throws IOException, InterruptedException { - compact(tableNameOrRegionName, null, false); - } - - /** - * Compact a column family within a table or region. - * Asynchronous operation. - * - * @param tableOrRegionName table or region to compact - * @param columnFamily column family within a table or region - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - public void compact(String tableOrRegionName, String columnFamily) - throws IOException, InterruptedException { - compact(Bytes.toBytes(tableOrRegionName), Bytes.toBytes(columnFamily)); - } - - /** - * Compact a column family within a table or region. - * Asynchronous operation. - * - * @param tableNameOrRegionName table or region to compact - * @param columnFamily column family within a table or region - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - public void compact(final byte [] tableNameOrRegionName, final byte[] columnFamily) - throws IOException, InterruptedException { - compact(tableNameOrRegionName, columnFamily, false); - } - - /** - * Major compact a table or an individual region. - * Asynchronous operation. 
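
A sketch of the compaction entry points above; table "t1" and family "cf1" are placeholders:

    HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
    try {
      admin.compact("t1");          // ask every region of t1 to compact (asynchronous)
      admin.compact("t1", "cf1");   // restrict the compaction to one column family
      admin.majorCompact("t1");     // queue a major compaction instead
    } finally {
      admin.close();
    }
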
- * - * @param tableNameOrRegionName table or region to major compact - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - public void majorCompact(final String tableNameOrRegionName) - throws IOException, InterruptedException { - majorCompact(Bytes.toBytes(tableNameOrRegionName)); - } - - /** - * Major compact a table or an individual region. - * Asynchronous operation. - * - * @param tableNameOrRegionName table or region to major compact - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - public void majorCompact(final byte [] tableNameOrRegionName) - throws IOException, InterruptedException { - compact(tableNameOrRegionName, null, true); - } - - /** - * Major compact a column family within a table or region. - * Asynchronous operation. - * - * @param tableNameOrRegionName table or region to major compact - * @param columnFamily column family within a table or region - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - public void majorCompact(final String tableNameOrRegionName, - final String columnFamily) throws IOException, InterruptedException { - majorCompact(Bytes.toBytes(tableNameOrRegionName), - Bytes.toBytes(columnFamily)); - } - - /** - * Major compact a column family within a table or region. - * Asynchronous operation. - * - * @param tableNameOrRegionName table or region to major compact - * @param columnFamily column family within a table or region - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - public void majorCompact(final byte [] tableNameOrRegionName, - final byte[] columnFamily) throws IOException, InterruptedException { - compact(tableNameOrRegionName, columnFamily, true); - } - - /** - * Compact a table or an individual region. - * Asynchronous operation. - * - * @param tableNameOrRegionName table or region to compact - * @param columnFamily column family within a table or region - * @param major True if we are to do a major compaction. - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - private void compact(final byte [] tableNameOrRegionName, - final byte[] columnFamily,final boolean major) - throws IOException, InterruptedException { - CatalogTracker ct = getCatalogTracker(); - try { - Pair regionServerPair - = getRegion(tableNameOrRegionName, ct); - if (regionServerPair != null) { - if (regionServerPair.getSecond() == null) { - throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName)); - } else { - compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily); - } - } else { - final String tableName = tableNameString(tableNameOrRegionName, ct); - List> pairs = - MetaReader.getTableRegionsAndLocations(ct, - tableName); - for (Pair pair: pairs) { - if (pair.getFirst().isOffline()) continue; - if (pair.getSecond() == null) continue; - try { - compact(pair.getSecond(), pair.getFirst(), major, columnFamily); - } catch (NotServingRegionException e) { - if (LOG.isDebugEnabled()) { - LOG.debug("Trying to" + (major ? 
" major" : "") + " compact " + - pair.getFirst() + ": " + - StringUtils.stringifyException(e)); - } - } - } - } - } finally { - cleanupCatalogTracker(ct); - } - } - - private void compact(final ServerName sn, final HRegionInfo hri, - final boolean major, final byte [] family) - throws IOException { - AdminProtocol admin = - this.connection.getAdmin(sn.getHostname(), sn.getPort()); - CompactRegionRequest request = - RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family); - try { - admin.compactRegion(null, request); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - - /** - * Move the region r to dest. - * @param encodedRegionName The encoded region name; i.e. the hash that makes - * up the region name suffix: e.g. if regionname is - * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., - * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396. - * @param destServerName The servername of the destination regionserver. If - * passed the empty byte array we'll assign to a random server. A server name - * is made of host, port and startcode. Here is an example: - * host187.example.com,60020,1289493121758 - * @throws UnknownRegionException Thrown if we can't find a region named - * encodedRegionName - * @throws ZooKeeperConnectionException - * @throws MasterNotRunningException - */ - public void move(final byte [] encodedRegionName, final byte [] destServerName) - throws UnknownRegionException, MasterNotRunningException, ZooKeeperConnectionException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); - try { - MoveRegionRequest request = RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName); - master.moveRegion(null,request); - } catch (ServiceException se) { - IOException ioe = ProtobufUtil.getRemoteException(se); - if (ioe instanceof UnknownRegionException) { - throw (UnknownRegionException)ioe; - } - LOG.error("Unexpected exception: " + se + " from calling HMaster.moveRegion"); - } catch (DeserializationException de) { - LOG.error("Could not parse destination server name: " + de); - } - finally { - master.close(); - } - } - - /** - * @param regionName - * Region name to assign. - * @throws MasterNotRunningException - * @throws ZooKeeperConnectionException - * @throws IOException - */ - public void assign(final byte[] regionName) throws MasterNotRunningException, - ZooKeeperConnectionException, IOException { - execute(new MasterAdminCallable() { - @Override - public Void call() throws ServiceException { - AssignRegionRequest request = RequestConverter.buildAssignRegionRequest(regionName); - masterAdmin.assignRegion(null,request); - return null; - } - }); - } - - /** - * Unassign a region from current hosting regionserver. Region will then be - * assigned to a regionserver chosen at random. Region could be reassigned - * back to the same server. Use {@link #move(byte[], byte[])} if you want - * to control the region movement. - * @param regionName Region to unassign. Will clear any existing RegionPlan - * if one found. - * @param force If true, force unassign (Will remove region from - * regions-in-transition too if present. If results in double assignment - * use hbck -fix to resolve. To be used by experts). 
- * @throws MasterNotRunningException - * @throws ZooKeeperConnectionException - * @throws IOException - */ - public void unassign(final byte [] regionName, final boolean force) - throws MasterNotRunningException, ZooKeeperConnectionException, IOException { - execute(new MasterAdminCallable() { - @Override - public Void call() throws ServiceException { - UnassignRegionRequest request = - RequestConverter.buildUnassignRegionRequest(regionName, force); - masterAdmin.unassignRegion(null,request); - return null; - } - }); - } - - /** - * Special method, only used by hbck. - */ - public void offline(final byte [] regionName) - throws IOException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); - try { - master.offlineRegion(null,RequestConverter.buildOfflineRegionRequest(regionName)); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - } - - /** - * Turn the load balancer on or off. - * @param on If true, enable balancer. If false, disable balancer. - * @param synchronous If true, it waits until current balance() call, if outstanding, to return. - * @return Previous balancer value - */ - public boolean setBalancerRunning(final boolean on, final boolean synchronous) - throws MasterNotRunningException, ZooKeeperConnectionException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); - try { - SetBalancerRunningRequest req = - RequestConverter.buildSetBalancerRunningRequest(on, synchronous); - return master.setBalancerRunning(null, req).getPrevBalanceValue(); - } catch (ServiceException se) { - IOException ioe = ProtobufUtil.getRemoteException(se); - if (ioe instanceof MasterNotRunningException) { - throw (MasterNotRunningException)ioe; - } - if (ioe instanceof ZooKeeperConnectionException) { - throw (ZooKeeperConnectionException)ioe; - } - - // Throwing MasterNotRunningException even though not really valid in order to not - // break interface by adding additional exception type. - throw new MasterNotRunningException("Unexpected exception when calling balanceSwitch",se); - } finally { - master.close(); - } - } - - /** - * Invoke the balancer. Will run the balancer and if regions to move, it will - * go ahead and do the reassignments. Can NOT run for various reasons. Check - * logs. - * @return True if balancer ran, false otherwise. 
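
A sketch of driving the balancer switch and a single balance pass as described above:

    HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
    try {
      boolean previous = admin.setBalancerRunning(true, true);  // enable; wait for any in-flight balance()
      boolean ran = admin.balancer();                           // ask the master for one balance pass
      System.out.println("balancer was " + (previous ? "on" : "off") + ", ran now: " + ran);
    } finally {
      admin.close();
    }
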
- */ - public boolean balancer() - throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); - try { - return master.balance(null,RequestConverter.buildBalanceRequest()).getBalancerRan(); - } finally { - master.close(); - } - } - - /** - * Enable/Disable the catalog janitor - * @param enable if true enables the catalog janitor - * @return the previous state - * @throws ServiceException - * @throws MasterNotRunningException - */ - public boolean enableCatalogJanitor(boolean enable) - throws ServiceException, MasterNotRunningException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); - try { - return master.enableCatalogJanitor(null, - RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue(); - } finally { - master.close(); - } - } - - /** - * Ask for a scan of the catalog table - * @return the number of entries cleaned - * @throws ServiceException - * @throws MasterNotRunningException - */ - public int runCatalogScan() throws ServiceException, MasterNotRunningException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); - try { - return master.runCatalogScan(null, - RequestConverter.buildCatalogScanRequest()).getScanResult(); - } finally { - master.close(); - } - } - - /** - * Query on the catalog janitor state (Enabled/Disabled?) - * @throws ServiceException - * @throws MasterNotRunningException - */ - public boolean isCatalogJanitorEnabled() throws ServiceException, MasterNotRunningException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); - try { - return master.isCatalogJanitorEnabled(null, - RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue(); - } finally { - master.close(); - } - } - - /** - * Split a table or an individual region. - * Asynchronous operation. - * - * @param tableNameOrRegionName table or region to split - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - public void split(final String tableNameOrRegionName) - throws IOException, InterruptedException { - split(Bytes.toBytes(tableNameOrRegionName)); - } - - /** - * Split a table or an individual region. Implicitly finds an optimal split - * point. Asynchronous operation. - * - * @param tableNameOrRegionName table to region to split - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - */ - public void split(final byte [] tableNameOrRegionName) - throws IOException, InterruptedException { - split(tableNameOrRegionName, null); - } - - public void split(final String tableNameOrRegionName, - final String splitPoint) throws IOException, InterruptedException { - split(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(splitPoint)); - } - - /** - * Split a table or an individual region. - * Asynchronous operation. 
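
A sketch of the catalog-janitor controls above, pausing the janitor around a manual catalog scan; error handling is omitted:

    HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
    try {
      boolean wasEnabled = admin.enableCatalogJanitor(false);   // pause automatic cleanup
      try {
        int cleaned = admin.runCatalogScan();                   // one manual scan of the catalog table
        System.out.println("cleaned " + cleaned + " entries");
      } finally {
        admin.enableCatalogJanitor(wasEnabled);                 // restore the previous state
      }
      System.out.println("janitor enabled: " + admin.isCatalogJanitorEnabled());
    } finally {
      admin.close();
    }
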
- * - * @param tableNameOrRegionName table to region to split - * @param splitPoint the explicit position to split on - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException interrupt exception occurred - */ - public void split(final byte [] tableNameOrRegionName, - final byte [] splitPoint) throws IOException, InterruptedException { - CatalogTracker ct = getCatalogTracker(); - try { - Pair regionServerPair - = getRegion(tableNameOrRegionName, ct); - if (regionServerPair != null) { - if (regionServerPair.getSecond() == null) { - throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName)); - } else { - split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint); - } - } else { - final String tableName = tableNameString(tableNameOrRegionName, ct); - List> pairs = - MetaReader.getTableRegionsAndLocations(ct, - tableName); - for (Pair pair: pairs) { - // May not be a server for a particular row - if (pair.getSecond() == null) continue; - HRegionInfo r = pair.getFirst(); - // check for parents - if (r.isSplitParent()) continue; - // if a split point given, only split that particular region - if (splitPoint != null && !r.containsRow(splitPoint)) continue; - // call out to region server to do split now - split(pair.getSecond(), pair.getFirst(), splitPoint); - } - } - } finally { - cleanupCatalogTracker(ct); - } - } - - private void split(final ServerName sn, final HRegionInfo hri, - byte[] splitPoint) throws IOException { - AdminProtocol admin = - this.connection.getAdmin(sn.getHostname(), sn.getPort()); - ProtobufUtil.split(admin, hri, splitPoint); - } - - /** - * Modify an existing table, more IRB friendly version. - * Asynchronous operation. This means that it may be a while before your - * schema change is updated across all of the table. - * - * @param tableName name of table. - * @param htd modified description of the table - * @throws IOException if a remote or network exception occurs - */ - public void modifyTable(final byte [] tableName, final HTableDescriptor htd) - throws IOException { - execute(new MasterAdminCallable() { - @Override - public Void call() throws ServiceException { - ModifyTableRequest request = RequestConverter.buildModifyTableRequest(tableName, htd); - masterAdmin.modifyTable(null, request); - return null; - } - }); - } - - /** - * @param tableNameOrRegionName Name of a table or name of a region. - * @param ct A {@link CatalogTracker} instance (caller of this method usually has one). - * @return a pair of HRegionInfo and ServerName if tableNameOrRegionName is - * a verified region name (we call {@link MetaReader#getRegion( CatalogTracker, byte[])} - * else null. - * Throw an exception if tableNameOrRegionName is null. 
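
A sketch of the split overloads above; "t1" and the split point "row5000" are placeholders:

    HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
    try {
      admin.split("t1");              // every region picks its own split point
      admin.split("t1", "row5000");   // only the region containing row5000 splits, at that row
    } finally {
      admin.close();
    }
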
- * @throws IOException - */ - Pair getRegion(final byte[] tableNameOrRegionName, - final CatalogTracker ct) throws IOException { - if (tableNameOrRegionName == null) { - throw new IllegalArgumentException("Pass a table name or region name"); - } - Pair pair = MetaReader.getRegion(ct, tableNameOrRegionName); - if (pair == null) { - final AtomicReference> result = - new AtomicReference>(null); - final String encodedName = Bytes.toString(tableNameOrRegionName); - MetaScannerVisitor visitor = new MetaScannerVisitorBase() { - @Override - public boolean processRow(Result data) throws IOException { - HRegionInfo info = HRegionInfo.getHRegionInfo(data); - if (info == null) { - LOG.warn("No serialized HRegionInfo in " + data); - return true; - } - if (!encodedName.equals(info.getEncodedName())) return true; - ServerName sn = HRegionInfo.getServerName(data); - result.set(new Pair(info, sn)); - return false; // found the region, stop - } - }; - - MetaScanner.metaScan(conf, visitor); - pair = result.get(); - } - return pair; - } - - /** - * Convert the table name byte array into a table name string and check if table - * exists or not. - * @param tableNameBytes Name of a table. - * @param ct A {@link CatalogTracker} instance (caller of this method usually has one). - * @return tableName in string form. - * @throws IOException if a remote or network exception occurs. - * @throws TableNotFoundException if table does not exist. - */ - private String tableNameString(final byte[] tableNameBytes, CatalogTracker ct) - throws IOException { - String tableNameString = Bytes.toString(tableNameBytes); - if (!MetaReader.tableExists(ct, tableNameString)) { - throw new TableNotFoundException(tableNameString); - } - return tableNameString; - } - - /** - * Shuts down the HBase cluster - * @throws IOException if a remote or network exception occurs - */ - public synchronized void shutdown() throws IOException { - execute(new MasterAdminCallable() { - @Override - public Void call() throws ServiceException { - masterAdmin.shutdown(null,ShutdownRequest.newBuilder().build()); - return null; - } - }); - } - - /** - * Shuts down the current HBase master only. - * Does not shutdown the cluster. 
- * @see #shutdown() - * @throws IOException if a remote or network exception occurs - */ - public synchronized void stopMaster() throws IOException { - execute(new MasterAdminCallable() { - @Override - public Void call() throws ServiceException { - masterAdmin.stopMaster(null,StopMasterRequest.newBuilder().build()); - return null; - } - }); - } - - /** - * Stop the designated regionserver - * @param hostnamePort Hostname and port delimited by a : as in - * example.org:1234 - * @throws IOException if a remote or network exception occurs - */ - public synchronized void stopRegionServer(final String hostnamePort) - throws IOException { - String hostname = Addressing.parseHostname(hostnamePort); - int port = Addressing.parsePort(hostnamePort); - AdminProtocol admin = - this.connection.getAdmin(hostname, port); - StopServerRequest request = RequestConverter.buildStopServerRequest( - "Called by admin client " + this.connection.toString()); - try { - admin.stopServer(null, request); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - - /** - * @return cluster status - * @throws IOException if a remote or network exception occurs - */ - public ClusterStatus getClusterStatus() throws IOException { - return execute(new MasterMonitorCallable() { - @Override - public ClusterStatus call() throws ServiceException { - GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest(); - return ClusterStatus.convert(masterMonitor.getClusterStatus(null,req).getClusterStatus()); - } - }); - } - - private HRegionLocation getFirstMetaServerForTable(final byte [] tableName) - throws IOException { - return connection.locateRegion(HConstants.META_TABLE_NAME, - HRegionInfo.createRegionName(tableName, null, HConstants.NINES, false)); - } - - /** - * @return Configuration used by the instance. - */ - public Configuration getConfiguration() { - return this.conf; - } - - /** - * Check to see if HBase is running. Throw an exception if not. - * We consider that HBase is running if ZooKeeper and Master are running. - * - * @param conf system configuration - * @throws MasterNotRunningException if the master is not running - * @throws ZooKeeperConnectionException if unable to connect to zookeeper - */ - public static void checkHBaseAvailable(Configuration conf) - throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException { - Configuration copyOfConf = HBaseConfiguration.create(conf); - - // We set it to make it fail as soon as possible if HBase is not available - copyOfConf.setInt("hbase.client.retries.number", 1); - copyOfConf.setInt("zookeeper.recovery.retry", 0); - - HConnectionManager.HConnectionImplementation connection - = (HConnectionManager.HConnectionImplementation) - HConnectionManager.getConnection(copyOfConf); - - try { - // Check ZK first. 
- // If the connection exists, we may have a connection to ZK that does - // not work anymore - ZooKeeperKeepAliveConnection zkw = null; - try { - zkw = connection.getKeepAliveZooKeeperWatcher(); - zkw.getRecoverableZooKeeper().getZooKeeper().exists( - zkw.baseZNode, false); - - } catch (IOException e) { - throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); - } catch (KeeperException e) { - throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); - } finally { - if (zkw != null) { - zkw.close(); - } - } - - // Check Master - connection.isMasterRunning(); - - } finally { - connection.close(); - } - } - - /** - * get the regions of a given table. - * - * @param tableName the name of the table - * @return Ordered list of {@link HRegionInfo}. - * @throws IOException - */ - public List getTableRegions(final byte[] tableName) - throws IOException { - CatalogTracker ct = getCatalogTracker(); - List Regions = null; - try { - Regions = MetaReader.getTableRegions(ct, tableName, true); - } finally { - cleanupCatalogTracker(ct); - } - return Regions; - } - - @Override - public void close() throws IOException { - if (this.connection != null) { - this.connection.close(); - } - } - - /** - * Get tableDescriptors - * @param tableNames List of table names - * @return HTD[] the tableDescriptor - * @throws IOException if a remote or network exception occurs - */ - public HTableDescriptor[] getTableDescriptors(List tableNames) - throws IOException { - return this.connection.getHTableDescriptors(tableNames); - } - - /** - * Roll the log writer. That is, start writing log messages to a new file. - * - * @param serverName - * The servername of the regionserver. A server name is made of host, - * port and startcode. This is mandatory. Here is an example: - * host187.example.com,60020,1289493121758 - * @return If lots of logs, flush the returned regions so next time through - * we can clean logs. Returns null if nothing to flush. Names are actual - * region names as returned by {@link HRegionInfo#getEncodedName()} - * @throws IOException if a remote or network exception occurs - * @throws FailedLogCloseException - */ - public synchronized byte[][] rollHLogWriter(String serverName) - throws IOException, FailedLogCloseException { - ServerName sn = new ServerName(serverName); - AdminProtocol admin = this.connection.getAdmin( - sn.getHostname(), sn.getPort()); - RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest();; - try { - RollWALWriterResponse response = admin.rollWALWriter(null, request); - int regionCount = response.getRegionToFlushCount(); - byte[][] regionsToFlush = new byte[regionCount][]; - for (int i = 0; i < regionCount; i++) { - ByteString region = response.getRegionToFlush(i); - regionsToFlush[i] = region.toByteArray(); - } - return regionsToFlush; - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - - public String[] getMasterCoprocessors() { - try { - return getClusterStatus().getMasterCoprocessors(); - } catch (IOException e) { - LOG.error("Could not getClusterStatus()",e); - return null; - } - } - - /** - * Get the current compaction state of a table or region. - * It could be in a major compaction, a minor compaction, both, or none. 
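
A quick illustration of the compaction-state check described above; "t1" is a placeholder:

    HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
    try {
      admin.majorCompact("t1");
      // Prints NONE, MINOR, MAJOR or MAJOR_AND_MINOR, aggregated over all regions of t1.
      System.out.println("compaction state: " + admin.getCompactionState("t1"));
    } finally {
      admin.close();
    }
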
- * - * @param tableNameOrRegionName table or region to major compact - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - * @return the current compaction state - */ - public CompactionState getCompactionState(final String tableNameOrRegionName) - throws IOException, InterruptedException { - return getCompactionState(Bytes.toBytes(tableNameOrRegionName)); - } - - /** - * Get the current compaction state of a table or region. - * It could be in a major compaction, a minor compaction, both, or none. - * - * @param tableNameOrRegionName table or region to major compact - * @throws IOException if a remote or network exception occurs - * @throws InterruptedException - * @return the current compaction state - */ - public CompactionState getCompactionState(final byte [] tableNameOrRegionName) - throws IOException, InterruptedException { - CompactionState state = CompactionState.NONE; - CatalogTracker ct = getCatalogTracker(); - try { - Pair regionServerPair - = getRegion(tableNameOrRegionName, ct); - if (regionServerPair != null) { - if (regionServerPair.getSecond() == null) { - throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName)); - } else { - ServerName sn = regionServerPair.getSecond(); - AdminProtocol admin = - this.connection.getAdmin(sn.getHostname(), sn.getPort()); - GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest( - regionServerPair.getFirst().getRegionName(), true); - GetRegionInfoResponse response = admin.getRegionInfo(null, request); - return response.getCompactionState(); - } - } else { - final String tableName = tableNameString(tableNameOrRegionName, ct); - List> pairs = - MetaReader.getTableRegionsAndLocations(ct, tableName); - for (Pair pair: pairs) { - if (pair.getFirst().isOffline()) continue; - if (pair.getSecond() == null) continue; - try { - ServerName sn = pair.getSecond(); - AdminProtocol admin = - this.connection.getAdmin(sn.getHostname(), sn.getPort()); - GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest( - pair.getFirst().getRegionName(), true); - GetRegionInfoResponse response = admin.getRegionInfo(null, request); - switch (response.getCompactionState()) { - case MAJOR_AND_MINOR: - return CompactionState.MAJOR_AND_MINOR; - case MAJOR: - if (state == CompactionState.MINOR) { - return CompactionState.MAJOR_AND_MINOR; - } - state = CompactionState.MAJOR; - break; - case MINOR: - if (state == CompactionState.MAJOR) { - return CompactionState.MAJOR_AND_MINOR; - } - state = CompactionState.MINOR; - break; - case NONE: - default: // nothing, continue - } - } catch (NotServingRegionException e) { - if (LOG.isDebugEnabled()) { - LOG.debug("Trying to get compaction state of " + - pair.getFirst() + ": " + - StringUtils.stringifyException(e)); - } - } - } - } - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - cleanupCatalogTracker(ct); - } - return state; - } - - /** - * @see {@link #execute(MasterAdminCallable)} - */ - private abstract static class MasterAdminCallable implements Callable{ - protected MasterAdminKeepAliveConnection masterAdmin; - } - - /** - * @see {@link #execute(MasterMonitorCallable)} - */ - private abstract static class MasterMonitorCallable implements Callable { - protected MasterMonitorKeepAliveConnection masterMonitor; - } - - /** - * This method allows to execute a function requiring a connection to - * master without having to manage the connection creation/close. 
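
A sketch combining the cluster-availability check and the region listing above; "t1" is a placeholder and java.util.List is assumed to be imported:

    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin.checkHBaseAvailable(conf);   // throws if ZooKeeper or the master cannot be reached
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      System.out.println("HBase version: " + admin.getClusterStatus().getHBaseVersion());
      List<HRegionInfo> regions = admin.getTableRegions(Bytes.toBytes("t1"));
      System.out.println("t1 has " + regions.size() + " regions");
    } finally {
      admin.close();
    }
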
- * Create a {@link MasterAdminCallable} to use it. - */ - private V execute(MasterAdminCallable function) throws IOException { - function.masterAdmin = connection.getKeepAliveMasterAdmin(); - try { - return executeCallable(function); - } finally { - function.masterAdmin.close(); - } - } - - /** - * This method allows to execute a function requiring a connection to - * master without having to manage the connection creation/close. - * Create a {@link MasterAdminCallable} to use it. - */ - private V execute(MasterMonitorCallable function) throws IOException { - function.masterMonitor = connection.getKeepAliveMasterMonitor(); - try { - return executeCallable(function); - } finally { - function.masterMonitor.close(); - } - } - - /** - * Helper function called by other execute functions. - */ - private V executeCallable(Callable function) throws IOException { - try { - return function.call(); - } catch (RemoteException re) { - throw re.unwrapRemoteException(); - } catch (IOException e) { - throw e; - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } catch (Exception e) { - // This should not happen... - throw new IOException("Unexpected exception when calling master", e); - } - } - - /** - * Creates and returns a {@link com.google.protobuf.RpcChannel} instance - * connected to the active master. - * - *

- * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
- * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
- *
- * <blockquote><pre>
      -   * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
      -   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
      -   * MyCallRequest request = MyCallRequest.newBuilder()
      -   *     ...
      -   *     .build();
      -   * MyCallResponse response = service.myCall(null, request);
- * </pre></blockquote>
      - * - * @return A MasterCoprocessorRpcChannel instance - */ - public CoprocessorRpcChannel coprocessorService() { - return new MasterCoprocessorRpcChannel(connection); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnection.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnection.java deleted file mode 100644 index 9456b9c..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ /dev/null @@ -1,364 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.ExecutorService; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MasterAdminProtocol; -import org.apache.hadoop.hbase.MasterMonitorProtocol; -import org.apache.hadoop.hbase.MasterNotRunningException; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.catalog.CatalogTracker; -import org.apache.hadoop.hbase.client.coprocessor.Batch; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; - -/** - * Cluster connection. Hosts a connection to the ZooKeeper ensemble and - * thereafter into the HBase cluster. Knows how to locate regions out on the cluster, - * keeps a cache of locations and then knows how to recalibrate after they move. - * {@link HConnectionManager} manages instances of this class. - * - *

      HConnections are used by {@link HTable} mostly but also by - * {@link HBaseAdmin}, {@link CatalogTracker}, - * and {@link ZooKeeperWatcher}. HConnection instances can be shared. Sharing - * is usually what you want because rather than each HConnection instance - * having to do its own discovery of regions out on the cluster, instead, all - * clients get to share the one cache of locations. Sharing makes cleanup of - * HConnections awkward. See {@link HConnectionManager} for cleanup - * discussion. - * - * @see HConnectionManager - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public interface HConnection extends Abortable, Closeable { - /** - * @return Configuration instance being used by this HConnection instance. - */ - public Configuration getConfiguration(); - - /** - * Retrieve ZooKeeperWatcher used by this connection. - * @return ZooKeeperWatcher handle being used by the connection. - * @throws IOException if a remote or network exception occurs - * @deprecated Removed because it was a mistake exposing zookeeper in this - * interface (ZooKeeper is an implementation detail). - * Deprecated in HBase 0.94 - */ - @Deprecated - public ZooKeeperWatcher getZooKeeperWatcher() throws IOException; - - /** @return - true if the master server is running */ - public boolean isMasterRunning() - throws MasterNotRunningException, ZooKeeperConnectionException; - - /** - * A table that isTableEnabled == false and isTableDisabled == false - * is possible. This happens when a table has a lot of regions - * that must be processed. - * @param tableName table name - * @return true if the table is enabled, false otherwise - * @throws IOException if a remote or network exception occurs - */ - public boolean isTableEnabled(byte[] tableName) throws IOException; - - /** - * @param tableName table name - * @return true if the table is disabled, false otherwise - * @throws IOException if a remote or network exception occurs - */ - public boolean isTableDisabled(byte[] tableName) throws IOException; - - /** - * @param tableName table name - * @return true if all regions of the table are available, false otherwise - * @throws IOException if a remote or network exception occurs - */ - public boolean isTableAvailable(byte[] tableName) throws IOException; - - /** - * List all the userspace tables. In other words, scan the META table. - * - * If we wanted this to be really fast, we could implement a special - * catalog table that just contains table names and their descriptors. - * Right now, it only exists as part of the META table's region info. - * - * @return - returns an array of HTableDescriptors - * @throws IOException if a remote or network exception occurs - */ - public HTableDescriptor[] listTables() throws IOException; - - /** - * @param tableName table name - * @return table metadata - * @throws IOException if a remote or network exception occurs - */ - public HTableDescriptor getHTableDescriptor(byte[] tableName) - throws IOException; - - /** - * Find the location of the region of tableName that row - * lives in. - * @param tableName name of the table row is in - * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the region in - * question - * @throws IOException if a remote or network exception occurs - */ - public HRegionLocation locateRegion(final byte [] tableName, - final byte [] row) - throws IOException; - - /** - * Allows flushing the region cache. 
- */ - public void clearRegionCache(); - - /** - * Allows flushing the region cache of all locations that pertain to - * tableName - * @param tableName Name of the table whose regions we are to remove from - * cache. - */ - public void clearRegionCache(final byte [] tableName); - - /** - * Find the location of the region of tableName that row - * lives in, ignoring any value that might be in the cache. - * @param tableName name of the table row is in - * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the region in - * question - * @throws IOException if a remote or network exception occurs - */ - public HRegionLocation relocateRegion(final byte [] tableName, - final byte [] row) - throws IOException; - - /** - * Gets the location of the region of regionName. - * @param regionName name of the region to locate - * @return HRegionLocation that describes where to find the region in - * question - * @throws IOException if a remote or network exception occurs - */ - public HRegionLocation locateRegion(final byte [] regionName) - throws IOException; - - /** - * Gets the locations of all regions in the specified table, tableName. - * @param tableName table to get regions of - * @return list of region locations for all regions of table - * @throws IOException - */ - public List locateRegions(byte[] tableName) - throws IOException; - - /** - * Returns a {@link MasterAdminProtocol} to the active master - */ - public MasterAdminProtocol getMasterAdmin() throws IOException; - - /** - * Returns an {@link MasterMonitorProtocol} to the active master - */ - public MasterMonitorProtocol getMasterMonitor() throws IOException; - - - /** - * Establishes a connection to the region server at the specified address. - * @param hostname RegionServer hostname - * @param port RegionServer port - * @return proxy for HRegionServer - * @throws IOException if a remote or network exception occurs - * - */ - public AdminProtocol getAdmin(final String hostname, final int port) - throws IOException; - - /** - * Establishes a connection to the region server at the specified address, and return - * a region client protocol. - * - * @param hostname RegionServer hostname - * @param port RegionServer port - * @return ClientProtocol proxy for RegionServer - * @throws IOException if a remote or network exception occurs - * - */ - public ClientProtocol getClient(final String hostname, final int port) - throws IOException; - - /** - * Establishes a connection to the region server at the specified address. - * @param hostname RegionServer hostname - * @param port RegionServer port - * @param getMaster - do we check if master is alive - * @return proxy for HRegionServer - * @throws IOException if a remote or network exception occurs - */ - public AdminProtocol getAdmin(final String hostname, - final int port, boolean getMaster) - throws IOException; - - /** - * Find region location hosting passed row - * @param tableName table name - * @param row Row to find. - * @param reload If true do not use cache, otherwise bypass. - * @return Location of row. - * @throws IOException if a remote or network exception occurs - */ - HRegionLocation getRegionLocation(byte [] tableName, byte [] row, - boolean reload) - throws IOException; - - /** - * Pass in a ServerCallable with your particular bit of logic defined and - * this method will manage the process of doing retries with timed waits - * and refinds of missing regions. 
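
A sketch of the location-lookup calls above on a shared connection; table "t1" and row "row1" are placeholders. Cleanup of shared connections is handled through HConnectionManager, as discussed in the class comment:

    Configuration conf = HBaseConfiguration.create();
    HConnection connection = HConnectionManager.getConnection(conf);  // shared, cached per Configuration
    byte[] tableName = Bytes.toBytes("t1");
    byte[] row = Bytes.toBytes("row1");
    HRegionLocation location = connection.getRegionLocation(tableName, row, false);
    System.out.println("row1 is served by " + location.getHostname() + ":" + location.getPort());
    // If the region has moved, drop the cached entries and look it up again.
    connection.clearRegionCache(tableName);
    location = connection.relocateRegion(tableName, row);
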
- * - * @param the type of the return value - * @param callable callable to run - * @return an object of type T - * @throws IOException if a remote or network exception occurs - * @throws RuntimeException other unspecified error - */ - @Deprecated - public T getRegionServerWithRetries(ServerCallable callable) - throws IOException, RuntimeException; - - /** - * Pass in a ServerCallable with your particular bit of logic defined and - * this method will pass it to the defined region server. - * @param the type of the return value - * @param callable callable to run - * @return an object of type T - * @throws IOException if a remote or network exception occurs - * @throws RuntimeException other unspecified error - */ - @Deprecated - public T getRegionServerWithoutRetries(ServerCallable callable) - throws IOException, RuntimeException; - - /** - * Process a mixed batch of Get, Put and Delete actions. All actions for a - * RegionServer are forwarded in one RPC call. - * - * - * @param actions The collection of actions. - * @param tableName Name of the hbase table - * @param pool thread pool for parallel execution - * @param results An empty array, same size as list. If an exception is thrown, - * you can test here for partial results, and to determine which actions - * processed successfully. - * @throws IOException if there are problems talking to META. Per-item - * exceptions are stored in the results array. - * @deprecated since 0.96 - Use {@link HTableInterface#batch} instead - */ - @Deprecated - public void processBatch(List actions, final byte[] tableName, - ExecutorService pool, Object[] results) - throws IOException, InterruptedException; - - /** - * Parameterized batch processing, allowing varying return types for different - * {@link Row} implementations. - * @deprecated since 0.96 - Use {@link HTableInterface#batchCallback} instead - */ - @Deprecated - public void processBatchCallback(List list, - byte[] tableName, - ExecutorService pool, - Object[] results, - Batch.Callback callback) throws IOException, InterruptedException; - - /** - * Enable or disable region cache prefetch for the table. It will be - * applied for the given table's all HTable instances within this - * connection. By default, the cache prefetch is enabled. - * @param tableName name of table to configure. - * @param enable Set to true to enable region cache prefetch. - */ - public void setRegionCachePrefetch(final byte[] tableName, - final boolean enable); - - /** - * Check whether region cache prefetch is enabled or not. - * @param tableName name of table to check - * @return true if table's region cache prefetch is enabled. Otherwise - * it is disabled. - */ - public boolean getRegionCachePrefetch(final byte[] tableName); - - /** - * Scan zookeeper to get the number of region servers - * @return the number of region servers that are currently running - * @throws IOException if a remote or network exception occurs - * @deprecated This method will be changed from public to package protected. 
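The deprecation notes above steer callers toward HTableInterface#batch and #batchCallback instead of the connection-level processBatch methods. A minimal, hedged sketch of that replacement path; the table name, column family, qualifier, and row keys below are placeholders, not values from this patch:

// Hedged sketch of the HTableInterface#batch path recommended above;
// "mytable", "cf", "q" and the row keys are placeholders.
HTableInterface table = new HTable(conf, Bytes.toBytes("mytable"));
try {
  List<Row> actions = new ArrayList<Row>();
  actions.add(new Put(Bytes.toBytes("row1"))
      .add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
  actions.add(new Delete(Bytes.toBytes("row2")));
  actions.add(new Get(Bytes.toBytes("row3")));
  Object[] results = new Object[actions.size()];
  table.batch(actions, results);  // actions for the same region server go out in one RPC
} finally {
  table.close();
}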
- */ - @Deprecated - public int getCurrentNrHRS() throws IOException; - - /** - * @param tableNames List of table names - * @return HTD[] table metadata - * @throws IOException if a remote or network exception occurs - */ - public HTableDescriptor[] getHTableDescriptors(List tableNames) - throws IOException; - - /** - * @return true if this connection is closed - */ - public boolean isClosed(); - - /** - * Clear any caches that pertain to server name sn - * @param sn A server name as hostname:port - */ - public void clearCaches(final String sn); - - /** - * This function allows HBaseAdminProtocol and potentially others to get a shared MasterMonitor - * connection. - * @return The shared instance. Never returns null. - * @throws MasterNotRunningException - */ - public MasterMonitorKeepAliveConnection getKeepAliveMasterMonitor() - throws MasterNotRunningException; - - /** - * This function allows HBaseAdmin and potentially others to get a shared MasterAdminProtocol - * connection. - * @return The shared instance. Never returns null. - * @throws MasterNotRunningException - */ - public MasterAdminKeepAliveConnection getKeepAliveMasterAdmin() throws MasterNotRunningException; -} - diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java deleted file mode 100644 index 5cfd89f..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ /dev/null @@ -1,2378 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.client; - -import java.io.Closeable; -import java.io.IOException; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; -import java.lang.reflect.UndeclaredThrowableException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Chore; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.MasterAdminProtocol; -import org.apache.hadoop.hbase.MasterMonitorProtocol; -import org.apache.hadoop.hbase.MasterNotRunningException; -import org.apache.hadoop.hbase.MasterProtocol; -import org.apache.hadoop.hbase.RegionMovedException; -import org.apache.hadoop.hbase.RemoteExceptionHandler; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.Stoppable; -import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; -import org.apache.hadoop.hbase.client.coprocessor.Batch; -import org.apache.hadoop.hbase.ipc.HBaseClientRPC; -import org.apache.hadoop.hbase.ipc.VersionedProtocol; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.RequestConverter; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.Addressing; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.SoftValueSortedMap; -import org.apache.hadoop.hbase.util.Triple; -import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -import org.apache.hadoop.hbase.zookeeper.RootRegionTracker; -import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZKTableReadOnly; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import 
org.apache.hadoop.ipc.RemoteException; -import org.apache.zookeeper.KeeperException; - -import com.google.protobuf.ServiceException; - -/** - * A non-instantiable class that manages {@link HConnection}s. - * This class has a static Map of {@link HConnection} instances keyed by - * {@link Configuration}; all invocations of {@link #getConnection(Configuration)} - * that pass the same {@link Configuration} instance will be returned the same - * {@link HConnection} instance (Adding properties to a Configuration - * instance does not change its object identity). Sharing {@link HConnection} - * instances is usually what you want; all clients of the {@link HConnection} - * instances share the HConnections' cache of Region locations rather than each - * having to discover for itself the location of meta, root, etc. It makes - * sense for the likes of the pool of HTables class {@link HTablePool}, for - * instance (If concerned that a single {@link HConnection} is insufficient - * for sharing amongst clients in say an heavily-multithreaded environment, - * in practise its not proven to be an issue. Besides, {@link HConnection} is - * implemented atop Hadoop RPC and as of this writing, Hadoop RPC does a - * connection per cluster-member, exclusively). - * - *

      But sharing connections - * makes clean up of {@link HConnection} instances a little awkward. Currently, - * clients clean up by calling - * {@link #deleteConnection(Configuration, boolean)}. This will shut down the - * zookeeper connection the HConnection was using and clean up all - * HConnection resources as well as stopping proxies to servers out on the - * cluster. Not running the cleanup will not end the world; it'll - * just stall the closeup some and spew some zookeeper connection failed - * messages into the log. Running the cleanup on a {@link HConnection} that is - * subsequently used by another will cause breakage, so be careful running - * cleanup. - *

      To create a {@link HConnection} that is not shared by others, you can - * create a new {@link Configuration} instance, pass this new instance to - * {@link #getConnection(Configuration)}, and then when done, close it up by - * doing something like the following: - *

      - * {@code
      - * Configuration newConfig = new Configuration(originalConf);
      - * HConnection connection = HConnectionManager.getConnection(newConfig);
      - * // Use the connection to your heart's delight and then when done...
      - * HConnectionManager.deleteConnection(newConfig, true);
      - * }
      - * 
      - *

      Cleanup used to be done inside in a shutdown hook. On startup we'd - * register a shutdown hook that called {@link #deleteAllConnections(boolean)} - * on its way out but the order in which shutdown hooks run is not defined so - * were problematic for clients of HConnection that wanted to register their - * own shutdown hooks so we removed ours though this shifts the onus for - * cleanup to the client. - */ -@SuppressWarnings("serial") -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class HConnectionManager { - // An LRU Map of HConnectionKey -> HConnection (TableServer). All - // access must be synchronized. This map is not private because tests - // need to be able to tinker with it. - static final Map HBASE_INSTANCES; - - public static final int MAX_CACHED_HBASE_INSTANCES; - - /** Parameter name for what client protocol to use. */ - public static final String CLIENT_PROTOCOL_CLASS = "hbase.clientprotocol.class"; - - /** Default client protocol class name. */ - public static final String DEFAULT_CLIENT_PROTOCOL_CLASS = ClientProtocol.class.getName(); - - /** Parameter name for what admin protocol to use. */ - public static final String REGION_PROTOCOL_CLASS = "hbase.adminprotocol.class"; - - /** Default admin protocol class name. */ - public static final String DEFAULT_ADMIN_PROTOCOL_CLASS = AdminProtocol.class.getName(); - - private static final Log LOG = LogFactory.getLog(HConnectionManager.class); - - static { - // We set instances to one more than the value specified for {@link - // HConstants#ZOOKEEPER_MAX_CLIENT_CNXNS}. By default, the zk default max - // connections to the ensemble from the one client is 30, so in that case we - // should run into zk issues before the LRU hit this value of 31. - MAX_CACHED_HBASE_INSTANCES = HBaseConfiguration.create().getInt( - HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, - HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS) + 1; - HBASE_INSTANCES = new LinkedHashMap( - (int) (MAX_CACHED_HBASE_INSTANCES / 0.75F) + 1, 0.75F, true) { - @Override - protected boolean removeEldestEntry( - Map.Entry eldest) { - return size() > MAX_CACHED_HBASE_INSTANCES; - } - }; - } - - /* - * Non-instantiable. - */ - protected HConnectionManager() { - super(); - } - - /** - * Get the connection that goes with the passed conf - * configuration instance. - * If no current connection exists, method creates a new connection for the - * passed conf instance. - * @param conf configuration - * @return HConnection object for conf - * @throws ZooKeeperConnectionException - */ - public static HConnection getConnection(Configuration conf) - throws ZooKeeperConnectionException { - HConnectionKey connectionKey = new HConnectionKey(conf); - synchronized (HBASE_INSTANCES) { - HConnectionImplementation connection = HBASE_INSTANCES.get(connectionKey); - if (connection == null) { - connection = new HConnectionImplementation(conf, true); - HBASE_INSTANCES.put(connectionKey, connection); - } - connection.incCount(); - return connection; - } - } - - /** - * Create a new HConnection instance using the passed conf - * instance. - * Note: This bypasses the usual HConnection life cycle management! - * Use this with caution, the caller is responsible for closing the - * created connection. 
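For contrast with the managed, reference-counted connections handed out by getConnection(Configuration), here is a short hedged sketch of the unmanaged path that the createConnection javadoc above warns about; the caller alone owns the returned connection:

// Managed: cached per HConnectionKey and reference-counted; released via
// HConnectionManager.deleteConnection(conf, ...) as in the javadoc example earlier.
HConnection shared = HConnectionManager.getConnection(conf);

// Unmanaged: bypasses the HBASE_INSTANCES cache entirely.
HConnection owned = HConnectionManager.createConnection(conf);
try {
  // ... use 'owned' ...
} finally {
  owned.close();  // never handed to the manager, so the caller must close it
}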
- * @param conf configuration - * @return HConnection object for conf - * @throws ZooKeeperConnectionException - */ - public static HConnection createConnection(Configuration conf) - throws ZooKeeperConnectionException { - return new HConnectionImplementation(conf, false); - } - - /** - * Delete connection information for the instance specified by configuration. - * If there are no more references to it, this will then close connection to - * the zookeeper ensemble and let go of all resources. - * - * @param conf - * configuration whose identity is used to find {@link HConnection} - * instance. - * @param stopProxy - * Shuts down all the proxy's put up to cluster members including to - * cluster HMaster. Calls - * {@link HBaseClientRPC#stopProxy(org.apache.hadoop.hbase.ipc.VersionedProtocol)} - * . - */ - public static void deleteConnection(Configuration conf, boolean stopProxy) { - deleteConnection(new HConnectionKey(conf), stopProxy, false); - } - - /** - * Delete stale connection information for the instance specified by configuration. - * This will then close connection to - * the zookeeper ensemble and let go of all resources. - * - * @param connection - */ - public static void deleteStaleConnection(HConnection connection) { - deleteConnection(connection, true, true); - } - - /** - * Delete information for all connections. - * @param stopProxy stop the proxy as well - * @throws IOException - */ - public static void deleteAllConnections(boolean stopProxy) { - synchronized (HBASE_INSTANCES) { - Set connectionKeys = new HashSet(); - connectionKeys.addAll(HBASE_INSTANCES.keySet()); - for (HConnectionKey connectionKey : connectionKeys) { - deleteConnection(connectionKey, stopProxy, false); - } - HBASE_INSTANCES.clear(); - } - } - - private static void deleteConnection(HConnection connection, boolean stopProxy, - boolean staleConnection) { - synchronized (HBASE_INSTANCES) { - for (Entry connectionEntry : HBASE_INSTANCES - .entrySet()) { - if (connectionEntry.getValue() == connection) { - deleteConnection(connectionEntry.getKey(), stopProxy, staleConnection); - break; - } - } - } - } - - private static void deleteConnection(HConnectionKey connectionKey, - boolean stopProxy, boolean staleConnection) { - synchronized (HBASE_INSTANCES) { - HConnectionImplementation connection = HBASE_INSTANCES - .get(connectionKey); - if (connection != null) { - connection.decCount(); - if (connection.isZeroReference() || staleConnection) { - HBASE_INSTANCES.remove(connectionKey); - connection.close(stopProxy); - } else if (stopProxy) { - connection.stopProxyOnClose(stopProxy); - } - }else { - LOG.error("Connection not found in the list, can't delete it "+ - "(connection key="+connectionKey+"). May be the key was modified?"); - } - } - } - - /** - * It is provided for unit test cases which verify the behavior of region - * location cache prefetch. - * @return Number of cached regions for the table. - * @throws ZooKeeperConnectionException - */ - static int getCachedRegionCount(Configuration conf, - final byte[] tableName) - throws IOException { - return execute(new HConnectable(conf) { - @Override - public Integer connect(HConnection connection) { - return ((HConnectionImplementation) connection) - .getNumberOfCachedRegionLocations(tableName); - } - }); - } - - /** - * It's provided for unit test cases which verify the behavior of region - * location cache prefetch. - * @return true if the region where the table and row reside is cached. 
- * @throws ZooKeeperConnectionException - */ - static boolean isRegionCached(Configuration conf, - final byte[] tableName, final byte[] row) throws IOException { - return execute(new HConnectable(conf) { - @Override - public Boolean connect(HConnection connection) { - return ((HConnectionImplementation) connection).isRegionCached(tableName, row); - } - }); - } - - /** - * This class makes it convenient for one to execute a command in the context - * of a {@link HConnection} instance based on the given {@link Configuration}. - * - *

      - * If you find yourself wanting to use a {@link HConnection} for a relatively - * short duration of time, and do not want to deal with the hassle of creating - * and cleaning up that resource, then you should consider using this - * convenience class. - * - * @param - * the return type of the {@link HConnectable#connect(HConnection)} - * method. - */ - public static abstract class HConnectable { - public Configuration conf; - - protected HConnectable(Configuration conf) { - this.conf = conf; - } - - public abstract T connect(HConnection connection) throws IOException; - } - - /** - * This convenience method invokes the given {@link HConnectable#connect} - * implementation using a {@link HConnection} instance that lasts just for the - * duration of that invocation. - * - * @param the return type of the connect method - * @param connectable the {@link HConnectable} instance - * @return the value returned by the connect method - * @throws IOException - */ - public static T execute(HConnectable connectable) throws IOException { - if (connectable == null || connectable.conf == null) { - return null; - } - Configuration conf = connectable.conf; - HConnection connection = HConnectionManager.getConnection(conf); - boolean connectSucceeded = false; - try { - T returnValue = connectable.connect(connection); - connectSucceeded = true; - return returnValue; - } finally { - try { - connection.close(); - } catch (Exception e) { - if (connectSucceeded) { - throw new IOException("The connection to " + connection - + " could not be deleted.", e); - } - } - } - } - - /** - * Denotes a unique key to a {@link HConnection} instance. - * - * In essence, this class captures the properties in {@link Configuration} - * that may be used in the process of establishing a connection. In light of - * that, if any new such properties are introduced into the mix, they must be - * added to the {@link HConnectionKey#properties} list. 
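A hedged sketch of the HConnectable/execute pattern described above, in the same spirit as the in-package test helpers getCachedRegionCount and isRegionCached; the table count is only an illustrative payload:

// Borrow a managed connection just for the duration of connect(); execute()
// closes (releases) it in its finally block.
Integer tableCount = HConnectionManager.execute(new HConnectable<Integer>(conf) {
  @Override
  public Integer connect(HConnection connection) throws IOException {
    return connection.listTables().length;
  }
});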
- * - */ - public static class HConnectionKey { - public static String[] CONNECTION_PROPERTIES = new String[] { - HConstants.ZOOKEEPER_QUORUM, HConstants.ZOOKEEPER_ZNODE_PARENT, - HConstants.ZOOKEEPER_CLIENT_PORT, - HConstants.ZOOKEEPER_RECOVERABLE_WAITTIME, - HConstants.HBASE_CLIENT_PAUSE, HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.HBASE_CLIENT_RPC_MAXATTEMPTS, - HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.HBASE_CLIENT_PREFETCH_LIMIT, - HConstants.HBASE_META_SCANNER_CACHING, - HConstants.HBASE_CLIENT_INSTANCE_ID }; - - private Map properties; - private String username; - - public HConnectionKey(Configuration conf) { - Map m = new HashMap(); - if (conf != null) { - for (String property : CONNECTION_PROPERTIES) { - String value = conf.get(property); - if (value != null) { - m.put(property, value); - } - } - } - this.properties = Collections.unmodifiableMap(m); - - try { - User currentUser = User.getCurrent(); - if (currentUser != null) { - username = currentUser.getName(); - } - } catch (IOException ioe) { - LOG.warn("Error obtaining current user, skipping username in HConnectionKey", - ioe); - } - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - if (username != null) { - result = username.hashCode(); - } - for (String property : CONNECTION_PROPERTIES) { - String value = properties.get(property); - if (value != null) { - result = prime * result + value.hashCode(); - } - } - - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - HConnectionKey that = (HConnectionKey) obj; - if (this.username != null && !this.username.equals(that.username)) { - return false; - } else if (this.username == null && that.username != null) { - return false; - } - if (this.properties == null) { - if (that.properties != null) { - return false; - } - } else { - if (that.properties == null) { - return false; - } - for (String property : CONNECTION_PROPERTIES) { - String thisValue = this.properties.get(property); - String thatValue = that.properties.get(property); - if (thisValue == thatValue) { - continue; - } - if (thisValue == null || !thisValue.equals(thatValue)) { - return false; - } - } - } - return true; - } - - @Override - public String toString() { - return "HConnectionKey{" + - "properties=" + properties + - ", username='" + username + '\'' + - '}'; - } - } - - /* Encapsulates connection to zookeeper and regionservers.*/ - static class HConnectionImplementation implements HConnection, Closeable { - static final Log LOG = LogFactory.getLog(HConnectionImplementation.class); - private final Class adminClass; - private final Class clientClass; - private final long pause; - private final int numRetries; - private final int maxRPCAttempts; - private final int rpcTimeout; - private final int prefetchRegionLimit; - - private volatile boolean closed; - private volatile boolean aborted; - - private final Object metaRegionLock = new Object(); - private final Object userRegionLock = new Object(); - - // We have a single lock for master & zk to prevent deadlocks. Having - // one lock for ZK and one lock for master is not possible: - // When creating a connection to master, we need a connection to ZK to get - // its address. But another thread could have taken the ZK lock, and could - // be waiting for the master lock => deadlock. 
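Since HConnectionKey hashes and compares the CONNECTION_PROPERTIES values (plus the current user name) rather than Configuration object identity, two configurations that agree on those properties should resolve to the same cached connection. A hedged illustration based on the equals()/hashCode() above:

// Copying a Configuration keeps every keyed property identical, so both lookups
// should return the same HConnectionImplementation from HBASE_INSTANCES.
Configuration original = HBaseConfiguration.create();
Configuration copy = new Configuration(original);

HConnection a = HConnectionManager.getConnection(original);
HConnection b = HConnectionManager.getConnection(copy);
assert a == b;  // same HConnectionKey

// Changing a keyed property produces a different key and a separate connection.
copy.set(HConstants.HBASE_CLIENT_INSTANCE_ID, "2");
HConnection c = HConnectionManager.getConnection(copy);
assert a != c;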
- private final Object masterAndZKLock = new Object(); - - private long keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE; - private final DelayedClosing delayedClosing = - DelayedClosing.createAndStart(this); - - - private final Configuration conf; - - // Known region ServerName.toString() -> RegionClient/Admin - private final ConcurrentHashMap> servers = - new ConcurrentHashMap>(); - private final ConcurrentHashMap connectionLock = - new ConcurrentHashMap(); - - /** - * Map of table to table {@link HRegionLocation}s. The table key is made - * by doing a {@link Bytes#mapKey(byte[])} of the table's name. - */ - private final Map> - cachedRegionLocations = - new HashMap>(); - - // The presence of a server in the map implies it's likely that there is an - // entry in cachedRegionLocations that map to this server; but the absence - // of a server in this map guarentees that there is no entry in cache that - // maps to the absent server. - private final Set cachedServers = - new HashSet(); - - // region cache prefetch is enabled by default. this set contains all - // tables whose region cache prefetch are disabled. - private final Set regionCachePrefetchDisabledTables = - new CopyOnWriteArraySet(); - - private boolean stopProxy; - private int refCount; - - // indicates whether this connection's life cycle is managed (by us) - private final boolean managed; - /** - * constructor - * @param conf Configuration object - */ - @SuppressWarnings("unchecked") - public HConnectionImplementation(Configuration conf, boolean managed) - throws ZooKeeperConnectionException { - this.conf = conf; - this.managed = managed; - String adminClassName = conf.get(REGION_PROTOCOL_CLASS, - DEFAULT_ADMIN_PROTOCOL_CLASS); - this.closed = false; - try { - this.adminClass = - (Class) Class.forName(adminClassName); - } catch (ClassNotFoundException e) { - throw new UnsupportedOperationException( - "Unable to find region server interface " + adminClassName, e); - } - String clientClassName = conf.get(CLIENT_PROTOCOL_CLASS, - DEFAULT_CLIENT_PROTOCOL_CLASS); - try { - this.clientClass = - (Class) Class.forName(clientClassName); - } catch (ClassNotFoundException e) { - throw new UnsupportedOperationException( - "Unable to find client protocol " + clientClassName, e); - } - this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE); - this.numRetries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); - this.maxRPCAttempts = conf.getInt( - HConstants.HBASE_CLIENT_RPC_MAXATTEMPTS, - HConstants.DEFAULT_HBASE_CLIENT_RPC_MAXATTEMPTS); - this.rpcTimeout = conf.getInt( - HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT); - this.prefetchRegionLimit = conf.getInt( - HConstants.HBASE_CLIENT_PREFETCH_LIMIT, - HConstants.DEFAULT_HBASE_CLIENT_PREFETCH_LIMIT); - - retrieveClusterId(); - } - - /** - * An identifier that will remain the same for a given connection. - * @return - */ - public String toString(){ - return "hconnection 0x" + Integer.toHexString( hashCode() ); - } - - private String clusterId = null; - public final void retrieveClusterId(){ - if (conf.get(HConstants.CLUSTER_ID) != null){ - return; - } - - // No synchronized here, worse case we will retrieve it twice, that's - // not an issue. 
- if (this.clusterId == null){ - this.clusterId = conf.get(HConstants.CLUSTER_ID); - if (this.clusterId == null) { - ZooKeeperKeepAliveConnection zkw = null; - try { - zkw = getKeepAliveZooKeeperWatcher(); - this.clusterId = ZKClusterId.readClusterIdZNode(zkw); - if (clusterId == null) { - LOG.info("ClusterId read in ZooKeeper is null"); - } - } catch (KeeperException e) { - LOG.warn("Can't retrieve clusterId from Zookeeper", e); - } catch (IOException e) { - LOG.warn("Can't retrieve clusterId from Zookeeper", e); - } finally { - if (zkw != null) { - zkw.close(); - } - } - if (this.clusterId == null) { - this.clusterId = "default"; - } - - LOG.info("ClusterId is " + clusterId); - } - } - - conf.set(HConstants.CLUSTER_ID, clusterId); - } - - @Override - public Configuration getConfiguration() { - return this.conf; - } - - private static class MasterProtocolState { - public MasterProtocol protocol; - public int userCount; - public long keepAliveUntil = Long.MAX_VALUE; - public final Class protocolClass; - public long version; - - public MasterProtocolState ( - final Class protocolClass, long version) { - this.protocolClass = protocolClass; - this.version = version; - } - } - - /** - * Create a new Master proxy. Try once only. - */ - private MasterProtocol createMasterInterface( - MasterProtocolState masterProtocolState) - throws IOException, KeeperException, ServiceException { - - ZooKeeperKeepAliveConnection zkw; - try { - zkw = getKeepAliveZooKeeperWatcher(); - } catch (IOException e) { - throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); - } - - try { - - checkIfBaseNodeAvailable(zkw); - ServerName sn = MasterAddressTracker.getMasterAddress(zkw); - if (sn == null) { - String msg = - "ZooKeeper available but no active master location found"; - LOG.info(msg); - throw new MasterNotRunningException(msg); - } - - - InetSocketAddress isa = - new InetSocketAddress(sn.getHostname(), sn.getPort()); - MasterProtocol tryMaster = (MasterProtocol) HBaseClientRPC.getProxy( - masterProtocolState.protocolClass, - masterProtocolState.version, - isa, this.conf, this.rpcTimeout); - - if (tryMaster.isMasterRunning( - null, RequestConverter.buildIsMasterRunningRequest()).getIsMasterRunning()) { - return tryMaster; - } else { - HBaseClientRPC.stopProxy(tryMaster); - String msg = "Can create a proxy to master, but it is not running"; - LOG.info(msg); - throw new MasterNotRunningException(msg); - } - } finally { - zkw.close(); - } - } - - /** - * Create a master, retries if necessary. - */ - private MasterProtocol createMasterWithRetries( - MasterProtocolState masterProtocolState) throws MasterNotRunningException { - - // The lock must be at the beginning to prevent multiple master creation - // (and leaks) in a multithread context - synchronized (this.masterAndZKLock) { - Exception exceptionCaught = null; - MasterProtocol master = null; - int tries = 0; - while ( - !this.closed && master == null - ) { - tries++; - try { - master = createMasterInterface(masterProtocolState); - } catch (IOException e) { - exceptionCaught = e; - } catch (KeeperException e) { - exceptionCaught = e; - } catch (ServiceException e) { - exceptionCaught = e; - } - - if (exceptionCaught != null) - // It failed. 
If it's not the last try, we're going to wait a little - if (tries < numRetries) { - long pauseTime = ConnectionUtils.getPauseTime(this.pause, tries); - LOG.info("getMaster attempt " + tries + " of " + numRetries + - " failed; retrying after sleep of " +pauseTime, exceptionCaught); - - try { - Thread.sleep(pauseTime); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new RuntimeException( - "Thread was interrupted while trying to connect to master.", e); - } - - } else { - // Enough tries, we stop now - LOG.info("getMaster attempt " + tries + " of " + numRetries + - " failed; no more retrying.", exceptionCaught); - throw new MasterNotRunningException(exceptionCaught); - } - } - - if (master == null) { - // implies this.closed true - throw new MasterNotRunningException( - "Connection was closed while trying to get master"); - } - - return master; - } - } - - private void checkIfBaseNodeAvailable(ZooKeeperWatcher zkw) - throws MasterNotRunningException { - String errorMsg; - try { - if (ZKUtil.checkExists(zkw, zkw.baseZNode) == -1) { - errorMsg = "The node " + zkw.baseZNode+" is not in ZooKeeper. " - + "It should have been written by the master. " - + "Check the value configured in 'zookeeper.znode.parent'. " - + "There could be a mismatch with the one configured in the master."; - LOG.error(errorMsg); - throw new MasterNotRunningException(errorMsg); - } - } catch (KeeperException e) { - errorMsg = "Can't get connection to ZooKeeper: " + e.getMessage(); - LOG.error(errorMsg); - throw new MasterNotRunningException(errorMsg, e); - } - } - - /** - * @return true if the master is running, throws an exception otherwise - * @throws MasterNotRunningException - if the master is not running - * @throws ZooKeeperConnectionException - */ - @Override - public boolean isMasterRunning() - throws MasterNotRunningException, ZooKeeperConnectionException { - // When getting the master proxy connection, we check it's running, - // so if there is no exception, it means we've been able to get a - // connection on a running master - getKeepAliveMasterMonitor().close(); - return true; - } - - @Override - public HRegionLocation getRegionLocation(final byte [] name, - final byte [] row, boolean reload) - throws IOException { - return reload? 
relocateRegion(name, row): locateRegion(name, row); - } - - @Override - public boolean isTableEnabled(byte[] tableName) throws IOException { - return testTableOnlineState(tableName, true); - } - - @Override - public boolean isTableDisabled(byte[] tableName) throws IOException { - return testTableOnlineState(tableName, false); - } - - @Override - public boolean isTableAvailable(final byte[] tableName) throws IOException { - final AtomicBoolean available = new AtomicBoolean(true); - final AtomicInteger regionCount = new AtomicInteger(0); - MetaScannerVisitor visitor = new MetaScannerVisitorBase() { - @Override - public boolean processRow(Result row) throws IOException { - HRegionInfo info = MetaScanner.getHRegionInfo(row); - if (info != null) { - if (Bytes.equals(tableName, info.getTableName())) { - ServerName server = HRegionInfo.getServerName(row); - if (server == null) { - available.set(false); - return false; - } - regionCount.incrementAndGet(); - } - } - return true; - } - }; - MetaScanner.metaScan(conf, visitor); - return available.get() && (regionCount.get() > 0); - } - - /* - * @param True if table is online - */ - private boolean testTableOnlineState(byte [] tableName, boolean online) - throws IOException { - if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { - // The root region is always enabled - return online; - } - String tableNameStr = Bytes.toString(tableName); - ZooKeeperKeepAliveConnection zkw = getKeepAliveZooKeeperWatcher(); - try { - if (online) { - return ZKTableReadOnly.isEnabledTable(zkw, tableNameStr); - } - return ZKTableReadOnly.isDisabledTable(zkw, tableNameStr); - } catch (KeeperException e) { - throw new IOException("Enable/Disable failed", e); - }finally { - zkw.close(); - } - } - - @Override - public HRegionLocation locateRegion(final byte [] regionName) - throws IOException { - // TODO implement. use old stuff or new stuff? - return null; - } - - @Override - public List locateRegions(final byte [] tableName) - throws IOException { - // TODO implement. use old stuff or new stuff? - return null; - } - - @Override - public HRegionLocation locateRegion(final byte [] tableName, - final byte [] row) - throws IOException{ - return locateRegion(tableName, row, true, true); - } - - @Override - public HRegionLocation relocateRegion(final byte [] tableName, - final byte [] row) - throws IOException{ - - // Since this is an explicit request not to use any caching, finding - // disabled tables should not be desirable. This will ensure that an exception is thrown when - // the first time a disabled table is interacted with. - if (isTableDisabled(tableName)) { - throw new DoNotRetryIOException(Bytes.toString(tableName) + " is disabled."); - } - - return locateRegion(tableName, row, false, true); - } - - private HRegionLocation locateRegion(final byte [] tableName, - final byte [] row, boolean useCache, boolean retry) - throws IOException { - if (this.closed) throw new IOException(toString() + " closed"); - if (tableName == null || tableName.length == 0) { - throw new IllegalArgumentException( - "table name cannot be null or zero length"); - } - - if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { - ZooKeeperKeepAliveConnection zkw = getKeepAliveZooKeeperWatcher(); - try { - LOG.debug("Looking up root region location in ZK," + - " connection=" + this); - ServerName servername = - RootRegionTracker.blockUntilAvailable(zkw, this.rpcTimeout); - - LOG.debug("Looked up root region location, connection=" + this + - "; serverName=" + ((servername == null) ? 
"null" : servername)); - if (servername == null) return null; - return new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, - servername.getHostname(), servername.getPort()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - return null; - } finally { - zkw.close(); - } - } else if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { - return locateRegionInMeta(HConstants.ROOT_TABLE_NAME, tableName, row, - useCache, metaRegionLock, retry); - } else { - // Region not in the cache - have to go to the meta RS - return locateRegionInMeta(HConstants.META_TABLE_NAME, tableName, row, - useCache, userRegionLock, retry); - } - } - - /* - * Search .META. for the HRegionLocation info that contains the table and - * row we're seeking. It will prefetch certain number of regions info and - * save them to the global region cache. - */ - private void prefetchRegionCache(final byte[] tableName, - final byte[] row) { - // Implement a new visitor for MetaScanner, and use it to walk through - // the .META. - MetaScannerVisitor visitor = new MetaScannerVisitorBase() { - public boolean processRow(Result result) throws IOException { - try { - HRegionInfo regionInfo = MetaScanner.getHRegionInfo(result); - if (regionInfo == null) { - return true; - } - - // possible we got a region of a different table... - if (!Bytes.equals(regionInfo.getTableName(), tableName)) { - return false; // stop scanning - } - if (regionInfo.isOffline()) { - // don't cache offline regions - return true; - } - - ServerName serverName = HRegionInfo.getServerName(result); - if (serverName == null) { - return true; // don't cache it - } - // instantiate the location - HRegionLocation loc = new HRegionLocation(regionInfo, serverName.getHostname(), - serverName.getPort()); - // cache this meta entry - cacheLocation(tableName, loc); - - return true; - } catch (RuntimeException e) { - throw new IOException(e); - } - } - }; - try { - // pre-fetch certain number of regions info at region cache. - MetaScanner.metaScan(conf, visitor, tableName, row, - this.prefetchRegionLimit); - } catch (IOException e) { - LOG.warn("Encountered problems when prefetch META table: ", e); - } - } - - /* - * Search one of the meta tables (-ROOT- or .META.) for the HRegionLocation - * info that contains the table and row we're seeking. - */ - private HRegionLocation locateRegionInMeta(final byte [] parentTable, - final byte [] tableName, final byte [] row, boolean useCache, - Object regionLockObject, boolean retry) - throws IOException { - HRegionLocation location; - // If we are supposed to be using the cache, look in the cache to see if - // we already have the region. - if (useCache) { - location = getCachedLocation(tableName, row); - if (location != null) { - return location; - } - } - int localNumRetries = retry ? numRetries : 1; - // build the key of the meta region we should be looking for. - // the extra 9's on the end are necessary to allow "exact" matches - // without knowing the precise region names. - byte [] metaKey = HRegionInfo.createRegionName(tableName, row, - HConstants.NINES, false); - for (int tries = 0; true; tries++) { - if (tries >= localNumRetries) { - throw new NoServerForRegionException("Unable to find region for " - + Bytes.toStringBinary(row) + " after " + numRetries + " tries."); - } - - HRegionLocation metaLocation = null; - try { - // locate the root or meta region - metaLocation = locateRegion(parentTable, metaKey, true, false); - // If null still, go around again. 
- if (metaLocation == null) continue; - ClientProtocol server = - getClient(metaLocation.getHostname(), metaLocation.getPort()); - - Result regionInfoRow = null; - // This block guards against two threads trying to load the meta - // region at the same time. The first will load the meta region and - // the second will use the value that the first one found. - synchronized (regionLockObject) { - // If the parent table is META, we may want to pre-fetch some - // region info into the global region cache for this table. - if (Bytes.equals(parentTable, HConstants.META_TABLE_NAME) && - (getRegionCachePrefetch(tableName)) ) { - prefetchRegionCache(tableName, row); - } - - // Check the cache again for a hit in case some other thread made the - // same query while we were waiting on the lock. If not supposed to - // be using the cache, delete any existing cached location so it won't - // interfere. - if (useCache) { - location = getCachedLocation(tableName, row); - if (location != null) { - return location; - } - } else { - deleteCachedLocation(tableName, row); - } - - // Query the root or meta region for the location of the meta region - regionInfoRow = ProtobufUtil.getRowOrBefore(server, - metaLocation.getRegionInfo().getRegionName(), metaKey, - HConstants.CATALOG_FAMILY); - } - if (regionInfoRow == null) { - throw new TableNotFoundException(Bytes.toString(tableName)); - } - - // convert the row result into the HRegionLocation we need! - HRegionInfo regionInfo = MetaScanner.getHRegionInfo(regionInfoRow); - if (regionInfo == null) { - throw new IOException("HRegionInfo was null or empty in " + - Bytes.toString(parentTable) + ", row=" + regionInfoRow); - } - - // possible we got a region of a different table... - if (!Bytes.equals(regionInfo.getTableName(), tableName)) { - throw new TableNotFoundException( - "Table '" + Bytes.toString(tableName) + "' was not found, got: " + - Bytes.toString(regionInfo.getTableName()) + "."); - } - if (regionInfo.isSplit()) { - throw new RegionOfflineException("the only available region for" + - " the required row is a split parent," + - " the daughters should be online soon: " + - regionInfo.getRegionNameAsString()); - } - if (regionInfo.isOffline()) { - throw new RegionOfflineException("the region is offline, could" + - " be caused by a disable table call: " + - regionInfo.getRegionNameAsString()); - } - - ServerName serverName = HRegionInfo.getServerName(regionInfoRow); - if (serverName == null) { - throw new NoServerForRegionException("No server address listed " + - "in " + Bytes.toString(parentTable) + " for region " + - regionInfo.getRegionNameAsString() + " containing row " + - Bytes.toStringBinary(row)); - } - - // Instantiate the location - location = - new HRegionLocation(regionInfo, serverName.getHostname(), serverName.getPort()); - cacheLocation(tableName, location); - return location; - } catch (TableNotFoundException e) { - // if we got this error, probably means the table just plain doesn't - // exist. rethrow the error immediately. this should always be coming - // from the HTable constructor. - throw e; - } catch (IOException e) { - if (e instanceof RemoteException) { - e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e); - } - if (tries < numRetries - 1) { - if (LOG.isDebugEnabled()) { - LOG.debug("locateRegionInMeta parentTable=" + - Bytes.toString(parentTable) + ", metaLocation=" + - ((metaLocation == null)? 
"null": "{" + metaLocation + "}") + - ", attempt=" + tries + " of " + - this.numRetries + " failed; retrying after sleep of " + - ConnectionUtils.getPauseTime(this.pause, tries) + " because: " + e.getMessage()); - } - } else { - throw e; - } - // Only relocate the parent region if necessary - if(!(e instanceof RegionOfflineException || - e instanceof NoServerForRegionException)) { - relocateRegion(parentTable, metaKey); - } - } - try{ - Thread.sleep(ConnectionUtils.getPauseTime(this.pause, tries)); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IOException("Giving up trying to location region in " + - "meta: thread is interrupted."); - } - } - } - - /* - * Search the cache for a location that fits our table and row key. - * Return null if no suitable region is located. TODO: synchronization note - * - *

      TODO: This method during writing consumes 15% of CPU doing lookup - * into the Soft Reference SortedMap. Improve. - * - * @param tableName - * @param row - * @return Null or region location found in cache. - */ - HRegionLocation getCachedLocation(final byte [] tableName, - final byte [] row) { - SoftValueSortedMap tableLocations = - getTableLocations(tableName); - - // start to examine the cache. we can only do cache actions - // if there's something in the cache for this table. - if (tableLocations.isEmpty()) { - return null; - } - - HRegionLocation possibleRegion = tableLocations.get(row); - if (possibleRegion != null) { - return possibleRegion; - } - - possibleRegion = tableLocations.lowerValueByKey(row); - if (possibleRegion == null) { - return null; - } - - // make sure that the end key is greater than the row we're looking - // for, otherwise the row actually belongs in the next region, not - // this one. the exception case is when the endkey is - // HConstants.EMPTY_END_ROW, signifying that the region we're - // checking is actually the last region in the table. - byte[] endKey = possibleRegion.getRegionInfo().getEndKey(); - if (Bytes.equals(endKey, HConstants.EMPTY_END_ROW) || - KeyValue.getRowComparator(tableName).compareRows( - endKey, 0, endKey.length, row, 0, row.length) > 0) { - return possibleRegion; - } - - // Passed all the way through, so we got nothin - complete cache miss - return null; - } - - /** - * Delete a cached location - * @param tableName tableName - * @param row - */ - void deleteCachedLocation(final byte [] tableName, final byte [] row) { - synchronized (this.cachedRegionLocations) { - Map tableLocations = - getTableLocations(tableName); - // start to examine the cache. we can only do cache actions - // if there's something in the cache for this table. - if (!tableLocations.isEmpty()) { - HRegionLocation rl = getCachedLocation(tableName, row); - if (rl != null) { - tableLocations.remove(rl.getRegionInfo().getStartKey()); - if (LOG.isDebugEnabled()) { - LOG.debug("Removed " + - rl.getRegionInfo().getRegionNameAsString() + - " for tableName=" + Bytes.toString(tableName) + - " from cache " + "because of " + Bytes.toStringBinary(row)); - } - } - } - } - } - - @Override - public void clearCaches(String sn) { - clearCachedLocationForServer(sn); - } - - /* - * Delete all cached entries of a table that maps to a specific location. 
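The getCachedLocation logic above amounts to a floor search over region start keys followed by an end-key check. A standalone, hypothetical sketch of that rule using a plain TreeMap, with String keys standing in for byte[] rows (an illustration, not the HBase code itself):

import java.util.Map;
import java.util.TreeMap;

public class RegionCacheLookupSketch {
  /** startKey -> endKey; an empty endKey marks the last region in the table. */
  static String findRegion(TreeMap<String, String> regions, String row) {
    Map.Entry<String, String> floor = regions.floorEntry(row);
    if (floor == null) {
      return null;                      // nothing cached at or before this row
    }
    String endKey = floor.getValue();
    if (endKey.isEmpty() || endKey.compareTo(row) > 0) {
      return floor.getKey();            // row falls inside [startKey, endKey)
    }
    return null;                        // row belongs to a later, uncached region
  }

  public static void main(String[] args) {
    TreeMap<String, String> regions = new TreeMap<String, String>();
    regions.put("", "m");               // first region:  ["", "m")
    regions.put("m", "");               // last region:   ["m", end of table)
    System.out.println(findRegion(regions, "k"));  // "" -> first region
    System.out.println(findRegion(regions, "z"));  // "m" -> last region
  }
}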
- * - * @param tablename - * @param server - */ - private void clearCachedLocationForServer(final String server) { - boolean deletedSomething = false; - synchronized (this.cachedRegionLocations) { - if (!cachedServers.contains(server)) { - return; - } - for (Map tableLocations : - cachedRegionLocations.values()) { - for (Entry e : tableLocations.entrySet()) { - if (e.getValue().getHostnamePort().equals(server)) { - tableLocations.remove(e.getKey()); - deletedSomething = true; - } - } - } - cachedServers.remove(server); - } - if (deletedSomething && LOG.isDebugEnabled()) { - LOG.debug("Removed all cached region locations that map to " + server); - } - } - - /* - * @param tableName - * @return Map of cached locations for passed tableName - */ - private SoftValueSortedMap getTableLocations( - final byte [] tableName) { - // find the map of cached locations for this table - Integer key = Bytes.mapKey(tableName); - SoftValueSortedMap result; - synchronized (this.cachedRegionLocations) { - result = this.cachedRegionLocations.get(key); - // if tableLocations for this table isn't built yet, make one - if (result == null) { - result = new SoftValueSortedMap( - Bytes.BYTES_COMPARATOR); - this.cachedRegionLocations.put(key, result); - } - } - return result; - } - - @Override - public void clearRegionCache() { - synchronized(this.cachedRegionLocations) { - this.cachedRegionLocations.clear(); - this.cachedServers.clear(); - } - } - - @Override - public void clearRegionCache(final byte [] tableName) { - synchronized (this.cachedRegionLocations) { - this.cachedRegionLocations.remove(Bytes.mapKey(tableName)); - } - } - - /* - * Put a newly discovered HRegionLocation into the cache. - */ - private void cacheLocation(final byte [] tableName, - final HRegionLocation location) { - byte [] startKey = location.getRegionInfo().getStartKey(); - Map tableLocations = - getTableLocations(tableName); - boolean hasNewCache = false; - synchronized (this.cachedRegionLocations) { - cachedServers.add(location.getHostnamePort()); - hasNewCache = (tableLocations.put(startKey, location) == null); - } - if (hasNewCache) { - LOG.debug("Cached location for " + - location.getRegionInfo().getRegionNameAsString() + - " is " + location.getHostnamePort()); - } - } - - @Override - public AdminProtocol getAdmin(final String hostname, - final int port) throws IOException { - return getAdmin(hostname, port, false); - } - - @Override - public ClientProtocol getClient( - final String hostname, final int port) throws IOException { - return (ClientProtocol)getProtocol(hostname, port, - clientClass, ClientProtocol.VERSION); - } - - @Override - public AdminProtocol getAdmin(final String hostname, - final int port, final boolean master) throws IOException { - return (AdminProtocol)getProtocol(hostname, port, - adminClass, AdminProtocol.VERSION); - } - - /** - * Either the passed isa is null or hostname - * can be but not both. - * @param hostname - * @param port - * @param protocolClass - * @param version - * @return Proxy. 
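A brief hedged usage sketch of the per-server proxy accessors defined above: resolve a row to its hosting region, then ask the connection for that server's client and admin protocols (table and row are placeholders):

// getProtocol() behind these calls caches one proxy per server and protocol,
// so repeated lookups for the same region server are cheap.
byte[] tableName = Bytes.toBytes("mytable");   // placeholder
byte[] row = Bytes.toBytes("row1");            // placeholder

HRegionLocation loc = connection.locateRegion(tableName, row);
ClientProtocol client = connection.getClient(loc.getHostname(), loc.getPort());
AdminProtocol admin = connection.getAdmin(loc.getHostname(), loc.getPort());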
- * @throws IOException - */ - VersionedProtocol getProtocol(final String hostname, - final int port, final Class protocolClass, - final long version) throws IOException { - String rsName = Addressing.createHostAndPortStr(hostname, port); - // See if we already have a connection (common case) - Map protocols = this.servers.get(rsName); - if (protocols == null) { - protocols = new HashMap(); - Map existingProtocols = - this.servers.putIfAbsent(rsName, protocols); - if (existingProtocols != null) { - protocols = existingProtocols; - } - } - String protocol = protocolClass.getName(); - VersionedProtocol server = protocols.get(protocol); - if (server == null) { - // create a unique lock for this RS + protocol (if necessary) - String lockKey = protocol + "@" + rsName; - this.connectionLock.putIfAbsent(lockKey, lockKey); - // get the RS lock - synchronized (this.connectionLock.get(lockKey)) { - // do one more lookup in case we were stalled above - server = protocols.get(protocol); - if (server == null) { - try { - // Only create isa when we need to. - InetSocketAddress address = new InetSocketAddress(hostname, port); - // definitely a cache miss. establish an RPC for this RS - server = HBaseClientRPC.waitForProxy( - protocolClass, version, address, this.conf, - this.maxRPCAttempts, this.rpcTimeout, this.rpcTimeout); - protocols.put(protocol, server); - } catch (RemoteException e) { - LOG.warn("RemoteException connecting to RS", e); - // Throw what the RemoteException was carrying. - throw e.unwrapRemoteException(); - } - } - } - } - return server; - } - - @Override - @Deprecated - public ZooKeeperWatcher getZooKeeperWatcher() - throws ZooKeeperConnectionException { - canCloseZKW = false; - - try { - return getKeepAliveZooKeeperWatcher(); - } catch (ZooKeeperConnectionException e){ - throw e; - }catch (IOException e) { - // Encapsulate exception to keep interface - throw new ZooKeeperConnectionException( - "Can't create a zookeeper connection", e); - } - } - - - private ZooKeeperKeepAliveConnection keepAliveZookeeper; - private int keepAliveZookeeperUserCount; - private boolean canCloseZKW = true; - - // keepAlive time, in ms. No reason to make it configurable. - private static final long keepAlive = 5 * 60 * 1000; - - /** - * Retrieve a shared ZooKeeperWatcher. You must close it it once you've have - * finished with it. - * @return The shared instance. Never returns null. - */ - public ZooKeeperKeepAliveConnection getKeepAliveZooKeeperWatcher() - throws IOException { - synchronized (masterAndZKLock) { - - if (keepAliveZookeeper == null) { - // We don't check that our link to ZooKeeper is still valid - // But there is a retry mechanism in the ZooKeeperWatcher itself - keepAliveZookeeper = new ZooKeeperKeepAliveConnection( - conf, this.toString(), this); - } - keepAliveZookeeperUserCount++; - keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE; - - return keepAliveZookeeper; - } - } - - void releaseZooKeeperWatcher(ZooKeeperWatcher zkw) { - if (zkw == null){ - return; - } - synchronized (masterAndZKLock) { - --keepAliveZookeeperUserCount; - if (keepAliveZookeeperUserCount <=0 ){ - keepZooKeeperWatcherAliveUntil = - System.currentTimeMillis() + keepAlive; - } - } - } - - - /** - * Creates a Chore thread to check the connections to master & zookeeper - * and close them when they reach their closing time ( - * {@link #MasterProtocolState.keepAliveUntil} and - * {@link #keepZooKeeperWatcherAliveUntil}). 
Keep alive time is - * managed by the release functions and the variable {@link #keepAlive} - */ - private static class DelayedClosing extends Chore implements Stoppable { - private HConnectionImplementation hci; - Stoppable stoppable; - - private DelayedClosing( - HConnectionImplementation hci, Stoppable stoppable){ - super( - "ZooKeeperWatcher and Master delayed closing for connection "+hci, - 60*1000, // We check every minutes - stoppable); - this.hci = hci; - this.stoppable = stoppable; - } - - static DelayedClosing createAndStart(HConnectionImplementation hci){ - Stoppable stoppable = new Stoppable() { - private volatile boolean isStopped = false; - @Override public void stop(String why) { isStopped = true;} - @Override public boolean isStopped() {return isStopped;} - }; - - return new DelayedClosing(hci, stoppable); - } - - protected void closeMasterProtocol(MasterProtocolState protocolState) { - if (System.currentTimeMillis() > protocolState.keepAliveUntil) { - hci.closeMasterProtocol(protocolState); - protocolState.keepAliveUntil = Long.MAX_VALUE; - } - } - - @Override - protected void chore() { - synchronized (hci.masterAndZKLock) { - if (hci.canCloseZKW) { - if (System.currentTimeMillis() > - hci.keepZooKeeperWatcherAliveUntil) { - - hci.closeZooKeeperWatcher(); - hci.keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE; - } - } - closeMasterProtocol(hci.masterAdminProtocol); - closeMasterProtocol(hci.masterMonitorProtocol); - } - } - - @Override - public void stop(String why) { - stoppable.stop(why); - } - - @Override - public boolean isStopped() { - return stoppable.isStopped(); - } - } - - private void closeZooKeeperWatcher() { - synchronized (masterAndZKLock) { - if (keepAliveZookeeper != null) { - LOG.info("Closing zookeeper sessionid=0x" + - Long.toHexString( - keepAliveZookeeper.getRecoverableZooKeeper().getSessionId())); - keepAliveZookeeper.internalClose(); - keepAliveZookeeper = null; - } - keepAliveZookeeperUserCount = 0; - } - } - - private static class MasterProtocolHandler implements InvocationHandler { - private HConnectionImplementation connection; - private MasterProtocolState protocolStateTracker; - - protected MasterProtocolHandler(HConnectionImplementation connection, - MasterProtocolState protocolStateTracker) { - this.connection = connection; - this.protocolStateTracker = protocolStateTracker; - } - - @Override - public Object invoke(Object proxy, Method method, Object[] args) - throws Throwable { - if (method.getName().equals("close") && - method.getParameterTypes().length == 0) { - release(connection, protocolStateTracker); - return null; - } else { - try { - return method.invoke(protocolStateTracker.protocol, args); - }catch (InvocationTargetException e){ - // We will have this for all the exception, checked on not, sent - // by any layer, including the functional exception - Throwable cause = e.getCause(); - if (cause == null){ - throw new RuntimeException( - "Proxy invocation failed and getCause is null", e); - } - if (cause instanceof UndeclaredThrowableException) { - cause = cause.getCause(); - } - throw cause; - } - } - } - - private void release( - HConnectionImplementation connection, - MasterProtocolState target) { - connection.releaseMaster(target); - } - } - - MasterProtocolState masterAdminProtocol = - new MasterProtocolState(MasterAdminProtocol.class, MasterAdminProtocol.VERSION); - MasterProtocolState masterMonitorProtocol = - new MasterProtocolState(MasterMonitorProtocol.class, MasterMonitorProtocol.VERSION); - - /** - * This function allows 
HBaseAdmin and potentially others - * to get a shared master connection. - * - * @return The shared instance. Never returns null. - * @throws MasterNotRunningException - */ - private Object getKeepAliveMasterProtocol( - MasterProtocolState protocolState, Class connectionClass) - throws MasterNotRunningException { - synchronized (masterAndZKLock) { - if (!isKeepAliveMasterConnectedAndRunning(protocolState)) { - if (protocolState.protocol != null) { - HBaseClientRPC.stopProxy(protocolState.protocol); - } - protocolState.protocol = null; - protocolState.protocol = createMasterWithRetries(protocolState); - } - protocolState.userCount++; - protocolState.keepAliveUntil = Long.MAX_VALUE; - - return Proxy.newProxyInstance( - connectionClass.getClassLoader(), - new Class[]{connectionClass}, - new MasterProtocolHandler(this, protocolState) - ); - } - } - - @Override - public MasterAdminProtocol getMasterAdmin() throws MasterNotRunningException { - return getKeepAliveMasterAdmin(); - }; - - @Override - public MasterMonitorProtocol getMasterMonitor() throws MasterNotRunningException { - return getKeepAliveMasterMonitor(); - } - - @Override - public MasterAdminKeepAliveConnection getKeepAliveMasterAdmin() - throws MasterNotRunningException { - return (MasterAdminKeepAliveConnection) - getKeepAliveMasterProtocol(masterAdminProtocol, MasterAdminKeepAliveConnection.class); - } - - @Override - public MasterMonitorKeepAliveConnection getKeepAliveMasterMonitor() - throws MasterNotRunningException { - return (MasterMonitorKeepAliveConnection) - getKeepAliveMasterProtocol(masterMonitorProtocol, MasterMonitorKeepAliveConnection.class); - } - - private boolean isKeepAliveMasterConnectedAndRunning(MasterProtocolState protocolState){ - if (protocolState.protocol == null){ - return false; - } - try { - return protocolState.protocol.isMasterRunning( - null, RequestConverter.buildIsMasterRunningRequest()).getIsMasterRunning(); - }catch (UndeclaredThrowableException e){ - // It's somehow messy, but we can receive exceptions such as - // java.net.ConnectException but they're not declared. So we catch - // it... - LOG.info("Master connection is not running anymore", - e.getUndeclaredThrowable()); - return false; - } catch (ServiceException se) { - LOG.warn("Checking master connection", se); - return false; - } - } - - private void releaseMaster(MasterProtocolState protocolState) { - if (protocolState.protocol == null){ - return; - } - synchronized (masterAndZKLock) { - --protocolState.userCount; - if (protocolState.userCount <= 0) { - protocolState.keepAliveUntil = - System.currentTimeMillis() + keepAlive; - } - } - } - - private void closeMasterProtocol(MasterProtocolState protocolState) { - if (protocolState.protocol != null){ - LOG.info("Closing master protocol: " + protocolState.protocolClass.getName()); - HBaseClientRPC.stopProxy(protocolState.protocol); - protocolState.protocol = null; - } - protocolState.userCount = 0; - } - - /** - * Immediate close of the shared master. Can be by the delayed close or - * when closing the connection itself. 
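Because the keep-alive master proxies route close() through MasterProtocolHandler to releaseMaster(), callers release the shared connection rather than tearing it down. A hedged usage sketch:

// close() only decrements userCount; the real proxy is stopped later by the
// DelayedClosing chore once the count reaches zero and keepAlive expires.
MasterAdminKeepAliveConnection masterAdmin = connection.getKeepAliveMasterAdmin();
try {
  // ... issue admin RPCs through masterAdmin ...
} finally {
  masterAdmin.close();
}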
- */ - private void closeMaster() { - synchronized (masterAndZKLock) { - closeMasterProtocol(masterAdminProtocol); - closeMasterProtocol(masterMonitorProtocol); - } - } - - @Override - public T getRegionServerWithRetries(ServerCallable callable) - throws IOException, RuntimeException { - return callable.withRetries(); - } - - @Override - public T getRegionServerWithoutRetries(ServerCallable callable) - throws IOException, RuntimeException { - return callable.withoutRetries(); - } - - @Deprecated - private Callable createCallable( - final HRegionLocation loc, final MultiAction multi, - final byte [] tableName) { - // TODO: This does not belong in here!!! St.Ack HConnections should - // not be dealing in Callables; Callables have HConnections, not other - // way around. - final HConnection connection = this; - return new Callable() { - public MultiResponse call() throws IOException { - ServerCallable callable = - new ServerCallable(connection, tableName, null) { - public MultiResponse call() throws IOException { - return ProtobufUtil.multi(server, multi); - } - - @Override - public void connect(boolean reload) throws IOException { - server = connection.getClient( - loc.getHostname(), loc.getPort()); - } - }; - return callable.withoutRetries(); - } - }; - } - - - void updateCachedLocation(HRegionLocation hrl, String hostname, int port) { - HRegionLocation newHrl = new HRegionLocation(hrl.getRegionInfo(), hostname, port); - synchronized (this.cachedRegionLocations) { - cacheLocation(hrl.getRegionInfo().getTableName(), newHrl); - } - } - - void deleteCachedLocation(HRegionLocation rl) { - synchronized (this.cachedRegionLocations) { - Map tableLocations = - getTableLocations(rl.getRegionInfo().getTableName()); - tableLocations.remove(rl.getRegionInfo().getStartKey()); - } - } - - private void updateCachedLocations(byte[] tableName, Row row, Object t) { - updateCachedLocations(null, tableName, row, t); - } - - /** - * Update the location with the new value (if the exception is a RegionMovedException) or delete - * it from the cache. - * @param hrl - can be null. If it's the case, tableName and row should not be null - * @param tableName - can be null if hrl is not null. - * @param row - can be null if hrl is not null. - * @param exception - An object (to simplify user code) on which we will try to find a nested - * or wrapped or both RegionMovedException - */ - private void updateCachedLocations(final HRegionLocation hrl, final byte[] tableName, - Row row, final Object exception) { - - if ((row == null || tableName == null) && hrl == null){ - LOG.warn ("Coding error, see method javadoc. row="+row+", tableName="+ - Bytes.toString(tableName)+", hrl="+hrl); - return; - } - - // Is it something we have already updated? - final HRegionLocation myLoc = (hrl != null ? - hrl : getCachedLocation(tableName, row.getRow())); - if (myLoc == null) { - // There is no such location in the cache => it's been removed already => nothing to do - return; - } - - final RegionMovedException rme = RegionMovedException.find(exception); - if (rme != null) { - LOG.info("Region " + myLoc.getRegionInfo().getRegionNameAsString() + " moved from " + - myLoc.getHostnamePort() + ", updating client location cache." 
+ - " New server: " + rme.getHostname() + ":" + rme.getPort()); - updateCachedLocation(myLoc, rme.getHostname(), rme.getPort()); - } else { - deleteCachedLocation(myLoc); - } - } - - @Override - @Deprecated - public void processBatch(List list, - final byte[] tableName, - ExecutorService pool, - Object[] results) throws IOException, InterruptedException { - // This belongs in HTable!!! Not in here. St.Ack - - // results must be the same size as list - if (results.length != list.size()) { - throw new IllegalArgumentException( - "argument results must be the same size as argument list"); - } - processBatchCallback(list, tableName, pool, results, null); - } - - /** - * Send the queries in parallel on the different region servers. Retries on failures. - * If the method returns it means that there is no error, and the 'results' array will - * contain no exception. On error, an exception is thrown, and the 'results' array will - * contain results and exceptions. - * @deprecated since 0.96 - Use {@link HTable#processBatchCallback} instead - */ - @Override - @Deprecated - public void processBatchCallback( - List list, - byte[] tableName, - ExecutorService pool, - Object[] results, - Batch.Callback callback) - throws IOException, InterruptedException { - - Process p = new Process(this, list, tableName, pool, results, callback); - p.processBatchCallback(); - } - - - /** - * Methods and attributes to manage a batch process are grouped into this single class. - * This allows, by creating a Process per batch process to ensure multithread safety. - * - * This code should be move to HTable once processBatchCallback is not supported anymore in - * the HConnection interface. - */ - private static class Process { - // Info on the queries and their context - private final HConnectionImplementation hci; - private final List rows; - private final byte[] tableName; - private final ExecutorService pool; - private final Object[] results; - private final Batch.Callback callback; - - // Error management: these lists are filled by the errors on the final try. Indexes - // are consistent, i.e. exceptions[i] matches failedActions[i] and failedAddresses[i] - private final List exceptions; - private final List failedActions; - private final List failedAddresses; - - // Used during the batch process - private final List> toReplay; - private final LinkedList, HRegionLocation, Future>> - inProgress; - private int curNumRetries; - - // Notified when a tasks is done - private final List> finishedTasks = new ArrayList>(); - - private Process(HConnectionImplementation hci, List list, - byte[] tableName, ExecutorService pool, Object[] results, - Batch.Callback callback){ - this.hci = hci; - this.rows = list; - this.tableName = tableName; - this.pool = pool; - this.results = results; - this.callback = callback; - this.toReplay = new ArrayList>(); - this.inProgress = - new LinkedList, HRegionLocation, Future>>(); - this.exceptions = new ArrayList(); - this.failedActions = new ArrayList(); - this.failedAddresses = new ArrayList(); - this.curNumRetries = 0; - } - - - /** - * Group a list of actions per region servers, and send them. The created MultiActions are - * added to the inProgress list. - * @param actionsList - * @param sleepTime - sleep time before actually executing the actions. Can be zero. - * @throws IOException - if we can't locate a region after multiple retries. 
- */ - private void submit(List> actionsList, final long sleepTime) throws IOException { - // group per location => regions server - final Map> actionsByServer = - new HashMap>(); - for (Action aAction : actionsList) { - final Row row = aAction.getAction(); - - if (row != null) { - final HRegionLocation loc = hci.locateRegion(this.tableName, row.getRow()); - if (loc == null) { - throw new IOException("No location found, aborting submit."); - } - - final byte[] regionName = loc.getRegionInfo().getRegionName(); - MultiAction actions = actionsByServer.get(loc); - if (actions == null) { - actions = new MultiAction(); - actionsByServer.put(loc, actions); - } - actions.add(regionName, aAction); - } - } - - // Send the queries and add them to the inProgress list - for (Entry> e : actionsByServer.entrySet()) { - Callable callable = - createDelayedCallable(sleepTime, e.getKey(), e.getValue()); - Triple, HRegionLocation, Future> p = - new Triple, HRegionLocation, Future>( - e.getValue(), e.getKey(), this.pool.submit(callable)); - this.inProgress.addLast(p); - } - } - - - private void addToErrorsLists(Exception ex, Row row, Triple, - HRegionLocation, Future> obj) { - this.exceptions.add(ex); - this.failedActions.add(row); - this.failedAddresses.add(obj.getSecond().getHostnamePort()); - } - - /** - * Resubmit the actions which have failed, after a sleep time. - * @throws IOException - */ - private void doRetry() throws IOException{ - final long sleepTime = ConnectionUtils.getPauseTime(hci.pause, this.curNumRetries); - submit(this.toReplay, sleepTime); - this.toReplay.clear(); - } - - /** - * Parameterized batch processing, allowing varying return types for - * different {@link Row} implementations. - * Throws an exception on error. If there are no exceptions, it means that the 'results' - * array is clean. - */ - private void processBatchCallback() throws IOException, InterruptedException { - if (this.results.length != this.rows.size()) { - throw new IllegalArgumentException( - "argument results (size="+results.length+") must be the same size as " + - "argument list (size="+this.rows.size()+")"); - } - if (this.rows.isEmpty()) { - return; - } - - // We keep the number of retry per action. - int[] nbRetries = new int[this.results.length]; - - // Build the action list. This list won't change after being created, hence the - // indexes will remain constant, allowing a direct lookup. - final List> listActions = new ArrayList>(this.rows.size()); - for (int i = 0; i < this.rows.size(); i++) { - Action action = new Action(this.rows.get(i), i); - listActions.add(action); - } - - // execute the actions. We will analyze and resubmit the actions in a 'while' loop. - submit(listActions, 0); - - // LastRetry is true if, either: - // we had an exception 'DoNotRetry' - // we had more than numRetries for any action - // In this case, we will finish the current retries but we won't start new ones. - boolean lastRetry = false; - // despite its name numRetries means number of tries. So if numRetries == 1 it means we - // won't retry. And we compare vs. 2 in case someone set it to zero. 
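// For example (values assumed): numRetries = 1 means a single attempt and no
// retry, so noRetry below is true; numRetries = 3 allows the first try plus up
// to two retries.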
- boolean noRetry = (hci.numRetries < 2); - - // Analyze and resubmit until all actions are done successfully or failed after numRetries - while (!this.inProgress.isEmpty()) { - - // We need the original multi action to find out what actions to replay if - // we have a 'total' failure of the Future - // We need the HRegionLocation as we give it back if we go out of retries - Triple, HRegionLocation, Future> currentTask = - removeFirstDone(); - - // Get the answer, keep the exception if any as we will use it for the analysis - MultiResponse responses = null; - ExecutionException exception = null; - try { - responses = currentTask.getThird().get(); - } catch (ExecutionException e) { - exception = e; - } - - // Error case: no result at all for this multi action. We need to redo all actions - if (responses == null) { - for (List> actions : currentTask.getFirst().actions.values()) { - for (Action action : actions) { - Row row = action.getAction(); - hci.updateCachedLocations(this.tableName, row, exception); - if (noRetry) { - addToErrorsLists(exception, row, currentTask); - } else { - lastRetry = addToReplay(nbRetries, action); - } - } - } - } else { // Success or partial success - // Analyze detailed results. We can still have individual failures to be redo. - // two specific exceptions are managed: - // - DoNotRetryIOException: we continue to retry for other actions - // - RegionMovedException: we update the cache with the new region location - for (Entry>> resultsForRS : - responses.getResults().entrySet()) { - for (Pair regionResult : resultsForRS.getValue()) { - Action correspondingAction = listActions.get(regionResult.getFirst()); - Object result = regionResult.getSecond(); - this.results[correspondingAction.getOriginalIndex()] = result; - - // Failure: retry if it's make sense else update the errors lists - if (result == null || result instanceof Throwable) { - Row row = correspondingAction.getAction(); - hci.updateCachedLocations(this.tableName, row, result); - if (result instanceof DoNotRetryIOException || noRetry) { - addToErrorsLists((Exception)result, row, currentTask); - } else { - lastRetry = addToReplay(nbRetries, correspondingAction); - } - } else // success - if (callback != null) { - this.callback.update(resultsForRS.getKey(), - this.rows.get(regionResult.getFirst()).getRow(), - (R) result); - } - } - } - } - - // Retry all actions in toReplay then clear it. - if (!noRetry && !toReplay.isEmpty()) { - doRetry(); - if (lastRetry) { - noRetry = true; - } - } - } - - if (!exceptions.isEmpty()) { - throw new RetriesExhaustedWithDetailsException(this.exceptions, - this.failedActions, - this.failedAddresses); - } - } - - /** - * Put the action that has to be retried in the Replay list. - * @return true if we're out of numRetries and it's the last retry. - */ - private boolean addToReplay(int[] nbRetries, Action action) { - this.toReplay.add(action); - nbRetries[action.getOriginalIndex()]++; - if (nbRetries[action.getOriginalIndex()] > this.curNumRetries) { - this.curNumRetries = nbRetries[action.getOriginalIndex()]; - } - // numRetries means number of tries, while curNumRetries means current number of retries. So - // we need to add 1 to make them comparable. And as we look for the last try we compare - // with '>=' and no '>'. And we need curNumRetries to means what it says as we don't want - // to initialize it to 1. - return ( (this.curNumRetries +1) >= hci.numRetries); - } - - /** - * Wait for one of tasks to be done, and remove it from the list. - * @return the tasks done. 
- */ - private Triple, HRegionLocation, Future> - removeFirstDone() throws InterruptedException { - while (true) { - synchronized (finishedTasks) { - if (!finishedTasks.isEmpty()) { - MultiAction done = finishedTasks.remove(finishedTasks.size() - 1); - - // We now need to remove it from the inProgress part. - Iterator, HRegionLocation, Future>> it = - inProgress.iterator(); - while (it.hasNext()) { - Triple, HRegionLocation, Future> task = it.next(); - if (task.getFirst() == done) { // We have the exact object. No java equals here. - it.remove(); - return task; - } - } - LOG.error("Development error: We didn't see a task in the list. " + - done.getRegions()); - } - finishedTasks.wait(10); - } - } - } - - private Callable createDelayedCallable( - final long delay, final HRegionLocation loc, final MultiAction multi) { - - final Callable delegate = hci.createCallable(loc, multi, tableName); - - return new Callable() { - private final long creationTime = System.currentTimeMillis(); - - @Override - public MultiResponse call() throws Exception { - try { - final long waitingTime = delay + creationTime - System.currentTimeMillis(); - if (waitingTime > 0) { - Thread.sleep(waitingTime); - } - return delegate.call(); - } finally { - synchronized (finishedTasks) { - finishedTasks.add(multi); - finishedTasks.notifyAll(); - } - } - } - }; - } - } - - /* - * Return the number of cached region for a table. It will only be called - * from a unit test. - */ - int getNumberOfCachedRegionLocations(final byte[] tableName) { - Integer key = Bytes.mapKey(tableName); - synchronized (this.cachedRegionLocations) { - Map tableLocs = - this.cachedRegionLocations.get(key); - - if (tableLocs == null) { - return 0; - } - return tableLocs.values().size(); - } - } - - /** - * Check the region cache to see whether a region is cached yet or not. - * Called by unit tests. - * @param tableName tableName - * @param row row - * @return Region cached or not. - */ - boolean isRegionCached(final byte[] tableName, final byte[] row) { - HRegionLocation location = getCachedLocation(tableName, row); - return location != null; - } - - @Override - public void setRegionCachePrefetch(final byte[] tableName, - final boolean enable) { - if (!enable) { - regionCachePrefetchDisabledTables.add(Bytes.mapKey(tableName)); - } - else { - regionCachePrefetchDisabledTables.remove(Bytes.mapKey(tableName)); - } - } - - @Override - public boolean getRegionCachePrefetch(final byte[] tableName) { - return !regionCachePrefetchDisabledTables.contains(Bytes.mapKey(tableName)); - } - - @Override - public void abort(final String msg, Throwable t) { - if (t instanceof KeeperException.SessionExpiredException - && keepAliveZookeeper != null) { - synchronized (masterAndZKLock) { - if (keepAliveZookeeper != null) { - LOG.warn("This client just lost it's session with ZooKeeper," + - " closing it." + - " It will be recreated next time someone needs it", t); - closeZooKeeperWatcher(); - } - } - }else { - if (t != null) { - LOG.fatal(msg, t); - } else { - LOG.fatal(msg); - } - this.aborted = true; - this.closed = true; - } - } - - @Override - public boolean isClosed() { - return this.closed; - } - - @Override - public boolean isAborted(){ - return this.aborted; - } - - @Override - public int getCurrentNrHRS() throws IOException { - ZooKeeperKeepAliveConnection zkw = getKeepAliveZooKeeperWatcher(); - - try { - // We go to zk rather than to master to get count of regions to avoid - // HTable having a Master dependency. 
See HBase-2828 - return ZKUtil.getNumberOfChildren(zkw, zkw.rsZNode); - } catch (KeeperException ke) { - throw new IOException("Unexpected ZooKeeper exception", ke); - } finally { - zkw.close(); - } - } - - public void stopProxyOnClose(boolean stopProxy) { - this.stopProxy = stopProxy; - } - - /** - * Increment this client's reference count. - */ - void incCount() { - ++refCount; - } - - /** - * Decrement this client's reference count. - */ - void decCount() { - if (refCount > 0) { - --refCount; - } - } - - /** - * Return if this client has no reference - * - * @return true if this client has no reference; false otherwise - */ - boolean isZeroReference() { - return refCount == 0; - } - - void close(boolean stopProxy) { - if (this.closed) { - return; - } - delayedClosing.stop("Closing connection"); - if (stopProxy) { - closeMaster(); - for (Map i : servers.values()) { - for (VersionedProtocol server: i.values()) { - HBaseClientRPC.stopProxy(server); - } - } - } - closeZooKeeperWatcher(); - this.servers.clear(); - this.closed = true; - } - - @Override - public void close() { - if (managed) { - HConnectionManager.deleteConnection(this, stopProxy, false); - } else { - close(true); - } - } - - /** - * Close the connection for good, regardless of what the current value of - * {@link #refCount} is. Ideally, {@link #refCount} should be zero at this - * point, which would be the case if all of its consumers close the - * connection. However, on the off chance that someone is unable to close - * the connection, perhaps because it bailed out prematurely, the method - * below will ensure that this {@link HConnection} instance is cleaned up. - * Caveat: The JVM may take an unknown amount of time to call finalize on an - * unreachable object, so our hope is that every consumer cleans up after - * itself, like any good citizen. - */ - @Override - protected void finalize() throws Throwable { - super.finalize(); - // Pretend as if we are about to release the last remaining reference - refCount = 1; - close(); - } - - @Override - public HTableDescriptor[] listTables() throws IOException { - MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitor(); - try { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(null); - return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req)); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - } - - @Override - public HTableDescriptor[] getHTableDescriptors(List tableNames) throws IOException { - if (tableNames == null || tableNames.isEmpty()) return new HTableDescriptor[0]; - MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitor(); - try { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(tableNames); - return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req)); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - } - - /** - * Connects to the master to get the table descriptor. - * @param tableName table name - * @return - * @throws IOException if the connection to master fails or if the table - * is not found. 
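For illustration, a rough sketch of how the master-monitor backed lookups above might be used by a caller (the connection and the table name are assumed):

void printTableDescriptors(HConnection conn) throws IOException {
  for (HTableDescriptor htd : conn.listTables()) {
    System.out.println(htd.getNameAsString());
  }
  // Single-table lookup; throws TableNotFoundException for an unknown name.
  HTableDescriptor single = conn.getHTableDescriptor(Bytes.toBytes("someTable"));
}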
- */ - @Override - public HTableDescriptor getHTableDescriptor(final byte[] tableName) - throws IOException { - if (tableName == null || tableName.length == 0) return null; - if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { - return new UnmodifyableHTableDescriptor(HTableDescriptor.ROOT_TABLEDESC); - } - if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { - return HTableDescriptor.META_TABLEDESC; - } - MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitor(); - GetTableDescriptorsResponse htds; - try { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(null); - htds = master.getTableDescriptors(null, req); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - for (TableSchema ts : htds.getTableSchemaList()) { - if (Bytes.equals(tableName, ts.getName().toByteArray())) { - return HTableDescriptor.convert(ts); - } - } - throw new TableNotFoundException(Bytes.toString(tableName)); - } - } - - /** - * Set the number of retries to use serverside when trying to communicate - * with another server over {@link HConnection}. Used updating catalog - * tables, etc. Call this method before we create any Connections. - * @param c The Configuration instance to set the retries into. - * @param log Used to log what we set in here. - */ - public static void setServerSideHConnectionRetries(final Configuration c, - final Log log) { - int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); - // Go big. Multiply by 10. If we can't get to meta after this many retries - // then something seriously wrong. - int serversideMultiplier = - c.getInt("hbase.client.serverside.retries.multiplier", 10); - int retries = hcRetries * serversideMultiplier; - c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries); - log.debug("HConnection retries=" + retries); - } -} - diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTable.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTable.java deleted file mode 100644 index 17d7e15..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ /dev/null @@ -1,1300 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.client; - -import java.io.Closeable; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.TreeMap; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; -import org.apache.hadoop.hbase.client.coprocessor.Batch; -import org.apache.hadoop.hbase.filter.BinaryComparator; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; -import org.apache.hadoop.hbase.ipc.RegionCoprocessorRpcChannel; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.RequestConverter; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.LockRowRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.LockRowResponse; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.UnlockRowRequest; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.Threads; - -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; - -/** - *
<p>Used to communicate with a single HBase table. - * - *
<p>This class is not thread safe for reads nor write. - * - *
<p>In case of writes (Put, Delete), the underlying write buffer can - * be corrupted if multiple threads contend over a single HTable instance. - * - *
<p>In case of reads, some fields used by a Scan are shared among all threads. - * The HTable implementation can either not contract to be safe in case of a Get - * - *
<p>To access a table in a multi threaded environment, please consider - * using the {@link HTablePool} class to create your HTable instances. - * - *
<p>Instances of HTable passed the same {@link Configuration} instance will - * share connections to servers out on the cluster and to the zookeeper ensemble - * as well as caches of region locations. This is usually a *good* thing and it - * is recommended to reuse the same configuration object for all your tables. - * This happens because they will all share the same underlying - * {@link HConnection} instance. See {@link HConnectionManager} for more on - * how this mechanism works. - * - *
<p>{@link HConnection} will read most of the - * configuration it needs from the passed {@link Configuration} on initial - * construction. Thereafter, for settings such as - * hbase.client.pause, hbase.client.retries.number, - * and hbase.client.rpc.maxattempts updating their values in the - * passed {@link Configuration} subsequent to {@link HConnection} construction - * will go unnoticed. To run with changed values, make a new - * {@link HTable} passing a new {@link Configuration} instance that has the - * new configuration. - * - *
<p>
      Note that this class implements the {@link Closeable} interface. When a - * HTable instance is no longer required, it *should* be closed in order to ensure - * that the underlying resources are promptly released. Please note that the close - * method can throw java.io.IOException that must be handled. - * - * @see HBaseAdmin for create, drop, list, enable and disable of tables. - * @see HConnection - * @see HConnectionManager - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class HTable implements HTableInterface { - private static final Log LOG = LogFactory.getLog(HTable.class); - private HConnection connection; - private final byte [] tableName; - private volatile Configuration configuration; - private final ArrayList writeBuffer = new ArrayList(); - private long writeBufferSize; - private boolean clearBufferOnFail; - private boolean autoFlush; - private long currentWriteBufferSize; - protected int scannerCaching; - private int maxKeyValueSize; - private ExecutorService pool; // For Multi - private boolean closed; - private int operationTimeout; - private static final int DOPUT_WB_CHECK = 10; // i.e., doPut checks the writebuffer every X Puts. - private final boolean cleanupPoolOnClose; // shutdown the pool in close() - private final boolean cleanupConnectionOnClose; // close the connection in close() - - /** - * Creates an object to access a HBase table. - * Shares zookeeper connection and other resources with other HTable instances - * created with the same conf instance. Uses already-populated - * region cache if one is available, populated by any other HTable instances - * sharing this conf instance. Recommended. - * @param conf Configuration object to use. - * @param tableName Name of the table. - * @throws IOException if a remote or network exception occurs - */ - public HTable(Configuration conf, final String tableName) - throws IOException { - this(conf, Bytes.toBytes(tableName)); - } - - - /** - * Creates an object to access a HBase table. - * Shares zookeeper connection and other resources with other HTable instances - * created with the same conf instance. Uses already-populated - * region cache if one is available, populated by any other HTable instances - * sharing this conf instance. Recommended. - * @param conf Configuration object to use. - * @param tableName Name of the table. - * @throws IOException if a remote or network exception occurs - */ - public HTable(Configuration conf, final byte [] tableName) - throws IOException { - this.tableName = tableName; - this.cleanupPoolOnClose = this.cleanupConnectionOnClose = true; - if (conf == null) { - this.connection = null; - return; - } - this.connection = HConnectionManager.getConnection(conf); - this.configuration = conf; - - int maxThreads = conf.getInt("hbase.htable.threads.max", Integer.MAX_VALUE); - if (maxThreads == 0) { - maxThreads = 1; // is there a better default? - } - long keepAliveTime = conf.getLong("hbase.htable.threads.keepalivetime", 60); - - // Using the "direct handoff" approach, new threads will only be created - // if it is necessary and will grow unbounded. This could be bad but in HCM - // we only create as many Runnables as there are region servers. It means - // it also scales when new region servers are added. 
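// In other words (illustrative): a SynchronousQueue holds no queued tasks, so each
// submit() is handed straight to an idle worker or, failing that, to a newly
// created thread (up to maxThreads); allowCoreThreadTimeOut(true) plus the
// keep-alive time lets idle workers die off again afterwards.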
- this.pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS, - new SynchronousQueue(), Threads.newDaemonThreadFactory("hbase-table")); - ((ThreadPoolExecutor) this.pool).allowCoreThreadTimeOut(true); - - this.finishSetup(); - } - - /** - * Creates an object to access a HBase table. - * Shares zookeeper connection and other resources with other HTable instances - * created with the same conf instance. Uses already-populated - * region cache if one is available, populated by any other HTable instances - * sharing this conf instance. - * Use this constructor when the ExecutorService is externally managed. - * @param conf Configuration object to use. - * @param tableName Name of the table. - * @param pool ExecutorService to be used. - * @throws IOException if a remote or network exception occurs - */ - public HTable(Configuration conf, final byte[] tableName, final ExecutorService pool) - throws IOException { - this.connection = HConnectionManager.getConnection(conf); - this.configuration = conf; - this.pool = pool; - this.tableName = tableName; - this.cleanupPoolOnClose = false; - this.cleanupConnectionOnClose = true; - - this.finishSetup(); - } - - /** - * Creates an object to access a HBase table. - * Shares zookeeper connection and other resources with other HTable instances - * created with the same connection instance. - * Use this constructor when the ExecutorService and HConnection instance are - * externally managed. - * @param tableName Name of the table. - * @param connection HConnection to be used. - * @param pool ExecutorService to be used. - * @throws IOException if a remote or network exception occurs - */ - public HTable(final byte[] tableName, final HConnection connection, - final ExecutorService pool) throws IOException { - if (pool == null || pool.isShutdown()) { - throw new IllegalArgumentException("Pool is null or shut down."); - } - if (connection == null || connection.isClosed()) { - throw new IllegalArgumentException("Connection is null or closed."); - } - this.tableName = tableName; - this.cleanupPoolOnClose = this.cleanupConnectionOnClose = false; - this.connection = connection; - this.configuration = connection.getConfiguration(); - this.pool = pool; - - this.finishSetup(); - } - - /** - * setup this HTable's parameter based on the passed configuration - * @param conf - */ - private void finishSetup() throws IOException { - this.connection.locateRegion(tableName, HConstants.EMPTY_START_ROW); - this.operationTimeout = HTableDescriptor.isMetaTable(tableName) ? HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT - : this.configuration.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - this.writeBufferSize = this.configuration.getLong( - "hbase.client.write.buffer", 2097152); - this.clearBufferOnFail = true; - this.autoFlush = true; - this.currentWriteBufferSize = 0; - this.scannerCaching = this.configuration.getInt( - HConstants.HBASE_CLIENT_SCANNER_CACHING, - HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); - - this.maxKeyValueSize = this.configuration.getInt( - "hbase.client.keyvalue.maxsize", -1); - this.closed = false; - } - - /** - * {@inheritDoc} - */ - @Override - public Configuration getConfiguration() { - return configuration; - } - - /** - * Tells whether or not a table is enabled or not. This method creates a - * new HBase configuration, so it might make your unit tests fail due to - * incorrect ZK client port. - * @param tableName Name of table to check. 
- * @return {@code true} if table is online. - * @throws IOException if a remote or network exception occurs - * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])} - */ - @Deprecated - public static boolean isTableEnabled(String tableName) throws IOException { - return isTableEnabled(Bytes.toBytes(tableName)); - } - - /** - * Tells whether or not a table is enabled or not. This method creates a - * new HBase configuration, so it might make your unit tests fail due to - * incorrect ZK client port. - * @param tableName Name of table to check. - * @return {@code true} if table is online. - * @throws IOException if a remote or network exception occurs - * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])} - */ - @Deprecated - public static boolean isTableEnabled(byte[] tableName) throws IOException { - return isTableEnabled(HBaseConfiguration.create(), tableName); - } - - /** - * Tells whether or not a table is enabled or not. - * @param conf The Configuration object to use. - * @param tableName Name of table to check. - * @return {@code true} if table is online. - * @throws IOException if a remote or network exception occurs - * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])} - */ - @Deprecated - public static boolean isTableEnabled(Configuration conf, String tableName) - throws IOException { - return isTableEnabled(conf, Bytes.toBytes(tableName)); - } - - /** - * Tells whether or not a table is enabled or not. - * @param conf The Configuration object to use. - * @param tableName Name of table to check. - * @return {@code true} if table is online. - * @throws IOException if a remote or network exception occurs - * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[] tableName)} - */ - @Deprecated - public static boolean isTableEnabled(Configuration conf, - final byte[] tableName) throws IOException { - return HConnectionManager.execute(new HConnectable(conf) { - @Override - public Boolean connect(HConnection connection) throws IOException { - return connection.isTableEnabled(tableName); - } - }); - } - - /** - * Find region location hosting passed row using cached info - * @param row Row to find. - * @return The location of the given row. - * @throws IOException if a remote or network exception occurs - */ - public HRegionLocation getRegionLocation(final String row) - throws IOException { - return connection.getRegionLocation(tableName, Bytes.toBytes(row), false); - } - - /** - * Finds the region on which the given row is being served. - * @param row Row to find. - * @return Location of the row. - * @throws IOException if a remote or network exception occurs - * @deprecated use {@link #getRegionLocation(byte [], boolean)} instead - */ - public HRegionLocation getRegionLocation(final byte [] row) - throws IOException { - return connection.getRegionLocation(tableName, row, false); - } - - /** - * Finds the region on which the given row is being served. - * @param row Row to find. - * @param reload whether or not to reload information or just use cached - * information - * @return Location of the row. - * @throws IOException if a remote or network exception occurs - */ - public HRegionLocation getRegionLocation(final byte [] row, boolean reload) - throws IOException { - return connection.getRegionLocation(tableName, row, reload); - } - - /** - * {@inheritDoc} - */ - @Override - public byte [] getTableName() { - return this.tableName; - } - - /** - * INTERNAL Used by unit tests and tools to do low-level - * manipulations. - * @return An HConnection instance. 
- * @deprecated This method will be changed from public to package protected. - */ - // TODO(tsuna): Remove this. Unit tests shouldn't require public helpers. - @Deprecated - public HConnection getConnection() { - return this.connection; - } - - /** - * Gets the number of rows that a scanner will fetch at once. - *
<p>
      - * The default value comes from {@code hbase.client.scanner.caching}. - * @deprecated Use {@link Scan#setCaching(int)} and {@link Scan#getCaching()} - */ - @Deprecated - public int getScannerCaching() { - return scannerCaching; - } - - /** - * Sets the number of rows that a scanner will fetch at once. - *
<p>
      - * This will override the value specified by - * {@code hbase.client.scanner.caching}. - * Increasing this value will reduce the amount of work needed each time - * {@code next()} is called on a scanner, at the expense of memory use - * (since more rows will need to be maintained in memory by the scanners). - * @param scannerCaching the number of rows a scanner will fetch at once. - * @deprecated Use {@link Scan#setCaching(int)} - */ - @Deprecated - public void setScannerCaching(int scannerCaching) { - this.scannerCaching = scannerCaching; - } - - /** - * {@inheritDoc} - */ - @Override - public HTableDescriptor getTableDescriptor() throws IOException { - return new UnmodifyableHTableDescriptor( - this.connection.getHTableDescriptor(this.tableName)); - } - - /** - * Gets the starting row key for every region in the currently open table. - *
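For illustration, a hedged sketch of the per-scan alternative referenced by the deprecation notes above (the table instance and the caching value are assumed):

ResultScanner scanWithCaching(HTable table) throws IOException {
  Scan scan = new Scan();
  scan.setCaching(500);           // rows fetched per round-trip; value assumed
  return table.getScanner(scan);  // overrides hbase.client.scanner.caching for this scan only
}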
<p>
      - * This is mainly useful for the MapReduce integration. - * @return Array of region starting row keys - * @throws IOException if a remote or network exception occurs - */ - public byte [][] getStartKeys() throws IOException { - return getStartEndKeys().getFirst(); - } - - /** - * Gets the ending row key for every region in the currently open table. - *
<p>
      - * This is mainly useful for the MapReduce integration. - * @return Array of region ending row keys - * @throws IOException if a remote or network exception occurs - */ - public byte[][] getEndKeys() throws IOException { - return getStartEndKeys().getSecond(); - } - - /** - * Gets the starting and ending row keys for every region in the currently - * open table. - *
<p>
      - * This is mainly useful for the MapReduce integration. - * @return Pair of arrays of region starting and ending row keys - * @throws IOException if a remote or network exception occurs - */ - public Pair getStartEndKeys() throws IOException { - NavigableMap regions = getRegionLocations(); - final List startKeyList = new ArrayList(regions.size()); - final List endKeyList = new ArrayList(regions.size()); - - for (HRegionInfo region : regions.keySet()) { - startKeyList.add(region.getStartKey()); - endKeyList.add(region.getEndKey()); - } - - return new Pair( - startKeyList.toArray(new byte[startKeyList.size()][]), - endKeyList.toArray(new byte[endKeyList.size()][])); - } - - /** - * Gets all the regions and their address for this table. - *
<p>
      - * This is mainly useful for the MapReduce integration. - * @return A map of HRegionInfo with it's server address - * @throws IOException if a remote or network exception occurs - */ - public NavigableMap getRegionLocations() throws IOException { - // TODO: Odd that this returns a Map of HRI to SN whereas getRegionLocation, singular, returns an HRegionLocation. - return MetaScanner.allTableRegions(getConfiguration(), getTableName(), false); - } - - /** - * Get the corresponding regions for an arbitrary range of keys. - *
<p>
      - * @param startKey Starting row in range, inclusive - * @param endKey Ending row in range, exclusive - * @return A list of HRegionLocations corresponding to the regions that - * contain the specified range - * @throws IOException if a remote or network exception occurs - */ - public List getRegionsInRange(final byte [] startKey, - final byte [] endKey) throws IOException { - final boolean endKeyIsEndOfTable = Bytes.equals(endKey, - HConstants.EMPTY_END_ROW); - if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) { - throw new IllegalArgumentException( - "Invalid range: " + Bytes.toStringBinary(startKey) + - " > " + Bytes.toStringBinary(endKey)); - } - final List regionList = new ArrayList(); - byte [] currentKey = startKey; - do { - HRegionLocation regionLocation = getRegionLocation(currentKey, false); - regionList.add(regionLocation); - currentKey = regionLocation.getRegionInfo().getEndKey(); - } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) && - (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0)); - return regionList; - } - - /** - * {@inheritDoc} - */ - @Override - public Result getRowOrBefore(final byte[] row, final byte[] family) - throws IOException { - return new ServerCallable(connection, tableName, row, operationTimeout) { - public Result call() throws IOException { - return ProtobufUtil.getRowOrBefore(server, - location.getRegionInfo().getRegionName(), row, family); - } - }.withRetries(); - } - - /** - * {@inheritDoc} - */ - @Override - public ResultScanner getScanner(final Scan scan) throws IOException { - if (scan.getCaching() <= 0) { - scan.setCaching(getScannerCaching()); - } - return new ClientScanner(getConfiguration(), scan, getTableName(), - this.connection); - } - - /** - * {@inheritDoc} - */ - @Override - public ResultScanner getScanner(byte [] family) throws IOException { - Scan scan = new Scan(); - scan.addFamily(family); - return getScanner(scan); - } - - /** - * {@inheritDoc} - */ - @Override - public ResultScanner getScanner(byte [] family, byte [] qualifier) - throws IOException { - Scan scan = new Scan(); - scan.addColumn(family, qualifier); - return getScanner(scan); - } - - /** - * {@inheritDoc} - */ - @Override - public Result get(final Get get) throws IOException { - return new ServerCallable(connection, tableName, get.getRow(), operationTimeout) { - public Result call() throws IOException { - return ProtobufUtil.get(server, - location.getRegionInfo().getRegionName(), get); - } - }.withRetries(); - } - - /** - * {@inheritDoc} - */ - @Override - public Result[] get(List gets) throws IOException { - try { - Object [] r1 = batch((List)gets); - - // translate. 
- Result [] results = new Result[r1.length]; - int i=0; - for (Object o : r1) { - // batch ensures if there is a failure we get an exception instead - results[i++] = (Result) o; - } - - return results; - } catch (InterruptedException e) { - throw new IOException(e); - } - } - - @Override - public void batch(final List actions, final Object[] results) - throws InterruptedException, IOException { - connection.processBatchCallback(actions, tableName, pool, results, null); - } - - @Override - public Object[] batch(final List actions) - throws InterruptedException, IOException { - Object[] results = new Object[actions.size()]; - connection.processBatchCallback(actions, tableName, pool, results, null); - return results; - } - - @Override - public void batchCallback( - final List actions, final Object[] results, final Batch.Callback callback) - throws IOException, InterruptedException { - connection.processBatchCallback(actions, tableName, pool, results, callback); - } - - @Override - public Object[] batchCallback( - final List actions, final Batch.Callback callback) throws IOException, - InterruptedException { - Object[] results = new Object[actions.size()]; - connection.processBatchCallback(actions, tableName, pool, results, callback); - return results; - } - - /** - * {@inheritDoc} - */ - @Override - public void delete(final Delete delete) - throws IOException { - new ServerCallable(connection, tableName, delete.getRow(), operationTimeout) { - public Boolean call() throws IOException { - try { - MutateRequest request = RequestConverter.buildMutateRequest( - location.getRegionInfo().getRegionName(), delete); - MutateResponse response = server.mutate(null, request); - return Boolean.valueOf(response.getProcessed()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - }.withRetries(); - } - - /** - * {@inheritDoc} - */ - @Override - public void delete(final List deletes) - throws IOException { - Object[] results = new Object[deletes.size()]; - try { - connection.processBatch((List) deletes, tableName, pool, results); - } catch (InterruptedException e) { - throw new IOException(e); - } finally { - // mutate list so that it is empty for complete success, or contains only failed records - // results are returned in the same order as the requests in list - // walk the list backwards, so we can remove from list without impacting the indexes of earlier members - for (int i = results.length - 1; i>=0; i--) { - // if result is not null, it succeeded - if (results[i] instanceof Result) { - deletes.remove(i); - } - } - } - } - - /** - * {@inheritDoc} - */ - @Override - public void put(final Put put) throws IOException { - doPut(Arrays.asList(put)); - } - - /** - * {@inheritDoc} - */ - @Override - public void put(final List puts) throws IOException { - doPut(puts); - } - - private void doPut(final List puts) throws IOException { - int n = 0; - for (Put put : puts) { - validatePut(put); - writeBuffer.add(put); - currentWriteBufferSize += put.heapSize(); - - // we need to periodically see if the writebuffer is full instead of waiting until the end of the List - n++; - if (n % DOPUT_WB_CHECK == 0 && currentWriteBufferSize > writeBufferSize) { - flushCommits(); - } - } - if (autoFlush || currentWriteBufferSize > writeBufferSize) { - flushCommits(); - } - } - - /** - * {@inheritDoc} - */ - @Override - public void mutateRow(final RowMutations rm) throws IOException { - new ServerCallable(connection, tableName, rm.getRow(), - operationTimeout) { - public Void call() throws 
IOException { - try { - MultiRequest request = RequestConverter.buildMultiRequest( - location.getRegionInfo().getRegionName(), rm); - server.multi(null, request); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - return null; - } - }.withRetries(); - } - - /** - * {@inheritDoc} - */ - @Override - public Result append(final Append append) throws IOException { - if (append.numFamilies() == 0) { - throw new IOException( - "Invalid arguments to append, no columns specified"); - } - return new ServerCallable(connection, tableName, append.getRow(), operationTimeout) { - public Result call() throws IOException { - try { - MutateRequest request = RequestConverter.buildMutateRequest( - location.getRegionInfo().getRegionName(), append); - MutateResponse response = server.mutate(null, request); - if (!response.hasResult()) return null; - return ProtobufUtil.toResult(response.getResult()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - }.withRetries(); - } - - /** - * {@inheritDoc} - */ - @Override - public Result increment(final Increment increment) throws IOException { - if (!increment.hasFamilies()) { - throw new IOException( - "Invalid arguments to increment, no columns specified"); - } - return new ServerCallable(connection, tableName, increment.getRow(), operationTimeout) { - public Result call() throws IOException { - try { - MutateRequest request = RequestConverter.buildMutateRequest( - location.getRegionInfo().getRegionName(), increment); - MutateResponse response = server.mutate(null, request); - return ProtobufUtil.toResult(response.getResult()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - }.withRetries(); - } - - /** - * {@inheritDoc} - */ - @Override - public long incrementColumnValue(final byte [] row, final byte [] family, - final byte [] qualifier, final long amount) - throws IOException { - return incrementColumnValue(row, family, qualifier, amount, true); - } - - /** - * {@inheritDoc} - */ - @Override - public long incrementColumnValue(final byte [] row, final byte [] family, - final byte [] qualifier, final long amount, final boolean writeToWAL) - throws IOException { - NullPointerException npe = null; - if (row == null) { - npe = new NullPointerException("row is null"); - } else if (family == null) { - npe = new NullPointerException("family is null"); - } else if (qualifier == null) { - npe = new NullPointerException("qualifier is null"); - } - if (npe != null) { - throw new IOException( - "Invalid arguments to incrementColumnValue", npe); - } - return new ServerCallable(connection, tableName, row, operationTimeout) { - public Long call() throws IOException { - try { - MutateRequest request = RequestConverter.buildMutateRequest( - location.getRegionInfo().getRegionName(), row, family, - qualifier, amount, writeToWAL); - MutateResponse response = server.mutate(null, request); - Result result = ProtobufUtil.toResult(response.getResult()); - return Long.valueOf(Bytes.toLong(result.getValue(family, qualifier))); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - }.withRetries(); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean checkAndPut(final byte [] row, - final byte [] family, final byte [] qualifier, final byte [] value, - final Put put) - throws IOException { - return new ServerCallable(connection, tableName, row, operationTimeout) { - public Boolean call() throws IOException { - try { - MutateRequest 
request = RequestConverter.buildMutateRequest( - location.getRegionInfo().getRegionName(), row, family, qualifier, - new BinaryComparator(value), CompareType.EQUAL, put); - MutateResponse response = server.mutate(null, request); - return Boolean.valueOf(response.getProcessed()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - }.withRetries(); - } - - - /** - * {@inheritDoc} - */ - @Override - public boolean checkAndDelete(final byte [] row, - final byte [] family, final byte [] qualifier, final byte [] value, - final Delete delete) - throws IOException { - return new ServerCallable(connection, tableName, row, operationTimeout) { - public Boolean call() throws IOException { - try { - MutateRequest request = RequestConverter.buildMutateRequest( - location.getRegionInfo().getRegionName(), row, family, qualifier, - new BinaryComparator(value), CompareType.EQUAL, delete); - MutateResponse response = server.mutate(null, request); - return Boolean.valueOf(response.getProcessed()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - }.withRetries(); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean exists(final Get get) throws IOException { - return new ServerCallable(connection, tableName, get.getRow(), operationTimeout) { - public Boolean call() throws IOException { - try { - GetRequest request = RequestConverter.buildGetRequest( - location.getRegionInfo().getRegionName(), get, true); - GetResponse response = server.get(null, request); - return response.getExists(); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - }.withRetries(); - } - - /** - * {@inheritDoc} - */ - @Override - public void flushCommits() throws IOException { - Object[] results = new Object[writeBuffer.size()]; - boolean success = false; - try { - this.connection.processBatch(writeBuffer, tableName, pool, results); - success = true; - } catch (InterruptedException e) { - throw new InterruptedIOException(e.getMessage()); - } finally { - // mutate list so that it is empty for complete success, or contains - // only failed records. Results are returned in the same order as the - // requests in list. Walk the list backwards, so we can remove from list - // without impacting the indexes of earlier members - currentWriteBufferSize = 0; - if (success || clearBufferOnFail) { - writeBuffer.clear(); - } else { - for (int i = results.length - 1; i >= 0; i--) { - if (results[i] instanceof Result) { - writeBuffer.remove(i); - } else { - currentWriteBufferSize += writeBuffer.get(i).heapSize(); - } - } - } - } - } - - /** - * Process a mixed batch of Get, Put and Delete actions. All actions for a - * RegionServer are forwarded in one RPC call. Queries are executed in parallel. - * - * @param list The collection of actions. - * @param results An empty array, same size as list. If an exception is thrown, - * you can test here for partial results, and to determine which actions - * processed successfully. - * @throws IOException if there are problems talking to META. Per-item - * exceptions are stored in the results array. - */ - public void processBatchCallback( - final List list, final Object[] results, final Batch.Callback callback) - throws IOException, InterruptedException { - connection.processBatchCallback(list, tableName, pool, results, callback); - } - - - /** - * Parameterized batch processing, allowing varying return types for different - * {@link Row} implementations. 
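For illustration, a rough sketch of the callback-style batch described above (the table and the action list are assumed; the generic parameters follow the usual Batch.Callback shape):

void batchWithCallback(HTable table, List<Row> actions)
    throws IOException, InterruptedException {
  Object[] results = new Object[actions.size()];
  table.batchCallback(actions, results, new Batch.Callback<Object>() {
    @Override
    public void update(byte[] region, byte[] row, Object result) {
      // Invoked as each region server's portion of the batch completes.
    }
  });
}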
- */ - public void processBatch(final List list, final Object[] results) - throws IOException, InterruptedException { - - this.processBatchCallback(list, results, null); - } - - - @Override - public void close() throws IOException { - if (this.closed) { - return; - } - flushCommits(); - if (cleanupPoolOnClose) { - this.pool.shutdown(); - } - if (cleanupConnectionOnClose) { - if (this.connection != null) { - this.connection.close(); - } - } - this.closed = true; - } - - // validate for well-formedness - private void validatePut(final Put put) throws IllegalArgumentException{ - if (put.isEmpty()) { - throw new IllegalArgumentException("No columns to insert"); - } - if (maxKeyValueSize > 0) { - for (List list : put.getFamilyMap().values()) { - for (KeyValue kv : list) { - if (kv.getLength() > maxKeyValueSize) { - throw new IllegalArgumentException("KeyValue size too large"); - } - } - } - } - } - - /** - * {@inheritDoc} - */ - @Override - public RowLock lockRow(final byte [] row) - throws IOException { - return new ServerCallable(connection, tableName, row, operationTimeout) { - public RowLock call() throws IOException { - try { - LockRowRequest request = RequestConverter.buildLockRowRequest( - location.getRegionInfo().getRegionName(), row); - LockRowResponse response = server.lockRow(null, request); - return new RowLock(row, response.getLockId()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - }.withRetries(); - } - - /** - * {@inheritDoc} - */ - @Override - public void unlockRow(final RowLock rl) - throws IOException { - new ServerCallable(connection, tableName, rl.getRow(), operationTimeout) { - public Boolean call() throws IOException { - try { - UnlockRowRequest request = RequestConverter.buildUnlockRowRequest( - location.getRegionInfo().getRegionName(), rl.getLockId()); - server.unlockRow(null, request); - return Boolean.TRUE; - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - }.withRetries(); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean isAutoFlush() { - return autoFlush; - } - - /** - * See {@link #setAutoFlush(boolean, boolean)} - * - * @param autoFlush - * Whether or not to enable 'auto-flush'. - */ - public void setAutoFlush(boolean autoFlush) { - setAutoFlush(autoFlush, autoFlush); - } - - /** - * Turns 'auto-flush' on or off. - *
      - * When enabled (default), {@link Put} operations don't get buffered/delayed - * and are immediately executed. Failed operations are not retried. This is - * slower but safer. - *
      - * Turning off {@link #autoFlush} means that multiple {@link Put}s will be - * accepted before any RPC is actually sent to do the write operations. If the - * application dies before pending writes get flushed to HBase, data will be - * lost. - *
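A minimal sketch of the buffered-write pattern this paragraph describes, assuming a hypothetical table "t1" with a family "cf":

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BufferedWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "t1");        // hypothetical table name
        table.setAutoFlush(false);                    // buffer puts on the client
        for (int i = 0; i < 1000; i++) {
          Put put = new Put(Bytes.toBytes("row-" + i));
          put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v" + i));
          table.put(put);                             // may not issue an RPC yet
        }
        table.flushCommits();                         // drain the write buffer
        table.close();                                // close() flushes as well
      }
    }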
      - * When you turn {@link #autoFlush} off, you should also consider the - * {@link #clearBufferOnFail} option. By default, asynchronous {@link Put} - * requests will be retried on failure until successful. However, this can - * pollute the writeBuffer and slow down batching performance. Additionally, - * you may want to issue a number of Put requests and call - * {@link #flushCommits()} as a barrier. In both use cases, consider setting - * clearBufferOnFail to true to erase the buffer after {@link #flushCommits()} - * has been called, regardless of success. - * - * @param autoFlush - * Whether or not to enable 'auto-flush'. - * @param clearBufferOnFail - * Whether to keep Put failures in the writeBuffer - * @see #flushCommits - */ - public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { - this.autoFlush = autoFlush; - this.clearBufferOnFail = autoFlush || clearBufferOnFail; - } - - /** - * Returns the maximum size in bytes of the write buffer for this HTable. - *
      - * The default value comes from the configuration parameter - * {@code hbase.client.write.buffer}. - * @return The size of the write buffer in bytes. - */ - public long getWriteBufferSize() { - return writeBufferSize; - } - - /** - * Sets the size of the buffer in bytes. - *
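Either knob can be used to size the buffer; a short sketch with the same hypothetical table, and sizes chosen only for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;

    public class WriteBufferSizeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Default picked up by every HTable built from this configuration.
        conf.setLong("hbase.client.write.buffer", 8 * 1024 * 1024);
        HTable table = new HTable(conf, "t1");        // hypothetical table name
        // Per-instance override; shrinking below the buffered data size flushes.
        table.setWriteBufferSize(4 * 1024 * 1024);
        System.out.println("write buffer: " + table.getWriteBufferSize() + " bytes");
        table.close();
      }
    }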
      - * If the new size is less than the current amount of data in the - * write buffer, the buffer gets flushed. - * @param writeBufferSize The new write buffer size, in bytes. - * @throws IOException if a remote or network exception occurs. - */ - public void setWriteBufferSize(long writeBufferSize) throws IOException { - this.writeBufferSize = writeBufferSize; - if(currentWriteBufferSize > writeBufferSize) { - flushCommits(); - } - } - - /** - * Returns the write buffer. - * @return The current write buffer. - */ - public ArrayList getWriteBuffer() { - return writeBuffer; - } - - /** - * The pool is used for mutli requests for this HTable - * @return the pool used for mutli - */ - ExecutorService getPool() { - return this.pool; - } - - /** - * Enable or disable region cache prefetch for the table. It will be - * applied for the given table's all HTable instances who share the same - * connection. By default, the cache prefetch is enabled. - * @param tableName name of table to configure. - * @param enable Set to true to enable region cache prefetch. Or set to - * false to disable it. - * @throws IOException - */ - public static void setRegionCachePrefetch(final byte[] tableName, - final boolean enable) throws IOException { - HConnectionManager.execute(new HConnectable(HBaseConfiguration - .create()) { - @Override - public Void connect(HConnection connection) throws IOException { - connection.setRegionCachePrefetch(tableName, enable); - return null; - } - }); - } - - /** - * Enable or disable region cache prefetch for the table. It will be - * applied for the given table's all HTable instances who share the same - * connection. By default, the cache prefetch is enabled. - * @param conf The Configuration object to use. - * @param tableName name of table to configure. - * @param enable Set to true to enable region cache prefetch. Or set to - * false to disable it. - * @throws IOException - */ - public static void setRegionCachePrefetch(final Configuration conf, - final byte[] tableName, final boolean enable) throws IOException { - HConnectionManager.execute(new HConnectable(conf) { - @Override - public Void connect(HConnection connection) throws IOException { - connection.setRegionCachePrefetch(tableName, enable); - return null; - } - }); - } - - /** - * Check whether region cache prefetch is enabled or not for the table. - * @param conf The Configuration object to use. - * @param tableName name of table to check - * @return true if table's region cache prefecth is enabled. Otherwise - * it is disabled. - * @throws IOException - */ - public static boolean getRegionCachePrefetch(final Configuration conf, - final byte[] tableName) throws IOException { - return HConnectionManager.execute(new HConnectable(conf) { - @Override - public Boolean connect(HConnection connection) throws IOException { - return connection.getRegionCachePrefetch(tableName); - } - }); - } - - /** - * Check whether region cache prefetch is enabled or not for the table. - * @param tableName name of table to check - * @return true if table's region cache prefecth is enabled. Otherwise - * it is disabled. 
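A short sketch of the region cache prefetch toggles defined here, again with a hypothetical table name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PrefetchSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        byte[] tableName = Bytes.toBytes("t1");       // hypothetical table name
        // Disable prefetch for every HTable sharing this connection, then verify.
        HTable.setRegionCachePrefetch(conf, tableName, false);
        boolean enabled = HTable.getRegionCachePrefetch(conf, tableName);
        System.out.println("region cache prefetch enabled: " + enabled);
      }
    }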
- * @throws IOException - */ - public static boolean getRegionCachePrefetch(final byte[] tableName) throws IOException { - return HConnectionManager.execute(new HConnectable( - HBaseConfiguration.create()) { - @Override - public Boolean connect(HConnection connection) throws IOException { - return connection.getRegionCachePrefetch(tableName); - } - }); - } - - /** - * Explicitly clears the region cache to fetch the latest value from META. - * This is a power user function: avoid unless you know the ramifications. - */ - public void clearRegionCache() { - this.connection.clearRegionCache(); - } - - /** - * {@inheritDoc} - */ - public CoprocessorRpcChannel coprocessorService(byte[] row) { - return new RegionCoprocessorRpcChannel(connection, tableName, row); - } - - /** - * {@inheritDoc} - */ - @Override - public Map coprocessorService(final Class service, - byte[] startKey, byte[] endKey, final Batch.Call callable) - throws ServiceException, Throwable { - final Map results = Collections.synchronizedMap( - new TreeMap(Bytes.BYTES_COMPARATOR)); - coprocessorService(service, startKey, endKey, callable, new Batch.Callback() { - public void update(byte[] region, byte[] row, R value) { - results.put(region, value); - } - }); - return results; - } - - /** - * {@inheritDoc} - */ - @Override - public void coprocessorService(final Class service, - byte[] startKey, byte[] endKey, final Batch.Call callable, - final Batch.Callback callback) throws ServiceException, Throwable { - - // get regions covered by the row range - List keys = getStartKeysInRange(startKey, endKey); - - Map> futures = - new TreeMap>(Bytes.BYTES_COMPARATOR); - for (final byte[] r : keys) { - final RegionCoprocessorRpcChannel channel = - new RegionCoprocessorRpcChannel(connection, tableName, r); - Future future = pool.submit( - new Callable() { - public R call() throws Exception { - T instance = ProtobufUtil.newServiceStub(service, channel); - R result = callable.call(instance); - byte[] region = channel.getLastRegion(); - if (callback != null) { - callback.update(region, r, result); - } - return result; - } - }); - futures.put(r, future); - } - for (Map.Entry> e : futures.entrySet()) { - try { - e.getValue().get(); - } catch (ExecutionException ee) { - LOG.warn("Error calling coprocessor service " + service.getName() + " for row " - + Bytes.toStringBinary(e.getKey()), ee); - throw ee.getCause(); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - throw new InterruptedIOException("Interrupted calling coprocessor service " + service.getName() - + " for row " + Bytes.toStringBinary(e.getKey())) - .initCause(ie); - } - } - } - - private List getStartKeysInRange(byte[] start, byte[] end) - throws IOException { - Pair startEndKeys = getStartEndKeys(); - byte[][] startKeys = startEndKeys.getFirst(); - byte[][] endKeys = startEndKeys.getSecond(); - - if (start == null) { - start = HConstants.EMPTY_START_ROW; - } - if (end == null) { - end = HConstants.EMPTY_END_ROW; - } - - List rangeKeys = new ArrayList(); - for (int i=0; i= 0 ) { - if (Bytes.equals(endKeys[i], HConstants.EMPTY_END_ROW) || - Bytes.compareTo(start, endKeys[i]) < 0) { - rangeKeys.add(start); - } - } else if (Bytes.equals(end, HConstants.EMPTY_END_ROW) || - Bytes.compareTo(startKeys[i], end) <= 0) { - rangeKeys.add(startKeys[i]); - } else { - break; // past stop - } - } - - return rangeKeys; - } - - public void setOperationTimeout(int operationTimeout) { - this.operationTimeout = operationTimeout; - } - - public int getOperationTimeout() { - return 
operationTimeout; - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java deleted file mode 100644 index 1515b37..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; - -import java.io.IOException; - -/** - * Factory for creating HTable instances. - * - * @since 0.21.0 - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class HTableFactory implements HTableInterfaceFactory { - @Override - public HTableInterface createHTableInterface(Configuration config, - byte[] tableName) { - try { - return new HTable(config, tableName); - } catch (IOException ioe) { - throw new RuntimeException(ioe); - } - } - - @Override - public void releaseHTableInterface(HTableInterface table) throws IOException { - table.close(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java deleted file mode 100644 index c5fc356..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java +++ /dev/null @@ -1,557 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.client; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.client.coprocessor.Batch; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; - -/** - * Used to communicate with a single HBase table. - * - * @since 0.21.0 - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public interface HTableInterface extends Closeable { - - /** - * Gets the name of this table. - * - * @return the table name. - */ - byte[] getTableName(); - - /** - * Returns the {@link Configuration} object used by this instance. - *
      - * The reference returned is not a copy, so any change made to it will - * affect this instance. - */ - Configuration getConfiguration(); - - /** - * Gets the {@link HTableDescriptor table descriptor} for this table. - * @throws IOException if a remote or network exception occurs. - */ - HTableDescriptor getTableDescriptor() throws IOException; - - /** - * Test for the existence of columns in the table, as specified in the Get. - *
      - * - * This will return true if the Get matches one or more keys, false if not. - *
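A tiny sketch of that existence check, assuming a hypothetical table "t1" and column cf:q:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ExistsSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(HBaseConfiguration.create(), "t1");
        Get get = new Get(Bytes.toBytes("r1"));
        get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
        // Only a boolean crosses the wire; no cell data is shipped back.
        boolean found = table.exists(get);
        System.out.println("row/column present: " + found);
        table.close();
      }
    }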
      - * - * This is a server-side call so it prevents any data from being transfered to - * the client. - * - * @param get the Get - * @return true if the specified Get matches one or more keys, false if not - * @throws IOException e - */ - boolean exists(Get get) throws IOException; - - /** - * Method that does a batch call on Deletes, Gets and Puts. The ordering of - * execution of the actions is not defined. Meaning if you do a Put and a - * Get in the same {@link #batch} call, you will not necessarily be - * guaranteed that the Get returns what the Put had put. - * - * @param actions list of Get, Put, Delete objects - * @param results Empty Object[], same size as actions. Provides access to partial - * results, in case an exception is thrown. A null in the result array means that - * the call for that action failed, even after retries - * @throws IOException - * @since 0.90.0 - */ - void batch(final List actions, final Object[] results) throws IOException, InterruptedException; - - /** - * Same as {@link #batch(List, Object[])}, but returns an array of - * results instead of using a results parameter reference. - * - * @param actions list of Get, Put, Delete objects - * @return the results from the actions. A null in the return array means that - * the call for that action failed, even after retries - * @throws IOException - * @since 0.90.0 - */ - Object[] batch(final List actions) throws IOException, InterruptedException; - - /** - * Same as {@link #batch(List, Object[])}, but with a callback. - * @since 0.96.0 - */ - public void batchCallback( - final List actions, final Object[] results, final Batch.Callback callback) - throws IOException, InterruptedException; - - - /** - * Same as {@link #batch(List)}, but with a callback. - * @since 0.96.0 - */ - public Object[] batchCallback( - List actions, Batch.Callback callback) throws IOException, - InterruptedException; - - /** - * Extracts certain cells from a given row. - * @param get The object that specifies what data to fetch and from which row. - * @return The data coming from the specified row, if it exists. If the row - * specified doesn't exist, the {@link Result} instance returned won't - * contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}. - * @throws IOException if a remote or network exception occurs. - * @since 0.20.0 - */ - Result get(Get get) throws IOException; - - /** - * Extracts certain cells from the given rows, in batch. - * - * @param gets The objects that specify what data to fetch and from which rows. - * - * @return The data coming from the specified rows, if it exists. If the row - * specified doesn't exist, the {@link Result} instance returned won't - * contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}. - * If there are any failures even after retries, there will be a null in - * the results array for those Gets, AND an exception will be thrown. - * @throws IOException if a remote or network exception occurs. - * - * @since 0.90.0 - */ - Result[] get(List gets) throws IOException; - - /** - * Return the row that matches row exactly, - * or the one that immediately precedes it. - * - * @param row A row key. - * @param family Column family to include in the {@link Result}. - * @throws IOException if a remote or network exception occurs. - * @since 0.20.0 - * - * @deprecated As of version 0.92 this method is deprecated without - * replacement. - * getRowOrBefore is used internally to find entries in .META. 
and makes - * various assumptions about the table (which are true for .META. but not - * in general) to be efficient. - */ - Result getRowOrBefore(byte[] row, byte[] family) throws IOException; - - /** - * Returns a scanner on the current table as specified by the {@link Scan} - * object. - * Note that the passed {@link Scan}'s start row and caching properties - * maybe changed. - * - * @param scan A configured {@link Scan} object. - * @return A scanner. - * @throws IOException if a remote or network exception occurs. - * @since 0.20.0 - */ - ResultScanner getScanner(Scan scan) throws IOException; - - /** - * Gets a scanner on the current table for the given family. - * - * @param family The column family to scan. - * @return A scanner. - * @throws IOException if a remote or network exception occurs. - * @since 0.20.0 - */ - ResultScanner getScanner(byte[] family) throws IOException; - - /** - * Gets a scanner on the current table for the given family and qualifier. - * - * @param family The column family to scan. - * @param qualifier The column qualifier to scan. - * @return A scanner. - * @throws IOException if a remote or network exception occurs. - * @since 0.20.0 - */ - ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException; - - - /** - * Puts some data in the table. - *
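The scanner methods above are usually driven in a loop like the following sketch; the table, family, row-key bounds and caching value are all illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(HBaseConfiguration.create(), "t1");
        Scan scan = new Scan(Bytes.toBytes("row-000"), Bytes.toBytes("row-999"));
        scan.addFamily(Bytes.toBytes("cf"));
        scan.setCaching(100);                 // rows fetched per scanner RPC
        ResultScanner scanner = table.getScanner(scan);
        try {
          for (Result result : scanner) {
            byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
            // process value ...
          }
        } finally {
          scanner.close();                    // always release the scanner lease
        }
        table.close();
      }
    }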
      - * If {@link #isAutoFlush isAutoFlush} is false, the update is buffered - * until the internal buffer is full. - * @param put The data to put. - * @throws IOException if a remote or network exception occurs. - * @since 0.20.0 - */ - void put(Put put) throws IOException; - - /** - * Puts some data in the table, in batch. - *
      - * If {@link #isAutoFlush isAutoFlush} is false, the update is buffered - * until the internal buffer is full. - *
      - * This can be used for group commit, or for submitting user defined - * batches. The writeBuffer will be periodically inspected while the List - * is processed, so depending on the List size the writeBuffer may flush - * not at all, or more than once. - * @param puts The list of mutations to apply. The batch put is done by - * aggregating the iteration of the Puts over the write buffer - * at the client-side for a single RPC call. - * @throws IOException if a remote or network exception occurs. - * @since 0.20.0 - */ - void put(List puts) throws IOException; - - /** - * Atomically checks if a row/family/qualifier value matches the expected - * value. If it does, it adds the put. If the passed value is null, the check - * is for the lack of column (ie: non-existance) - * - * @param row to check - * @param family column family to check - * @param qualifier column qualifier to check - * @param value the expected value - * @param put data to put if check succeeds - * @throws IOException e - * @return true if the new put was executed, false otherwise - */ - boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Put put) throws IOException; - - /** - * Deletes the specified cells/row. - * - * @param delete The object that specifies what to delete. - * @throws IOException if a remote or network exception occurs. - * @since 0.20.0 - */ - void delete(Delete delete) throws IOException; - - /** - * Deletes the specified cells/rows in bulk. - * @param deletes List of things to delete. List gets modified by this - * method (in particular it gets re-ordered, so the order in which the elements - * are inserted in the list gives no guarantee as to the order in which the - * {@link Delete}s are executed). - * @throws IOException if a remote or network exception occurs. In that case - * the {@code deletes} argument will contain the {@link Delete} instances - * that have not be successfully applied. - * @since 0.20.1 - */ - void delete(List deletes) throws IOException; - - /** - * Atomically checks if a row/family/qualifier value matches the expected - * value. If it does, it adds the delete. If the passed value is null, the - * check is for the lack of column (ie: non-existance) - * - * @param row to check - * @param family column family to check - * @param qualifier column qualifier to check - * @param value the expected value - * @param delete data to delete if check succeeds - * @throws IOException e - * @return true if the new delete was executed, false otherwise - */ - boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Delete delete) throws IOException; - - /** - * Performs multiple mutations atomically on a single row. Currently - * {@link Put} and {@link Delete} are supported. - * - * @param rm object that specifies the set of mutations to perform atomically - * @throws IOException - */ - public void mutateRow(final RowMutations rm) throws IOException; - - /** - * Appends values to one or more columns within a single row. - *
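A sketch of the conditional put and the atomic multi-mutation described here, with hypothetical row, family and values:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndMutateSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(HBaseConfiguration.create(), "t1");
        byte[] row = Bytes.toBytes("r1");
        byte[] cf = Bytes.toBytes("cf");
        Put put = new Put(row);
        put.add(cf, Bytes.toBytes("state"), Bytes.toBytes("done"));
        // Apply the put only if cf:state currently holds "pending".
        boolean applied = table.checkAndPut(row, cf, Bytes.toBytes("state"),
            Bytes.toBytes("pending"), put);
        System.out.println("conditional put applied: " + applied);
        // Atomically apply a put and a delete to the same row.
        RowMutations mutations = new RowMutations(row);
        mutations.add(put);
        Delete delete = new Delete(row);
        delete.deleteColumn(cf, Bytes.toBytes("old"));
        mutations.add(delete);
        table.mutateRow(mutations);
        table.close();
      }
    }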
      - * This operation does not appear atomic to readers. Appends are done - * under a single row lock, so write operations to a row are synchronized, but - * readers do not take row locks so get and scan operations can see this - * operation partially completed. - * - * @param append object that specifies the columns and amounts to be used - * for the increment operations - * @throws IOException e - * @return values of columns after the append operation (maybe null) - */ - public Result append(final Append append) throws IOException; - - /** - * Increments one or more columns within a single row. - *
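A short sketch of issuing such an append, with hypothetical names and data:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Append;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AppendSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(HBaseConfiguration.create(), "t1");
        Append append = new Append(Bytes.toBytes("r1"));
        append.add(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes(",next-event"));
        // The server appends the value to whatever the cell already holds.
        Result result = table.append(append);
        byte[] newValue = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("log"));
        System.out.println("cell is now: " + Bytes.toString(newValue));
        table.close();
      }
    }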
      - * This operation does not appear atomic to readers. Increments are done - * under a single row lock, so write operations to a row are synchronized, but - * readers do not take row locks so get and scan operations can see this - * operation partially completed. - * - * @param increment object that specifies the columns and amounts to be used - * for the increment operations - * @throws IOException e - * @return values of columns after the increment - */ - public Result increment(final Increment increment) throws IOException; - - /** - * Atomically increments a column value. - *
      - * Equivalent to {@link #incrementColumnValue(byte[], byte[], byte[], - * long, boolean) incrementColumnValue}(row, family, qualifier, amount, - * true)} - * @param row The row that contains the cell to increment. - * @param family The column family of the cell to increment. - * @param qualifier The column qualifier of the cell to increment. - * @param amount The amount to increment the cell with (or decrement, if the - * amount is negative). - * @return The new value, post increment. - * @throws IOException if a remote or network exception occurs. - */ - long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, - long amount) throws IOException; - - /** - * Atomically increments a column value. If the column value already exists - * and is not a big-endian long, this could throw an exception. If the column - * value does not yet exist it is initialized to amount and - * written to the specified column. - * - *
      Setting writeToWAL to false means that in a fail scenario, you will lose - * any increments that have not been flushed. - * @param row The row that contains the cell to increment. - * @param family The column family of the cell to increment. - * @param qualifier The column qualifier of the cell to increment. - * @param amount The amount to increment the cell with (or decrement, if the - * amount is negative). - * @param writeToWAL if {@code true}, the operation will be applied to the - * Write Ahead Log (WAL). This makes the operation slower but safer, as if - * the call returns successfully, it is guaranteed that the increment will - * be safely persisted. When set to {@code false}, the call may return - * successfully before the increment is safely persisted, so it's possible - * that the increment be lost in the event of a failure happening before the - * operation gets persisted. - * @return The new value, post increment. - * @throws IOException if a remote or network exception occurs. - */ - long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, - long amount, boolean writeToWAL) throws IOException; - - /** - * Tells whether or not 'auto-flush' is turned on. - * - * @return {@code true} if 'auto-flush' is enabled (default), meaning - * {@link Put} operations don't get buffered/delayed and are immediately - * executed. - */ - boolean isAutoFlush(); - - /** - * Executes all the buffered {@link Put} operations. - *
      - * This method gets called once automatically for every {@link Put} or batch - * of {@link Put}s (when put(List) is used) when - * {@link #isAutoFlush} is {@code true}. - * @throws IOException if a remote or network exception occurs. - */ - void flushCommits() throws IOException; - - /** - * Releases any resources held or pending changes in internal buffers. - * - * @throws IOException if a remote or network exception occurs. - */ - void close() throws IOException; - - /** - * Obtains a lock on a row. - * - * @param row The row to lock. - * @return A {@link RowLock} containing the row and lock id. - * @throws IOException if a remote or network exception occurs. - * @see RowLock - * @see #unlockRow - */ - RowLock lockRow(byte[] row) throws IOException; - - /** - * Releases a row lock. - * - * @param rl The row lock to release. - * @throws IOException if a remote or network exception occurs. - * @see RowLock - * @see #unlockRow - */ - void unlockRow(RowLock rl) throws IOException; - - /** - * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the - * table region containing the specified row. The row given does not actually have - * to exist. Whichever region would contain the row based on start and end keys will - * be used. Note that the {@code row} parameter is also not passed to the - * coprocessor handler registered for this protocol, unless the {@code row} - * is separately passed as an argument in the service request. The parameter - * here is only used to locate the region used to handle the call. - * - *
      - * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published - * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations: - *
      - * <blockquote><pre>
      -   * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
      -   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
      -   * MyCallRequest request = MyCallRequest.newBuilder()
      -   *     ...
      -   *     .build();
      -   * MyCallResponse response = service.myCall(null, request);
      -   * </pre></blockquote>
      - * - * @param row The row key used to identify the remote region location - * @return A CoprocessorRpcChannel instance - */ - CoprocessorRpcChannel coprocessorService(byte[] row); - - /** - * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table - * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), - * and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} - * method with each {@link Service} - * instance. - * - * @param service the protocol buffer {@code Service} implementation to call - * @param startKey start region selection with region containing this row. If {@code null}, the - * selection will start with the first table region. - * @param endKey select regions up to and including the region containing this row. - * If {@code null}, selection will continue through the last table region. - * @param callable this instance's - * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} - * method will be invoked once per table region, using the {@link Service} - * instance connected to that region. - * @param the {@link Service} subclass to connect to - * @param Return type for the {@code callable} parameter's - * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method - * @return a map of result values keyed by region name - */ - Map coprocessorService(final Class service, - byte[] startKey, byte[] endKey, final Batch.Call callable) - throws ServiceException, Throwable; - - /** - * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table - * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), - * and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} - * method with each {@link Service} instance. - * - *
      - * The given - * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)} - * method will be called with the return value from each region's - * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation. - *
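A sketch of the callback variant, following the same conventions as the javadoc sample above. It assumes an open HTable named table; MyService, MyRequest, MyResponse and their methods stand in for a user's generated protobuf classes and are not part of HBase. Batch lives in org.apache.hadoop.hbase.client.coprocessor and BlockingRpcCallback in org.apache.hadoop.hbase.ipc.

    // Hypothetical generated protobuf request for the coprocessor call.
    final MyRequest request = MyRequest.newBuilder().build();
    table.coprocessorService(MyService.class,
        null, null,                                    // null start/end key = all regions
        new Batch.Call<MyService, Long>() {
          public Long call(MyService stub) throws IOException {
            BlockingRpcCallback<MyResponse> done = new BlockingRpcCallback<MyResponse>();
            stub.myCall(null, request, done);          // hypothetical RPC method
            return done.get().getCount();              // hypothetical response field
          }
        },
        new Batch.Callback<Long>() {
          public void update(byte[] region, byte[] row, Long value) {
            // Invoked once per region with that region's return value.
            System.out.println(Bytes.toStringBinary(region) + " -> " + value);
          }
        });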
      - * - * @param service the protocol buffer {@code Service} implementation to call - * @param startKey start region selection with region containing this row. If {@code null}, the - * selection will start with the first table region. - * @param endKey select regions up to and including the region containing this row. - * If {@code null}, selection will continue through the last table region. - * @param callable this instance's - * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method - * will be invoked once per table region, using the {@link Service} instance - * connected to that region. - * @param callback - * @param the {@link Service} subclass to connect to - * @param Return type for the {@code callable} parameter's - * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method - */ - void coprocessorService(final Class service, - byte[] startKey, byte[] endKey, final Batch.Call callable, - final Batch.Callback callback) throws ServiceException, Throwable; - - /** - * See {@link #setAutoFlush(boolean, boolean)} - * - * @param autoFlush - * Whether or not to enable 'auto-flush'. - */ - public void setAutoFlush(boolean autoFlush); - - /** - * Turns 'auto-flush' on or off. - *
      - * When enabled (default), {@link Put} operations don't get buffered/delayed - * and are immediately executed. Failed operations are not retried. This is - * slower but safer. - *
      - * Turning off {@code autoFlush} means that multiple {@link Put}s will be - * accepted before any RPC is actually sent to do the write operations. If the - * application dies before pending writes get flushed to HBase, data will be - * lost. - *
      - * When you turn {@code #autoFlush} off, you should also consider the - * {@code clearBufferOnFail} option. By default, asynchronous {@link Put} - * requests will be retried on failure until successful. However, this can - * pollute the writeBuffer and slow down batching performance. Additionally, - * you may want to issue a number of Put requests and call - * {@link #flushCommits()} as a barrier. In both use cases, consider setting - * clearBufferOnFail to true to erase the buffer after {@link #flushCommits()} - * has been called, regardless of success. - * - * @param autoFlush - * Whether or not to enable 'auto-flush'. - * @param clearBufferOnFail - * Whether to keep Put failures in the writeBuffer - * @see #flushCommits - */ - public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail); - - /** - * Returns the maximum size in bytes of the write buffer for this HTable. - *
      - * The default value comes from the configuration parameter - * {@code hbase.client.write.buffer}. - * @return The size of the write buffer in bytes. - */ - public long getWriteBufferSize(); - - /** - * Sets the size of the buffer in bytes. - *
      - * If the new size is less than the current amount of data in the - * write buffer, the buffer gets flushed. - * @param writeBufferSize The new write buffer size, in bytes. - * @throws IOException if a remote or network exception occurs. - */ - public void setWriteBufferSize(long writeBufferSize) throws IOException; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java deleted file mode 100644 index 40f1f47..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; - - -/** - * Defines methods to create new HTableInterface. - * - * @since 0.21.0 - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public interface HTableInterfaceFactory { - - /** - * Creates a new HTableInterface. - * - * @param config HBaseConfiguration instance. - * @param tableName name of the HBase table. - * @return HTableInterface instance. - */ - HTableInterface createHTableInterface(Configuration config, byte[] tableName); - - - /** - * Release the HTable resource represented by the table. - * @param table - */ - void releaseHTableInterface(final HTableInterface table) throws IOException; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java deleted file mode 100644 index ef9516f..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java +++ /dev/null @@ -1,552 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.client; - -import java.io.Closeable; -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; - -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.client.coprocessor.Batch; -import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.PoolMap; -import org.apache.hadoop.hbase.util.PoolMap.PoolType; - -/** - * A simple pool of HTable instances. - * - * Each HTablePool acts as a pool for all tables. To use, instantiate an - * HTablePool and use {@link #getTable(String)} to get an HTable from the pool. - * - * This method is not needed anymore, clients should call - * HTableInterface.close() rather than returning the tables to the pool - * - * Once you are done with it, close your instance of {@link HTableInterface} - * by calling {@link HTableInterface#close()} rather than returning the tables - * to the pool with (deprecated) {@link #putTable(HTableInterface)}. - * - *
      - * A pool can be created with a maxSize which defines the most HTable - * references that will ever be retained for each table. Otherwise the default - * is {@link Integer#MAX_VALUE}. - * - *
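A minimal usage sketch of the pool; the pool size and table name are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.client.HTablePool;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PoolSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTablePool pool = new HTablePool(conf, 10);    // keep at most 10 refs per table
        HTableInterface table = pool.getTable("t1");   // hypothetical table name
        try {
          Result result = table.get(new Get(Bytes.toBytes("r1")));
          // use result ...
        } finally {
          table.close();                               // returns the table to the pool
        }
        pool.close();                                  // shuts down all per-table pools
      }
    }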
      - * Pool will manage its own connections to the cluster. See - * {@link HConnectionManager}. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class HTablePool implements Closeable { - private final PoolMap tables; - private final int maxSize; - private final PoolType poolType; - private final Configuration config; - private final HTableInterfaceFactory tableFactory; - - /** - * Default Constructor. Default HBaseConfiguration and no limit on pool size. - */ - public HTablePool() { - this(HBaseConfiguration.create(), Integer.MAX_VALUE); - } - - /** - * Constructor to set maximum versions and use the specified configuration. - * - * @param config - * configuration - * @param maxSize - * maximum number of references to keep for each table - */ - public HTablePool(final Configuration config, final int maxSize) { - this(config, maxSize, null, null); - } - - /** - * Constructor to set maximum versions and use the specified configuration and - * table factory. - * - * @param config - * configuration - * @param maxSize - * maximum number of references to keep for each table - * @param tableFactory - * table factory - */ - public HTablePool(final Configuration config, final int maxSize, - final HTableInterfaceFactory tableFactory) { - this(config, maxSize, tableFactory, PoolType.Reusable); - } - - /** - * Constructor to set maximum versions and use the specified configuration and - * pool type. - * - * @param config - * configuration - * @param maxSize - * maximum number of references to keep for each table - * @param poolType - * pool type which is one of {@link PoolType#Reusable} or - * {@link PoolType#ThreadLocal} - */ - public HTablePool(final Configuration config, final int maxSize, - final PoolType poolType) { - this(config, maxSize, null, poolType); - } - - /** - * Constructor to set maximum versions and use the specified configuration, - * table factory and pool type. The HTablePool supports the - * {@link PoolType#Reusable} and {@link PoolType#ThreadLocal}. If the pool - * type is null or not one of those two values, then it will default to - * {@link PoolType#Reusable}. - * - * @param config - * configuration - * @param maxSize - * maximum number of references to keep for each table - * @param tableFactory - * table factory - * @param poolType - * pool type which is one of {@link PoolType#Reusable} or - * {@link PoolType#ThreadLocal} - */ - public HTablePool(final Configuration config, final int maxSize, - final HTableInterfaceFactory tableFactory, PoolType poolType) { - // Make a new configuration instance so I can safely cleanup when - // done with the pool. - this.config = config == null ? HBaseConfiguration.create() : config; - this.maxSize = maxSize; - this.tableFactory = tableFactory == null ? new HTableFactory() - : tableFactory; - if (poolType == null) { - this.poolType = PoolType.Reusable; - } else { - switch (poolType) { - case Reusable: - case ThreadLocal: - this.poolType = poolType; - break; - default: - this.poolType = PoolType.Reusable; - break; - } - } - this.tables = new PoolMap(this.poolType, - this.maxSize); - } - - /** - * Get a reference to the specified table from the pool. - *
      - *
      - * - * @param tableName - * table name - * @return a reference to the specified table - * @throws RuntimeException - * if there is a problem instantiating the HTable - */ - public HTableInterface getTable(String tableName) { - // call the old getTable implementation renamed to findOrCreateTable - HTableInterface table = findOrCreateTable(tableName); - // return a proxy table so when user closes the proxy, the actual table - // will be returned to the pool - return new PooledHTable(table); - } - - /** - * Get a reference to the specified table from the pool. - *
      - * - * Create a new one if one is not available. - * - * @param tableName - * table name - * @return a reference to the specified table - * @throws RuntimeException - * if there is a problem instantiating the HTable - */ - private HTableInterface findOrCreateTable(String tableName) { - HTableInterface table = tables.get(tableName); - if (table == null) { - table = createHTable(tableName); - } - return table; - } - - /** - * Get a reference to the specified table from the pool. - *
      - * - * Create a new one if one is not available. - * - * @param tableName - * table name - * @return a reference to the specified table - * @throws RuntimeException - * if there is a problem instantiating the HTable - */ - public HTableInterface getTable(byte[] tableName) { - return getTable(Bytes.toString(tableName)); - } - - /** - * This method is not needed anymore, clients should call - * HTableInterface.close() rather than returning the tables to the pool - * - * @param table - * the proxy table user got from pool - * @deprecated - */ - public void putTable(HTableInterface table) throws IOException { - // we need to be sure nobody puts a proxy implementation in the pool - // but if the client code is not updated - // and it will continue to call putTable() instead of calling close() - // then we need to return the wrapped table to the pool instead of the - // proxy - // table - if (table instanceof PooledHTable) { - returnTable(((PooledHTable) table).getWrappedTable()); - } else { - // normally this should not happen if clients pass back the same - // table - // object they got from the pool - // but if it happens then it's better to reject it - throw new IllegalArgumentException("not a pooled table: " + table); - } - } - - /** - * Puts the specified HTable back into the pool. - *
      - * - * If the pool already contains maxSize references to the table, then - * the table instance gets closed after flushing buffered edits. - * - * @param table - * table - */ - private void returnTable(HTableInterface table) throws IOException { - // this is the old putTable method renamed and made private - String tableName = Bytes.toString(table.getTableName()); - if (tables.size(tableName) >= maxSize) { - // release table instance since we're not reusing it - this.tables.remove(tableName, table); - this.tableFactory.releaseHTableInterface(table); - return; - } - tables.put(tableName, table); - } - - protected HTableInterface createHTable(String tableName) { - return this.tableFactory.createHTableInterface(config, - Bytes.toBytes(tableName)); - } - - /** - * Closes all the HTable instances , belonging to the given table, in the - * table pool. - *
      - * Note: this is a 'shutdown' of the given table pool and different from - * {@link #putTable(HTableInterface)}, that is used to return the table - * instance to the pool for future re-use. - * - * @param tableName - */ - public void closeTablePool(final String tableName) throws IOException { - Collection tables = this.tables.values(tableName); - if (tables != null) { - for (HTableInterface table : tables) { - this.tableFactory.releaseHTableInterface(table); - } - } - this.tables.remove(tableName); - } - - /** - * See {@link #closeTablePool(String)}. - * - * @param tableName - */ - public void closeTablePool(final byte[] tableName) throws IOException { - closeTablePool(Bytes.toString(tableName)); - } - - /** - * Closes all the HTable instances , belonging to all tables in the table - * pool. - *
      - * Note: this is a 'shutdown' of all the table pools. - */ - public void close() throws IOException { - for (String tableName : tables.keySet()) { - closeTablePool(tableName); - } - this.tables.clear(); - } - - int getCurrentPoolSize(String tableName) { - return tables.size(tableName); - } - - /** - * A proxy class that implements HTableInterface.close method to return the - * wrapped table back to the table pool - * - */ - class PooledHTable implements HTableInterface { - - private HTableInterface table; // actual table implementation - - public PooledHTable(HTableInterface table) { - this.table = table; - } - - @Override - public byte[] getTableName() { - return table.getTableName(); - } - - @Override - public Configuration getConfiguration() { - return table.getConfiguration(); - } - - @Override - public HTableDescriptor getTableDescriptor() throws IOException { - return table.getTableDescriptor(); - } - - @Override - public boolean exists(Get get) throws IOException { - return table.exists(get); - } - - @Override - public void batch(List actions, Object[] results) throws IOException, - InterruptedException { - table.batch(actions, results); - } - - @Override - public Object[] batch(List actions) throws IOException, - InterruptedException { - return table.batch(actions); - } - - @Override - public Result get(Get get) throws IOException { - return table.get(get); - } - - @Override - public Result[] get(List gets) throws IOException { - return table.get(gets); - } - - @Override - @SuppressWarnings("deprecation") - public Result getRowOrBefore(byte[] row, byte[] family) throws IOException { - return table.getRowOrBefore(row, family); - } - - @Override - public ResultScanner getScanner(Scan scan) throws IOException { - return table.getScanner(scan); - } - - @Override - public ResultScanner getScanner(byte[] family) throws IOException { - return table.getScanner(family); - } - - @Override - public ResultScanner getScanner(byte[] family, byte[] qualifier) - throws IOException { - return table.getScanner(family, qualifier); - } - - @Override - public void put(Put put) throws IOException { - table.put(put); - } - - @Override - public void put(List puts) throws IOException { - table.put(puts); - } - - @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Put put) throws IOException { - return table.checkAndPut(row, family, qualifier, value, put); - } - - @Override - public void delete(Delete delete) throws IOException { - table.delete(delete); - } - - @Override - public void delete(List deletes) throws IOException { - table.delete(deletes); - } - - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Delete delete) throws IOException { - return table.checkAndDelete(row, family, qualifier, value, delete); - } - - @Override - public Result increment(Increment increment) throws IOException { - return table.increment(increment); - } - - @Override - public long incrementColumnValue(byte[] row, byte[] family, - byte[] qualifier, long amount) throws IOException { - return table.incrementColumnValue(row, family, qualifier, amount); - } - - @Override - public long incrementColumnValue(byte[] row, byte[] family, - byte[] qualifier, long amount, boolean writeToWAL) throws IOException { - return table.incrementColumnValue(row, family, qualifier, amount, - writeToWAL); - } - - @Override - public boolean isAutoFlush() { - return table.isAutoFlush(); - } - - @Override - public void flushCommits() throws 
IOException { - table.flushCommits(); - } - - /** - * Returns the actual table back to the pool - * - * @throws IOException - */ - public void close() throws IOException { - returnTable(table); - } - - @Override - public RowLock lockRow(byte[] row) throws IOException { - return table.lockRow(row); - } - - @Override - public void unlockRow(RowLock rl) throws IOException { - table.unlockRow(rl); - } - - @Override - public CoprocessorRpcChannel coprocessorService(byte[] row) { - return table.coprocessorService(row); - } - - @Override - public Map coprocessorService(Class service, - byte[] startKey, byte[] endKey, Batch.Call callable) - throws ServiceException, Throwable { - return table.coprocessorService(service, startKey, endKey, callable); - } - - @Override - public void coprocessorService(Class service, - byte[] startKey, byte[] endKey, Batch.Call callable, Callback callback) - throws ServiceException, Throwable { - table.coprocessorService(service, startKey, endKey, callable, callback); - } - - @Override - public String toString() { - return "PooledHTable{" + ", table=" + table + '}'; - } - - /** - * Expose the wrapped HTable to tests in the same package - * - * @return wrapped htable - */ - HTableInterface getWrappedTable() { - return table; - } - - @Override - public void batchCallback(List actions, - Object[] results, Callback callback) throws IOException, - InterruptedException { - table.batchCallback(actions, results, callback); - } - - @Override - public Object[] batchCallback(List actions, - Callback callback) throws IOException, InterruptedException { - return table.batchCallback(actions, callback); - } - - @Override - public void mutateRow(RowMutations rm) throws IOException { - table.mutateRow(rm); - } - - @Override - public Result append(Append append) throws IOException { - return table.append(append); - } - - @Override - public void setAutoFlush(boolean autoFlush) { - table.setAutoFlush(autoFlush); - } - - @Override - public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { - table.setAutoFlush(autoFlush, clearBufferOnFail); - } - - @Override - public long getWriteBufferSize() { - return table.getWriteBufferSize(); - } - - @Override - public void setWriteBufferSize(long writeBufferSize) throws IOException { - table.setWriteBufferSize(writeBufferSize); - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableUtil.java deleted file mode 100644 index 7ad6e65..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableUtil.java +++ /dev/null @@ -1,141 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.lang.InterruptedException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Row; - -/** - * Utility class for HTable. - * - * - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class HTableUtil { - - private static final int INITIAL_LIST_SIZE = 250; - - /** - * Processes a List of Puts and writes them to an HTable instance in RegionServer buckets via the htable.put method. - * This will utilize the writeBuffer, thus the writeBuffer flush frequency may be tuned accordingly via htable.setWriteBufferSize. - *
      - * The benefit of submitting Puts in this manner is to minimize the number of RegionServer RPCs in each flush. - *
      - * Assumption #1: Regions have been pre-created for the table. If they haven't, then all of the Puts will go to the same region, - * defeating the purpose of this utility method. See the Apache HBase book for an explanation of how to do this. - *
      - * Assumption #2: Row-keys are not monotonically increasing. See the Apache HBase book for an explanation of this problem. - *
      - * Assumption #3: That the input list of Puts is big enough to be useful (in the thousands or more). The intent of this - * method is to process larger chunks of data. - *
      - * Assumption #4: htable.setAutoFlush(false) has been set. This is a requirement to use the writeBuffer. - *
      - * @param htable HTable instance for target HBase table - * @param puts List of Put instances - * @throws IOException if a remote or network exception occurs - * - */ - public static void bucketRsPut(HTable htable, List puts) throws IOException { - - Map> putMap = createRsPutMap(htable, puts); - for (List rsPuts: putMap.values()) { - htable.put( rsPuts ); - } - htable.flushCommits(); - } - - /** - * Processes a List of Rows (Put, Delete) and writes them to an HTable instance in RegionServer buckets via the htable.batch method. - *
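A sketch of feeding bucketRsPut under the assumptions listed above (auto-flush off, pre-split table); the table, family, row keys and sizes are illustrative:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.HTableUtil;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BucketPutSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(HBaseConfiguration.create(), "t1");
        table.setAutoFlush(false);                     // required to use the write buffer
        List<Put> puts = new ArrayList<Put>(10000);
        for (int i = 0; i < 10000; i++) {
          Put put = new Put(Bytes.toBytes("key-" + Integer.toHexString(i)));
          put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v" + i));
          puts.add(put);
        }
        // Writes the puts bucketed by region server, then calls flushCommits().
        HTableUtil.bucketRsPut(table, puts);
        table.close();
      }
    }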
      - * The benefit of submitting Puts in this manner is to minimize the number of RegionServer RPCs, thus this will - * produce one RPC of Puts per RegionServer. - *
      - * Assumption #1: Regions have been pre-created for the table. If they haven't, then all of the Puts will go to the same region, - * defeating the purpose of this utility method. See the Apache HBase book for an explanation of how to do this. - *
      - * Assumption #2: Row-keys are not monotonically increasing. See the Apache HBase book for an explanation of this problem. - *
      - * Assumption #3: That the input list of Rows is big enough to be useful (in the thousands or more). The intent of this - * method is to process larger chunks of data. - *
      - * This method accepts a list of Row objects because the underlying .batch method accepts a list of Row objects. - *
      - * @param htable HTable instance for target HBase table - * @param rows List of Row instances - * @throws IOException if a remote or network exception occurs - */ - public static void bucketRsBatch(HTable htable, List rows) throws IOException { - - try { - Map> rowMap = createRsRowMap(htable, rows); - for (List rsRows: rowMap.values()) { - htable.batch( rsRows ); - } - } catch (InterruptedException e) { - throw new IOException(e); - } - - } - - private static Map> createRsPutMap(HTable htable, List puts) throws IOException { - - Map> putMap = new HashMap>(); - for (Put put: puts) { - HRegionLocation rl = htable.getRegionLocation( put.getRow() ); - String hostname = rl.getHostname(); - List recs = putMap.get( hostname); - if (recs == null) { - recs = new ArrayList(INITIAL_LIST_SIZE); - putMap.put( hostname, recs); - } - recs.add(put); - } - return putMap; - } - - private static Map> createRsRowMap(HTable htable, List rows) throws IOException { - - Map> rowMap = new HashMap>(); - for (Row row: rows) { - HRegionLocation rl = htable.getRegionLocation( row.getRow() ); - String hostname = rl.getHostname(); - List recs = rowMap.get( hostname); - if (recs == null) { - recs = new ArrayList(INITIAL_LIST_SIZE); - rowMap.put( hostname, recs); - } - recs.add(row); - } - return rowMap; - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/Increment.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/Increment.java deleted file mode 100644 index 7df6e4b..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ /dev/null @@ -1,277 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Set; -import java.util.TreeMap; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.io.TimeRange; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * Used to perform Increment operations on a single row. - *

      - * This operation does not appear atomic to readers. Increments are done - * under a single row lock, so write operations to a row are synchronized, but - * readers do not take row locks so get and scan operations can see this - * operation partially completed. - *

      - * To increment columns of a row, instantiate an Increment object with the row - * to increment. At least one column to increment must be specified using the - * {@link #addColumn(byte[], byte[], long)} method. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class Increment implements Row { - private byte [] row = null; - private long lockId = -1L; - private boolean writeToWAL = true; - private TimeRange tr = new TimeRange(); - private Map<byte [], NavigableMap<byte [], Long>> familyMap = - new TreeMap<byte [], NavigableMap<byte [], Long>>(Bytes.BYTES_COMPARATOR); - - /** Constructor for Writable. DO NOT USE */ - public Increment() {} - - /** - * Create an Increment operation for the specified row. - *
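[Editor's illustration] The usage pattern just described — instantiate an Increment for a row, register at least one column with addColumn, and submit it through the table — might look like the sketch below. The table, row key, and column names are hypothetical, and an open HTable named table plus the standard client imports are assumed, as in the earlier bucketRsBatch example.

    Increment inc = new Increment(Bytes.toBytes("page#/home"));     // hypothetical row key
    inc.addColumn(Bytes.toBytes("daily"), Bytes.toBytes("hits"), 1L);
    inc.addColumn(Bytes.toBytes("daily"), Bytes.toBytes("bytes"), 512L);
    // HTable.increment() applies all columns under one row lock and returns the new values.
    Result result = table.increment(inc);
    long hits = Bytes.toLong(result.getValue(Bytes.toBytes("daily"), Bytes.toBytes("hits")));
    System.out.println("hits is now " + hits);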

      - * At least one column must be incremented. - * @param row row key - */ - public Increment(byte [] row) { - this(row, null); - } - - /** - * Create an Increment operation for the specified row, using an existing row - * lock. - *

      - * At least one column must be incremented. - * @param row row key - * @param rowLock previously acquired row lock, or null - */ - public Increment(byte [] row, RowLock rowLock) { - if (row == null) { - throw new IllegalArgumentException("Cannot increment a null row"); - } - this.row = row; - if(rowLock != null) { - this.lockId = rowLock.getLockId(); - } - } - - /** - * Increment the column from the specific family with the specified qualifier - * by the specified amount. - *

      - * Overrides previous calls to addColumn for this family and qualifier. - * @param family family name - * @param qualifier column qualifier - * @param amount amount to increment by - * @return the Increment object - */ - public Increment addColumn(byte [] family, byte [] qualifier, long amount) { - if (family == null) { - throw new IllegalArgumentException("family cannot be null"); - } - if (qualifier == null) { - throw new IllegalArgumentException("qualifier cannot be null"); - } - NavigableMap set = familyMap.get(family); - if(set == null) { - set = new TreeMap(Bytes.BYTES_COMPARATOR); - } - set.put(qualifier, amount); - familyMap.put(family, set); - return this; - } - - /* Accessors */ - - /** - * Method for retrieving the increment's row - * @return row - */ - public byte [] getRow() { - return this.row; - } - - /** - * Method for retrieving the increment's RowLock - * @return RowLock - */ - public RowLock getRowLock() { - return new RowLock(this.row, this.lockId); - } - - /** - * Method for retrieving the increment's lockId - * @return lockId - */ - public long getLockId() { - return this.lockId; - } - - /** - * Method for retrieving whether WAL will be written to or not - * @return true if WAL should be used, false if not - */ - public boolean getWriteToWAL() { - return this.writeToWAL; - } - - /** - * Sets whether this operation should write to the WAL or not. - * @param writeToWAL true if WAL should be used, false if not - * @return this increment operation - */ - public Increment setWriteToWAL(boolean writeToWAL) { - this.writeToWAL = writeToWAL; - return this; - } - - /** - * Gets the TimeRange used for this increment. - * @return TimeRange - */ - public TimeRange getTimeRange() { - return this.tr; - } - - /** - * Sets the TimeRange to be used on the Get for this increment. - *

      - * This is useful when you have counters that only last for specific - * periods of time (i.e. counters that are partitioned by time). By setting - * the range of valid times for this increment, you can potentially gain - * some performance with a more efficient Get operation. - *
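[Editor's illustration] A hedged sketch of the time-partitioned-counter case just described: restrict the read side of the increment to the last hour before applying it. The column layout and window size are hypothetical, and an open HTable named table is assumed.

    Increment inc = new Increment(Bytes.toBytes("metric#cpu"));      // hypothetical row key
    inc.addColumn(Bytes.toBytes("hourly"), Bytes.toBytes("2012122613"), 1L);
    long end = System.currentTimeMillis();
    long start = end - 60L * 60L * 1000L;                            // only consider the last hour
    inc.setTimeRange(start, end);                                    // range is [minStamp, maxStamp)
    table.increment(inc);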

      - * This range is used as [minStamp, maxStamp). - * @param minStamp minimum timestamp value, inclusive - * @param maxStamp maximum timestamp value, exclusive - * @throws IOException if invalid time range - * @return this - */ - public Increment setTimeRange(long minStamp, long maxStamp) - throws IOException { - tr = new TimeRange(minStamp, maxStamp); - return this; - } - - /** - * Method for retrieving the keys in the familyMap - * @return keys in the current familyMap - */ - public Set familySet() { - return this.familyMap.keySet(); - } - - /** - * Method for retrieving the number of families to increment from - * @return number of families - */ - public int numFamilies() { - return this.familyMap.size(); - } - - /** - * Method for retrieving the number of columns to increment - * @return number of columns across all families - */ - public int numColumns() { - if (!hasFamilies()) return 0; - int num = 0; - for (NavigableMap family : familyMap.values()) { - num += family.size(); - } - return num; - } - - /** - * Method for checking if any families have been inserted into this Increment - * @return true if familyMap is non empty false otherwise - */ - public boolean hasFamilies() { - return !this.familyMap.isEmpty(); - } - - /** - * Method for retrieving the increment's familyMap - * @return familyMap - */ - public Map> getFamilyMap() { - return this.familyMap; - } - - /** - * @return String - */ - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("row="); - sb.append(Bytes.toStringBinary(this.row)); - if(this.familyMap.size() == 0) { - sb.append(", no columns set to be incremented"); - return sb.toString(); - } - sb.append(", families="); - boolean moreThanOne = false; - for(Map.Entry> entry : - this.familyMap.entrySet()) { - if(moreThanOne) { - sb.append("), "); - } else { - moreThanOne = true; - sb.append("{"); - } - sb.append("(family="); - sb.append(Bytes.toString(entry.getKey())); - sb.append(", columns="); - if(entry.getValue() == null) { - sb.append("NONE"); - } else { - sb.append("{"); - boolean moreThanOneB = false; - for(Map.Entry column : entry.getValue().entrySet()) { - if(moreThanOneB) { - sb.append(", "); - } else { - moreThanOneB = true; - } - sb.append(Bytes.toStringBinary(column.getKey()) + "+=" + column.getValue()); - } - sb.append("}"); - } - } - sb.append("}"); - return sb.toString(); - } - - @Override - public int compareTo(Row i) { - return Bytes.compareTo(this.getRow(), i.getRow()); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java deleted file mode 100644 index fb910d8..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Specify Isolation levels in Scan operations. - *
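[Editor's illustration] A small sketch of the two levels described below. The toBytes/fromBytes round trip uses only the methods defined in this enum; the last two lines assume that Scan exposes a setIsolationLevel(IsolationLevel) setter, as contemporary HBase clients do, so treat them as illustrative rather than definitive.

    IsolationLevel level = IsolationLevel.READ_UNCOMMITTED;
    byte[] wire = level.toBytes();                          // how the level travels with a request
    IsolationLevel decoded = IsolationLevel.fromBytes(wire);
    System.out.println(decoded);                            // READ_UNCOMMITTED
    // Assumption: Scan offers setIsolationLevel(IsolationLevel); if so, a dirty-read scan is:
    Scan scan = new Scan(Bytes.toBytes("start-row"));       // hypothetical start row
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);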

      - * There are two isolation levels. A READ_COMMITTED isolation level - * indicates that only data that is committed be returned in a scan. - * An isolation level of READ_UNCOMMITTED indicates that a scan - * should return data that is being modified by transactions that might - * not have been committed yet. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public enum IsolationLevel { - - READ_COMMITTED(1), - READ_UNCOMMITTED(2); - - IsolationLevel(int value) {} - - public byte [] toBytes() { - return new byte [] { toByte() }; - } - - public byte toByte() { - return (byte)this.ordinal(); - } - - public static IsolationLevel fromBytes(byte [] bytes) { - return IsolationLevel.fromByte(bytes[0]); - } - - public static IsolationLevel fromByte(byte vbyte) { - return IsolationLevel.values()[vbyte]; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java deleted file mode 100644 index 7126073..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - - -import org.apache.hadoop.hbase.MasterAdminProtocol; - -import java.io.Closeable; - -/** - * A KeepAlive connection is not physically closed immediately after the close, - * but rather kept alive for a few minutes. It makes sense only if it's shared. - * - * This interface is used by a dynamic proxy. It allows to have a #close - * function in a master client. - * - * This class is intended to be used internally by HBase classes that need to - * speak the MasterAdminProtocol; but not by * final user code. Hence it's - * package protected. - */ -interface MasterAdminKeepAliveConnection extends MasterAdminProtocol, Closeable { - - @Override - public void close(); -} - diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/MasterMonitorKeepAliveConnection.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/MasterMonitorKeepAliveConnection.java deleted file mode 100644 index a4c7650..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/MasterMonitorKeepAliveConnection.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - - -import org.apache.hadoop.hbase.MasterMonitorProtocol; - -import java.io.Closeable; - -/** - * A KeepAlive connection is not physically closed immediately after the close, - * but rather kept alive for a few minutes. It makes sense only if it's shared. - * - * This interface is used by a dynamic proxy. It allows to have a #close - * function in a master client. - * - * This class is intended to be used internally by HBase classes that need to - * speak the MasterMonitorProtocol; but not by final user code. Hence it's - * package protected. - */ -interface MasterMonitorKeepAliveConnection extends MasterMonitorProtocol, Closeable { - - @Override - public void close(); -} - diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java deleted file mode 100644 index eba32a7..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ /dev/null @@ -1,489 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.NavigableMap; -import java.util.TreeMap; -import java.util.TreeSet; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.PairOfSameType; - -/** - * Scanner class that contains the .META. table scanning logic - * and uses a Retryable scanner. Provided visitors will be called - * for each row. - * - * Although public visibility, this is not a public-facing API and may evolve in - * minor releases. - * - *

      Note that during concurrent region splits, the scanner might not see - * META changes across rows (for parent and daughter entries) consistently. - * see HBASE-5986, and {@link BlockingMetaScannerVisitor} for details.
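[Editor's illustration] Although MetaScanner is marked @InterfaceAudience.Private, a visitor-based scan of .META. looks roughly like the sketch below. It assumes an existing Configuration named conf, a method that may throw IOException, and the standard client imports; it simply prints each region it encounters.

    MetaScanner.MetaScannerVisitor visitor = new MetaScanner.MetaScannerVisitorBase() {
      @Override
      public boolean processRow(Result rowResult) throws IOException {
        HRegionInfo info = MetaScanner.getHRegionInfo(rowResult);
        if (info != null) {
          System.out.println("region: " + info.getRegionNameAsString());
        }
        return true;   // keep scanning; return false to stop early
      }
    };
    MetaScanner.metaScan(conf, visitor);   // full .META. scan; the visitor is closed on return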

      - */ -@InterfaceAudience.Private -public class MetaScanner { - private static final Log LOG = LogFactory.getLog(MetaScanner.class); - /** - * Scans the meta table and calls a visitor on each RowResult and uses a empty - * start row value as table name. - * - * @param configuration conf - * @param visitor A custom visitor - * @throws IOException e - */ - public static void metaScan(Configuration configuration, - MetaScannerVisitor visitor) - throws IOException { - metaScan(configuration, visitor, null); - } - - /** - * Scans the meta table and calls a visitor on each RowResult. Uses a table - * name to locate meta regions. - * - * @param configuration config - * @param visitor visitor object - * @param userTableName User table name in meta table to start scan at. Pass - * null if not interested in a particular table. - * @throws IOException e - */ - public static void metaScan(Configuration configuration, - MetaScannerVisitor visitor, byte [] userTableName) - throws IOException { - metaScan(configuration, visitor, userTableName, null, Integer.MAX_VALUE); - } - - /** - * Scans the meta table and calls a visitor on each RowResult. Uses a table - * name and a row name to locate meta regions. And it only scans at most - * rowLimit of rows. - * - * @param configuration HBase configuration. - * @param visitor Visitor object. - * @param userTableName User table name in meta table to start scan at. Pass - * null if not interested in a particular table. - * @param row Name of the row at the user table. The scan will start from - * the region row where the row resides. - * @param rowLimit Max of processed rows. If it is less than 0, it - * will be set to default value Integer.MAX_VALUE. - * @throws IOException e - */ - public static void metaScan(Configuration configuration, - MetaScannerVisitor visitor, byte [] userTableName, byte[] row, - int rowLimit) - throws IOException { - metaScan(configuration, visitor, userTableName, row, rowLimit, - HConstants.META_TABLE_NAME); - } - - /** - * Scans the meta table and calls a visitor on each RowResult. Uses a table - * name and a row name to locate meta regions. And it only scans at most - * rowLimit of rows. - * - * @param configuration HBase configuration. - * @param visitor Visitor object. Closes the visitor before returning. - * @param tableName User table name in meta table to start scan at. Pass - * null if not interested in a particular table. - * @param row Name of the row at the user table. The scan will start from - * the region row where the row resides. - * @param rowLimit Max of processed rows. If it is less than 0, it - * will be set to default value Integer.MAX_VALUE. - * @param metaTableName Meta table to scan, root or meta. - * @throws IOException e - */ - public static void metaScan(Configuration configuration, - final MetaScannerVisitor visitor, final byte[] tableName, - final byte[] row, final int rowLimit, final byte[] metaTableName) - throws IOException { - try { - HConnectionManager.execute(new HConnectable(configuration) { - @Override - public Void connect(HConnection connection) throws IOException { - metaScan(conf, connection, visitor, tableName, row, rowLimit, - metaTableName); - return null; - } - }); - } finally { - visitor.close(); - } - } - - private static void metaScan(Configuration configuration, HConnection connection, - MetaScannerVisitor visitor, byte [] tableName, byte[] row, - int rowLimit, final byte [] metaTableName) - throws IOException { - int rowUpperLimit = rowLimit > 0 ? 
rowLimit: Integer.MAX_VALUE; - - // if row is not null, we want to use the startKey of the row's region as - // the startRow for the meta scan. - byte[] startRow; - if (row != null) { - // Scan starting at a particular row in a particular table - assert tableName != null; - byte[] searchRow = - HRegionInfo.createRegionName(tableName, row, HConstants.NINES, - false); - HTable metaTable = null; - try { - metaTable = new HTable(configuration, HConstants.META_TABLE_NAME); - Result startRowResult = metaTable.getRowOrBefore(searchRow, - HConstants.CATALOG_FAMILY); - if (startRowResult == null) { - throw new TableNotFoundException("Cannot find row in .META. for table: " - + Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow)); - } - HRegionInfo regionInfo = getHRegionInfo(startRowResult); - if (regionInfo == null) { - throw new IOException("HRegionInfo was null or empty in Meta for " + - Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow)); - } - - byte[] rowBefore = regionInfo.getStartKey(); - startRow = HRegionInfo.createRegionName(tableName, rowBefore, - HConstants.ZEROES, false); - } finally { - if (metaTable != null) { - metaTable.close(); - } - } - } else if (tableName == null || tableName.length == 0) { - // Full META scan - startRow = HConstants.EMPTY_START_ROW; - } else { - // Scan META for an entire table - startRow = HRegionInfo.createRegionName( - tableName, HConstants.EMPTY_START_ROW, HConstants.ZEROES, false); - } - - // Scan over each meta region - ScannerCallable callable; - int rows = Math.min(rowLimit, configuration.getInt( - HConstants.HBASE_META_SCANNER_CACHING, - HConstants.DEFAULT_HBASE_META_SCANNER_CACHING)); - do { - final Scan scan = new Scan(startRow).addFamily(HConstants.CATALOG_FAMILY); - if (LOG.isDebugEnabled()) { - LOG.debug("Scanning " + Bytes.toString(metaTableName) + - " starting at row=" + Bytes.toStringBinary(startRow) + " for max=" + - rowUpperLimit + " rows using " + connection.toString()); - } - callable = new ScannerCallable(connection, metaTableName, scan, null); - // Open scanner - callable.withRetries(); - - int processedRows = 0; - try { - callable.setCaching(rows); - done: do { - if (processedRows >= rowUpperLimit) { - break; - } - //we have all the rows here - Result [] rrs = callable.withRetries(); - if (rrs == null || rrs.length == 0 || rrs[0].size() == 0) { - break; //exit completely - } - for (Result rr : rrs) { - if (processedRows >= rowUpperLimit) { - break done; - } - if (!visitor.processRow(rr)) - break done; //exit completely - processedRows++; - } - //here, we didn't break anywhere. Check if we have more rows - } while(true); - // Advance the startRow to the end key of the current region - startRow = callable.getHRegionInfo().getEndKey(); - } finally { - // Close scanner - callable.setClose(); - callable.withRetries(); - } - } while (Bytes.compareTo(startRow, HConstants.LAST_ROW) != 0); - } - - /** - * Returns HRegionInfo object from the column - * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog - * table Result. 
- * @param data a Result object from the catalog table scan - * @return HRegionInfo or null - */ - public static HRegionInfo getHRegionInfo(Result data) { - byte [] bytes = - data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - if (bytes == null) return null; - HRegionInfo info = HRegionInfo.parseFromOrNull(bytes); - if (LOG.isDebugEnabled()) { - LOG.debug("Current INFO from scan results = " + info); - } - return info; - } - - /** - * Lists all of the regions currently in META. - * @param conf - * @return List of all user-space regions. - * @throws IOException - */ - public static List listAllRegions(Configuration conf) - throws IOException { - return listAllRegions(conf, true); - } - - /** - * Lists all of the regions currently in META. - * @param conf - * @param offlined True if we are to include offlined regions, false and we'll - * leave out offlined regions from returned list. - * @return List of all user-space regions. - * @throws IOException - */ - public static List listAllRegions(Configuration conf, final boolean offlined) - throws IOException { - final List regions = new ArrayList(); - MetaScannerVisitor visitor = new BlockingMetaScannerVisitor(conf) { - @Override - public boolean processRowInternal(Result result) throws IOException { - if (result == null || result.isEmpty()) { - return true; - } - - HRegionInfo regionInfo = getHRegionInfo(result); - if (regionInfo == null) { - LOG.warn("Null REGIONINFO_QUALIFIER: " + result); - return true; - } - - // If region offline AND we are not to include offlined regions, return. - if (regionInfo.isOffline() && !offlined) return true; - regions.add(regionInfo); - return true; - } - }; - metaScan(conf, visitor); - return regions; - } - - /** - * Lists all of the table regions currently in META. - * @param conf - * @param offlined True if we are to include offlined regions, false and we'll - * leave out offlined regions from returned list. - * @return Map of all user-space regions to servers - * @throws IOException - */ - public static NavigableMap allTableRegions(Configuration conf, - final byte [] tablename, final boolean offlined) throws IOException { - final NavigableMap regions = - new TreeMap(); - MetaScannerVisitor visitor = new TableMetaScannerVisitor(conf, tablename) { - @Override - public boolean processRowInternal(Result rowResult) throws IOException { - HRegionInfo info = getHRegionInfo(rowResult); - ServerName serverName = HRegionInfo.getServerName(rowResult); - - if (!(info.isOffline() || info.isSplit())) { - regions.put(new UnmodifyableHRegionInfo(info), serverName); - } - return true; - } - }; - metaScan(conf, visitor, tablename); - return regions; - } - - /** - * Visitor class called to process each row of the .META. table - */ - public interface MetaScannerVisitor extends Closeable { - /** - * Visitor method that accepts a RowResult and the meta region location. - * Implementations can return false to stop the region's loop if it becomes - * unnecessary for some reason. - * - * @param rowResult result - * @return A boolean to know if it should continue to loop in the region - * @throws IOException e - */ - public boolean processRow(Result rowResult) throws IOException; - } - - public static abstract class MetaScannerVisitorBase implements MetaScannerVisitor { - @Override - public void close() throws IOException { - } - } - - /** - * A MetaScannerVisitor that provides a consistent view of the table's - * META entries during concurrent splits (see HBASE-5986 for details). 
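[Editor's illustration] The listAllRegions and allTableRegions helpers documented above can be driven as in this sketch. The table name is hypothetical, conf is an existing Configuration, and the map's generic types (HRegionInfo to ServerName) are inferred from the method bodies rather than spelled out in this patch.

    // Every region of one table, mapped to the server currently hosting it.
    NavigableMap<HRegionInfo, ServerName> tableRegions =
        MetaScanner.allTableRegions(conf, Bytes.toBytes("demo_table"), false);
    for (Map.Entry<HRegionInfo, ServerName> e : tableRegions.entrySet()) {
      System.out.println(e.getKey().getEncodedName() + " -> " + e.getValue());
    }
    // Or every user-space region in the cluster, leaving out offlined regions.
    List<HRegionInfo> allRegions = MetaScanner.listAllRegions(conf, false);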
This class - * does not guarantee ordered traversal of meta entries, and can block until the - * META entries for daughters are available during splits. - */ - public static abstract class BlockingMetaScannerVisitor - extends MetaScannerVisitorBase { - - private static final int DEFAULT_BLOCKING_TIMEOUT = 10000; - private Configuration conf; - private TreeSet daughterRegions = new TreeSet(Bytes.BYTES_COMPARATOR); - private int blockingTimeout; - private HTable metaTable; - - public BlockingMetaScannerVisitor(Configuration conf) { - this.conf = conf; - this.blockingTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - DEFAULT_BLOCKING_TIMEOUT); - } - - public abstract boolean processRowInternal(Result rowResult) throws IOException; - - @Override - public void close() throws IOException { - super.close(); - if (metaTable != null) { - metaTable.close(); - metaTable = null; - } - } - - public HTable getMetaTable() throws IOException { - if (metaTable == null) { - metaTable = new HTable(conf, HConstants.META_TABLE_NAME); - } - return metaTable; - } - - @Override - public boolean processRow(Result rowResult) throws IOException { - HRegionInfo info = getHRegionInfo(rowResult); - if (info == null) { - return true; - } - - if (daughterRegions.remove(info.getRegionName())) { - return true; //we have already processed this row - } - - if (info.isSplitParent()) { - /* we have found a parent region which was split. We have to ensure that it's daughters are - * seen by this scanner as well, so we block until they are added to the META table. Even - * though we are waiting for META entries, ACID semantics in HBase indicates that this - * scanner might not see the new rows. So we manually query the daughter rows */ - PairOfSameType daughters = HRegionInfo.getDaughterRegions(rowResult); - HRegionInfo splitA = daughters.getFirst(); - HRegionInfo splitB = daughters.getSecond(); - - HTable metaTable = getMetaTable(); - long start = System.currentTimeMillis(); - Result resultA = getRegionResultBlocking(metaTable, blockingTimeout, - splitA.getRegionName()); - if (resultA != null) { - processRow(resultA); - daughterRegions.add(splitA.getRegionName()); - } else { - throw new RegionOfflineException("Split daughter region " + - splitA.getRegionNameAsString() + " cannot be found in META."); - } - long rem = blockingTimeout - (System.currentTimeMillis() - start); - - Result resultB = getRegionResultBlocking(metaTable, rem, - splitB.getRegionName()); - if (resultB != null) { - processRow(resultB); - daughterRegions.add(splitB.getRegionName()); - } else { - throw new RegionOfflineException("Split daughter region " + - splitB.getRegionNameAsString() + " cannot be found in META."); - } - } - - return processRowInternal(rowResult); - } - - private Result getRegionResultBlocking(HTable metaTable, long timeout, byte[] regionName) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("blocking until region is in META: " + Bytes.toStringBinary(regionName)); - } - long start = System.currentTimeMillis(); - while (System.currentTimeMillis() - start < timeout) { - Get get = new Get(regionName); - Result result = metaTable.get(get); - HRegionInfo info = getHRegionInfo(result); - if (info != null) { - return result; - } - try { - Thread.sleep(10); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - break; - } - } - return null; - } - } - - /** - * A MetaScannerVisitor for a table. 
Provides a consistent view of the table's - * META entries during concurrent splits (see HBASE-5986 for details). This class - * does not guarantee ordered traversal of meta entries, and can block until the - * META entries for daughters are available during splits. - */ - public static abstract class TableMetaScannerVisitor extends BlockingMetaScannerVisitor { - private byte[] tableName; - - public TableMetaScannerVisitor(Configuration conf, byte[] tableName) { - super(conf); - this.tableName = tableName; - } - - @Override - public final boolean processRow(Result rowResult) throws IOException { - HRegionInfo info = getHRegionInfo(rowResult); - if (info == null) { - return true; - } - if (!(Bytes.equals(info.getTableName(), tableName))) { - return false; - } - return super.processRow(rowResult); - } - - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java deleted file mode 100644 index 5605013..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * Container for Actions (i.e. Get, Delete, or Put), which are grouped by - * regionName. Intended to be used with HConnectionManager.processBatch() - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public final class MultiAction { - - // map of regions to lists of puts/gets/deletes for that region. - public Map>> actions = new TreeMap>>(Bytes.BYTES_COMPARATOR); - - public MultiAction() { - super(); - } - - /** - * Get the total number of Actions - * - * @return total number of Actions for all groups in this container. - */ - public int size() { - int size = 0; - for (List l : actions.values()) { - size += l.size(); - } - return size; - } - - /** - * Add an Action to this container based on it's regionName. If the regionName - * is wrong, the initial execution will fail, but will be automatically - * retried after looking up the correct region. 
- * - * @param regionName - * @param a - */ - public void add(byte[] regionName, Action a) { - List> rsActions = actions.get(regionName); - if (rsActions == null) { - rsActions = new ArrayList>(); - actions.put(regionName, rsActions); - } - rsActions.add(a); - } - - public Set getRegions() { - return actions.keySet(); - } - - /** - * @return All actions from all regions in this container - */ - public List> allActions() { - List> res = new ArrayList>(); - for (List> lst : actions.values()) { - res.addAll(lst); - } - return res; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java deleted file mode 100644 index f83f9ab..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; - -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.ArrayList; -import java.util.TreeMap; - -/** - * A container for Result objects, grouped by regionName. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class MultiResponse { - - // map of regionName to list of (Results paired to the original index for that - // Result) - private Map>> results = - new TreeMap>>(Bytes.BYTES_COMPARATOR); - - public MultiResponse() { - super(); - } - - /** - * @return Number of pairs in this container - */ - public int size() { - int size = 0; - for (Collection c : results.values()) { - size += c.size(); - } - return size; - } - - /** - * Add the pair to the container, grouped by the regionName - * - * @param regionName - * @param r - * First item in the pair is the original index of the Action - * (request). Second item is the Result. Result will be empty for - * successful Put and Delete actions. 
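[Editor's illustration] To make the index/result pairing concrete, a small hypothetical sketch follows; the region name is made up, an empty Result stands in for a successful Put, and an exception object records a per-action failure.

    MultiResponse response = new MultiResponse();
    byte[] regionName = Bytes.toBytes("demo_table,,1356553149");   // hypothetical region name
    response.add(regionName, 0, new Result());                     // action #0 succeeded
    response.add(regionName, 1, new IOException("region moved"));  // action #1 failed
    System.out.println("entries: " + response.size());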
- */ - public void add(byte[] regionName, Pair r) { - List> rs = results.get(regionName); - if (rs == null) { - rs = new ArrayList>(); - results.put(regionName, rs); - } - rs.add(r); - } - - public void add(byte []regionName, int originalIndex, Object resOrEx) { - add(regionName, new Pair(originalIndex, resOrEx)); - } - - public Map>> getResults() { - return results; - } -} \ No newline at end of file diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/Mutation.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/Mutation.java deleted file mode 100644 index 9f601e6..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/Mutation.java +++ /dev/null @@ -1,232 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.UUID; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.util.Bytes; - -@InterfaceAudience.Public -@InterfaceStability.Evolving -public abstract class Mutation extends OperationWithAttributes implements Row { - // Attribute used in Mutations to indicate the originating cluster. - private static final String CLUSTER_ID_ATTR = "_c.id_"; - - protected byte [] row = null; - protected long ts = HConstants.LATEST_TIMESTAMP; - protected long lockId = -1L; - protected boolean writeToWAL = true; - protected Map> familyMap = - new TreeMap>(Bytes.BYTES_COMPARATOR); - - /** - * Compile the column family (i.e. schema) information - * into a Map. Useful for parsing and aggregation by debugging, - * logging, and administration tools. - * @return Map - */ - @Override - public Map getFingerprint() { - Map map = new HashMap(); - List families = new ArrayList(); - // ideally, we would also include table information, but that information - // is not stored in each Operation instance. - map.put("families", families); - for (Map.Entry> entry : this.familyMap.entrySet()) { - families.add(Bytes.toStringBinary(entry.getKey())); - } - return map; - } - - /** - * Compile the details beyond the scope of getFingerprint (row, columns, - * timestamps, etc.) into a Map along with the fingerprinted information. - * Useful for debugging, logging, and administration tools. - * @param maxCols a limit on the number of columns output prior to truncation - * @return Map - */ - @Override - public Map toMap(int maxCols) { - // we start with the fingerprint map and build on top of it. 
- Map map = getFingerprint(); - // replace the fingerprint's simple list of families with a - // map from column families to lists of qualifiers and kv details - Map>> columns = - new HashMap>>(); - map.put("families", columns); - map.put("row", Bytes.toStringBinary(this.row)); - int colCount = 0; - // iterate through all column families affected - for (Map.Entry> entry : this.familyMap.entrySet()) { - // map from this family to details for each kv affected within the family - List> qualifierDetails = - new ArrayList>(); - columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails); - colCount += entry.getValue().size(); - if (maxCols <= 0) { - continue; - } - // add details for each kv - for (KeyValue kv : entry.getValue()) { - if (--maxCols <= 0 ) { - continue; - } - Map kvMap = kv.toStringMap(); - // row and family information are already available in the bigger map - kvMap.remove("row"); - kvMap.remove("family"); - qualifierDetails.add(kvMap); - } - } - map.put("totalColumns", colCount); - // add the id if set - if (getId() != null) { - map.put("id", getId()); - } - return map; - } - - /** - * @return true if edits should be applied to WAL, false if not - */ - public boolean getWriteToWAL() { - return this.writeToWAL; - } - - /** - * Set whether this Delete should be written to the WAL or not. - * Not writing the WAL means you may lose edits on server crash. - * @param write true if edits should be written to WAL, false if not - */ - public void setWriteToWAL(boolean write) { - this.writeToWAL = write; - } - - /** - * Method for retrieving the put's familyMap - * @return familyMap - */ - public Map> getFamilyMap() { - return this.familyMap; - } - - /** - * Method for setting the put's familyMap - */ - public void setFamilyMap(Map> map) { - this.familyMap = map; - } - - /** - * Method to check if the familyMap is empty - * @return true if empty, false otherwise - */ - public boolean isEmpty() { - return familyMap.isEmpty(); - } - - /** - * Method for retrieving the delete's row - * @return row - */ - @Override - public byte [] getRow() { - return this.row; - } - - public int compareTo(final Row d) { - return Bytes.compareTo(this.getRow(), d.getRow()); - } - - /** - * Method for retrieving the delete's RowLock - * @return RowLock - */ - public RowLock getRowLock() { - return new RowLock(this.row, this.lockId); - } - - /** - * Method for retrieving the delete's lock ID. - * - * @return The lock ID. - */ - public long getLockId() { - return this.lockId; - } - - /** - * Method for retrieving the timestamp - * @return timestamp - */ - public long getTimeStamp() { - return this.ts; - } - - /** - * Set the replication custer id. - * @param clusterId - */ - public void setClusterId(UUID clusterId) { - if (clusterId == null) return; - byte[] val = new byte[2*Bytes.SIZEOF_LONG]; - Bytes.putLong(val, 0, clusterId.getMostSignificantBits()); - Bytes.putLong(val, Bytes.SIZEOF_LONG, clusterId.getLeastSignificantBits()); - setAttribute(CLUSTER_ID_ATTR, val); - } - - /** - * @return The replication cluster id. 
- */ - public UUID getClusterId() { - byte[] attr = getAttribute(CLUSTER_ID_ATTR); - if (attr == null) { - return HConstants.DEFAULT_CLUSTER_ID; - } - return new UUID(Bytes.toLong(attr,0), Bytes.toLong(attr, Bytes.SIZEOF_LONG)); - } - - /** - * @return the total number of KeyValues - */ - public int size() { - int size = 0; - for(List kvList : this.familyMap.values()) { - size += kvList.size(); - } - return size; - } - - /** - * @return the number of different families - */ - public int numFamilies() { - return familyMap.size(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java deleted file mode 100644 index 6de1007..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.RegionException; - -/** - * Thrown when no region server can be found for a region - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class NoServerForRegionException extends RegionException { - private static final long serialVersionUID = 1L << 11 - 1L; - - /** default constructor */ - public NoServerForRegionException() { - super(); - } - - /** - * Constructor - * @param s message - */ - public NoServerForRegionException(String s) { - super(s); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/Operation.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/Operation.java deleted file mode 100644 index 07e9c19..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/Operation.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.util.Map; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.codehaus.jackson.map.ObjectMapper; - -/** - * Superclass for any type that maps to a potentially application-level query. - * (e.g. Put, Get, Delete, Scan, Next, etc.) - * Contains methods for exposure to logging and debugging tools. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public abstract class Operation { - // TODO make this configurable - private static final int DEFAULT_MAX_COLS = 5; - - /** - * Produces a Map containing a fingerprint which identifies the type and - * the static schema components of a query (i.e. column families) - * @return a map containing fingerprint information (i.e. column families) - */ - public abstract Map getFingerprint(); - - /** - * Produces a Map containing a summary of the details of a query - * beyond the scope of the fingerprint (i.e. columns, rows...) - * @param maxCols a limit on the number of columns output prior to truncation - * @return a map containing parameters of a query (i.e. rows, columns...) - */ - public abstract Map toMap(int maxCols); - - /** - * Produces a Map containing a full summary of a query. - * @return a map containing parameters of a query (i.e. rows, columns...) - */ - public Map toMap() { - return toMap(DEFAULT_MAX_COLS); - } - - /** - * Produces a JSON object for fingerprint and details exposure in a - * parseable format. - * @param maxCols a limit on the number of columns to include in the JSON - * @return a JSONObject containing this Operation's information, as a string - */ - public String toJSON(int maxCols) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.writeValueAsString(toMap(maxCols)); - } - - /** - * Produces a JSON object sufficient for description of a query - * in a debugging or logging context. - * @return the produced JSON object, as a string - */ - public String toJSON() throws IOException { - return toJSON(DEFAULT_MAX_COLS); - } - - /** - * Produces a string representation of this Operation. It defaults to a JSON - * representation, but falls back to a string representation of the - * fingerprint and details in the case of a JSON encoding failure. - * @param maxCols a limit on the number of columns output in the summary - * prior to truncation - * @return a JSON-parseable String - */ - public String toString(int maxCols) { - /* for now this is merely a wrapper from producing a JSON string, but - * toJSON is kept separate in case this is changed to be a less parsable - * pretty printed representation. - */ - try { - return toJSON(maxCols); - } catch (IOException ioe) { - return toMap(maxCols).toString(); - } - } - - /** - * Produces a string representation of this Operation. It defaults to a JSON - * representation, but falls back to a string representation of the - * fingerprint and details in the case of a JSON encoding failure. 
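[Editor's illustration] A sketch of how these hooks are typically used for logging and debugging: any concrete operation (a Get here, with hypothetical row and column names) can be fingerprinted, summarized as a bounded Map, or rendered as JSON. The enclosing method is assumed to declare IOException for toJSON.

    Get get = new Get(Bytes.toBytes("row-00042"));
    get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    Map<String, Object> fingerprint = get.getFingerprint();   // just the families touched
    Map<String, Object> details = get.toMap(3);               // at most 3 columns of detail
    String json = get.toJSON(3);                              // the same summary as a JSON string
    System.out.println(json);
    System.out.println(get);   // toString() falls back to the map form if JSON encoding fails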
- * @return String - */ - @Override - public String toString() { - return toString(DEFAULT_MAX_COLS); - } -} - diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java deleted file mode 100644 index 52d50aa..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.ClassSize; - -@InterfaceAudience.Public -@InterfaceStability.Evolving -public abstract class OperationWithAttributes extends Operation implements Attributes { - // a opaque blob of attributes - private Map attributes; - - // used for uniquely identifying an operation - static public String ID_ATRIBUTE = "_operation.attributes.id"; - - public void setAttribute(String name, byte[] value) { - if (attributes == null && value == null) { - return; - } - - if (attributes == null) { - attributes = new HashMap(); - } - - if (value == null) { - attributes.remove(name); - if (attributes.isEmpty()) { - this.attributes = null; - } - } else { - attributes.put(name, value); - } - } - - public byte[] getAttribute(String name) { - if (attributes == null) { - return null; - } - - return attributes.get(name); - } - - public Map getAttributesMap() { - if (attributes == null) { - return Collections.emptyMap(); - } - return Collections.unmodifiableMap(attributes); - } - - protected long getAttributeSize() { - long size = 0; - if (attributes != null) { - size += ClassSize.align(this.attributes.size() * ClassSize.MAP_ENTRY); - for(Map.Entry entry : this.attributes.entrySet()) { - size += ClassSize.align(ClassSize.STRING + entry.getKey().length()); - size += ClassSize.align(ClassSize.ARRAY + entry.getValue().length); - } - } - return size; - } - - /** - * This method allows you to set an identifier on an operation. The original - * motivation for this was to allow the identifier to be used in slow query - * logging, but this could obviously be useful in other places. One use of - * this could be to put a class.method identifier in here to see where the - * slow query is coming from. - * @param id - * id to set for the scan - */ - public void setId(String id) { - setAttribute(ID_ATRIBUTE, Bytes.toBytes(id)); - } - - /** - * This method allows you to retrieve the identifier for the operation if one - * was set. 
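[Editor's illustration] A short sketch of tagging an operation for slow-query logging, using a hypothetical class.method identifier and a made-up custom attribute name:

    Scan scan = new Scan();
    scan.setId("ReportJob.nightlyAggregate");                         // hypothetical call-site identifier
    scan.setAttribute("requesting-user", Bytes.toBytes("analytics")); // opaque custom attribute
    System.out.println(scan.getId());                                 // ReportJob.nightlyAggregate
    System.out.println(scan.getAttributesMap().keySet());             // includes both attributes set above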
- * @return the id or null if not set - */ - public String getId() { - byte[] attr = getAttribute(ID_ATRIBUTE); - return attr == null? null: Bytes.toString(attr); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/Put.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/Put.java deleted file mode 100644 index 31b5573..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/Put.java +++ /dev/null @@ -1,357 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.io.HeapSize; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.ClassSize; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; - -/** - * Used to perform Put operations for a single row. - *

      - * To perform a Put, instantiate a Put object with the row to insert to and - * for each column to be inserted, execute {@link #add(byte[], byte[], byte[]) add} or - * {@link #add(byte[], byte[], long, byte[]) add} if setting the timestamp. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class Put extends Mutation implements HeapSize, Comparable { - private static final long OVERHEAD = ClassSize.align( - ClassSize.OBJECT + 2 * ClassSize.REFERENCE + - 2 * Bytes.SIZEOF_LONG + Bytes.SIZEOF_BOOLEAN + - ClassSize.REFERENCE + ClassSize.TREEMAP); - - /** - * Create a Put operation for the specified row. - * @param row row key - */ - public Put(byte [] row) { - this(row, null); - } - - /** - * Create a Put operation for the specified row, using an existing row lock. - * @param row row key - * @param rowLock previously acquired row lock, or null - */ - public Put(byte [] row, RowLock rowLock) { - this(row, HConstants.LATEST_TIMESTAMP, rowLock); - } - - /** - * Create a Put operation for the specified row, using a given timestamp. - * - * @param row row key - * @param ts timestamp - */ - public Put(byte[] row, long ts) { - this(row, ts, null); - } - - /** - * Create a Put operation for the specified row, using a given timestamp, and an existing row lock. - * @param row row key - * @param ts timestamp - * @param rowLock previously acquired row lock, or null - */ - public Put(byte [] row, long ts, RowLock rowLock) { - if(row == null || row.length > HConstants.MAX_ROW_LENGTH) { - throw new IllegalArgumentException("Row key is invalid"); - } - this.row = Arrays.copyOf(row, row.length); - this.ts = ts; - if(rowLock != null) { - this.lockId = rowLock.getLockId(); - } - } - - /** - * Copy constructor. Creates a Put operation cloned from the specified Put. - * @param putToCopy put to copy - */ - public Put(Put putToCopy) { - this(putToCopy.getRow(), putToCopy.ts, putToCopy.getRowLock()); - this.familyMap = - new TreeMap>(Bytes.BYTES_COMPARATOR); - for(Map.Entry> entry : - putToCopy.getFamilyMap().entrySet()) { - this.familyMap.put(entry.getKey(), entry.getValue()); - } - this.writeToWAL = putToCopy.writeToWAL; - } - - /** - * Add the specified column and value to this Put operation. - * @param family family name - * @param qualifier column qualifier - * @param value column value - * @return this - */ - public Put add(byte [] family, byte [] qualifier, byte [] value) { - return add(family, qualifier, this.ts, value); - } - - /** - * Add the specified column and value, with the specified timestamp as - * its version to this Put operation. - * @param family family name - * @param qualifier column qualifier - * @param ts version timestamp - * @param value column value - * @return this - */ - public Put add(byte [] family, byte [] qualifier, long ts, byte [] value) { - List list = getKeyValueList(family); - KeyValue kv = createPutKeyValue(family, qualifier, ts, value); - list.add(kv); - familyMap.put(kv.getFamily(), list); - return this; - } - - /** - * Add the specified KeyValue to this Put operation. Operation assumes that - * the passed KeyValue is immutable and its backing array will not be modified - * for the duration of this Put. 
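[Editor's illustration] The Put pattern described in the class javadoc might look like the sketch below; the row key, family, qualifiers, and timestamp are hypothetical, and table is an open HTable as in the earlier examples.

    Put put = new Put(Bytes.toBytes("user#1001"));                 // hypothetical row key
    put.add(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes("Ada"));
    // Same row, with an explicit version timestamp for this cell only.
    put.add(Bytes.toBytes("info"), Bytes.toBytes("login"), 1356553149000L, Bytes.toBytes("ada"));
    table.put(put);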
- * @param kv individual KeyValue - * @return this - * @throws java.io.IOException e - */ - public Put add(KeyValue kv) throws IOException{ - byte [] family = kv.getFamily(); - List list = getKeyValueList(family); - //Checking that the row of the kv is the same as the put - int res = Bytes.compareTo(this.row, 0, row.length, - kv.getBuffer(), kv.getRowOffset(), kv.getRowLength()); - if(res != 0) { - throw new IOException("The row in the recently added KeyValue " + - Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(), - kv.getRowLength()) + " doesn't match the original one " + - Bytes.toStringBinary(this.row)); - } - list.add(kv); - familyMap.put(family, list); - return this; - } - - /* - * Create a KeyValue with this objects row key and the Put identifier. - * - * @return a KeyValue with this objects row key and the Put identifier. - */ - private KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, - byte[] value) { - return new KeyValue(this.row, family, qualifier, ts, KeyValue.Type.Put, - value); - } - - /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family & qualifier. - * Both given arguments must match the KeyValue object to return true. - * - * @param family column family - * @param qualifier column qualifier - * @return returns true if the given family and qualifier already has an - * existing KeyValue object in the family map. - */ - public boolean has(byte [] family, byte [] qualifier) { - return has(family, qualifier, this.ts, new byte[0], true, true); - } - - /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family, qualifier and timestamp. - * All 3 given arguments must match the KeyValue object to return true. - * - * @param family column family - * @param qualifier column qualifier - * @param ts timestamp - * @return returns true if the given family, qualifier and timestamp already has an - * existing KeyValue object in the family map. - */ - public boolean has(byte [] family, byte [] qualifier, long ts) { - return has(family, qualifier, ts, new byte[0], false, true); - } - - /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family, qualifier and timestamp. - * All 3 given arguments must match the KeyValue object to return true. - * - * @param family column family - * @param qualifier column qualifier - * @param value value to check - * @return returns true if the given family, qualifier and value already has an - * existing KeyValue object in the family map. - */ - public boolean has(byte [] family, byte [] qualifier, byte [] value) { - return has(family, qualifier, this.ts, value, true, false); - } - - /** - * A convenience method to determine if this object's familyMap contains - * the given value assigned to the given family, qualifier and timestamp. - * All 4 given arguments must match the KeyValue object to return true. - * - * @param family column family - * @param qualifier column qualifier - * @param ts timestamp - * @param value value to check - * @return returns true if the given family, qualifier timestamp and value - * already has an existing KeyValue object in the family map. 
- */ - public boolean has(byte [] family, byte [] qualifier, long ts, byte [] value) { - return has(family, qualifier, ts, value, false, false); - } - - /* - * Private method to determine if this object's familyMap contains - * the given value assigned to the given family, qualifier and timestamp - * respecting the 2 boolean arguments - * - * @param family - * @param qualifier - * @param ts - * @param value - * @param ignoreTS - * @param ignoreValue - * @return returns true if the given family, qualifier timestamp and value - * already has an existing KeyValue object in the family map. - */ - private boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, - boolean ignoreTS, boolean ignoreValue) { - List list = getKeyValueList(family); - if (list.size() == 0) { - return false; - } - // Boolean analysis of ignoreTS/ignoreValue. - // T T => 2 - // T F => 3 (first is always true) - // F T => 2 - // F F => 1 - if (!ignoreTS && !ignoreValue) { - for (KeyValue kv : list) { - if (Arrays.equals(kv.getFamily(), family) && - Arrays.equals(kv.getQualifier(), qualifier) && - Arrays.equals(kv.getValue(), value) && - kv.getTimestamp() == ts) { - return true; - } - } - } else if (ignoreValue && !ignoreTS) { - for (KeyValue kv : list) { - if (Arrays.equals(kv.getFamily(), family) && Arrays.equals(kv.getQualifier(), qualifier) - && kv.getTimestamp() == ts) { - return true; - } - } - } else if (!ignoreValue && ignoreTS) { - for (KeyValue kv : list) { - if (Arrays.equals(kv.getFamily(), family) && Arrays.equals(kv.getQualifier(), qualifier) - && Arrays.equals(kv.getValue(), value)) { - return true; - } - } - } else { - for (KeyValue kv : list) { - if (Arrays.equals(kv.getFamily(), family) && - Arrays.equals(kv.getQualifier(), qualifier)) { - return true; - } - } - } - return false; - } - - /** - * Returns a list of all KeyValue objects with matching column family and qualifier. - * - * @param family column family - * @param qualifier column qualifier - * @return a list of KeyValue objects with the matching family and qualifier, - * returns an empty list if one doesnt exist for the given family. - */ - public List get(byte[] family, byte[] qualifier) { - List filteredList = new ArrayList(); - for (KeyValue kv: getKeyValueList(family)) { - if (Arrays.equals(kv.getQualifier(), qualifier)) { - filteredList.add(kv); - } - } - return filteredList; - } - - /** - * Creates an empty list if one doesnt exist for the given column family - * or else it returns the associated list of KeyValue objects. - * - * @param family column family - * @return a list of KeyValue objects, returns an empty list if one doesnt exist. 
- */ - private List getKeyValueList(byte[] family) { - List list = familyMap.get(family); - if(list == null) { - list = new ArrayList(0); - } - return list; - } - - //HeapSize - public long heapSize() { - long heapsize = OVERHEAD; - //Adding row - heapsize += ClassSize.align(ClassSize.ARRAY + this.row.length); - - //Adding map overhead - heapsize += - ClassSize.align(this.familyMap.size() * ClassSize.MAP_ENTRY); - for(Map.Entry> entry : this.familyMap.entrySet()) { - //Adding key overhead - heapsize += - ClassSize.align(ClassSize.ARRAY + entry.getKey().length); - - //This part is kinds tricky since the JVM can reuse references if you - //store the same value, but have a good match with SizeOf at the moment - //Adding value overhead - heapsize += ClassSize.align(ClassSize.ARRAYLIST); - int size = entry.getValue().size(); - heapsize += ClassSize.align(ClassSize.ARRAY + - size * ClassSize.REFERENCE); - - for(KeyValue kv : entry.getValue()) { - heapsize += kv.heapSize(); - } - } - heapsize += getAttributeSize(); - - return ClassSize.align((int)heapsize); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java deleted file mode 100644 index 65a5088..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.RegionException; - -/** Thrown when a table can not be located */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class RegionOfflineException extends RegionException { - private static final long serialVersionUID = 466008402L; - /** default constructor */ - public RegionOfflineException() { - super(); - } - - /** @param s message */ - public RegionOfflineException(String s) { - super(s); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/Result.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/Result.java deleted file mode 100644 index 9e15bbb..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ /dev/null @@ -1,709 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import java.nio.BufferOverflowException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Comparator; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.TreeMap; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.SplitKeyValue; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * Single row result of a {@link Get} or {@link Scan} query.

      - * - * This class is NOT THREAD SAFE.

      - * - * Convenience methods are available that return various {@link Map} - * structures and values directly.

      - * - * To get a complete mapping of all cells in the Result, which can include - * multiple families and multiple versions, use {@link #getMap()}.

      - * - * To get a mapping of each family to its columns (qualifiers and values), - * including only the latest version of each, use {@link #getNoVersionMap()}. - * - * To get a mapping of qualifiers to latest values for an individual family use - * {@link #getFamilyMap(byte[])}.

      - * - * To get the latest value for a specific family and qualifier use {@link #getValue(byte[], byte[])}. - * - * A Result is backed by an array of {@link KeyValue} objects, each representing - * an HBase cell defined by the row, family, qualifier, timestamp, and value.

      - * - * The underlying {@link KeyValue} objects can be accessed through the method {@link #list()}. - * Each KeyValue can then be accessed through - * {@link KeyValue#getRow()}, {@link KeyValue#getFamily()}, {@link KeyValue#getQualifier()}, - * {@link KeyValue#getTimestamp()}, and {@link KeyValue#getValue()}.

      - * - * If you need to overwrite a Result with another Result instance -- as in the old 'mapred' RecordReader next - * invocations -- then create an empty Result with the null constructor and in then use {@link #copyFrom(Result)} - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class Result { - private KeyValue [] kvs; - // We're not using java serialization. Transient here is just a marker to say - // that this is where we cache row if we're ever asked for it. - private transient byte [] row = null; - // Ditto for familyMap. It can be composed on fly from passed in kvs. - private transient NavigableMap>> familyMap = null; - - // never use directly - private static byte [] buffer = null; - private static final int PAD_WIDTH = 128; - - /** - * Creates an empty Result w/ no KeyValue payload; returns null if you call {@link #raw()}. - * Use this to represent no results if null won't do or in old 'mapred' as oppposed to 'mapreduce' package - * MapReduce where you need to overwrite a Result - * instance with a {@link #copyFrom(Result)} call. - */ - public Result() { - super(); - } - - /** - * Instantiate a Result with the specified array of KeyValues. - *
      Note: You must ensure that the keyvalues - * are already sorted - * @param kvs array of KeyValues - */ - public Result(KeyValue [] kvs) { - this.kvs = kvs; - } - - /** - * Instantiate a Result with the specified List of KeyValues. - *
      Note: You must ensure that the keyvalues - * are already sorted - * @param kvs List of KeyValues - */ - public Result(List kvs) { - this(kvs.toArray(new KeyValue[kvs.size()])); - } - - /** - * Method for retrieving the row key that corresponds to - * the row from which this Result was created. - * @return row - */ - public byte [] getRow() { - if (this.row == null) { - this.row = this.kvs == null || this.kvs.length == 0? null: this.kvs[0].getRow(); - } - return this.row; - } - - /** - * Return the array of KeyValues backing this Result instance. - * - * The array is sorted from smallest -> largest using the - * {@link KeyValue#COMPARATOR}. - * - * The array only contains what your Get or Scan specifies and no more. - * For example if you request column "A" 1 version you will have at most 1 - * KeyValue in the array. If you request column "A" with 2 version you will - * have at most 2 KeyValues, with the first one being the newer timestamp and - * the second being the older timestamp (this is the sort order defined by - * {@link KeyValue#COMPARATOR}). If columns don't exist, they won't be - * present in the result. Therefore if you ask for 1 version all columns, - * it is safe to iterate over this array and expect to see 1 KeyValue for - * each column and no more. - * - * This API is faster than using getFamilyMap() and getMap() - * - * @return array of KeyValues; can be null if nothing in the result - */ - public KeyValue[] raw() { - return kvs; - } - - /** - * Create a sorted list of the KeyValue's in this result. - * - * Since HBase 0.20.5 this is equivalent to raw(). - * - * @return The sorted list of KeyValue's. - */ - public List list() { - return isEmpty()? null: Arrays.asList(raw()); - } - - /** - * Return the KeyValues for the specific column. The KeyValues are sorted in - * the {@link KeyValue#COMPARATOR} order. That implies the first entry in - * the list is the most recent column. If the query (Scan or Get) only - * requested 1 version the list will contain at most 1 entry. If the column - * did not exist in the result set (either the column does not exist - * or the column was not selected in the query) the list will be empty. - * - * Also see getColumnLatest which returns just a KeyValue - * - * @param family the family - * @param qualifier - * @return a list of KeyValues for this column or empty list if the column - * did not exist in the result set - */ - public List getColumn(byte [] family, byte [] qualifier) { - List result = new ArrayList(); - - KeyValue [] kvs = raw(); - - if (kvs == null || kvs.length == 0) { - return result; - } - int pos = binarySearch(kvs, family, qualifier); - if (pos == -1) { - return result; // cant find it - } - - for (int i = pos ; i < kvs.length ; i++ ) { - KeyValue kv = kvs[i]; - if (kv.matchingColumn(family,qualifier)) { - result.add(kv); - } else { - break; - } - } - - return result; - } - - protected int binarySearch(final KeyValue [] kvs, - final byte [] family, - final byte [] qualifier) { - KeyValue searchTerm = - KeyValue.createFirstOnRow(kvs[0].getRow(), - family, qualifier); - - // pos === ( -(insertion point) - 1) - int pos = Arrays.binarySearch(kvs, searchTerm, KeyValue.COMPARATOR); - // never will exact match - if (pos < 0) { - pos = (pos+1) * -1; - // pos is now insertion point - } - if (pos == kvs.length) { - return -1; // doesn't exist - } - return pos; - } - - /** - * Searches for the latest value for the specified column. 
- * - * @param kvs the array to search - * @param family family name - * @param foffset family offset - * @param flength family length - * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * - * @return the index where the value was found, or -1 otherwise - */ - protected int binarySearch(final KeyValue [] kvs, - final byte [] family, final int foffset, final int flength, - final byte [] qualifier, final int qoffset, final int qlength) { - - double keyValueSize = (double) - KeyValue.getKeyValueDataStructureSize(kvs[0].getRowLength(), flength, qlength, 0); - - if (buffer == null || keyValueSize > buffer.length) { - // pad to the smallest multiple of the pad width - buffer = new byte[(int) Math.ceil(keyValueSize / PAD_WIDTH) * PAD_WIDTH]; - } - - KeyValue searchTerm = KeyValue.createFirstOnRow(buffer, 0, - kvs[0].getBuffer(), kvs[0].getRowOffset(), kvs[0].getRowLength(), - family, foffset, flength, - qualifier, qoffset, qlength); - - // pos === ( -(insertion point) - 1) - int pos = Arrays.binarySearch(kvs, searchTerm, KeyValue.COMPARATOR); - // never will exact match - if (pos < 0) { - pos = (pos+1) * -1; - // pos is now insertion point - } - if (pos == kvs.length) { - return -1; // doesn't exist - } - return pos; - } - - /** - * The KeyValue for the most recent timestamp for a given column. - * - * @param family - * @param qualifier - * - * @return the KeyValue for the column, or null if no value exists in the row or none have been - * selected in the query (Get/Scan) - */ - public KeyValue getColumnLatest(byte [] family, byte [] qualifier) { - KeyValue [] kvs = raw(); // side effect possibly. - if (kvs == null || kvs.length == 0) { - return null; - } - int pos = binarySearch(kvs, family, qualifier); - if (pos == -1) { - return null; - } - KeyValue kv = kvs[pos]; - if (kv.matchingColumn(family, qualifier)) { - return kv; - } - return null; - } - - /** - * The KeyValue for the most recent timestamp for a given column. - * - * @param family family name - * @param foffset family offset - * @param flength family length - * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * - * @return the KeyValue for the column, or null if no value exists in the row or none have been - * selected in the query (Get/Scan) - */ - public KeyValue getColumnLatest(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { - - KeyValue [] kvs = raw(); // side effect possibly. - if (kvs == null || kvs.length == 0) { - return null; - } - int pos = binarySearch(kvs, family, foffset, flength, qualifier, qoffset, qlength); - if (pos == -1) { - return null; - } - KeyValue kv = kvs[pos]; - if (kv.matchingColumn(family, foffset, flength, qualifier, qoffset, qlength)) { - return kv; - } - return null; - } - - /** - * Get the latest version of the specified column. - * @param family family name - * @param qualifier column qualifier - * @return value of latest version of column, null if none found - */ - public byte[] getValue(byte [] family, byte [] qualifier) { - KeyValue kv = getColumnLatest(family, qualifier); - if (kv == null) { - return null; - } - return kv.getValue(); - } - - /** - * Returns the value wrapped in a new ByteBuffer. 
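A short sketch of the read path using the Result accessors documented above (isEmpty, getValue, raw); the family/qualifier names "cf" and "q" and the helper class are illustrative only.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class ResultReadExample {
  static byte[] readLatest(HTable table, byte[] row) throws IOException {
    Get get = new Get(row);
    get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    Result result = table.get(get);
    if (result.isEmpty()) {
      return null;
    }
    // getValue() returns the latest version of the requested column, or null
    // if it is absent; the sorted backing array is available via raw().
    return result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
  }
}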
- * - * @param family family name - * @param qualifier column qualifier - * - * @return the latest version of the column, or null if none found - */ - public ByteBuffer getValueAsByteBuffer(byte [] family, byte [] qualifier) { - - KeyValue kv = getColumnLatest(family, 0, family.length, qualifier, 0, qualifier.length); - - if (kv == null) { - return null; - } - return kv.getValueAsByteBuffer(); - } - - /** - * Returns the value wrapped in a new ByteBuffer. - * - * @param family family name - * @param foffset family offset - * @param flength family length - * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * - * @return the latest version of the column, or null if none found - */ - public ByteBuffer getValueAsByteBuffer(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { - - KeyValue kv = getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength); - - if (kv == null) { - return null; - } - return kv.getValueAsByteBuffer(); - } - - /** - * Loads the latest version of the specified column into the provided ByteBuffer. - *

      - * Does not clear or flip the buffer. - * - * @param family family name - * @param qualifier column qualifier - * @param dst the buffer where to write the value - * - * @return true if a value was found, false otherwise - * - * @throws BufferOverflowException there is insufficient space remaining in the buffer - */ - public boolean loadValue(byte [] family, byte [] qualifier, ByteBuffer dst) - throws BufferOverflowException { - return loadValue(family, 0, family.length, qualifier, 0, qualifier.length, dst); - } - - /** - * Loads the latest version of the specified column into the provided ByteBuffer. - *

      - * Does not clear or flip the buffer. - * - * @param family family name - * @param foffset family offset - * @param flength family length - * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * @param dst the buffer where to write the value - * - * @return true if a value was found, false otherwise - * - * @throws BufferOverflowException there is insufficient space remaining in the buffer - */ - public boolean loadValue(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength, ByteBuffer dst) - throws BufferOverflowException { - KeyValue kv = getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength); - - if (kv == null) { - return false; - } - kv.loadValue(dst); - return true; - } - - /** - * Checks if the specified column contains a non-empty value (not a zero-length byte array). - * - * @param family family name - * @param qualifier column qualifier - * - * @return whether or not a latest value exists and is not empty - */ - public boolean containsNonEmptyColumn(byte [] family, byte [] qualifier) { - - return containsNonEmptyColumn(family, 0, family.length, qualifier, 0, qualifier.length); - } - - /** - * Checks if the specified column contains a non-empty value (not a zero-length byte array). - * - * @param family family name - * @param foffset family offset - * @param flength family length - * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * - * @return whether or not a latest value exists and is not empty - */ - public boolean containsNonEmptyColumn(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { - - KeyValue kv = getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength); - - return (kv != null) && (kv.getValueLength() > 0); - } - - /** - * Checks if the specified column contains an empty value (a zero-length byte array). - * - * @param family family name - * @param qualifier column qualifier - * - * @return whether or not a latest value exists and is empty - */ - public boolean containsEmptyColumn(byte [] family, byte [] qualifier) { - - return containsEmptyColumn(family, 0, family.length, qualifier, 0, qualifier.length); - } - - /** - * Checks if the specified column contains an empty value (a zero-length byte array). - * - * @param family family name - * @param foffset family offset - * @param flength family length - * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * - * @return whether or not a latest value exists and is empty - */ - public boolean containsEmptyColumn(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { - KeyValue kv = getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength); - - return (kv != null) && (kv.getValueLength() == 0); - } - - /** - * Checks for existence of a value for the specified column (empty or not). - * - * @param family family name - * @param qualifier column qualifier - * - * @return true if at least one value exists in the result, false if not - */ - public boolean containsColumn(byte [] family, byte [] qualifier) { - KeyValue kv = getColumnLatest(family, qualifier); - return kv != null; - } - - /** - * Checks for existence of a value for the specified column (empty or not). 
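Where values should be copied into a caller-supplied buffer, loadValue avoids per-value allocations. A hedged sketch under the assumption of a 4 KB value ceiling; the family/qualifier names are illustrative, and the caller clears and flips the buffer since loadValue does neither.

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadValueExample {
  // Reused across rows; the capacity is an arbitrary illustrative choice and
  // must cover the largest expected value, or loadValue throws
  // BufferOverflowException.
  private final ByteBuffer buf = ByteBuffer.allocate(4096);

  boolean copyLatestValue(Result result) {
    buf.clear();  // loadValue neither clears nor flips the buffer
    boolean found = result.loadValue(Bytes.toBytes("cf"), Bytes.toBytes("q"), buf);
    if (found) {
      buf.flip(); // make the copied bytes readable
    }
    return found;
  }
}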
- * - * @param family family name - * @param foffset family offset - * @param flength family length - * @param qualifier column qualifier - * @param qoffset qualifier offset - * @param qlength qualifier length - * - * @return true if at least one value exists in the result, false if not - */ - public boolean containsColumn(byte [] family, int foffset, int flength, - byte [] qualifier, int qoffset, int qlength) { - - return getColumnLatest(family, foffset, flength, qualifier, qoffset, qlength) != null; - } - - /** - * Map of families to all versions of its qualifiers and values. - *

      - * - * Returns a three level Map of the form: - * Map<family,Map<qualifier,Map<timestamp,value>>> - *

      - * Note: All other map returning methods make use of this map internally. - * @return map from families to qualifiers to versions - */ - public NavigableMap>> getMap() { - if (this.familyMap != null) { - return this.familyMap; - } - if(isEmpty()) { - return null; - } - this.familyMap = new TreeMap>>(Bytes.BYTES_COMPARATOR); - for(KeyValue kv : this.kvs) { - SplitKeyValue splitKV = kv.split(); - byte [] family = splitKV.getFamily(); - NavigableMap> columnMap = - familyMap.get(family); - if(columnMap == null) { - columnMap = new TreeMap> - (Bytes.BYTES_COMPARATOR); - familyMap.put(family, columnMap); - } - byte [] qualifier = splitKV.getQualifier(); - NavigableMap versionMap = columnMap.get(qualifier); - if(versionMap == null) { - versionMap = new TreeMap(new Comparator() { - public int compare(Long l1, Long l2) { - return l2.compareTo(l1); - } - }); - columnMap.put(qualifier, versionMap); - } - Long timestamp = Bytes.toLong(splitKV.getTimestamp()); - byte [] value = splitKV.getValue(); - versionMap.put(timestamp, value); - } - return this.familyMap; - } - - /** - * Map of families to their most recent qualifiers and values. - *

      - * - * Returns a two level Map of the form: Map<family,Map<qualifier,value>> - *

      - * The most recent version of each qualifier will be used. - * @return map from families to qualifiers and value - */ - public NavigableMap> getNoVersionMap() { - if(this.familyMap == null) { - getMap(); - } - if(isEmpty()) { - return null; - } - NavigableMap> returnMap = - new TreeMap>(Bytes.BYTES_COMPARATOR); - for(Map.Entry>> - familyEntry : familyMap.entrySet()) { - NavigableMap qualifierMap = - new TreeMap(Bytes.BYTES_COMPARATOR); - for(Map.Entry> qualifierEntry : - familyEntry.getValue().entrySet()) { - byte [] value = - qualifierEntry.getValue().get(qualifierEntry.getValue().firstKey()); - qualifierMap.put(qualifierEntry.getKey(), value); - } - returnMap.put(familyEntry.getKey(), qualifierMap); - } - return returnMap; - } - - /** - * Map of qualifiers to values. - *
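As a sketch of the map-based views described above, assuming a single family named "cf": getFamilyMap returns qualifier-to-latest-value pairs, and returns null only for an empty Result.

import java.util.Map;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyMapExample {
  static void printLatestValues(Result result) {
    // Qualifier -> latest value for one family; "cf" is an illustrative name.
    NavigableMap<byte[], byte[]> qualifiers = result.getFamilyMap(Bytes.toBytes("cf"));
    if (qualifiers == null) {
      return; // empty Result
    }
    for (Map.Entry<byte[], byte[]> e : qualifiers.entrySet()) {
      System.out.println(Bytes.toStringBinary(e.getKey()) + " = "
          + Bytes.toStringBinary(e.getValue()));
    }
  }
}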

      - * Returns a Map of the form: Map<qualifier,value> - * @param family column family to get - * @return map of qualifiers to values - */ - public NavigableMap getFamilyMap(byte [] family) { - if(this.familyMap == null) { - getMap(); - } - if(isEmpty()) { - return null; - } - NavigableMap returnMap = - new TreeMap(Bytes.BYTES_COMPARATOR); - NavigableMap> qualifierMap = - familyMap.get(family); - if(qualifierMap == null) { - return returnMap; - } - for(Map.Entry> entry : - qualifierMap.entrySet()) { - byte [] value = - entry.getValue().get(entry.getValue().firstKey()); - returnMap.put(entry.getKey(), value); - } - return returnMap; - } - - /** - * Returns the value of the first column in the Result. - * @return value of the first column - */ - public byte [] value() { - if (isEmpty()) { - return null; - } - return kvs[0].getValue(); - } - - /** - * Check if the underlying KeyValue [] is empty or not - * @return true if empty - */ - public boolean isEmpty() { - return this.kvs == null || this.kvs.length == 0; - } - - /** - * @return the size of the underlying KeyValue [] - */ - public int size() { - return this.kvs == null? 0: this.kvs.length; - } - - /** - * @return String - */ - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("keyvalues="); - if(isEmpty()) { - sb.append("NONE"); - return sb.toString(); - } - sb.append("{"); - boolean moreThanOne = false; - for(KeyValue kv : this.kvs) { - if(moreThanOne) { - sb.append(", "); - } else { - moreThanOne = true; - } - sb.append(kv.toString()); - } - sb.append("}"); - return sb.toString(); - } - - /** - * Does a deep comparison of two Results, down to the byte arrays. - * @param res1 first result to compare - * @param res2 second result to compare - * @throws Exception Every difference is throwing an exception - */ - public static void compareResults(Result res1, Result res2) - throws Exception { - if (res2 == null) { - throw new Exception("There wasn't enough rows, we stopped at " - + Bytes.toStringBinary(res1.getRow())); - } - if (res1.size() != res2.size()) { - throw new Exception("This row doesn't have the same number of KVs: " - + res1.toString() + " compared to " + res2.toString()); - } - KeyValue[] ourKVs = res1.raw(); - KeyValue[] replicatedKVs = res2.raw(); - for (int i = 0; i < res1.size(); i++) { - if (!ourKVs[i].equals(replicatedKVs[i]) || - !Bytes.equals(ourKVs[i].getValue(), replicatedKVs[i].getValue())) { - throw new Exception("This result was different: " - + res1.toString() + " compared to " + res2.toString()); - } - } - } - - /** - * Copy another Result into this one. Needed for the old Mapred framework - * @param other - */ - public void copyFrom(Result other) { - this.row = null; - this.familyMap = null; - this.kvs = other.kvs; - } -} \ No newline at end of file diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java deleted file mode 100644 index ef72543..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.io.Closeable; -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Interface for client-side scanning. - * Go to {@link HTable} to obtain instances. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public interface ResultScanner extends Closeable, Iterable { - - /** - * Grab the next row's worth of values. The scanner will return a Result. - * @return Result object if there is another row, null if the scanner is - * exhausted. - * @throws IOException e - */ - public Result next() throws IOException; - - /** - * @param nbRows number of rows to return - * @return Between zero and nbRows Results - * @throws IOException e - */ - public Result [] next(int nbRows) throws IOException; - - /** - * Closes the scanner and releases any resources it has allocated - */ - public void close(); -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java deleted file mode 100644 index d0b98f5..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java +++ /dev/null @@ -1,109 +0,0 @@ -/** - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.util.Date; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Exception thrown by HTable methods when an attempt to do something (like - * commit changes) fails after a bunch of retries. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class RetriesExhaustedException extends IOException { - private static final long serialVersionUID = 1876775844L; - - public RetriesExhaustedException(final String msg) { - super(msg); - } - - public RetriesExhaustedException(final String msg, final IOException e) { - super(msg, e); - } - - /** - * Datastructure that allows adding more info around Throwable incident. 
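A brief sketch of driving a ResultScanner with the next/close contract above; the family name is illustrative, and next() can surface IOException subclasses such as RetriesExhaustedException once the client's configured retries are used up.

import java.io.IOException;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScannerLoopExample {
  static long countRows(HTable table) throws IOException {
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf")); // illustrative family name
    ResultScanner scanner = table.getScanner(scan);
    long rows = 0;
    try {
      // next() returns null once the scanner is exhausted.
      for (Result r = scanner.next(); r != null; r = scanner.next()) {
        rows++;
      }
    } finally {
      scanner.close(); // always release the server-side scanner
    }
    return rows;
  }
}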
- */ - public static class ThrowableWithExtraContext { - private final Throwable t; - private final long when; - private final String extras; - - public ThrowableWithExtraContext(final Throwable t, final long when, - final String extras) { - this.t = t; - this.when = when; - this.extras = extras; - } - - @Override - public String toString() { - return new Date(this.when).toString() + ", " + extras + ", " + t.toString(); - } - } - - /** - * Create a new RetriesExhaustedException from the list of prior failures. - * @param callableVitals Details from the {@link ServerCallable} we were using - * when we got this exception. - * @param numTries The number of tries we made - * @param exceptions List of exceptions that failed before giving up - */ - public RetriesExhaustedException(final String callableVitals, int numTries, - List exceptions) { - super(getMessage(callableVitals, numTries, exceptions)); - } - - /** - * Create a new RetriesExhaustedException from the list of prior failures. - * @param numTries - * @param exceptions List of exceptions that failed before giving up - */ - public RetriesExhaustedException(final int numTries, - final List exceptions) { - super(getMessage(numTries, exceptions)); - } - - private static String getMessage(String callableVitals, int numTries, - List exceptions) { - StringBuilder buffer = new StringBuilder("Failed contacting "); - buffer.append(callableVitals); - buffer.append(" after "); - buffer.append(numTries + 1); - buffer.append(" attempts.\nExceptions:\n"); - for (Throwable t : exceptions) { - buffer.append(t.toString()); - buffer.append("\n"); - } - return buffer.toString(); - } - - private static String getMessage(final int numTries, - final List exceptions) { - StringBuilder buffer = new StringBuilder("Failed after attempts="); - buffer.append(numTries + 1); - buffer.append(", exceptions:\n"); - for (ThrowableWithExtraContext t : exceptions) { - buffer.append(t.toString()); - buffer.append("\n"); - } - return buffer.toString(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java deleted file mode 100644 index e7f5a4f..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DoNotRetryIOException; - -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -/** - * This subclass of {@link org.apache.hadoop.hbase.client.RetriesExhaustedException} - * is thrown when we have more information about which rows were causing which - * exceptions on what servers. You can call {@link #mayHaveClusterIssues()} - * and if the result is false, you have input error problems, otherwise you - * may have cluster issues. You can iterate over the causes, rows and last - * known server addresses via {@link #getNumExceptions()} and - * {@link #getCause(int)}, {@link #getRow(int)} and {@link #getHostnamePort(int)}. - */ -@SuppressWarnings("serial") -@InterfaceAudience.Public -@InterfaceStability.Stable -public class RetriesExhaustedWithDetailsException -extends RetriesExhaustedException { - List exceptions; - List actions; - List hostnameAndPort; - - public RetriesExhaustedWithDetailsException(List exceptions, - List actions, - List hostnameAndPort) { - super("Failed " + exceptions.size() + " action" + - pluralize(exceptions) + ": " + - getDesc(exceptions, actions, hostnameAndPort)); - - this.exceptions = exceptions; - this.actions = actions; - this.hostnameAndPort = hostnameAndPort; - } - - public List getCauses() { - return exceptions; - } - - public int getNumExceptions() { - return exceptions.size(); - } - - public Throwable getCause(int i) { - return exceptions.get(i); - } - - public Row getRow(int i) { - return actions.get(i); - } - - public String getHostnamePort(final int i) { - return this.hostnameAndPort.get(i); - } - - public boolean mayHaveClusterIssues() { - boolean res = false; - - // If all of the exceptions are DNRIOE not exception - for (Throwable t : exceptions) { - if ( !(t instanceof DoNotRetryIOException)) { - res = true; - } - } - return res; - } - - - public static String pluralize(Collection c) { - return pluralize(c.size()); - } - - public static String pluralize(int c) { - return c > 1 ? 
"s" : ""; - } - - public static String getDesc(List exceptions, - List actions, - List hostnamePort) { - String s = getDesc(classifyExs(exceptions)); - StringBuilder addrs = new StringBuilder(s); - addrs.append("servers with issues: "); - Set uniqAddr = new HashSet(); - uniqAddr.addAll(hostnamePort); - - for(String addr : uniqAddr) { - addrs.append(addr).append(", "); - } - return s; - } - - public static Map classifyExs(List ths) { - Map cls = new HashMap(); - for (Throwable t : ths) { - if (t == null) continue; - String name = ""; - if (t instanceof DoNotRetryIOException) { - name = t.getMessage(); - } else { - name = t.getClass().getSimpleName(); - } - Integer i = cls.get(name); - if (i == null) { - i = 0; - } - i += 1; - cls.put(name, i); - } - return cls; - } - - public static String getDesc(Map classificaton) { - StringBuilder classificatons =new StringBuilder(11); - for (Map.Entry e : classificaton.entrySet()) { - classificatons.append(e.getKey()); - classificatons.append(": "); - classificatons.append(e.getValue()); - classificatons.append(" time"); - classificatons.append(pluralize(e.getValue())); - classificatons.append(", "); - } - return classificatons.toString(); - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/Row.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/Row.java deleted file mode 100644 index 63dca08..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/Row.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Has a row. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public interface Row extends Comparable { - /** - * @return The row. - */ - public byte [] getRow(); -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/RowLock.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/RowLock.java deleted file mode 100644 index 6736877..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/RowLock.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Holds row name and lock id. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class RowLock { - private byte [] row = null; - private long lockId = -1L; - - /** - * Creates a RowLock from a row and lock id - * @param row row to lock on - * @param lockId the lock id - */ - public RowLock(final byte [] row, final long lockId) { - this.row = row; - this.lockId = lockId; - } - - /** - * Creates a RowLock with only a lock id - * @param lockId lock id - */ - public RowLock(final long lockId) { - this.lockId = lockId; - } - - /** - * Get the row for this RowLock - * @return the row - */ - public byte [] getRow() { - return row; - } - - /** - * Get the lock id from this RowLock - * @return the lock id - */ - public long getLockId() { - return lockId; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java deleted file mode 100644 index 8a6e5a7..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * Performs multiple mutations atomically on a single row. - * Currently {@link Put} and {@link Delete} are supported. - * - * The mutations are performed in the order in which they - * were added. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class RowMutations implements Row { - private List mutations = new ArrayList(); - private byte [] row; - private static final byte VERSION = (byte)0; - - /** Constructor for Writable. DO NOT USE */ - public RowMutations() {} - - /** - * Create an atomic mutation for the specified row. 
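A hedged sketch of building the RowMutations batch described above; the column names are made up, and the resulting batch is typically applied atomically through HTable#mutateRow.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.util.Bytes;

public class RowMutationsExample {
  static RowMutations buildAtomicUpdate(byte[] row) throws IOException {
    RowMutations rm = new RowMutations(row);

    Put put = new Put(row);
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("new_q"), Bytes.toBytes("v"));
    rm.add(put); // throws IOException if the Put targets a different row

    Delete delete = new Delete(row);
    delete.deleteColumns(Bytes.toBytes("cf"), Bytes.toBytes("old_q"));
    rm.add(delete);

    // Mutations are applied in insertion order when the batch is submitted.
    return rm;
  }
}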
- * @param row row key - */ - public RowMutations(byte [] row) { - if(row == null || row.length > HConstants.MAX_ROW_LENGTH) { - throw new IllegalArgumentException("Row key is invalid"); - } - this.row = Arrays.copyOf(row, row.length); - } - - /** - * Add a {@link Put} operation to the list of mutations - * @param p The {@link Put} to add - * @throws IOException - */ - public void add(Put p) throws IOException { - internalAdd(p); - } - - /** - * Add a {@link Delete} operation to the list of mutations - * @param d The {@link Delete} to add - * @throws IOException - */ - public void add(Delete d) throws IOException { - internalAdd(d); - } - - private void internalAdd(Mutation m) throws IOException { - int res = Bytes.compareTo(this.row, m.getRow()); - if(res != 0) { - throw new IOException("The row in the recently added Put/Delete " + - Bytes.toStringBinary(m.getRow()) + " doesn't match the original one " + - Bytes.toStringBinary(this.row)); - } - mutations.add(m); - } - - @Override - public int compareTo(Row i) { - return Bytes.compareTo(this.getRow(), i.getRow()); - } - - @Override - public byte[] getRow() { - return row; - } - - /** - * @return An unmodifiable list of the current mutations. - */ - public List getMutations() { - return Collections.unmodifiableList(mutations); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/Scan.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/Scan.java deleted file mode 100644 index 23bbf18..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ /dev/null @@ -1,652 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.filter.IncompatibleFilterException; -import org.apache.hadoop.hbase.io.TimeRange; -import org.apache.hadoop.hbase.util.Bytes; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.NavigableSet; -import java.util.TreeMap; -import java.util.TreeSet; - -/** - * Used to perform Scan operations. - *

      - * All operations are identical to {@link Get} with the exception of - * instantiation. Rather than specifying a single row, an optional startRow - * and stopRow may be defined. If rows are not specified, the Scanner will - * iterate over all rows. - *

      - * To scan everything for each row, instantiate a Scan object. - *

      - * To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}. - * If caching is NOT set, we will use the caching value of the hosting {@link HTable}. See - * {@link HTable#setScannerCaching(int)}. In addition to row caching, it is possible to specify a - * maximum result size, using {@link #setMaxResultSize(long)}. When both are used, - * single server requests are limited by either number of rows or maximum result size, whichever - * limit comes first. - *

      - * To further define the scope of what to get when scanning, perform additional - * methods as outlined below. - *

      - * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} - * for each family to retrieve. - *

      - * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn} - * for each column to retrieve. - *

      - * To only retrieve columns within a specific range of version timestamps, - * execute {@link #setTimeRange(long, long) setTimeRange}. - *

      - * To only retrieve columns with a specific timestamp, execute - * {@link #setTimeStamp(long) setTimestamp}. - *

      - * To limit the number of versions of each column to be returned, execute - * {@link #setMaxVersions(int) setMaxVersions}. - *

      - * To limit the maximum number of values returned for each call to next(), - * execute {@link #setBatch(int) setBatch}. - *

      - * To add a filter, execute {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}. - *

      - * Expert: To explicitly disable server-side block caching for this scan, - * execute {@link #setCacheBlocks(boolean)}. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class Scan extends OperationWithAttributes { - private static final String RAW_ATTR = "_raw_"; - private static final String ISOLATION_LEVEL = "_isolationlevel_"; - - private byte [] startRow = HConstants.EMPTY_START_ROW; - private byte [] stopRow = HConstants.EMPTY_END_ROW; - private int maxVersions = 1; - private int batch = -1; - - private int storeLimit = -1; - private int storeOffset = 0; - - // If application wants to collect scan metrics, it needs to - // call scan.setAttribute(SCAN_ATTRIBUTES_ENABLE, Bytes.toBytes(Boolean.TRUE)) - static public String SCAN_ATTRIBUTES_METRICS_ENABLE = - "scan.attributes.metrics.enable"; - static public String SCAN_ATTRIBUTES_METRICS_DATA = - "scan.attributes.metrics.data"; - - /* - * -1 means no caching - */ - private int caching = -1; - private long maxResultSize = -1; - private boolean cacheBlocks = true; - private Filter filter = null; - private TimeRange tr = new TimeRange(); - private Map> familyMap = - new TreeMap>(Bytes.BYTES_COMPARATOR); - - /** - * Create a Scan operation across all rows. - */ - public Scan() {} - - public Scan(byte [] startRow, Filter filter) { - this(startRow); - this.filter = filter; - } - - /** - * Create a Scan operation starting at the specified row. - *

      - * If the specified row does not exist, the Scanner will start from the - * next closest row after the specified row. - * @param startRow row to start scanner at or after - */ - public Scan(byte [] startRow) { - this.startRow = startRow; - } - - /** - * Create a Scan operation for the range of rows specified. - * @param startRow row to start scanner at or after (inclusive) - * @param stopRow row to stop scanner before (exclusive) - */ - public Scan(byte [] startRow, byte [] stopRow) { - this.startRow = startRow; - this.stopRow = stopRow; - } - - /** - * Creates a new instance of this class while copying all values. - * - * @param scan The scan instance to copy from. - * @throws IOException When copying the values fails. - */ - public Scan(Scan scan) throws IOException { - startRow = scan.getStartRow(); - stopRow = scan.getStopRow(); - maxVersions = scan.getMaxVersions(); - batch = scan.getBatch(); - storeLimit = scan.getMaxResultsPerColumnFamily(); - storeOffset = scan.getRowOffsetPerColumnFamily(); - caching = scan.getCaching(); - maxResultSize = scan.getMaxResultSize(); - cacheBlocks = scan.getCacheBlocks(); - filter = scan.getFilter(); // clone? - TimeRange ctr = scan.getTimeRange(); - tr = new TimeRange(ctr.getMin(), ctr.getMax()); - Map> fams = scan.getFamilyMap(); - for (Map.Entry> entry : fams.entrySet()) { - byte [] fam = entry.getKey(); - NavigableSet cols = entry.getValue(); - if (cols != null && cols.size() > 0) { - for (byte[] col : cols) { - addColumn(fam, col); - } - } else { - addFamily(fam); - } - } - for (Map.Entry attr : scan.getAttributesMap().entrySet()) { - setAttribute(attr.getKey(), attr.getValue()); - } - } - - /** - * Builds a scan object with the same specs as get. - * @param get get to model scan after - */ - public Scan(Get get) { - this.startRow = get.getRow(); - this.stopRow = get.getRow(); - this.filter = get.getFilter(); - this.cacheBlocks = get.getCacheBlocks(); - this.maxVersions = get.getMaxVersions(); - this.storeLimit = get.getMaxResultsPerColumnFamily(); - this.storeOffset = get.getRowOffsetPerColumnFamily(); - this.tr = get.getTimeRange(); - this.familyMap = get.getFamilyMap(); - } - - public boolean isGetScan() { - return this.startRow != null && this.startRow.length > 0 && - Bytes.equals(this.startRow, this.stopRow); - } - - /** - * Get all columns from the specified family. - *

      - * Overrides previous calls to addColumn for this family. - * @param family family name - * @return this - */ - public Scan addFamily(byte [] family) { - familyMap.remove(family); - familyMap.put(family, null); - return this; - } - - /** - * Get the column from the specified family with the specified qualifier. - *

      - * Overrides previous calls to addFamily for this family. - * @param family family name - * @param qualifier column qualifier - * @return this - */ - public Scan addColumn(byte [] family, byte [] qualifier) { - NavigableSet set = familyMap.get(family); - if(set == null) { - set = new TreeSet(Bytes.BYTES_COMPARATOR); - } - if (qualifier == null) { - qualifier = HConstants.EMPTY_BYTE_ARRAY; - } - set.add(qualifier); - familyMap.put(family, set); - return this; - } - - /** - * Get versions of columns only within the specified timestamp range, - * [minStamp, maxStamp). Note, default maximum versions to return is 1. If - * your time range spans more than one version and you want all versions - * returned, up the number of versions beyond the defaut. - * @param minStamp minimum timestamp value, inclusive - * @param maxStamp maximum timestamp value, exclusive - * @throws IOException if invalid time range - * @see #setMaxVersions() - * @see #setMaxVersions(int) - * @return this - */ - public Scan setTimeRange(long minStamp, long maxStamp) - throws IOException { - tr = new TimeRange(minStamp, maxStamp); - return this; - } - - /** - * Get versions of columns with the specified timestamp. Note, default maximum - * versions to return is 1. If your time range spans more than one version - * and you want all versions returned, up the number of versions beyond the - * defaut. - * @param timestamp version timestamp - * @see #setMaxVersions() - * @see #setMaxVersions(int) - * @return this - */ - public Scan setTimeStamp(long timestamp) { - try { - tr = new TimeRange(timestamp, timestamp+1); - } catch(IOException e) { - // Will never happen - } - return this; - } - - /** - * Set the start row of the scan. - * @param startRow row to start scan on (inclusive) - * Note: In order to make startRow exclusive add a trailing 0 byte - * @return this - */ - public Scan setStartRow(byte [] startRow) { - this.startRow = startRow; - return this; - } - - /** - * Set the stop row. - * @param stopRow row to end at (exclusive) - * Note: In order to make stopRow inclusive add a trailing 0 byte - * @return this - */ - public Scan setStopRow(byte [] stopRow) { - this.stopRow = stopRow; - return this; - } - - /** - * Get all available versions. - * @return this - */ - public Scan setMaxVersions() { - this.maxVersions = Integer.MAX_VALUE; - return this; - } - - /** - * Get up to the specified number of versions of each column. - * @param maxVersions maximum versions for each column - * @return this - */ - public Scan setMaxVersions(int maxVersions) { - this.maxVersions = maxVersions; - return this; - } - - /** - * Set the maximum number of values to return for each call to next() - * @param batch the maximum number of values - */ - public void setBatch(int batch) { - if (this.hasFilter() && this.filter.hasFilterRow()) { - throw new IncompatibleFilterException( - "Cannot set batch on a scan using a filter" + - " that returns true for filter.hasFilterRow"); - } - this.batch = batch; - } - - /** - * Set the maximum number of values to return per row per Column Family - * @param limit the maximum number of values returned / row / CF - */ - public void setMaxResultsPerColumnFamily(int limit) { - this.storeLimit = limit; - } - - /** - * Set offset for the row per Column Family. - * @param offset is the number of kvs that will be skipped. - */ - public void setRowOffsetPerColumnFamily(int offset) { - this.storeOffset = offset; - } - - /** - * Set the number of rows for caching that will be passed to scanners. 
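Pulling the Scan knobs above together, a configuration sketch for a bounded scan; the row keys, family, qualifier, and caching value are illustrative choices, not recommendations.

import java.io.IOException;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanConfigExample {
  static void scanRange(HTable table) throws IOException {
    Scan scan = new Scan(Bytes.toBytes("row-000"), Bytes.toBytes("row-100"));
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    scan.setMaxVersions(3);                // up to three versions per column
    scan.setTimeRange(0L, Long.MAX_VALUE); // [minStamp, maxStamp)
    scan.setCaching(100);                  // rows fetched per RPC to the region server
    scan.setCacheBlocks(false);            // skip the server-side block cache
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r = scanner.next(); r != null; r = scanner.next()) {
        // process r
      }
    } finally {
      scanner.close();
    }
  }
}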
- * If not set, the default setting from {@link HTable#getScannerCaching()} will apply. - * Higher caching values will enable faster scanners but will use more memory. - * @param caching the number of rows for caching - */ - public void setCaching(int caching) { - this.caching = caching; - } - - /** - * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)} - */ - public long getMaxResultSize() { - return maxResultSize; - } - - /** - * Set the maximum result size. The default is -1; this means that no specific - * maximum result size will be set for this scan, and the global configured - * value will be used instead. (Defaults to unlimited). - * - * @param maxResultSize The maximum result size in bytes. - */ - public void setMaxResultSize(long maxResultSize) { - this.maxResultSize = maxResultSize; - } - - /** - * Apply the specified server-side filter when performing the Scan. - * @param filter filter to run on the server - * @return this - */ - public Scan setFilter(Filter filter) { - this.filter = filter; - return this; - } - - /** - * Setting the familyMap - * @param familyMap map of family to qualifier - * @return this - */ - public Scan setFamilyMap(Map> familyMap) { - this.familyMap = familyMap; - return this; - } - - /** - * Getting the familyMap - * @return familyMap - */ - public Map> getFamilyMap() { - return this.familyMap; - } - - /** - * @return the number of families in familyMap - */ - public int numFamilies() { - if(hasFamilies()) { - return this.familyMap.size(); - } - return 0; - } - - /** - * @return true if familyMap is non empty, false otherwise - */ - public boolean hasFamilies() { - return !this.familyMap.isEmpty(); - } - - /** - * @return the keys of the familyMap - */ - public byte[][] getFamilies() { - if(hasFamilies()) { - return this.familyMap.keySet().toArray(new byte[0][0]); - } - return null; - } - - /** - * @return the startrow - */ - public byte [] getStartRow() { - return this.startRow; - } - - /** - * @return the stoprow - */ - public byte [] getStopRow() { - return this.stopRow; - } - - /** - * @return the max number of versions to fetch - */ - public int getMaxVersions() { - return this.maxVersions; - } - - /** - * @return maximum number of values to return for a single call to next() - */ - public int getBatch() { - return this.batch; - } - - /** - * @return maximum number of values to return per row per CF - */ - public int getMaxResultsPerColumnFamily() { - return this.storeLimit; - } - - /** - * Method for retrieving the scan's offset per row per column - * family (#kvs to be skipped) - * @return row offset - */ - public int getRowOffsetPerColumnFamily() { - return this.storeOffset; - } - - /** - * @return caching the number of rows fetched when calling next on a scanner - */ - public int getCaching() { - return this.caching; - } - - /** - * @return TimeRange - */ - public TimeRange getTimeRange() { - return this.tr; - } - - /** - * @return RowFilter - */ - public Filter getFilter() { - return filter; - } - - /** - * @return true is a filter has been specified, false if not - */ - public boolean hasFilter() { - return filter != null; - } - - /** - * Set whether blocks should be cached for this Scan. - *

      - * This is true by default. When true, default settings of the table and - * family are used (this will never override caching blocks if the block - * cache is disabled for that family or entirely). - * - * @param cacheBlocks if false, default settings are overridden and blocks - * will not be cached - */ - public void setCacheBlocks(boolean cacheBlocks) { - this.cacheBlocks = cacheBlocks; - } - - /** - * Get whether blocks should be cached for this Scan. - * @return true if default caching should be used, false if blocks should not - * be cached - */ - public boolean getCacheBlocks() { - return cacheBlocks; - } - - /** - * Compile the table and column family (i.e. schema) information - * into a String. Useful for parsing and aggregation by debugging, - * logging, and administration tools. - * @return Map - */ - @Override - public Map getFingerprint() { - Map map = new HashMap(); - List families = new ArrayList(); - if(this.familyMap.size() == 0) { - map.put("families", "ALL"); - return map; - } else { - map.put("families", families); - } - for (Map.Entry> entry : - this.familyMap.entrySet()) { - families.add(Bytes.toStringBinary(entry.getKey())); - } - return map; - } - - /** - * Compile the details beyond the scope of getFingerprint (row, columns, - * timestamps, etc.) into a Map along with the fingerprinted information. - * Useful for debugging, logging, and administration tools. - * @param maxCols a limit on the number of columns output prior to truncation - * @return Map - */ - @Override - public Map toMap(int maxCols) { - // start with the fingerpring map and build on top of it - Map map = getFingerprint(); - // map from families to column list replaces fingerprint's list of families - Map> familyColumns = - new HashMap>(); - map.put("families", familyColumns); - // add scalar information first - map.put("startRow", Bytes.toStringBinary(this.startRow)); - map.put("stopRow", Bytes.toStringBinary(this.stopRow)); - map.put("maxVersions", this.maxVersions); - map.put("batch", this.batch); - map.put("caching", this.caching); - map.put("maxResultSize", this.maxResultSize); - map.put("cacheBlocks", this.cacheBlocks); - List timeRange = new ArrayList(); - timeRange.add(this.tr.getMin()); - timeRange.add(this.tr.getMax()); - map.put("timeRange", timeRange); - int colCount = 0; - // iterate through affected families and list out up to maxCols columns - for (Map.Entry> entry : - this.familyMap.entrySet()) { - List columns = new ArrayList(); - familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns); - if(entry.getValue() == null) { - colCount++; - --maxCols; - columns.add("ALL"); - } else { - colCount += entry.getValue().size(); - if (maxCols <= 0) { - continue; - } - for (byte [] column : entry.getValue()) { - if (--maxCols <= 0) { - continue; - } - columns.add(Bytes.toStringBinary(column)); - } - } - } - map.put("totalColumns", colCount); - if (this.filter != null) { - map.put("filter", this.filter.toString()); - } - // add the id if set - if (getId() != null) { - map.put("id", getId()); - } - return map; - } - - /** - * Enable/disable "raw" mode for this scan. - * If "raw" is enabled the scan will return all - * delete marker and deleted rows that have not - * been collected, yet. - * This is mostly useful for Scan on column families - * that have KEEP_DELETED_ROWS enabled. - * It is an error to specify any column when "raw" is set. - * @param raw True/False to enable/disable "raw" mode. 
- */ - public void setRaw(boolean raw) { - setAttribute(RAW_ATTR, Bytes.toBytes(raw)); - } - - /** - * @return True if this Scan is in "raw" mode. - */ - public boolean isRaw() { - byte[] attr = getAttribute(RAW_ATTR); - return attr == null ? false : Bytes.toBoolean(attr); - } - - /* - * Set the isolation level for this scan. If the - * isolation level is set to READ_UNCOMMITTED, then - * this scan will return data from committed and - * uncommitted transactions. If the isolation level - * is set to READ_COMMITTED, then this scan will return - * data from committed transactions only. If a isolation - * level is not explicitly set on a Scan, then it - * is assumed to be READ_COMMITTED. - * @param level IsolationLevel for this scan - */ - public void setIsolationLevel(IsolationLevel level) { - setAttribute(ISOLATION_LEVEL, level.toBytes()); - } - /* - * @return The isolation level of this scan. - * If no isolation level was set for this scan object, - * then it returns READ_COMMITTED. - * @return The IsolationLevel for this scan - */ - public IsolationLevel getIsolationLevel() { - byte[] attr = getAttribute(ISOLATION_LEVEL); - return attr == null ? IsolationLevel.READ_COMMITTED : - IsolationLevel.fromBytes(attr); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java deleted file mode 100644 index 634d774..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ /dev/null @@ -1,322 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
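The Scan setters deleted above form the public surface of the client-side scan configuration. A minimal sketch of how they combine, assuming the surrounding method declares throws IOException; the row keys, family and qualifier are invented for illustration, while every call is one shown in the removed source:

    // Scan a hypothetical key range, newest 3 versions of one column, inside a
    // time window, with RPC caching tuned for a wide read.
    Scan scan = new Scan();
    scan.setStartRow(Bytes.toBytes("row-0100"));        // inclusive
    scan.setStopRow(Bytes.toBytes("row-0200"));         // exclusive
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    scan.setTimeRange(1356480000000L, 1356566400000L);  // [minStamp, maxStamp)
    scan.setMaxVersions(3);
    scan.setCaching(100);        // rows fetched per scanner RPC
    scan.setBatch(10);           // values per next(); legal here because no row filter is set
    scan.setCacheBlocks(false);  // skip the block cache for a one-off scan
    // setRaw(true) would also surface delete markers, but raw mode may not be
    // combined with explicit columns, so it is omitted in this sketch.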
- */ - -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.net.UnknownHostException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.NotServingRegionException; -import org.apache.hadoop.hbase.RemoteExceptionHandler; -import org.apache.hadoop.hbase.UnknownScannerException; -import org.apache.hadoop.hbase.client.metrics.ScanMetrics; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.RequestConverter; -import org.apache.hadoop.hbase.protobuf.ResponseConverter; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; -import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.net.DNS; - -import com.google.protobuf.ServiceException; - -/** - * Retries scanner operations such as create, next, etc. - * Used by {@link ResultScanner}s made by {@link HTable}. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class ScannerCallable extends ServerCallable { - public static final String LOG_SCANNER_LATENCY_CUTOFF - = "hbase.client.log.scanner.latency.cutoff"; - public static final String LOG_SCANNER_ACTIVITY = "hbase.client.log.scanner.activity"; - - private static final Log LOG = LogFactory.getLog(ScannerCallable.class); - private long scannerId = -1L; - private boolean instantiated = false; - private boolean closed = false; - private Scan scan; - private int caching = 1; - private ScanMetrics scanMetrics; - private boolean logScannerActivity = false; - private int logCutOffLatency = 1000; - - // indicate if it is a remote server call - private boolean isRegionServerRemote = true; - private long nextCallSeq = 0; - - /** - * @param connection which connection - * @param tableName table callable is on - * @param scan the scan to execute - * @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable - * won't collect metrics - */ - public ScannerCallable (HConnection connection, byte [] tableName, Scan scan, - ScanMetrics scanMetrics) { - super(connection, tableName, scan.getStartRow()); - this.scan = scan; - this.scanMetrics = scanMetrics; - Configuration conf = connection.getConfiguration(); - logScannerActivity = conf.getBoolean(LOG_SCANNER_ACTIVITY, false); - logCutOffLatency = conf.getInt(LOG_SCANNER_LATENCY_CUTOFF, 1000); - } - - /** - * @param reload force reload of server location - * @throws IOException - */ - @Override - public void connect(boolean reload) throws IOException { - if (!instantiated || reload) { - super.connect(reload); - checkIfRegionServerIsRemote(); - instantiated = true; - } - - // check how often we retry. - // HConnectionManager will call instantiateServer with reload==true - // if and only if for retries. 
- if (reload && this.scanMetrics != null) { - this.scanMetrics.countOfRPCRetries.incrementAndGet(); - if (isRegionServerRemote) { - this.scanMetrics.countOfRemoteRPCRetries.incrementAndGet(); - } - } - } - - /** - * compare the local machine hostname with region server's hostname - * to decide if hbase client connects to a remote region server - * @throws UnknownHostException. - */ - private void checkIfRegionServerIsRemote() throws UnknownHostException { - String myAddress = DNS.getDefaultHost("default", "default"); - if (this.location.getHostname().equalsIgnoreCase(myAddress)) { - isRegionServerRemote = false; - } else { - isRegionServerRemote = true; - } - } - - /** - * @see java.util.concurrent.Callable#call() - */ - public Result [] call() throws IOException { - if (closed) { - if (scannerId != -1) { - close(); - } - } else { - if (scannerId == -1L) { - this.scannerId = openScanner(); - } else { - Result [] rrs = null; - try { - incRPCcallsMetrics(); - ScanRequest request = - RequestConverter.buildScanRequest(scannerId, caching, false, nextCallSeq); - try { - ScanResponse response = server.scan(null, request); - // Client and RS maintain a nextCallSeq number during the scan. Every next() call - // from client to server will increment this number in both sides. Client passes this - // number along with the request and at RS side both the incoming nextCallSeq and its - // nextCallSeq will be matched. In case of a timeout this increment at the client side - // should not happen. If at the server side fetching of next batch of data was over, - // there will be mismatch in the nextCallSeq number. Server will throw - // OutOfOrderScannerNextException and then client will reopen the scanner with startrow - // as the last successfully retrieved row. - // See HBASE-5974 - nextCallSeq++; - long timestamp = System.currentTimeMillis(); - rrs = ResponseConverter.getResults(response); - if (logScannerActivity) { - long now = System.currentTimeMillis(); - if (now - timestamp > logCutOffLatency) { - int rows = rrs == null ? 0 : rrs.length; - LOG.info("Took " + (now-timestamp) + "ms to fetch " - + rows + " rows from scanner=" + scannerId); - } - } - if (response.hasMoreResults() - && !response.getMoreResults()) { - scannerId = -1L; - closed = true; - return null; - } - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - updateResultsMetrics(rrs); - } catch (IOException e) { - if (logScannerActivity) { - LOG.info("Got exception in fetching from scanner=" - + scannerId, e); - } - IOException ioe = e; - if (e instanceof RemoteException) { - ioe = RemoteExceptionHandler.decodeRemoteException((RemoteException)e); - } - if (logScannerActivity && (ioe instanceof UnknownScannerException)) { - try { - HRegionLocation location = - connection.relocateRegion(tableName, scan.getStartRow()); - LOG.info("Scanner=" + scannerId - + " expired, current region location is " + location.toString() - + " ip:" + location.getHostnamePort()); - } catch (Throwable t) { - LOG.info("Failed to relocate region", t); - } - } - if (ioe instanceof NotServingRegionException) { - // Throw a DNRE so that we break out of cycle of calling NSRE - // when what we need is to open scanner against new location. - // Attach NSRE to signal client that it needs to resetup scanner. 
- if (this.scanMetrics != null) { - this.scanMetrics.countOfNSRE.incrementAndGet(); - } - throw new DoNotRetryIOException("Reset scanner", ioe); - } else if (ioe instanceof RegionServerStoppedException) { - // Throw a DNRE so that we break out of cycle of calling RSSE - // when what we need is to open scanner against new location. - // Attach RSSE to signal client that it needs to resetup scanner. - throw new DoNotRetryIOException("Reset scanner", ioe); - } else { - // The outer layers will retry - throw ioe; - } - } - return rrs; - } - } - return null; - } - - private void incRPCcallsMetrics() { - if (this.scanMetrics == null) { - return; - } - this.scanMetrics.countOfRPCcalls.incrementAndGet(); - if (isRegionServerRemote) { - this.scanMetrics.countOfRemoteRPCcalls.incrementAndGet(); - } - } - - private void updateResultsMetrics(Result[] rrs) { - if (this.scanMetrics == null || rrs == null) { - return; - } - /* - * broken by protobufs - for (Result rr : rrs) { - if (rr.getBytes() != null) { - this.scanMetrics.countOfBytesInResults.inc(rr.getBytes().getLength()); - if (isRegionServerRemote) { - this.scanMetrics.countOfBytesInRemoteResults.inc( - rr.getBytes().getLength()); - } - } - } - */ - } - - private void close() { - if (this.scannerId == -1L) { - return; - } - try { - incRPCcallsMetrics(); - ScanRequest request = - RequestConverter.buildScanRequest(this.scannerId, 0, true); - try { - server.scan(null, request); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } catch (IOException e) { - LOG.warn("Ignore, probably already closed", e); - } - this.scannerId = -1L; - } - - protected long openScanner() throws IOException { - incRPCcallsMetrics(); - ScanRequest request = - RequestConverter.buildScanRequest( - this.location.getRegionInfo().getRegionName(), - this.scan, 0, false); - try { - ScanResponse response = server.scan(null, request); - long id = response.getScannerId(); - if (logScannerActivity) { - LOG.info("Open scanner=" + id + " for scan=" + scan.toString() - + " on region " + this.location.toString() + " ip:" - + this.location.getHostnamePort()); - } - return id; - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - - protected Scan getScan() { - return scan; - } - - /** - * Call this when the next invocation of call should close the scanner - */ - public void setClose() { - this.closed = true; - } - - /** - * @return the HRegionInfo for the current region - */ - public HRegionInfo getHRegionInfo() { - if (!instantiated) { - return null; - } - return location.getRegionInfo(); - } - - /** - * Get the number of rows that will be fetched on next - * @return the number of rows for caching - */ - public int getCaching() { - return caching; - } - - /** - * Set the number of rows that will be fetched on next - * @param caching the number of rows for caching - */ - public void setCaching(int caching) { - this.caching = caching; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java deleted file mode 100644 index 08bc61c..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
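ScannerCallable itself is driven by the scanner plumbing rather than by user code; applications reach it through HTable#getScanner, which is also where ScannerTimeoutException and the "Reset scanner" DoNotRetryIOExceptions above surface. A hedged consumer-side sketch, with table and family names invented and the surrounding method assumed to declare throws IOException:

    HTable table = new HTable(conf, "myTable");   // hypothetical table
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));
    scan.setCaching(100);                         // rows returned by each ScannerCallable.call()
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        // process r; if the lease expires between next() calls the iteration
        // fails with ScannerTimeoutException and the scan must be restarted
      }
    } finally {
      scanner.close();                            // issues the closing ScanRequest
      table.close();
    }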
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DoNotRetryIOException; - -/** - * Thrown when a scanner has timed out. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class ScannerTimeoutException extends DoNotRetryIOException { - - private static final long serialVersionUID = 8788838690290688313L; - - /** default constructor */ - ScannerTimeoutException() { - super(); - } - - /** @param s */ - ScannerTimeoutException(String s) { - super(s); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java deleted file mode 100644 index 7c8418a..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java +++ /dev/null @@ -1,243 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.lang.reflect.UndeclaredThrowableException; -import java.net.ConnectException; -import java.net.SocketTimeoutException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.Callable; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.ipc.HBaseClientRPC; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.ipc.RemoteException; - -import com.google.protobuf.ServiceException; - -/** - * Abstract class that implements {@link Callable}. Implementation stipulates - * return type and method we actually invoke on remote Server. Usually - * used inside a try/catch that fields usual connection failures all wrapped - * up in a retry loop. - *

      Call {@link #connect(boolean)} to connect to server hosting region - * that contains the passed row in the passed table before invoking - * {@link #call()}. - * @see HConnection#getRegionServerWithoutRetries(ServerCallable) - * @param the class that the ServerCallable handles - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public abstract class ServerCallable implements Callable { - protected final HConnection connection; - protected final byte [] tableName; - protected final byte [] row; - protected HRegionLocation location; - protected ClientProtocol server; - protected int callTimeout; - protected long startTime, endTime; - - /** - * @param connection Connection to use. - * @param tableName Table name to which row belongs. - * @param row The row we want in tableName. - */ - public ServerCallable(HConnection connection, byte [] tableName, byte [] row) { - this(connection, tableName, row, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - } - - public ServerCallable(HConnection connection, byte [] tableName, byte [] row, int callTimeout) { - this.connection = connection; - this.tableName = tableName; - this.row = row; - this.callTimeout = callTimeout; - } - - /** - * Connect to the server hosting region with row from tablename. - * @param reload Set this to true if connection should re-find the region - * @throws IOException e - */ - public void connect(final boolean reload) throws IOException { - this.location = connection.getRegionLocation(tableName, row, reload); - this.server = connection.getClient(location.getHostname(), - location.getPort()); - } - - /** @return the server name - * @deprecated Just use {@link #toString()} instead. - */ - public String getServerName() { - if (location == null) return null; - return location.getHostnamePort(); - } - - /** @return the region name - * @deprecated Just use {@link #toString()} instead. - */ - public byte[] getRegionName() { - if (location == null) return null; - return location.getRegionInfo().getRegionName(); - } - - /** @return the row - * @deprecated Just use {@link #toString()} instead. - */ - public byte [] getRow() { - return row; - } - - public void beforeCall() { - HBaseClientRPC.setRpcTimeout(this.callTimeout); - this.startTime = System.currentTimeMillis(); - } - - public void afterCall() { - HBaseClientRPC.resetRpcTimeout(); - this.endTime = System.currentTimeMillis(); - } - - public void shouldRetry(Throwable throwable) throws IOException { - if (this.callTimeout != HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT) - if (throwable instanceof SocketTimeoutException - || (this.endTime - this.startTime > this.callTimeout)) { - throw (SocketTimeoutException) (SocketTimeoutException) new SocketTimeoutException( - "Call to access row '" + Bytes.toString(row) + "' on table '" - + Bytes.toString(tableName) - + "' failed on socket timeout exception: " + throwable) - .initCause(throwable); - } else { - this.callTimeout = ((int) (this.endTime - this.startTime)); - } - } - - /** - * @return {@link HConnection} instance used by this Callable. - */ - HConnection getConnection() { - return this.connection; - } - - /** - * Run this instance with retries, timed waits, - * and refinds of missing regions. 
- * - * @return an object of type T - * @throws IOException if a remote or network exception occurs - * @throws RuntimeException other unspecified error - */ - public T withRetries() - throws IOException, RuntimeException { - Configuration c = getConnection().getConfiguration(); - final long pause = c.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE); - final int numRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); - List exceptions = - new ArrayList(); - for (int tries = 0; tries < numRetries; tries++) { - try { - beforeCall(); - connect(tries != 0); - return call(); - } catch (Throwable t) { - shouldRetry(t); - t = translateException(t); - if (t instanceof SocketTimeoutException || - t instanceof ConnectException || - t instanceof RetriesExhaustedException) { - // if thrown these exceptions, we clear all the cache entries that - // map to that slow/dead server; otherwise, let cache miss and ask - // .META. again to find the new location - HRegionLocation hrl = location; - if (hrl != null) { - getConnection().clearCaches(hrl.getHostnamePort()); - } - } - RetriesExhaustedException.ThrowableWithExtraContext qt = - new RetriesExhaustedException.ThrowableWithExtraContext(t, - System.currentTimeMillis(), toString()); - exceptions.add(qt); - if (tries == numRetries - 1) { - throw new RetriesExhaustedException(tries, exceptions); - } - } finally { - afterCall(); - } - try { - Thread.sleep(ConnectionUtils.getPauseTime(pause, tries)); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IOException("Giving up after tries=" + tries, e); - } - } - return null; - } - - /** - * Run this instance against the server once. - * @return an object of type T - * @throws IOException if a remote or network exception occurs - * @throws RuntimeException other unspecified error - */ - public T withoutRetries() - throws IOException, RuntimeException { - try { - beforeCall(); - connect(false); - return call(); - } catch (Throwable t) { - Throwable t2 = translateException(t); - if (t2 instanceof IOException) { - throw (IOException)t2; - } else { - throw new RuntimeException(t2); - } - } finally { - afterCall(); - } - } - - protected static Throwable translateException(Throwable t) throws IOException { - if (t instanceof UndeclaredThrowableException) { - t = t.getCause(); - } - if (t instanceof RemoteException) { - t = ((RemoteException)t).unwrapRemoteException(); - } - if (t instanceof ServiceException) { - ServiceException se = (ServiceException)t; - Throwable cause = se.getCause(); - if (cause != null && cause instanceof DoNotRetryIOException) { - throw (DoNotRetryIOException)cause; - } - } else if (t instanceof DoNotRetryIOException) { - throw (DoNotRetryIOException)t; - } - return t; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java deleted file mode 100644 index bad7ced..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java +++ /dev/null @@ -1,96 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
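withRetries()/withoutRetries() above are the retry engine that client RPCs in this module funnel through. A sketch of the subclassing pattern; the constructor, connect(), call() and withRetries() come from the class above, while doRegionCall is a made-up placeholder standing in for a real protobuf request, and connection/tableName/row are assumed to be in scope (surrounding method declares throws IOException):

    ServerCallable<Result> callable =
        new ServerCallable<Result>(connection, tableName, row) {
          @Override
          public Result call() throws IOException {
            // 'server' (ClientProtocol) and 'location' were filled in by connect(reload)
            return doRegionCall(server, location, row);   // hypothetical helper
          }
        };
    Result result = callable.withRetries();   // pauses, retries and relocates the region on failure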
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.io.compress.Compression; - -/** - * Immutable HColumnDescriptor - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { - - /** - * @param desc wrapped - */ - public UnmodifyableHColumnDescriptor (final HColumnDescriptor desc) { - super(desc); - } - - /** - * @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(byte[], byte[]) - */ - @Override - public HColumnDescriptor setValue(byte[] key, byte[] value) { - throw new UnsupportedOperationException("HColumnDescriptor is read-only"); - } - - /** - * @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(java.lang.String, java.lang.String) - */ - @Override - public HColumnDescriptor setValue(String key, String value) { - throw new UnsupportedOperationException("HColumnDescriptor is read-only"); - } - - /** - * @see org.apache.hadoop.hbase.HColumnDescriptor#setMaxVersions(int) - */ - @Override - public HColumnDescriptor setMaxVersions(int maxVersions) { - throw new UnsupportedOperationException("HColumnDescriptor is read-only"); - } - - /** - * @see org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean) - */ - @Override - public HColumnDescriptor setInMemory(boolean inMemory) { - throw new UnsupportedOperationException("HColumnDescriptor is read-only"); - } - - /** - * @see org.apache.hadoop.hbase.HColumnDescriptor#setBlockCacheEnabled(boolean) - */ - @Override - public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { - throw new UnsupportedOperationException("HColumnDescriptor is read-only"); - } - - /** - * @see org.apache.hadoop.hbase.HColumnDescriptor#setTimeToLive(int) - */ - @Override - public HColumnDescriptor setTimeToLive(int timeToLive) { - throw new UnsupportedOperationException("HColumnDescriptor is read-only"); - } - - /** - * @see org.apache.hadoop.hbase.HColumnDescriptor#setCompressionType(org.apache.hadoop.hbase.io.compress.Compression.Algorithm) - */ - @Override - public HColumnDescriptor setCompressionType(Compression.Algorithm type) { - throw new UnsupportedOperationException("HColumnDescriptor is read-only"); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java deleted file mode 100644 index f96096e..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
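The read-only wrapper above turns every setter into an UnsupportedOperationException. A small sketch of that contract; the family name is arbitrary and the HColumnDescriptor(String) constructor is assumed from the public API of this era:

    HColumnDescriptor editable = new HColumnDescriptor("cf");   // ordinary, mutable descriptor
    editable.setMaxVersions(5);                                 // allowed
    HColumnDescriptor frozen = new UnmodifyableHColumnDescriptor(editable);
    frozen.setMaxVersions(3);   // throws UnsupportedOperationException("HColumnDescriptor is read-only")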
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HRegionInfo; - -@InterfaceAudience.Public -@InterfaceStability.Evolving -class UnmodifyableHRegionInfo extends HRegionInfo { - /* - * Creates an unmodifyable copy of an HRegionInfo - * - * @param info - */ - UnmodifyableHRegionInfo(HRegionInfo info) { - super(info); - } - - /** - * @param split set split status - */ - @Override - public void setSplit(boolean split) { - throw new UnsupportedOperationException("HRegionInfo is read-only"); - } - - /** - * @param offLine set online - offline status - */ - @Override - public void setOffline(boolean offLine) { - throw new UnsupportedOperationException("HRegionInfo is read-only"); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java deleted file mode 100644 index 87c4f9b..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java +++ /dev/null @@ -1,127 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; - -/** - * Read-only table descriptor. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class UnmodifyableHTableDescriptor extends HTableDescriptor { - /** Default constructor */ - public UnmodifyableHTableDescriptor() { - super(); - } - - /* - * Create an unmodifyable copy of an HTableDescriptor - * @param desc - */ - UnmodifyableHTableDescriptor(final HTableDescriptor desc) { - super(desc.getName(), getUnmodifyableFamilies(desc), desc.getValues()); - } - - - /* - * @param desc - * @return Families as unmodifiable array. 
- */ - private static HColumnDescriptor[] getUnmodifyableFamilies( - final HTableDescriptor desc) { - HColumnDescriptor [] f = new HColumnDescriptor[desc.getFamilies().size()]; - int i = 0; - for (HColumnDescriptor c: desc.getFamilies()) { - f[i++] = c; - } - return f; - } - - /** - * Does NOT add a column family. This object is immutable - * @param family HColumnDescriptor of familyto add. - */ - @Override - public void addFamily(final HColumnDescriptor family) { - throw new UnsupportedOperationException("HTableDescriptor is read-only"); - } - - /** - * @param column - * @return Column descriptor for the passed family name or the family on - * passed in column. - */ - @Override - public HColumnDescriptor removeFamily(final byte [] column) { - throw new UnsupportedOperationException("HTableDescriptor is read-only"); - } - - /** - * @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean) - */ - @Override - public void setReadOnly(boolean readOnly) { - throw new UnsupportedOperationException("HTableDescriptor is read-only"); - } - - /** - * @see org.apache.hadoop.hbase.HTableDescriptor#setValue(byte[], byte[]) - */ - @Override - public void setValue(byte[] key, byte[] value) { - throw new UnsupportedOperationException("HTableDescriptor is read-only"); - } - - /** - * @see org.apache.hadoop.hbase.HTableDescriptor#setValue(java.lang.String, java.lang.String) - */ - @Override - public void setValue(String key, String value) { - throw new UnsupportedOperationException("HTableDescriptor is read-only"); - } - - /** - * @see org.apache.hadoop.hbase.HTableDescriptor#setMaxFileSize(long) - */ - @Override - public void setMaxFileSize(long maxFileSize) { - throw new UnsupportedOperationException("HTableDescriptor is read-only"); - } - - /** - * @see org.apache.hadoop.hbase.HTableDescriptor#setMemStoreFlushSize(long) - */ - @Override - public void setMemStoreFlushSize(long memstoreFlushSize) { - throw new UnsupportedOperationException("HTableDescriptor is read-only"); - } - -// /** -// * @see org.apache.hadoop.hbase.HTableDescriptor#addIndex(org.apache.hadoop.hbase.client.tableindexed.IndexSpecification) -// */ -// @Override -// public void addIndex(IndexSpecification index) { -// throw new UnsupportedOperationException("HTableDescriptor is read-only"); -// } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java deleted file mode 100644 index 01890cf..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.client; - - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; - -import java.io.IOException; - -/** - * We inherit the current ZooKeeperWatcher implementation to change the semantic - * of the close: the new close won't immediately close the connection but - * will have a keep alive. See {@link HConnection}. - * This allows to make it available with a consistent interface. The whole - * ZooKeeperWatcher use in HConnection will be then changed to remove the - * watcher part. - * - * This class is intended to be used internally by HBase classes; but not by - * final user code. Hence it's package protected. - */ -class ZooKeeperKeepAliveConnection extends ZooKeeperWatcher{ - ZooKeeperKeepAliveConnection( - Configuration conf, String descriptor, - HConnectionManager.HConnectionImplementation conn) throws IOException { - super(conf, descriptor, conn); - } - - @Override - public void close() { - ((HConnectionManager.HConnectionImplementation)abortable).releaseZooKeeperWatcher(this); - } - - void internalClose(){ - super.close(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java deleted file mode 100644 index 2e0c05e..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ /dev/null @@ -1,701 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.client.coprocessor; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.NavigableSet; -import java.util.TreeMap; -import java.util.concurrent.atomic.AtomicLong; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateArgument; -import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse; -import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateService; -import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; -import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; -import org.apache.hadoop.hbase.ipc.ServerRpcController; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; - -import com.google.protobuf.ByteString; - -/** - * This client class is for invoking the aggregate functions deployed on the - * Region Server side via the AggregateService. This class will implement the - * supporting functionality for summing/processing the individual results - * obtained from the AggregateService for each region. - *

      - * This will serve as the client side handler for invoking the aggregate - * functions. - *

- * For all aggregate functions,
- * • start row < end row is an essential condition (if they are not
- *   {@link HConstants#EMPTY_BYTE_ARRAY})
- * • Column family can't be null. In case where multiple families are
- *   provided, an IOException will be thrown. An optional column qualifier can
- *   also be defined.
- *
      • For methods to find maximum, minimum, sum, rowcount, it returns the - * parameter type. For average and std, it returns a double value. For row - * count, it returns a long value. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class AggregationClient { - - private static final Log log = LogFactory.getLog(AggregationClient.class); - Configuration conf; - - /** - * Constructor with Conf object - * @param cfg - */ - public AggregationClient(Configuration cfg) { - this.conf = cfg; - } - - /** - * It gives the maximum value of a column for a given column family for the - * given range. In case qualifier is null, a max of all values for the given - * family is returned. - * @param tableName - * @param ci - * @param scan - * @return max val - * @throws Throwable - * The caller is supposed to handle the exception as they are thrown - * & propagated to it. - */ - public R max(final byte[] tableName, final ColumnInterpreter ci, - final Scan scan) throws Throwable { - final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); - class MaxCallBack implements Batch.Callback { - R max = null; - - R getMax() { - return max; - } - - @Override - public synchronized void update(byte[] region, byte[] row, R result) { - max = (max == null || (result != null && ci.compare(max, result) < 0)) ? result : max; - } - } - MaxCallBack aMaxCallBack = new MaxCallBack(); - HTable table = null; - try { - table = new HTable(conf, tableName); - table.coprocessorService(AggregateService.class, scan.getStartRow(), - scan.getStopRow(), new Batch.Call() { - @Override - public R call(AggregateService instance) throws IOException { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - instance.getMax(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - if (response.getFirstPartCount() > 0) { - return ci.castToCellType( - ci.parseResponseAsPromotedType( - getBytesFromResponse(response.getFirstPart(0)))); - } - return null; - } - }, aMaxCallBack); - } finally { - if (table != null) { - table.close(); - } - } - return aMaxCallBack.getMax(); - } - - private void validateParameters(Scan scan) throws IOException { - if (scan == null - || (Bytes.equals(scan.getStartRow(), scan.getStopRow()) && !Bytes - .equals(scan.getStartRow(), HConstants.EMPTY_START_ROW)) - || ((Bytes.compareTo(scan.getStartRow(), scan.getStopRow()) > 0) && - !Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW))) { - throw new IOException( - "Agg client Exception: Startrow should be smaller than Stoprow"); - } else if (scan.getFamilyMap().size() != 1) { - throw new IOException("There must be only one family."); - } - } - - /** - * It gives the minimum value of a column for a given column family for the - * given range. In case qualifier is null, a min of all values for the given - * family is returned. 
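Pulling the class javadoc and max() above together, a hedged usage sketch: it assumes the AggregateService endpoint is deployed on the region servers, the table/family/qualifier names are invented, and LongColumnInterpreter (added elsewhere in this patch) interprets cell values as longs:

    AggregationClient aggClient = new AggregationClient(conf);
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("amount"));  // exactly one family, per validateParameters()
    ColumnInterpreter<Long, Long> ci = new LongColumnInterpreter();
    byte[] table = Bytes.toBytes("myTable");
    try {
      Long max   = aggClient.max(table, ci, scan);    // returns the interpreter's cell type
      Long min   = aggClient.min(table, ci, scan);
      long rows  = aggClient.rowCount(table, ci, scan);
      Long sum   = aggClient.sum(table, ci, scan);
      double avg = aggClient.avg(table, ci, scan);    // always a double, whatever the interpreter
    } catch (Throwable t) {
      // every aggregate method is declared 'throws Throwable'; the caller handles it
    }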
- * @param tableName - * @param ci - * @param scan - * @return min val - * @throws Throwable - */ - public R min(final byte[] tableName, final ColumnInterpreter ci, - final Scan scan) throws Throwable { - final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); - class MinCallBack implements Batch.Callback { - - private R min = null; - - public R getMinimum() { - return min; - } - - @Override - public synchronized void update(byte[] region, byte[] row, R result) { - min = (min == null || (result != null && ci.compare(result, min) < 0)) ? result : min; - } - } - MinCallBack minCallBack = new MinCallBack(); - HTable table = null; - try { - table = new HTable(conf, tableName); - table.coprocessorService(AggregateService.class, scan.getStartRow(), - scan.getStopRow(), new Batch.Call() { - - @Override - public R call(AggregateService instance) throws IOException { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - instance.getMin(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - if (response.getFirstPartCount() > 0) { - return ci.castToCellType( - ci.parseResponseAsPromotedType( - getBytesFromResponse(response.getFirstPart(0)))); - } - return null; - } - }, minCallBack); - } finally { - if (table != null) { - table.close(); - } - } - log.debug("Min fom all regions is: " + minCallBack.getMinimum()); - return minCallBack.getMinimum(); - } - - /** - * It gives the row count, by summing up the individual results obtained from - * regions. In case the qualifier is null, FirstKeyValueFilter is used to - * optimised the operation. In case qualifier is provided, I can't use the - * filter as it may set the flag to skip to next row, but the value read is - * not of the given filter: in this case, this particular row will not be - * counted ==> an error. - * @param tableName - * @param ci - * @param scan - * @return - * @throws Throwable - */ - public long rowCount(final byte[] tableName, - final ColumnInterpreter ci, final Scan scan) throws Throwable { - final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); - class RowNumCallback implements Batch.Callback { - private final AtomicLong rowCountL = new AtomicLong(0); - - public long getRowNumCount() { - return rowCountL.get(); - } - - @Override - public void update(byte[] region, byte[] row, Long result) { - rowCountL.addAndGet(result.longValue()); - } - } - RowNumCallback rowNum = new RowNumCallback(); - HTable table = null; - try { - table = new HTable(conf, tableName); - table.coprocessorService(AggregateService.class, scan.getStartRow(), - scan.getStopRow(), new Batch.Call() { - @Override - public Long call(AggregateService instance) throws IOException { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - instance.getRowNum(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - byte[] bytes = getBytesFromResponse(response.getFirstPart(0)); - ByteBuffer bb = ByteBuffer.allocate(8).put(bytes); - bb.rewind(); - return bb.getLong(); - } - }, rowNum); - } finally { - if (table != null) { - table.close(); - } - } - return rowNum.getRowNumCount(); - } - - /** - * It sums up the value returned from various regions. 
In case qualifier is - * null, summation of all the column qualifiers in the given family is done. - * @param tableName - * @param ci - * @param scan - * @return sum - * @throws Throwable - */ - public S sum(final byte[] tableName, final ColumnInterpreter ci, - final Scan scan) throws Throwable { - final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); - - class SumCallBack implements Batch.Callback { - S sumVal = null; - - public S getSumResult() { - return sumVal; - } - - @Override - public synchronized void update(byte[] region, byte[] row, S result) { - sumVal = ci.add(sumVal, result); - } - } - SumCallBack sumCallBack = new SumCallBack(); - HTable table = null; - try { - table = new HTable(conf, tableName); - table.coprocessorService(AggregateService.class, scan.getStartRow(), - scan.getStopRow(), new Batch.Call() { - @Override - public S call(AggregateService instance) throws IOException { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - instance.getSum(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - if (response.getFirstPartCount() == 0) { - return null; - } - return ci.parseResponseAsPromotedType( - getBytesFromResponse(response.getFirstPart(0))); - } - }, sumCallBack); - } finally { - if (table != null) { - table.close(); - } - } - return sumCallBack.getSumResult(); - } - - /** - * It computes average while fetching sum and row count from all the - * corresponding regions. Approach is to compute a global sum of region level - * sum and rowcount and then compute the average. - * @param tableName - * @param scan - * @throws Throwable - */ - private Pair getAvgArgs(final byte[] tableName, - final ColumnInterpreter ci, final Scan scan) throws Throwable { - final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); - class AvgCallBack implements Batch.Callback> { - S sum = null; - Long rowCount = 0l; - - public Pair getAvgArgs() { - return new Pair(sum, rowCount); - } - - @Override - public synchronized void update(byte[] region, byte[] row, Pair result) { - sum = ci.add(sum, result.getFirst()); - rowCount += result.getSecond(); - } - } - AvgCallBack avgCallBack = new AvgCallBack(); - HTable table = null; - try { - table = new HTable(conf, tableName); - table.coprocessorService(AggregateService.class, scan.getStartRow(), - scan.getStopRow(), - new Batch.Call>() { - @Override - public Pair call(AggregateService instance) - throws IOException { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - instance.getAvg(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - Pair pair = new Pair(null, 0L); - if (response.getFirstPartCount() == 0) { - return pair; - } - pair.setFirst(ci.parseResponseAsPromotedType( - getBytesFromResponse(response.getFirstPart(0)))); - ByteBuffer bb = ByteBuffer.allocate(8).put( - getBytesFromResponse(response.getSecondPart())); - bb.rewind(); - pair.setSecond(bb.getLong()); - return pair; - } - }, avgCallBack); - } finally { - if (table != null) { - table.close(); - } - } - return avgCallBack.getAvgArgs(); - } - - /** - * This is the client side interface/handle for calling the average method for - * a given cf-cq combination. 
It was necessary to add one more call stack as - * its return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the average and returs the double value. - * @param tableName - * @param ci - * @param scan - * @return - * @throws Throwable - */ - public double avg(final byte[] tableName, - final ColumnInterpreter ci, Scan scan) throws Throwable { - Pair p = getAvgArgs(tableName, ci, scan); - return ci.divideForAvg(p.getFirst(), p.getSecond()); - } - - /** - * It computes a global standard deviation for a given column and its value. - * Standard deviation is square root of (average of squares - - * average*average). From individual regions, it obtains sum, square sum and - * number of rows. With these, the above values are computed to get the global - * std. - * @param tableName - * @param scan - * @return - * @throws Throwable - */ - private Pair, Long> getStdArgs(final byte[] tableName, - final ColumnInterpreter ci, final Scan scan) throws Throwable { - final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); - class StdCallback implements Batch.Callback, Long>> { - long rowCountVal = 0l; - S sumVal = null, sumSqVal = null; - - public Pair, Long> getStdParams() { - List l = new ArrayList(); - l.add(sumVal); - l.add(sumSqVal); - Pair, Long> p = new Pair, Long>(l, rowCountVal); - return p; - } - - @Override - public synchronized void update(byte[] region, byte[] row, Pair, Long> result) { - if (result.getFirst().size() > 0) { - sumVal = ci.add(sumVal, result.getFirst().get(0)); - sumSqVal = ci.add(sumSqVal, result.getFirst().get(1)); - rowCountVal += result.getSecond(); - } - } - } - StdCallback stdCallback = new StdCallback(); - HTable table = null; - try { - table = new HTable(conf, tableName); - table.coprocessorService(AggregateService.class, scan.getStartRow(), - scan.getStopRow(), - new Batch.Call, Long>>() { - @Override - public Pair, Long> call(AggregateService instance) - throws IOException { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - instance.getStd(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - Pair,Long> pair = - new Pair, Long>(new ArrayList(), 0L); - if (response.getFirstPartCount() == 0) { - return pair; - } - List list = new ArrayList(); - for (int i = 0; i < response.getFirstPartCount(); i++) { - list.add(ci.parseResponseAsPromotedType( - getBytesFromResponse(response.getFirstPart(i)))); - } - pair.setFirst(list); - ByteBuffer bb = ByteBuffer.allocate(8).put( - getBytesFromResponse(response.getSecondPart())); - bb.rewind(); - pair.setSecond(bb.getLong()); - return pair; - } - }, stdCallback); - } finally { - if (table != null) { - table.close(); - } - } - return stdCallback.getStdParams(); - } - - /** - * This is the client side interface/handle for calling the std method for a - * given cf-cq combination. It was necessary to add one more call stack as its - * return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the std and returns the double value. 
- * @param tableName - * @param ci - * @param scan - * @return - * @throws Throwable - */ - public double std(final byte[] tableName, ColumnInterpreter ci, - Scan scan) throws Throwable { - Pair, Long> p = getStdArgs(tableName, ci, scan); - double res = 0d; - double avg = ci.divideForAvg(p.getFirst().get(0), p.getSecond()); - double avgOfSumSq = ci.divideForAvg(p.getFirst().get(1), p.getSecond()); - res = avgOfSumSq - (avg) * (avg); // variance - res = Math.pow(res, 0.5); - return res; - } - - /** - * It helps locate the region with median for a given column whose weight - * is specified in an optional column. - * From individual regions, it obtains sum of values and sum of weights. - * @param tableName - * @param ci - * @param scan - * @return pair whose first element is a map between start row of the region - * and (sum of values, sum of weights) for the region, the second element is - * (sum of values, sum of weights) for all the regions chosen - * @throws Throwable - */ - private Pair>, List> - getMedianArgs(final byte[] tableName, - final ColumnInterpreter ci, final Scan scan) throws Throwable { - final AggregateArgument requestArg = validateArgAndGetPB(scan, ci); - final NavigableMap> map = - new TreeMap>(Bytes.BYTES_COMPARATOR); - class StdCallback implements Batch.Callback> { - S sumVal = null, sumWeights = null; - - public Pair>, List> getMedianParams() { - List l = new ArrayList(); - l.add(sumVal); - l.add(sumWeights); - Pair>, List> p = - new Pair>, List>(map, l); - return p; - } - - @Override - public synchronized void update(byte[] region, byte[] row, List result) { - map.put(row, result); - sumVal = ci.add(sumVal, result.get(0)); - sumWeights = ci.add(sumWeights, result.get(1)); - } - } - StdCallback stdCallback = new StdCallback(); - HTable table = null; - try { - table = new HTable(conf, tableName); - table.coprocessorService(AggregateService.class, scan.getStartRow(), - scan.getStopRow(), new Batch.Call>() { - @Override - public List call(AggregateService instance) throws IOException { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - instance.getMedian(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - - List list = new ArrayList(); - for (int i = 0; i < response.getFirstPartCount(); i++) { - list.add(ci.parseResponseAsPromotedType( - getBytesFromResponse(response.getFirstPart(i)))); - } - return list; - } - - }, stdCallback); - } finally { - if (table != null) { - table.close(); - } - } - return stdCallback.getMedianParams(); - } - - /** - * This is the client side interface/handler for calling the median method for a - * given cf-cq combination. This method collects the necessary parameters - * to compute the median and returns the median. 
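std() above relies on the identity variance = (average of the squares) - (square of the average), computed from per-region sums. A tiny standalone arithmetic check of that identity, in plain Java with made-up numbers:

    double[] values = {2, 4, 4, 4, 5, 5, 7, 9};
    double sum = 0, sumSq = 0;
    for (double v : values) { sum += v; sumSq += v * v; }
    int n = values.length;                       // 8
    double avg = sum / n;                        // 5.0
    double variance = sumSq / n - avg * avg;     // 29.0 - 25.0 = 4.0
    double std = Math.sqrt(variance);            // 2.0, matching the direct definition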
- * @param tableName - * @param ci - * @param scan - * @return R the median - * @throws Throwable - */ - public R median(final byte[] tableName, ColumnInterpreter ci, - Scan scan) throws Throwable { - Pair>, List> p = getMedianArgs(tableName, ci, scan); - byte[] startRow = null; - byte[] colFamily = scan.getFamilies()[0]; - NavigableSet quals = scan.getFamilyMap().get(colFamily); - NavigableMap> map = p.getFirst(); - S sumVal = p.getSecond().get(0); - S sumWeights = p.getSecond().get(1); - double halfSumVal = ci.divideForAvg(sumVal, 2L); - double movingSumVal = 0; - boolean weighted = false; - if (quals.size() > 1) { - weighted = true; - halfSumVal = ci.divideForAvg(sumWeights, 2L); - } - - for (Map.Entry> entry : map.entrySet()) { - S s = weighted ? entry.getValue().get(1) : entry.getValue().get(0); - double newSumVal = movingSumVal + ci.divideForAvg(s, 1L); - if (newSumVal > halfSumVal) break; // we found the region with the median - movingSumVal = newSumVal; - startRow = entry.getKey(); - } - // scan the region with median and find it - Scan scan2 = new Scan(scan); - // inherit stop row from method parameter - if (startRow != null) scan2.setStartRow(startRow); - HTable table = null; - ResultScanner scanner = null; - try { - table = new HTable(conf, tableName); - int cacheSize = scan2.getCaching(); - if (!scan2.getCacheBlocks() || scan2.getCaching() < 2) { - scan2.setCacheBlocks(true); - cacheSize = 5; - scan2.setCaching(cacheSize); - } - scanner = table.getScanner(scan2); - Result[] results = null; - byte[] qualifier = quals.pollFirst(); - // qualifier for the weight column - byte[] weightQualifier = weighted ? quals.pollLast() : qualifier; - R value = null; - do { - results = scanner.next(cacheSize); - if (results != null && results.length > 0) { - for (int i = 0; i < results.length; i++) { - Result r = results[i]; - // retrieve weight - KeyValue kv = r.getColumnLatest(colFamily, weightQualifier); - R newValue = ci.getValue(colFamily, weightQualifier, kv); - S s = ci.castToReturnType(newValue); - double newSumVal = movingSumVal + ci.divideForAvg(s, 1L); - // see if we have moved past the median - if (newSumVal > halfSumVal) { - return value; - } - movingSumVal = newSumVal; - kv = r.getColumnLatest(colFamily, qualifier); - value = ci.getValue(colFamily, qualifier, kv); - } - } - } while (results != null && results.length > 0); - } finally { - if (scanner != null) { - scanner.close(); - } - if (table != null) { - table.close(); - } - } - return null; - } - - AggregateArgument validateArgAndGetPB(Scan scan, ColumnInterpreter ci) - throws IOException { - validateParameters(scan); - final AggregateArgument.Builder requestBuilder = - AggregateArgument.newBuilder(); - requestBuilder.setInterpreterClassName(ci.getClass().getCanonicalName()); - ByteString columnInterpreterSpecificData = null; - if ((columnInterpreterSpecificData = ci.columnInterpreterSpecificData()) - != null) { - requestBuilder.setInterpreterSpecificBytes(columnInterpreterSpecificData); - } - requestBuilder.setScan(ProtobufUtil.toScan(scan)); - return requestBuilder.build(); - } - - byte[] getBytesFromResponse(ByteString response) { - ByteBuffer bb = response.asReadOnlyByteBuffer(); - bb.rewind(); - byte[] bytes; - if (bb.hasArray()) { - bytes = bb.array(); - } else { - bytes = response.toByteArray(); - } - return bytes; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java deleted file mode 100644 
index c1b6e35..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client.coprocessor; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - - -/** - * A collection of interfaces and utilities used for interacting with custom RPC - * interfaces exposed by Coprocessors. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public abstract class Batch { - /** - * Defines a unit of work to be executed. - * - *

        - * When used with - * {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} - * the implementation's {@link Batch.Call#call(Object)} method will be invoked - * with a proxy to the - * {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService} - * sub-type instance. -

        - * @see org.apache.hadoop.hbase.client.coprocessor - * @see org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[]) - * @see org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call) - * @param the instance type to be passed to - * {@link Batch.Call#call(Object)} - * @param the return type from {@link Batch.Call#call(Object)} - */ - public static interface Call { - public R call(T instance) throws IOException; - } - - /** - * Defines a generic callback to be triggered for each {@link Batch.Call#call(Object)} - * result. - * - *

        - * When used with - * {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} - * the implementation's {@link Batch.Callback#update(byte[], byte[], Object)} - * method will be called with the {@link Batch.Call#call(Object)} return value - * from each region in the selected range. - *

        - * @param the return type from the associated {@link Batch.Call#call(Object)} - * @see org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call) - */ - public static interface Callback { - public void update(byte[] region, byte[] row, R result); - } -} \ No newline at end of file diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java deleted file mode 100644 index 404a3b4..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client.coprocessor; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; -import org.apache.hadoop.hbase.util.Bytes; - -import com.google.protobuf.ByteString; - -/** - * a concrete column interpreter implementation. The cell value is a Long value - * and its promoted data type is also a Long value. For computing aggregation - * function, this class is used to find the datatype of the cell value. Client - * is supposed to instantiate it and passed along as a parameter. See - * TestAggregateProtocol methods for its sample usage. - * Its methods handle null arguments gracefully. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class LongColumnInterpreter implements ColumnInterpreter { - - public Long getValue(byte[] colFamily, byte[] colQualifier, KeyValue kv) - throws IOException { - if (kv == null || kv.getValueLength() != Bytes.SIZEOF_LONG) - return null; - return Bytes.toLong(kv.getBuffer(), kv.getValueOffset()); - } - - @Override - public Long add(Long l1, Long l2) { - if (l1 == null ^ l2 == null) { - return (l1 == null) ? l2 : l1; // either of one is null. - } else if (l1 == null) // both are null - return null; - return l1 + l2; - } - - @Override - public int compare(final Long l1, final Long l2) { - if (l1 == null ^ l2 == null) { - return l1 == null ? -1 : 1; // either of one is null. - } else if (l1 == null) - return 0; // both are null - return l1.compareTo(l2); // natural ordering. - } - - @Override - public Long getMaxValue() { - return Long.MAX_VALUE; - } - - @Override - public Long increment(Long o) { - return o == null ? null : (o + 1l); - } - - @Override - public Long multiply(Long l1, Long l2) { - return (l1 == null || l2 == null) ? 
null : l1 * l2; - } - - @Override - public Long getMinValue() { - return Long.MIN_VALUE; - } - - @Override - public double divideForAvg(Long l1, Long l2) { - return (l2 == null || l1 == null) ? Double.NaN : (l1.doubleValue() / l2 - .doubleValue()); - } - - @Override - public Long castToReturnType(Long o) { - return o; - } - - - @Override - public Long parseResponseAsPromotedType(byte[] response) { - ByteBuffer b = ByteBuffer.allocate(8).put(response); - b.rewind(); - long l = b.getLong(); - return l; - } - - @Override - public Long castToCellType(Long l) { - return l; - } - - @Override - public ByteString columnInterpreterSpecificData() { - // nothing - return null; - } - - @Override - public void initialize(ByteString bytes) { - // nothing - } - - @Override - public ByteString getProtoForCellType(Long t) { - return getProtoForPromotedOrCellType(t); - } - - @Override - public ByteString getProtoForPromotedType(Long s) { - return getProtoForPromotedOrCellType(s); - } - - private ByteString getProtoForPromotedOrCellType(Long s) { - ByteBuffer bb = ByteBuffer.allocate(8).putLong(s); - bb.rewind(); - ByteString bs = ByteString.copyFrom(bb); - return bs; - } -} \ No newline at end of file diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java deleted file mode 100644 index edb3c22..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java +++ /dev/null @@ -1,226 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** -Provides client classes for invoking Coprocessor RPC protocols - -

        -

        -

        - -

        Overview

        -

        -The coprocessor framework provides a way for custom code to run in place on the -HBase region servers with each of a table's regions. These client classes -enable applications to communicate with coprocessor instances via custom RPC -protocols. -

        - -

        -In order to provide a custom RPC protocol to clients, a coprocessor implementation -must: -

          -
        • Define a protocol buffer Service and supporting Message types for the RPC methods.
          See the protocol buffer guide for more details on defining services.
        • Generate the Service and Message code using the protoc compiler.
        • Implement the generated Service interface in your coprocessor class and implement the
          {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService} interface.  The
          {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService#getService()} method
          should return a reference to the Endpoint's protocol buffer Service instance.
        -Clients may then call the defined service methods on coprocessor instances via -the {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])}, -{@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}, and -{@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} -methods. -

        - -

        -Since coprocessor Service instances are associated with individual regions within the table, -the client RPC calls must ultimately identify which regions should be used in the Service -method invocations. Since regions are seldom handled directly in client code -and the region names may change over time, the coprocessor RPC calls use row keys -to identify which regions should be used for the method invocations. Clients -can call coprocessor Service methods against either: -

          -
        • a single region - calling
          {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])}
          with a single row key.  This returns a {@link org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel}
          instance which communicates with the region containing the given row key (even if the
          row does not exist) as the RPC endpoint.  Clients can then use the {@code CoprocessorRpcChannel}
          instance to create a new Service stub for calling RPC methods on the region's coprocessor
          (see the sketch after this list).
        • a range of regions - calling
          {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
          or {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
          with a starting row key and an ending row key.  All regions in the table, from the region
          containing the start row key to the region containing the end row key (inclusive), will be
          used as the RPC endpoints.
        -

        - -
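As an editorial aside (not part of the original file), here is a minimal sketch of the single-region case referenced in the first item above. It assumes the fictitious ExampleProtos.RowCountService described later in this overview, and that the generated service exposes the standard protobuf newBlockingStub() factory; the table and row names are placeholders.

    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "mytable");
    try {
      // The returned channel is bound to the region containing this row key,
      // whether or not the row itself exists.
      CoprocessorRpcChannel channel = table.coprocessorService(Bytes.toBytes("someRow"));

      // Build a blocking stub over the channel and invoke the endpoint directly.
      ExampleProtos.RowCountService.BlockingInterface service =
          ExampleProtos.RowCountService.newBlockingStub(channel);
      ExampleProtos.CountResponse response =
          service.getRowCount(null, ExampleProtos.CountRequest.getDefaultInstance());
      System.out.println("rows in region: " + response.getCount());
    } finally {
      table.close();
    }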

        Note that the row keys passed as parameters to the HTable -methods are not passed directly to the coprocessor Service implementations. -They are only used to identify the regions for endpoints of the remote calls. -

        - -

        -The {@link org.apache.hadoop.hbase.client.coprocessor.Batch} class defines two -interfaces used for coprocessor Service invocations against multiple regions. Clients implement -{@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call} to call methods of the actual -coprocessor Service instance. The interface's call() method will be called once -per selected region, passing the Service instance for the region as a parameter. Clients -can optionally implement {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback} -to be notified of the results from each region invocation as they complete. -The instance's {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)} -method will be called with the {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)} -return value from each region. -

        - -

        Example usage

        -

        -To start with, let's use a fictitious coprocessor, RowCountEndpoint -that counts the number of rows and key-values in each region where it is running. -For clients to query this information, the coprocessor defines the following protocol buffer -service: -

        - -
        -
        -message CountRequest {
        -}
        -
        -message CountResponse {
        -  required int64 count = 1 [default = 0];
        -}
        -
        -service RowCountService {
        -  rpc getRowCount(CountRequest)
        -    returns (CountResponse);
        -  rpc getKeyValueCount(CountRequest)
        -    returns (CountResponse);
        -}
        -
        - -

        -Next run the protoc compiler on the .proto file to generate Java code for the Service interface. -The generated {@code RowCountService} interface should look something like: -

        -
        -
        -public static abstract class RowCountService
        -  implements com.google.protobuf.Service {
        -  ...
        -  public interface Interface {
        -    public abstract void getRowCount(
        -        com.google.protobuf.RpcController controller,
        -        org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
        -        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
        -
        -    public abstract void getKeyValueCount(
        -        com.google.protobuf.RpcController controller,
        -        org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
        -        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
        -  }
        -}
        -
        - -

        -Our coprocessor Service will need to implement this interface and the {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService} -in order to be registered correctly as an endpoint. For the sake of simplicity the server-side -implementation is omitted. To see the implementing code, please see the -{@link org.apache.hadoop.hbase.coprocessor.example.RowCountEndpoint} class in the HBase source code. -

        - -

        -Now we need a way to access the results that RowCountService -is making available. If we want to find the row count for all regions, we could -use: -

        - -
        -
        -HTable table = new HTable(conf, "mytable");
        -final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
        -Map<byte[], Long> results = table.coprocessorService(
        -    ExampleProtos.RowCountService.class, // the protocol interface we're invoking
        -    null, null,                          // start and end row keys
        -    new Batch.Call<ExampleProtos.RowCountService, Long>() {
        -        public Long call(ExampleProtos.RowCountService counter) throws IOException {
        -          BlockingRpcCallback<ExampleProtos.CountResponse> rpcCallback =
        -              new BlockingRpcCallback<ExampleProtos.CountResponse>();
        -          counter.getRowCount(null, request, rpcCallback);
        -          ExampleProtos.CountResponse response = rpcCallback.get();
        -          return response.hasCount() ? response.getCount() : 0;
        -        }
        -    });
        -
        - -

        -This will return a java.util.Map of the counter.getRowCount() -result for the RowCountService instance running in each region -of mytable, keyed by the region name. -

        - -

        -By implementing {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call} -as an anonymous class, we can invoke RowCountService methods -directly against the {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)} -method's argument. Calling {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} -will take care of invoking Batch.Call.call() against our anonymous class -with the RowCountService instance for each table region. -

        - -

        -Implementing {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call} also allows you to -perform additional processing against each region's Service instance. For example, if you would -like to combine row count and key-value count for each region: -

        - -
        -
        -HTable table = new HTable(conf, "mytable");
        -// combine row count and kv count for region
        -final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
        -Map<byte[], Pair<Long, Long>> results = table.coprocessorService(
        -    ExampleProtos.RowCountService.class, // the protocol interface we're invoking
        -    null, null,                          // start and end row keys
        -    new Batch.Call<ExampleProtos.RowCountService, Pair<Long, Long>>() {
        -       public Pair<Long, Long> call(ExampleProtos.RowCountService counter) throws IOException {
        -         BlockingRpcCallback<ExampleProtos.CountResponse> rowCallback =
        -             new BlockingRpcCallback<ExampleProtos.CountResponse>();
        -         counter.getRowCount(null, request, rowCallback);
        -
        -         BlockingRpcCallback<ExampleProtos.CountResponse> kvCallback =
        -             new BlockingRpcCallback<ExampleProtos.CountResponse>();
        -         counter.getKeyValueCount(null, request, kvCallback);
        -
        -         ExampleProtos.CountResponse rowResponse = rowCallback.get();
        -         ExampleProtos.CountResponse kvResponse = kvCallback.get();
        -         return new Pair<Long, Long>(rowResponse.hasCount() ? rowResponse.getCount() : 0,
        -             kvResponse.hasCount() ? kvResponse.getCount() : 0);
        -    }
        -});
        -
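As an editorial aside (not part of the original file), the Batch.Callback variant mentioned earlier can be combined with either of the calls above. A minimal sketch, in the same abbreviated style as the examples in this overview (imports and the surrounding method elided), that totals the per-region row counts as each result arrives:

    HTable table = new HTable(conf, "mytable");
    final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
    final AtomicLong totalRows = new AtomicLong(0);
    table.coprocessorService(
        ExampleProtos.RowCountService.class,   // the service we're invoking
        null, null,                            // start and end row keys (all regions)
        new Batch.Call<ExampleProtos.RowCountService, Long>() {
          public Long call(ExampleProtos.RowCountService counter) throws IOException {
            BlockingRpcCallback<ExampleProtos.CountResponse> rpcCallback =
                new BlockingRpcCallback<ExampleProtos.CountResponse>();
            counter.getRowCount(null, request, rpcCallback);
            ExampleProtos.CountResponse response = rpcCallback.get();
            return response.hasCount() ? response.getCount() : 0;
          }
        },
        new Batch.Callback<Long>() {
          // Called once per region with that region's Call result.
          public void update(byte[] region, byte[] row, Long result) {
            totalRows.addAndGet(result == null ? 0 : result);
          }
        });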
        -*/ -package org.apache.hadoop.hbase.client.coprocessor; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java deleted file mode 100644 index 47b484c..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java +++ /dev/null @@ -1,142 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client.metrics; - -import com.google.common.collect.ImmutableMap; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; - - -/** - * Provides client-side metrics related to scan operations - * The data can be passed to mapreduce framework or other systems. - * We use atomic longs so that one thread can increment, - * while another atomically resets to zero after the values are reported - * to hadoop's counters. - * - * Some of these metrics are general for any client operation such as put - * However, there is no need for this. So they are defined under scan operation - * for now. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class ScanMetrics { - - - private static final Log LOG = LogFactory.getLog(ScanMetrics.class); - - /** - * Hash to hold the String -> Atomic Long mappings. - */ - private final Map counters = new HashMap(); - - // AtomicLongs to hold the metrics values. These are all updated through ClientScanner and - // ScannerCallable. They are atomic longs so that atomic getAndSet can be used to reset the - // values after progress is passed to hadoop's counters. 
- - - /** - * number of RPC calls - */ - public final AtomicLong countOfRPCcalls = createCounter("RPC_CALLS"); - - /** - * number of remote RPC calls - */ - public final AtomicLong countOfRemoteRPCcalls = createCounter("REMOTE_RPC_CALLS"); - - /** - * sum of milliseconds between sequential next calls - */ - public final AtomicLong sumOfMillisSecBetweenNexts = createCounter("MILLIS_BETWEEN_NEXTS"); - - /** - * number of NotServingRegionException caught - */ - public final AtomicLong countOfNSRE = createCounter("NOT_SERVING_REGION_EXCEPTION"); - - /** - * number of bytes in Result objects from region servers - */ - public final AtomicLong countOfBytesInResults = createCounter("BYTES_IN_RESULTS"); - - /** - * number of bytes in Result objects from remote region servers - */ - public final AtomicLong countOfBytesInRemoteResults = createCounter("BYTES_IN_REMOTE_RESULTS"); - - /** - * number of regions - */ - public final AtomicLong countOfRegions = createCounter("REGIONS_SCANNED"); - - /** - * number of RPC retries - */ - public final AtomicLong countOfRPCRetries = createCounter("RPC_RETRIES"); - - /** - * number of remote RPC retries - */ - public final AtomicLong countOfRemoteRPCRetries = createCounter("REMOTE_RPC_RETRIES"); - - /** - * constructor - */ - public ScanMetrics() { - } - - private AtomicLong createCounter(String counterName) { - AtomicLong c = new AtomicLong(0); - counters.put(counterName, c); - return c; - } - - public void setCounter(String counterName, long value) { - AtomicLong c = this.counters.get(counterName); - if (c != null) { - c.set(value); - } - } - - /** - * Get all of the values since the last time this function was called. - * - * Calling this function will reset all AtomicLongs in the instance back to 0. - * - * @return A Map of String -> Long for metrics - */ - public Map getMetricsMap() { - //Create a builder - ImmutableMap.Builder builder = ImmutableMap.builder(); - //For every entry add the value and reset the AtomicLong back to zero - for (Map.Entry e : this.counters.entrySet()) { - builder.put(e.getKey(), e.getValue().getAndSet(0)); - } - //Build the immutable map so that people can't mess around with it. - return builder.build(); - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/package-info.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/package-info.java deleted file mode 100644 index c79bd52..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/package-info.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** -Provides HBase Client - -

        Table of Contents

        - - -

        Overview

        -

        To administer HBase, create and drop tables, list and alter tables, - use {@link org.apache.hadoop.hbase.client.HBaseAdmin}. Once created, table access is via an instance - of {@link org.apache.hadoop.hbase.client.HTable}. You add content to a table a row at a time. To insert, - create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value, target column - and optionally a timestamp. Commit your update using {@link org.apache.hadoop.hbase.client.HTable#put(Put)}. - To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be specified to be broad -- get all - on a particular row -- or narrow; i.e. return only a single cell value. After creating an instance of - Get, invoke {@link org.apache.hadoop.hbase.client.HTable#get(Get)}. Use - {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor- like access. After - creating and configuring your Scan instance, call {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} and then - invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.HTable#get(Get)} and - {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} return a -{@link org.apache.hadoop.hbase.client.Result}. -A Result is a List of {@link org.apache.hadoop.hbase.KeyValue}s. It has facility for packaging the return -in different formats. - Use {@link org.apache.hadoop.hbase.client.Delete} to remove content. - You can remove individual cells or entire families, etc. Pass it to - {@link org.apache.hadoop.hbase.client.HTable#delete(Delete)} to execute. -

        -

        Puts, Gets and Deletes take out a lock on the target row for the duration of their operation. - Concurrent modifications to a single row are serialized. Gets and scans run concurrently without - interference of the row locks and are guaranteed not to return half-written rows.

        -

        Client code accessing a cluster finds the cluster by querying ZooKeeper. - This means that the ZooKeeper quorum to use must be on the client CLASSPATH. - Usually this means make sure the client can find your hbase-site.xml. -

        - -
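As an editorial aside (not part of the original file), a minimal sketch of the administrative path mentioned above, creating the table that the example below then reads and writes; it uses the same abbreviated style as that example, with imports elided:

    Configuration config = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(config);
    try {
      // Describe the table and its single column family, then create it if needed.
      HTableDescriptor desc = new HTableDescriptor("myLittleHBaseTable");
      desc.addFamily(new HColumnDescriptor("myLittleFamily"));
      if (!admin.tableExists("myLittleHBaseTable")) {
        admin.createTable(desc);
      }
    } finally {
      admin.close();
    }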

        Example API Usage

        - -

        Once you have a running HBase, you probably want a way to hook your application up to it. - If your application is in Java, then you should use the Java API. Here's an example of what - a simple client might look like. This example assumes that you've created a table called - "myLittleHBaseTable" with a column family called "myLittleFamily".

        - -
        -
        -import java.io.IOException;
        -
        -import org.apache.hadoop.hbase.HBaseConfiguration;
        -import org.apache.hadoop.hbase.client.Get;
        -import org.apache.hadoop.hbase.client.HTable;
        -import org.apache.hadoop.hbase.client.Put;
        -import org.apache.hadoop.hbase.client.Result;
        -import org.apache.hadoop.hbase.client.ResultScanner;
        -import org.apache.hadoop.hbase.client.Scan;
        -import org.apache.hadoop.hbase.util.Bytes;
        -
        -
        -// Class that has nothing but a main.
        -// Does a Put, Get and a Scan against an hbase table.
        -public class MyLittleHBaseClient {
        -  public static void main(String[] args) throws IOException {
        -    // You need a configuration object to tell the client where to connect.
        -    // When you create a HBaseConfiguration, it reads in whatever you've set
        -    // into your hbase-site.xml and in hbase-default.xml, as long as these can
        -    // be found on the CLASSPATH
        -    Configuration config = HBaseConfiguration.create();
        -
        -    // This instantiates an HTable object that connects you to
        -    // the "myLittleHBaseTable" table.
        -    HTable table = new HTable(config, "myLittleHBaseTable");
        -
        -    // To add to a row, use Put.  A Put constructor takes the name of the row
        -    // you want to insert into as a byte array.  In HBase, the Bytes class has
        -    // utility for converting all kinds of java types to byte arrays.  In the
        -    // below, we are converting the String "myLittleRow" into a byte array to
        -    // use as a row key for our update. Once you have a Put instance, you can
        -    // adorn it by setting the names of columns you want to update on the row,
        -    // the timestamp to use in your update, etc.  If no timestamp, the server
        -    // applies current time to the edits.
        -    Put p = new Put(Bytes.toBytes("myLittleRow"));
        -
        -    // To set the value you'd like to update in the row 'myLittleRow', specify
        -    // the column family, column qualifier, and value of the table cell you'd
        -    // like to update.  The column family must already exist in your table
        -    // schema.  The qualifier can be anything.  All must be specified as byte
        -    // arrays as hbase is all about byte arrays.  Let's pretend the table
        -    // 'myLittleHBaseTable' was created with a family 'myLittleFamily'.
        -    p.add(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"),
        -      Bytes.toBytes("Some Value"));
        -
        -    // Once you've adorned your Put instance with all the updates you want to
        -    // make, to commit it do the following (The HTable#put method takes the
        -    // Put instance you've been building and pushes the changes you made into
        -    // hbase)
        -    table.put(p);
        -
        -    // Now, to retrieve the data we just wrote. The values that come back are
        -    // Result instances. Generally, a Result is an object that will package up
        -    // the hbase return into the form you find most palatable.
        -    Get g = new Get(Bytes.toBytes("myLittleRow"));
        -    Result r = table.get(g);
        -    byte [] value = r.getValue(Bytes.toBytes("myLittleFamily"),
        -      Bytes.toBytes("someQualifier"));
        -    // If we convert the value bytes, we should get back 'Some Value', the
        -    // value we inserted at this location.
        -    String valueStr = Bytes.toString(value);
        -    System.out.println("GET: " + valueStr);
        -
        -    // Sometimes, you won't know the row you're looking for. In this case, you
        -    // use a Scanner. This will give you cursor-like interface to the contents
        -    // of the table.  To set up a Scanner, do like you did above making a Put
        -    // and a Get, create a Scan.  Adorn it with column names, etc.
        -    Scan s = new Scan();
        -    s.addColumn(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"));
        -    ResultScanner scanner = table.getScanner(s);
        -    try {
        -      // Scanners return Result instances.
        -      // Now, for the actual iteration. One way is to use a while loop like so:
        -      for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
        -        // print out the row we found and the columns we were looking for
        -        System.out.println("Found row: " + rr);
        -      }
        -
        -      // The other approach is to use a foreach loop. Scanners are iterable!
        -      // for (Result rr : scanner) {
        -      //   System.out.println("Found row: " + rr);
        -      // }
        -    } finally {
        -      // Make sure you close your scanners when you are done!
        -      // That's why we have it inside a try/finally clause
        -      scanner.close();
        -    }
        -  }
        -}
        -
        -
        - -
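As an editorial aside (not part of the original file): the overview above also mentions Delete, which the example does not exercise. A minimal sketch in the same style, continuing the example above (add an import for org.apache.hadoop.hbase.client.Delete), that removes the cell written earlier:

    // To remove what we wrote, build a Delete for the row.  Left unqualified it
    // removes the whole row; restricting it to a column removes just that cell.
    Delete d = new Delete(Bytes.toBytes("myLittleRow"));
    d.deleteColumns(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"));
    table.delete(d);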

        There are many other methods for putting data into and getting data out of - HBase, but these examples should get you started. See the HTable javadoc for - more methods. Additionally, there are methods for managing tables in the - HBaseAdmin class.

        - -

        If your client is NOT Java, then you should consider the Thrift or REST - libraries.

        - -

        Related Documentation

        - -
        - - -


        - - - -*/ -package org.apache.hadoop.hbase.client; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java deleted file mode 100644 index 57457be..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java +++ /dev/null @@ -1,209 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client.replication; - -import java.io.Closeable; -import java.io.IOException; -import java.util.Map; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; -import org.apache.hadoop.hbase.replication.ReplicationZookeeper; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.zookeeper.KeeperException; - -/** - *

        - * This class provides the administrative interface to HBase cluster - * replication. In order to use it, the cluster and the client using - * ReplicationAdmin must be configured with hbase.replication - * set to true. - *

        - *

        - * Adding a new peer results in creating new outbound connections from every - * region server to a subset of region servers on the slave cluster. Each - * new stream of replication will start replicating from the beginning of the - * current HLog, meaning that edits from the past will be replicated. -

        - *

        - * Removing a peer is a destructive and irreversible operation that stops - * all the replication streams for the given cluster and deletes the metadata - * used to keep track of the replication state. - *

        - *

        - * Enabling and disabling peers is currently not supported. - *

        - *

        - * As cluster replication is still experimental, a kill switch is provided - * in order to stop all replication-related operations, see - * {@link #setReplicating(boolean)}. When setting it back to true, the new - * state of all the replication streams will be unknown and may have holes. - * Use at your own risk. - *

        - *

        - * To see which commands are available in the shell, type - * replication. - *
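As an editorial aside (not part of the original file), a minimal sketch of adding and counting peers with this class, assuming hbase.replication is set to true on both clusters; the peer id and cluster key below are placeholders:

    Configuration conf = HBaseConfiguration.create();
    ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf);
    try {
      // Cluster key format: hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
      // of the slave cluster.
      replicationAdmin.addPeer("1", "slave-zk1,slave-zk2,slave-zk3:2181:/hbase");
      System.out.println("configured peers: " + replicationAdmin.getPeersCount());
    } finally {
      replicationAdmin.close();
    }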

        - */ -public class ReplicationAdmin implements Closeable { - private static final Log LOG = LogFactory.getLog(ReplicationAdmin.class); - - private final ReplicationZookeeper replicationZk; - private final HConnection connection; - - /** - * Constructor that creates a connection to the local ZooKeeper ensemble. - * @param conf Configuration to use - * @throws IOException if the connection to ZK cannot be made - * @throws RuntimeException if replication isn't enabled. - */ - public ReplicationAdmin(Configuration conf) throws IOException { - if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY, false)) { - throw new RuntimeException("hbase.replication isn't true, please " + - "enable it in order to use replication"); - } - this.connection = HConnectionManager.getConnection(conf); - ZooKeeperWatcher zkw = createZooKeeperWatcher(); - try { - this.replicationZk = new ReplicationZookeeper(this.connection, conf, zkw); - } catch (KeeperException e) { - throw new IOException("Unable setup the ZooKeeper connection", e); - } - } - - private ZooKeeperWatcher createZooKeeperWatcher() throws IOException { - return new ZooKeeperWatcher(connection.getConfiguration(), - "Replication Admin", new Abortable() { - @Override - public void abort(String why, Throwable e) { - LOG.error(why, e); - System.exit(1); - } - - @Override - public boolean isAborted() { - return false; - } - - }); - } - - - /** - * Add a new peer cluster to replicate to. - * @param id a short that identifies the cluster - * @param clusterKey the concatenation of the slave cluster's - * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent - * @throws IllegalStateException if there's already one slave since - * multi-slave isn't supported yet. - */ - public void addPeer(String id, String clusterKey) throws IOException { - this.replicationZk.addPeer(id, clusterKey); - } - - /** - * Removes a peer cluster and stops the replication to it. - * @param id a short that identifies the cluster - */ - public void removePeer(String id) throws IOException { - this.replicationZk.removePeer(id); - } - - /** - * Restart the replication stream to the specified peer. - * @param id a short that identifies the cluster - */ - public void enablePeer(String id) throws IOException { - this.replicationZk.enablePeer(id); - } - - /** - * Stop the replication stream to the specified peer. - * @param id a short that identifies the cluster - */ - public void disablePeer(String id) throws IOException { - this.replicationZk.disablePeer(id); - } - - /** - * Get the number of slave clusters the local cluster has. - * @return number of slave clusters - */ - public int getPeersCount() { - return this.replicationZk.listPeersIdsAndWatch().size(); - } - - /** - * Map of this cluster's peers for display. - * @return A map of peer ids to peer cluster keys - */ - public Map listPeers() { - return this.replicationZk.listPeers(); - } - - /** - * Get the current status of the kill switch, if the cluster is replicating - * or not. - * @return true if the cluster is replicated, otherwise false - */ - public boolean getReplicating() throws IOException { - try { - return this.replicationZk.getReplication(); - } catch (KeeperException e) { - throw new IOException("Couldn't get the replication status"); - } - } - - /** - * Kill switch for all replication-related features - * @param newState true to start replication, false to stop it. 
- * completely - * @return the previous state - */ - public boolean setReplicating(boolean newState) throws IOException { - boolean prev = true; - try { - prev = getReplicating(); - this.replicationZk.setReplicating(newState); - } catch (KeeperException e) { - throw new IOException("Unable to set the replication state", e); - } - return prev; - } - - /** - * Get the ZK-support tool created and used by this object for replication. - * @return the ZK-support tool - */ - ReplicationZookeeper getReplicationZk() { - return replicationZk; - } - - @Override - public void close() throws IOException { - if (this.connection != null) { - this.connection.close(); - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java deleted file mode 100644 index d74929c..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java +++ /dev/null @@ -1,164 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.coprocessor; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter; - -import com.google.protobuf.ByteString; - -/** - * Defines how value for specific column is interpreted and provides utility - * methods like compare, add, multiply etc for them. Takes column family, column - * qualifier and return the cell value. Its concrete implementation should - * handle null case gracefully. Refer to {@link LongColumnInterpreter} for an - * example. - *

        - * Takes two generic parameters. The cell value type of the interpreter is . - * During some computations like sum, average, the return type can be different - * than the cell value data type, for eg, sum of int cell values might overflow - * in case of a int result, we should use Long for its result. Therefore, this - * class mandates to use a different (promoted) data type for result of these - * computations . All computations are performed on the promoted data type - * . There is a conversion method - * {@link ColumnInterpreter#castToReturnType(Object)} which takes a type and - * returns a type. - * @param Cell value data type - * @param Promoted data type - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public interface ColumnInterpreter { - - /** - * @param colFamily - * @param colQualifier - * @param kv - * @return value of type T - * @throws IOException - */ - T getValue(byte[] colFamily, byte[] colQualifier, KeyValue kv) - throws IOException; - - /** - * @param l1 - * @param l2 - * @return sum or non null value among (if either of them is null); otherwise - * returns a null. - */ - public S add(S l1, S l2); - - /** - * returns the maximum value for this type T - * @return max - */ - - T getMaxValue(); - - T getMinValue(); - - /** - * @param o1 - * @param o2 - * @return multiplication - */ - S multiply(S o1, S o2); - - /** - * @param o - * @return increment - */ - S increment(S o); - - /** - * provides casting opportunity between the data types. - * @param o - * @return cast - */ - S castToReturnType(T o); - - /** - * This takes care if either of arguments are null. returns 0 if they are - * equal or both are null; - *

          - *
        • >0 if l1 > l2 or l1 is not null and l2 is null. - *
        • < 0 if l1 < l2 or l1 is null and l2 is not null. - */ - int compare(final T l1, final T l2); - - /** - * used for computing average of data values. Not providing the divide - * method that takes two values as it is not needed as of now. - * @param o - * @param l - * @return Average - */ - double divideForAvg(S o, Long l); - - /** - * This method should return any additional data that is needed on the - * server side to construct the ColumnInterpreter. The server - * will pass this to the {@link #initialize(ByteString)} - * method. If there is no ColumnInterpreter specific data (for e.g., - * {@link LongColumnInterpreter}) then null should be returned. - * @return the PB message - */ - ByteString columnInterpreterSpecificData(); - - /** - * Return the PB for type T - * @param t - * @return PB-message - */ - ByteString getProtoForCellType(T t); - - /** - * Return the PB for type S - * @param s - * @return PB-message - */ - ByteString getProtoForPromotedType(S s); - - /** - * This method should initialize any field(s) of the ColumnInterpreter with - * a parsing of the passed message bytes (used on the server side). - * @param bytes - */ - void initialize(ByteString bytes); - - /** - * Converts the bytes in the server's response to the expected type S - * @param response - * @return response of type S constructed from the message - */ - S parseResponseAsPromotedType(byte[] response); - - /** - * The response message comes as type S. This will convert/cast it to T. - * In some sense, performs the opposite of {@link #castToReturnType(Object)} - * @param response - * @return cast - */ - T castToCellType(S response); -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java deleted file mode 100644 index 26282db..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; -import org.apache.hadoop.hbase.util.Bytes; - -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * A binary comparator which lexicographically compares against the specified - * byte array using {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. 
- */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class BinaryComparator extends ByteArrayComparable { - - /** - * Constructor - * @param value value - */ - public BinaryComparator(byte[] value) { - super(value); - } - - @Override - public int compareTo(byte [] value, int offset, int length) { - return Bytes.compareTo(this.value, 0, this.value.length, value, offset, length); - } - - /** - * @return The comparator serialized using pb - */ - public byte [] toByteArray() { - ComparatorProtos.BinaryComparator.Builder builder = - ComparatorProtos.BinaryComparator.newBuilder(); - builder.setComparable(super.convert()); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link BinaryComparator} instance - * @return An instance of {@link BinaryComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static BinaryComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { - ComparatorProtos.BinaryComparator proto; - try { - proto = ComparatorProtos.BinaryComparator.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new BinaryComparator(proto.getComparable().getValue().toByteArray()); - } - - /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(ByteArrayComparable other) { - if (other == this) return true; - if (!(other instanceof BinaryComparator)) return false; - - return super.areSerializedFieldsEqual(other); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java deleted file mode 100644 index 575ff1e..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; -import org.apache.hadoop.hbase.util.Bytes; - -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * A comparator which compares against a specified byte array, but only compares - * up to the length of this byte array. For the rest it is similar to - * {@link BinaryComparator}. 
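As an editorial aside (not part of the original file), a minimal sketch of how these comparators are typically consumed, here pairing a BinaryPrefixComparator with a RowFilter so a scan only returns rows whose key starts with a given prefix; the table name, family and prefix are placeholders:

    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "mytable");
    Scan scan = new Scan();
    // EQUAL matches here because the comparator only compares up to the prefix length.
    scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL,
        new BinaryPrefixComparator(Bytes.toBytes("user_123"))));
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        System.out.println("matched row: " + r);
      }
    } finally {
      scanner.close();
      table.close();
    }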
- */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class BinaryPrefixComparator extends ByteArrayComparable { - - /** - * Constructor - * @param value value - */ - public BinaryPrefixComparator(byte[] value) { - super(value); - } - - @Override - public int compareTo(byte [] value, int offset, int length) { - return Bytes.compareTo(this.value, 0, this.value.length, value, offset, - this.value.length <= length ? this.value.length : length); - } - - /** - * @return The comparator serialized using pb - */ - public byte [] toByteArray() { - ComparatorProtos.BinaryPrefixComparator.Builder builder = - ComparatorProtos.BinaryPrefixComparator.newBuilder(); - builder.setComparable(super.convert()); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link BinaryPrefixComparator} instance - * @return An instance of {@link BinaryPrefixComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static BinaryPrefixComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { - ComparatorProtos.BinaryPrefixComparator proto; - try { - proto = ComparatorProtos.BinaryPrefixComparator.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new BinaryPrefixComparator(proto.getComparable().getValue().toByteArray()); - } - - /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(ByteArrayComparable other) { - if (other == this) return true; - if (!(other instanceof BinaryPrefixComparator)) return false; - - return super.areSerializedFieldsEqual(other); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java deleted file mode 100644 index 73afedc..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * A bit comparator which performs the specified bitwise operation on each of the bytes - * with the specified byte array. Then returns whether the result is non-zero. 
- */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class BitComparator extends ByteArrayComparable { - - /** Bit operators. */ - public enum BitwiseOp { - /** and */ - AND, - /** or */ - OR, - /** xor */ - XOR - } - protected BitwiseOp bitOperator; - - /** - * Constructor - * @param value value - * @param bitOperator operator to use on the bit comparison - */ - public BitComparator(byte[] value, BitwiseOp bitOperator) { - super(value); - this.bitOperator = bitOperator; - } - - /** - * @return the bitwise operator - */ - public BitwiseOp getOperator() { - return bitOperator; - } - - /** - * @return The comparator serialized using pb - */ - public byte [] toByteArray() { - ComparatorProtos.BitComparator.Builder builder = - ComparatorProtos.BitComparator.newBuilder(); - builder.setComparable(super.convert()); - ComparatorProtos.BitComparator.BitwiseOp bitwiseOpPb = - ComparatorProtos.BitComparator.BitwiseOp.valueOf(bitOperator.name()); - builder.setBitwiseOp(bitwiseOpPb); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link BitComparator} instance - * @return An instance of {@link BitComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static BitComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { - ComparatorProtos.BitComparator proto; - try { - proto = ComparatorProtos.BitComparator.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - BitwiseOp bitwiseOp = BitwiseOp.valueOf(proto.getBitwiseOp().name()); - return new BitComparator(proto.getComparable().getValue().toByteArray(),bitwiseOp); - } - - /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(ByteArrayComparable other) { - if (other == this) return true; - if (!(other instanceof BitComparator)) return false; - - BitComparator comparator = (BitComparator)other; - return super.areSerializedFieldsEqual(other) - && this.getOperator().equals(comparator.getOperator()); - } - - @Override - public int compareTo(byte[] value, int offset, int length) { - if (length != this.value.length) { - return 1; - } - int b = 0; - //Iterating backwards is faster because we can quit after one non-zero byte. - for (int i = length - 1; i >= 0 && b == 0; i--) { - switch (bitOperator) { - case AND: - b = (this.value[i] & value[i+offset]) & 0xff; - break; - case OR: - b = (this.value[i] | value[i+offset]) & 0xff; - break; - case XOR: - b = (this.value[i] ^ value[i+offset]) & 0xff; - break; - } - } - return b == 0 ? 1 : 0; - } -} - diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ByteArrayComparable.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ByteArrayComparable.java deleted file mode 100644 index 93b73e2..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ByteArrayComparable.java +++ /dev/null @@ -1,100 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; -import org.apache.hadoop.hbase.util.Bytes; - -import com.google.protobuf.ByteString; - - -/** Base class for byte array comparators */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public abstract class ByteArrayComparable implements Comparable { - - byte[] value; - - /** - * Constructor. - * @param value the value to compare against - */ - public ByteArrayComparable(byte [] value) { - this.value = value; - } - - public byte[] getValue() { - return value; - } - - /** - * @return The comparator serialized using pb - */ - public abstract byte [] toByteArray(); - - ComparatorProtos.ByteArrayComparable convert() { - ComparatorProtos.ByteArrayComparable.Builder builder = - ComparatorProtos.ByteArrayComparable.newBuilder(); - if (value != null) builder.setValue(ByteString.copyFrom(value)); - return builder.build(); - } - - /** - * @param pbBytes A pb serialized {@link ByteArrayComparable} instance - * @return An instance of {@link ByteArrayComparable} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static ByteArrayComparable parseFrom(final byte [] pbBytes) - throws DeserializationException { - throw new DeserializationException( - "parseFrom called on base ByteArrayComparable, but should be called on derived type"); - } - - /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(ByteArrayComparable o) { - if (o == this) return true; - if (!(o instanceof ByteArrayComparable)) return false; - - return Bytes.equals(this.getValue(), o.getValue()); - } - - @Override - public int compareTo(byte [] value) { - return compareTo(value, 0, value.length); - } - - /** - * Special compareTo method for subclasses, to avoid - * copying byte[] unnecessarily. - * @param value byte[] to compare - * @param offset offset into value - * @param length number of bytes to compare - * @return a negative integer, zero, or a positive integer as this object - * is less than, equal to, or greater than the specified object. - */ - public abstract int compareTo(byte [] value, int offset, int length); -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java deleted file mode 100644 index d775177d..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import java.util.ArrayList; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * Simple filter that returns first N columns on row only. - * This filter was written to test filters in Get and as soon as it gets - * its quota of columns, {@link #filterAllRemaining()} returns true. This - * makes this filter unsuitable as a Scan filter. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class ColumnCountGetFilter extends FilterBase { - private int limit = 0; - private int count = 0; - - public ColumnCountGetFilter(final int n) { - Preconditions.checkArgument(n >= 0, "limit be positive %s", n); - this.limit = n; - } - - public int getLimit() { - return limit; - } - - @Override - public boolean filterAllRemaining() { - return this.count > this.limit; - } - - @Override - public ReturnCode filterKeyValue(KeyValue v) { - this.count++; - return filterAllRemaining() ? ReturnCode.NEXT_COL : ReturnCode.INCLUDE_AND_NEXT_COL; - } - - @Override - public void reset() { - this.count = 0; - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); - int limit = ParseFilter.convertByteArrayToInt(filterArguments.get(0)); - return new ColumnCountGetFilter(limit); - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.ColumnCountGetFilter.Builder builder = - FilterProtos.ColumnCountGetFilter.newBuilder(); - builder.setLimit(this.limit); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link ColumnCountGetFilter} instance - * @return An instance of {@link ColumnCountGetFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static ColumnCountGetFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.ColumnCountGetFilter proto; - try { - proto = FilterProtos.ColumnCountGetFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new ColumnCountGetFilter(proto.getLimit()); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. 
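As a usage sketch (not part of this patch; the table and row names are invented), the filter is attached to a Get, since filterAllRemaining() ends the row as soon as the quota is reached, which is what makes it unsuitable for scans:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnCountGetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "wide_table");   // hypothetical table name
    try {
      Get get = new Get(Bytes.toBytes("row-1"));     // hypothetical row key
      get.setFilter(new ColumnCountGetFilter(10));   // at most the first 10 columns come back
      Result result = table.get(get);
      System.out.println(result.size() + " KeyValues returned");
    } finally {
      table.close();
    }
  }
}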
- */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof ColumnCountGetFilter)) return false; - - ColumnCountGetFilter other = (ColumnCountGetFilter)o; - return this.getLimit() == other.getLimit(); - } - - @Override - public String toString() { - return this.getClass().getSimpleName() + " " + this.limit; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java deleted file mode 100644 index d58429f..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.filter; - -import java.util.ArrayList; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * A filter, based on the ColumnCountGetFilter, takes two arguments: limit and offset. - * This filter can be used for row-based indexing, where references to other tables are stored across many columns, - * in order to efficient lookups and paginated results for end users. Only most recent versions are considered - * for pagination. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class ColumnPaginationFilter extends FilterBase -{ - private int limit = 0; - private int offset = 0; - private int count = 0; - - public ColumnPaginationFilter(final int limit, final int offset) - { - Preconditions.checkArgument(limit >= 0, "limit must be positive %s", limit); - Preconditions.checkArgument(offset >= 0, "offset must be positive %s", offset); - this.limit = limit; - this.offset = offset; - } - - /** - * @return limit - */ - public int getLimit() { - return limit; - } - - /** - * @return offset - */ - public int getOffset() { - return offset; - } - - @Override - public ReturnCode filterKeyValue(KeyValue v) - { - if(count >= offset + limit) - { - return ReturnCode.NEXT_ROW; - } - - ReturnCode code = count < offset ? 
ReturnCode.NEXT_COL : - ReturnCode.INCLUDE_AND_NEXT_COL; - count++; - return code; - } - - @Override - public void reset() - { - this.count = 0; - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 2, - "Expected 2 but got: %s", filterArguments.size()); - int limit = ParseFilter.convertByteArrayToInt(filterArguments.get(0)); - int offset = ParseFilter.convertByteArrayToInt(filterArguments.get(1)); - return new ColumnPaginationFilter(limit, offset); - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.ColumnPaginationFilter.Builder builder = - FilterProtos.ColumnPaginationFilter.newBuilder(); - builder.setLimit(this.limit); - builder.setOffset(this.offset); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link ColumnPaginationFilter} instance - * @return An instance of {@link ColumnPaginationFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static ColumnPaginationFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.ColumnPaginationFilter proto; - try { - proto = FilterProtos.ColumnPaginationFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new ColumnPaginationFilter(proto.getLimit(),proto.getOffset()); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof ColumnPaginationFilter)) return false; - - ColumnPaginationFilter other = (ColumnPaginationFilter)o; - return this.getLimit() == other.getLimit() && this.getOffset() == other.getOffset(); - } - - @Override - public String toString() { - return String.format("%s (%d, %d)", this.getClass().getSimpleName(), - this.limit, this.offset); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java deleted file mode 100644 index 226b2b1..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
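A small paging sketch for the filter above (not part of this patch; page size and page index are arbitrary):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;

public class ColumnPagingSketch {
  // limit = columns per page, offset = columns to skip; only the most recent version of
  // each column is considered, so this walks a wide row one slice at a time.
  public static Scan pageOfColumns(int pageSize, int pageIndex) {
    Scan scan = new Scan();
    scan.setFilter(new ColumnPaginationFilter(pageSize, pageSize * pageIndex));
    return scan;
  }
}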
- */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; -import org.apache.hadoop.hbase.util.Bytes; - -import java.util.ArrayList; - -import com.google.common.base.Preconditions; -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * This filter is used for selecting only those keys with columns that matches - * a particular prefix. For example, if prefix is 'an', it will pass keys with - * columns like 'and', 'anti' but not keys with columns like 'ball', 'act'. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class ColumnPrefixFilter extends FilterBase { - protected byte [] prefix = null; - - public ColumnPrefixFilter(final byte [] prefix) { - this.prefix = prefix; - } - - public byte[] getPrefix() { - return prefix; - } - - @Override - public ReturnCode filterKeyValue(KeyValue kv) { - if (this.prefix == null || kv.getBuffer() == null) { - return ReturnCode.INCLUDE; - } else { - return filterColumn(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength()); - } - } - - public ReturnCode filterColumn(byte[] buffer, int qualifierOffset, int qualifierLength) { - if (qualifierLength < prefix.length) { - int cmp = Bytes.compareTo(buffer, qualifierOffset, qualifierLength, this.prefix, 0, - qualifierLength); - if (cmp <= 0) { - return ReturnCode.SEEK_NEXT_USING_HINT; - } else { - return ReturnCode.NEXT_ROW; - } - } else { - int cmp = Bytes.compareTo(buffer, qualifierOffset, this.prefix.length, this.prefix, 0, - this.prefix.length); - if (cmp < 0) { - return ReturnCode.SEEK_NEXT_USING_HINT; - } else if (cmp > 0) { - return ReturnCode.NEXT_ROW; - } else { - return ReturnCode.INCLUDE; - } - } - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); - byte [] columnPrefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - return new ColumnPrefixFilter(columnPrefix); - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.ColumnPrefixFilter.Builder builder = - FilterProtos.ColumnPrefixFilter.newBuilder(); - if (this.prefix != null) builder.setPrefix(ByteString.copyFrom(this.prefix)); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link ColumnPrefixFilter} instance - * @return An instance of {@link ColumnPrefixFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static ColumnPrefixFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.ColumnPrefixFilter proto; - try { - proto = FilterProtos.ColumnPrefixFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new ColumnPrefixFilter(proto.getPrefix().toByteArray()); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. 
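For reference, a minimal sketch (not part of this patch; the family name "d" and the prefix are invented) pairing the prefix filter with a scan restricted to one family:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnPrefixSketch {
  public static Scan qualifiersStartingWith(String prefix) {
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("d"));   // hypothetical column family
    // For prefix "an" this keeps qualifiers such as "and" and "anti", and skips "ball".
    scan.setFilter(new ColumnPrefixFilter(Bytes.toBytes(prefix)));
    return scan;
  }
}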
- */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof ColumnPrefixFilter)) return false; - - ColumnPrefixFilter other = (ColumnPrefixFilter)o; - return Bytes.equals(this.getPrefix(), other.getPrefix()); - } - - public KeyValue getNextKeyHint(KeyValue kv) { - return KeyValue.createFirstOnRow( - kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(), kv.getBuffer(), - kv.getFamilyOffset(), kv.getFamilyLength(), prefix, 0, prefix.length); - } - - @Override - public String toString() { - return this.getClass().getSimpleName() + " " + Bytes.toStringBinary(this.prefix); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java deleted file mode 100644 index a275d7d..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java +++ /dev/null @@ -1,229 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; -import org.apache.hadoop.hbase.util.Bytes; - -import java.util.ArrayList; - -import com.google.common.base.Preconditions; -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * This filter is used for selecting only those keys with columns that are - * between minColumn to maxColumn. For example, if minColumn is 'an', and - * maxColumn is 'be', it will pass keys with columns like 'ana', 'bad', but not - * keys with columns like 'bed', 'eye' - * - * If minColumn is null, there is no lower bound. If maxColumn is null, there is - * no upper bound. - * - * minColumnInclusive and maxColumnInclusive specify if the ranges are inclusive - * or not. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class ColumnRangeFilter extends FilterBase { - protected byte[] minColumn = null; - protected boolean minColumnInclusive = true; - protected byte[] maxColumn = null; - protected boolean maxColumnInclusive = false; - - /** - * Create a filter to select those keys with columns that are between minColumn - * and maxColumn. - * @param minColumn minimum value for the column range. If if it's null, - * there is no lower bound. - * @param minColumnInclusive if true, include minColumn in the range. - * @param maxColumn maximum value for the column range. If it's null, - * @param maxColumnInclusive if true, include maxColumn in the range. - * there is no upper bound. 
- */ - public ColumnRangeFilter(final byte[] minColumn, boolean minColumnInclusive, - final byte[] maxColumn, boolean maxColumnInclusive) { - this.minColumn = minColumn; - this.minColumnInclusive = minColumnInclusive; - this.maxColumn = maxColumn; - this.maxColumnInclusive = maxColumnInclusive; - } - - /** - * @return if min column range is inclusive. - */ - public boolean isMinColumnInclusive() { - return minColumnInclusive; - } - - /** - * @return if max column range is inclusive. - */ - public boolean isMaxColumnInclusive() { - return maxColumnInclusive; - } - - /** - * @return the min column range for the filter - */ - public byte[] getMinColumn() { - return this.minColumn; - } - - /** - * @return true if min column is inclusive, false otherwise - */ - public boolean getMinColumnInclusive() { - return this.minColumnInclusive; - } - - /** - * @return the max column range for the filter - */ - public byte[] getMaxColumn() { - return this.maxColumn; - } - - /** - * @return true if max column is inclusive, false otherwise - */ - public boolean getMaxColumnInclusive() { - return this.maxColumnInclusive; - } - - @Override - public ReturnCode filterKeyValue(KeyValue kv) { - byte[] buffer = kv.getBuffer(); - int qualifierOffset = kv.getQualifierOffset(); - int qualifierLength = kv.getQualifierLength(); - int cmpMin = 1; - - if (this.minColumn != null) { - cmpMin = Bytes.compareTo(buffer, qualifierOffset, qualifierLength, - this.minColumn, 0, this.minColumn.length); - } - - if (cmpMin < 0) { - return ReturnCode.SEEK_NEXT_USING_HINT; - } - - if (!this.minColumnInclusive && cmpMin == 0) { - return ReturnCode.SKIP; - } - - if (this.maxColumn == null) { - return ReturnCode.INCLUDE; - } - - int cmpMax = Bytes.compareTo(buffer, qualifierOffset, qualifierLength, - this.maxColumn, 0, this.maxColumn.length); - - if (this.maxColumnInclusive && cmpMax <= 0 || - !this.maxColumnInclusive && cmpMax < 0) { - return ReturnCode.INCLUDE; - } - - return ReturnCode.NEXT_ROW; - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 4, - "Expected 4 but got: %s", filterArguments.size()); - byte [] minColumn = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - boolean minColumnInclusive = ParseFilter.convertByteArrayToBoolean(filterArguments.get(1)); - byte [] maxColumn = ParseFilter.removeQuotesFromByteArray(filterArguments.get(2)); - boolean maxColumnInclusive = ParseFilter.convertByteArrayToBoolean(filterArguments.get(3)); - - if (minColumn.length == 0) - minColumn = null; - if (maxColumn.length == 0) - maxColumn = null; - return new ColumnRangeFilter(minColumn, minColumnInclusive, - maxColumn, maxColumnInclusive); - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.ColumnRangeFilter.Builder builder = - FilterProtos.ColumnRangeFilter.newBuilder(); - if (this.minColumn != null) builder.setMinColumn(ByteString.copyFrom(this.minColumn)); - builder.setMinColumnInclusive(this.minColumnInclusive); - if (this.maxColumn != null) builder.setMaxColumn(ByteString.copyFrom(this.maxColumn)); - builder.setMaxColumnInclusive(this.maxColumnInclusive); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link ColumnRangeFilter} instance - * @return An instance of {@link ColumnRangeFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static ColumnRangeFilter parseFrom(final byte [] pbBytes) - throws 
DeserializationException { - FilterProtos.ColumnRangeFilter proto; - try { - proto = FilterProtos.ColumnRangeFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new ColumnRangeFilter(proto.hasMinColumn()?proto.getMinColumn().toByteArray():null, - proto.getMinColumnInclusive(),proto.hasMaxColumn()?proto.getMaxColumn().toByteArray():null, - proto.getMaxColumnInclusive()); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof ColumnRangeFilter)) return false; - - ColumnRangeFilter other = (ColumnRangeFilter)o; - return Bytes.equals(this.getMinColumn(),other.getMinColumn()) - && this.getMinColumnInclusive() == other.getMinColumnInclusive() - && Bytes.equals(this.getMaxColumn(), other.getMaxColumn()) - && this.getMaxColumnInclusive() == other.getMaxColumnInclusive(); - } - - @Override - public KeyValue getNextKeyHint(KeyValue kv) { - return KeyValue.createFirstOnRow(kv.getBuffer(), kv.getRowOffset(), kv - .getRowLength(), kv.getBuffer(), kv.getFamilyOffset(), kv - .getFamilyLength(), this.minColumn, 0, this.minColumn == null ? 0 - : this.minColumn.length); - } - - @Override - public String toString() { - return this.getClass().getSimpleName() + " " - + (this.minColumnInclusive ? "[" : "(") + Bytes.toStringBinary(this.minColumn) - + ", " + Bytes.toStringBinary(this.maxColumn) - + (this.maxColumnInclusive ? "]" : ")"); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java deleted file mode 100644 index ff00af3..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java +++ /dev/null @@ -1,180 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType; -import org.apache.hadoop.hbase.util.Bytes; - -import java.util.ArrayList; - -import com.google.common.base.Preconditions; -/** - * This is a generic filter to be used to filter by comparison. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator. - *

<p>
- * To filter by row key, use {@link RowFilter}.
- * <p>
- * To filter by column qualifier, use {@link QualifierFilter}.
- * <p>
- * To filter by value, use {@link SingleColumnValueFilter}.
- * <p>
- * These filters can be wrapped with {@link SkipFilter} and {@link WhileMatchFilter}
- * to add more control.
- * <p>
          - * Multiple filters can be combined using {@link FilterList}. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public abstract class CompareFilter extends FilterBase { - - /** Comparison operators. */ - public enum CompareOp { - /** less than */ - LESS, - /** less than or equal to */ - LESS_OR_EQUAL, - /** equals */ - EQUAL, - /** not equal */ - NOT_EQUAL, - /** greater than or equal to */ - GREATER_OR_EQUAL, - /** greater than */ - GREATER, - /** no operation */ - NO_OP, - } - - protected CompareOp compareOp; - protected ByteArrayComparable comparator; - - /** - * Constructor. - * @param compareOp the compare op for row matching - * @param comparator the comparator for row matching - */ - public CompareFilter(final CompareOp compareOp, - final ByteArrayComparable comparator) { - this.compareOp = compareOp; - this.comparator = comparator; - } - - /** - * @return operator - */ - public CompareOp getOperator() { - return compareOp; - } - - /** - * @return the comparator - */ - public ByteArrayComparable getComparator() { - return comparator; - } - - protected boolean doCompare(final CompareOp compareOp, - final ByteArrayComparable comparator, final byte [] data, - final int offset, final int length) { - if (compareOp == CompareOp.NO_OP) { - return true; - } - int compareResult = comparator.compareTo(data, offset, length); - switch (compareOp) { - case LESS: - return compareResult <= 0; - case LESS_OR_EQUAL: - return compareResult < 0; - case EQUAL: - return compareResult != 0; - case NOT_EQUAL: - return compareResult == 0; - case GREATER_OR_EQUAL: - return compareResult > 0; - case GREATER: - return compareResult >= 0; - default: - throw new RuntimeException("Unknown Compare op " + - compareOp.name()); - } - } - - public static ArrayList extractArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 2, - "Expected 2 but got: %s", filterArguments.size()); - CompareOp compareOp = ParseFilter.createCompareOp(filterArguments.get(0)); - ByteArrayComparable comparator = ParseFilter.createComparator( - ParseFilter.removeQuotesFromByteArray(filterArguments.get(1))); - - if (comparator instanceof RegexStringComparator || - comparator instanceof SubstringComparator) { - if (compareOp != CompareOp.EQUAL && - compareOp != CompareOp.NOT_EQUAL) { - throw new IllegalArgumentException ("A regexstring comparator and substring comparator" + - " can only be used with EQUAL and NOT_EQUAL"); - } - } - ArrayList arguments = new ArrayList(); - arguments.add(compareOp); - arguments.add(comparator); - return arguments; - } - - /** - * @return A pb instance to represent this instance. - */ - FilterProtos.CompareFilter convert() { - FilterProtos.CompareFilter.Builder builder = - FilterProtos.CompareFilter.newBuilder(); - HBaseProtos.CompareType compareOp = CompareType.valueOf(this.compareOp.name()); - builder.setCompareOp(compareOp); - if (this.comparator != null) builder.setComparator(ProtobufUtil.toComparator(this.comparator)); - return builder.build(); - } - - /** - * - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. 
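To make the CompareFilter contract concrete, a hedged sketch (not part of this patch; the row-key pattern is invented) using one of the subclasses named above:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RowFilter;

public class RowRegexSketch {
  public static Scan usersOnly() {
    // RowFilter is the CompareFilter subclass that applies the comparison to row keys.
    // Regex and substring comparators are only meant for EQUAL or NOT_EQUAL.
    Scan scan = new Scan();
    scan.setFilter(new RowFilter(CompareOp.EQUAL, new RegexStringComparator("^user-[0-9]+$")));
    return scan;
  }
}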
- */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof CompareFilter)) return false; - - CompareFilter other = (CompareFilter)o; - return this.getOperator().equals(other.getOperator()) && - (this.getComparator() == other.getComparator() - || this.getComparator().areSerializedFieldsEqual(other.getComparator())); - } - - @Override - public String toString() { - return String.format("%s (%s, %s)", - this.getClass().getSimpleName(), - this.compareOp.name(), - Bytes.toStringBinary(this.comparator.getValue())); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java deleted file mode 100644 index 65ec48f..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java +++ /dev/null @@ -1,289 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.filter; - -import java.io.IOException; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Set; -import java.util.ArrayList; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; -import org.apache.hadoop.hbase.util.Bytes; - -import com.google.common.base.Preconditions; -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * A filter for adding inter-column timestamp matching - * Only cells with a correspondingly timestamped entry in - * the target column will be retained - * Not compatible with Scan.setBatch as operations need - * full rows for correct filtering - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class DependentColumnFilter extends CompareFilter { - - protected byte[] columnFamily; - protected byte[] columnQualifier; - protected boolean dropDependentColumn; - - protected Set stampSet = new HashSet(); - - /** - * Build a dependent column filter with value checking - * dependent column varies will be compared using the supplied - * compareOp and comparator, for usage of which - * refer to {@link CompareFilter} - * - * @param family dependent column family - * @param qualifier dependent column qualifier - * @param dropDependentColumn whether the column should be discarded after - * @param valueCompareOp comparison op - * @param valueComparator comparator - */ - public DependentColumnFilter(final byte [] family, final byte[] qualifier, - final 
boolean dropDependentColumn, final CompareOp valueCompareOp, - final ByteArrayComparable valueComparator) { - // set up the comparator - super(valueCompareOp, valueComparator); - this.columnFamily = family; - this.columnQualifier = qualifier; - this.dropDependentColumn = dropDependentColumn; - } - - /** - * Constructor for DependentColumn filter. - * Keyvalues where a keyvalue from target column - * with the same timestamp do not exist will be dropped. - * - * @param family name of target column family - * @param qualifier name of column qualifier - */ - public DependentColumnFilter(final byte [] family, final byte [] qualifier) { - this(family, qualifier, false); - } - - /** - * Constructor for DependentColumn filter. - * Keyvalues where a keyvalue from target column - * with the same timestamp do not exist will be dropped. - * - * @param family name of dependent column family - * @param qualifier name of dependent qualifier - * @param dropDependentColumn whether the dependent columns keyvalues should be discarded - */ - public DependentColumnFilter(final byte [] family, final byte [] qualifier, - final boolean dropDependentColumn) { - this(family, qualifier, dropDependentColumn, CompareOp.NO_OP, null); - } - - /** - * @return the column family - */ - public byte[] getFamily() { - return this.columnFamily; - } - - /** - * @return the column qualifier - */ - public byte[] getQualifier() { - return this.columnQualifier; - } - - /** - * @return true if we should drop the dependent column, false otherwise - */ - public boolean dropDependentColumn() { - return this.dropDependentColumn; - } - - public boolean getDropDependentColumn() { - return this.dropDependentColumn; - } - - @Override - public boolean filterAllRemaining() { - return false; - } - - @Override - public ReturnCode filterKeyValue(KeyValue v) { - // Check if the column and qualifier match - if (!v.matchingColumn(this.columnFamily, this.columnQualifier)) { - // include non-matches for the time being, they'll be discarded afterwards - return ReturnCode.INCLUDE; - } - // If it doesn't pass the op, skip it - if (comparator != null - && doCompare(compareOp, comparator, v.getBuffer(), v.getValueOffset(), - v.getValueLength())) - return ReturnCode.SKIP; - - stampSet.add(v.getTimestamp()); - if(dropDependentColumn) { - return ReturnCode.SKIP; - } - return ReturnCode.INCLUDE; - } - - @Override - public void filterRow(List kvs) { - Iterator it = kvs.iterator(); - KeyValue kv; - while(it.hasNext()) { - kv = it.next(); - if(!stampSet.contains(kv.getTimestamp())) { - it.remove(); - } - } - } - - @Override - public boolean hasFilterRow() { - return true; - } - - @Override - public boolean filterRow() { - return false; - } - - @Override - public boolean filterRowKey(byte[] buffer, int offset, int length) { - return false; - } - @Override - public void reset() { - stampSet.clear(); - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 2 || - filterArguments.size() == 3 || - filterArguments.size() == 5, - "Expected 2, 3 or 5 but got: %s", filterArguments.size()); - if (filterArguments.size() == 2) { - byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); - return new DependentColumnFilter(family, qualifier); - - } else if (filterArguments.size() == 3) { - byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - byte [] qualifier 
= ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); - boolean dropDependentColumn = ParseFilter.convertByteArrayToBoolean(filterArguments.get(2)); - return new DependentColumnFilter(family, qualifier, dropDependentColumn); - - } else if (filterArguments.size() == 5) { - byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); - boolean dropDependentColumn = ParseFilter.convertByteArrayToBoolean(filterArguments.get(2)); - CompareOp compareOp = ParseFilter.createCompareOp(filterArguments.get(3)); - ByteArrayComparable comparator = ParseFilter.createComparator( - ParseFilter.removeQuotesFromByteArray(filterArguments.get(4))); - return new DependentColumnFilter(family, qualifier, dropDependentColumn, - compareOp, comparator); - } else { - throw new IllegalArgumentException("Expected 2, 3 or 5 but got: " + filterArguments.size()); - } - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.DependentColumnFilter.Builder builder = - FilterProtos.DependentColumnFilter.newBuilder(); - builder.setCompareFilter(super.convert()); - if (this.columnFamily != null) { - builder.setColumnFamily(ByteString.copyFrom(this.columnFamily)); - } - if (this.columnQualifier != null) { - builder.setColumnQualifier(ByteString.copyFrom(this.columnQualifier)); - } - builder.setDropDependentColumn(this.dropDependentColumn); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link DependentColumnFilter} instance - * @return An instance of {@link DependentColumnFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static DependentColumnFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.DependentColumnFilter proto; - try { - proto = FilterProtos.DependentColumnFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - final CompareOp valueCompareOp = - CompareOp.valueOf(proto.getCompareFilter().getCompareOp().name()); - ByteArrayComparable valueComparator = null; - try { - if (proto.getCompareFilter().hasComparator()) { - valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator()); - } - } catch (IOException ioe) { - throw new DeserializationException(ioe); - } - return new DependentColumnFilter( - proto.hasColumnFamily()?proto.getColumnFamily().toByteArray():null, - proto.hasColumnQualifier()?proto.getColumnQualifier().toByteArray():null, - proto.getDropDependentColumn(), valueCompareOp, valueComparator); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. 
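A usage sketch for the dependent-column case (not part of this patch; the family "d", qualifier "flag" and the value "1" are invented):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.DependentColumnFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class DependentColumnSketch {
  public static Scan cellsCoveredByFlag() {
    // Keep only cells whose timestamp also appears on d:flag, and only where d:flag
    // equals "1"; dropDependentColumn=true removes the d:flag cells themselves.
    // Per the class javadoc, this filter is not compatible with Scan.setBatch().
    Scan scan = new Scan();
    scan.setFilter(new DependentColumnFilter(Bytes.toBytes("d"), Bytes.toBytes("flag"),
        true, CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("1"))));
    return scan;
  }
}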
- */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof DependentColumnFilter)) return false; - - DependentColumnFilter other = (DependentColumnFilter)o; - return other != null && super.areSerializedFieldsEqual(other) - && Bytes.equals(this.getFamily(), other.getFamily()) - && Bytes.equals(this.getQualifier(), other.getQualifier()) - && this.dropDependentColumn() == other.dropDependentColumn(); - } - - @Override - public String toString() { - return String.format("%s (%s, %s, %s, %s, %s)", - this.getClass().getSimpleName(), - Bytes.toStringBinary(this.columnFamily), - Bytes.toStringBinary(this.columnQualifier), - this.dropDependentColumn, - this.compareOp.name(), - Bytes.toStringBinary(this.comparator.getValue())); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java deleted file mode 100644 index fb7af8d..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java +++ /dev/null @@ -1,130 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - -import java.io.IOException; -import java.util.ArrayList; - -/** - * This filter is used to filter based on the column family. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator for the - * column family portion of a key. - *

<p>
- * This filter can be wrapped with {@link org.apache.hadoop.hbase.filter.WhileMatchFilter} and
- * {@link org.apache.hadoop.hbase.filter.SkipFilter} to add more control.
- * <p>
- * Multiple filters can be combined using {@link org.apache.hadoop.hbase.filter.FilterList}.
- * <p>
          - * If an already known column family is looked for, use {@link org.apache.hadoop.hbase.client.Get#addFamily(byte[])} - * directly rather than a filter. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class FamilyFilter extends CompareFilter { - - /** - * Constructor. - * - * @param familyCompareOp the compare op for column family matching - * @param familyComparator the comparator for column family matching - */ - public FamilyFilter(final CompareOp familyCompareOp, - final ByteArrayComparable familyComparator) { - super(familyCompareOp, familyComparator); - } - - @Override - public ReturnCode filterKeyValue(KeyValue v) { - int familyLength = v.getFamilyLength(); - if (familyLength > 0) { - if (doCompare(this.compareOp, this.comparator, v.getBuffer(), - v.getFamilyOffset(), familyLength)) { - return ReturnCode.SKIP; - } - } - return ReturnCode.INCLUDE; - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - ArrayList arguments = CompareFilter.extractArguments(filterArguments); - CompareOp compareOp = (CompareOp)arguments.get(0); - ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); - return new FamilyFilter(compareOp, comparator); - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.FamilyFilter.Builder builder = - FilterProtos.FamilyFilter.newBuilder(); - builder.setCompareFilter(super.convert()); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link FamilyFilter} instance - * @return An instance of {@link FamilyFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static FamilyFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.FamilyFilter proto; - try { - proto = FilterProtos.FamilyFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - final CompareOp valueCompareOp = - CompareOp.valueOf(proto.getCompareFilter().getCompareOp().name()); - ByteArrayComparable valueComparator = null; - try { - if (proto.getCompareFilter().hasComparator()) { - valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator()); - } - } catch (IOException ioe) { - throw new DeserializationException(ioe); - } - return new FamilyFilter(valueCompareOp,valueComparator); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof FamilyFilter)) return false; - - FamilyFilter other = (FamilyFilter)o; - return super.areSerializedFieldsEqual(other); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/Filter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/Filter.java deleted file mode 100644 index edb456e..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/Filter.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
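A short sketch for the family filter just shown (not part of this patch; the boundary family is arbitrary). As the javadoc above notes, a single known family is better served by Scan.addFamily()/Get.addFamily() than by a filter.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyFilterSketch {
  public static Scan familiesBefore(String boundary) {
    // LESS keeps cells whose column family sorts strictly before the comparator value.
    Scan scan = new Scan();
    scan.setFilter(new FamilyFilter(CompareOp.LESS, new BinaryComparator(Bytes.toBytes(boundary))));
    return scan;
  }
}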
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; - -import java.util.List; - -/** - * Interface for row and column filters directly applied within the regionserver. - * A filter can expect the following call sequence: - *

<ul>
- * <li>{@link #reset()}</li>
- * <li>{@link #filterAllRemaining()} -> true indicates scan is over, false, keep going on.</li>
- * <li>{@link #filterRowKey(byte[],int,int)} -> true to drop this row,
- * if false, we will also call</li>
- * <li>{@link #filterKeyValue(KeyValue)} -> true to drop this key/value</li>
- * <li>{@link #filterRow(List)} -> allows direct modification of the final list to be submitted</li>
- * <li>{@link #filterRow()} -> last chance to drop entire row based on the sequence of
- * filterValue() calls. Eg: filter a row if it doesn't contain a specified column.</li>
- * </ul>
          - * - * Filter instances are created one per region/scan. This abstract class replaces - * the old RowFilterInterface. - * - * When implementing your own filters, consider inheriting {@link FilterBase} to help - * you reduce boilerplate. - * - * @see FilterBase - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public abstract class Filter { - /** - * Reset the state of the filter between rows. - */ - abstract public void reset(); - - /** - * Filters a row based on the row key. If this returns true, the entire - * row will be excluded. If false, each KeyValue in the row will be - * passed to {@link #filterKeyValue(KeyValue)} below. - * - * @param buffer buffer containing row key - * @param offset offset into buffer where row key starts - * @param length length of the row key - * @return true, remove entire row, false, include the row (maybe). - */ - abstract public boolean filterRowKey(byte [] buffer, int offset, int length); - - /** - * If this returns true, the scan will terminate. - * - * @return true to end scan, false to continue. - */ - abstract public boolean filterAllRemaining(); - - /** - * A way to filter based on the column family, column qualifier and/or the - * column value. Return code is described below. This allows filters to - * filter only certain number of columns, then terminate without matching ever - * column. - * - * If your filter returns ReturnCode.NEXT_ROW, it should return - * ReturnCode.NEXT_ROW until {@link #reset()} is called - * just in case the caller calls for the next row. - * - * @param v the KeyValue in question - * @return code as described below - * @see Filter.ReturnCode - */ - abstract public ReturnCode filterKeyValue(final KeyValue v); - - /** - * Give the filter a chance to transform the passed KeyValue. - * If the KeyValue is changed a new KeyValue object must be returned. - * @see org.apache.hadoop.hbase.KeyValue#shallowCopy() - * - * The transformed KeyValue is what is eventually returned to the - * client. Most filters will return the passed KeyValue unchanged. - * @see org.apache.hadoop.hbase.filter.KeyOnlyFilter#transform(KeyValue) - * for an example of a transformation. - * - * @param v the KeyValue in question - * @return the changed KeyValue - */ - abstract public KeyValue transform(final KeyValue v); - - /** - * Return codes for filterValue(). - */ - public enum ReturnCode { - /** - * Include the KeyValue - */ - INCLUDE, - /** - * Include the KeyValue and seek to the next column skipping older versions. - */ - INCLUDE_AND_NEXT_COL, - /** - * Skip this KeyValue - */ - SKIP, - /** - * Skip this column. Go to the next column in this row. - */ - NEXT_COL, - /** - * Done with columns, skip to next row. Note that filterRow() will - * still be called. - */ - NEXT_ROW, - /** - * Seek to next key which is given as hint by the filter. - */ - SEEK_NEXT_USING_HINT, -} - - /** - * Chance to alter the list of keyvalues to be submitted. - * Modifications to the list will carry on - * @param kvs the list of keyvalues to be filtered - */ - abstract public void filterRow(List kvs); - - /** - * @return True if this filter actively uses filterRow(List) or filterRow(). - * Primarily used to check for conflicts with scans(such as scans - * that do not read a full row at a time) - */ - abstract public boolean hasFilterRow(); - - /** - * Last chance to veto row based on previous {@link #filterKeyValue(KeyValue)} - * calls. 
The filter needs to retain state then return a particular value for - * this call if they wish to exclude a row if a certain column is missing - * (for example). - * @return true to exclude row, false to include row. - */ - abstract public boolean filterRow(); - - /** - * If the filter returns the match code SEEK_NEXT_USING_HINT, then - * it should also tell which is the next key it must seek to. - * After receiving the match code SEEK_NEXT_USING_HINT, the QueryMatcher would - * call this function to find out which key it must next seek to. - * @return KeyValue which must be next seeked. return null if the filter is - * not sure which key to seek to next. - */ - abstract public KeyValue getNextKeyHint(final KeyValue currentKV); - - /** - * @return The filter serialized using pb - */ - abstract public byte [] toByteArray(); - - /** - * @param pbBytes A pb serialized {@link Filter} instance - * @return An instance of {@link Filter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static Filter parseFrom(final byte [] pbBytes) throws DeserializationException { - throw new DeserializationException( - "parseFrom called on base Filter, but should be called on derived type"); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - abstract boolean areSerializedFieldsEqual(Filter other); -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java deleted file mode 100644 index b41bc7f..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.KeyValue; - -import java.util.List; -import java.util.ArrayList; - -/** - * Abstract base class to help you implement new Filters. Common "ignore" or NOOP type - * methods can go here, helping to reduce boiler plate in an ever-expanding filter - * library. - * - * If you could instantiate FilterBase, it would end up being a "null" filter - - * that is one that never filters anything. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public abstract class FilterBase extends Filter { - - /** - * Filters that are purely stateless and do nothing in their reset() methods can inherit - * this null/empty implementation. 
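To illustrate the intent of FilterBase, a hedged sketch of a custom filter (not part of this patch) that overrides only the callback it cares about. A real deployment would also supply toByteArray()/parseFrom() so the filter can be shipped to region servers, and the class must be on the server classpath.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.filter.FilterBase;

// Skips cells with empty values and includes everything else; every other Filter
// method falls back to the no-op defaults that FilterBase provides.
public class NonEmptyValueFilter extends FilterBase {
  @Override
  public ReturnCode filterKeyValue(KeyValue kv) {
    return kv.getValueLength() == 0 ? ReturnCode.SKIP : ReturnCode.INCLUDE;
  }
}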
- * - * @inheritDoc - */ - @Override - public void reset() { - } - - /** - * Filters that do not filter by row key can inherit this implementation that - * never filters anything. (ie: returns false). - * - * @inheritDoc - */ - @Override - public boolean filterRowKey(byte [] buffer, int offset, int length) { - return false; - } - - /** - * Filters that never filter all remaining can inherit this implementation that - * never stops the filter early. - * - * @inheritDoc - */ - @Override - public boolean filterAllRemaining() { - return false; - } - - /** - * Filters that dont filter by key value can inherit this implementation that - * includes all KeyValues. - * - * @inheritDoc - */ - @Override - public ReturnCode filterKeyValue(KeyValue ignored) { - return ReturnCode.INCLUDE; - } - - /** - * By default no transformation takes place - * - * @inheritDoc - */ - @Override - public KeyValue transform(KeyValue v) { - return v; - } - - /** - * Filters that never filter by modifying the returned List of KeyValues can - * inherit this implementation that does nothing. - * - * @inheritDoc - */ - @Override - public void filterRow(List ignored) { - } - - /** - * Fitlers that never filter by modifying the returned List of KeyValues can - * inherit this implementation that does nothing. - * - * @inheritDoc - */ - @Override - public boolean hasFilterRow() { - return false; - } - - /** - * Filters that never filter by rows based on previously gathered state from - * {@link #filterKeyValue(KeyValue)} can inherit this implementation that - * never filters a row. - * - * @inheritDoc - */ - @Override - public boolean filterRow() { - return false; - } - - /** - * Filters that are not sure which key must be next seeked to, can inherit - * this implementation that, by default, returns a null KeyValue. - * - * @inheritDoc - */ - public KeyValue getNextKeyHint(KeyValue currentKV) { - return null; - } - - /** - * Given the filter's arguments it constructs the filter - *

          - * @param filterArguments the filter's arguments - * @return constructed filter object - */ - public static Filter createFilterFromArguments(ArrayList filterArguments) { - throw new IllegalArgumentException("This method has not been implemented"); - } - - /** - * Return filter's info for debugging and logging purpose. - */ - public String toString() { - return this.getClass().getSimpleName(); - } - - /** - * Return length 0 byte array for Filters that don't require special serialization - */ - public byte [] toByteArray() { - return new byte[0]; - } - - /** - * Default implementation so that writers of custom filters aren't forced to implement. - * - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter other) { - return true; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java deleted file mode 100644 index 81f9a4b..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java +++ /dev/null @@ -1,378 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * Implementation of {@link Filter} that represents an ordered List of Filters - * which will be evaluated with a specified boolean operator {@link Operator#MUST_PASS_ALL} - * (!AND) or {@link Operator#MUST_PASS_ONE} (!OR). - * Since you can use Filter Lists as children of Filter Lists, you can create a - * hierarchy of filters to be evaluated. - * Defaults to {@link Operator#MUST_PASS_ALL}. - *

          TODO: Fix creation of Configuration on serialization and deserialization. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class FilterList extends Filter { - /** set operator */ - public static enum Operator { - /** !AND */ - MUST_PASS_ALL, - /** !OR */ - MUST_PASS_ONE - } - - private static final Configuration conf = HBaseConfiguration.create(); - private static final int MAX_LOG_FILTERS = 5; - private Operator operator = Operator.MUST_PASS_ALL; - private List filters = new ArrayList(); - - /** - * Constructor that takes a set of {@link Filter}s. The default operator - * MUST_PASS_ALL is assumed. - * - * @param rowFilters list of filters - */ - public FilterList(final List rowFilters) { - if (rowFilters instanceof ArrayList) { - this.filters = rowFilters; - } else { - this.filters = new ArrayList(rowFilters); - } - } - - /** - * Constructor that takes a var arg number of {@link Filter}s. The fefault operator - * MUST_PASS_ALL is assumed. - * @param rowFilters - */ - public FilterList(final Filter... rowFilters) { - this.filters = new ArrayList(Arrays.asList(rowFilters)); - } - - /** - * Constructor that takes an operator. - * - * @param operator Operator to process filter set with. - */ - public FilterList(final Operator operator) { - this.operator = operator; - } - - /** - * Constructor that takes a set of {@link Filter}s and an operator. - * - * @param operator Operator to process filter set with. - * @param rowFilters Set of row filters. - */ - public FilterList(final Operator operator, final List rowFilters) { - this.filters = new ArrayList(rowFilters); - this.operator = operator; - } - - /** - * Constructor that takes a var arg number of {@link Filter}s and an operator. - * - * @param operator Operator to process filter set with. - * @param rowFilters Filters to use - */ - public FilterList(final Operator operator, final Filter... rowFilters) { - this.filters = new ArrayList(Arrays.asList(rowFilters)); - this.operator = operator; - } - - /** - * Get the operator. - * - * @return operator - */ - public Operator getOperator() { - return operator; - } - - /** - * Get the filters. - * - * @return filters - */ - public List getFilters() { - return filters; - } - - /** - * Add a filter. 
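As a quick sketch of how the operators compose, a MUST_PASS_ALL list behaves like a logical AND of its children. The row-key prefix and class name here are made up for the example.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterListSketch {
  // Rows must start with the (hypothetical) prefix "user_" AND only the first
  // KeyValue of each surviving row is returned.
  static Scan buildScan() {
    FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ALL,
        new PrefixFilter(Bytes.toBytes("user_")),
        new FirstKeyOnlyFilter());
    Scan scan = new Scan();
    scan.setFilter(list);
    return scan;
  }
}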
- * - * @param filter another filter - */ - public void addFilter(Filter filter) { - this.filters.add(filter); - } - - @Override - public void reset() { - for (Filter filter : filters) { - filter.reset(); - } - } - - @Override - public boolean filterRowKey(byte[] rowKey, int offset, int length) { - for (Filter filter : filters) { - if (this.operator == Operator.MUST_PASS_ALL) { - if (filter.filterAllRemaining() || - filter.filterRowKey(rowKey, offset, length)) { - return true; - } - } else if (this.operator == Operator.MUST_PASS_ONE) { - if (!filter.filterAllRemaining() && - !filter.filterRowKey(rowKey, offset, length)) { - return false; - } - } - } - return this.operator == Operator.MUST_PASS_ONE; - } - - @Override - public boolean filterAllRemaining() { - for (Filter filter : filters) { - if (filter.filterAllRemaining()) { - if (operator == Operator.MUST_PASS_ALL) { - return true; - } - } else { - if (operator == Operator.MUST_PASS_ONE) { - return false; - } - } - } - return operator == Operator.MUST_PASS_ONE; - } - - @Override - public KeyValue transform(KeyValue v) { - KeyValue current = v; - for (Filter filter : filters) { - current = filter.transform(current); - } - return current; - } - - @Override - public ReturnCode filterKeyValue(KeyValue v) { - ReturnCode rc = operator == Operator.MUST_PASS_ONE? - ReturnCode.SKIP: ReturnCode.INCLUDE; - for (Filter filter : filters) { - if (operator == Operator.MUST_PASS_ALL) { - if (filter.filterAllRemaining()) { - return ReturnCode.NEXT_ROW; - } - ReturnCode code = filter.filterKeyValue(v); - switch (code) { - // Override INCLUDE and continue to evaluate. - case INCLUDE_AND_NEXT_COL: - rc = ReturnCode.INCLUDE_AND_NEXT_COL; - case INCLUDE: - continue; - default: - return code; - } - } else if (operator == Operator.MUST_PASS_ONE) { - if (filter.filterAllRemaining()) { - continue; - } - - switch (filter.filterKeyValue(v)) { - case INCLUDE: - if (rc != ReturnCode.INCLUDE_AND_NEXT_COL) { - rc = ReturnCode.INCLUDE; - } - break; - case INCLUDE_AND_NEXT_COL: - rc = ReturnCode.INCLUDE_AND_NEXT_COL; - // must continue here to evaluate all filters - break; - case NEXT_ROW: - break; - case SKIP: - // continue; - break; - case NEXT_COL: - break; - case SEEK_NEXT_USING_HINT: - break; - default: - throw new IllegalStateException("Received code is not valid."); - } - } - } - return rc; - } - - @Override - public void filterRow(List kvs) { - for (Filter filter : filters) { - filter.filterRow(kvs); - } - } - - @Override - public boolean hasFilterRow() { - for (Filter filter : filters) { - if(filter.hasFilterRow()) { - return true; - } - } - return false; - } - - @Override - public boolean filterRow() { - for (Filter filter : filters) { - if (operator == Operator.MUST_PASS_ALL) { - if (filter.filterAllRemaining() || filter.filterRow()) { - return true; - } - } else if (operator == Operator.MUST_PASS_ONE) { - if (!filter.filterAllRemaining() - && !filter.filterRow()) { - return false; - } - } - } - return operator == Operator.MUST_PASS_ONE; - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.FilterList.Builder builder = - FilterProtos.FilterList.newBuilder(); - builder.setOperator(FilterProtos.FilterList.Operator.valueOf(operator.name())); - for (Filter filter : filters) { - builder.addFilters(ProtobufUtil.toFilter(filter)); - } - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link FilterList} instance - * @return An instance of {@link FilterList} made from bytes - * 
@throws DeserializationException - * @see #toByteArray - */ - public static FilterList parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.FilterList proto; - try { - proto = FilterProtos.FilterList.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - - List rowFilters = new ArrayList(proto.getFiltersCount()); - try { - for (HBaseProtos.Filter filter : proto.getFiltersList()) { - rowFilters.add(ProtobufUtil.toFilter(filter)); - } - } catch (IOException ioe) { - throw new DeserializationException(ioe); - } - return new FilterList(Operator.valueOf(proto.getOperator().name()),rowFilters); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof FilterList)) return false; - - FilterList other = (FilterList)o; - return this.getOperator().equals(other.getOperator()) && - ((this.getFilters() == other.getFilters()) - || this.getFilters().equals(other.getFilters())); - } - - @Override - public KeyValue getNextKeyHint(KeyValue currentKV) { - KeyValue keyHint = null; - for (Filter filter : filters) { - KeyValue curKeyHint = filter.getNextKeyHint(currentKV); - if (curKeyHint == null && operator == Operator.MUST_PASS_ONE) { - // If we ever don't have a hint and this is must-pass-one, then no hint - return null; - } - if (curKeyHint != null) { - // If this is the first hint we find, set it - if (keyHint == null) { - keyHint = curKeyHint; - continue; - } - // There is an existing hint - if (operator == Operator.MUST_PASS_ALL && - KeyValue.COMPARATOR.compare(keyHint, curKeyHint) < 0) { - // If all conditions must pass, we can keep the max hint - keyHint = curKeyHint; - } else if (operator == Operator.MUST_PASS_ONE && - KeyValue.COMPARATOR.compare(keyHint, curKeyHint) > 0) { - // If any condition can pass, we need to keep the min hint - keyHint = curKeyHint; - } - } - } - return keyHint; - } - - @Override - public String toString() { - return toString(MAX_LOG_FILTERS); - } - - protected String toString(int maxFilters) { - int endIndex = this.filters.size() < maxFilters - ? this.filters.size() : maxFilters; - return String.format("%s %s (%d/%d): %s", - this.getClass().getSimpleName(), - this.operator == Operator.MUST_PASS_ALL ? "AND" : "OR", - endIndex, - this.filters.size(), - this.filters.subList(0, endIndex).toString()); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java deleted file mode 100644 index 7a9af35..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.filter; - -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * This is a Filter wrapper class which is used in the server side. Some filter - * related hooks can be defined in this wrapper. The only way to create a - * FilterWrapper instance is passing a client side Filter instance through - * {@link org.apache.hadoop.hbase.client.Scan#getFilter()}. - * - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class FilterWrapper extends Filter { - Filter filter = null; - - public FilterWrapper( Filter filter ) { - if (null == filter) { - // ensure the filter instance is not null - throw new NullPointerException("Cannot create FilterWrapper with null Filter"); - } - this.filter = filter; - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.FilterWrapper.Builder builder = - FilterProtos.FilterWrapper.newBuilder(); - builder.setFilter(ProtobufUtil.toFilter(this.filter)); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link FilterWrapper} instance - * @return An instance of {@link FilterWrapper} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static FilterWrapper parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.FilterWrapper proto; - try { - proto = FilterProtos.FilterWrapper.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - try { - return new FilterWrapper(ProtobufUtil.toFilter(proto.getFilter())); - } catch (IOException ioe) { - throw new DeserializationException(ioe); - } - } - - @Override - public void reset() { - this.filter.reset(); - } - - @Override - public boolean filterAllRemaining() { - return this.filter.filterAllRemaining(); - } - - @Override - public boolean filterRow() { - return this.filter.filterRow(); - } - - @Override - public KeyValue getNextKeyHint(KeyValue currentKV) { - return this.filter.getNextKeyHint(currentKV); - } - - @Override - public boolean filterRowKey(byte[] buffer, int offset, int length) { - return this.filter.filterRowKey(buffer, offset, length); - } - - @Override - public ReturnCode filterKeyValue(KeyValue v) { - return this.filter.filterKeyValue(v); - } - - @Override - public KeyValue transform(KeyValue v) { - return this.filter.transform(v); - } - - @Override - public boolean hasFilterRow() { - return this.filter.hasFilterRow(); - } - - @Override - public void filterRow(List kvs) { - //To fix HBASE-6429, - //Filter with filterRow() returning true is incompatible with scan with limit - //1. 
hasFilterRow() returns true, if either filterRow() or filterRow(kvs) is implemented. - //2. filterRow() is merged with filterRow(kvs), - //so that to make all those row related filtering stuff in the same function. - this.filter.filterRow(kvs); - if (!kvs.isEmpty() && this.filter.filterRow()) { - kvs.clear(); - } - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof FilterWrapper)) return false; - - FilterWrapper other = (FilterWrapper)o; - return this.filter.areSerializedFieldsEqual(other.filter); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java deleted file mode 100644 index 1b63560..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import java.util.ArrayList; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * A filter that will only return the first KV from each row. - *

          - * This filter can be used to more efficiently perform row count operations. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class FirstKeyOnlyFilter extends FilterBase { - private boolean foundKV = false; - - public FirstKeyOnlyFilter() { - } - - public void reset() { - foundKV = false; - } - - public ReturnCode filterKeyValue(KeyValue v) { - if(foundKV) return ReturnCode.NEXT_ROW; - foundKV = true; - return ReturnCode.INCLUDE; - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 0, - "Expected 0 but got: %s", filterArguments.size()); - return new FirstKeyOnlyFilter(); - } - - /** - * @return true if first KV has been found. - */ - protected boolean hasFoundKV() { - return this.foundKV; - } - - /** - * - * @param value update {@link #foundKV} flag with value. - */ - protected void setFoundKV(boolean value) { - this.foundKV = value; - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.FirstKeyOnlyFilter.Builder builder = - FilterProtos.FirstKeyOnlyFilter.newBuilder(); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link FirstKeyOnlyFilter} instance - * @return An instance of {@link FirstKeyOnlyFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static FirstKeyOnlyFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.FirstKeyOnlyFilter proto; - try { - proto = FilterProtos.FirstKeyOnlyFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - - return new FirstKeyOnlyFilter(); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof FirstKeyOnlyFilter)) return false; - - return true; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java deleted file mode 100644 index f479420..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
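Building on the row-count note above, a rough client-side counting sketch pulls only one KeyValue per row instead of whole rows. The table name "t1" and the helper class are hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

public class RowCountSketch {
  // Counts rows in a hypothetical table "t1": each row contributes exactly
  // one first KeyValue, so the scan moves very little data to the client.
  static long countRows(Configuration conf) throws IOException {
    HTable table = new HTable(conf, "t1");
    try {
      Scan scan = new Scan();
      scan.setFilter(new FirstKeyOnlyFilter());
      ResultScanner scanner = table.getScanner(scan);
      long rows = 0;
      while (scanner.next() != null) {
        rows++;
      }
      scanner.close();
      return rows;
    } finally {
      table.close();
    }
  }
}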
- */ - -package org.apache.hadoop.hbase.filter; - -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; -import org.apache.hadoop.hbase.util.Bytes; - -import java.util.Set; -import java.util.TreeSet; - -/** - * The filter looks for the given columns in KeyValue. Once there is a match for - * any one of the columns, it returns ReturnCode.NEXT_ROW for remaining - * KeyValues in the row. - *

          - * Note : It may emit KVs which do not have the given columns in them, if - * these KVs happen to occur before a KV which does have a match. Given this - * caveat, this filter is only useful for special cases - * like {@link org.apache.hadoop.hbase.mapreduce.RowCounter}. - *

          - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter { - - private Set qualifiers; - - /** - * Constructor which takes a set of columns. As soon as first KeyValue - * matching any of these columns is found, filter moves to next row. - * - * @param qualifiers the set of columns to me matched. - */ - public FirstKeyValueMatchingQualifiersFilter(Set qualifiers) { - this.qualifiers = qualifiers; - } - - public ReturnCode filterKeyValue(KeyValue v) { - if (hasFoundKV()) { - return ReturnCode.NEXT_ROW; - } else if (hasOneMatchingQualifier(v)) { - setFoundKV(true); - } - return ReturnCode.INCLUDE; - } - - private boolean hasOneMatchingQualifier(KeyValue v) { - for (byte[] q : qualifiers) { - if (v.matchingQualifier(q)) { - return true; - } - } - return false; - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.FirstKeyValueMatchingQualifiersFilter.Builder builder = - FilterProtos.FirstKeyValueMatchingQualifiersFilter.newBuilder(); - for (byte[] qualifier : qualifiers) { - if (qualifier != null) builder.addQualifiers(ByteString.copyFrom(qualifier)); - } - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link FirstKeyValueMatchingQualifiersFilter} instance - * @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.FirstKeyValueMatchingQualifiersFilter proto; - try { - proto = FilterProtos.FirstKeyValueMatchingQualifiersFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - - TreeSet qualifiers = new TreeSet(Bytes.BYTES_COMPARATOR); - for (ByteString qualifier : proto.getQualifiersList()) { - qualifiers.add(qualifier.toByteArray()); - } - return new FirstKeyValueMatchingQualifiersFilter(qualifiers); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof FirstKeyValueMatchingQualifiersFilter)) return false; - - FirstKeyValueMatchingQualifiersFilter other = (FirstKeyValueMatchingQualifiersFilter)o; - return this.qualifiers.equals(other.qualifiers); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java deleted file mode 100644 index 00d7b12..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java +++ /dev/null @@ -1,333 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.filter; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; - -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * Filters data based on fuzzy row key. Performs fast-forwards during scanning. - * It takes pairs (row key, fuzzy info) to match row keys. Where fuzzy info is - * a byte array with 0 or 1 as its values: - *

- * <ul>
- *  <li>
- *   0 - means that this byte in provided row key is fixed, i.e. row key's byte at same position
- *   must match
- *  </li>
- *  <li>
- *   1 - means that this byte in provided row key is NOT fixed, i.e. row key's byte at this
- *   position can be different from the one in provided row key
- *  </li>
- * </ul>
          - * - * - * Example: - * Let's assume row key format is userId_actionId_year_month. Length of userId is fixed - * and is 4, length of actionId is 2 and year and month are 4 and 2 bytes long respectively. - * - * Let's assume that we need to fetch all users that performed certain action (encoded as "99") - * in Jan of any year. Then the pair (row key, fuzzy info) would be the following: - * row key = "????_99_????_01" (one can use any value instead of "?") - * fuzzy info = "\x01\x01\x01\x01\x00\x00\x00\x00\x01\x01\x01\x01\x00\x00\x00" - * - * I.e. fuzzy info tells the matching mask is "????_99_????_01", where at ? can be any value. - * - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class FuzzyRowFilter extends FilterBase { - private List> fuzzyKeysData; - private boolean done = false; - - public FuzzyRowFilter(List> fuzzyKeysData) { - this.fuzzyKeysData = fuzzyKeysData; - } - - // TODO: possible improvement: save which fuzzy row key to use when providing a hint - @Override - public ReturnCode filterKeyValue(KeyValue kv) { - byte[] rowKey = kv.getRow(); - // assigning "worst" result first and looking for better options - SatisfiesCode bestOption = SatisfiesCode.NO_NEXT; - for (Pair fuzzyData : fuzzyKeysData) { - SatisfiesCode satisfiesCode = - satisfies(rowKey, fuzzyData.getFirst(), fuzzyData.getSecond()); - if (satisfiesCode == SatisfiesCode.YES) { - return ReturnCode.INCLUDE; - } - - if (satisfiesCode == SatisfiesCode.NEXT_EXISTS) { - bestOption = SatisfiesCode.NEXT_EXISTS; - } - } - - if (bestOption == SatisfiesCode.NEXT_EXISTS) { - return ReturnCode.SEEK_NEXT_USING_HINT; - } - - // the only unhandled SatisfiesCode is NO_NEXT, i.e. we are done - done = true; - return ReturnCode.NEXT_ROW; - } - - @Override - public KeyValue getNextKeyHint(KeyValue currentKV) { - byte[] rowKey = currentKV.getRow(); - byte[] nextRowKey = null; - // Searching for the "smallest" row key that satisfies at least one fuzzy row key - for (Pair fuzzyData : fuzzyKeysData) { - byte[] nextRowKeyCandidate = getNextForFuzzyRule(rowKey, - fuzzyData.getFirst(), fuzzyData.getSecond()); - if (nextRowKeyCandidate == null) { - continue; - } - if (nextRowKey == null || Bytes.compareTo(nextRowKeyCandidate, nextRowKey) < 0) { - nextRowKey = nextRowKeyCandidate; - } - } - - if (nextRowKey == null) { - // SHOULD NEVER happen - // TODO: is there a better way than throw exception? (stop the scanner?) - throw new IllegalStateException("No next row key that satisfies fuzzy exists when" + - " getNextKeyHint() is invoked." 
+ - " Filter: " + this.toString() + - " currentKV: " + currentKV.toString()); - } - - return KeyValue.createFirstOnRow(nextRowKey); - } - - @Override - public boolean filterAllRemaining() { - return done; - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.FuzzyRowFilter.Builder builder = - FilterProtos.FuzzyRowFilter.newBuilder(); - for (Pair fuzzyData : fuzzyKeysData) { - BytesBytesPair.Builder bbpBuilder = BytesBytesPair.newBuilder(); - bbpBuilder.setFirst(ByteString.copyFrom(fuzzyData.getFirst())); - bbpBuilder.setSecond(ByteString.copyFrom(fuzzyData.getSecond())); - builder.addFuzzyKeysData(bbpBuilder); - } - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link FuzzyRowFilter} instance - * @return An instance of {@link FuzzyRowFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static FuzzyRowFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.FuzzyRowFilter proto; - try { - proto = FilterProtos.FuzzyRowFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - int count = proto.getFuzzyKeysDataCount(); - ArrayList> fuzzyKeysData= new ArrayList>(count); - for (int i = 0; i < count; ++i) { - BytesBytesPair current = proto.getFuzzyKeysData(i); - byte[] keyBytes = current.getFirst().toByteArray(); - byte[] keyMeta = current.getSecond().toByteArray(); - fuzzyKeysData.add(new Pair(keyBytes, keyMeta)); - } - return new FuzzyRowFilter(fuzzyKeysData); - } - - @Override - public String toString() { - final StringBuilder sb = new StringBuilder(); - sb.append("FuzzyRowFilter"); - sb.append("{fuzzyKeysData="); - for (Pair fuzzyData : fuzzyKeysData) { - sb.append('{').append(Bytes.toStringBinary(fuzzyData.getFirst())).append(":"); - sb.append(Bytes.toStringBinary(fuzzyData.getSecond())).append('}'); - } - sb.append("}, "); - return sb.toString(); - } - - // Utility methods - - static enum SatisfiesCode { - // row satisfies fuzzy rule - YES, - // row doesn't satisfy fuzzy rule, but there's possible greater row that does - NEXT_EXISTS, - // row doesn't satisfy fuzzy rule and there's no greater row that does - NO_NEXT - } - - static SatisfiesCode satisfies(byte[] row, - byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { - return satisfies(row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta); - } - - private static SatisfiesCode satisfies(byte[] row, int offset, int length, - byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { - if (row == null) { - // do nothing, let scan to proceed - return SatisfiesCode.YES; - } - - boolean nextRowKeyCandidateExists = false; - - for (int i = 0; i < fuzzyKeyMeta.length && i < length; i++) { - // First, checking if this position is fixed and not equals the given one - boolean byteAtPositionFixed = fuzzyKeyMeta[i] == 0; - boolean fixedByteIncorrect = byteAtPositionFixed && fuzzyKeyBytes[i] != row[i + offset]; - if (fixedByteIncorrect) { - // in this case there's another row that satisfies fuzzy rule and bigger than this row - if (nextRowKeyCandidateExists) { - return SatisfiesCode.NEXT_EXISTS; - } - - // If this row byte is less than fixed then there's a byte array bigger than - // this row and which satisfies the fuzzy rule. 
Otherwise there's no such byte array: - // this row is simply bigger than any byte array that satisfies the fuzzy rule - boolean rowByteLessThanFixed = (row[i + offset] & 0xFF) < (fuzzyKeyBytes[i] & 0xFF); - return rowByteLessThanFixed ? SatisfiesCode.NEXT_EXISTS : SatisfiesCode.NO_NEXT; - } - - // Second, checking if this position is not fixed and byte value is not the biggest. In this - // case there's a byte array bigger than this row and which satisfies the fuzzy rule. To get - // bigger byte array that satisfies the rule we need to just increase this byte - // (see the code of getNextForFuzzyRule below) by one. - // Note: if non-fixed byte is already at biggest value, this doesn't allow us to say there's - // bigger one that satisfies the rule as it can't be increased. - if (fuzzyKeyMeta[i] == 1 && !isMax(fuzzyKeyBytes[i])) { - nextRowKeyCandidateExists = true; - } - } - - return SatisfiesCode.YES; - } - - private static boolean isMax(byte fuzzyKeyByte) { - return (fuzzyKeyByte & 0xFF) == 255; - } - - static byte[] getNextForFuzzyRule(byte[] row, byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { - return getNextForFuzzyRule(row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta); - } - - /** - * @return greater byte array than given (row) which satisfies the fuzzy rule if it exists, - * null otherwise - */ - private static byte[] getNextForFuzzyRule(byte[] row, int offset, int length, - byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { - // To find out the next "smallest" byte array that satisfies fuzzy rule and "greater" than - // the given one we do the following: - // 1. setting values on all "fixed" positions to the values from fuzzyKeyBytes - // 2. if during the first step given row did not increase, then we increase the value at - // the first "non-fixed" position (where it is not maximum already) - - // It is easier to perform this by using fuzzyKeyBytes copy and setting "non-fixed" position - // values than otherwise. - byte[] result = Arrays.copyOf(fuzzyKeyBytes, - length > fuzzyKeyBytes.length ? length : fuzzyKeyBytes.length); - int toInc = -1; - - boolean increased = false; - for (int i = 0; i < result.length; i++) { - if (i >= fuzzyKeyMeta.length || fuzzyKeyMeta[i] == 1) { - result[i] = row[offset + i]; - if (!isMax(row[i])) { - // this is "non-fixed" position and is not at max value, hence we can increase it - toInc = i; - } - } else if (i < fuzzyKeyMeta.length && fuzzyKeyMeta[i] == 0) { - if ((row[i + offset] & 0xFF) < (fuzzyKeyBytes[i] & 0xFF)) { - // if setting value for any fixed position increased the original array, - // we are OK - increased = true; - break; - } - if ((row[i + offset] & 0xFF) > (fuzzyKeyBytes[i] & 0xFF)) { - // if setting value for any fixed position makes array "smaller", then just stop: - // in case we found some non-fixed position to increase we will do it, otherwise - // there's no "next" row key that satisfies fuzzy rule and "greater" than given row - break; - } - } - } - - if (!increased) { - if (toInc < 0) { - return null; - } - result[toInc]++; - - // Setting all "non-fixed" positions to zeroes to the right of the one we increased so - // that found "next" row key is the smallest possible - for (int i = toInc + 1; i < result.length; i++) { - if (i >= fuzzyKeyMeta.length || fuzzyKeyMeta[i] == 1) { - result[i] = 0; - } - } - } - - return result; - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. 
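A sketch of the class javadoc example above, assuming the userId_actionId_year_month row-key layout it describes; the class and method names are illustrative only.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FuzzyRowFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class FuzzyRowSketch {
  // Matches every user who performed action "99" in January of any year,
  // assuming row keys of the form userId(4)_actionId(2)_year(4)_month(2).
  // Fixed bytes carry 0 in the mask, wildcard bytes carry 1.
  static Scan actionNinetyNineInJanuary() {
    byte[] rowKey = Bytes.toBytes("????_99_????_01");
    byte[] mask = new byte[] {
        1, 1, 1, 1,   // userId: any value
        0, 0, 0, 0,   // "_99_": fixed
        1, 1, 1, 1,   // year: any value
        0, 0, 0 };    // "_01": fixed
    List<Pair<byte[], byte[]>> fuzzyKeys =
        Arrays.asList(new Pair<byte[], byte[]>(rowKey, mask));
    Scan scan = new Scan();
    scan.setFilter(new FuzzyRowFilter(fuzzyKeys));
    return scan;
  }
}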
- */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof FuzzyRowFilter)) return false; - - FuzzyRowFilter other = (FuzzyRowFilter)o; - if (this.fuzzyKeysData.size() != other.fuzzyKeysData.size()) return false; - for (int i = 0; i < fuzzyKeysData.size(); ++i) { - Pair thisData = this.fuzzyKeysData.get(i); - Pair otherData = other.fuzzyKeysData.get(i); - if (!(Bytes.equals(thisData.getFirst(), otherData.getFirst()) - && Bytes.equals(thisData.getSecond(), otherData.getSecond()))) { - return false; - } - } - return true; - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java deleted file mode 100644 index 6fb1a62..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; -import org.apache.hadoop.hbase.util.Bytes; - -import java.util.ArrayList; - -import com.google.common.base.Preconditions; -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * A Filter that stops after the given row. There is no "RowStopFilter" because - * the Scan spec allows you to specify a stop row. - * - * Use this filter to include the stop row, eg: [A,Z]. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class InclusiveStopFilter extends FilterBase { - private byte [] stopRowKey; - private boolean done = false; - - public InclusiveStopFilter(final byte [] stopRowKey) { - this.stopRowKey = stopRowKey; - } - - public byte[] getStopRowKey() { - return this.stopRowKey; - } - - public boolean filterRowKey(byte[] buffer, int offset, int length) { - if (buffer == null) { - //noinspection RedundantIfStatement - if (this.stopRowKey == null) { - return true; //filter... - } - return false; - } - // if stopRowKey is <= buffer, then true, filter row. 
- int cmp = Bytes.compareTo(stopRowKey, 0, stopRowKey.length, - buffer, offset, length); - - if(cmp < 0) { - done = true; - } - return done; - } - - public boolean filterAllRemaining() { - return done; - } - - public static Filter createFilterFromArguments (ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); - byte [] stopRowKey = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - return new InclusiveStopFilter(stopRowKey); - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.InclusiveStopFilter.Builder builder = - FilterProtos.InclusiveStopFilter.newBuilder(); - if (this.stopRowKey != null) builder.setStopRowKey(ByteString.copyFrom(this.stopRowKey)); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link InclusiveStopFilter} instance - * @return An instance of {@link InclusiveStopFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static InclusiveStopFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.InclusiveStopFilter proto; - try { - proto = FilterProtos.InclusiveStopFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new InclusiveStopFilter(proto.hasStopRowKey()?proto.getStopRowKey().toByteArray():null); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof InclusiveStopFilter)) return false; - - InclusiveStopFilter other = (InclusiveStopFilter)o; - return Bytes.equals(this.getStopRowKey(), other.getStopRowKey()); - } - - @Override - public String toString() { - return this.getClass().getSimpleName() + " " + Bytes.toStringBinary(this.stopRowKey); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java deleted file mode 100644 index cf91072..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
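A short sketch of the inclusive range described in the InclusiveStopFilter javadoc above, using made-up start and stop rows.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class InclusiveStopSketch {
  // Scans the closed interval ["A", "Z"]: the start row comes from the Scan
  // itself, the stop row is made inclusive by the filter.
  static Scan closedRange() {
    Scan scan = new Scan(Bytes.toBytes("A"));
    scan.setFilter(new InclusiveStopFilter(Bytes.toBytes("Z")));
    return scan;
  }
}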
- */ -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Used to indicate a filter incompatibility - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class IncompatibleFilterException extends RuntimeException { - private static final long serialVersionUID = 3236763276623198231L; - -/** constructor */ - public IncompatibleFilterException() { - super(); - } - - /** - * constructor - * @param s message - */ - public IncompatibleFilterException(String s) { - super(s); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java deleted file mode 100644 index 251c953..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Used to indicate an invalid RowFilter. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class InvalidRowFilterException extends RuntimeException { - private static final long serialVersionUID = 2667894046345657865L; - - - /** constructor */ - public InvalidRowFilterException() { - super(); - } - - /** - * constructor - * @param s message - */ - public InvalidRowFilterException(String s) { - super(s); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java deleted file mode 100644 index 3bb1390..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.filter; - - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import java.util.ArrayList; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * A filter that will only return the key component of each KV (the value will - * be rewritten as empty). - *

          - * This filter can be used to grab all of the keys without having to also grab - * the values. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class KeyOnlyFilter extends FilterBase { - - boolean lenAsVal; - public KeyOnlyFilter() { this(false); } - public KeyOnlyFilter(boolean lenAsVal) { this.lenAsVal = lenAsVal; } - - @Override - public KeyValue transform(KeyValue kv) { - return kv.createKeyOnly(this.lenAsVal); - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument((filterArguments.size() == 0 || filterArguments.size() == 1), - "Expected: 0 or 1 but got: %s", filterArguments.size()); - KeyOnlyFilter filter = new KeyOnlyFilter(); - if (filterArguments.size() == 1) { - filter.lenAsVal = ParseFilter.convertByteArrayToBoolean(filterArguments.get(0)); - } - return filter; - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.KeyOnlyFilter.Builder builder = - FilterProtos.KeyOnlyFilter.newBuilder(); - builder.setLenAsVal(this.lenAsVal); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link KeyOnlyFilter} instance - * @return An instance of {@link KeyOnlyFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static KeyOnlyFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.KeyOnlyFilter proto; - try { - proto = FilterProtos.KeyOnlyFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new KeyOnlyFilter(proto.getLenAsVal()); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof KeyOnlyFilter)) return false; - - KeyOnlyFilter other = (KeyOnlyFilter)o; - return this.lenAsVal == other.lenAsVal; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java deleted file mode 100644 index be165aa..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
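A minimal sketch of a keys-only scan; pairing KeyOnlyFilter with FirstKeyOnlyFilter is a common way to enumerate row keys cheaply. The class name is illustrative.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;

public class KeysOnlySketch {
  // Strips values from every returned KeyValue; together with
  // FirstKeyOnlyFilter the scan reduces to essentially a stream of row keys.
  static Scan rowKeysOnly() {
    Scan scan = new Scan();
    scan.setFilter(new FilterList(new FirstKeyOnlyFilter(), new KeyOnlyFilter()));
    return scan;
  }
}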
- */ -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; -import org.apache.hadoop.hbase.util.Bytes; - -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; - -import java.util.Arrays; -import java.util.Comparator; -import java.util.TreeSet; -import java.util.ArrayList; - -/** - * This filter is used for selecting only those keys with columns that matches - * a particular prefix. For example, if prefix is 'an', it will pass keys will - * columns like 'and', 'anti' but not keys with columns like 'ball', 'act'. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class MultipleColumnPrefixFilter extends FilterBase { - protected byte [] hint = null; - protected TreeSet sortedPrefixes = createTreeSet(); - private final static int MAX_LOG_PREFIXES = 5; - - public MultipleColumnPrefixFilter(final byte [][] prefixes) { - if (prefixes != null) { - for (int i = 0; i < prefixes.length; i++) { - if (!sortedPrefixes.add(prefixes[i])) - throw new IllegalArgumentException ("prefixes must be distinct"); - } - } - } - - public byte [][] getPrefix() { - int count = 0; - byte [][] temp = new byte [sortedPrefixes.size()][]; - for (byte [] prefixes : sortedPrefixes) { - temp [count++] = prefixes; - } - return temp; - } - - @Override - public ReturnCode filterKeyValue(KeyValue kv) { - if (sortedPrefixes.size() == 0 || kv.getBuffer() == null) { - return ReturnCode.INCLUDE; - } else { - return filterColumn(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength()); - } - } - - public ReturnCode filterColumn(byte[] buffer, int qualifierOffset, int qualifierLength) { - byte [] qualifier = Arrays.copyOfRange(buffer, qualifierOffset, - qualifierLength + qualifierOffset); - TreeSet lesserOrEqualPrefixes = - (TreeSet) sortedPrefixes.headSet(qualifier, true); - - if (lesserOrEqualPrefixes.size() != 0) { - byte [] largestPrefixSmallerThanQualifier = lesserOrEqualPrefixes.last(); - - if (Bytes.startsWith(qualifier, largestPrefixSmallerThanQualifier)) { - return ReturnCode.INCLUDE; - } - - if (lesserOrEqualPrefixes.size() == sortedPrefixes.size()) { - return ReturnCode.NEXT_ROW; - } else { - hint = sortedPrefixes.higher(largestPrefixSmallerThanQualifier); - return ReturnCode.SEEK_NEXT_USING_HINT; - } - } else { - hint = sortedPrefixes.first(); - return ReturnCode.SEEK_NEXT_USING_HINT; - } - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - byte [][] prefixes = new byte [filterArguments.size()][]; - for (int i = 0 ; i < filterArguments.size(); i++) { - byte [] columnPrefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(i)); - prefixes[i] = columnPrefix; - } - return new MultipleColumnPrefixFilter(prefixes); - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.MultipleColumnPrefixFilter.Builder builder = - FilterProtos.MultipleColumnPrefixFilter.newBuilder(); - for (byte [] element : sortedPrefixes) { - if (element != null) builder.addSortedPrefixes(ByteString.copyFrom(element)); - } - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link MultipleColumnPrefixFilter} instance - * @return An instance of {@link MultipleColumnPrefixFilter} made from 
bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static MultipleColumnPrefixFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.MultipleColumnPrefixFilter proto; - try { - proto = FilterProtos.MultipleColumnPrefixFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - int numPrefixes = proto.getSortedPrefixesCount(); - byte [][] prefixes = new byte[numPrefixes][]; - for (int i = 0; i < numPrefixes; ++i) { - prefixes[i] = proto.getSortedPrefixes(i).toByteArray(); - } - - return new MultipleColumnPrefixFilter(prefixes); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof MultipleColumnPrefixFilter)) return false; - - MultipleColumnPrefixFilter other = (MultipleColumnPrefixFilter)o; - return this.sortedPrefixes.equals(other.sortedPrefixes); - } - - public KeyValue getNextKeyHint(KeyValue kv) { - return KeyValue.createFirstOnRow( - kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(), kv.getBuffer(), - kv.getFamilyOffset(), kv.getFamilyLength(), hint, 0, hint.length); - } - - public TreeSet createTreeSet() { - return new TreeSet(new Comparator() { - @Override - public int compare (Object o1, Object o2) { - if (o1 == null || o2 == null) - throw new IllegalArgumentException ("prefixes can't be null"); - - byte [] b1 = (byte []) o1; - byte [] b2 = (byte []) o2; - return Bytes.compareTo (b1, 0, b1.length, b2, 0, b2.length); - } - }); - } - - @Override - public String toString() { - return toString(MAX_LOG_PREFIXES); - } - - protected String toString(int maxPrefixes) { - StringBuilder prefixes = new StringBuilder(); - - int count = 0; - for (byte[] ba : this.sortedPrefixes) { - if (count >= maxPrefixes) { - break; - } - ++count; - prefixes.append(Bytes.toStringBinary(ba)); - if (count < this.sortedPrefixes.size() && count < maxPrefixes) { - prefixes.append(", "); - } - } - - return String.format("%s (%d/%d): [%s]", this.getClass().getSimpleName(), - count, this.sortedPrefixes.size(), prefixes.toString()); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java deleted file mode 100644 index d944d3e..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
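A sketch using the "an" prefix from the MultipleColumnPrefixFilter example above plus a second, made-up prefix "be"; only qualifiers starting with one of these prefixes are returned.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiPrefixSketch {
  // Keeps only columns whose qualifiers start with "an" or "be"; everything
  // else is fast-forwarded past via SEEK_NEXT_USING_HINT.
  static Scan qualifierPrefixes() {
    byte[][] prefixes = new byte[][] { Bytes.toBytes("an"), Bytes.toBytes("be") };
    Scan scan = new Scan();
    scan.setFilter(new MultipleColumnPrefixFilter(prefixes));
    return scan;
  }
}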
- */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * A binary comparator which lexicographically compares against the specified - * byte array using {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class NullComparator extends ByteArrayComparable { - - public NullComparator() { - super(new byte[0]); - } - - @Override - public int compareTo(byte[] value) { - return value != null ? 1 : 0; - } - - @Override - public int compareTo(byte[] value, int offset, int length) { - throw new UnsupportedOperationException(); - } - - /** - * @return The comparator serialized using pb - */ - public byte [] toByteArray() { - ComparatorProtos.NullComparator.Builder builder = - ComparatorProtos.NullComparator.newBuilder(); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link NullComparator} instance - * @return An instance of {@link NullComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static NullComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { - ComparatorProtos.NullComparator proto; - try { - proto = ComparatorProtos.NullComparator.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new NullComparator(); - } - - /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(ByteArrayComparable other) { - if (other == this) return true; - if (!(other instanceof NullComparator)) return false; - - return super.areSerializedFieldsEqual(other); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java deleted file mode 100644 index 2e46288..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java +++ /dev/null @@ -1,126 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import java.util.ArrayList; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; -/** - * Implementation of Filter interface that limits results to a specific page - * size. It terminates scanning once the number of filter-passed rows is > - * the given page size. - *
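As an illustration of the page-size behaviour just described (a sketch only; the page size and the table handle are assumptions):

    // Ask each region server to stop after roughly 25 filter-passed rows.
    Scan scan = new Scan();
    scan.setFilter(new PageFilter(25));
    ResultScanner rs = table.getScanner(scan);   // 'table' is an existing HTable

As the next paragraph notes, the client may still receive more than 25 rows in total, because the limit is enforced per region server.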

          - * Note that this filter cannot guarantee that the number of results returned - * to a client are <= page size. This is because the filter is applied - * separately on different region servers. It does however optimize the scan of - * individual HRegions by making sure that the page size is never exceeded - * locally. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class PageFilter extends FilterBase { - private long pageSize = Long.MAX_VALUE; - private int rowsAccepted = 0; - - /** - * Constructor that takes a maximum page size. - * - * @param pageSize Maximum result size. - */ - public PageFilter(final long pageSize) { - Preconditions.checkArgument(pageSize >= 0, "must be positive %s", pageSize); - this.pageSize = pageSize; - } - - public long getPageSize() { - return pageSize; - } - - public boolean filterAllRemaining() { - return this.rowsAccepted >= this.pageSize; - } - - public boolean filterRow() { - this.rowsAccepted++; - return this.rowsAccepted > this.pageSize; - } - - public boolean hasFilterRow() { - return true; - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); - long pageSize = ParseFilter.convertByteArrayToLong(filterArguments.get(0)); - return new PageFilter(pageSize); - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.PageFilter.Builder builder = - FilterProtos.PageFilter.newBuilder(); - builder.setPageSize(this.pageSize); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link PageFilter} instance - * @return An instance of {@link PageFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static PageFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.PageFilter proto; - try { - proto = FilterProtos.PageFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new PageFilter(proto.getPageSize()); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof PageFilter)) return false; - - PageFilter other = (PageFilter)o; - return this.getPageSize() == other.getPageSize(); - } - - @Override - public String toString() { - return this.getClass().getSimpleName() + " " + this.pageSize; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java deleted file mode 100644 index 449104c..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java +++ /dev/null @@ -1,263 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.filter; - -import java.nio.ByteBuffer; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * ParseConstants holds a bunch of constants related to parsing Filter Strings - * Used by {@link ParseFilter} - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public final class ParseConstants { - - /** - * ASCII code for LPAREN - */ - public static final int LPAREN = '('; - - /** - * ASCII code for RPAREN - */ - public static final int RPAREN = ')'; - - /** - * ASCII code for whitespace - */ - public static final int WHITESPACE = ' '; - - /** - * ASCII code for tab - */ - public static final int TAB = '\t'; - - /** - * ASCII code for 'A' - */ - public static final int A = 'A'; - - /** - * ASCII code for 'N' - */ - public static final int N = 'N'; - - /** - * ASCII code for 'D' - */ - public static final int D = 'D'; - - /** - * ASCII code for 'O' - */ - public static final int O = 'O'; - - /** - * ASCII code for 'R' - */ - public static final int R = 'R'; - - /** - * ASCII code for 'S' - */ - public static final int S = 'S'; - - /** - * ASCII code for 'K' - */ - public static final int K = 'K'; - - /** - * ASCII code for 'I' - */ - public static final int I = 'I'; - - /** - * ASCII code for 'P' - */ - public static final int P = 'P'; - - /** - * SKIP Array - */ - public static final byte [] SKIP_ARRAY = new byte [ ] {'S', 'K', 'I', 'P'}; - public static final ByteBuffer SKIP_BUFFER = ByteBuffer.wrap(SKIP_ARRAY); - - /** - * ASCII code for 'W' - */ - public static final int W = 'W'; - - /** - * ASCII code for 'H' - */ - public static final int H = 'H'; - - /** - * ASCII code for 'L' - */ - public static final int L = 'L'; - - /** - * ASCII code for 'E' - */ - public static final int E = 'E'; - - /** - * WHILE Array - */ - public static final byte [] WHILE_ARRAY = new byte [] {'W', 'H', 'I', 'L', 'E'}; - public static final ByteBuffer WHILE_BUFFER = ByteBuffer.wrap(WHILE_ARRAY); - - /** - * OR Array - */ - public static final byte [] OR_ARRAY = new byte [] {'O','R'}; - public static final ByteBuffer OR_BUFFER = ByteBuffer.wrap(OR_ARRAY); - - /** - * AND Array - */ - public static final byte [] AND_ARRAY = new byte [] {'A','N', 'D'}; - public static final ByteBuffer AND_BUFFER = ByteBuffer.wrap(AND_ARRAY); - - /** - * ASCII code for Backslash - */ - public static final int BACKSLASH = '\\'; - - /** - * ASCII code for a single quote - */ - public static final int SINGLE_QUOTE = '\''; - - /** - * ASCII code for a comma - */ - public static final int COMMA = ','; - - /** - * LESS_THAN Array - */ - public static final byte [] LESS_THAN_ARRAY = new byte [] {'<'}; - public static final ByteBuffer LESS_THAN_BUFFER = ByteBuffer.wrap(LESS_THAN_ARRAY); - - /** - * LESS_THAN_OR_EQUAL_TO Array - */ - public static final byte [] LESS_THAN_OR_EQUAL_TO_ARRAY = new byte [] {'<', '='}; - public static final ByteBuffer LESS_THAN_OR_EQUAL_TO_BUFFER = - ByteBuffer.wrap(LESS_THAN_OR_EQUAL_TO_ARRAY); - - /** - * GREATER_THAN Array - */ - public static final byte [] GREATER_THAN_ARRAY 
= new byte [] {'>'}; - public static final ByteBuffer GREATER_THAN_BUFFER = ByteBuffer.wrap(GREATER_THAN_ARRAY); - - /** - * GREATER_THAN_OR_EQUAL_TO Array - */ - public static final byte [] GREATER_THAN_OR_EQUAL_TO_ARRAY = new byte [] {'>', '='}; - public static final ByteBuffer GREATER_THAN_OR_EQUAL_TO_BUFFER = - ByteBuffer.wrap(GREATER_THAN_OR_EQUAL_TO_ARRAY); - - /** - * EQUAL_TO Array - */ - public static final byte [] EQUAL_TO_ARRAY = new byte [] {'='}; - public static final ByteBuffer EQUAL_TO_BUFFER = ByteBuffer.wrap(EQUAL_TO_ARRAY); - - /** - * NOT_EQUAL_TO Array - */ - public static final byte [] NOT_EQUAL_TO_ARRAY = new byte [] {'!', '='}; - public static final ByteBuffer NOT_EQUAL_TO_BUFFER = ByteBuffer.wrap(NOT_EQUAL_TO_ARRAY); - - /** - * ASCII code for equal to (=) - */ - public static final int EQUAL_TO = '='; - - /** - * AND Byte Array - */ - public static final byte [] AND = new byte [] {'A','N','D'}; - - /** - * OR Byte Array - */ - public static final byte [] OR = new byte [] {'O', 'R'}; - - /** - * LPAREN Array - */ - public static final byte [] LPAREN_ARRAY = new byte [] {'('}; - public static final ByteBuffer LPAREN_BUFFER = ByteBuffer.wrap(LPAREN_ARRAY); - - /** - * ASCII code for colon (:) - */ - public static final int COLON = ':'; - - /** - * ASCII code for Zero - */ - public static final int ZERO = '0'; - - /** - * ASCII code foe Nine - */ - public static final int NINE = '9'; - - /** - * BinaryType byte array - */ - public static final byte [] binaryType = new byte [] {'b','i','n','a','r','y'}; - - /** - * BinaryPrefixType byte array - */ - public static final byte [] binaryPrefixType = new byte [] {'b','i','n','a','r','y', - 'p','r','e','f','i','x'}; - - /** - * RegexStringType byte array - */ - public static final byte [] regexStringType = new byte [] {'r','e','g','e', 'x', - 's','t','r','i','n','g'}; - - /** - * SubstringType byte array - */ - public static final byte [] substringType = new byte [] {'s','u','b','s','t','r','i','n','g'}; - - /** - * ASCII for Minus Sign - */ - public static final int MINUS_SIGN = '-'; - - /** - * Package containing filters - */ - public static final String FILTER_PACKAGE = "org.apache.hadoop.hbase.filter"; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java deleted file mode 100644 index 02100d7..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ /dev/null @@ -1,859 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.filter; - -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.nio.ByteBuffer; -import java.nio.charset.CharacterCodingException; -import java.util.*; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * This class allows a user to specify a filter via a string - * The string is parsed using the methods of this class and - * a filter object is constructed. This filter object is then wrapped - * in a scanner object which is then returned - *
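A minimal sketch of the round trip described here, assuming a user-supplied filter string and the usual client imports:

    // parseFilterString throws CharacterCodingException for malformed input.
    ParseFilter parser = new ParseFilter();
    Filter filter = parser.parseFilterString("PrefixFilter ('row-') AND PageFilter (10)");
    Scan scan = new Scan();
    scan.setFilter(filter);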

          - * This class addresses the HBASE-4168 JIRA. More documentaton on this - * Filter Language can be found at: https://issues.apache.org/jira/browse/HBASE-4176 - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class ParseFilter { - private static final Log LOG = LogFactory.getLog(ParseFilter.class); - - private static HashMap operatorPrecedenceHashMap; - private static HashMap filterHashMap; - - static { - // Registers all the filter supported by the Filter Language - filterHashMap = new HashMap(); - filterHashMap.put("KeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." + - "KeyOnlyFilter"); - filterHashMap.put("FirstKeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." + - "FirstKeyOnlyFilter"); - filterHashMap.put("PrefixFilter", ParseConstants.FILTER_PACKAGE + "." + - "PrefixFilter"); - filterHashMap.put("ColumnPrefixFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnPrefixFilter"); - filterHashMap.put("MultipleColumnPrefixFilter", ParseConstants.FILTER_PACKAGE + "." + - "MultipleColumnPrefixFilter"); - filterHashMap.put("ColumnCountGetFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnCountGetFilter"); - filterHashMap.put("PageFilter", ParseConstants.FILTER_PACKAGE + "." + - "PageFilter"); - filterHashMap.put("ColumnPaginationFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnPaginationFilter"); - filterHashMap.put("InclusiveStopFilter", ParseConstants.FILTER_PACKAGE + "." + - "InclusiveStopFilter"); - filterHashMap.put("TimestampsFilter", ParseConstants.FILTER_PACKAGE + "." + - "TimestampsFilter"); - filterHashMap.put("RowFilter", ParseConstants.FILTER_PACKAGE + "." + - "RowFilter"); - filterHashMap.put("FamilyFilter", ParseConstants.FILTER_PACKAGE + "." + - "FamilyFilter"); - filterHashMap.put("QualifierFilter", ParseConstants.FILTER_PACKAGE + "." + - "QualifierFilter"); - filterHashMap.put("ValueFilter", ParseConstants.FILTER_PACKAGE + "." + - "ValueFilter"); - filterHashMap.put("ColumnRangeFilter", ParseConstants.FILTER_PACKAGE + "." + - "ColumnRangeFilter"); - filterHashMap.put("SingleColumnValueFilter", ParseConstants.FILTER_PACKAGE + "." + - "SingleColumnValueFilter"); - filterHashMap.put("SingleColumnValueExcludeFilter", ParseConstants.FILTER_PACKAGE + "." + - "SingleColumnValueExcludeFilter"); - filterHashMap.put("DependentColumnFilter", ParseConstants.FILTER_PACKAGE + "." + - "DependentColumnFilter"); - - // Creates the operatorPrecedenceHashMap - operatorPrecedenceHashMap = new HashMap(); - operatorPrecedenceHashMap.put(ParseConstants.SKIP_BUFFER, 1); - operatorPrecedenceHashMap.put(ParseConstants.WHILE_BUFFER, 1); - operatorPrecedenceHashMap.put(ParseConstants.AND_BUFFER, 2); - operatorPrecedenceHashMap.put(ParseConstants.OR_BUFFER, 3); - } - - /** - * Parses the filterString and constructs a filter using it - *

          - * @param filterString filter string given by the user - * @return filter object we constructed - */ - public Filter parseFilterString (String filterString) - throws CharacterCodingException { - return parseFilterString(Bytes.toBytes(filterString)); - } - - /** - * Parses the filterString and constructs a filter using it - *

          - * @param filterStringAsByteArray filter string given by the user - * @return filter object we constructed - */ - public Filter parseFilterString (byte [] filterStringAsByteArray) - throws CharacterCodingException { - // stack for the operators and parenthesis - Stack operatorStack = new Stack(); - // stack for the filter objects - Stack filterStack = new Stack(); - - Filter filter = null; - for (int i=0; i - * A simpleFilterExpression is of the form: FilterName('arg', 'arg', 'arg') - * The user given filter string can have many simpleFilterExpressions combined - * using operators. - *
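For example, a hypothetical filter string that combines simple filter expressions with operators (syntax per the HBASE-4176 grammar; the argument values are made up) might look like:

    String filterString =
        "(QualifierFilter (=, 'binary:q1') AND PageFilter (5)) OR PrefixFilter ('row-')";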

          - * This function extracts a simpleFilterExpression from the - * larger filterString given the start offset of the simpler expression - *

          - * @param filterStringAsByteArray filter string given by the user - * @param filterExpressionStartOffset start index of the simple filter expression - * @return byte array containing the simple filter expression - */ - public byte [] extractFilterSimpleExpression (byte [] filterStringAsByteArray, - int filterExpressionStartOffset) - throws CharacterCodingException { - int quoteCount = 0; - for (int i=filterExpressionStartOffset; i - * @param filterStringAsByteArray filter string given by the user - * @return filter object we constructed - */ - public Filter parseSimpleFilterExpression (byte [] filterStringAsByteArray) - throws CharacterCodingException { - - String filterName = Bytes.toString(getFilterName(filterStringAsByteArray)); - ArrayList filterArguments = getFilterArguments(filterStringAsByteArray); - if (!filterHashMap.containsKey(filterName)) { - throw new IllegalArgumentException("Filter Name " + filterName + " not supported"); - } - try { - filterName = filterHashMap.get(filterName); - Class c = Class.forName(filterName); - Class[] argTypes = new Class [] {ArrayList.class}; - Method m = c.getDeclaredMethod("createFilterFromArguments", argTypes); - return (Filter) m.invoke(null,filterArguments); - } catch (ClassNotFoundException e) { - e.printStackTrace(); - } catch (NoSuchMethodException e) { - e.printStackTrace(); - } catch (IllegalAccessException e) { - e.printStackTrace(); - } catch (InvocationTargetException e) { - e.printStackTrace(); - } - throw new IllegalArgumentException("Incorrect filter string " + - new String(filterStringAsByteArray)); - } - -/** - * Returns the filter name given a simple filter expression - *

          - * @param filterStringAsByteArray a simple filter expression - * @return name of filter in the simple filter expression - */ - public static byte [] getFilterName (byte [] filterStringAsByteArray) { - int filterNameStartIndex = 0; - int filterNameEndIndex = 0; - - for (int i=filterNameStartIndex; i - * @param filterStringAsByteArray filter string given by the user - * @return an ArrayList containing the arguments of the filter in the filter string - */ - public static ArrayList getFilterArguments (byte [] filterStringAsByteArray) { - int argumentListStartIndex = KeyValue.getDelimiter(filterStringAsByteArray, 0, - filterStringAsByteArray.length, - ParseConstants.LPAREN); - if (argumentListStartIndex == -1) { - throw new IllegalArgumentException("Incorrect argument list"); - } - - int argumentStartIndex = 0; - int argumentEndIndex = 0; - ArrayList filterArguments = new ArrayList(); - - for (int i = argumentListStartIndex + 1; i, != etc - argumentStartIndex = i; - for (int j = argumentStartIndex; j < filterStringAsByteArray.length; j++) { - if (filterStringAsByteArray[j] == ParseConstants.WHITESPACE || - filterStringAsByteArray[j] == ParseConstants.COMMA || - filterStringAsByteArray[j] == ParseConstants.RPAREN) { - argumentEndIndex = j - 1; - i = j; - byte [] filterArgument = new byte [argumentEndIndex - argumentStartIndex + 1]; - Bytes.putBytes(filterArgument, 0, filterStringAsByteArray, - argumentStartIndex, argumentEndIndex - argumentStartIndex + 1); - filterArguments.add(filterArgument); - break; - } else if (j == filterStringAsByteArray.length - 1) { - throw new IllegalArgumentException("Incorrect argument list"); - } - } - } - } - return filterArguments; - } - -/** - * This function is called while parsing the filterString and an operator is parsed - *

          - * @param operatorStack the stack containing the operators and parenthesis - * @param filterStack the stack containing the filters - * @param operator the operator found while parsing the filterString - */ - public void reduce(Stack operatorStack, - Stack filterStack, - ByteBuffer operator) { - while (!operatorStack.empty() && - !(ParseConstants.LPAREN_BUFFER.equals(operatorStack.peek())) && - hasHigherPriority(operatorStack.peek(), operator)) { - filterStack.push(popArguments(operatorStack, filterStack)); - } - } - - /** - * Pops an argument from the operator stack and the number of arguments required by the operator - * from the filterStack and evaluates them - *

          - * @param operatorStack the stack containing the operators - * @param filterStack the stack containing the filters - * @return the evaluated filter - */ - public static Filter popArguments (Stack operatorStack, Stack filterStack) { - ByteBuffer argumentOnTopOfStack = operatorStack.peek(); - - if (argumentOnTopOfStack.equals(ParseConstants.OR_BUFFER)) { - // The top of the stack is an OR - try { - ArrayList listOfFilters = new ArrayList(); - while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.OR_BUFFER)) { - Filter filter = filterStack.pop(); - listOfFilters.add(0, filter); - operatorStack.pop(); - } - Filter filter = filterStack.pop(); - listOfFilters.add(0, filter); - Filter orFilter = new FilterList(FilterList.Operator.MUST_PASS_ONE, listOfFilters); - return orFilter; - } catch (EmptyStackException e) { - throw new IllegalArgumentException("Incorrect input string - an OR needs two filters"); - } - - } else if (argumentOnTopOfStack.equals(ParseConstants.AND_BUFFER)) { - // The top of the stack is an AND - try { - ArrayList listOfFilters = new ArrayList(); - while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.AND_BUFFER)) { - Filter filter = filterStack.pop(); - listOfFilters.add(0, filter); - operatorStack.pop(); - } - Filter filter = filterStack.pop(); - listOfFilters.add(0, filter); - Filter andFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, listOfFilters); - return andFilter; - } catch (EmptyStackException e) { - throw new IllegalArgumentException("Incorrect input string - an AND needs two filters"); - } - - } else if (argumentOnTopOfStack.equals(ParseConstants.SKIP_BUFFER)) { - // The top of the stack is a SKIP - try { - Filter wrappedFilter = filterStack.pop(); - Filter skipFilter = new SkipFilter(wrappedFilter); - operatorStack.pop(); - return skipFilter; - } catch (EmptyStackException e) { - throw new IllegalArgumentException("Incorrect input string - a SKIP wraps a filter"); - } - - } else if (argumentOnTopOfStack.equals(ParseConstants.WHILE_BUFFER)) { - // The top of the stack is a WHILE - try { - Filter wrappedFilter = filterStack.pop(); - Filter whileMatchFilter = new WhileMatchFilter(wrappedFilter); - operatorStack.pop(); - return whileMatchFilter; - } catch (EmptyStackException e) { - throw new IllegalArgumentException("Incorrect input string - a WHILE wraps a filter"); - } - - } else if (argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER)) { - // The top of the stack is a LPAREN - try { - Filter filter = filterStack.pop(); - operatorStack.pop(); - return filter; - } catch (EmptyStackException e) { - throw new IllegalArgumentException("Incorrect Filter String"); - } - - } else { - throw new IllegalArgumentException("Incorrect arguments on operatorStack"); - } - } - -/** - * Returns which operator has higher precedence - *

          - * If a has higher precedence than b, it returns true - * If they have the same precedence, it returns false - */ - public boolean hasHigherPriority(ByteBuffer a, ByteBuffer b) { - if ((operatorPrecedenceHashMap.get(a) - operatorPrecedenceHashMap.get(b)) < 0) { - return true; - } - return false; - } - -/** - * Removes the single quote escaping a single quote - thus it returns an unescaped argument - *

          - * @param filterStringAsByteArray filter string given by user - * @param argumentStartIndex start index of the argument - * @param argumentEndIndex end index of the argument - * @return returns an unescaped argument - */ - public static byte [] createUnescapdArgument (byte [] filterStringAsByteArray, - int argumentStartIndex, int argumentEndIndex) { - int unescapedArgumentLength = 2; - for (int i = argumentStartIndex + 1; i <= argumentEndIndex - 1; i++) { - unescapedArgumentLength ++; - if (filterStringAsByteArray[i] == ParseConstants.SINGLE_QUOTE && - i != (argumentEndIndex - 1) && - filterStringAsByteArray[i+1] == ParseConstants.SINGLE_QUOTE) { - i++; - continue; - } - } - - byte [] unescapedArgument = new byte [unescapedArgumentLength]; - int count = 1; - unescapedArgument[0] = '\''; - for (int i = argumentStartIndex + 1; i <= argumentEndIndex - 1; i++) { - if (filterStringAsByteArray [i] == ParseConstants.SINGLE_QUOTE && - i != (argumentEndIndex - 1) && - filterStringAsByteArray [i+1] == ParseConstants.SINGLE_QUOTE) { - unescapedArgument[count++] = filterStringAsByteArray [i+1]; - i++; - } - else { - unescapedArgument[count++] = filterStringAsByteArray [i]; - } - } - unescapedArgument[unescapedArgumentLength - 1] = '\''; - return unescapedArgument; - } - -/** - * Checks if the current index of filter string we are on is the beginning of the keyword 'OR' - *

          - * @param filterStringAsByteArray filter string given by the user - * @param indexOfOr index at which an 'O' was read - * @return true if the keyword 'OR' is at the current index - */ - public static boolean checkForOr (byte [] filterStringAsByteArray, int indexOfOr) - throws CharacterCodingException, ArrayIndexOutOfBoundsException { - - try { - if (filterStringAsByteArray[indexOfOr] == ParseConstants.O && - filterStringAsByteArray[indexOfOr+1] == ParseConstants.R && - (filterStringAsByteArray[indexOfOr-1] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfOr-1] == ParseConstants.RPAREN) && - (filterStringAsByteArray[indexOfOr+2] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfOr+2] == ParseConstants.LPAREN)) { - return true; - } else { - return false; - } - } catch (ArrayIndexOutOfBoundsException e) { - return false; - } - } - -/** - * Checks if the current index of filter string we are on is the beginning of the keyword 'AND' - *

          - * @param filterStringAsByteArray filter string given by the user - * @param indexOfAnd index at which an 'A' was read - * @return true if the keyword 'AND' is at the current index - */ - public static boolean checkForAnd (byte [] filterStringAsByteArray, int indexOfAnd) - throws CharacterCodingException { - - try { - if (filterStringAsByteArray[indexOfAnd] == ParseConstants.A && - filterStringAsByteArray[indexOfAnd+1] == ParseConstants.N && - filterStringAsByteArray[indexOfAnd+2] == ParseConstants.D && - (filterStringAsByteArray[indexOfAnd-1] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfAnd-1] == ParseConstants.RPAREN) && - (filterStringAsByteArray[indexOfAnd+3] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfAnd+3] == ParseConstants.LPAREN)) { - return true; - } else { - return false; - } - } catch (ArrayIndexOutOfBoundsException e) { - return false; - } - } - -/** - * Checks if the current index of filter string we are on is the beginning of the keyword 'SKIP' - *

          - * @param filterStringAsByteArray filter string given by the user - * @param indexOfSkip index at which an 'S' was read - * @return true if the keyword 'SKIP' is at the current index - */ - public static boolean checkForSkip (byte [] filterStringAsByteArray, int indexOfSkip) - throws CharacterCodingException { - - try { - if (filterStringAsByteArray[indexOfSkip] == ParseConstants.S && - filterStringAsByteArray[indexOfSkip+1] == ParseConstants.K && - filterStringAsByteArray[indexOfSkip+2] == ParseConstants.I && - filterStringAsByteArray[indexOfSkip+3] == ParseConstants.P && - (indexOfSkip == 0 || - filterStringAsByteArray[indexOfSkip-1] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfSkip-1] == ParseConstants.RPAREN || - filterStringAsByteArray[indexOfSkip-1] == ParseConstants.LPAREN) && - (filterStringAsByteArray[indexOfSkip+4] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfSkip+4] == ParseConstants.LPAREN)) { - return true; - } else { - return false; - } - } catch (ArrayIndexOutOfBoundsException e) { - return false; - } - } - -/** - * Checks if the current index of filter string we are on is the beginning of the keyword 'WHILE' - *

          - * @param filterStringAsByteArray filter string given by the user - * @param indexOfWhile index at which an 'W' was read - * @return true if the keyword 'WHILE' is at the current index - */ - public static boolean checkForWhile (byte [] filterStringAsByteArray, int indexOfWhile) - throws CharacterCodingException { - - try { - if (filterStringAsByteArray[indexOfWhile] == ParseConstants.W && - filterStringAsByteArray[indexOfWhile+1] == ParseConstants.H && - filterStringAsByteArray[indexOfWhile+2] == ParseConstants.I && - filterStringAsByteArray[indexOfWhile+3] == ParseConstants.L && - filterStringAsByteArray[indexOfWhile+4] == ParseConstants.E && - (indexOfWhile == 0 || filterStringAsByteArray[indexOfWhile-1] == ParseConstants.WHITESPACE - || filterStringAsByteArray[indexOfWhile-1] == ParseConstants.RPAREN || - filterStringAsByteArray[indexOfWhile-1] == ParseConstants.LPAREN) && - (filterStringAsByteArray[indexOfWhile+5] == ParseConstants.WHITESPACE || - filterStringAsByteArray[indexOfWhile+5] == ParseConstants.LPAREN)) { - return true; - } else { - return false; - } - } catch (ArrayIndexOutOfBoundsException e) { - return false; - } - } - -/** - * Returns a boolean indicating whether the quote was escaped or not - *

          - * @param array byte array in which the quote was found - * @param quoteIndex index of the single quote - * @return returns true if the quote was unescaped - */ - public static boolean isQuoteUnescaped (byte [] array, int quoteIndex) { - if (array == null) { - throw new IllegalArgumentException("isQuoteUnescaped called with a null array"); - } - - if (quoteIndex == array.length - 1 || array[quoteIndex+1] != ParseConstants.SINGLE_QUOTE) { - return true; - } - else { - return false; - } - } - -/** - * Takes a quoted byte array and converts it into an unquoted byte array - * For example: given a byte array representing 'abc', it returns a - * byte array representing abc - *

          - * @param quotedByteArray the quoted byte array - * @return Unquoted byte array - */ - public static byte [] removeQuotesFromByteArray (byte [] quotedByteArray) { - if (quotedByteArray == null || - quotedByteArray.length < 2 || - quotedByteArray[0] != ParseConstants.SINGLE_QUOTE || - quotedByteArray[quotedByteArray.length - 1] != ParseConstants.SINGLE_QUOTE) { - throw new IllegalArgumentException("removeQuotesFromByteArray needs a quoted byte array"); - } else { - byte [] targetString = new byte [quotedByteArray.length - 2]; - Bytes.putBytes(targetString, 0, quotedByteArray, 1, quotedByteArray.length - 2); - return targetString; - } - } - -/** - * Converts an int expressed in a byte array to an actual int - *

          - * This doesn't use Bytes.toInt because that assumes - * that there will be {@link Bytes#SIZEOF_INT} bytes available. - *

          - * @param numberAsByteArray the int value expressed as a byte array - * @return the int value - */ - public static int convertByteArrayToInt (byte [] numberAsByteArray) { - - long tempResult = ParseFilter.convertByteArrayToLong(numberAsByteArray); - - if (tempResult > Integer.MAX_VALUE) { - throw new IllegalArgumentException("Integer Argument too large"); - } else if (tempResult < Integer.MIN_VALUE) { - throw new IllegalArgumentException("Integer Argument too small"); - } - - int result = (int) tempResult; - return result; - } - -/** - * Converts a long expressed in a byte array to an actual long - *

          - * This doesn't use Bytes.toLong because that assumes - * that there will be {@link Bytes#SIZEOF_INT} bytes available. - *

          - * @param numberAsByteArray the long value expressed as a byte array - * @return the long value - */ - public static long convertByteArrayToLong (byte [] numberAsByteArray) { - if (numberAsByteArray == null) { - throw new IllegalArgumentException("convertByteArrayToLong called with a null array"); - } - - int i = 0; - long result = 0; - boolean isNegative = false; - - if (numberAsByteArray[i] == ParseConstants.MINUS_SIGN) { - i++; - isNegative = true; - } - - while (i != numberAsByteArray.length) { - if (numberAsByteArray[i] < ParseConstants.ZERO || - numberAsByteArray[i] > ParseConstants.NINE) { - throw new IllegalArgumentException("Byte Array should only contain digits"); - } - result = result*10 + (numberAsByteArray[i] - ParseConstants.ZERO); - if (result < 0) { - throw new IllegalArgumentException("Long Argument too large"); - } - i++; - } - - if (isNegative) { - return -result; - } else { - return result; - } - } - -/** - * Converts a boolean expressed in a byte array to an actual boolean - *

          - * This doesn't used Bytes.toBoolean because Bytes.toBoolean(byte []) - * assumes that 1 stands for true and 0 for false. - * Here, the byte array representing "true" and "false" is parsed - *

          - * @param booleanAsByteArray the boolean value expressed as a byte array - * @return the boolean value - */ - public static boolean convertByteArrayToBoolean (byte [] booleanAsByteArray) { - if (booleanAsByteArray == null) { - throw new IllegalArgumentException("convertByteArrayToBoolean called with a null array"); - } - - if (booleanAsByteArray.length == 4 && - (booleanAsByteArray[0] == 't' || booleanAsByteArray[0] == 'T') && - (booleanAsByteArray[1] == 'r' || booleanAsByteArray[1] == 'R') && - (booleanAsByteArray[2] == 'u' || booleanAsByteArray[2] == 'U') && - (booleanAsByteArray[3] == 'e' || booleanAsByteArray[3] == 'E')) { - return true; - } - else if (booleanAsByteArray.length == 5 && - (booleanAsByteArray[0] == 'f' || booleanAsByteArray[0] == 'F') && - (booleanAsByteArray[1] == 'a' || booleanAsByteArray[1] == 'A') && - (booleanAsByteArray[2] == 'l' || booleanAsByteArray[2] == 'L') && - (booleanAsByteArray[3] == 's' || booleanAsByteArray[3] == 'S') && - (booleanAsByteArray[4] == 'e' || booleanAsByteArray[4] == 'E')) { - return false; - } - else { - throw new IllegalArgumentException("Incorrect Boolean Expression"); - } - } - -/** - * Takes a compareOperator symbol as a byte array and returns the corresponding CompareOperator - *

          - * @param compareOpAsByteArray the comparatorOperator symbol as a byte array - * @return the Compare Operator - */ - public static CompareFilter.CompareOp createCompareOp (byte [] compareOpAsByteArray) { - ByteBuffer compareOp = ByteBuffer.wrap(compareOpAsByteArray); - if (compareOp.equals(ParseConstants.LESS_THAN_BUFFER)) - return CompareOp.LESS; - else if (compareOp.equals(ParseConstants.LESS_THAN_OR_EQUAL_TO_BUFFER)) - return CompareOp.LESS_OR_EQUAL; - else if (compareOp.equals(ParseConstants.GREATER_THAN_BUFFER)) - return CompareOp.GREATER; - else if (compareOp.equals(ParseConstants.GREATER_THAN_OR_EQUAL_TO_BUFFER)) - return CompareOp.GREATER_OR_EQUAL; - else if (compareOp.equals(ParseConstants.NOT_EQUAL_TO_BUFFER)) - return CompareOp.NOT_EQUAL; - else if (compareOp.equals(ParseConstants.EQUAL_TO_BUFFER)) - return CompareOp.EQUAL; - else - throw new IllegalArgumentException("Invalid compare operator"); - } - -/** - * Parses a comparator of the form comparatorType:comparatorValue form and returns a comparator - *
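For instance, a compare-operator symbol such as ">=" and a comparator string of the form "binary:abc" (values chosen for illustration) would be resolved as follows:

    CompareFilter.CompareOp op = ParseFilter.createCompareOp(Bytes.toBytes(">="));
    ByteArrayComparable cmp  = ParseFilter.createComparator(Bytes.toBytes("binary:abc"));
    // op  -> CompareOp.GREATER_OR_EQUAL
    // cmp -> a BinaryComparator over the bytes of "abc"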

          - * @param comparator the comparator in the form comparatorType:comparatorValue - * @return the parsed comparator - */ - public static ByteArrayComparable createComparator (byte [] comparator) { - if (comparator == null) - throw new IllegalArgumentException("Incorrect Comparator"); - byte [][] parsedComparator = ParseFilter.parseComparator(comparator); - byte [] comparatorType = parsedComparator[0]; - byte [] comparatorValue = parsedComparator[1]; - - - if (Bytes.equals(comparatorType, ParseConstants.binaryType)) - return new BinaryComparator(comparatorValue); - else if (Bytes.equals(comparatorType, ParseConstants.binaryPrefixType)) - return new BinaryPrefixComparator(comparatorValue); - else if (Bytes.equals(comparatorType, ParseConstants.regexStringType)) - return new RegexStringComparator(new String(comparatorValue)); - else if (Bytes.equals(comparatorType, ParseConstants.substringType)) - return new SubstringComparator(new String(comparatorValue)); - else - throw new IllegalArgumentException("Incorrect comparatorType"); - } - -/** - * Splits a column in comparatorType:comparatorValue form into separate byte arrays - *

          - * @param comparator the comparator - * @return the parsed arguments of the comparator as a 2D byte array - */ - public static byte [][] parseComparator (byte [] comparator) { - final int index = KeyValue.getDelimiter(comparator, 0, comparator.length, ParseConstants.COLON); - if (index == -1) { - throw new IllegalArgumentException("Incorrect comparator"); - } - - byte [][] result = new byte [2][0]; - result[0] = new byte [index]; - System.arraycopy(comparator, 0, result[0], 0, index); - - final int len = comparator.length - (index + 1); - result[1] = new byte[len]; - System.arraycopy(comparator, index + 1, result[1], 0, len); - - return result; - } - -/** - * Return a Set of filters supported by the Filter Language - */ - public Set getSupportedFilters () { - return filterHashMap.keySet(); - } - - /** - * Returns all known filters - * @return an unmodifiable map of filters - */ - public static Map getAllFilters() { - return Collections.unmodifiableMap(filterHashMap); - } - - /** - * Register a new filter with the parser. If the filter is already registered, - * an IllegalArgumentException will be thrown. - * - * @param name a name for the filter - * @param filterClass fully qualified class name - */ - public static void registerFilter(String name, String filterClass) { - if(LOG.isInfoEnabled()) - LOG.info("Registering new filter " + name); - - filterHashMap.put(name, filterClass); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java deleted file mode 100644 index 9c37b3e..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; -import org.apache.hadoop.hbase.util.Bytes; - -import java.util.ArrayList; - -import com.google.common.base.Preconditions; -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * Pass results that have same row prefix. 
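A minimal usage sketch of the row-prefix match described here (the prefix value is an illustrative assumption):

    Scan scan = new Scan();
    scan.setFilter(new PrefixFilter(Bytes.toBytes("row-2012")));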
- */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class PrefixFilter extends FilterBase { - protected byte [] prefix = null; - protected boolean passedPrefix = false; - - public PrefixFilter(final byte [] prefix) { - this.prefix = prefix; - } - - public byte[] getPrefix() { - return prefix; - } - - public boolean filterRowKey(byte[] buffer, int offset, int length) { - if (buffer == null || this.prefix == null) - return true; - if (length < prefix.length) - return true; - // if they are equal, return false => pass row - // else return true, filter row - // if we are passed the prefix, set flag - int cmp = Bytes.compareTo(buffer, offset, this.prefix.length, this.prefix, 0, - this.prefix.length); - if(cmp > 0) { - passedPrefix = true; - } - return cmp != 0; - } - - public boolean filterAllRemaining() { - return passedPrefix; - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 1, - "Expected 1 but got: %s", filterArguments.size()); - byte [] prefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - return new PrefixFilter(prefix); - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.PrefixFilter.Builder builder = - FilterProtos.PrefixFilter.newBuilder(); - if (this.prefix != null) builder.setPrefix(ByteString.copyFrom(this.prefix)); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link PrefixFilter} instance - * @return An instance of {@link PrefixFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static PrefixFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.PrefixFilter proto; - try { - proto = FilterProtos.PrefixFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new PrefixFilter(proto.hasPrefix()?proto.getPrefix().toByteArray():null); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof PrefixFilter)) return false; - - PrefixFilter other = (PrefixFilter)o; - return Bytes.equals(this.getPrefix(), other.getPrefix()); - } - - @Override - public String toString() { - return this.getClass().getSimpleName() + " " + Bytes.toStringBinary(this.prefix); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java deleted file mode 100644 index 24dcb60..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java +++ /dev/null @@ -1,129 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - -import java.io.IOException; -import java.util.ArrayList; - -/** - * This filter is used to filter based on the column qualifier. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator for the - * column qualifier portion of a key. - *
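A minimal sketch of the operator-plus-comparator usage described here (the qualifier name is assumed for illustration):

    Scan scan = new Scan();
    scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes("q1"))));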

          - * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter} - * to add more control. - *

          - * Multiple filters can be combined using {@link FilterList}. - *

          - * If an already known column qualifier is looked for, use {@link Get#addColumn} - * directly rather than a filter. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class QualifierFilter extends CompareFilter { - - /** - * Constructor. - * @param op the compare op for column qualifier matching - * @param qualifierComparator the comparator for column qualifier matching - */ - public QualifierFilter(final CompareOp op, - final ByteArrayComparable qualifierComparator) { - super(op, qualifierComparator); - } - - @Override - public ReturnCode filterKeyValue(KeyValue v) { - int qualifierLength = v.getQualifierLength(); - if (qualifierLength > 0) { - if (doCompare(this.compareOp, this.comparator, v.getBuffer(), - v.getQualifierOffset(), qualifierLength)) { - return ReturnCode.SKIP; - } - } - return ReturnCode.INCLUDE; - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - ArrayList arguments = CompareFilter.extractArguments(filterArguments); - CompareOp compareOp = (CompareOp)arguments.get(0); - ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); - return new QualifierFilter(compareOp, comparator); - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.QualifierFilter.Builder builder = - FilterProtos.QualifierFilter.newBuilder(); - builder.setCompareFilter(super.convert()); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link QualifierFilter} instance - * @return An instance of {@link QualifierFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static QualifierFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.QualifierFilter proto; - try { - proto = FilterProtos.QualifierFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - final CompareOp valueCompareOp = - CompareOp.valueOf(proto.getCompareFilter().getCompareOp().name()); - ByteArrayComparable valueComparator = null; - try { - if (proto.getCompareFilter().hasComparator()) { - valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator()); - } - } catch (IOException ioe) { - throw new DeserializationException(ioe); - } - return new QualifierFilter(valueCompareOp,valueComparator); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof QualifierFilter)) return false; - - return super.areSerializedFieldsEqual(o); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java deleted file mode 100644 index 9724369..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java +++ /dev/null @@ -1,150 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import java.util.Random; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * A filter that includes rows based on a chance. - * - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class RandomRowFilter extends FilterBase { - protected static final Random random = new Random(); - - protected float chance; - protected boolean filterOutRow; - - /** - * Create a new filter with a specified chance for a row to be included. - * - * @param chance - */ - public RandomRowFilter(float chance) { - this.chance = chance; - } - - /** - * @return The chance that a row gets included. - */ - public float getChance() { - return chance; - } - - /** - * Set the chance that a row is included. - * - * @param chance - */ - public void setChance(float chance) { - this.chance = chance; - } - - @Override - public boolean filterAllRemaining() { - return false; - } - - @Override - public ReturnCode filterKeyValue(KeyValue v) { - if (filterOutRow) { - return ReturnCode.NEXT_ROW; - } - return ReturnCode.INCLUDE; - } - - @Override - public boolean filterRow() { - return filterOutRow; - } - - public boolean hasFilterRow() { - return true; - } - - @Override - public boolean filterRowKey(byte[] buffer, int offset, int length) { - if (chance < 0) { - // with a zero chance, the rows is always excluded - filterOutRow = true; - } else if (chance > 1) { - // always included - filterOutRow = false; - } else { - // roll the dice - filterOutRow = !(random.nextFloat() < chance); - } - return filterOutRow; - } - - @Override - public void reset() { - filterOutRow = false; - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.RandomRowFilter.Builder builder = - FilterProtos.RandomRowFilter.newBuilder(); - builder.setChance(this.chance); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link RandomRowFilter} instance - * @return An instance of {@link RandomRowFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static RandomRowFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.RandomRowFilter proto; - try { - proto = FilterProtos.RandomRowFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new RandomRowFilter(proto.getChance()); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. 
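For context, a minimal sketch of the chance-based row sampling that this RandomRowFilter provides (the 25% chance is an illustrative assumption):

    Scan scan = new Scan();
    scan.setFilter(new RandomRowFilter(0.25f));   // include each row with ~25% probability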
- */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof RandomRowFilter)) return false; - - RandomRowFilter other = (RandomRowFilter)o; - return this.getChance() == other.getChance(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java deleted file mode 100644 index 96c35c3..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java +++ /dev/null @@ -1,174 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; -import org.apache.hadoop.hbase.util.Bytes; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import com.google.protobuf.InvalidProtocolBufferException; - -import java.nio.charset.Charset; -import java.nio.charset.IllegalCharsetNameException; -import java.util.regex.Pattern; - -/** - * This comparator is for use with {@link CompareFilter} implementations, such - * as {@link RowFilter}, {@link QualifierFilter}, and {@link ValueFilter}, for - * filtering based on the value of a given column. Use it to test if a given - * regular expression matches a cell value in the column. - *

- * Only EQUAL or NOT_EQUAL comparisons are valid with this comparator.
- * <p>
- * For example:
- * <p>
- * <pre>
- * ValueFilter vf = new ValueFilter(CompareOp.EQUAL,
- *     new RegexStringComparator(
- *       // v4 IP address
- *       "(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3,3}" +
- *         "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))(\\/[0-9]+)?" +
- *         "|" +
- *       // v6 IP address
- *       "((([\\dA-Fa-f]{1,4}:){7}[\\dA-Fa-f]{1,4})(:([\\d]{1,3}.)" +
- *         "{3}[\\d]{1,3})?)(\\/[0-9]+)?"));
- * </pre>
- * Supports {@link java.util.regex.Pattern} flags as well:
- * <p>
- * <pre>
- * ValueFilter vf = new ValueFilter(CompareOp.EQUAL,
- *     new RegexStringComparator("regex", Pattern.CASE_INSENSITIVE | Pattern.DOTALL));
- * </pre>
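As a hedged sketch only, the same comparator can also be applied to row keys through a RowFilter; the pattern and the unqualified CompareOp are illustrative assumptions, not part of the moved code:

    // Keep only rows whose key matches a (hypothetical) "user|<digits>" pattern.
    Scan scan = new Scan();
    scan.setFilter(new RowFilter(CompareOp.EQUAL,
        new RegexStringComparator("^user\\|[0-9]+$")));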
          - * @see java.util.regex.Pattern - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class RegexStringComparator extends ByteArrayComparable { - - private static final Log LOG = LogFactory.getLog(RegexStringComparator.class); - - private Charset charset = HConstants.UTF8_CHARSET; - - private Pattern pattern; - - /** - * Constructor - * Adds Pattern.DOTALL to the underlying Pattern - * @param expr a valid regular expression - */ - public RegexStringComparator(String expr) { - this(expr, Pattern.DOTALL); - } - - /** - * Constructor - * @param expr a valid regular expression - * @param flags java.util.regex.Pattern flags - */ - public RegexStringComparator(String expr, int flags) { - super(Bytes.toBytes(expr)); - this.pattern = Pattern.compile(expr, flags); - } - - /** - * Specifies the {@link Charset} to use to convert the row key to a String. - *

- * The row key needs to be converted to a String in order to be matched
- * against the regular expression. This method controls which charset is
- * used to do this conversion.
- * <p>
          - * If the row key is made of arbitrary bytes, the charset {@code ISO-8859-1} - * is recommended. - * @param charset The charset to use. - */ - public void setCharset(final Charset charset) { - this.charset = charset; - } - - @Override - public int compareTo(byte[] value, int offset, int length) { - // Use find() for subsequence match instead of matches() (full sequence - // match) to adhere to the principle of least surprise. - return pattern.matcher(new String(value, offset, length, charset)).find() ? 0 - : 1; - } - - /** - * @return The comparator serialized using pb - */ - public byte [] toByteArray() { - ComparatorProtos.RegexStringComparator.Builder builder = - ComparatorProtos.RegexStringComparator.newBuilder(); - builder.setPattern(pattern.toString()); - builder.setPatternFlags(pattern.flags()); - builder.setCharset(charset.name()); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link RegexStringComparator} instance - * @return An instance of {@link RegexStringComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static RegexStringComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { - ComparatorProtos.RegexStringComparator proto; - try { - proto = ComparatorProtos.RegexStringComparator.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - - RegexStringComparator comparator = - new RegexStringComparator(proto.getPattern(), proto.getPatternFlags()); - final String charset = proto.getCharset(); - if (charset.length() > 0) { - try { - comparator.setCharset(Charset.forName(charset)); - } catch (IllegalCharsetNameException e) { - LOG.error("invalid charset", e); - } - } - return comparator; - } - - /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(ByteArrayComparable other) { - if (other == this) return true; - if (!(other instanceof RegexStringComparator)) return false; - - RegexStringComparator comparator = (RegexStringComparator)other; - return super.areSerializedFieldsEqual(comparator) - && this.pattern.toString().equals(comparator.pattern.toString()) - && this.pattern.flags() == comparator.pattern.flags() - && this.charset.equals(comparator.charset); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java deleted file mode 100644 index 0226a13..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java +++ /dev/null @@ -1,144 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import java.io.IOException; -import java.util.ArrayList; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * This filter is used to filter based on the key. It takes an operator - * (equal, greater, not equal, etc) and a byte [] comparator for the row, - * and column qualifier portions of a key. - *

- * This filter can be wrapped with {@link WhileMatchFilter} to add more control.
- * <p>
- * Multiple filters can be combined using {@link FilterList}.
- * <p>
          - * If an already known row range needs to be scanned, use {@link Scan} start - * and stop rows directly rather than a filter. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class RowFilter extends CompareFilter { - - private boolean filterOutRow = false; - - /** - * Constructor. - * @param rowCompareOp the compare op for row matching - * @param rowComparator the comparator for row matching - */ - public RowFilter(final CompareOp rowCompareOp, - final ByteArrayComparable rowComparator) { - super(rowCompareOp, rowComparator); - } - - @Override - public void reset() { - this.filterOutRow = false; - } - - @Override - public ReturnCode filterKeyValue(KeyValue v) { - if(this.filterOutRow) { - return ReturnCode.NEXT_ROW; - } - return ReturnCode.INCLUDE; - } - - @Override - public boolean filterRowKey(byte[] data, int offset, int length) { - if(doCompare(this.compareOp, this.comparator, data, offset, length)) { - this.filterOutRow = true; - } - return this.filterOutRow; - } - - @Override - public boolean filterRow() { - return this.filterOutRow; - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - ArrayList arguments = CompareFilter.extractArguments(filterArguments); - CompareOp compareOp = (CompareOp)arguments.get(0); - ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); - return new RowFilter(compareOp, comparator); - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.RowFilter.Builder builder = - FilterProtos.RowFilter.newBuilder(); - builder.setCompareFilter(super.convert()); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link RowFilter} instance - * @return An instance of {@link RowFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static RowFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.RowFilter proto; - try { - proto = FilterProtos.RowFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - final CompareOp valueCompareOp = - CompareOp.valueOf(proto.getCompareFilter().getCompareOp().name()); - ByteArrayComparable valueComparator = null; - try { - if (proto.getCompareFilter().hasComparator()) { - valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator()); - } - } catch (IOException ioe) { - throw new DeserializationException(ioe); - } - return new RowFilter(valueCompareOp,valueComparator); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof RowFilter)) return false; - - return super.areSerializedFieldsEqual(o); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java deleted file mode 100644 index c838db5..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java +++ /dev/null @@ -1,178 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - -import java.io.IOException; -import java.util.ArrayList; - -/** - * A {@link Filter} that checks a single column value, but does not emit the - * tested column. This will enable a performance boost over - * {@link SingleColumnValueFilter}, if the tested column value is not actually - * needed as input (besides for the filtering itself). - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter { - - /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted; except for the tested column value. If the column is not found or - * the condition fails, the row will not be emitted. - * - * @param family name of column family - * @param qualifier name of column qualifier - * @param compareOp operator - * @param value value to compare column values against - */ - public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value) { - super(family, qualifier, compareOp, value); - } - - /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted; except for the tested column value. If the condition fails, the - * row will not be emitted. - *

          - * Use the filterIfColumnMissing flag to set whether the rest of the columns - * in a row will be emitted if the specified column to check is not found in - * the row. - * - * @param family name of column family - * @param qualifier name of column qualifier - * @param compareOp operator - * @param comparator Comparator to use. - */ - public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, - CompareOp compareOp, ByteArrayComparable comparator) { - super(family, qualifier, compareOp, comparator); - } - - /** - * Constructor for protobuf deserialization only. - * @param family - * @param qualifier - * @param compareOp - * @param comparator - * @param foundColumn - * @param matchedColumn - * @param filterIfMissing - * @param latestVersionOnly - */ - protected SingleColumnValueExcludeFilter(final byte[] family, final byte [] qualifier, - final CompareOp compareOp, ByteArrayComparable comparator, final boolean foundColumn, - final boolean matchedColumn, final boolean filterIfMissing, final boolean latestVersionOnly) { - super(family,qualifier,compareOp,comparator,foundColumn, - matchedColumn,filterIfMissing,latestVersionOnly); - } - - public ReturnCode filterKeyValue(KeyValue keyValue) { - ReturnCode superRetCode = super.filterKeyValue(keyValue); - if (superRetCode == ReturnCode.INCLUDE) { - // If the current column is actually the tested column, - // we will skip it instead. - if (keyValue.matchingColumn(this.columnFamily, this.columnQualifier)) { - return ReturnCode.SKIP; - } - } - return superRetCode; - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - SingleColumnValueFilter tempFilter = (SingleColumnValueFilter) - SingleColumnValueFilter.createFilterFromArguments(filterArguments); - SingleColumnValueExcludeFilter filter = new SingleColumnValueExcludeFilter ( - tempFilter.getFamily(), tempFilter.getQualifier(), - tempFilter.getOperator(), tempFilter.getComparator()); - - if (filterArguments.size() == 6) { - filter.setFilterIfMissing(tempFilter.getFilterIfMissing()); - filter.setLatestVersionOnly(tempFilter.getLatestVersionOnly()); - } - return filter; - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.SingleColumnValueExcludeFilter.Builder builder = - FilterProtos.SingleColumnValueExcludeFilter.newBuilder(); - builder.setSingleColumnValueFilter(super.convert()); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link SingleColumnValueExcludeFilter} instance - * @return An instance of {@link SingleColumnValueExcludeFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static SingleColumnValueExcludeFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.SingleColumnValueExcludeFilter proto; - try { - proto = FilterProtos.SingleColumnValueExcludeFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - - FilterProtos.SingleColumnValueFilter parentProto = proto.getSingleColumnValueFilter(); - final CompareOp compareOp = - CompareOp.valueOf(parentProto.getCompareOp().name()); - final ByteArrayComparable comparator; - try { - comparator = ProtobufUtil.toComparator(parentProto.getComparator()); - } catch (IOException ioe) { - throw new DeserializationException(ioe); - } - - return new SingleColumnValueExcludeFilter( - parentProto.hasColumnFamily()?parentProto.getColumnFamily().toByteArray():null, - 
parentProto.hasColumnQualifier()?parentProto.getColumnQualifier().toByteArray():null, - compareOp, comparator, parentProto.getFoundColumn(),parentProto.getMatchedColumn(), - parentProto.getFilterIfMissing(),parentProto.getLatestVersionOnly()); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof SingleColumnValueExcludeFilter)) return false; - - return super.areSerializedFieldsEqual(o); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java deleted file mode 100644 index f8f3da9..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java +++ /dev/null @@ -1,389 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType; -import org.apache.hadoop.hbase.util.Bytes; - -import java.io.IOException; -import java.util.ArrayList; - -import com.google.common.base.Preconditions; -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * This filter is used to filter cells based on value. It takes a {@link CompareFilter.CompareOp} - * operator (equal, greater, not equal, etc), and either a byte [] value or - * a ByteArrayComparable. - *

- * If we have a byte [] value then we just do a lexicographic compare. For
- * example, if passed value is 'b' and cell has 'a' and the compare operator
- * is LESS, then we will filter out this cell (return true). If this is not
- * sufficient (eg you want to deserialize a long and then compare it to a fixed
- * long value), then you can pass in your own comparator instead.
- * <p>
- * You must also specify a family and qualifier. Only the value of this column
- * will be tested. When using this filter on a {@link Scan} with specified
- * inputs, the column to be tested should also be added as input (otherwise
- * the filter will regard the column as missing).
- * <p>
- * To prevent the entire row from being emitted if the column is not found
- * on a row, use {@link #setFilterIfMissing}.
- * Otherwise, if the column is found, the entire row will be emitted only if
- * the value passes. If the value fails, the row will be filtered out.
- * <p>
- * In order to test values of previous versions (timestamps), set
- * {@link #setLatestVersionOnly} to false. The default is true, meaning that
- * only the latest version's value is tested and all previous versions are ignored.
- * <p>
          - * To filter based on the value of all scanned columns, use {@link ValueFilter}. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class SingleColumnValueFilter extends FilterBase { - static final Log LOG = LogFactory.getLog(SingleColumnValueFilter.class); - - protected byte [] columnFamily; - protected byte [] columnQualifier; - protected CompareOp compareOp; - protected ByteArrayComparable comparator; - protected boolean foundColumn = false; - protected boolean matchedColumn = false; - protected boolean filterIfMissing = false; - protected boolean latestVersionOnly = true; - - /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted. If the condition fails, the row will not be emitted. - *

          - * Use the filterIfColumnMissing flag to set whether the rest of the columns - * in a row will be emitted if the specified column to check is not found in - * the row. - * - * @param family name of column family - * @param qualifier name of column qualifier - * @param compareOp operator - * @param value value to compare column values against - */ - public SingleColumnValueFilter(final byte [] family, final byte [] qualifier, - final CompareOp compareOp, final byte[] value) { - this(family, qualifier, compareOp, new BinaryComparator(value)); - } - - /** - * Constructor for binary compare of the value of a single column. If the - * column is found and the condition passes, all columns of the row will be - * emitted. If the condition fails, the row will not be emitted. - *

          - * Use the filterIfColumnMissing flag to set whether the rest of the columns - * in a row will be emitted if the specified column to check is not found in - * the row. - * - * @param family name of column family - * @param qualifier name of column qualifier - * @param compareOp operator - * @param comparator Comparator to use. - */ - public SingleColumnValueFilter(final byte [] family, final byte [] qualifier, - final CompareOp compareOp, final ByteArrayComparable comparator) { - this.columnFamily = family; - this.columnQualifier = qualifier; - this.compareOp = compareOp; - this.comparator = comparator; - } - - /** - * Constructor for protobuf deserialization only. - * @param family - * @param qualifier - * @param compareOp - * @param comparator - * @param foundColumn - * @param matchedColumn - * @param filterIfMissing - * @param latestVersionOnly - */ - protected SingleColumnValueFilter(final byte[] family, final byte [] qualifier, - final CompareOp compareOp, ByteArrayComparable comparator, final boolean foundColumn, - final boolean matchedColumn, final boolean filterIfMissing, final boolean latestVersionOnly) { - this(family,qualifier,compareOp,comparator); - this.foundColumn = foundColumn; - this.matchedColumn = matchedColumn; - this.filterIfMissing = filterIfMissing; - this.latestVersionOnly = latestVersionOnly; - } - - /** - * @return operator - */ - public CompareOp getOperator() { - return compareOp; - } - - /** - * @return the comparator - */ - public ByteArrayComparable getComparator() { - return comparator; - } - - /** - * @return the family - */ - public byte[] getFamily() { - return columnFamily; - } - - /** - * @return the qualifier - */ - public byte[] getQualifier() { - return columnQualifier; - } - - public ReturnCode filterKeyValue(KeyValue keyValue) { - // System.out.println("REMOVE KEY=" + keyValue.toString() + ", value=" + Bytes.toString(keyValue.getValue())); - if (this.matchedColumn) { - // We already found and matched the single column, all keys now pass - return ReturnCode.INCLUDE; - } else if (this.latestVersionOnly && this.foundColumn) { - // We found but did not match the single column, skip to next row - return ReturnCode.NEXT_ROW; - } - if (!keyValue.matchingColumn(this.columnFamily, this.columnQualifier)) { - return ReturnCode.INCLUDE; - } - foundColumn = true; - if (filterColumnValue(keyValue.getBuffer(), - keyValue.getValueOffset(), keyValue.getValueLength())) { - return this.latestVersionOnly? ReturnCode.NEXT_ROW: ReturnCode.INCLUDE; - } - this.matchedColumn = true; - return ReturnCode.INCLUDE; - } - - private boolean filterColumnValue(final byte [] data, final int offset, - final int length) { - int compareResult = this.comparator.compareTo(data, offset, length); - switch (this.compareOp) { - case LESS: - return compareResult <= 0; - case LESS_OR_EQUAL: - return compareResult < 0; - case EQUAL: - return compareResult != 0; - case NOT_EQUAL: - return compareResult == 0; - case GREATER_OR_EQUAL: - return compareResult > 0; - case GREATER: - return compareResult >= 0; - default: - throw new RuntimeException("Unknown Compare op " + compareOp.name()); - } - } - - public boolean filterRow() { - // If column was found, return false if it was matched, true if it was not - // If column not found, return true if we filter if missing, false if not - return this.foundColumn? 
!this.matchedColumn: this.filterIfMissing; - } - - public boolean hasFilterRow() { - return true; - } - - public void reset() { - foundColumn = false; - matchedColumn = false; - } - - /** - * Get whether entire row should be filtered if column is not found. - * @return true if row should be skipped if column not found, false if row - * should be let through anyways - */ - public boolean getFilterIfMissing() { - return filterIfMissing; - } - - /** - * Set whether entire row should be filtered if column is not found. - *

- * If true, the entire row will be skipped if the column is not found.
- * <p>
          - * If false, the row will pass if the column is not found. This is default. - * @param filterIfMissing flag - */ - public void setFilterIfMissing(boolean filterIfMissing) { - this.filterIfMissing = filterIfMissing; - } - - /** - * Get whether only the latest version of the column value should be compared. - * If true, the row will be returned if only the latest version of the column - * value matches. If false, the row will be returned if any version of the - * column value matches. The default is true. - * @return return value - */ - public boolean getLatestVersionOnly() { - return latestVersionOnly; - } - - /** - * Set whether only the latest version of the column value should be compared. - * If true, the row will be returned if only the latest version of the column - * value matches. If false, the row will be returned if any version of the - * column value matches. The default is true. - * @param latestVersionOnly flag - */ - public void setLatestVersionOnly(boolean latestVersionOnly) { - this.latestVersionOnly = latestVersionOnly; - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - Preconditions.checkArgument(filterArguments.size() == 4 || filterArguments.size() == 6, - "Expected 4 or 6 but got: %s", filterArguments.size()); - byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); - byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); - CompareOp compareOp = ParseFilter.createCompareOp(filterArguments.get(2)); - ByteArrayComparable comparator = ParseFilter.createComparator( - ParseFilter.removeQuotesFromByteArray(filterArguments.get(3))); - - if (comparator instanceof RegexStringComparator || - comparator instanceof SubstringComparator) { - if (compareOp != CompareOp.EQUAL && - compareOp != CompareOp.NOT_EQUAL) { - throw new IllegalArgumentException ("A regexstring comparator and substring comparator " + - "can only be used with EQUAL and NOT_EQUAL"); - } - } - - SingleColumnValueFilter filter = new SingleColumnValueFilter(family, qualifier, - compareOp, comparator); - - if (filterArguments.size() == 6) { - boolean filterIfMissing = ParseFilter.convertByteArrayToBoolean(filterArguments.get(4)); - boolean latestVersionOnly = ParseFilter.convertByteArrayToBoolean(filterArguments.get(5)); - filter.setFilterIfMissing(filterIfMissing); - filter.setLatestVersionOnly(latestVersionOnly); - } - return filter; - } - - FilterProtos.SingleColumnValueFilter convert() { - FilterProtos.SingleColumnValueFilter.Builder builder = - FilterProtos.SingleColumnValueFilter.newBuilder(); - if (this.columnFamily != null) { - builder.setColumnFamily(ByteString.copyFrom(this.columnFamily)); - } - if (this.columnQualifier != null) { - builder.setColumnQualifier(ByteString.copyFrom(this.columnQualifier)); - } - HBaseProtos.CompareType compareOp = CompareType.valueOf(this.compareOp.name()); - builder.setCompareOp(compareOp); - builder.setComparator(ProtobufUtil.toComparator(this.comparator)); - builder.setFoundColumn(this.foundColumn); - builder.setMatchedColumn(this.matchedColumn); - builder.setFilterIfMissing(this.filterIfMissing); - builder.setLatestVersionOnly(this.latestVersionOnly); - - return builder.build(); - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - return convert().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link SingleColumnValueFilter} instance - * @return An instance of {@link SingleColumnValueFilter} made from bytes - * @throws 
DeserializationException - * @see #toByteArray - */ - public static SingleColumnValueFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.SingleColumnValueFilter proto; - try { - proto = FilterProtos.SingleColumnValueFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - - final CompareOp compareOp = - CompareOp.valueOf(proto.getCompareOp().name()); - final ByteArrayComparable comparator; - try { - comparator = ProtobufUtil.toComparator(proto.getComparator()); - } catch (IOException ioe) { - throw new DeserializationException(ioe); - } - - return new SingleColumnValueFilter( - proto.hasColumnFamily()?proto.getColumnFamily().toByteArray():null, - proto.hasColumnQualifier()?proto.getColumnQualifier().toByteArray():null, - compareOp, comparator, proto.getFoundColumn(),proto.getMatchedColumn(), - proto.getFilterIfMissing(),proto.getLatestVersionOnly()); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof SingleColumnValueFilter)) return false; - - SingleColumnValueFilter other = (SingleColumnValueFilter)o; - return Bytes.equals(this.getFamily(), other.getFamily()) - && Bytes.equals(this.getQualifier(), other.getQualifier()) - && this.compareOp.equals(other.compareOp) - && this.getComparator().areSerializedFieldsEqual(other.getComparator()) - && this.foundColumn == other.foundColumn - && this.matchedColumn == other.matchedColumn - && this.getFilterIfMissing() == other.getFilterIfMissing() - && this.getLatestVersionOnly() == other.getLatestVersionOnly(); - } - - @Override - public String toString() { - return String.format("%s (%s, %s, %s, %s)", - this.getClass().getSimpleName(), Bytes.toStringBinary(this.columnFamily), - Bytes.toStringBinary(this.columnQualifier), this.compareOp.name(), - Bytes.toStringBinary(this.comparator.getValue())); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java deleted file mode 100644 index 1d4388d..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.List; - -/** - * A wrapper filter that filters an entire row if any of the KeyValue checks do - * not pass. - *

- * For example, if all columns in a row represent weights of different things,
- * with the values being the actual weights, and we want to filter out the
- * entire row if any of its weights are zero. In this case, we want to prevent
- * rows from being emitted if a single key is filtered. Combine this filter
- * with a {@link ValueFilter}:
- * <p>
- * <pre>
- * scan.setFilter(new SkipFilter(new ValueFilter(CompareOp.NOT_EQUAL,
- *     new BinaryComparator(Bytes.toBytes(0)))));
- * </pre>
          - * Any row which contained a column whose value was 0 will be filtered out
          - * (since ValueFilter will not pass that KeyValue).
          - * Without this filter, the other non-zero valued columns in the row would still
          - * be emitted.
          - */
          -@InterfaceAudience.Public
          -@InterfaceStability.Stable
          -public class SkipFilter extends FilterBase {
          -  private boolean filterRow = false;
          -  private Filter filter;
          -
          -  public SkipFilter(Filter filter) {
          -    this.filter = filter;
          -  }
          -
          -  public Filter getFilter() {
          -    return filter;
          -  }
          -
          -  public void reset() {
          -    filter.reset();
          -    filterRow = false;
          -  }
          -
          -  private void changeFR(boolean value) {
          -    filterRow = filterRow || value;
          -  }
          -
          -  public ReturnCode filterKeyValue(KeyValue v) {
          -    ReturnCode c = filter.filterKeyValue(v);
          -    changeFR(c != ReturnCode.INCLUDE);
          -    return c;
          -  }
          -
          -  @Override
          -  public KeyValue transform(KeyValue v) {
          -    return filter.transform(v);
          -  }
          -
          -  public boolean filterRow() {
          -    return filterRow;
          -  }
          -    
          -  public boolean hasFilterRow() {
          -    return true;
          -  }
          -
          -  /**
          -   * @return The filter serialized using pb
          -   */
          -  public byte [] toByteArray() {
          -    FilterProtos.SkipFilter.Builder builder =
          -      FilterProtos.SkipFilter.newBuilder();
          -    builder.setFilter(ProtobufUtil.toFilter(this.filter));
          -    return builder.build().toByteArray();
          -  }
          -
          -  /**
          -   * @param pbBytes A pb serialized {@link SkipFilter} instance
          -   * @return An instance of {@link SkipFilter} made from bytes
          -   * @throws DeserializationException
          -   * @see #toByteArray
          -   */
          -  public static SkipFilter parseFrom(final byte [] pbBytes)
          -  throws DeserializationException {
          -    FilterProtos.SkipFilter proto;
          -    try {
          -      proto = FilterProtos.SkipFilter.parseFrom(pbBytes);
          -    } catch (InvalidProtocolBufferException e) {
          -      throw new DeserializationException(e);
          -    }
          -    try {
          -      return new SkipFilter(ProtobufUtil.toFilter(proto.getFilter()));
          -    } catch (IOException ioe) {
          -      throw new DeserializationException(ioe);
          -    }
          -  }
          -
          -  /**
          -   * @param other
          -   * @return true if and only if the fields of the filter that are serialized
          -   * are equal to the corresponding fields in other.  Used for testing.
          -   */
          -  boolean areSerializedFieldsEqual(Filter o) {
          -    if (o == this) return true;
          -    if (!(o instanceof SkipFilter)) return false;
          -
          -    SkipFilter other = (SkipFilter)o;
          -    return getFilter().areSerializedFieldsEqual(other.getFilter());
          -  }
          -
          -  @Override
          -  public String toString() {
          -    return this.getClass().getSimpleName() + " " + this.filter.toString();
          -  }
          -}
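A hedged end-to-end sketch of the SkipFilter usage described in the javadoc above; the table name "weights" and the pre-existing Configuration object 'conf' are assumptions for illustration only:

    // Drop any row that contains at least one zero-valued cell.
    Scan scan = new Scan();
    scan.setFilter(new SkipFilter(new ValueFilter(CompareOp.NOT_EQUAL,
        new BinaryComparator(Bytes.toBytes(0)))));

    HTable table = new HTable(conf, "weights");      // 'conf' is an existing Configuration
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result result : scanner) {
        // Only rows whose every cell passed the wrapped ValueFilter arrive here.
        System.out.println(Bytes.toString(result.getRow()));
      }
    } finally {
      scanner.close();
      table.close();
    }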
          diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java
          deleted file mode 100644
          index 1ed08a2..0000000
          --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java
          +++ /dev/null
          @@ -1,112 +0,0 @@
          -/**
          - *
          - * Licensed to the Apache Software Foundation (ASF) under one
          - * or more contributor license agreements.  See the NOTICE file
          - * distributed with this work for additional information
          - * regarding copyright ownership.  The ASF licenses this file
          - * to you under the Apache License, Version 2.0 (the
          - * "License"); you may not use this file except in compliance
          - * with the License.  You may obtain a copy of the License at
          - *
          - *     http://www.apache.org/licenses/LICENSE-2.0
          - *
          - * Unless required by applicable law or agreed to in writing, software
          - * distributed under the License is distributed on an "AS IS" BASIS,
          - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          - * See the License for the specific language governing permissions and
          - * limitations under the License.
          - */
          -package org.apache.hadoop.hbase.filter;
          -
          -import org.apache.hadoop.classification.InterfaceAudience;
          -import org.apache.hadoop.classification.InterfaceStability;
          -import org.apache.hadoop.hbase.DeserializationException;
          -import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;
          -import org.apache.hadoop.hbase.util.Bytes;
          -
          -import com.google.protobuf.InvalidProtocolBufferException;
          -
          -
          -/**
          - * This comparator is for use with SingleColumnValueFilter, for filtering based on
          - * the value of a given column. Use it to test if a given substring appears
          - * in a cell value in the column. The comparison is case insensitive.
- * <p>
- * Only EQUAL or NOT_EQUAL tests are valid with this comparator.
- * <p>
- * For example:
- * <p>
- * <pre>
- * SingleColumnValueFilter scvf =
- *   new SingleColumnValueFilter("col", CompareOp.EQUAL,
- *     new SubstringComparator("substr"));
- * </pre>
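A hedged variant of the example above with explicit byte[] family and qualifier arguments, which is what the SingleColumnValueFilter constructor shown earlier in this patch actually takes; the column names are illustrative:

    // Case-insensitive substring match against the value of info:description.
    SingleColumnValueFilter scvf = new SingleColumnValueFilter(
        Bytes.toBytes("info"), Bytes.toBytes("description"),
        CompareOp.EQUAL, new SubstringComparator("hbase"));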
          - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class SubstringComparator extends ByteArrayComparable { - - private String substr; - - /** - * Constructor - * @param substr the substring - */ - public SubstringComparator(String substr) { - super(Bytes.toBytes(substr.toLowerCase())); - this.substr = substr.toLowerCase(); - } - - @Override - public byte[] getValue() { - return Bytes.toBytes(substr); - } - - @Override - public int compareTo(byte[] value, int offset, int length) { - return Bytes.toString(value, offset, length).toLowerCase().contains(substr) ? 0 - : 1; - } - - /** - * @return The comparator serialized using pb - */ - public byte [] toByteArray() { - ComparatorProtos.SubstringComparator.Builder builder = - ComparatorProtos.SubstringComparator.newBuilder(); - builder.setSubstr(this.substr); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link SubstringComparator} instance - * @return An instance of {@link SubstringComparator} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static SubstringComparator parseFrom(final byte [] pbBytes) - throws DeserializationException { - ComparatorProtos.SubstringComparator proto; - try { - proto = ComparatorProtos.SubstringComparator.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new SubstringComparator(proto.getSubstr()); - } - - /** - * @param other - * @return true if and only if the fields of the comparator that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(ByteArrayComparable other) { - if (other == this) return true; - if (!(other instanceof SubstringComparator)) return false; - - SubstringComparator comparator = (SubstringComparator)other; - return super.areSerializedFieldsEqual(comparator) - && this.substr.equals(comparator.substr); - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java deleted file mode 100644 index 20f5d25..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.filter; - -import java.util.ArrayList; -import java.util.List; -import java.util.TreeSet; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * Filter that returns only cells whose timestamp (version) is - * in the specified list of timestamps (versions). - *

          - * Note: Use of this filter overrides any time range/time stamp - * options specified using {@link org.apache.hadoop.hbase.client.Get#setTimeRange(long, long)}, - * {@link org.apache.hadoop.hbase.client.Scan#setTimeRange(long, long)}, {@link org.apache.hadoop.hbase.client.Get#setTimeStamp(long)}, - * or {@link org.apache.hadoop.hbase.client.Scan#setTimeStamp(long)}. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class TimestampsFilter extends FilterBase { - - TreeSet timestamps; - private static final int MAX_LOG_TIMESTAMPS = 5; - - // Used during scans to hint the scan to stop early - // once the timestamps fall below the minTimeStamp. - long minTimeStamp = Long.MAX_VALUE; - - /** - * Constructor for filter that retains only those - * cells whose timestamp (version) is in the specified - * list of timestamps. - * - * @param timestamps - */ - public TimestampsFilter(List timestamps) { - for (Long timestamp : timestamps) { - Preconditions.checkArgument(timestamp >= 0, "must be positive %s", timestamp); - } - this.timestamps = new TreeSet(timestamps); - init(); - } - - /** - * @return the list of timestamps - */ - public List getTimestamps() { - List list = new ArrayList(timestamps.size()); - list.addAll(timestamps); - return list; - } - - private void init() { - if (this.timestamps.size() > 0) { - minTimeStamp = this.timestamps.first(); - } - } - - /** - * Gets the minimum timestamp requested by filter. - * @return minimum timestamp requested by filter. - */ - public long getMin() { - return minTimeStamp; - } - - @Override - public ReturnCode filterKeyValue(KeyValue v) { - if (this.timestamps.contains(v.getTimestamp())) { - return ReturnCode.INCLUDE; - } else if (v.getTimestamp() < minTimeStamp) { - // The remaining versions of this column are guaranteed - // to be lesser than all of the other values. - return ReturnCode.NEXT_COL; - } - return ReturnCode.SKIP; - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - ArrayList timestamps = new ArrayList(); - for (int i = 0; ibytes - * @throws DeserializationException - * @see #toByteArray - */ - public static TimestampsFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.TimestampsFilter proto; - try { - proto = FilterProtos.TimestampsFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return new TimestampsFilter(proto.getTimestampsList()); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. 
- */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof TimestampsFilter)) return false; - - TimestampsFilter other = (TimestampsFilter)o; - return this.getTimestamps().equals(other.getTimestamps()); - } - - @Override - public String toString() { - return toString(MAX_LOG_TIMESTAMPS); - } - - protected String toString(int maxTimestamps) { - StringBuilder tsList = new StringBuilder(); - - int count = 0; - for (Long ts : this.timestamps) { - if (count >= maxTimestamps) { - break; - } - ++count; - tsList.append(ts.toString()); - if (count < this.timestamps.size() && count < maxTimestamps) { - tsList.append(", "); - } - } - - return String.format("%s (%d/%d): [%s]", this.getClass().getSimpleName(), - count, this.timestamps.size(), tsList.toString()); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java deleted file mode 100644 index de100f3..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java +++ /dev/null @@ -1,125 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - -import java.io.IOException; -import java.util.ArrayList; - -/** - * This filter is used to filter based on column value. It takes an - * operator (equal, greater, not equal, etc) and a byte [] comparator for the - * cell value. - *

- * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter}
- * to add more control.
- * <p>
- * Multiple filters can be combined using {@link FilterList}.
- * <p>
          - * To test the value of a single qualifier when scanning multiple qualifiers, - * use {@link SingleColumnValueFilter}. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class ValueFilter extends CompareFilter { - - /** - * Constructor. - * @param valueCompareOp the compare op for value matching - * @param valueComparator the comparator for value matching - */ - public ValueFilter(final CompareOp valueCompareOp, - final ByteArrayComparable valueComparator) { - super(valueCompareOp, valueComparator); - } - - @Override - public ReturnCode filterKeyValue(KeyValue v) { - if (doCompare(this.compareOp, this.comparator, v.getBuffer(), - v.getValueOffset(), v.getValueLength())) { - return ReturnCode.SKIP; - } - return ReturnCode.INCLUDE; - } - - public static Filter createFilterFromArguments(ArrayList filterArguments) { - ArrayList arguments = CompareFilter.extractArguments(filterArguments); - CompareOp compareOp = (CompareOp)arguments.get(0); - ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1); - return new ValueFilter(compareOp, comparator); - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.ValueFilter.Builder builder = - FilterProtos.ValueFilter.newBuilder(); - builder.setCompareFilter(super.convert()); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link ValueFilter} instance - * @return An instance of {@link ValueFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static ValueFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.ValueFilter proto; - try { - proto = FilterProtos.ValueFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - final CompareOp valueCompareOp = - CompareOp.valueOf(proto.getCompareFilter().getCompareOp().name()); - ByteArrayComparable valueComparator = null; - try { - if (proto.getCompareFilter().hasComparator()) { - valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator()); - } - } catch (IOException ioe) { - throw new DeserializationException(ioe); - } - return new ValueFilter(valueCompareOp,valueComparator); - } - - /** - * @param other - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof ValueFilter)) return false; - - return super.areSerializedFieldsEqual(o); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java deleted file mode 100644 index 6c454e5..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.filter; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.List; - -/** - * A wrapper filter that returns true from {@link #filterAllRemaining()} as soon - * as the wrapped filters {@link Filter#filterRowKey(byte[], int, int)}, - * {@link Filter#filterKeyValue(org.apache.hadoop.hbase.KeyValue)}, - * {@link org.apache.hadoop.hbase.filter.Filter#filterRow()} or - * {@link org.apache.hadoop.hbase.filter.Filter#filterAllRemaining()} methods - * returns true. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class WhileMatchFilter extends FilterBase { - private boolean filterAllRemaining = false; - private Filter filter; - - public WhileMatchFilter(Filter filter) { - this.filter = filter; - } - - public Filter getFilter() { - return filter; - } - - public void reset() { - this.filter.reset(); - } - - private void changeFAR(boolean value) { - filterAllRemaining = filterAllRemaining || value; - } - - public boolean filterAllRemaining() { - return this.filterAllRemaining || this.filter.filterAllRemaining(); - } - - public boolean filterRowKey(byte[] buffer, int offset, int length) { - boolean value = filter.filterRowKey(buffer, offset, length); - changeFAR(value); - return value; - } - - public ReturnCode filterKeyValue(KeyValue v) { - ReturnCode c = filter.filterKeyValue(v); - changeFAR(c != ReturnCode.INCLUDE); - return c; - } - - @Override - public KeyValue transform(KeyValue v) { - return filter.transform(v); - } - - public boolean filterRow() { - boolean filterRow = this.filter.filterRow(); - changeFAR(filterRow); - return filterRow; - } - - public boolean hasFilterRow() { - return true; - } - - /** - * @return The filter serialized using pb - */ - public byte [] toByteArray() { - FilterProtos.WhileMatchFilter.Builder builder = - FilterProtos.WhileMatchFilter.newBuilder(); - builder.setFilter(ProtobufUtil.toFilter(this.filter)); - return builder.build().toByteArray(); - } - - /** - * @param pbBytes A pb serialized {@link WhileMatchFilter} instance - * @return An instance of {@link WhileMatchFilter} made from bytes - * @throws DeserializationException - * @see #toByteArray - */ - public static WhileMatchFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { - FilterProtos.WhileMatchFilter proto; - try { - proto = FilterProtos.WhileMatchFilter.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - try { - return new WhileMatchFilter(ProtobufUtil.toFilter(proto.getFilter())); - } catch (IOException ioe) { - throw new DeserializationException(ioe); - } - } - - /** - * @param other - * @return true 
if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. - */ - boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof WhileMatchFilter)) return false; - - WhileMatchFilter other = (WhileMatchFilter)o; - return getFilter().areSerializedFieldsEqual(other.getFilter()); - } - - @Override - public String toString() { - return this.getClass().getSimpleName() + " " + this.filter.toString(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/filter/package-info.java hbase-server/src/main/java/org/apache/hadoop/hbase/filter/package-info.java deleted file mode 100644 index 9dea254..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/filter/package-info.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Provides row-level filters applied to HRegion scan results during calls to - * {@link org.apache.hadoop.hbase.client.ResultScanner#next()}. - -
-Filters run the extent of a table unless you wrap your filter in a
-{@link org.apache.hadoop.hbase.filter.WhileMatchFilter}.
-The latter returns as soon as the filter stops matching.
-
-
-Do not rely on filters carrying state across rows; it's not reliable in current
-hbase as we have no handlers in place for when regions split, close or server
-crashes.
-
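As a purely illustrative aside (not part of this patch; the table name "t1" and the row prefix are made up), the early-out behaviour described above looks roughly like this from the client side:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class WhileMatchScanExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "t1");              // hypothetical table
    Scan scan = new Scan(Bytes.toBytes("user-"));       // start at the prefix
    // WhileMatchFilter turns the wrapped filter's first miss into
    // filterAllRemaining() == true, so the scan ends at the first row
    // outside the prefix instead of filtering its way to the table end.
    scan.setFilter(new WhileMatchFilter(new PrefixFilter(Bytes.toBytes("user-"))));
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        System.out.println(Bytes.toString(r.getRow()));
      }
    } finally {
      scanner.close();
      table.close();
    }
  }
}

The wrapper only adds the guaranteed early exit; it does not change which rows the inner filter matches.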
          -*/ -package org.apache.hadoop.hbase.filter; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/DataInputInputStream.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/DataInputInputStream.java deleted file mode 100644 index b54c3ad..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/io/DataInputInputStream.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.io; - -import java.io.DataInput; -import java.io.IOException; -import java.io.InputStream; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * An InputStream that wraps a DataInput. - * @see DataOutputOutputStream - */ -@InterfaceAudience.Private -public class DataInputInputStream extends InputStream { - - private DataInput in; - - /** - * Construct an InputStream from the given DataInput. If 'in' - * is already an InputStream, simply returns it. Otherwise, wraps - * it in an InputStream. - * @param in the DataInput to wrap - * @return an InputStream instance that reads from 'in' - */ - public static InputStream constructInputStream(DataInput in) { - if (in instanceof InputStream) { - return (InputStream)in; - } else { - return new DataInputInputStream(in); - } - } - - - public DataInputInputStream(DataInput in) { - this.in = in; - } - - @Override - public int read() throws IOException { - return in.readUnsignedByte(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/DataOutputOutputStream.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/DataOutputOutputStream.java deleted file mode 100644 index fe7044d..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/io/DataOutputOutputStream.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.io; - -import java.io.DataOutput; -import java.io.IOException; -import java.io.OutputStream; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * OutputStream implementation that wraps a DataOutput. - */ -@InterfaceAudience.Private -public class DataOutputOutputStream extends OutputStream { - - private final DataOutput out; - - /** - * Construct an OutputStream from the given DataOutput. If 'out' - * is already an OutputStream, simply returns it. Otherwise, wraps - * it in an OutputStream. - * @param out the DataOutput to wrap - * @return an OutputStream instance that outputs to 'out' - */ - public static OutputStream constructOutputStream(DataOutput out) { - if (out instanceof OutputStream) { - return (OutputStream)out; - } else { - return new DataOutputOutputStream(out); - } - } - - private DataOutputOutputStream(DataOutput out) { - this.out = out; - } - - @Override - public void write(int b) throws IOException { - out.writeByte(b); - } - - @Override - public void write(byte[] b, int off, int len) throws IOException { - out.write(b, off, len); - } - - @Override - public void write(byte[] b) throws IOException { - out.write(b); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java deleted file mode 100644 index 824ba6e..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java +++ /dev/null @@ -1,806 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.io; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInput; -import java.io.DataOutput; -import java.io.InputStream; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; -import java.io.Serializable; -import java.lang.reflect.Array; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.NavigableSet; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.hbase.ClusterStatus; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.client.Action; -import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Increment; -import org.apache.hadoop.hbase.client.MultiAction; -import org.apache.hadoop.hbase.client.MultiResponse; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Row; -import org.apache.hadoop.hbase.client.RowMutations; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.filter.BinaryComparator; -import org.apache.hadoop.hbase.filter.BitComparator; -import org.apache.hadoop.hbase.filter.ColumnCountGetFilter; -import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; -import org.apache.hadoop.hbase.filter.ColumnRangeFilter; -import org.apache.hadoop.hbase.filter.CompareFilter; -import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; -import org.apache.hadoop.hbase.filter.DependentColumnFilter; -import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; -import org.apache.hadoop.hbase.filter.InclusiveStopFilter; -import org.apache.hadoop.hbase.filter.KeyOnlyFilter; -import org.apache.hadoop.hbase.filter.PageFilter; -import org.apache.hadoop.hbase.filter.PrefixFilter; -import org.apache.hadoop.hbase.filter.QualifierFilter; -import org.apache.hadoop.hbase.filter.RandomRowFilter; -import org.apache.hadoop.hbase.filter.RowFilter; -import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; -import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; -import org.apache.hadoop.hbase.filter.SkipFilter; -import org.apache.hadoop.hbase.filter.ValueFilter; -import org.apache.hadoop.hbase.filter.WhileMatchFilter; -import org.apache.hadoop.hbase.filter.ByteArrayComparable; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.RegionOpeningState; -import org.apache.hadoop.hbase.regionserver.wal.HLog; -import org.apache.hadoop.hbase.regionserver.wal.HLogKey; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.ProtoUtil; -import org.apache.hadoop.io.MapWritable; -import org.apache.hadoop.io.ObjectWritable; -import 
org.apache.hadoop.io.Text; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableFactories; -import org.apache.hadoop.io.WritableUtils; - -import com.google.protobuf.Message; -import com.google.protobuf.RpcController; - -/** - * This is a customized version of the polymorphic hadoop - * {@link ObjectWritable}. It removes UTF8 (HADOOP-414). - * Using {@link Text} intead of UTF-8 saves ~2% CPU between reading and writing - * objects running a short sequentialWrite Performance Evaluation test just in - * ObjectWritable alone; more when we're doing randomRead-ing. Other - * optimizations include our passing codes for classes instead of the - * actual class names themselves. This makes it so this class needs amendment - * if non-Writable classes are introduced -- if passed a Writable for which we - * have no code, we just do the old-school passing of the class name, etc. -- - * but passing codes the savings are large particularly when cell - * data is small (If < a couple of kilobytes, the encoding/decoding of class - * name and reflection to instantiate class was costing in excess of the cell - * handling). - */ -@InterfaceAudience.Private -public class HbaseObjectWritable implements Writable, WritableWithSize, Configurable { - protected final static Log LOG = LogFactory.getLog(HbaseObjectWritable.class); - - // Here we maintain two static maps of classes to code and vice versa. - // Add new classes+codes as wanted or figure way to auto-generate these - // maps. - static final Map> CODE_TO_CLASS = - new HashMap>(); - static final Map, Integer> CLASS_TO_CODE = - new HashMap, Integer>(); - // Special code that means 'not-encoded'; in this case we do old school - // sending of the class name using reflection, etc. - private static final byte NOT_ENCODED = 0; - //Generic array means that the array type is not one of the pre-defined arrays - //in the CLASS_TO_CODE map, but we have to still encode the array since it's - //elements are serializable by this class. - private static final int GENERIC_ARRAY_CODE; - private static final int NEXT_CLASS_CODE; - static { - //////////////////////////////////////////////////////////////////////////// - // WARNING: Please do not insert, remove or swap any line in this static // - // block. Doing so would change or shift all the codes used to serialize // - // objects, which makes backwards compatibility very hard for clients. // - // New codes should always be added at the end. Code removal is // - // discouraged because code is a short now. // - //////////////////////////////////////////////////////////////////////////// - - int code = NOT_ENCODED + 1; - // Primitive types. - addToMap(Boolean.TYPE, code++); - addToMap(Byte.TYPE, code++); - addToMap(Character.TYPE, code++); - addToMap(Short.TYPE, code++); - addToMap(Integer.TYPE, code++); - addToMap(Long.TYPE, code++); - addToMap(Float.TYPE, code++); - addToMap(Double.TYPE, code++); - addToMap(Void.TYPE, code++); - - // Other java types - addToMap(String.class, code++); - addToMap(byte [].class, code++); - addToMap(byte [][].class, code++); - - // Hadoop types - addToMap(Text.class, code++); - addToMap(Writable.class, code++); - addToMap(Writable [].class, code++); - code++; // Removed - addToMap(NullInstance.class, code++); - - // Hbase types - addToMap(HColumnDescriptor.class, code++); - addToMap(HConstants.Modify.class, code++); - - // We used to have a class named HMsg but its been removed. 
Rather than - // just axe it, use following random Integer class -- we just chose any - // class from java.lang -- instead just so codes that follow stay - // in same relative place. - addToMap(Integer.class, code++); - addToMap(Integer[].class, code++); - - //HRegion shouldn't be pushed across the wire. - code++; //addToMap(HRegion.class, code++); - code++; //addToMap(HRegion[].class, code++); - - addToMap(HRegionInfo.class, code++); - addToMap(HRegionInfo[].class, code++); - code++; // Removed - code++; // Removed - addToMap(HTableDescriptor.class, code++); - addToMap(MapWritable.class, code++); - - // - // HBASE-880 - // - addToMap(ClusterStatus.class, code++); - addToMap(Delete.class, code++); - addToMap(Get.class, code++); - addToMap(KeyValue.class, code++); - addToMap(KeyValue[].class, code++); - addToMap(Put.class, code++); - addToMap(Put[].class, code++); - addToMap(Result.class, code++); - addToMap(Result[].class, code++); - addToMap(Scan.class, code++); - - addToMap(WhileMatchFilter.class, code++); - addToMap(PrefixFilter.class, code++); - addToMap(PageFilter.class, code++); - addToMap(InclusiveStopFilter.class, code++); - addToMap(ColumnCountGetFilter.class, code++); - addToMap(SingleColumnValueFilter.class, code++); - addToMap(SingleColumnValueExcludeFilter.class, code++); - addToMap(BinaryComparator.class, code++); - addToMap(BitComparator.class, code++); - addToMap(CompareFilter.class, code++); - addToMap(RowFilter.class, code++); - addToMap(ValueFilter.class, code++); - addToMap(QualifierFilter.class, code++); - addToMap(SkipFilter.class, code++); - addToMap(ByteArrayComparable.class, code++); - addToMap(FirstKeyOnlyFilter.class, code++); - addToMap(DependentColumnFilter.class, code++); - - addToMap(Delete [].class, code++); - - addToMap(HLog.Entry.class, code++); - addToMap(HLog.Entry[].class, code++); - addToMap(HLogKey.class, code++); - - addToMap(List.class, code++); - - addToMap(NavigableSet.class, code++); - addToMap(ColumnPrefixFilter.class, code++); - - // Multi - addToMap(Row.class, code++); - addToMap(Action.class, code++); - addToMap(MultiAction.class, code++); - addToMap(MultiResponse.class, code++); - - // coprocessor execution - // Exec no longer exists --> addToMap(Exec.class, code++); - code++; - addToMap(Increment.class, code++); - - addToMap(KeyOnlyFilter.class, code++); - - // serializable - addToMap(Serializable.class, code++); - - addToMap(RandomRowFilter.class, code++); - - addToMap(CompareOp.class, code++); - - addToMap(ColumnRangeFilter.class, code++); - - // HServerLoad no longer exists; increase code so other classes stay the same. 
- code++; - //addToMap(HServerLoad.class, code++); - - addToMap(RegionOpeningState.class, code++); - - addToMap(HTableDescriptor[].class, code++); - - addToMap(Append.class, code++); - - addToMap(RowMutations.class, code++); - - addToMap(Message.class, code++); - - //java.lang.reflect.Array is a placeholder for arrays not defined above - GENERIC_ARRAY_CODE = code++; - addToMap(Array.class, GENERIC_ARRAY_CODE); - - addToMap(RpcController.class, code++); - - // make sure that this is the last statement in this static block - NEXT_CLASS_CODE = code; - } - - private Class declaredClass; - private Object instance; - private Configuration conf; - - /** default constructor for writable */ - public HbaseObjectWritable() { - super(); - } - - /** - * @param instance - */ - public HbaseObjectWritable(Object instance) { - set(instance); - } - - /** - * @param declaredClass - * @param instance - */ - public HbaseObjectWritable(Class declaredClass, Object instance) { - this.declaredClass = declaredClass; - this.instance = instance; - } - - /** @return the instance, or null if none. */ - public Object get() { return instance; } - - /** @return the class this is meant to be. */ - public Class getDeclaredClass() { return declaredClass; } - - /** - * Reset the instance. - * @param instance - */ - public void set(Object instance) { - this.declaredClass = instance.getClass(); - this.instance = instance; - } - - /** - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - return "OW[class=" + declaredClass + ",value=" + instance + "]"; - } - - - public void readFields(DataInput in) throws IOException { - readObject(in, this, this.conf); - } - - public void write(DataOutput out) throws IOException { - writeObject(out, instance, declaredClass, conf); - } - - public long getWritableSize() { - return getWritableSize(instance, declaredClass, conf); - } - - private static class NullInstance extends Configured implements Writable { - Class declaredClass; - /** default constructor for writable */ - @SuppressWarnings("unused") - public NullInstance() { super(null); } - - /** - * @param declaredClass - * @param conf - */ - public NullInstance(Class declaredClass, Configuration conf) { - super(conf); - this.declaredClass = declaredClass; - } - - public void readFields(DataInput in) throws IOException { - this.declaredClass = CODE_TO_CLASS.get(WritableUtils.readVInt(in)); - } - - public void write(DataOutput out) throws IOException { - writeClassCode(out, this.declaredClass); - } - } - - public static Integer getClassCode(final Class c) - throws IOException { - Integer code = CLASS_TO_CODE.get(c); - if (code == null ) { - if (List.class.isAssignableFrom(c)) { - code = CLASS_TO_CODE.get(List.class); - } else if (Writable.class.isAssignableFrom(c)) { - code = CLASS_TO_CODE.get(Writable.class); - } else if (c.isArray()) { - code = CLASS_TO_CODE.get(Array.class); - } else if (Message.class.isAssignableFrom(c)) { - code = CLASS_TO_CODE.get(Message.class); - } else if (Serializable.class.isAssignableFrom(c)){ - code = CLASS_TO_CODE.get(Serializable.class); - } else if (Scan.class.isAssignableFrom(c)) { - code = CLASS_TO_CODE.get(Scan.class); - } - } - return code; - } - - /** - * @return the next object code in the list. Used in testing to verify that additional fields are not added - */ - static int getNextClassCode(){ - return NEXT_CLASS_CODE; - } - - /** - * Write out the code for passed Class. 
- * @param out - * @param c - * @throws IOException - */ - static void writeClassCode(final DataOutput out, final Class c) - throws IOException { - Integer code = getClassCode(c); - - if (code == null) { - LOG.error("Unsupported type " + c); - StackTraceElement[] els = new Exception().getStackTrace(); - for(StackTraceElement elem : els) { - LOG.error(elem.getMethodName()); - } - throw new UnsupportedOperationException("No code for unexpected " + c); - } - WritableUtils.writeVInt(out, code); - } - - public static long getWritableSize(Object instance, Class declaredClass, - Configuration conf) { - return 0L; // no hint is the default. - } - /** - * Write a {@link Writable}, {@link String}, primitive type, or an array of - * the preceding. - * @param out - * @param instance - * @param declaredClass - * @param conf - * @throws IOException - */ - @SuppressWarnings("unchecked") - public static void writeObject(DataOutput out, Object instance, - Class declaredClass, - Configuration conf) - throws IOException { - - Object instanceObj = instance; - Class declClass = declaredClass; - - if (instanceObj == null) { // null - instanceObj = new NullInstance(declClass, conf); - declClass = Writable.class; - } - writeClassCode(out, declClass); - if (declClass.isArray()) { // array - // If bytearray, just dump it out -- avoid the recursion and - // byte-at-a-time we were previously doing. - if (declClass.equals(byte [].class)) { - Bytes.writeByteArray(out, (byte [])instanceObj); - } else { - //if it is a Generic array, write the element's type - if (getClassCode(declaredClass) == GENERIC_ARRAY_CODE) { - Class componentType = declaredClass.getComponentType(); - writeClass(out, componentType); - } - - int length = Array.getLength(instanceObj); - out.writeInt(length); - for (int i = 0; i < length; i++) { - Object item = Array.get(instanceObj, i); - writeObject(out, item, - item.getClass(), conf); - } - } - } else if (List.class.isAssignableFrom(declClass)) { - List list = (List)instanceObj; - int length = list.size(); - out.writeInt(length); - for (int i = 0; i < length; i++) { - Object elem = list.get(i); - writeObject(out, elem, - elem == null ? 
Writable.class : elem.getClass(), conf); - } - } else if (declClass == String.class) { // String - Text.writeString(out, (String)instanceObj); - } else if (declClass.isPrimitive()) { // primitive type - if (declClass == Boolean.TYPE) { // boolean - out.writeBoolean(((Boolean)instanceObj).booleanValue()); - } else if (declClass == Character.TYPE) { // char - out.writeChar(((Character)instanceObj).charValue()); - } else if (declClass == Byte.TYPE) { // byte - out.writeByte(((Byte)instanceObj).byteValue()); - } else if (declClass == Short.TYPE) { // short - out.writeShort(((Short)instanceObj).shortValue()); - } else if (declClass == Integer.TYPE) { // int - out.writeInt(((Integer)instanceObj).intValue()); - } else if (declClass == Long.TYPE) { // long - out.writeLong(((Long)instanceObj).longValue()); - } else if (declClass == Float.TYPE) { // float - out.writeFloat(((Float)instanceObj).floatValue()); - } else if (declClass == Double.TYPE) { // double - out.writeDouble(((Double)instanceObj).doubleValue()); - } else if (declClass == Void.TYPE) { // void - } else { - throw new IllegalArgumentException("Not a primitive: "+declClass); - } - } else if (declClass.isEnum()) { // enum - Text.writeString(out, ((Enum)instanceObj).name()); - } else if (Message.class.isAssignableFrom(declaredClass)) { - Text.writeString(out, instanceObj.getClass().getName()); - ((Message)instance).writeDelimitedTo( - DataOutputOutputStream.constructOutputStream(out)); - } else if (Writable.class.isAssignableFrom(declClass)) { // Writable - Class c = instanceObj.getClass(); - Integer code = CLASS_TO_CODE.get(c); - if (code == null) { - out.writeByte(NOT_ENCODED); - Text.writeString(out, c.getName()); - } else { - writeClassCode(out, c); - } - ((Writable)instanceObj).write(out); - } else if (Serializable.class.isAssignableFrom(declClass)) { - Class c = instanceObj.getClass(); - Integer code = CLASS_TO_CODE.get(c); - if (code == null) { - out.writeByte(NOT_ENCODED); - Text.writeString(out, c.getName()); - } else { - writeClassCode(out, c); - } - ByteArrayOutputStream bos = null; - ObjectOutputStream oos = null; - try{ - bos = new ByteArrayOutputStream(); - oos = new ObjectOutputStream(bos); - oos.writeObject(instanceObj); - byte[] value = bos.toByteArray(); - out.writeInt(value.length); - out.write(value); - } finally { - if(bos!=null) bos.close(); - if(oos!=null) oos.close(); - } - } else if (Scan.class.isAssignableFrom(declClass)) { - Scan scan = (Scan)instanceObj; - byte [] scanBytes = ProtobufUtil.toScan(scan).toByteArray(); - out.writeInt(scanBytes.length); - out.write(scanBytes); - } else { - throw new IOException("Can't write: "+instanceObj+" as "+declClass); - } - } - - /** Writes the encoded class code as defined in CLASS_TO_CODE, or - * the whole class name if not defined in the mapping. 
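To make the code-versus-classname encoding above concrete, here is a minimal round trip through the two public static helpers. It is illustrative only; this class is being deleted by the patch in favour of protobuf, and the example class name is invented.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.HbaseObjectWritable;

public class HbaseObjectWritableRoundTrip {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // Write: byte[].class is in CLASS_TO_CODE, so only a short vint code is
    // written before the payload, rather than a class name plus reflection.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    byte[] value = "row-1".getBytes("UTF-8");
    HbaseObjectWritable.writeObject(out, value, byte[].class, conf);
    out.flush();

    // Read: the leading code selects byte[].class again and the payload is
    // read back with Bytes.readByteArray.
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    byte[] copy = (byte[]) HbaseObjectWritable.readObject(in, conf);
    System.out.println(new String(copy, "UTF-8"));   // prints row-1
  }
}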
- */ - static void writeClass(DataOutput out, Class c) throws IOException { - Integer code = CLASS_TO_CODE.get(c); - if (code == null) { - WritableUtils.writeVInt(out, NOT_ENCODED); - Text.writeString(out, c.getName()); - } else { - WritableUtils.writeVInt(out, code); - } - } - - /** Reads and returns the class as written by {@link #writeClass(DataOutput, Class)} */ - static Class readClass(Configuration conf, DataInput in) throws IOException { - Class instanceClass = null; - int b = (byte)WritableUtils.readVInt(in); - if (b == NOT_ENCODED) { - String className = Text.readString(in); - try { - instanceClass = getClassByName(conf, className); - } catch (ClassNotFoundException e) { - LOG.error("Can't find class " + className, e); - throw new IOException("Can't find class " + className, e); - } - } else { - instanceClass = CODE_TO_CLASS.get(b); - } - return instanceClass; - } - - /** - * Read a {@link Writable}, {@link String}, primitive type, or an array of - * the preceding. - * @param in - * @param conf - * @return the object - * @throws IOException - */ - public static Object readObject(DataInput in, Configuration conf) - throws IOException { - return readObject(in, null, conf); - } - - /** - * Read a {@link Writable}, {@link String}, primitive type, or an array of - * the preceding. - * @param in - * @param objectWritable - * @param conf - * @return the object - * @throws IOException - */ - @SuppressWarnings("unchecked") - public static Object readObject(DataInput in, - HbaseObjectWritable objectWritable, Configuration conf) - throws IOException { - Class declaredClass = CODE_TO_CLASS.get(WritableUtils.readVInt(in)); - Object instance; - if (declaredClass.isPrimitive()) { // primitive types - if (declaredClass == Boolean.TYPE) { // boolean - instance = Boolean.valueOf(in.readBoolean()); - } else if (declaredClass == Character.TYPE) { // char - instance = Character.valueOf(in.readChar()); - } else if (declaredClass == Byte.TYPE) { // byte - instance = Byte.valueOf(in.readByte()); - } else if (declaredClass == Short.TYPE) { // short - instance = Short.valueOf(in.readShort()); - } else if (declaredClass == Integer.TYPE) { // int - instance = Integer.valueOf(in.readInt()); - } else if (declaredClass == Long.TYPE) { // long - instance = Long.valueOf(in.readLong()); - } else if (declaredClass == Float.TYPE) { // float - instance = Float.valueOf(in.readFloat()); - } else if (declaredClass == Double.TYPE) { // double - instance = Double.valueOf(in.readDouble()); - } else if (declaredClass == Void.TYPE) { // void - instance = null; - } else { - throw new IllegalArgumentException("Not a primitive: "+declaredClass); - } - } else if (declaredClass.isArray()) { // array - if (declaredClass.equals(byte [].class)) { - instance = Bytes.readByteArray(in); - } else { - int length = in.readInt(); - instance = Array.newInstance(declaredClass.getComponentType(), length); - for (int i = 0; i < length; i++) { - Array.set(instance, i, readObject(in, conf)); - } - } - } else if (declaredClass.equals(Array.class)) { //an array not declared in CLASS_TO_CODE - Class componentType = readClass(conf, in); - int length = in.readInt(); - instance = Array.newInstance(componentType, length); - for (int i = 0; i < length; i++) { - Array.set(instance, i, readObject(in, conf)); - } - } else if (List.class.isAssignableFrom(declaredClass)) { // List - int length = in.readInt(); - instance = new ArrayList(length); - for (int i = 0; i < length; i++) { - ((ArrayList)instance).add(readObject(in, conf)); - } - } else if 
(declaredClass == String.class) { // String - instance = Text.readString(in); - } else if (declaredClass.isEnum()) { // enum - instance = Enum.valueOf((Class) declaredClass, - Text.readString(in)); - } else if (declaredClass == Message.class) { - String className = Text.readString(in); - try { - declaredClass = getClassByName(conf, className); - instance = tryInstantiateProtobuf(declaredClass, in); - } catch (ClassNotFoundException e) { - LOG.error("Can't find class " + className, e); - throw new IOException("Can't find class " + className, e); - } - } else if (Scan.class.isAssignableFrom(declaredClass)) { - int length = in.readInt(); - byte [] scanBytes = new byte[length]; - in.readFully(scanBytes); - ClientProtos.Scan.Builder scanProto = ClientProtos.Scan.newBuilder(); - instance = ProtobufUtil.toScan(scanProto.mergeFrom(scanBytes).build()); - } else { // Writable or Serializable - Class instanceClass = null; - int b = (byte)WritableUtils.readVInt(in); - if (b == NOT_ENCODED) { - String className = Text.readString(in); - try { - instanceClass = getClassByName(conf, className); - } catch (ClassNotFoundException e) { - LOG.error("Can't find class " + className, e); - throw new IOException("Can't find class " + className, e); - } - } else { - instanceClass = CODE_TO_CLASS.get(b); - } - if(Writable.class.isAssignableFrom(instanceClass)){ - Writable writable = WritableFactories.newInstance(instanceClass, conf); - try { - writable.readFields(in); - } catch (Exception e) { - LOG.error("Error in readFields", e); - throw new IOException("Error in readFields" , e); - } - instance = writable; - if (instanceClass == NullInstance.class) { // null - declaredClass = ((NullInstance)instance).declaredClass; - instance = null; - } - } else { - int length = in.readInt(); - byte[] objectBytes = new byte[length]; - in.readFully(objectBytes); - ByteArrayInputStream bis = null; - ObjectInputStream ois = null; - try { - bis = new ByteArrayInputStream(objectBytes); - ois = new ObjectInputStream(bis); - instance = ois.readObject(); - } catch (ClassNotFoundException e) { - LOG.error("Class not found when attempting to deserialize object", e); - throw new IOException("Class not found when attempting to " + - "deserialize object", e); - } finally { - if(bis!=null) bis.close(); - if(ois!=null) ois.close(); - } - } - } - if (objectWritable != null) { // store values - objectWritable.declaredClass = declaredClass; - objectWritable.instance = instance; - } - return instance; - } - - /** - * Try to instantiate a protocol buffer of the given message class - * from the given input stream. - * - * @param protoClass the class of the generated protocol buffer - * @param dataIn the input stream to read from - * @return the instantiated Message instance - * @throws IOException if an IO problem occurs - */ - public static Message tryInstantiateProtobuf( - Class protoClass, - DataInput dataIn) throws IOException { - - try { - if (dataIn instanceof InputStream) { - // We can use the built-in parseDelimitedFrom and not have to re-copy - // the data - Method parseMethod = getStaticProtobufMethod(protoClass, - "parseDelimitedFrom", InputStream.class); - return (Message)parseMethod.invoke(null, (InputStream)dataIn); - } else { - // Have to read it into a buffer first, since protobuf doesn't deal - // with the DataInput interface directly. 
- - // Read the size delimiter that writeDelimitedTo writes - int size = ProtoUtil.readRawVarint32(dataIn); - if (size < 0) { - throw new IOException("Invalid size: " + size); - } - - byte[] data = new byte[size]; - dataIn.readFully(data); - Method parseMethod = getStaticProtobufMethod(protoClass, - "parseFrom", byte[].class); - return (Message)parseMethod.invoke(null, data); - } - } catch (InvocationTargetException e) { - - if (e.getCause() instanceof IOException) { - throw (IOException)e.getCause(); - } else { - throw new IOException(e.getCause()); - } - } catch (IllegalAccessException iae) { - throw new AssertionError("Could not access parse method in " + - protoClass); - } - } - - static Method getStaticProtobufMethod(Class declaredClass, String method, - Class ... args) { - - try { - return declaredClass.getMethod(method, args); - } catch (Exception e) { - // This is a bug in Hadoop - protobufs should all have this static method - throw new AssertionError("Protocol buffer class " + declaredClass + - " does not have an accessible parseFrom(InputStream) method!"); - } - } - - @SuppressWarnings("unchecked") - private static Class getClassByName(Configuration conf, String className) - throws ClassNotFoundException { - if(conf != null) { - return conf.getClassByName(className); - } - ClassLoader cl = Thread.currentThread().getContextClassLoader(); - if(cl == null) { - cl = HbaseObjectWritable.class.getClassLoader(); - } - return Class.forName(className, true, cl); - } - - private static void addToMap(final Class clazz, final int code) { - CLASS_TO_CODE.put(clazz, code); - CODE_TO_CLASS.put(code, clazz); - } - - public void setConf(Configuration conf) { - this.conf = conf; - } - - public Configuration getConf() { - return this.conf; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java deleted file mode 100644 index 2320582..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java +++ /dev/null @@ -1,200 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.io; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -import org.apache.hadoop.io.Writable; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * Represents an interval of version timestamps. - *
- * Evaluated according to minStamp <= timestamp < maxStamp
- * or [minStamp,maxStamp) in interval notation.
- *
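A small usage sketch of the half-open interval just described (illustrative only, not part of the patch):

import org.apache.hadoop.hbase.io.TimeRange;

public class TimeRangeExample {
  public static void main(String[] args) throws Exception {
    TimeRange tr = new TimeRange(100L, 200L);       // covers timestamps 100..199
    System.out.println(tr.withinTimeRange(100L));   // true: minStamp is inclusive
    System.out.println(tr.withinTimeRange(199L));   // true
    System.out.println(tr.withinTimeRange(200L));   // false: maxStamp is exclusive
    System.out.println(tr.compare(50L));            // -1: below the range
    System.out.println(tr.compare(150L));           //  0: inside the range
    System.out.println(tr.compare(200L));           //  1: at or above maxStamp
  }
}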
          - * Only used internally; should not be accessed directly by clients. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class TimeRange implements Writable { - private long minStamp = 0L; - private long maxStamp = Long.MAX_VALUE; - private boolean allTime = false; - - /** - * Default constructor. - * Represents interval [0, Long.MAX_VALUE) (allTime) - */ - public TimeRange() { - allTime = true; - } - - /** - * Represents interval [minStamp, Long.MAX_VALUE) - * @param minStamp the minimum timestamp value, inclusive - */ - public TimeRange(long minStamp) { - this.minStamp = minStamp; - } - - /** - * Represents interval [minStamp, Long.MAX_VALUE) - * @param minStamp the minimum timestamp value, inclusive - */ - public TimeRange(byte [] minStamp) { - this.minStamp = Bytes.toLong(minStamp); - } - - /** - * Represents interval [minStamp, maxStamp) - * @param minStamp the minimum timestamp, inclusive - * @param maxStamp the maximum timestamp, exclusive - * @throws IOException - */ - public TimeRange(long minStamp, long maxStamp) - throws IOException { - if(maxStamp < minStamp) { - throw new IOException("maxStamp is smaller than minStamp"); - } - this.minStamp = minStamp; - this.maxStamp = maxStamp; - } - - /** - * Represents interval [minStamp, maxStamp) - * @param minStamp the minimum timestamp, inclusive - * @param maxStamp the maximum timestamp, exclusive - * @throws IOException - */ - public TimeRange(byte [] minStamp, byte [] maxStamp) - throws IOException { - this(Bytes.toLong(minStamp), Bytes.toLong(maxStamp)); - } - - /** - * @return the smallest timestamp that should be considered - */ - public long getMin() { - return minStamp; - } - - /** - * @return the biggest timestamp that should be considered - */ - public long getMax() { - return maxStamp; - } - - /** - * Check if it is for all time - * @return true if it is for all time - */ - public boolean isAllTime() { - return allTime; - } - - /** - * Check if the specified timestamp is within this TimeRange. - *
- * Returns true if within interval [minStamp, maxStamp), false
- * if not.
- * @param bytes timestamp to check
- * @param offset offset into the bytes
- * @return true if within TimeRange, false if not
- */
- public boolean withinTimeRange(byte [] bytes, int offset) {
-   if(allTime) return true;
-   return withinTimeRange(Bytes.toLong(bytes, offset));
- }
-
- /**
- * Check if the specified timestamp is within this TimeRange.
- *
- * Returns true if within interval [minStamp, maxStamp), false
- * if not.
- * @param timestamp timestamp to check
- * @return true if within TimeRange, false if not
- */
- public boolean withinTimeRange(long timestamp) {
-   if(allTime) return true;
-   // check if >= minStamp
-   return (minStamp <= timestamp && timestamp < maxStamp);
- }
-
- /**
- * Check if the specified timestamp is within this TimeRange.
- *
          - * Returns true if within interval [minStamp, maxStamp), false - * if not. - * @param timestamp timestamp to check - * @return true if within TimeRange, false if not - */ - public boolean withinOrAfterTimeRange(long timestamp) { - if(allTime) return true; - // check if >= minStamp - return (timestamp >= minStamp); - } - - /** - * Compare the timestamp to timerange - * @param timestamp - * @return -1 if timestamp is less than timerange, - * 0 if timestamp is within timerange, - * 1 if timestamp is greater than timerange - */ - public int compare(long timestamp) { - if (timestamp < minStamp) { - return -1; - } else if (timestamp >= maxStamp) { - return 1; - } else { - return 0; - } - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("maxStamp="); - sb.append(this.maxStamp); - sb.append(", minStamp="); - sb.append(this.minStamp); - return sb.toString(); - } - - //Writable - public void readFields(final DataInput in) throws IOException { - this.minStamp = in.readLong(); - this.maxStamp = in.readLong(); - this.allTime = in.readBoolean(); - } - - public void write(final DataOutput out) throws IOException { - out.writeLong(minStamp); - out.writeLong(maxStamp); - out.writeBoolean(this.allTime); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java deleted file mode 100644 index 1da99c7..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.io; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * An optional interface to 'size' writables. - */ -@InterfaceAudience.Private -public interface WritableWithSize { - /** - * Provide a size hint to the caller. write() should ideally - * not go beyond this if at all possible. - * - * You can return 0 if there is no size hint. 
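As a hedged illustration of the contract described here, a Writable that can report its own serialized size might look like the sketch below; SizedPayload and its field are invented for the example.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.hbase.io.WritableWithSize;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;

public class SizedPayload implements Writable, WritableWithSize {
  private byte[] payload = new byte[0];

  public long getWritableSize() {
    // A vint length prefix (at most 5 bytes) plus the payload itself;
    // returning 0 would mean "no hint".
    return 5L + payload.length;
  }

  public void write(DataOutput out) throws IOException {
    Bytes.writeByteArray(out, payload);
  }

  public void readFields(DataInput in) throws IOException {
    payload = Bytes.readByteArray(in);
  }
}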
- * - * @return the size of the writable - */ - public long getWritableSize(); -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 44599dd..73c0955 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -343,7 +343,7 @@ public class CacheConfig { MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); long cacheSize = (long)(mu.getMax() * cachePercentage); int blockSize = conf.getInt("hbase.offheapcache.minblocksize", - HFile.DEFAULT_BLOCKSIZE); + HConstants.DEFAULT_BLOCKSIZE); long offHeapCacheSize = (long) (conf.getFloat("hbase.offheapcache.percentage", (float) 0) * DirectMemoryUtils.getDirectMemorySize()); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 0137337..d3a4cdc 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -144,11 +144,6 @@ public class HFile { public final static int MAXIMUM_KEY_LENGTH = Integer.MAX_VALUE; /** - * Default block size for an HFile. - */ - public final static int DEFAULT_BLOCKSIZE = 64 * 1024; - - /** * Default compression: none. */ public final static Compression.Algorithm DEFAULT_COMPRESSION_ALGORITHM = diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java deleted file mode 100644 index 322e676..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import com.google.protobuf.RpcCallback; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -import java.io.IOException; -import java.io.InterruptedIOException; - -/** - * Simple {@link RpcCallback} implementation providing a - * {@link java.util.concurrent.Future}-like {@link BlockingRpcCallback#get()} method, which - * will block util the instance's {@link BlockingRpcCallback#run(Object)} method has been called. - * {@code R} is the RPC response type that will be passed to the {@link #run(Object)} method. 
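A minimal sketch of that get()/run() handshake (not part of the patch); the extra thread here stands in for the RPC layer delivering the response:

import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;

public class BlockingRpcCallbackExample {
  public static void main(String[] args) throws Exception {
    final BlockingRpcCallback<String> done = new BlockingRpcCallback<String>();

    // Pretend this is the RPC layer completing the call asynchronously.
    new Thread(new Runnable() {
      public void run() {
        done.run("response-payload");   // stores the result and wakes up get()
      }
    }).start();

    // Blocks until run(...) has been called, then returns its argument.
    System.out.println(done.get());
  }
}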
- */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class BlockingRpcCallback implements RpcCallback { - private R result; - private boolean resultSet = false; - - /** - * Called on completion of the RPC call with the response object, or {@code null} in the case of - * an error. - * @param parameter the response object or {@code null} if an error occurred - */ - @Override - public void run(R parameter) { - synchronized (this) { - result = parameter; - resultSet = true; - this.notify(); - } - } - - /** - * Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was - * passed. When used asynchronously, this method will block until the {@link #run(Object)} - * method has been called. - * @return the response object or {@code null} if no response was passed - */ - public synchronized R get() throws IOException { - while (!resultSet) { - try { - this.wait(); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - InterruptedIOException exception = new InterruptedIOException(ie.getMessage()); - exception.initCause(ie); - throw exception; - } - } - return result; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ClientCache.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ClientCache.java deleted file mode 100644 index f33f59c..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ClientCache.java +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.util.HashMap; -import java.util.Map; - -import javax.net.SocketFactory; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.io.HbaseObjectWritable; -import org.apache.hadoop.io.Writable; - -/** - * Cache a client using its socket factory as the hash key. - * Enables reuse/sharing of clients on a per SocketFactory basis. A client - * establishes certain configuration dependent characteristics like timeouts, - * tcp-keepalive (true or false), etc. For more details on the characteristics, - * look at {@link HBaseClient#HBaseClient(Configuration, SocketFactory)} - * Creation of dynamic proxies to protocols creates the clients (and increments - * reference count once created), and stopping of the proxies leads to clearing - * out references and when the reference drops to zero, the cache mapping is - * cleared. - */ -class ClientCache { - private Map clients = - new HashMap(); - - protected ClientCache() {} - - /** - * Construct & cache an IPC client with the user-provided SocketFactory - * if no cached client exists. 
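A rough sketch of the reference-counted reuse described above (not part of the patch). It assumes code living in the same org.apache.hadoop.hbase.ipc package, since ClientCache and its methods are package-scoped, and the demo class name is invented.

package org.apache.hadoop.hbase.ipc;

import javax.net.SocketFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.net.NetUtils;

public class ClientCacheExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    SocketFactory factory = NetUtils.getDefaultSocketFactory(conf);

    ClientCache cache = new ClientCache();
    HBaseClient c1 = cache.getClient(conf, factory);  // created and cached, refcount = 1
    HBaseClient c2 = cache.getClient(conf, factory);  // same instance, refcount = 2
    System.out.println(c1 == c2);                     // true: shared per SocketFactory

    cache.stopClient(c2);   // refcount 2 -> 1, client stays up
    cache.stopClient(c1);   // refcount 1 -> 0, removed from the cache and stopped
  }
}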
- * - * @param conf Configuration - * @param factory socket factory - * @return an IPC client - */ - @SuppressWarnings("unchecked") - protected synchronized HBaseClient getClient(Configuration conf, SocketFactory factory) { - - HBaseClient client = clients.get(factory); - if (client == null) { - Class hbaseClientClass = (Class) conf - .getClass(HConstants.HBASECLIENT_IMPL, HBaseClient.class); - - // Make an hbase client instead of hadoop Client. - try { - Constructor cst = hbaseClientClass.getConstructor( - Configuration.class, SocketFactory.class); - client = cst.newInstance(conf, factory); - } catch (InvocationTargetException e) { - throw new RuntimeException(e); - } catch (InstantiationException e) { - throw new RuntimeException(e); - } catch (IllegalAccessException e) { - throw new RuntimeException(e); - } catch (NoSuchMethodException e) { - throw new RuntimeException("No matching constructor in "+hbaseClientClass.getName(), e); - } - - clients.put(factory, client); - } else { - client.incCount(); - } - return client; - } - - /** - * Stop a RPC client connection - * A RPC client is closed only when its reference count becomes zero. - * @param client client to stop - */ - protected void stopClient(HBaseClient client) { - synchronized (this) { - client.decCount(); - if (client.isZeroReference()) { - clients.remove(client.getSocketFactory()); - } - } - if (client.isZeroReference()) { - client.stop(); - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java deleted file mode 100644 index d3d92e2..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.ipc; - -import com.google.protobuf.*; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.protobuf.ResponseConverter; - -import java.io.IOException; - -/** - * Base class which provides clients with an RPC connection to - * call coprocessor endpoint {@link Service}s - */ -@InterfaceAudience.Private -public abstract class CoprocessorRpcChannel implements RpcChannel, BlockingRpcChannel { - private static Log LOG = LogFactory.getLog(CoprocessorRpcChannel.class); - - @Override - public void callMethod(Descriptors.MethodDescriptor method, - RpcController controller, - Message request, Message responsePrototype, - RpcCallback callback) { - Message response = null; - try { - response = callExecService(method, request, responsePrototype); - } catch (IOException ioe) { - LOG.warn("Call failed on IOException", ioe); - ResponseConverter.setControllerException(controller, ioe); - } - if (callback != null) { - callback.run(response); - } - } - - @Override - public Message callBlockingMethod(Descriptors.MethodDescriptor method, - RpcController controller, - Message request, Message responsePrototype) - throws ServiceException { - try { - return callExecService(method, request, responsePrototype); - } catch (IOException ioe) { - throw new ServiceException("Error calling method "+method.getFullName(), ioe); - } - } - - protected abstract Message callExecService(Descriptors.MethodDescriptor method, - Message request, Message responsePrototype) throws IOException; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java deleted file mode 100644 index b770354..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java +++ /dev/null @@ -1,1504 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.ipc; - -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.EOFException; -import java.io.FilterInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.lang.reflect.Method; -import java.net.ConnectException; -import java.net.InetSocketAddress; -import java.net.Socket; -import java.net.SocketException; -import java.net.SocketTimeoutException; -import java.net.UnknownHostException; -import java.security.PrivilegedExceptionAction; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Random; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; - -import javax.net.SocketFactory; -import javax.security.sasl.SaslException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcException; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcRequestBody; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponseBody; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponseHeader; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponseHeader.Status; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcRequestHeader; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation; -import org.apache.hadoop.hbase.protobuf.generated.Tracing.RPCTInfo; -import org.apache.hadoop.hbase.security.HBaseSaslRpcClient; -import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.AuthMethod; -import org.apache.hadoop.hbase.security.KerberosInfo; -import org.apache.hadoop.hbase.security.TokenInfo; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier; -import org.apache.hadoop.hbase.security.token.AuthenticationTokenSelector; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.PoolMap; -import org.apache.hadoop.hbase.util.PoolMap.PoolType; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.security.token.TokenSelector; -import org.cloudera.htrace.Span; -import org.cloudera.htrace.Trace; - -import com.google.protobuf.CodedOutputStream; -import com.google.protobuf.Message; -import com.google.protobuf.Message.Builder; - - -/** A client for an IPC service. IPC calls take a single Protobuf message as a - * parameter, and return a single Protobuf message as their value. 
A service runs on
- * a port and is defined by a parameter class and a value class.
- *
- *
          This is the org.apache.hadoop.ipc.Client renamed as HBaseClient and - * moved into this package so can access package-private methods. - * - * @see HBaseServer - */ -@InterfaceAudience.Private -public class HBaseClient { - - public static final Log LOG = LogFactory - .getLog("org.apache.hadoop.ipc.HBaseClient"); - protected final PoolMap connections; - private static final Map methodInstances = - new ConcurrentHashMap(); - - protected int counter; // counter for call ids - protected final AtomicBoolean running = new AtomicBoolean(true); // if client runs - final protected Configuration conf; - final protected int maxIdleTime; // connections will be culled if it was idle for - // maxIdleTime microsecs - final protected int maxRetries; //the max. no. of retries for socket connections - final protected long failureSleep; // Time to sleep before retry on failure. - protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm - protected final boolean tcpKeepAlive; // if T then use keepalives - protected int pingInterval; // how often sends ping to the server in msecs - protected int socketTimeout; // socket timeout - protected FailedServers failedServers; - - protected final SocketFactory socketFactory; // how to create sockets - private int refCount = 1; - protected String clusterId; - - final private static String PING_INTERVAL_NAME = "ipc.ping.interval"; - final private static String SOCKET_TIMEOUT = "ipc.socket.timeout"; - final static int DEFAULT_PING_INTERVAL = 60000; // 1 min - final static int DEFAULT_SOCKET_TIMEOUT = 20000; // 20 seconds - final static int PING_CALL_ID = -1; - - public final static String FAILED_SERVER_EXPIRY_KEY = "hbase.ipc.client.failed.servers.expiry"; - public final static int FAILED_SERVER_EXPIRY_DEFAULT = 2000; - - /** - * A class to manage a list of servers that failed recently. - */ - static class FailedServers { - private final LinkedList> failedServers = new - LinkedList>(); - private final int recheckServersTimeout; - - FailedServers(Configuration conf) { - this.recheckServersTimeout = conf.getInt( - FAILED_SERVER_EXPIRY_KEY, FAILED_SERVER_EXPIRY_DEFAULT); - } - - /** - * Add an address to the list of the failed servers list. - */ - public synchronized void addToFailedServers(InetSocketAddress address) { - final long expiry = EnvironmentEdgeManager.currentTimeMillis() + recheckServersTimeout; - failedServers.addFirst(new Pair(expiry, address.toString())); - } - - /** - * Check if the server should be considered as bad. Clean the old entries of the list. 
- * - * @return true if the server is in the failed servers list - */ - public synchronized boolean isFailedServer(final InetSocketAddress address) { - if (failedServers.isEmpty()) { - return false; - } - - final String lookup = address.toString(); - final long now = EnvironmentEdgeManager.currentTimeMillis(); - - // iterate, looking for the search entry and cleaning expired entries - Iterator> it = failedServers.iterator(); - while (it.hasNext()) { - Pair cur = it.next(); - if (cur.getFirst() < now) { - it.remove(); - } else { - if (lookup.equals(cur.getSecond())) { - return true; - } - } - } - - return false; - } - - } - - public static class FailedServerException extends IOException { - public FailedServerException(String s) { - super(s); - } - } - - - /** - * set the ping interval value in configuration - * - * @param conf Configuration - * @param pingInterval the ping interval - */ - public static void setPingInterval(Configuration conf, int pingInterval) { - conf.setInt(PING_INTERVAL_NAME, pingInterval); - } - - /** - * Get the ping interval from configuration; - * If not set in the configuration, return the default value. - * - * @param conf Configuration - * @return the ping interval - */ - static int getPingInterval(Configuration conf) { - return conf.getInt(PING_INTERVAL_NAME, DEFAULT_PING_INTERVAL); - } - - /** - * Set the socket timeout - * @param conf Configuration - * @param socketTimeout the socket timeout - */ - public static void setSocketTimeout(Configuration conf, int socketTimeout) { - conf.setInt(SOCKET_TIMEOUT, socketTimeout); - } - - /** - * @return the socket timeout - */ - static int getSocketTimeout(Configuration conf) { - return conf.getInt(SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT); - } - - /** - * Increment this client's reference count - * - */ - synchronized void incCount() { - refCount++; - } - - /** - * Decrement this client's reference count - * - */ - synchronized void decCount() { - refCount--; - } - - /** - * Return if this client has no reference - * - * @return true if this client has no reference; false otherwise - */ - synchronized boolean isZeroReference() { - return refCount==0; - } - - /** A call waiting for a value. */ - protected class Call { - final int id; // call id - final RpcRequestBody param; // rpc request object - Message value; // value, null if error - IOException error; // exception, null if value - boolean done; // true when call is done - long startTime; - - protected Call(RpcRequestBody param) { - this.param = param; - this.startTime = System.currentTimeMillis(); - synchronized (HBaseClient.this) { - this.id = counter++; - } - } - - /** Indicate when the call is complete and the - * value or error are available. Notifies by default. */ - protected synchronized void callComplete() { - this.done = true; - notify(); // notify caller - } - - /** Set the exception when there is an error. - * Notify the caller the call is done. - * - * @param error exception thrown by the call; either local or remote - */ - public synchronized void setException(IOException error) { - this.error = error; - callComplete(); - } - - /** Set the return value when there is no error. - * Notify the caller the call is done. - * - * @param value return value of the call. 
- */ - public synchronized void setValue(Message value) { - this.value = value; - callComplete(); - } - - public long getStartTime() { - return this.startTime; - } - } - protected static Map> tokenHandlers = - new HashMap>(); - static { - tokenHandlers.put(AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.toString(), - new AuthenticationTokenSelector()); - } - - /** - * Creates a connection. Can be overridden by a subclass for testing. - * @param remoteId - the ConnectionId to use for the connection creation. - */ - protected Connection createConnection(ConnectionId remoteId) throws IOException { - return new Connection(remoteId); - } - - /** Thread that reads responses and notifies callers. Each connection owns a - * socket connected to a remote address. Calls are multiplexed through this - * socket: responses may be delivered out of order. */ - protected class Connection extends Thread { - private ConnectionHeader header; // connection header - protected ConnectionId remoteId; - protected Socket socket = null; // connected socket - protected DataInputStream in; - protected DataOutputStream out; - private InetSocketAddress server; // server ip:port - private String serverPrincipal; // server's krb5 principal name - private AuthMethod authMethod; // authentication method - private boolean useSasl; - private Token token; - private HBaseSaslRpcClient saslRpcClient; - private int reloginMaxBackoff; // max pause before relogin on sasl failure - - // currently active calls - protected final ConcurrentSkipListMap calls = new ConcurrentSkipListMap(); - protected final AtomicLong lastActivity = new AtomicLong();// last I/O activity time - protected final AtomicBoolean shouldCloseConnection = new AtomicBoolean(); // indicate if the connection is closed - protected IOException closeException; // close reason - - Connection(ConnectionId remoteId) throws IOException { - if (remoteId.getAddress().isUnresolved()) { - throw new UnknownHostException("unknown host: " + - remoteId.getAddress().getHostName()); - } - this.server = remoteId.getAddress(); - - UserGroupInformation ticket = remoteId.getTicket().getUGI(); - Class protocol = remoteId.getProtocol(); - this.useSasl = User.isHBaseSecurityEnabled(conf); - if (useSasl && protocol != null) { - TokenInfo tokenInfo = protocol.getAnnotation(TokenInfo.class); - if (tokenInfo != null) { - TokenSelector tokenSelector = - tokenHandlers.get(tokenInfo.value()); - if (tokenSelector != null) { - token = tokenSelector.selectToken(new Text(clusterId), - ticket.getTokens()); - } else if (LOG.isDebugEnabled()) { - LOG.debug("No token selector found for type "+tokenInfo.value()); - } - } - KerberosInfo krbInfo = protocol.getAnnotation(KerberosInfo.class); - if (krbInfo != null) { - String serverKey = krbInfo.serverPrincipal(); - if (serverKey == null) { - throw new IOException( - "Can't obtain server Kerberos config key from KerberosInfo"); - } - serverPrincipal = SecurityUtil.getServerPrincipal( - conf.get(serverKey), server.getAddress().getCanonicalHostName().toLowerCase()); - if (LOG.isDebugEnabled()) { - LOG.debug("RPC Server Kerberos principal name for protocol=" - + protocol.getCanonicalName() + " is " + serverPrincipal); - } - } - } - - if (!useSasl) { - authMethod = AuthMethod.SIMPLE; - } else if (token != null) { - authMethod = AuthMethod.DIGEST; - } else { - authMethod = AuthMethod.KERBEROS; - } - - if (LOG.isDebugEnabled()) - LOG.debug("Use " + authMethod + " authentication for protocol " - + protocol.getSimpleName()); - - reloginMaxBackoff = 
conf.getInt("hbase.security.relogin.maxbackoff", 5000); - this.remoteId = remoteId; - - ConnectionHeader.Builder builder = ConnectionHeader.newBuilder(); - builder.setProtocol(protocol == null ? "" : protocol.getName()); - UserInformation userInfoPB; - if ((userInfoPB = getUserInfoPB(ticket)) != null) { - builder.setUserInfo(userInfoPB); - } - this.header = builder.build(); - - this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " + - remoteId.getAddress().toString() + - ((ticket==null)?" from an unknown user": (" from " - + ticket.getUserName()))); - this.setDaemon(true); - } - - private UserInformation getUserInfoPB(UserGroupInformation ugi) { - if (ugi == null || authMethod == AuthMethod.DIGEST) { - // Don't send user for token auth - return null; - } - UserInformation.Builder userInfoPB = UserInformation.newBuilder(); - if (ugi != null) { - if (authMethod == AuthMethod.KERBEROS) { - // Send effective user for Kerberos auth - userInfoPB.setEffectiveUser(ugi.getUserName()); - } else if (authMethod == AuthMethod.SIMPLE) { - //Send both effective user and real user for simple auth - userInfoPB.setEffectiveUser(ugi.getUserName()); - if (ugi.getRealUser() != null) { - userInfoPB.setRealUser(ugi.getRealUser().getUserName()); - } - } - } - return userInfoPB.build(); - } - - /** Update lastActivity with the current time. */ - protected void touch() { - lastActivity.set(System.currentTimeMillis()); - } - - /** - * Add a call to this connection's call queue and notify - * a listener; synchronized. If the connection is dead, the call is not added, and the - * caller is notified. - * This function can return a connection that is already marked as 'shouldCloseConnection' - * It is up to the user code to check this status. - * @param call to add - */ - protected synchronized void addCall(Call call) { - // If the connection is about to close, we manage this as if the call was already added - // to the connection calls list. If not, the connection creations are serialized, as - // mentioned in HBASE-6364 - if (this.shouldCloseConnection.get()) { - if (this.closeException == null) { - call.setException(new IOException( - "Call " + call.id + " not added as the connection " + remoteId + " is closing")); - } else { - call.setException(this.closeException); - } - synchronized (call) { - call.notifyAll(); - } - } else { - calls.put(call.id, call); - notify(); - } - } - - /** This class sends a ping to the remote side when timeout on - * reading. If no failure is detected, it retries until at least - * a byte is read. - */ - protected class PingInputStream extends FilterInputStream { - /* constructor */ - protected PingInputStream(InputStream in) { - super(in); - } - - /* Process timeout exception - * if the connection is not going to be closed, send a ping. - * otherwise, throw the timeout exception. - */ - private void handleTimeout(SocketTimeoutException e) throws IOException { - if (shouldCloseConnection.get() || !running.get() || - remoteId.rpcTimeout > 0) { - throw e; - } - sendPing(); - } - - /** Read a byte from the stream. - * Send a ping if timeout on read. Retries if no failure is detected - * until a byte is read. - * @throws IOException for any IO problem other than socket timeout - */ - @Override - public int read() throws IOException { - do { - try { - return super.read(); - } catch (SocketTimeoutException e) { - handleTimeout(e); - } - } while (true); - } - - /** Read bytes into a buffer starting from offset off - * Send a ping if timeout on read. 
Retries if no failure is detected - * until a byte is read. - * - * @return the total number of bytes read; -1 if the connection is closed. - */ - @Override - public int read(byte[] buf, int off, int len) throws IOException { - do { - try { - return super.read(buf, off, len); - } catch (SocketTimeoutException e) { - handleTimeout(e); - } - } while (true); - } - } - - protected synchronized void setupConnection() throws IOException { - short ioFailures = 0; - short timeoutFailures = 0; - while (true) { - try { - this.socket = socketFactory.createSocket(); - this.socket.setTcpNoDelay(tcpNoDelay); - this.socket.setKeepAlive(tcpKeepAlive); - // connection time out is 20s - NetUtils.connect(this.socket, remoteId.getAddress(), - getSocketTimeout(conf)); - if (remoteId.rpcTimeout > 0) { - pingInterval = remoteId.rpcTimeout; // overwrite pingInterval - } - this.socket.setSoTimeout(pingInterval); - return; - } catch (SocketTimeoutException toe) { - /* The max number of retries is 45, - * which amounts to 20s*45 = 15 minutes retries. - */ - handleConnectionFailure(timeoutFailures++, maxRetries, toe); - } catch (IOException ie) { - handleConnectionFailure(ioFailures++, maxRetries, ie); - } - } - } - - protected void closeConnection() { - // close the current connection - if (socket != null) { - try { - socket.close(); - } catch (IOException e) { - LOG.warn("Not able to close a socket", e); - } - } - // set socket to null so that the next call to setupIOstreams - // can start the process of connect all over again. - socket = null; - } - - /** - * Handle connection failures - * - * If the current number of retries is equal to the max number of retries, - * stop retrying and throw the exception; Otherwise backoff N seconds and - * try connecting again. - * - * This Method is only called from inside setupIOstreams(), which is - * synchronized. Hence the sleep is synchronized; the locks will be retained. - * - * @param curRetries current number of retries - * @param maxRetries max number of retries allowed - * @param ioe failure reason - * @throws IOException if max number of retries is reached - */ - private void handleConnectionFailure( - int curRetries, int maxRetries, IOException ioe) throws IOException { - - closeConnection(); - - // throw the exception if the maximum number of retries is reached - if (curRetries >= maxRetries) { - throw ioe; - } - - // otherwise back off and retry - try { - Thread.sleep(failureSleep); - } catch (InterruptedException ignored) {} - - LOG.info("Retrying connect to server: " + remoteId.getAddress() + - " after sleeping " + failureSleep + "ms. Already tried " + curRetries + - " time(s)."); - } - - /* wait till someone signals us to start reading RPC response or - * it is idle too long, it is marked as to be closed, - * or the client is marked as not running. - * - * Return true if it is time to read a response; false otherwise. 
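
/*
 * A standalone sketch of the read-with-ping pattern implemented by
 * PingInputStream above: a read that hits the socket timeout triggers a
 * keep-alive ping (unless the caller configured a hard RPC timeout) and the
 * read is retried. sendPing() is a placeholder hook, not the actual
 * connection method.
 */
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.SocketTimeoutException;

abstract class PingOnTimeoutStreamSketch extends FilterInputStream {
  private final boolean hardRpcTimeout;

  protected PingOnTimeoutStreamSketch(InputStream in, boolean hardRpcTimeout) {
    super(in);
    this.hardRpcTimeout = hardRpcTimeout;
  }

  /** Supplied by the owning connection; writes a ping frame to the server. */
  protected abstract void sendPing() throws IOException;

  @Override
  public int read() throws IOException {
    while (true) {
      try {
        return super.read();
      } catch (SocketTimeoutException e) {
        if (hardRpcTimeout) {
          throw e;      // the caller asked for a real deadline, surface the timeout
        }
        sendPing();     // otherwise prove the connection is alive and retry the read
      }
    }
  }
}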
- */ - protected synchronized boolean waitForWork() { - if (calls.isEmpty() && !shouldCloseConnection.get() && running.get()) { - long timeout = maxIdleTime- - (System.currentTimeMillis()-lastActivity.get()); - if (timeout>0) { - try { - wait(timeout); - } catch (InterruptedException ignored) {} - } - } - - if (!calls.isEmpty() && !shouldCloseConnection.get() && running.get()) { - return true; - } else if (shouldCloseConnection.get()) { - return false; - } else if (calls.isEmpty()) { // idle connection closed or stopped - markClosed(null); - return false; - } else { // get stopped but there are still pending requests - markClosed((IOException)new IOException().initCause( - new InterruptedException())); - return false; - } - } - - public InetSocketAddress getRemoteAddress() { - return remoteId.getAddress(); - } - - /* Send a ping to the server if the time elapsed - * since last I/O activity is equal to or greater than the ping interval - */ - protected synchronized void sendPing() throws IOException { - long curTime = System.currentTimeMillis(); - if ( curTime - lastActivity.get() >= pingInterval) { - lastActivity.set(curTime); - //noinspection SynchronizeOnNonFinalField - synchronized (this.out) { - out.writeInt(PING_CALL_ID); - out.flush(); - } - } - } - - @Override - public void run() { - if (LOG.isDebugEnabled()) - LOG.debug(getName() + ": starting, having connections " - + connections.size()); - - try { - while (waitForWork()) {//wait here for work - read or close connection - receiveResponse(); - } - } catch (Throwable t) { - LOG.warn("Unexpected exception receiving call responses", t); - markClosed(new IOException("Unexpected exception receiving call responses", t)); - } - - close(); - - if (LOG.isDebugEnabled()) - LOG.debug(getName() + ": stopped, remaining connections " - + connections.size()); - } - - private synchronized void disposeSasl() { - if (saslRpcClient != null) { - try { - saslRpcClient.dispose(); - saslRpcClient = null; - } catch (IOException ioe) { - LOG.error("Error disposing of SASL client", ioe); - } - } - } - - private synchronized boolean shouldAuthenticateOverKrb() throws IOException { - UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); - UserGroupInformation currentUser = - UserGroupInformation.getCurrentUser(); - UserGroupInformation realUser = currentUser.getRealUser(); - return authMethod == AuthMethod.KERBEROS && - loginUser != null && - //Make sure user logged in using Kerberos either keytab or TGT - loginUser.hasKerberosCredentials() && - // relogin only in case it is the login user (e.g. JT) - // or superuser (like oozie). - (loginUser.equals(currentUser) || loginUser.equals(realUser)); - } - - private synchronized boolean setupSaslConnection(final InputStream in2, - final OutputStream out2) throws IOException { - saslRpcClient = new HBaseSaslRpcClient(authMethod, token, serverPrincipal); - return saslRpcClient.saslConnect(in2, out2); - } - - /** - * If multiple clients with the same principal try to connect - * to the same server at the same time, the server assumes a - * replay attack is in progress. This is a feature of kerberos. - * In order to work around this, what is done is that the client - * backs off randomly and tries to initiate the connection - * again. - * The other problem is to do with ticket expiry. To handle that, - * a relogin is attempted. - *
<p>
          - * The retry logic is governed by the {@link #shouldAuthenticateOverKrb} - * method. In case when the user doesn't have valid credentials, we don't - * need to retry (from cache or ticket). In such cases, it is prudent to - * throw a runtime exception when we receive a SaslException from the - * underlying authentication implementation, so there is no retry from - * other high level (for eg, HCM or HBaseAdmin). - *
</p>
          - */ - private synchronized void handleSaslConnectionFailure( - final int currRetries, - final int maxRetries, final Exception ex, final Random rand, - final UserGroupInformation user) - throws IOException, InterruptedException{ - user.doAs(new PrivilegedExceptionAction() { - public Object run() throws IOException, InterruptedException { - closeConnection(); - if (shouldAuthenticateOverKrb()) { - if (currRetries < maxRetries) { - LOG.debug("Exception encountered while connecting to " + - "the server : " + ex); - //try re-login - if (UserGroupInformation.isLoginKeytabBased()) { - UserGroupInformation.getLoginUser().reloginFromKeytab(); - } else { - UserGroupInformation.getLoginUser().reloginFromTicketCache(); - } - disposeSasl(); - //have granularity of milliseconds - //we are sleeping with the Connection lock held but since this - //connection instance is being used for connecting to the server - //in question, it is okay - Thread.sleep((rand.nextInt(reloginMaxBackoff) + 1)); - return null; - } else { - String msg = "Couldn't setup connection for " + - UserGroupInformation.getLoginUser().getUserName() + - " to " + serverPrincipal; - LOG.warn(msg); - throw (IOException) new IOException(msg).initCause(ex); - } - } else { - LOG.warn("Exception encountered while connecting to " + - "the server : " + ex); - } - if (ex instanceof RemoteException) { - throw (RemoteException)ex; - } - if (ex instanceof SaslException) { - String msg = "SASL authentication failed." + - " The most likely cause is missing or invalid credentials." + - " Consider 'kinit'."; - LOG.fatal(msg, ex); - throw new RuntimeException(msg, ex); - } - throw new IOException(ex); - } - }); - } - - protected synchronized void setupIOstreams() - throws IOException, InterruptedException { - if (socket != null || shouldCloseConnection.get()) { - return; - } - - if (failedServers.isFailedServer(remoteId.getAddress())) { - if (LOG.isDebugEnabled()) { - LOG.debug("Not trying to connect to " + server + - " this server is in the failed servers list"); - } - IOException e = new FailedServerException( - "This server is in the failed servers list: " + server); - markClosed(e); - close(); - throw e; - } - - try { - if (LOG.isDebugEnabled()) { - LOG.debug("Connecting to "+server); - } - short numRetries = 0; - final short MAX_RETRIES = 5; - Random rand = null; - while (true) { - setupConnection(); - InputStream inStream = NetUtils.getInputStream(socket); - OutputStream outStream = NetUtils.getOutputStream(socket); - writeRpcHeader(outStream); - if (useSasl) { - final InputStream in2 = inStream; - final OutputStream out2 = outStream; - UserGroupInformation ticket = remoteId.getTicket().getUGI(); - if (authMethod == AuthMethod.KERBEROS) { - if (ticket != null && ticket.getRealUser() != null) { - ticket = ticket.getRealUser(); - } - } - boolean continueSasl = false; - try { - continueSasl = - ticket.doAs(new PrivilegedExceptionAction() { - @Override - public Boolean run() throws IOException { - return setupSaslConnection(in2, out2); - } - }); - } catch (Exception ex) { - if (rand == null) { - rand = new Random(); - } - handleSaslConnectionFailure(numRetries++, MAX_RETRIES, ex, rand, - ticket); - continue; - } - if (continueSasl) { - // Sasl connect is successful. Let's set up Sasl i/o streams. - inStream = saslRpcClient.getInputStream(inStream); - outStream = saslRpcClient.getOutputStream(outStream); - } else { - // fall back to simple auth because server told us so. 
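
/*
 * A condensed sketch of the relogin-and-backoff retry described in the
 * handleSaslConnectionFailure() javadoc above: refresh the Kerberos
 * credentials, sleep a random amount bounded by the configured maximum, then
 * retry the handshake. relogin() and connect() are placeholder hooks standing
 * in for the UserGroupInformation relogin calls and the SASL handshake.
 */
import java.io.IOException;
import java.util.Random;

abstract class SaslRetrySketch {
  private final Random rand = new Random();
  private final int maxBackoffMillis;
  private final int maxRetries;

  SaslRetrySketch(int maxBackoffMillis, int maxRetries) {
    this.maxBackoffMillis = maxBackoffMillis;
    this.maxRetries = maxRetries;
  }

  protected abstract void relogin() throws IOException;  // keytab or ticket-cache relogin
  protected abstract void connect() throws IOException;  // one SASL handshake attempt

  void connectWithRetries() throws IOException, InterruptedException {
    for (int attempt = 0; ; attempt++) {
      try {
        connect();
        return;
      } catch (IOException e) {
        if (attempt >= maxRetries) {
          throw e;                                         // out of retries, surface the failure
        }
        relogin();                                         // refresh credentials before trying again
        Thread.sleep(rand.nextInt(maxBackoffMillis) + 1L); // random pause avoids replay detection
      }
    }
  }
}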
- authMethod = AuthMethod.SIMPLE; - useSasl = false; - } - } - this.in = new DataInputStream(new BufferedInputStream - (new PingInputStream(inStream))); - this.out = new DataOutputStream - (new BufferedOutputStream(outStream)); - writeHeader(); - - // update last activity time - touch(); - - // start the receiver thread after the socket connection has been set up - start(); - return; - } - } catch (IOException e) { - failedServers.addToFailedServers(remoteId.address); - markClosed(e); - close(); - - throw e; - } - } - - /* Write the RPC header */ - private void writeRpcHeader(OutputStream outStream) throws IOException { - DataOutputStream out = new DataOutputStream(new BufferedOutputStream(outStream)); - // Write out the header, version and authentication method - out.write(HBaseServer.HEADER.array()); - out.write(HBaseServer.CURRENT_VERSION); - authMethod.write(out); - out.flush(); - } - - /** - * Write the protocol header for each connection - * Out is not synchronized because only the first thread does this. - */ - private void writeHeader() throws IOException { - // Write out the ConnectionHeader - out.writeInt(header.getSerializedSize()); - header.writeTo(out); - } - - /** Close the connection. */ - protected synchronized void close() { - if (!shouldCloseConnection.get()) { - LOG.error("The connection is not in the closed state"); - return; - } - - // release the resources - // first thing to do;take the connection out of the connection list - synchronized (connections) { - if (connections.get(remoteId) == this) { - connections.remove(remoteId); - } - } - - // close the streams and therefore the socket - IOUtils.closeStream(out); - IOUtils.closeStream(in); - disposeSasl(); - - // clean up all calls - if (closeException == null) { - if (!calls.isEmpty()) { - LOG.warn( - "A connection is closed for no cause and calls are not empty. " + - "#Calls: " + calls.size()); - - // clean up calls anyway - closeException = new IOException("Unexpected closed connection"); - cleanupCalls(); - } - } else { - // log the info - if (LOG.isDebugEnabled()) { - LOG.debug("closing ipc connection to " + server + ": " + - closeException.getMessage(),closeException); - } - - // cleanup calls - cleanupCalls(); - } - if (LOG.isDebugEnabled()) - LOG.debug(getName() + ": closed"); - } - - /* Initiates a call by sending the parameter to the remote server. - * Note: this is not called from the Connection thread, but by other - * threads. 
- */ - protected void sendParam(Call call) { - if (shouldCloseConnection.get()) { - return; - } - try { - if (LOG.isDebugEnabled()) - LOG.debug(getName() + " sending #" + call.id); - - RpcRequestHeader.Builder headerBuilder = RPCProtos.RpcRequestHeader.newBuilder(); - headerBuilder.setCallId(call.id); - - if (Trace.isTracing()) { - Span s = Trace.currentTrace(); - headerBuilder.setTinfo(RPCTInfo.newBuilder() - .setParentId(s.getSpanId()) - .setTraceId(s.getTraceId())); - } - - //noinspection SynchronizeOnNonFinalField - synchronized (this.out) { // FindBugs IS2_INCONSISTENT_SYNC - RpcRequestHeader header = headerBuilder.build(); - int serializedHeaderSize = header.getSerializedSize(); - int requestSerializedSize = call.param.getSerializedSize(); - this.out.writeInt(serializedHeaderSize + - CodedOutputStream.computeRawVarint32Size(serializedHeaderSize) + - requestSerializedSize + - CodedOutputStream.computeRawVarint32Size(requestSerializedSize)); - header.writeDelimitedTo(this.out); - call.param.writeDelimitedTo(this.out); - this.out.flush(); - } - } catch(IOException e) { - markClosed(e); - } - } - - - private Method getMethod(Class protocol, - String methodName) { - Method method = methodInstances.get(methodName); - if (method != null) { - return method; - } - Method[] methods = protocol.getMethods(); - for (Method m : methods) { - if (m.getName().equals(methodName)) { - m.setAccessible(true); - methodInstances.put(methodName, m); - return m; - } - } - return null; - } - - /* Receive a response. - * Because only one receiver, so no synchronization on in. - */ - protected void receiveResponse() { - if (shouldCloseConnection.get()) { - return; - } - touch(); - - try { - // See HBaseServer.Call.setResponse for where we write out the response. - // It writes the call.id (int), a boolean signifying any error (and if - // so the exception name/trace), and the response bytes - - // Read the call id. - RpcResponseHeader response = RpcResponseHeader.parseDelimitedFrom(in); - if (response == null) { - // When the stream is closed, protobuf doesn't raise an EOFException, - // instead, it returns a null message object. - throw new EOFException(); - } - int id = response.getCallId(); - - if (LOG.isDebugEnabled()) - LOG.debug(getName() + " got value #" + id); - Call call = calls.get(id); - - Status status = response.getStatus(); - if (status == Status.SUCCESS) { - Message rpcResponseType; - try { - rpcResponseType = ProtobufRpcClientEngine.Invoker.getReturnProtoType( - getMethod(remoteId.getProtocol(), - call.param.getMethodName())); - } catch (Exception e) { - throw new RuntimeException(e); //local exception - } - Builder builder = rpcResponseType.newBuilderForType(); - builder.mergeDelimitedFrom(in); - Message value = builder.build(); - // it's possible that this call may have been cleaned up due to a RPC - // timeout, so check if it still exists before setting the value. 
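
/*
 * A self-contained sketch of the request framing used by sendParam() above:
 * one total-length int, then a varint-delimited header, then a
 * varint-delimited request body. The real client delegates the delimited
 * writes to protobuf; here the "messages" are plain byte arrays so the
 * example compiles on its own.
 */
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class RequestFramingSketch {
  /** Number of bytes in the protobuf-style varint encoding of n (n >= 0). */
  static int varintSize(int n) {
    int size = 1;
    while ((n & ~0x7F) != 0) {
      n >>>= 7;
      size++;
    }
    return size;
  }

  static void writeVarint(DataOutputStream out, int n) throws IOException {
    while ((n & ~0x7F) != 0) {
      out.writeByte((n & 0x7F) | 0x80);
      n >>>= 7;
    }
    out.writeByte(n);
  }

  /** Frame a header and a body the way the client frames one RPC request. */
  static byte[] frame(byte[] header, byte[] body) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    int total = varintSize(header.length) + header.length
              + varintSize(body.length) + body.length;
    out.writeInt(total);             // total payload size, not counting this int
    writeVarint(out, header.length); // delimited call header
    out.write(header);
    writeVarint(out, body.length);   // delimited request body
    out.write(body);
    out.flush();
    return buf.toByteArray();
  }
}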
- if (call != null) { - call.setValue(value); - } - calls.remove(id); - } else if (status == Status.ERROR) { - RpcException exceptionResponse = RpcException.parseDelimitedFrom(in); - if (call != null) { - //noinspection ThrowableInstanceNeverThrown - call.setException(new RemoteException( - exceptionResponse.getExceptionName(), - exceptionResponse.getStackTrace())); - calls.remove(id); - } - } else if (status == Status.FATAL) { - RpcException exceptionResponse = RpcException.parseDelimitedFrom(in); - // Close the connection - markClosed(new RemoteException( - exceptionResponse.getExceptionName(), - exceptionResponse.getStackTrace())); - } - } catch (IOException e) { - if (e instanceof SocketTimeoutException && remoteId.rpcTimeout > 0) { - // Clean up open calls but don't treat this as a fatal condition, - // since we expect certain responses to not make it by the specified - // {@link ConnectionId#rpcTimeout}. - closeException = e; - } else { - // Since the server did not respond within the default ping interval - // time, treat this as a fatal condition and close this connection - markClosed(e); - } - } finally { - if (remoteId.rpcTimeout > 0) { - cleanupCalls(remoteId.rpcTimeout); - } - } - } - - protected synchronized void markClosed(IOException e) { - if (shouldCloseConnection.compareAndSet(false, true)) { - closeException = e; - notifyAll(); - } - } - - /* Cleanup all calls and mark them as done */ - protected void cleanupCalls() { - cleanupCalls(0); - } - - protected void cleanupCalls(long rpcTimeout) { - Iterator> itor = calls.entrySet().iterator(); - while (itor.hasNext()) { - Call c = itor.next().getValue(); - long waitTime = System.currentTimeMillis() - c.getStartTime(); - if (waitTime >= rpcTimeout) { - if (this.closeException == null) { - // There may be no exception in the case that there are many calls - // being multiplexed over this connection and these are succeeding - // fine while this Call object is taking a long time to finish - // over on the server; e.g. I just asked the regionserver to bulk - // open 3k regions or its a big fat multiput into a heavily-loaded - // server (Perhaps this only happens at the extremes?) - this.closeException = new CallTimeoutException("Call id=" + c.id + - ", waitTime=" + waitTime + ", rpcTimetout=" + rpcTimeout); - } - c.setException(this.closeException); - synchronized (c) { - c.notifyAll(); - } - itor.remove(); - } else { - break; - } - } - try { - if (!calls.isEmpty()) { - Call firstCall = calls.get(calls.firstKey()); - long maxWaitTime = System.currentTimeMillis() - firstCall.getStartTime(); - if (maxWaitTime < rpcTimeout) { - rpcTimeout -= maxWaitTime; - } - } - if (!shouldCloseConnection.get()) { - closeException = null; - if (socket != null) { - socket.setSoTimeout((int) rpcTimeout); - } - } - } catch (SocketException e) { - LOG.debug("Couldn't lower timeout, which may result in longer than expected calls"); - } - } - } - - /** - * Client-side call timeout - */ - public static class CallTimeoutException extends IOException { - public CallTimeoutException(final String msg) { - super(msg); - } - } - - /** Call implementation used for parallel calls. */ - protected class ParallelCall extends Call { - private final ParallelResults results; - protected final int index; - - public ParallelCall(RpcRequestBody param, ParallelResults results, int index) { - super(param); - this.results = results; - this.index = index; - } - - /** Deliver result to result collector. 
*/ - @Override - protected void callComplete() { - results.callComplete(this); - } - } - - /** Result collector for parallel calls. */ - protected static class ParallelResults { - protected final Message[] values; - protected int size; - protected int count; - - public ParallelResults(int size) { - this.values = new RpcResponseBody[size]; - this.size = size; - } - - /* - * Collect a result. - */ - synchronized void callComplete(ParallelCall call) { - // FindBugs IS2_INCONSISTENT_SYNC - values[call.index] = call.value; // store the value - count++; // count it - if (count == size) // if all values are in - notify(); // then notify waiting caller - } - } - - /** - * Construct an IPC client whose values are of the {@link Message} - * class. - * @param conf configuration - * @param factory socket factory - */ - public HBaseClient(Configuration conf, SocketFactory factory) { - this.maxIdleTime = - conf.getInt("hbase.ipc.client.connection.maxidletime", 10000); //10s - this.maxRetries = conf.getInt("hbase.ipc.client.connect.max.retries", 0); - this.failureSleep = conf.getInt("hbase.client.pause", 1000); - this.tcpNoDelay = conf.getBoolean("hbase.ipc.client.tcpnodelay", true); - this.tcpKeepAlive = conf.getBoolean("hbase.ipc.client.tcpkeepalive", true); - this.pingInterval = getPingInterval(conf); - if (LOG.isDebugEnabled()) { - LOG.debug("The ping interval is" + this.pingInterval + "ms."); - } - this.conf = conf; - this.socketFactory = factory; - this.clusterId = conf.get(HConstants.CLUSTER_ID, "default"); - this.connections = new PoolMap( - getPoolType(conf), getPoolSize(conf)); - this.failedServers = new FailedServers(conf); - } - - /** - * Construct an IPC client with the default SocketFactory - * @param conf configuration - */ - public HBaseClient(Configuration conf) { - this(conf, NetUtils.getDefaultSocketFactory(conf)); - } - - /** - * Return the pool type specified in the configuration, which must be set to - * either {@link PoolType#RoundRobin} or {@link PoolType#ThreadLocal}, - * otherwise default to the former. - * - * For applications with many user threads, use a small round-robin pool. For - * applications with few user threads, you may want to try using a - * thread-local pool. In any case, the number of {@link HBaseClient} instances - * should not exceed the operating system's hard limit on the number of - * connections. - * - * @param config configuration - * @return either a {@link PoolType#RoundRobin} or - * {@link PoolType#ThreadLocal} - */ - protected static PoolType getPoolType(Configuration config) { - return PoolType.valueOf(config.get(HConstants.HBASE_CLIENT_IPC_POOL_TYPE), - PoolType.RoundRobin, PoolType.ThreadLocal); - } - - /** - * Return the pool size specified in the configuration, which is applicable only if - * the pool type is {@link PoolType#RoundRobin}. - * - * @param config - * @return the maximum pool size - */ - protected static int getPoolSize(Configuration config) { - return config.getInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1); - } - - /** Return the socket factory of this client - * - * @return this client's socket factory - */ - SocketFactory getSocketFactory() { - return socketFactory; - } - - /** Stop all threads related to this client. No further calls may be made - * using this client. 
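
/*
 * A sketch of how the pool settings read by getPoolType()/getPoolSize()
 * above could be supplied through a Hadoop Configuration. The keys are the
 * HConstants fields referenced in that code; the pool type strings and the
 * size shown here are only example values.
 */
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;

final class ClientPoolConfigSketch {
  static Configuration roundRobinPool(Configuration base, int poolSize) {
    Configuration conf = new Configuration(base);
    // Share a small, fixed number of connections per server across threads.
    conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "RoundRobin");
    conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize);
    return conf;
  }

  static Configuration threadLocalPool(Configuration base) {
    Configuration conf = new Configuration(base);
    // One connection per user thread; the pool size setting is ignored for this type.
    conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "ThreadLocal");
    return conf;
  }
}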
*/ - public void stop() { - if (LOG.isDebugEnabled()) { - LOG.debug("Stopping client"); - } - - if (!running.compareAndSet(true, false)) { - return; - } - - // wake up all connections - synchronized (connections) { - for (Connection conn : connections.values()) { - conn.interrupt(); - } - } - - // wait until all connections are closed - while (!connections.isEmpty()) { - try { - Thread.sleep(100); - } catch (InterruptedException ignored) { - } - } - } - - /** Make a call, passing param, to the IPC server running at - * address, returning the value. Throws exceptions if there are - * network problems or if the remote code threw an exception. - * @param param RpcRequestBody parameter - * @param address network address - * @return Message - * @throws IOException e - */ - public Message call(RpcRequestBody param, InetSocketAddress address) - throws IOException, InterruptedException { - return call(param, address, null, 0); - } - - public Message call(RpcRequestBody param, InetSocketAddress addr, - User ticket, int rpcTimeout) - throws IOException, InterruptedException { - return call(param, addr, null, ticket, rpcTimeout); - } - - /** Make a call, passing param, to the IPC server running at - * address which is servicing the protocol protocol, - * with the ticket credentials, returning the value. - * Throws exceptions if there are network problems or if the remote code - * threw an exception. */ - public Message call(RpcRequestBody param, InetSocketAddress addr, - Class protocol, - User ticket, int rpcTimeout) - throws InterruptedException, IOException { - Call call = new Call(param); - Connection connection = getConnection(addr, protocol, ticket, rpcTimeout, call); - connection.sendParam(call); // send the parameter - boolean interrupted = false; - //noinspection SynchronizationOnLocalVariableOrMethodParameter - synchronized (call) { - while (!call.done) { - try { - call.wait(); // wait for the result - } catch (InterruptedException ignored) { - // save the fact that we were interrupted - interrupted = true; - } - } - - if (interrupted) { - // set the interrupt flag now that we are done waiting - Thread.currentThread().interrupt(); - } - - if (call.error != null) { - if (call.error instanceof RemoteException) { - call.error.fillInStackTrace(); - throw call.error; - } - // local exception - throw wrapException(addr, call.error); - } - return call.value; - } - } - - /** - * Take an IOException and the address we were trying to connect to - * and return an IOException with the input exception as the cause. - * The new exception provides the stack trace of the place where - * the exception is thrown and some extra diagnostics information. - * If the exception is ConnectException or SocketTimeoutException, - * return a new one of the same type; Otherwise return an IOException. 
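
/*
 * A condensed sketch of the blocking wait used by call() above: the calling
 * thread parks on the call object until the receiver thread delivers a value
 * or an error, and any interrupt received while waiting is restored on the
 * thread afterwards. This is a generic illustration, not the Call class
 * itself.
 */
final class PendingCallSketch {
  private Object value;
  private Exception error;
  private boolean done;

  /** Invoked by the receiver thread when the response (or a failure) arrives. */
  synchronized void complete(Object value, Exception error) {
    this.value = value;
    this.error = error;
    this.done = true;
    notifyAll();
  }

  /** Invoked by the requesting thread; blocks until complete() has run. */
  synchronized Object await() throws Exception {
    boolean interrupted = false;
    while (!done) {
      try {
        wait();
      } catch (InterruptedException e) {
        interrupted = true;                 // remember it, but keep waiting for the RPC
      }
    }
    if (interrupted) {
      Thread.currentThread().interrupt();   // restore the interrupt flag for the caller
    }
    if (error != null) {
      throw error;
    }
    return value;
  }
}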
- * - * @param addr target address - * @param exception the relevant exception - * @return an exception to throw - */ - @SuppressWarnings({"ThrowableInstanceNeverThrown"}) - protected IOException wrapException(InetSocketAddress addr, - IOException exception) { - if (exception instanceof ConnectException) { - //connection refused; include the host:port in the error - return (ConnectException)new ConnectException( - "Call to " + addr + " failed on connection exception: " + exception) - .initCause(exception); - } else if (exception instanceof SocketTimeoutException) { - return (SocketTimeoutException)new SocketTimeoutException( - "Call to " + addr + " failed on socket timeout exception: " - + exception).initCause(exception); - } else { - return (IOException)new IOException( - "Call to " + addr + " failed on local exception: " + exception) - .initCause(exception); - - } - } - - /** Makes a set of calls in parallel. Each parameter is sent to the - * corresponding address. When all values are available, or have timed out - * or errored, the collected results are returned in an array. The array - * contains nulls for calls that timed out or errored. - * @param params RpcRequestBody parameters - * @param addresses socket addresses - * @return RpcResponseBody[] - * @throws IOException e - * @deprecated Use {@code #call(RpcRequestBody[], InetSocketAddress[], Class, User)} instead - */ - @Deprecated - public Message[] call(RpcRequestBody[] params, InetSocketAddress[] addresses) - throws IOException, InterruptedException { - return call(params, addresses, null, null); - } - - /** Makes a set of calls in parallel. Each parameter is sent to the - * corresponding address. When all values are available, or have timed out - * or errored, the collected results are returned in an array. The array - * contains nulls for calls that timed out or errored. */ - public Message[] call(RpcRequestBody[] params, InetSocketAddress[] addresses, - Class protocol, - User ticket) - throws IOException, InterruptedException { - if (addresses.length == 0) return new RpcResponseBody[0]; - - ParallelResults results = new ParallelResults(params.length); - // TODO this synchronization block doesnt make any sense, we should possibly fix it - //noinspection SynchronizationOnLocalVariableOrMethodParameter - synchronized (results) { - for (int i = 0; i < params.length; i++) { - ParallelCall call = new ParallelCall(params[i], results, i); - try { - Connection connection = - getConnection(addresses[i], protocol, ticket, 0, call); - connection.sendParam(call); // send each parameter - } catch (IOException e) { - // log errors - LOG.info("Calling "+addresses[i]+" caught: " + - e.getMessage(),e); - results.size--; // wait for one fewer result - } - } - while (results.count != results.size) { - try { - results.wait(); // wait for all results - } catch (InterruptedException ignored) {} - } - - return results.values; - } - } - - /* Get a connection from the pool, or create a new one and add it to the - * pool. Connections to a given host/port are reused. */ - protected Connection getConnection(InetSocketAddress addr, - Class protocol, - User ticket, - int rpcTimeout, - Call call) - throws IOException, InterruptedException { - if (!running.get()) { - // the client is stopped - throw new IOException("The client is stopped"); - } - Connection connection; - /* we could avoid this allocation for each RPC by having a - * connectionsId object and with set() method. We need to manage the - * refs for keys in HashMap properly. For now its ok. 
- */ - ConnectionId remoteId = new ConnectionId(addr, protocol, ticket, rpcTimeout); - synchronized (connections) { - connection = connections.get(remoteId); - if (connection == null) { - connection = createConnection(remoteId); - connections.put(remoteId, connection); - } - } - connection.addCall(call); - - //we don't invoke the method below inside "synchronized (connections)" - //block above. The reason for that is if the server happens to be slow, - //it will take longer to establish a connection and that will slow the - //entire system down. - //Moreover, if the connection is currently created, there will be many threads - // waiting here; as setupIOstreams is synchronized. If the connection fails with a - // timeout, they will all fail simultaneously. This is checked in setupIOstreams. - connection.setupIOstreams(); - return connection; - } - - /** - * This class holds the address and the user ticket. The client connections - * to servers are uniquely identified by - */ - protected static class ConnectionId { - final InetSocketAddress address; - final User ticket; - final int rpcTimeout; - Class protocol; - private static final int PRIME = 16777619; - - ConnectionId(InetSocketAddress address, - Class protocol, - User ticket, - int rpcTimeout) { - this.protocol = protocol; - this.address = address; - this.ticket = ticket; - this.rpcTimeout = rpcTimeout; - } - - InetSocketAddress getAddress() { - return address; - } - - Class getProtocol() { - return protocol; - } - - User getTicket() { - return ticket; - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof ConnectionId) { - ConnectionId id = (ConnectionId) obj; - return address.equals(id.address) && protocol == id.protocol && - ((ticket != null && ticket.equals(id.ticket)) || - (ticket == id.ticket)) && rpcTimeout == id.rpcTimeout; - } - return false; - } - - @Override // simply use the default Object#hashcode() ? - public int hashCode() { - return (address.hashCode() + PRIME * ( - PRIME * System.identityHashCode(protocol) ^ - (ticket == null ? 0 : ticket.hashCode()) )) ^ rpcTimeout; - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClientRPC.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClientRPC.java deleted file mode 100644 index 1b4f20b..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClientRPC.java +++ /dev/null @@ -1,294 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
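
/*
 * A sketch of the connection reuse implemented by getConnection() and
 * ConnectionId above: connections are cached under a composite key whose
 * equals()/hashCode() cover address, protocol, user and rpc timeout, so
 * requests with the same four values share one socket. The names below are
 * illustrative stand-ins, not the HBase classes.
 */
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

abstract class ConnectionReuseSketch<C> {
  static final class Key {
    final InetSocketAddress address;
    final Class<?> protocol;
    final String user;
    final int rpcTimeout;

    Key(InetSocketAddress address, Class<?> protocol, String user, int rpcTimeout) {
      this.address = address;
      this.protocol = protocol;
      this.user = user;
      this.rpcTimeout = rpcTimeout;
    }

    @Override
    public boolean equals(Object o) {
      if (!(o instanceof Key)) {
        return false;
      }
      Key k = (Key) o;
      return rpcTimeout == k.rpcTimeout
          && address.equals(k.address)
          && protocol == k.protocol
          && Objects.equals(user, k.user);
    }

    @Override
    public int hashCode() {
      return Objects.hash(address, protocol, user, rpcTimeout);
    }
  }

  private final Map<Key, C> connections = new HashMap<>();

  /** Cheap construction only; slow socket setup is done by the caller, outside this lock. */
  protected abstract C create(Key key);

  C getConnection(Key key) {
    synchronized (connections) {
      C conn = connections.get(key);
      if (conn == null) {
        conn = create(key);
        connections.put(key, conn);
      }
      return conn;
    }
  }
}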
- */ - -package org.apache.hadoop.hbase.ipc; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.client.RetriesExhaustedException; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.util.ReflectionUtils; - -import javax.net.SocketFactory; -import java.io.IOException; -import java.lang.reflect.Field; -import java.lang.reflect.Proxy; -import java.net.ConnectException; -import java.net.InetSocketAddress; -import java.net.SocketTimeoutException; -import java.util.HashMap; -import java.util.Map; - -/** - * An RPC implementation. This class provides the client side implementation. - */ -@InterfaceAudience.Private -public class HBaseClientRPC { - - protected static final Log LOG = - LogFactory.getLog("org.apache.hadoop.ipc.HBaseClientRPC"); - - // cache of RpcEngines by protocol - private static final Map PROTOCOL_ENGINES - = new HashMap(); - /** - * Configuration key for the {@link org.apache.hadoop.hbase.ipc.RpcClientEngine} implementation - * to load to handle connection protocols. Handlers for individual protocols can be - * configured using {@code "hbase.rpc.client.engine." + protocol.class.name}. - */ - public static final String RPC_ENGINE_PROP = "hbase.rpc.client.engine"; - // track what RpcEngine is used by a proxy class, for stopProxy() - private static final Map PROXY_ENGINES - = new HashMap(); - // thread-specific RPC timeout, which may override that of RpcEngine - private static ThreadLocal rpcTimeout = new ThreadLocal() { - @Override - protected Integer initialValue() { - return HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT; - } - }; - - static long getProtocolVersion(Class protocol) - throws NoSuchFieldException, IllegalAccessException { - Field versionField = protocol.getField("VERSION"); - versionField.setAccessible(true); - return versionField.getLong(protocol); - } - - // set a protocol to use a non-default RpcEngine - static void setProtocolEngine(Configuration conf, - Class protocol, Class engine) { - conf.setClass(RPC_ENGINE_PROP + "." + protocol.getName(), engine, RpcClientEngine.class); - } - - // return the RpcEngine configured to handle a protocol - static synchronized RpcClientEngine getProtocolEngine(Class protocol, - Configuration conf) { - RpcClientEngine engine = PROTOCOL_ENGINES.get(protocol); - if (engine == null) { - // check for a configured default engine - Class defaultEngine = - conf.getClass(RPC_ENGINE_PROP, ProtobufRpcClientEngine.class); - - // check for a per interface override - Class impl = conf.getClass(RPC_ENGINE_PROP + "." 
+ protocol.getName(), - defaultEngine); - LOG.debug("Using " + impl.getName() + " for " + protocol.getName()); - engine = (RpcClientEngine) ReflectionUtils.newInstance(impl, conf); - if (protocol.isInterface()) - PROXY_ENGINES.put(Proxy.getProxyClass(protocol.getClassLoader(), - protocol), - engine); - PROTOCOL_ENGINES.put(protocol, engine); - } - return engine; - } - - // return the RpcEngine that handles a proxy object - private static synchronized RpcClientEngine getProxyEngine(Object proxy) { - return PROXY_ENGINES.get(proxy.getClass()); - } - - /** - * @param protocol protocol interface - * @param clientVersion which client version we expect - * @param addr address of remote service - * @param conf configuration - * @param maxAttempts max attempts - * @param rpcTimeout timeout for each RPC - * @param timeout timeout in milliseconds - * @return proxy - * @throws java.io.IOException e - */ - @SuppressWarnings("unchecked") - public static VersionedProtocol waitForProxy(Class protocol, - long clientVersion, - InetSocketAddress addr, - Configuration conf, - int maxAttempts, - int rpcTimeout, - long timeout - ) throws IOException { - // HBase does limited number of reconnects which is different from hadoop. - long startTime = System.currentTimeMillis(); - IOException ioe; - int reconnectAttempts = 0; - while (true) { - try { - return getProxy(protocol, clientVersion, addr, conf, rpcTimeout); - } catch (SocketTimeoutException te) { // namenode is busy - LOG.info("Problem connecting to server: " + addr); - ioe = te; - } catch (IOException ioex) { - // We only handle the ConnectException. - ConnectException ce = null; - if (ioex instanceof ConnectException) { - ce = (ConnectException) ioex; - ioe = ce; - } else if (ioex.getCause() != null - && ioex.getCause() instanceof ConnectException) { - ce = (ConnectException) ioex.getCause(); - ioe = ce; - } else if (ioex.getMessage().toLowerCase() - .contains("connection refused")) { - ce = new ConnectException(ioex.getMessage()); - ioe = ce; - } else { - // This is the exception we can't handle. - ioe = ioex; - } - if (ce != null) { - handleConnectionException(++reconnectAttempts, maxAttempts, protocol, - addr, ce); - } - } - // check if timed out - if (System.currentTimeMillis() - timeout >= startTime) { - throw ioe; - } - - // wait for retry - try { - Thread.sleep(1000); - } catch (InterruptedException ie) { - // IGNORE - } - } - } - - /** - * @param retries current retried times. - * @param maxAttmpts max attempts - * @param protocol protocol interface - * @param addr address of remote service - * @param ce ConnectException - * @throws org.apache.hadoop.hbase.client.RetriesExhaustedException - * - */ - private static void handleConnectionException(int retries, - int maxAttmpts, - Class protocol, - InetSocketAddress addr, - ConnectException ce) - throws RetriesExhaustedException { - if (maxAttmpts >= 0 && retries >= maxAttmpts) { - LOG.info("Server at " + addr + " could not be reached after " - + maxAttmpts + " tries, giving up."); - throw new RetriesExhaustedException("Failed setting up proxy " + protocol - + " to " + addr.toString() + " after attempts=" + maxAttmpts, ce); - } - } - - /** - * Construct a client-side proxy object that implements the named protocol, - * talking to a server at the named address. 
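
/*
 * A sketch of the per-protocol engine lookup performed by getProtocolEngine()
 * above: a global default class under "hbase.rpc.client.engine" can be
 * overridden per protocol by appending the protocol class name to the key.
 * EngineSketch is a stand-in interface, not the real RpcClientEngine.
 */
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

final class EngineLookupSketch {
  interface EngineSketch { }

  static final String ENGINE_PROP = "hbase.rpc.client.engine";

  static EngineSketch engineFor(Class<?> protocol, Configuration conf,
                                Class<? extends EngineSketch> fallback) {
    // Global default, overridable per protocol interface.
    Class<? extends EngineSketch> defaultEngine =
        conf.getClass(ENGINE_PROP, fallback, EngineSketch.class);
    Class<? extends EngineSketch> impl =
        conf.getClass(ENGINE_PROP + "." + protocol.getName(), defaultEngine, EngineSketch.class);
    return ReflectionUtils.newInstance(impl, conf);
  }
}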
- * - * @param protocol interface - * @param clientVersion version we are expecting - * @param addr remote address - * @param conf configuration - * @param factory socket factory - * @param rpcTimeout timeout for each RPC - * @return proxy - * @throws java.io.IOException e - */ - public static VersionedProtocol getProxy(Class protocol, - long clientVersion, - InetSocketAddress addr, - Configuration conf, - SocketFactory factory, - int rpcTimeout) throws IOException { - return getProxy(protocol, clientVersion, addr, - User.getCurrent(), conf, factory, rpcTimeout); - } - - /** - * Construct a client-side proxy object that implements the named protocol, - * talking to a server at the named address. - * - * @param protocol interface - * @param clientVersion version we are expecting - * @param addr remote address - * @param ticket ticket - * @param conf configuration - * @param factory socket factory - * @param rpcTimeout timeout for each RPC - * @return proxy - * @throws java.io.IOException e - */ - public static VersionedProtocol getProxy( - Class protocol, - long clientVersion, InetSocketAddress addr, User ticket, - Configuration conf, SocketFactory factory, int rpcTimeout) - throws IOException { - RpcClientEngine engine = getProtocolEngine(protocol, conf); - VersionedProtocol proxy = engine - .getProxy(protocol, clientVersion, addr, ticket, conf, factory, - Math.min(rpcTimeout, getRpcTimeout())); - return proxy; - } - - /** - * Construct a client-side proxy object with the default SocketFactory - * - * @param protocol interface - * @param clientVersion version we are expecting - * @param addr remote address - * @param conf configuration - * @param rpcTimeout timeout for each RPC - * @return a proxy instance - * @throws java.io.IOException e - */ - public static VersionedProtocol getProxy( - Class protocol, - long clientVersion, InetSocketAddress addr, Configuration conf, - int rpcTimeout) - throws IOException { - - return getProxy(protocol, clientVersion, addr, conf, NetUtils - .getDefaultSocketFactory(conf), rpcTimeout); - } - - /** - * Stop this proxy and release its invoker's resource - * - * @param proxy the proxy to be stopped - */ - public static void stopProxy(VersionedProtocol proxy) { - if (proxy != null) { - getProxyEngine(proxy).stopProxy(proxy); - } - } - - public static void setRpcTimeout(int t) { - rpcTimeout.set(t); - } - - public static int getRpcTimeout() { - return rpcTimeout.get(); - } - - public static void resetRpcTimeout() { - rpcTimeout.remove(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java index 884db91..22a12ec 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java @@ -77,12 +77,12 @@ import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponseHeader.Status; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.monitoring.TaskMonitor; +import org.apache.hadoop.hbase.security.SaslUtils; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.security.HBaseSaslRpcServer; -import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.AuthMethod; +import org.apache.hadoop.hbase.security.AuthMethod; import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler; import 
org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler; -import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslStatus; +import org.apache.hadoop.hbase.security.SaslStatus; import org.apache.hadoop.hbase.util.ByteBufferOutputStream; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.IntWritable; @@ -125,11 +125,6 @@ import org.cloudera.htrace.Trace; public abstract class HBaseServer implements RpcServer { private final boolean authorize; protected boolean isSecurityEnabled; - /** - * The first four bytes of Hadoop RPC connections - */ - public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes()); - public static final byte CURRENT_VERSION = 5; /** * How many calls/handler are allowed in the queue. @@ -1214,7 +1209,7 @@ public abstract class HBaseServer implements RpcServer { private UserGroupInformation getAuthorizedUgi(String authorizedId) throws IOException { if (authMethod == AuthMethod.DIGEST) { - TokenIdentifier tokenId = HBaseSaslRpcServer.getIdentifier(authorizedId, + TokenIdentifier tokenId = SaslUtils.getIdentifier(authorizedId, secretManager); UserGroupInformation ugi = tokenId.getUser(); if (ugi == null) { @@ -1253,8 +1248,8 @@ public abstract class HBaseServer implements RpcServer { "Server is not configured to do DIGEST authentication."); } saslServer = Sasl.createSaslServer(AuthMethod.DIGEST - .getMechanismName(), null, HBaseSaslRpcServer.SASL_DEFAULT_REALM, - HBaseSaslRpcServer.SASL_PROPS, new SaslDigestCallbackHandler( + .getMechanismName(), null, SaslUtils.SASL_DEFAULT_REALM, + SaslUtils.SASL_PROPS, new SaslDigestCallbackHandler( secretManager, this)); break; default: @@ -1263,7 +1258,7 @@ public abstract class HBaseServer implements RpcServer { String fullName = current.getUserName(); if (LOG.isDebugEnabled()) LOG.debug("Kerberos principal name is " + fullName); - final String names[] = HBaseSaslRpcServer.splitKerberosName(fullName); + final String names[] = SaslUtils.splitKerberosName(fullName); if (names.length != 3) { throw new AccessControlException( "Kerberos principal name does NOT have the expected " @@ -1274,7 +1269,7 @@ public abstract class HBaseServer implements RpcServer { public Object run() throws SaslException { saslServer = Sasl.createSaslServer(AuthMethod.KERBEROS .getMechanismName(), names[0], names[1], - HBaseSaslRpcServer.SASL_PROPS, new SaslGssCallbackHandler()); + SaslUtils.SASL_PROPS, new SaslGssCallbackHandler()); return null; } }); @@ -1388,11 +1383,11 @@ public abstract class HBaseServer implements RpcServer { authMethod = AuthMethod.read(new DataInputStream( new ByteArrayInputStream(method))); dataLengthBuffer.flip(); - if (!HEADER.equals(dataLengthBuffer) || version != CURRENT_VERSION) { + if (!HBaseClient.HEADER.equals(dataLengthBuffer) || version != HBaseClient.CURRENT_VERSION) { LOG.warn("Incorrect header or version mismatch from " + hostAddress + ":" + remotePort + " got version " + version + - " expected version " + CURRENT_VERSION); + " expected version " + HBaseClient.CURRENT_VERSION); setupBadVersionResponse(version); return -1; } @@ -1410,7 +1405,7 @@ public abstract class HBaseServer implements RpcServer { } if (!isSecurityEnabled && authMethod != AuthMethod.SIMPLE) { doRawSaslReply(SaslStatus.SUCCESS, new IntWritable( - HBaseSaslRpcServer.SWITCH_TO_SIMPLE_AUTH), null, null); + SaslUtils.SWITCH_TO_SIMPLE_AUTH), null, null); authMethod = AuthMethod.SIMPLE; // client has already sent the initial Sasl message and we // should ignore it. 
Both client and server should fall back @@ -1478,7 +1473,7 @@ public abstract class HBaseServer implements RpcServer { * @throws IOException */ private void setupBadVersionResponse(int clientVersion) throws IOException { - String errMsg = "Server IPC version " + CURRENT_VERSION + + String errMsg = "Server IPC version " + HBaseClient.CURRENT_VERSION + " cannot communicate with client version " + clientVersion; ByteArrayOutputStream buffer = new ByteArrayOutputStream(); @@ -1931,7 +1926,7 @@ public abstract class HBaseServer implements RpcServer { conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false); this.isSecurityEnabled = User.isHBaseSecurityEnabled(this.conf); if (isSecurityEnabled) { - HBaseSaslRpcServer.init(conf); + SaslUtils.init(conf); } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java deleted file mode 100644 index 574647c..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.ipc; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.client.AdminProtocol; -import org.apache.hadoop.hbase.client.ClientProtocol; -import org.apache.hadoop.hbase.MasterMonitorProtocol; -import org.apache.hadoop.hbase.MasterAdminProtocol; -import org.apache.hadoop.hbase.io.HbaseObjectWritable; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; -import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; -import org.apache.hadoop.hbase.RegionServerStatusProtocol; -import org.apache.hadoop.io.VersionMismatchException; -import org.apache.hadoop.io.VersionedWritable; - -/** A method invocation, including the method name and its parameters.*/ -@InterfaceAudience.Private -public class Invocation extends VersionedWritable implements Configurable { - protected String methodName; - @SuppressWarnings("rawtypes") - protected Class[] parameterClasses; - protected Object[] parameters; - protected Configuration conf; - private long clientVersion; - private int clientMethodsHash; - - // For generated protocol classes which don't have VERSION field, - // such as protobuf interfaces. 
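
/*
 * A sketch of the version lookup that the comment above describes: generated
 * protobuf interfaces carry no VERSION field, so a registration map supplies
 * their version, and reflection on a public static VERSION field is the
 * fallback for classic protocols. Names are illustrative.
 */
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;

final class ProtocolVersionSketch {
  private static final Map<Class<?>, Long> KNOWN = new HashMap<>();

  static void register(Class<?> protocol, long version) {
    KNOWN.put(protocol, version);             // e.g. a generated BlockingInterface
  }

  static long versionOf(Class<?> protocol) {
    Long v = KNOWN.get(protocol);
    if (v != null) {
      return v;                               // registered protobuf interface
    }
    try {
      Field f = protocol.getField("VERSION"); // classic protocols declare a static VERSION
      return f.getLong(null);
    } catch (ReflectiveOperationException e) {
      throw new IllegalArgumentException("No version known for " + protocol.getName(), e);
    }
  }
}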
- static final Map, Long> - PROTOCOL_VERSION = new HashMap, Long>(); - - static { - PROTOCOL_VERSION.put(ClientService.BlockingInterface.class, - Long.valueOf(ClientProtocol.VERSION)); - PROTOCOL_VERSION.put(AdminService.BlockingInterface.class, - Long.valueOf(AdminProtocol.VERSION)); - PROTOCOL_VERSION.put(RegionServerStatusService.BlockingInterface.class, - Long.valueOf(RegionServerStatusProtocol.VERSION)); - PROTOCOL_VERSION.put(MasterMonitorProtocol.class,Long.valueOf(MasterMonitorProtocol.VERSION)); - PROTOCOL_VERSION.put(MasterAdminProtocol.class,Long.valueOf(MasterAdminProtocol.VERSION)); - } - - // For protobuf protocols, which use ServiceException, instead of IOException - protected static final Set> - PROTOBUF_PROTOCOLS = new HashSet>(); - - static { - PROTOBUF_PROTOCOLS.add(ClientProtocol.class); - PROTOBUF_PROTOCOLS.add(AdminProtocol.class); - PROTOBUF_PROTOCOLS.add(RegionServerStatusProtocol.class); - PROTOBUF_PROTOCOLS.add(MasterMonitorProtocol.class); - PROTOBUF_PROTOCOLS.add(MasterAdminProtocol.class); - } - - private static byte RPC_VERSION = 1; - - public Invocation() {} - - public Invocation(Method method, Object[] parameters) { - this.methodName = method.getName(); - this.parameterClasses = method.getParameterTypes(); - this.parameters = parameters; - Class declaringClass = method.getDeclaringClass(); - if (declaringClass.equals(VersionedProtocol.class)) { - //VersionedProtocol is exempted from version check. - clientVersion = 0; - clientMethodsHash = 0; - } else { - try { - Long version = PROTOCOL_VERSION.get(declaringClass); - if (version != null) { - this.clientVersion = version.longValue(); - } else { - Field versionField = declaringClass.getField("VERSION"); - versionField.setAccessible(true); - this.clientVersion = versionField.getLong(declaringClass); - } - } catch (NoSuchFieldException ex) { - throw new RuntimeException("The " + declaringClass, ex); - } catch (IllegalAccessException ex) { - throw new RuntimeException(ex); - } - this.clientMethodsHash = ProtocolSignature.getFingerprint( - declaringClass.getMethods()); - } - } - - /** @return The name of the method invoked. */ - public String getMethodName() { return methodName; } - - /** @return The parameter classes. */ - @SuppressWarnings({ "rawtypes" }) - public Class[] getParameterClasses() { return parameterClasses; } - - /** @return The parameter instances. */ - public Object[] getParameters() { return parameters; } - - long getProtocolVersion() { - return clientVersion; - } - - protected int getClientMethodsHash() { - return clientMethodsHash; - } - - /** - * Returns the rpc version used by the client. - * @return rpcVersion - */ - public long getRpcVersion() { - return RPC_VERSION; - } - - public void readFields(DataInput in) throws IOException { - try { - super.readFields(in); - methodName = in.readUTF(); - clientVersion = in.readLong(); - clientMethodsHash = in.readInt(); - } catch (VersionMismatchException e) { - // VersionMismatchException doesn't provide an API to access - // expectedVersion and foundVersion. This is really sad. - if (e.toString().endsWith("found v0")) { - // Try to be a bit backwards compatible. In previous versions of - // HBase (before HBASE-3939 in 0.92) Invocation wasn't a - // VersionedWritable and thus the first thing on the wire was always - // the 2-byte length of the method name. 
Because no method name is - // longer than 255 characters, and all method names are in ASCII, - // The following code is equivalent to `in.readUTF()', which we can't - // call again here, because `super.readFields(in)' already consumed - // the first byte of input, which can't be "unread" back into `in'. - final short len = (short) (in.readByte() & 0xFF); // Unsigned byte. - final byte[] buf = new byte[len]; - in.readFully(buf, 0, len); - methodName = new String(buf); - } - } - parameters = new Object[in.readInt()]; - parameterClasses = new Class[parameters.length]; - HbaseObjectWritable objectWritable = new HbaseObjectWritable(); - for (int i = 0; i < parameters.length; i++) { - parameters[i] = HbaseObjectWritable.readObject(in, objectWritable, - this.conf); - parameterClasses[i] = objectWritable.getDeclaredClass(); - } - } - - public void write(DataOutput out) throws IOException { - super.write(out); - out.writeUTF(this.methodName); - out.writeLong(clientVersion); - out.writeInt(clientMethodsHash); - out.writeInt(parameterClasses.length); - for (int i = 0; i < parameterClasses.length; i++) { - HbaseObjectWritable.writeObject(out, parameters[i], parameterClasses[i], - conf); - } - } - - @Override - public String toString() { - StringBuilder buffer = new StringBuilder(256); - buffer.append(methodName); - buffer.append("("); - for (int i = 0; i < parameters.length; i++) { - if (i != 0) - buffer.append(", "); - buffer.append(parameters[i]); - } - buffer.append(")"); - buffer.append(", rpc version="+RPC_VERSION); - buffer.append(", client version="+clientVersion); - buffer.append(", methodsFingerPrint="+clientMethodsHash); - return buffer.toString(); - } - - public void setConf(Configuration conf) { - this.conf = conf; - } - - public Configuration getConf() { - return this.conf; - } - - @Override - public byte getVersion() { - return RPC_VERSION; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java deleted file mode 100644 index b3a4228..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
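The removed Invocation constructor above resolves a protocol's RPC version in two steps: consult the static PROTOCOL_VERSION map that is pre-populated for the protobuf interfaces, and only then fall back to reflecting on a VERSION field declared by the protocol interface itself. A small sketch of that lookup (the helper and class names are illustrative, not from the patch):

import java.lang.reflect.Field;
import java.util.Map;

class VersionLookupSketch {
  // Mirrors the version resolution done in the removed Invocation constructor.
  static long resolveProtocolVersion(Class<?> protocol, Map<Class<?>, Long> knownVersions)
      throws NoSuchFieldException, IllegalAccessException {
    Long version = knownVersions.get(protocol);        // protobuf interfaces are registered up front
    if (version != null) {
      return version.longValue();
    }
    Field versionField = protocol.getField("VERSION"); // legacy protocols expose a static VERSION constant
    versionField.setAccessible(true);
    return versionField.getLong(protocol);
  }
}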
- */ - -package org.apache.hadoop.hbase.ipc; - -import com.google.protobuf.BlockingRpcChannel; -import com.google.protobuf.ByteString; -import com.google.protobuf.Descriptors; -import com.google.protobuf.Message; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcChannel; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.ResponseConverter; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; - -import java.io.IOException; - -import static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; - -/** - * Provides clients with an RPC connection to call coprocessor endpoint {@link com.google.protobuf.Service}s - * against the active master. An instance of this class may be obtained - * by calling {@link org.apache.hadoop.hbase.client.HBaseAdmin#coprocessorService()}, - * but should normally only be used in creating a new {@link com.google.protobuf.Service} stub to call the endpoint - * methods. - * @see org.apache.hadoop.hbase.client.HBaseAdmin#coprocessorService() - */ -@InterfaceAudience.Private -public class MasterCoprocessorRpcChannel extends CoprocessorRpcChannel{ - private static Log LOG = LogFactory.getLog(MasterCoprocessorRpcChannel.class); - - private final HConnection connection; - - public MasterCoprocessorRpcChannel(HConnection conn) { - this.connection = conn; - } - - @Override - protected Message callExecService(Descriptors.MethodDescriptor method, - Message request, Message responsePrototype) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Call: "+method.getName()+", "+request.toString()); - } - - final ClientProtos.CoprocessorServiceCall call = - ClientProtos.CoprocessorServiceCall.newBuilder() - .setRow(ByteString.copyFrom(HConstants.EMPTY_BYTE_ARRAY)) - .setServiceName(method.getService().getFullName()) - .setMethodName(method.getName()) - .setRequest(request.toByteString()).build(); - CoprocessorServiceResponse result = ProtobufUtil.execService(connection.getMasterAdmin(), call); - Message response = null; - if (result.getValue().hasValue()) { - response = responsePrototype.newBuilderForType() - .mergeFrom(result.getValue().getValue()).build(); - } else { - response = responsePrototype.getDefaultInstanceForType(); - } - if (LOG.isTraceEnabled()) { - LOG.trace("Master Result is value=" + response); - } - return response; - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcClientEngine.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcClientEngine.java deleted file mode 100644 index 46873ab..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcClientEngine.java +++ /dev/null @@ -1,194 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import com.google.protobuf.Message; -import com.google.protobuf.ServiceException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcRequestBody; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.ipc.RemoteException; - -import javax.net.SocketFactory; -import java.io.IOException; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; -import java.net.InetSocketAddress; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -public class ProtobufRpcClientEngine implements RpcClientEngine { - - private static final Log LOG = - LogFactory.getLog("org.apache.hadoop.hbase.ipc.ProtobufRpcClientEngine"); - - ProtobufRpcClientEngine() { - super(); - } - - protected final static ClientCache CLIENTS = new ClientCache(); - @Override - public VersionedProtocol getProxy( - Class protocol, long clientVersion, - InetSocketAddress addr, User ticket, Configuration conf, - SocketFactory factory, int rpcTimeout) throws IOException { - final Invoker invoker = new Invoker(protocol, addr, ticket, conf, factory, - rpcTimeout); - return (VersionedProtocol) Proxy.newProxyInstance( - protocol.getClassLoader(), new Class[]{protocol}, invoker); - } - - @Override - public void stopProxy(VersionedProtocol proxy) { - if (proxy!=null) { - ((Invoker)Proxy.getInvocationHandler(proxy)).close(); - } - } - - static class Invoker implements InvocationHandler { - private static final Map returnTypes = - new ConcurrentHashMap(); - private Class protocol; - private InetSocketAddress address; - private User ticket; - private HBaseClient client; - private boolean isClosed = false; - final private int rpcTimeout; - private final long clientProtocolVersion; - - public Invoker(Class protocol, - InetSocketAddress addr, User ticket, Configuration conf, - SocketFactory factory, int rpcTimeout) throws IOException { - this.protocol = protocol; - this.address = addr; - this.ticket = ticket; - this.client = CLIENTS.getClient(conf, factory); - this.rpcTimeout = rpcTimeout; - Long version = Invocation.PROTOCOL_VERSION.get(protocol); - if (version != null) { - this.clientProtocolVersion = version; - } else { - try { - this.clientProtocolVersion = HBaseClientRPC.getProtocolVersion(protocol); - } catch (NoSuchFieldException e) { - throw new RuntimeException("Exception encountered during " + - protocol, e); - } catch (IllegalAccessException e) { - throw new RuntimeException("Exception encountered during " + - protocol, e); - } - } - } - - private RpcRequestBody constructRpcRequest(Method method, - Object[] params) throws ServiceException { - RpcRequestBody rpcRequest; - RpcRequestBody.Builder builder = RpcRequestBody.newBuilder(); - builder.setMethodName(method.getName()); - Message param; - int length = params.length; - if (length == 2) { - // RpcController + Message in the method args - // (generated code from RPC bits in .proto files have 
RpcController) - param = (Message)params[1]; - } else if (length == 1) { // Message - param = (Message)params[0]; - } else { - throw new ServiceException("Too many parameters for request. Method: [" - + method.getName() + "]" + ", Expected: 2, Actual: " - + params.length); - } - builder.setRequestClassName(param.getClass().getName()); - builder.setRequest(param.toByteString()); - builder.setClientProtocolVersion(clientProtocolVersion); - rpcRequest = builder.build(); - return rpcRequest; - } - - /** - * This is the client side invoker of RPC method. It only throws - * ServiceException, since the invocation proxy expects only - * ServiceException to be thrown by the method in case protobuf service. - * - * ServiceException has the following causes: - *
- * <ol>
- * <li>Exceptions encountered on the client side in this method are
- * set as cause in ServiceException as is.</li>
- * <li>Exceptions from the server are wrapped in RemoteException and are
- * set as cause in ServiceException</li>
- * </ol>
          - * - * Note that the client calling protobuf RPC methods, must handle - * ServiceException by getting the cause from the ServiceException. If the - * cause is RemoteException, then unwrap it to get the exception thrown by - * the server. - */ - @Override - public Object invoke(Object proxy, Method method, Object[] args) - throws ServiceException { - long startTime = 0; - if (LOG.isDebugEnabled()) { - startTime = System.currentTimeMillis(); - } - - RpcRequestBody rpcRequest = constructRpcRequest(method, args); - Message val = null; - try { - val = client.call(rpcRequest, address, protocol, ticket, rpcTimeout); - - if (LOG.isDebugEnabled()) { - long callTime = System.currentTimeMillis() - startTime; - if (LOG.isTraceEnabled()) LOG.trace("Call: " + method.getName() + " " + callTime); - } - return val; - } catch (Throwable e) { - if (e instanceof RemoteException) { - Throwable cause = ((RemoteException)e).unwrapRemoteException(); - throw new ServiceException(cause); - } - throw new ServiceException(e); - } - } - - synchronized protected void close() { - if (!isClosed) { - isClosed = true; - CLIENTS.stopClient(client); - } - } - - static Message getReturnProtoType(Method method) throws Exception { - if (returnTypes.containsKey(method.getName())) { - return returnTypes.get(method.getName()); - } - - Class returnType = method.getReturnType(); - Method newInstMethod = returnType.getMethod("getDefaultInstance"); - newInstMethod.setAccessible(true); - Message protoType = (Message) newInstMethod.invoke(null, (Object[]) null); - returnTypes.put(method.getName(), protoType); - return protoType; - } - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcServerEngine.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcServerEngine.java index 3317af3..65e7f74 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcServerEngine.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcServerEngine.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcRequestBody; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.security.HBasePolicyProvider; -import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.hadoop.hbase.util.Bytes; @@ -257,7 +256,7 @@ class ProtobufRpcServerEngine implements RpcServerEngine { throw (IOException)target; } if (target instanceof ServiceException) { - throw ProtobufUtil.getRemoteException((ServiceException)target); + throw ProtobufUtil.getRemoteException((ServiceException) target); } IOException ioe = new IOException(target.toString()); ioe.setStackTrace(target.getStackTrace()); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtocolSignature.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtocolSignature.java deleted file mode 100644 index b3d1df5..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtocolSignature.java +++ /dev/null @@ -1,243 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
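As the invoke() javadoc above spells out, callers of these proxies only ever see ServiceException and have to dig the real cause out themselves. A hedged sketch of that caller-side handling, which parallels the ProtobufUtil.getRemoteException helper appearing later in this patch (the class name is illustrative):

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import com.google.protobuf.ServiceException;

class UnwrapSketch {
  // Recover the exception the server actually threw from a ServiceException
  // raised by a protobuf stub call.
  static IOException unwrap(ServiceException se) {
    Throwable cause = se.getCause();
    if (cause instanceof RemoteException) {
      return ((RemoteException) cause).unwrapRemoteException();
    }
    return cause instanceof IOException ? (IOException) cause : new IOException(se);
  }
}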
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.lang.reflect.Method; -import java.util.Arrays; -import java.util.HashMap; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableFactories; -import org.apache.hadoop.io.WritableFactory; - -@InterfaceAudience.Private -public class ProtocolSignature implements Writable { - static { // register a ctor - WritableFactories.setFactory - (ProtocolSignature.class, - new WritableFactory() { - public Writable newInstance() { return new ProtocolSignature(); } - }); - } - - private long version; - private int[] methods = null; // an array of method hash codes - - /** - * default constructor - */ - public ProtocolSignature() { - } - - /** - * Constructor - * - * @param version server version - * @param methodHashcodes hash codes of the methods supported by server - */ - public ProtocolSignature(long version, int[] methodHashcodes) { - this.version = version; - this.methods = methodHashcodes; - } - - public long getVersion() { - return version; - } - - public int[] getMethods() { - return methods; - } - - @Override - public void readFields(DataInput in) throws IOException { - version = in.readLong(); - boolean hasMethods = in.readBoolean(); - if (hasMethods) { - int numMethods = in.readInt(); - methods = new int[numMethods]; - for (int i=0; i type : method.getParameterTypes()) { - hashcode = 31*hashcode ^ type.getName().hashCode(); - } - return hashcode; - } - - /** - * Convert an array of Method into an array of hash codes - * - * @param methods - * @return array of hash codes - */ - private static int[] getFingerprints(Method[] methods) { - if (methods == null) { - return null; - } - int[] hashCodes = new int[methods.length]; - for (int i = 0; i - PROTOCOL_FINGERPRINT_CACHE = - new HashMap(); - - /** - * Return a protocol's signature and finger print from cache - * - * @param protocol a protocol class - * @param serverVersion protocol version - * @return its signature and finger print - */ - private static ProtocolSigFingerprint getSigFingerprint( - Class protocol, long serverVersion) { - String protocolName = protocol.getName(); - synchronized (PROTOCOL_FINGERPRINT_CACHE) { - ProtocolSigFingerprint sig = PROTOCOL_FINGERPRINT_CACHE.get(protocolName); - if (sig == null) { - int[] serverMethodHashcodes = getFingerprints(protocol.getMethods()); - sig = new ProtocolSigFingerprint( - new ProtocolSignature(serverVersion, serverMethodHashcodes), - getFingerprint(serverMethodHashcodes)); - PROTOCOL_FINGERPRINT_CACHE.put(protocolName, sig); - } - return sig; - } - } - - /** - * Get a server protocol's signature - * - * @param clientMethodsHashCode client protocol methods hashcode - * @param serverVersion server protocol version - * @param protocol protocol - * @return the server's protocol signature - */ - 
static ProtocolSignature getProtocolSignature( - int clientMethodsHashCode, - long serverVersion, - Class protocol) { - // try to get the finger print & signature from the cache - ProtocolSigFingerprint sig = getSigFingerprint(protocol, serverVersion); - - // check if the client side protocol matches the one on the server side - if (clientMethodsHashCode == sig.fingerprint) { - return new ProtocolSignature(serverVersion, null); // null indicates a match - } - - return sig.signature; - } - - /** - * Get a server protocol's signature - * - * @param server server implementation - * @param protocol server protocol - * @param clientVersion client's version - * @param clientMethodsHash client's protocol's hash code - * @return the server protocol's signature - * @throws IOException if any error occurs - */ - @SuppressWarnings("unchecked") - public static ProtocolSignature getProtocolSignature(VersionedProtocol server, - String protocol, - long clientVersion, int clientMethodsHash) throws IOException { - Class inter; - try { - inter = (Class)Class.forName(protocol); - } catch (Exception e) { - throw new IOException(e); - } - long serverVersion = server.getProtocolVersion(protocol, clientVersion); - return ProtocolSignature.getProtocolSignature( - clientMethodsHash, serverVersion, inter); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java deleted file mode 100644 index e5855c9..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import com.google.protobuf.ByteString; -import com.google.protobuf.Descriptors; -import com.google.protobuf.Message; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.ServerCallable; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.util.Bytes; - -import java.io.IOException; - -import static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; - -/** - * Provides clients with an RPC connection to call coprocessor endpoint {@link com.google.protobuf.Service}s - * against a given table region. 
An instance of this class may be obtained - * by calling {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])}, - * but should normally only be used in creating a new {@link com.google.protobuf.Service} stub to call the endpoint - * methods. - * @see org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[]) - */ -@InterfaceAudience.Private -public class RegionCoprocessorRpcChannel extends CoprocessorRpcChannel{ - private static Log LOG = LogFactory.getLog(RegionCoprocessorRpcChannel.class); - - private final HConnection connection; - private final byte[] table; - private final byte[] row; - private byte[] lastRegion; - - public RegionCoprocessorRpcChannel(HConnection conn, byte[] table, byte[] row) { - this.connection = conn; - this.table = table; - this.row = row; - } - - @Override - protected Message callExecService(Descriptors.MethodDescriptor method, - Message request, Message responsePrototype) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Call: "+method.getName()+", "+request.toString()); - } - - if (row == null) { - throw new IllegalArgumentException("Missing row property for remote region location"); - } - - final ClientProtos.CoprocessorServiceCall call = - ClientProtos.CoprocessorServiceCall.newBuilder() - .setRow(ByteString.copyFrom(row)) - .setServiceName(method.getService().getFullName()) - .setMethodName(method.getName()) - .setRequest(request.toByteString()).build(); - ServerCallable callable = - new ServerCallable(connection, table, row) { - public CoprocessorServiceResponse call() throws Exception { - byte[] regionName = location.getRegionInfo().getRegionName(); - return ProtobufUtil.execService(server, call, regionName); - } - }; - CoprocessorServiceResponse result = callable.withRetries(); - Message response = null; - if (result.getValue().hasValue()) { - response = responsePrototype.newBuilderForType() - .mergeFrom(result.getValue().getValue()).build(); - } else { - response = responsePrototype.getDefaultInstanceForType(); - } - lastRegion = result.getRegion().getValue().toByteArray(); - if (LOG.isTraceEnabled()) { - LOG.trace("Result is region=" + Bytes.toStringBinary(lastRegion) + ", value=" + response); - } - return response; - } - - public byte[] getLastRegion() { - return lastRegion; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientEngine.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientEngine.java deleted file mode 100644 index f6dcbf9..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientEngine.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
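Per their javadoc, the coprocessor RPC channels removed above are not meant to be used directly: they only back the Service stubs handed out by HBaseAdmin.coprocessorService() and HTable.coprocessorService(byte[]). A hedged usage sketch against a hypothetical generated endpoint called MyService (only the table and channel calls below are named in this patch; the service, request and response types are assumptions):

// Obtain a channel for the region holding "row-key", wrap it in the generated
// blocking stub, then call the endpoint method.
static MyResponse callEndpoint(Configuration conf) throws Exception {
  HTable table = new HTable(conf, "demo_table");
  try {
    CoprocessorRpcChannel channel = table.coprocessorService(Bytes.toBytes("row-key"));
    MyService.BlockingInterface stub = MyService.newBlockingStub(channel);
    return stub.myMethod(null, MyRequest.getDefaultInstance()); // throws ServiceException on failure
  } finally {
    table.close();
  }
}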
- */ - -package org.apache.hadoop.hbase.ipc; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.security.User; - -import javax.net.SocketFactory; -import java.io.IOException; -import java.net.InetSocketAddress; - -/** An RPC implementation for the client */ -@InterfaceAudience.Private -public interface RpcClientEngine { - /** Construct a client-side proxy object. */ - VersionedProtocol getProxy(Class protocol, - long clientVersion, InetSocketAddress addr, - User ticket, Configuration conf, - SocketFactory factory, int rpcTimeout) throws IOException; - - /** Stop this proxy. */ - void stopProxy(VersionedProtocol proxy); - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java deleted file mode 100644 index aa36b4c..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerNotRunningYetException.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; - -@SuppressWarnings("serial") -@InterfaceAudience.Private -public class ServerNotRunningYetException extends IOException { - public ServerNotRunningYetException(String s) { - super(s); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java deleted file mode 100644 index cbf63fc..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.ipc; - -import com.google.protobuf.Descriptors; -import com.google.protobuf.Message; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; -import org.apache.hadoop.util.StringUtils; - -import java.io.IOException; - -/** - * Used for server-side protobuf RPC service invocations. This handler allows - * invocation exceptions to easily be passed through to the RPC server from coprocessor - * {@link Service} implementations. - * - *

          - * When implementing {@link Service} defined methods, coprocessor endpoints can use the following - * pattern to pass exceptions back to the RPC client: - * - * public void myMethod(RpcController controller, MyRequest request, RpcCallback done) { - * MyResponse response = null; - * try { - * // do processing - * response = MyResponse.getDefaultInstance(); // or use a new builder to populate the response - * } catch (IOException ioe) { - * // pass exception back up - * ResponseConverter.setControllerException(controller, ioe); - * } - * done.run(response); - * } - * - *

          - */ -public class ServerRpcController implements RpcController { - /** - * The exception thrown within - * {@link Service#callMethod(Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)}, - * if any. - */ - // TODO: it would be good widen this to just Throwable, but IOException is what we allow now - private IOException serviceException; - private String errorMessage; - - @Override - public void reset() { - serviceException = null; - errorMessage = null; - } - - @Override - public boolean failed() { - return (failedOnException() || errorMessage != null); - } - - @Override - public String errorText() { - return errorMessage; - } - - @Override - public void startCancel() { - // not implemented - } - - @Override - public void setFailed(String message) { - errorMessage = message; - } - - @Override - public boolean isCanceled() { - return false; - } - - @Override - public void notifyOnCancel(RpcCallback objectRpcCallback) { - // not implemented - } - - /** - * Sets an exception to be communicated back to the {@link Service} client. - * @param ioe the exception encountered during execution of the service method - */ - public void setFailedOn(IOException ioe) { - serviceException = ioe; - setFailed(StringUtils.stringifyException(ioe)); - } - - /** - * Returns any exception thrown during service method invocation, or {@code null} if no exception - * was thrown. This can be used by clients to receive exceptions generated by RPC calls, even - * when {@link RpcCallback}s are used and no {@link com.google.protobuf.ServiceException} is - * declared. - */ - public IOException getFailedOn() { - return serviceException; - } - - /** - * Returns whether or not a server exception was generated in the prior RPC invocation. - */ - public boolean failedOnException() { - return serviceException != null; - } - - /** - * Throws an IOException back out if one is currently stored. - */ - public void checkFailed() throws IOException { - if (failedOnException()) { - throw getFailedOn(); - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/VersionedProtocol.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/VersionedProtocol.java deleted file mode 100644 index 3667c6d..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/VersionedProtocol.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Superclass of all protocols that use Hadoop RPC. - * Subclasses of this interface are also supposed to have - * a static final long versionID field. 
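ServerRpcController, removed above, is what lets an endpoint hand an IOException back to its caller without tunnelling it through ServiceException. On the calling side it is commonly paired with the BlockingRpcCallback from this same ipc package; a hedged sketch of that pairing (the service and message names are hypothetical, and the BlockingRpcCallback API shown is assumed, not quoted from this patch):

// Invoke a coprocessor endpoint through the non-blocking stub, then surface
// any server-side IOException recorded in the controller.
static MyResponse callWithController(MyService stub, MyRequest request) throws IOException {
  ServerRpcController controller = new ServerRpcController();
  BlockingRpcCallback<MyResponse> callback = new BlockingRpcCallback<MyResponse>();
  stub.myMethod(controller, request, callback); // endpoint runs and fills the callback
  MyResponse response = callback.get();         // blocks until the callback fires
  controller.checkFailed();                     // rethrows anything set via setFailedOn()
  return response;
}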
- */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public interface VersionedProtocol { - - /** - * Return protocol version corresponding to protocol interface. - * @param protocol The classname of the protocol interface - * @param clientVersion The version of the protocol that the client speaks - * @return the version that the server will speak - * @throws IOException if any IO error occurs - */ - @Deprecated - public long getProtocolVersion(String protocol, - long clientVersion) throws IOException; - - /** - * Return protocol version corresponding to protocol interface. - * @param protocol The classname of the protocol interface - * @param clientVersion The version of the protocol that the client speaks - * @param clientMethodsHash the hashcode of client protocol methods - * @return the server protocol signature containing its version and - * a list of its supported methods - * @see ProtocolSignature#getProtocolSignature(VersionedProtocol, String, - * long, int) for a default implementation - */ - public ProtocolSignature getProtocolSignature(String protocol, - long clientVersion, - int clientMethodsHash) throws IOException; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java index 4dc2715..ce4bb04 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java @@ -97,7 +97,7 @@ public class HFileOutputFormat extends FileOutputFormat protoList) { + List entries = new ArrayList(); + for (AdminProtos.WALEntry entry: protoList) { + AdminProtos.WALEntry.WALKey walKey = entry.getKey(); + java.util.UUID clusterId = HConstants.DEFAULT_CLUSTER_ID; + if (walKey.hasClusterId()) { + AdminProtos.UUID protoUuid = walKey.getClusterId(); + clusterId = new java.util.UUID( + protoUuid.getMostSigBits(), protoUuid.getLeastSigBits()); + } + HLogKey key = new HLogKey(walKey.getEncodedRegionName().toByteArray(), + walKey.getTableName().toByteArray(), walKey.getLogSequenceNumber(), + walKey.getWriteTime(), clusterId); + AdminProtos.WALEntry.WALEdit walEdit = entry.getEdit(); + WALEdit edit = new WALEdit(); + for (ByteString keyValue: walEdit.getKeyValueBytesList()) { + edit.add(new KeyValue(keyValue.toByteArray())); + } + if (walEdit.getFamilyScopeCount() > 0) { + TreeMap scopes = + new TreeMap(Bytes.BYTES_COMPARATOR); + for (AdminProtos.WALEntry.WALEdit.FamilyScope scope: walEdit.getFamilyScopeList()) { + scopes.put(scope.getFamily().toByteArray(), + Integer.valueOf(scope.getScopeType().ordinal())); + } + edit.setScopes(scopes); + } + entries.add(new HLog.Entry(key, edit)); + } + return entries.toArray(new HLog.Entry[entries.size()]); + } + + /** + * A helper to replicate a list of HLog entries using admin protocol. 
+ * + * @param admin + * @param entries + * @throws java.io.IOException + */ + public static void replicateWALEntry(final AdminProtocol admin, + final HLog.Entry[] entries) throws IOException { + AdminProtos.ReplicateWALEntryRequest request = + buildReplicateWALEntryRequest(entries); + try { + admin.replicateWALEntry(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + /** + * Create a new ReplicateWALEntryRequest from a list of HLog entries + * + * @param entries the HLog entries to be replicated + * @return a ReplicateWALEntryRequest + */ + public static AdminProtos.ReplicateWALEntryRequest + buildReplicateWALEntryRequest(final HLog.Entry[] entries) { + AdminProtos.WALEntry.WALEdit.FamilyScope.Builder scopeBuilder = AdminProtos.WALEntry + .WALEdit + .FamilyScope + .newBuilder(); + AdminProtos.WALEntry.Builder entryBuilder = AdminProtos.WALEntry.newBuilder(); + AdminProtos.ReplicateWALEntryRequest.Builder builder = + AdminProtos.ReplicateWALEntryRequest.newBuilder(); + for (HLog.Entry entry: entries) { + entryBuilder.clear(); + AdminProtos.WALEntry.WALKey.Builder keyBuilder = entryBuilder.getKeyBuilder(); + HLogKey key = entry.getKey(); + keyBuilder.setEncodedRegionName( + ByteString.copyFrom(key.getEncodedRegionName())); + keyBuilder.setTableName(ByteString.copyFrom(key.getTablename())); + keyBuilder.setLogSequenceNumber(key.getLogSeqNum()); + keyBuilder.setWriteTime(key.getWriteTime()); + UUID clusterId = key.getClusterId(); + if (clusterId != null) { + AdminProtos.UUID.Builder uuidBuilder = keyBuilder.getClusterIdBuilder(); + uuidBuilder.setLeastSigBits(clusterId.getLeastSignificantBits()); + uuidBuilder.setMostSigBits(clusterId.getMostSignificantBits()); + } + WALEdit edit = entry.getEdit(); + AdminProtos.WALEntry.WALEdit.Builder editBuilder = entryBuilder.getEditBuilder(); + NavigableMap scopes = edit.getScopes(); + if (scopes != null && !scopes.isEmpty()) { + for (Map.Entry scope: scopes.entrySet()) { + scopeBuilder.setFamily(ByteString.copyFrom(scope.getKey())); + AdminProtos.WALEntry.WALEdit.ScopeType + scopeType = AdminProtos.WALEntry + .WALEdit + .ScopeType + .valueOf(scope.getValue().intValue()); + scopeBuilder.setScopeType(scopeType); + editBuilder.addFamilyScope(scopeBuilder.build()); + } + } + List keyValues = edit.getKeyValues(); + for (KeyValue value: keyValues) { + editBuilder.addKeyValueBytes(ByteString.copyFrom( + value.getBuffer(), value.getOffset(), value.getLength())); + } + builder.addEntry(entryBuilder.build()); + } + return builder.build(); + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java deleted file mode 100644 index 01d8428..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ /dev/null @@ -1,1920 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
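The replication helpers added above translate between HLog.Entry[] and the protobuf ReplicateWALEntryRequest in both directions. A hedged sketch of the sending side (how the AdminProtocol stub and the batched entries are obtained is outside this hunk, and a static import of the helper is assumed):

// Ship a batch of WAL entries to a sink region server through its AdminProtocol.
static void shipBatch(AdminProtocol sinkAdmin, HLog.Entry[] batch) throws IOException {
  // replicateWALEntry builds the ReplicateWALEntryRequest and converts any
  // ServiceException from the stub back into an IOException; callers that want
  // to issue the RPC themselves can call buildReplicateWALEntryRequest(batch) instead.
  replicateWALEntry(sinkAdmin, batch);
}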
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.protobuf; - -import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInput; -import java.io.DataInputStream; -import java.io.DataOutput; -import java.io.DataOutputStream; -import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.NavigableMap; -import java.util.NavigableSet; -import java.util.TreeMap; - -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.MasterAdminProtocol; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.Action; -import org.apache.hadoop.hbase.client.AdminProtocol; -import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.ClientProtocol; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Increment; -import org.apache.hadoop.hbase.client.MultiAction; -import org.apache.hadoop.hbase.client.MultiResponse; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Row; -import org.apache.hadoop.hbase.client.RowLock; -import org.apache.hadoop.hbase.client.RowMutations; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.metrics.ScanMetrics; -import org.apache.hadoop.hbase.filter.ByteArrayComparable; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.io.HbaseObjectWritable; -import org.apache.hadoop.hbase.io.TimeRange; -import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; -import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest; -import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UUID; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit.FamilyScope; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey; -import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.ColumnValue; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.ColumnValue.QualifierValue; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.DeleteType; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.MutateType; -import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse; -import org.apache.hadoop.hbase.regionserver.wal.HLog; -import org.apache.hadoop.hbase.regionserver.wal.HLogKey; -import org.apache.hadoop.hbase.regionserver.wal.WALEdit; -import org.apache.hadoop.hbase.security.access.Permission; -import org.apache.hadoop.hbase.security.access.TablePermission; -import org.apache.hadoop.hbase.security.access.UserPermission; -import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Methods; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.token.Token; -import org.apache.hbase.Cell; - -import 
com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.ListMultimap; -import com.google.common.collect.Lists; -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.Message; -import com.google.protobuf.RpcChannel; -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; - -/** - * Protobufs utility. - */ -public final class ProtobufUtil { - - private ProtobufUtil() { - } - - /** - * Primitive type to class mapping. - */ - private final static Map> - PRIMITIVES = new HashMap>(); - - static { - PRIMITIVES.put(Boolean.TYPE.getName(), Boolean.TYPE); - PRIMITIVES.put(Byte.TYPE.getName(), Byte.TYPE); - PRIMITIVES.put(Character.TYPE.getName(), Character.TYPE); - PRIMITIVES.put(Short.TYPE.getName(), Short.TYPE); - PRIMITIVES.put(Integer.TYPE.getName(), Integer.TYPE); - PRIMITIVES.put(Long.TYPE.getName(), Long.TYPE); - PRIMITIVES.put(Float.TYPE.getName(), Float.TYPE); - PRIMITIVES.put(Double.TYPE.getName(), Double.TYPE); - PRIMITIVES.put(Void.TYPE.getName(), Void.TYPE); - } - - /** - * Magic we put ahead of a serialized protobuf message. - * For example, all znode content is protobuf messages with the below magic - * for preamble. - */ - public static final byte [] PB_MAGIC = new byte [] {'P', 'B', 'U', 'F'}; - private static final String PB_MAGIC_STR = Bytes.toString(PB_MAGIC); - - /** - * Prepend the passed bytes with four bytes of magic, {@link #PB_MAGIC}, to flag what - * follows as a protobuf in hbase. Prepend these bytes to all content written to znodes, etc. - * @param bytes Bytes to decorate - * @return The passed bytes with magic prepended (Creates a new - * byte array that is bytes.length plus {@link #PB_MAGIC}.length. - */ - public static byte [] prependPBMagic(final byte [] bytes) { - return Bytes.add(PB_MAGIC, bytes); - } - - /** - * @param bytes Bytes to check. - * @return True if passed bytes has {@link #PB_MAGIC} for a prefix. - */ - public static boolean isPBMagicPrefix(final byte [] bytes) { - if (bytes == null || bytes.length < PB_MAGIC.length) return false; - return Bytes.compareTo(PB_MAGIC, 0, PB_MAGIC.length, bytes, 0, PB_MAGIC.length) == 0; - } - - /** - * @param bytes - * @throws DeserializationException if we are missing the pb magic prefix - */ - public static void expectPBMagicPrefix(final byte [] bytes) throws DeserializationException { - if (!isPBMagicPrefix(bytes)) { - throw new DeserializationException("Missing pb magic " + PB_MAGIC_STR + " prefix"); - } - } - - /** - * @return Length of {@link #PB_MAGIC} - */ - public static int lengthOfPBMagic() { - return PB_MAGIC.length; - } - - /** - * Return the IOException thrown by the remote server wrapped in - * ServiceException as cause. - * - * @param se ServiceException that wraps IO exception thrown by the server - * @return Exception wrapped in ServiceException or - * a new IOException that wraps the unexpected ServiceException. - */ - public static IOException getRemoteException(ServiceException se) { - Throwable e = se.getCause(); - if (e == null) { - return new IOException(se); - } - return e instanceof IOException ? 
(IOException) e : new IOException(se); - } - - /** - * Convert a ServerName to a protocol buffer ServerName - * - * @param serverName the ServerName to convert - * @return the converted protocol buffer ServerName - * @see #toServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) - */ - public static HBaseProtos.ServerName - toServerName(final ServerName serverName) { - if (serverName == null) return null; - HBaseProtos.ServerName.Builder builder = - HBaseProtos.ServerName.newBuilder(); - builder.setHostName(serverName.getHostname()); - if (serverName.getPort() >= 0) { - builder.setPort(serverName.getPort()); - } - if (serverName.getStartcode() >= 0) { - builder.setStartCode(serverName.getStartcode()); - } - return builder.build(); - } - - /** - * Convert a protocol buffer ServerName to a ServerName - * - * @param proto the protocol buffer ServerName to convert - * @return the converted ServerName - */ - public static ServerName toServerName(final HBaseProtos.ServerName proto) { - if (proto == null) return null; - String hostName = proto.getHostName(); - long startCode = -1; - int port = -1; - if (proto.hasPort()) { - port = proto.getPort(); - } - if (proto.hasStartCode()) { - startCode = proto.getStartCode(); - } - return new ServerName(hostName, port, startCode); - } - - /** - * Get HTableDescriptor[] from GetTableDescriptorsResponse protobuf - * - * @param proto the GetTableDescriptorsResponse - * @return HTableDescriptor[] - */ - public static HTableDescriptor[] getHTableDescriptorArray(GetTableDescriptorsResponse proto) { - if (proto == null) return null; - - HTableDescriptor[] ret = new HTableDescriptor[proto.getTableSchemaCount()]; - for (int i = 0; i < proto.getTableSchemaCount(); ++i) { - ret[i] = HTableDescriptor.convert(proto.getTableSchema(i)); - } - return ret; - } - - /** - * get the split keys in form "byte [][]" from a CreateTableRequest proto - * - * @param proto the CreateTableRequest - * @return the split keys - */ - public static byte [][] getSplitKeysArray(final CreateTableRequest proto) { - byte [][] splitKeys = new byte[proto.getSplitKeysCount()][]; - for (int i = 0; i < proto.getSplitKeysCount(); ++i) { - splitKeys[i] = proto.getSplitKeys(i).toByteArray(); - } - return splitKeys; - } - - /** - * Convert a protocol buffer Get to a client Get - * - * @param proto the protocol buffer Get to convert - * @return the converted client Get - * @throws IOException - */ - public static Get toGet( - final ClientProtos.Get proto) throws IOException { - if (proto == null) return null; - byte[] row = proto.getRow().toByteArray(); - RowLock rowLock = null; - if (proto.hasLockId()) { - rowLock = new RowLock(proto.getLockId()); - } - Get get = new Get(row, rowLock); - if (proto.hasCacheBlocks()) { - get.setCacheBlocks(proto.getCacheBlocks()); - } - if (proto.hasMaxVersions()) { - get.setMaxVersions(proto.getMaxVersions()); - } - if (proto.hasStoreLimit()) { - get.setMaxResultsPerColumnFamily(proto.getStoreLimit()); - } - if (proto.hasStoreOffset()) { - get.setRowOffsetPerColumnFamily(proto.getStoreOffset()); - } - if (proto.hasTimeRange()) { - HBaseProtos.TimeRange timeRange = proto.getTimeRange(); - long minStamp = 0; - long maxStamp = Long.MAX_VALUE; - if (timeRange.hasFrom()) { - minStamp = timeRange.getFrom(); - } - if (timeRange.hasTo()) { - maxStamp = timeRange.getTo(); - } - get.setTimeRange(minStamp, maxStamp); - } - if (proto.hasFilter()) { - HBaseProtos.Filter filter = proto.getFilter(); - get.setFilter(ProtobufUtil.toFilter(filter)); - } - for 
(NameBytesPair attribute: proto.getAttributeList()) { - get.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); - } - if (proto.getColumnCount() > 0) { - for (Column column: proto.getColumnList()) { - byte[] family = column.getFamily().toByteArray(); - if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { - get.addColumn(family, qualifier.toByteArray()); - } - } else { - get.addFamily(family); - } - } - } - return get; - } - - /** - * Convert a protocol buffer Mutate to a Put - * - * @param proto the protocol buffer Mutate to convert - * @return the converted client Put - * @throws DoNotRetryIOException - */ - public static Put toPut( - final Mutate proto) throws DoNotRetryIOException { - MutateType type = proto.getMutateType(); - assert type == MutateType.PUT : type.name(); - byte[] row = proto.getRow().toByteArray(); - long timestamp = HConstants.LATEST_TIMESTAMP; - if (proto.hasTimestamp()) { - timestamp = proto.getTimestamp(); - } - RowLock lock = null; - if (proto.hasLockId()) { - lock = new RowLock(proto.getLockId()); - } - Put put = new Put(row, timestamp, lock); - put.setWriteToWAL(proto.getWriteToWAL()); - for (NameBytesPair attribute: proto.getAttributeList()) { - put.setAttribute(attribute.getName(), - attribute.getValue().toByteArray()); - } - for (ColumnValue column: proto.getColumnValueList()) { - byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { - byte[] qualifier = qv.getQualifier().toByteArray(); - if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifer value"); - } - byte[] value = qv.getValue().toByteArray(); - long ts = timestamp; - if (qv.hasTimestamp()) { - ts = qv.getTimestamp(); - } - put.add(family, qualifier, ts, value); - } - } - return put; - } - - /** - * Convert a protocol buffer Mutate to a Delete - * - * @param proto the protocol buffer Mutate to convert - * @return the converted client Delete - */ - public static Delete toDelete(final Mutate proto) { - MutateType type = proto.getMutateType(); - assert type == MutateType.DELETE : type.name(); - byte[] row = proto.getRow().toByteArray(); - long timestamp = HConstants.LATEST_TIMESTAMP; - if (proto.hasTimestamp()) { - timestamp = proto.getTimestamp(); - } - RowLock lock = null; - if (proto.hasLockId()) { - lock = new RowLock(proto.getLockId()); - } - Delete delete = new Delete(row, timestamp, lock); - delete.setWriteToWAL(proto.getWriteToWAL()); - for (NameBytesPair attribute: proto.getAttributeList()) { - delete.setAttribute(attribute.getName(), - attribute.getValue().toByteArray()); - } - for (ColumnValue column: proto.getColumnValueList()) { - byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { - DeleteType deleteType = qv.getDeleteType(); - byte[] qualifier = null; - if (qv.hasQualifier()) { - qualifier = qv.getQualifier().toByteArray(); - } - long ts = HConstants.LATEST_TIMESTAMP; - if (qv.hasTimestamp()) { - ts = qv.getTimestamp(); - } - if (deleteType == DeleteType.DELETE_ONE_VERSION) { - delete.deleteColumn(family, qualifier, ts); - } else if (deleteType == DeleteType.DELETE_MULTIPLE_VERSIONS) { - delete.deleteColumns(family, qualifier, ts); - } else { - delete.deleteFamily(family, ts); - } - } - } - return delete; - } - - /** - * Convert a protocol buffer Mutate to an Append - * - * @param proto the protocol buffer Mutate to convert - * @return the converted client Append - * @throws 
DoNotRetryIOException - */ - public static Append toAppend( - final Mutate proto) throws DoNotRetryIOException { - MutateType type = proto.getMutateType(); - assert type == MutateType.APPEND : type.name(); - byte[] row = proto.getRow().toByteArray(); - Append append = new Append(row); - append.setWriteToWAL(proto.getWriteToWAL()); - for (NameBytesPair attribute: proto.getAttributeList()) { - append.setAttribute(attribute.getName(), - attribute.getValue().toByteArray()); - } - for (ColumnValue column: proto.getColumnValueList()) { - byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { - byte[] qualifier = qv.getQualifier().toByteArray(); - if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifer value"); - } - byte[] value = qv.getValue().toByteArray(); - append.add(family, qualifier, value); - } - } - return append; - } - - /** - * Convert a MutateRequest to Mutation - * - * @param proto the protocol buffer Mutate to convert - * @return the converted Mutation - * @throws IOException - */ - public static Mutation toMutation(final Mutate proto) throws IOException { - MutateType type = proto.getMutateType(); - if (type == MutateType.APPEND) { - return toAppend(proto); - } - if (type == MutateType.DELETE) { - return toDelete(proto); - } - if (type == MutateType.PUT) { - return toPut(proto); - } - throw new IOException("Not an understood mutate type " + type); - } - - /** - * Convert a protocol buffer Mutate to an Increment - * - * @param proto the protocol buffer Mutate to convert - * @return the converted client Increment - * @throws IOException - */ - public static Increment toIncrement( - final Mutate proto) throws IOException { - MutateType type = proto.getMutateType(); - assert type == MutateType.INCREMENT : type.name(); - RowLock lock = null; - if (proto.hasLockId()) { - lock = new RowLock(proto.getLockId()); - } - byte[] row = proto.getRow().toByteArray(); - Increment increment = new Increment(row, lock); - increment.setWriteToWAL(proto.getWriteToWAL()); - if (proto.hasTimeRange()) { - HBaseProtos.TimeRange timeRange = proto.getTimeRange(); - long minStamp = 0; - long maxStamp = Long.MAX_VALUE; - if (timeRange.hasFrom()) { - minStamp = timeRange.getFrom(); - } - if (timeRange.hasTo()) { - maxStamp = timeRange.getTo(); - } - increment.setTimeRange(minStamp, maxStamp); - } - for (ColumnValue column: proto.getColumnValueList()) { - byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { - byte[] qualifier = qv.getQualifier().toByteArray(); - if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifer value"); - } - long value = Bytes.toLong(qv.getValue().toByteArray()); - increment.addColumn(family, qualifier, value); - } - } - return increment; - } - - /** - * Convert a client Scan to a protocol buffer Scan - * - * @param scan the client Scan to convert - * @return the converted protocol buffer Scan - * @throws IOException - */ - public static ClientProtos.Scan toScan( - final Scan scan) throws IOException { - ClientProtos.Scan.Builder scanBuilder = - ClientProtos.Scan.newBuilder(); - scanBuilder.setCacheBlocks(scan.getCacheBlocks()); - if (scan.getBatch() > 0) { - scanBuilder.setBatchSize(scan.getBatch()); - } - if (scan.getMaxResultSize() > 0) { - scanBuilder.setMaxResultSize(scan.getMaxResultSize()); - } - scanBuilder.setMaxVersions(scan.getMaxVersions()); - TimeRange timeRange = scan.getTimeRange(); - if 
(!timeRange.isAllTime()) { - HBaseProtos.TimeRange.Builder timeRangeBuilder = - HBaseProtos.TimeRange.newBuilder(); - timeRangeBuilder.setFrom(timeRange.getMin()); - timeRangeBuilder.setTo(timeRange.getMax()); - scanBuilder.setTimeRange(timeRangeBuilder.build()); - } - Map attributes = scan.getAttributesMap(); - if (!attributes.isEmpty()) { - NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { - attributeBuilder.setName(attribute.getKey()); - attributeBuilder.setValue(ByteString.copyFrom(attribute.getValue())); - scanBuilder.addAttribute(attributeBuilder.build()); - } - } - byte[] startRow = scan.getStartRow(); - if (startRow != null && startRow.length > 0) { - scanBuilder.setStartRow(ByteString.copyFrom(startRow)); - } - byte[] stopRow = scan.getStopRow(); - if (stopRow != null && stopRow.length > 0) { - scanBuilder.setStopRow(ByteString.copyFrom(stopRow)); - } - if (scan.hasFilter()) { - scanBuilder.setFilter(ProtobufUtil.toFilter(scan.getFilter())); - } - if (scan.hasFamilies()) { - Column.Builder columnBuilder = Column.newBuilder(); - for (Map.Entry> - family: scan.getFamilyMap().entrySet()) { - columnBuilder.setFamily(ByteString.copyFrom(family.getKey())); - NavigableSet qualifiers = family.getValue(); - columnBuilder.clearQualifier(); - if (qualifiers != null && qualifiers.size() > 0) { - for (byte [] qualifier: qualifiers) { - columnBuilder.addQualifier(ByteString.copyFrom(qualifier)); - } - } - scanBuilder.addColumn(columnBuilder.build()); - } - } - if (scan.getMaxResultsPerColumnFamily() >= 0) { - scanBuilder.setStoreLimit(scan.getMaxResultsPerColumnFamily()); - } - if (scan.getRowOffsetPerColumnFamily() > 0) { - scanBuilder.setStoreOffset(scan.getRowOffsetPerColumnFamily()); - } - return scanBuilder.build(); - } - - /** - * Convert a protocol buffer Scan to a client Scan - * - * @param proto the protocol buffer Scan to convert - * @return the converted client Scan - * @throws IOException - */ - public static Scan toScan( - final ClientProtos.Scan proto) throws IOException { - byte [] startRow = HConstants.EMPTY_START_ROW; - byte [] stopRow = HConstants.EMPTY_END_ROW; - if (proto.hasStartRow()) { - startRow = proto.getStartRow().toByteArray(); - } - if (proto.hasStopRow()) { - stopRow = proto.getStopRow().toByteArray(); - } - Scan scan = new Scan(startRow, stopRow); - if (proto.hasCacheBlocks()) { - scan.setCacheBlocks(proto.getCacheBlocks()); - } - if (proto.hasMaxVersions()) { - scan.setMaxVersions(proto.getMaxVersions()); - } - if (proto.hasStoreLimit()) { - scan.setMaxResultsPerColumnFamily(proto.getStoreLimit()); - } - if (proto.hasStoreOffset()) { - scan.setRowOffsetPerColumnFamily(proto.getStoreOffset()); - } - if (proto.hasTimeRange()) { - HBaseProtos.TimeRange timeRange = proto.getTimeRange(); - long minStamp = 0; - long maxStamp = Long.MAX_VALUE; - if (timeRange.hasFrom()) { - minStamp = timeRange.getFrom(); - } - if (timeRange.hasTo()) { - maxStamp = timeRange.getTo(); - } - scan.setTimeRange(minStamp, maxStamp); - } - if (proto.hasFilter()) { - HBaseProtos.Filter filter = proto.getFilter(); - scan.setFilter(ProtobufUtil.toFilter(filter)); - } - if (proto.hasBatchSize()) { - scan.setBatch(proto.getBatchSize()); - } - if (proto.hasMaxResultSize()) { - scan.setMaxResultSize(proto.getMaxResultSize()); - } - for (NameBytesPair attribute: proto.getAttributeList()) { - scan.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); - } - if (proto.getColumnCount() > 0) { - for (Column column: 
proto.getColumnList()) { - byte[] family = column.getFamily().toByteArray(); - if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { - scan.addColumn(family, qualifier.toByteArray()); - } - } else { - scan.addFamily(family); - } - } - } - return scan; - } - - /** - * Create a protocol buffer Get based on a client Get. - * - * @param get the client Get - * @return a protocol buffer Get - * @throws IOException - */ - public static ClientProtos.Get toGet( - final Get get) throws IOException { - ClientProtos.Get.Builder builder = - ClientProtos.Get.newBuilder(); - builder.setRow(ByteString.copyFrom(get.getRow())); - builder.setCacheBlocks(get.getCacheBlocks()); - builder.setMaxVersions(get.getMaxVersions()); - if (get.getLockId() >= 0) { - builder.setLockId(get.getLockId()); - } - if (get.getFilter() != null) { - builder.setFilter(ProtobufUtil.toFilter(get.getFilter())); - } - TimeRange timeRange = get.getTimeRange(); - if (!timeRange.isAllTime()) { - HBaseProtos.TimeRange.Builder timeRangeBuilder = - HBaseProtos.TimeRange.newBuilder(); - timeRangeBuilder.setFrom(timeRange.getMin()); - timeRangeBuilder.setTo(timeRange.getMax()); - builder.setTimeRange(timeRangeBuilder.build()); - } - Map attributes = get.getAttributesMap(); - if (!attributes.isEmpty()) { - NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { - attributeBuilder.setName(attribute.getKey()); - attributeBuilder.setValue(ByteString.copyFrom(attribute.getValue())); - builder.addAttribute(attributeBuilder.build()); - } - } - if (get.hasFamilies()) { - Column.Builder columnBuilder = Column.newBuilder(); - Map> families = get.getFamilyMap(); - for (Map.Entry> family: families.entrySet()) { - NavigableSet qualifiers = family.getValue(); - columnBuilder.setFamily(ByteString.copyFrom(family.getKey())); - columnBuilder.clearQualifier(); - if (qualifiers != null && qualifiers.size() > 0) { - for (byte[] qualifier: qualifiers) { - columnBuilder.addQualifier(ByteString.copyFrom(qualifier)); - } - } - builder.addColumn(columnBuilder.build()); - } - } - if (get.getMaxResultsPerColumnFamily() >= 0) { - builder.setStoreLimit(get.getMaxResultsPerColumnFamily()); - } - if (get.getRowOffsetPerColumnFamily() > 0) { - builder.setStoreOffset(get.getRowOffsetPerColumnFamily()); - } - return builder.build(); - } - - /** - * Convert a client Increment to a protobuf Mutate. 
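For reviewers tracing the move, a quick illustration (not part of the patch) of how the paired converters above fit together; the row, family and qualifier values are invented, everything else is the API as it appears in the deleted code:

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class ConverterRoundTripSketch {
  public static void main(String[] args) throws Exception {
    // Client Get -> protobuf Get -> client Get; the converters copy fields symmetrically.
    Get get = new Get(Bytes.toBytes("row-1"));               // hypothetical row key
    get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));  // hypothetical column
    ClientProtos.Get protoGet = ProtobufUtil.toGet(get);
    Get getBack = ProtobufUtil.toGet(protoGet);

    // The same pattern holds for Scan.
    Scan scan = new Scan(Bytes.toBytes("a"), Bytes.toBytes("z"));
    ClientProtos.Scan protoScan = ProtobufUtil.toScan(scan);
    Scan scanBack = ProtobufUtil.toScan(protoScan);
  }
}
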
- * - * @param increment - * @return the converted mutate - */ - public static Mutate toMutate(final Increment increment) { - Mutate.Builder builder = Mutate.newBuilder(); - builder.setRow(ByteString.copyFrom(increment.getRow())); - builder.setMutateType(MutateType.INCREMENT); - builder.setWriteToWAL(increment.getWriteToWAL()); - if (increment.getLockId() >= 0) { - builder.setLockId(increment.getLockId()); - } - TimeRange timeRange = increment.getTimeRange(); - if (!timeRange.isAllTime()) { - HBaseProtos.TimeRange.Builder timeRangeBuilder = - HBaseProtos.TimeRange.newBuilder(); - timeRangeBuilder.setFrom(timeRange.getMin()); - timeRangeBuilder.setTo(timeRange.getMax()); - builder.setTimeRange(timeRangeBuilder.build()); - } - ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); - QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); - for (Map.Entry> - family: increment.getFamilyMap().entrySet()) { - columnBuilder.setFamily(ByteString.copyFrom(family.getKey())); - columnBuilder.clearQualifierValue(); - NavigableMap values = family.getValue(); - if (values != null && values.size() > 0) { - for (Map.Entry value: values.entrySet()) { - valueBuilder.setQualifier(ByteString.copyFrom(value.getKey())); - valueBuilder.setValue(ByteString.copyFrom( - Bytes.toBytes(value.getValue().longValue()))); - columnBuilder.addQualifierValue(valueBuilder.build()); - } - } - builder.addColumnValue(columnBuilder.build()); - } - return builder.build(); - } - - /** - * Create a protocol buffer Mutate based on a client Mutation - * - * @param mutateType - * @param mutation - * @return a mutate - * @throws IOException - */ - public static Mutate toMutate(final MutateType mutateType, - final Mutation mutation) throws IOException { - Mutate.Builder mutateBuilder = Mutate.newBuilder(); - mutateBuilder.setRow(ByteString.copyFrom(mutation.getRow())); - mutateBuilder.setMutateType(mutateType); - mutateBuilder.setWriteToWAL(mutation.getWriteToWAL()); - if (mutation.getLockId() >= 0) { - mutateBuilder.setLockId(mutation.getLockId()); - } - mutateBuilder.setTimestamp(mutation.getTimeStamp()); - Map attributes = mutation.getAttributesMap(); - if (!attributes.isEmpty()) { - NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { - attributeBuilder.setName(attribute.getKey()); - attributeBuilder.setValue(ByteString.copyFrom(attribute.getValue())); - mutateBuilder.addAttribute(attributeBuilder.build()); - } - } - ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); - QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); - for (Map.Entry> - family: mutation.getFamilyMap().entrySet()) { - columnBuilder.setFamily(ByteString.copyFrom(family.getKey())); - columnBuilder.clearQualifierValue(); - for (KeyValue value: family.getValue()) { - valueBuilder.setQualifier(ByteString.copyFrom(value.getQualifier())); - valueBuilder.setValue(ByteString.copyFrom(value.getValue())); - valueBuilder.setTimestamp(value.getTimestamp()); - if (mutateType == MutateType.DELETE) { - KeyValue.Type keyValueType = KeyValue.Type.codeToType(value.getType()); - valueBuilder.setDeleteType(toDeleteType(keyValueType)); - } - columnBuilder.addQualifierValue(valueBuilder.build()); - } - mutateBuilder.addColumnValue(columnBuilder.build()); - } - return mutateBuilder.build(); - } - - /** - * Convert a client Result to a protocol buffer Result - * - * @param result the client Result to convert - * @return the converted protocol buffer Result - */ - public static 
ClientProtos.Result toResult(final Result result) { - ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); - Cell [] cells = result.raw(); - if (cells != null) { - for (Cell c : cells) { - builder.addKeyValue(toKeyValue(c)); - } - } - return builder.build(); - } - - /** - * Convert a protocol buffer Result to a client Result - * - * @param proto the protocol buffer Result to convert - * @return the converted client Result - */ - public static Result toResult(final ClientProtos.Result proto) { - List values = proto.getKeyValueList(); - List keyValues = new ArrayList(values.size()); - for (HBaseProtos.KeyValue kv: values) { - keyValues.add(toKeyValue(kv)); - } - return new Result(keyValues); - } - - /** - * Convert a ByteArrayComparable to a protocol buffer Comparator - * - * @param comparator the ByteArrayComparable to convert - * @return the converted protocol buffer Comparator - */ - public static ComparatorProtos.Comparator toComparator(ByteArrayComparable comparator) { - ComparatorProtos.Comparator.Builder builder = ComparatorProtos.Comparator.newBuilder(); - builder.setName(comparator.getClass().getName()); - builder.setSerializedComparator(ByteString.copyFrom(comparator.toByteArray())); - return builder.build(); - } - - /** - * Convert a protocol buffer Comparator to a ByteArrayComparable - * - * @param proto the protocol buffer Comparator to convert - * @return the converted ByteArrayComparable - */ - @SuppressWarnings("unchecked") - public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto) - throws IOException { - String type = proto.getName(); - String funcName = "parseFrom"; - byte [] value = proto.getSerializedComparator().toByteArray(); - try { - Class c = - (Class)(Class.forName(type)); - Method parseFrom = c.getMethod(funcName, byte[].class); - if (parseFrom == null) { - throw new IOException("Unable to locate function: " + funcName + " in type: " + type); - } - return (ByteArrayComparable)parseFrom.invoke(null, value); - } catch (Exception e) { - throw new IOException(e); - } - } - - /** - * Convert a protocol buffer Filter to a client Filter - * - * @param proto the protocol buffer Filter to convert - * @return the converted Filter - */ - @SuppressWarnings("unchecked") - public static Filter toFilter(HBaseProtos.Filter proto) throws IOException { - String type = proto.getName(); - final byte [] value = proto.getSerializedFilter().toByteArray(); - String funcName = "parseFrom"; - try { - Class c = - (Class)Class.forName(type); - Method parseFrom = c.getMethod(funcName, byte[].class); - if (parseFrom == null) { - throw new IOException("Unable to locate function: " + funcName + " in type: " + type); - } - return (Filter)parseFrom.invoke(c, value); - } catch (Exception e) { - throw new IOException(e); - } - } - - /** - * Convert a client Filter to a protocol buffer Filter - * - * @param filter the Filter to convert - * @return the converted protocol buffer Filter - */ - public static HBaseProtos.Filter toFilter(Filter filter) { - HBaseProtos.Filter.Builder builder = HBaseProtos.Filter.newBuilder(); - builder.setName(filter.getClass().getName()); - builder.setSerializedFilter(ByteString.copyFrom(filter.toByteArray())); - return builder.build(); - } - - /** - * Get the HLog entries from a list of protocol buffer WALEntry - * - * @param protoList the list of protocol buffer WALEntry - * @return an array of HLog entries - */ - public static HLog.Entry[] - toHLogEntries(final List protoList) { - List entries = new ArrayList(); - for 
(WALEntry entry: protoList) { - WALKey walKey = entry.getKey(); - java.util.UUID clusterId = HConstants.DEFAULT_CLUSTER_ID; - if (walKey.hasClusterId()) { - UUID protoUuid = walKey.getClusterId(); - clusterId = new java.util.UUID( - protoUuid.getMostSigBits(), protoUuid.getLeastSigBits()); - } - HLogKey key = new HLogKey(walKey.getEncodedRegionName().toByteArray(), - walKey.getTableName().toByteArray(), walKey.getLogSequenceNumber(), - walKey.getWriteTime(), clusterId); - WALEntry.WALEdit walEdit = entry.getEdit(); - WALEdit edit = new WALEdit(); - for (ByteString keyValue: walEdit.getKeyValueBytesList()) { - edit.add(new KeyValue(keyValue.toByteArray())); - } - if (walEdit.getFamilyScopeCount() > 0) { - TreeMap scopes = - new TreeMap(Bytes.BYTES_COMPARATOR); - for (FamilyScope scope: walEdit.getFamilyScopeList()) { - scopes.put(scope.getFamily().toByteArray(), - Integer.valueOf(scope.getScopeType().ordinal())); - } - edit.setScopes(scopes); - } - entries.add(new HLog.Entry(key, edit)); - } - return entries.toArray(new HLog.Entry[entries.size()]); - } - - /** - * Convert a delete KeyValue type to protocol buffer DeleteType. - * - * @param type - * @return a DeleteType - * @throws IOException - */ - public static DeleteType toDeleteType( - KeyValue.Type type) throws IOException { - switch (type) { - case Delete: - return DeleteType.DELETE_ONE_VERSION; - case DeleteColumn: - return DeleteType.DELETE_MULTIPLE_VERSIONS; - case DeleteFamily: - return DeleteType.DELETE_FAMILY; - default: - throw new IOException("Unknown delete type: " + type); - } - } - - /** - * Convert a protocol buffer Parameter to a Java object - * - * @param parameter the protocol buffer Parameter to convert - * @return the converted Java object - * @throws IOException if failed to deserialize the parameter - */ - public static Object toObject( - final NameBytesPair parameter) throws IOException { - if (parameter == null || !parameter.hasValue()) return null; - byte[] bytes = parameter.getValue().toByteArray(); - ByteArrayInputStream bais = null; - try { - bais = new ByteArrayInputStream(bytes); - DataInput in = new DataInputStream(bais); - return HbaseObjectWritable.readObject(in, null); - } finally { - if (bais != null) { - bais.close(); - } - } - } - - /** - * Convert a stringified protocol buffer exception Parameter to a Java Exception - * - * @param parameter the protocol buffer Parameter to convert - * @return the converted Exception - * @throws IOException if failed to deserialize the parameter - */ - @SuppressWarnings("unchecked") - public static Throwable toException( - final NameBytesPair parameter) throws IOException { - if (parameter == null || !parameter.hasValue()) return null; - String desc = parameter.getValue().toStringUtf8(); - String type = parameter.getName(); - try { - Class c = - (Class)Class.forName(type); - Constructor cn = - c.getDeclaredConstructor(String.class); - return cn.newInstance(desc); - } catch (Exception e) { - throw new IOException(e); - } - } - - /** - * Serialize a Java Object into a Parameter. 
The Java Object should be a - * Writable or protocol buffer Message - * - * @param value the Writable/Message object to be serialized - * @return the converted protocol buffer Parameter - * @throws IOException if failed to serialize the object - */ - public static NameBytesPair toParameter( - final Object value) throws IOException { - Class declaredClass = Object.class; - if (value != null) { - declaredClass = value.getClass(); - } - return toParameter(declaredClass, value); - } - - /** - * Serialize a Java Object into a Parameter. The Java Object should be a - * Writable or protocol buffer Message - * - * @param declaredClass the declared class of the parameter - * @param value the Writable/Message object to be serialized - * @return the converted protocol buffer Parameter - * @throws IOException if failed to serialize the object - */ - public static NameBytesPair toParameter( - final Class declaredClass, final Object value) throws IOException { - NameBytesPair.Builder builder = NameBytesPair.newBuilder(); - builder.setName(declaredClass.getName()); - if (value != null) { - ByteArrayOutputStream baos = null; - try { - baos = new ByteArrayOutputStream(); - DataOutput out = new DataOutputStream(baos); - Class clz = declaredClass; - if (HbaseObjectWritable.getClassCode(declaredClass) == null) { - clz = value.getClass(); - } - HbaseObjectWritable.writeObject(out, value, clz, null); - builder.setValue( - ByteString.copyFrom(baos.toByteArray())); - } finally { - if (baos != null) { - baos.close(); - } - } - } - return builder.build(); - } - -// Start helpers for Client - - /** - * A helper to invoke a Get using client protocol. - * - * @param client - * @param regionName - * @param get - * @return the result of the Get - * @throws IOException - */ - public static Result get(final ClientProtocol client, - final byte[] regionName, final Get get) throws IOException { - GetRequest request = - RequestConverter.buildGetRequest(regionName, get); - try { - GetResponse response = client.get(null, request); - if (response == null) return null; - return toResult(response.getResult()); - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - - /** - * A helper to get a row of the closet one before using client protocol. - * - * @param client - * @param regionName - * @param row - * @param family - * @return the row or the closestRowBefore if it doesn't exist - * @throws IOException - */ - public static Result getRowOrBefore(final ClientProtocol client, - final byte[] regionName, final byte[] row, - final byte[] family) throws IOException { - GetRequest request = - RequestConverter.buildGetRowOrBeforeRequest( - regionName, row, family); - try { - GetResponse response = client.get(null, request); - if (!response.hasResult()) return null; - return toResult(response.getResult()); - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - - /** - * A helper to invoke a multi action using client protocol. 
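As an aside (illustration only, not part of the patch): the client-side helpers in this block wrap the request building and unwrap ServiceException into IOException, so a caller that already has a ClientProtocol stub and a region name, both assumed here to come from an HConnection lookup, would use them roughly like this:

import java.io.IOException;
import org.apache.hadoop.hbase.client.ClientProtocol;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class ClientHelperSketch {
  // Hypothetical caller; only the shape of the helper call is the point here.
  static Result fetch(ClientProtocol client, byte[] regionName) throws IOException {
    Get get = new Get(Bytes.toBytes("row-1")); // made-up row key
    // Builds the GetRequest via RequestConverter and rethrows ServiceException as IOException.
    return ProtobufUtil.get(client, regionName, get);
  }
}
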
- * - * @param client - * @param multi - * @return a multi response - * @throws IOException - */ - public static MultiResponse multi(final ClientProtocol client, - final MultiAction multi) throws IOException { - try { - MultiResponse response = new MultiResponse(); - for (Map.Entry>> e: multi.actions.entrySet()) { - byte[] regionName = e.getKey(); - int rowMutations = 0; - List> actions = e.getValue(); - for (Action action: actions) { - Row row = action.getAction(); - if (row instanceof RowMutations) { - MultiRequest request = - RequestConverter.buildMultiRequest(regionName, (RowMutations)row); - client.multi(null, request); - response.add(regionName, action.getOriginalIndex(), new Result()); - rowMutations++; - } - } - if (actions.size() > rowMutations) { - MultiRequest request = - RequestConverter.buildMultiRequest(regionName, actions); - ClientProtos.MultiResponse - proto = client.multi(null, request); - List results = ResponseConverter.getResults(proto); - for (int i = 0, n = results.size(); i < n; i++) { - int originalIndex = actions.get(i).getOriginalIndex(); - response.add(regionName, originalIndex, results.get(i)); - } - } - } - return response; - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - - /** - * A helper to bulk load a list of HFiles using client protocol. - * - * @param client - * @param familyPaths - * @param regionName - * @param assignSeqNum - * @return true if all are loaded - * @throws IOException - */ - public static boolean bulkLoadHFile(final ClientProtocol client, - final List> familyPaths, - final byte[] regionName, boolean assignSeqNum) throws IOException { - BulkLoadHFileRequest request = - RequestConverter.buildBulkLoadHFileRequest(familyPaths, regionName, assignSeqNum); - try { - BulkLoadHFileResponse response = - client.bulkLoadHFile(null, request); - return response.getLoaded(); - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - - public static CoprocessorServiceResponse execService(final ClientProtocol client, - final CoprocessorServiceCall call, final byte[] regionName) throws IOException { - CoprocessorServiceRequest request = CoprocessorServiceRequest.newBuilder() - .setCall(call).setRegion( - RequestConverter.buildRegionSpecifier(REGION_NAME, regionName)).build(); - try { - CoprocessorServiceResponse response = - client.execService(null, request); - return response; - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - - public static CoprocessorServiceResponse execService(final MasterAdminProtocol client, - final CoprocessorServiceCall call) throws IOException { - CoprocessorServiceRequest request = CoprocessorServiceRequest.newBuilder() - .setCall(call).setRegion( - RequestConverter.buildRegionSpecifier(REGION_NAME, HConstants.EMPTY_BYTE_ARRAY)).build(); - try { - CoprocessorServiceResponse response = - client.execMasterService(null, request); - return response; - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - - @SuppressWarnings("unchecked") - public static T newServiceStub(Class service, RpcChannel channel) - throws Exception { - return (T)Methods.call(service, null, "newStub", - new Class[]{ RpcChannel.class }, new Object[]{ channel }); - } - -// End helpers for Client -// Start helpers for Admin - - /** - * A helper to retrieve region info given a region name - * using admin protocol. 
- * - * @param admin - * @param regionName - * @return the retrieved region info - * @throws IOException - */ - public static HRegionInfo getRegionInfo(final AdminProtocol admin, - final byte[] regionName) throws IOException { - try { - GetRegionInfoRequest request = - RequestConverter.buildGetRegionInfoRequest(regionName); - GetRegionInfoResponse response = - admin.getRegionInfo(null, request); - return HRegionInfo.convert(response.getRegionInfo()); - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - - /** - * A helper to close a region given a region name - * using admin protocol. - * - * @param admin - * @param regionName - * @param transitionInZK - * @throws IOException - */ - public static void closeRegion(final AdminProtocol admin, - final byte[] regionName, final boolean transitionInZK) throws IOException { - CloseRegionRequest closeRegionRequest = - RequestConverter.buildCloseRegionRequest(regionName, transitionInZK); - try { - admin.closeRegion(null, closeRegionRequest); - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - - /** - * A helper to close a region given a region name - * using admin protocol. - * - * @param admin - * @param regionName - * @param versionOfClosingNode - * @return true if the region is closed - * @throws IOException - */ - public static boolean closeRegion(final AdminProtocol admin, final byte[] regionName, - final int versionOfClosingNode, final ServerName destinationServer, - final boolean transitionInZK) throws IOException { - CloseRegionRequest closeRegionRequest = - RequestConverter.buildCloseRegionRequest( - regionName, versionOfClosingNode, destinationServer, transitionInZK); - try { - CloseRegionResponse response = admin.closeRegion(null, closeRegionRequest); - return ResponseConverter.isClosed(response); - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - - - /** - * A helper to open a region using admin protocol. - * @param admin - * @param region - * @throws IOException - */ - public static void openRegion(final AdminProtocol admin, - final HRegionInfo region) throws IOException { - OpenRegionRequest request = - RequestConverter.buildOpenRegionRequest(region, -1); - try { - admin.openRegion(null, request); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - - /** - * A helper to get the all the online regions on a region - * server using admin protocol. - * - * @param admin - * @return a list of online region info - * @throws IOException - */ - public static List getOnlineRegions(final AdminProtocol admin) throws IOException { - GetOnlineRegionRequest request = RequestConverter.buildGetOnlineRegionRequest(); - GetOnlineRegionResponse response = null; - try { - response = admin.getOnlineRegion(null, request); - } catch (ServiceException se) { - throw getRemoteException(se); - } - return getRegionInfos(response); - } - - /** - * Get the list of region info from a GetOnlineRegionResponse - * - * @param proto the GetOnlineRegionResponse - * @return the list of region info or null if proto is null - */ - static List getRegionInfos(final GetOnlineRegionResponse proto) { - if (proto == null) return null; - List regionInfos = new ArrayList(); - for (RegionInfo regionInfo: proto.getRegionInfoList()) { - regionInfos.add(HRegionInfo.convert(regionInfo)); - } - return regionInfos; - } - - /** - * A helper to get the info of a region server using admin protocol. 
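The admin-side helpers follow the same build-request / unwrap-ServiceException pattern; a rough sketch (not part of the patch; the AdminProtocol stub and region name are assumed to be supplied by the caller):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.AdminProtocol;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

public class AdminHelperSketch {
  static void dumpRegions(AdminProtocol admin, byte[] regionName) throws IOException {
    // GetOnlineRegionRequest / GetRegionInfoRequest are built and sent under the hood.
    List<HRegionInfo> online = ProtobufUtil.getOnlineRegions(admin);
    HRegionInfo info = ProtobufUtil.getRegionInfo(admin, regionName);
    System.out.println(online.size() + " regions online; looked up " + info.getRegionNameAsString());
  }
}
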
- * - * @param admin - * @return the server name - * @throws IOException - */ - public static ServerInfo getServerInfo( - final AdminProtocol admin) throws IOException { - GetServerInfoRequest request = RequestConverter.buildGetServerInfoRequest(); - try { - GetServerInfoResponse response = admin.getServerInfo(null, request); - return response.getServerInfo(); - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - - /** - * A helper to replicate a list of HLog entries using admin protocol. - * - * @param admin - * @param entries - * @throws IOException - */ - public static void replicateWALEntry(final AdminProtocol admin, - final HLog.Entry[] entries) throws IOException { - ReplicateWALEntryRequest request = - RequestConverter.buildReplicateWALEntryRequest(entries); - try { - admin.replicateWALEntry(null, request); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - - /** - * A helper to get the list of files of a column family - * on a given region using admin protocol. - * - * @param admin - * @param regionName - * @param family - * @return the list of store files - * @throws IOException - */ - public static List getStoreFiles(final AdminProtocol admin, - final byte[] regionName, final byte[] family) throws IOException { - GetStoreFileRequest request = - RequestConverter.buildGetStoreFileRequest(regionName, family); - try { - GetStoreFileResponse response = admin.getStoreFile(null, request); - return response.getStoreFileList(); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - - /** - * A helper to split a region using admin protocol. - * - * @param admin - * @param hri - * @param splitPoint - * @throws IOException - */ - public static void split(final AdminProtocol admin, - final HRegionInfo hri, byte[] splitPoint) throws IOException { - SplitRegionRequest request = - RequestConverter.buildSplitRegionRequest(hri.getRegionName(), splitPoint); - try { - admin.splitRegion(null, request); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - -// End helpers for Admin - - /* - * Get the total (read + write) requests from a RegionLoad pb - * @param rl - RegionLoad pb - * @return total (read + write) requests - */ - public static long getTotalRequestsCount(RegionLoad rl) { - if (rl == null) { - return 0; - } - - return rl.getReadRequestsCount() + rl.getWriteRequestsCount(); - } - - - /** - * @param m Message to get delimited pb serialization of (with pb magic prefix) - */ - public static byte [] toDelimitedByteArray(final Message m) throws IOException { - // Allocate arbitrary big size so we avoid resizing. - ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); - m.writeDelimitedTo(baos); - baos.close(); - return ProtobufUtil.prependPBMagic(baos.toByteArray()); - } - - /** - * Converts a Permission proto to a client Permission object. - * - * @param proto the protobuf Permission - * @return the converted Permission - */ - public static Permission toPermission(AccessControlProtos.Permission proto) { - if (proto.hasTable()) { - return toTablePermission(proto); - } else { - List actions = toPermissionActions(proto.getActionList()); - return new Permission(actions.toArray(new Permission.Action[actions.size()])); - } - } - - /** - * Converts a Permission proto to a client TablePermission object. 
- * - * @param proto the protobuf Permission - * @return the converted TablePermission - */ - public static TablePermission toTablePermission(AccessControlProtos.Permission proto) { - List actions = toPermissionActions(proto.getActionList()); - - byte[] qualifier = null; - byte[] family = null; - byte[] table = null; - - if (proto.hasTable()) table = proto.getTable().toByteArray(); - if (proto.hasFamily()) family = proto.getFamily().toByteArray(); - if (proto.hasQualifier()) qualifier = proto.getQualifier().toByteArray(); - - return new TablePermission(table, family, qualifier, - actions.toArray(new Permission.Action[actions.size()])); - } - - /** - * Convert a client Permission to a Permission proto - * - * @param perm the client Permission - * @return the protobuf Permission - */ - public static AccessControlProtos.Permission toPermission(Permission perm) { - AccessControlProtos.Permission.Builder builder = AccessControlProtos.Permission.newBuilder(); - if (perm instanceof TablePermission) { - TablePermission tablePerm = (TablePermission)perm; - if (tablePerm.hasTable()) { - builder.setTable(ByteString.copyFrom(tablePerm.getTable())); - } - if (tablePerm.hasFamily()) { - builder.setFamily(ByteString.copyFrom(tablePerm.getFamily())); - } - if (tablePerm.hasQualifier()) { - builder.setQualifier(ByteString.copyFrom(tablePerm.getQualifier())); - } - } - for (Permission.Action a : perm.getActions()) { - builder.addAction(toPermissionAction(a)); - } - return builder.build(); - } - - /** - * Converts a list of Permission.Action proto to a list of client Permission.Action objects. - * - * @param protoActions the list of protobuf Actions - * @return the converted list of Actions - */ - public static List toPermissionActions( - List protoActions) { - List actions = new ArrayList(protoActions.size()); - for (AccessControlProtos.Permission.Action a : protoActions) { - actions.add(toPermissionAction(a)); - } - return actions; - } - - /** - * Converts a Permission.Action proto to a client Permission.Action object. 
- * - * @param action the protobuf Action - * @return the converted Action - */ - public static Permission.Action toPermissionAction( - AccessControlProtos.Permission.Action action) { - switch (action) { - case READ: - return Permission.Action.READ; - case WRITE: - return Permission.Action.WRITE; - case EXEC: - return Permission.Action.EXEC; - case CREATE: - return Permission.Action.CREATE; - case ADMIN: - return Permission.Action.ADMIN; - } - throw new IllegalArgumentException("Unknown action value "+action.name()); - } - - /** - * Convert a client Permission.Action to a Permission.Action proto - * - * @param action the client Action - * @return the protobuf Action - */ - public static AccessControlProtos.Permission.Action toPermissionAction( - Permission.Action action) { - switch (action) { - case READ: - return AccessControlProtos.Permission.Action.READ; - case WRITE: - return AccessControlProtos.Permission.Action.WRITE; - case EXEC: - return AccessControlProtos.Permission.Action.EXEC; - case CREATE: - return AccessControlProtos.Permission.Action.CREATE; - case ADMIN: - return AccessControlProtos.Permission.Action.ADMIN; - } - throw new IllegalArgumentException("Unknown action value "+action.name()); - } - - /** - * Convert a client user permission to a user permission proto - * - * @param perm the client UserPermission - * @return the protobuf UserPermission - */ - public static AccessControlProtos.UserPermission toUserPermission(UserPermission perm) { - AccessControlProtos.Permission.Builder permissionBuilder = - AccessControlProtos.Permission.newBuilder(); - for (Permission.Action a : perm.getActions()) { - permissionBuilder.addAction(toPermissionAction(a)); - } - if (perm.hasTable()) { - permissionBuilder.setTable(ByteString.copyFrom(perm.getTable())); - } - if (perm.hasFamily()) { - permissionBuilder.setFamily(ByteString.copyFrom(perm.getFamily())); - } - if (perm.hasQualifier()) { - permissionBuilder.setQualifier(ByteString.copyFrom(perm.getQualifier())); - } - - return AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFrom(perm.getUser())) - .setPermission(permissionBuilder) - .build(); - } - - /** - * Converts a user permission proto to a client user permission object. 
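To make the ACL conversions concrete, a sketch (not part of the patch; the table and family names are invented, and it assumes the usual TablePermission(table, family, actions...) constructor):

import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.TablePermission;
import org.apache.hadoop.hbase.util.Bytes;

public class PermissionConversionSketch {
  public static void main(String[] args) {
    // Hypothetical table-scoped permission: READ and WRITE on table "t1", family "cf".
    TablePermission perm = new TablePermission(Bytes.toBytes("t1"), Bytes.toBytes("cf"),
        Permission.Action.READ, Permission.Action.WRITE);
    AccessControlProtos.Permission proto = ProtobufUtil.toPermission(perm); // table/family + actions
    Permission back = ProtobufUtil.toPermission(proto); // hasTable() -> comes back as a TablePermission
  }
}
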
- * - * @param proto the protobuf UserPermission - * @return the converted UserPermission - */ - public static UserPermission toUserPermission(AccessControlProtos.UserPermission proto) { - AccessControlProtos.Permission permission = proto.getPermission(); - List actions = toPermissionActions(permission.getActionList()); - - byte[] qualifier = null; - byte[] family = null; - byte[] table = null; - - if (permission.hasTable()) table = permission.getTable().toByteArray(); - if (permission.hasFamily()) family = permission.getFamily().toByteArray(); - if (permission.hasQualifier()) qualifier = permission.getQualifier().toByteArray(); - - return new UserPermission(proto.getUser().toByteArray(), - table, family, qualifier, - actions.toArray(new Permission.Action[actions.size()])); - } - - /** - * Convert a ListMultimap where key is username - * to a protobuf UserPermission - * - * @param perm the list of user and table permissions - * @return the protobuf UserTablePermissions - */ - public static AccessControlProtos.UserTablePermissions toUserTablePermissions( - ListMultimap perm) { - AccessControlProtos.UserTablePermissions.Builder builder = - AccessControlProtos.UserTablePermissions.newBuilder(); - for (Map.Entry> entry : perm.asMap().entrySet()) { - AccessControlProtos.UserTablePermissions.UserPermissions.Builder userPermBuilder = - AccessControlProtos.UserTablePermissions.UserPermissions.newBuilder(); - userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey())); - for (TablePermission tablePerm: entry.getValue()) { - userPermBuilder.addPermissions(toPermission(tablePerm)); - } - builder.addPermissions(userPermBuilder.build()); - } - return builder.build(); - } - - /** - * A utility used to grant a user some permissions. The permissions will - * be global if table is not specified. Otherwise, they are for those - * table/column family/qualifier only. - *

          - * It's also called by the shell, in case you want to find references. - * - * @param protocol the AccessControlService protocol proxy - * @param userShortName the short name of the user to grant permissions - * @param t optional table name - * @param f optional column family - * @param q optional qualifier - * @param actions the permissions to be granted - * @throws ServiceException - */ - public static void grant(AccessControlService.BlockingInterface protocol, - String userShortName, byte[] t, byte[] f, byte[] q, - Permission.Action... actions) throws ServiceException { - List permActions = - Lists.newArrayListWithCapacity(actions.length); - for (Permission.Action a : actions) { - permActions.add(ProtobufUtil.toPermissionAction(a)); - } - AccessControlProtos.GrantRequest request = RequestConverter. - buildGrantRequest(userShortName, t, f, q, permActions.toArray( - new AccessControlProtos.Permission.Action[actions.length])); - protocol.grant(null, request); - } - - /** - * A utility used to revoke a user some permissions. The permissions will - * be global if table is not specified. Otherwise, they are for those - * table/column family/qualifier only. - *

          - * It's also called by the shell, in case you want to find references. - * - * @param protocol the AccessControlService protocol proxy - * @param userShortName the short name of the user to revoke permissions - * @param t optional table name - * @param f optional column family - * @param q optional qualifier - * @param actions the permissions to be revoked - * @throws ServiceException - */ - public static void revoke(AccessControlService.BlockingInterface protocol, - String userShortName, byte[] t, byte[] f, byte[] q, - Permission.Action... actions) throws ServiceException { - List permActions = - Lists.newArrayListWithCapacity(actions.length); - for (Permission.Action a : actions) { - permActions.add(ProtobufUtil.toPermissionAction(a)); - } - AccessControlProtos.RevokeRequest request = RequestConverter. - buildRevokeRequest(userShortName, t, f, q, permActions.toArray( - new AccessControlProtos.Permission.Action[actions.length])); - protocol.revoke(null, request); - } - - /** - * A utility used to get user permissions. - *

          - * It's also called by the shell, in case you want to find references. - * - * @param protocol the AccessControlService protocol proxy - * @param t optional table name - * @throws ServiceException - */ - public static List getUserPermissions( - AccessControlService.BlockingInterface protocol, - byte[] t) throws ServiceException { - AccessControlProtos.UserPermissionsRequest.Builder builder = - AccessControlProtos.UserPermissionsRequest.newBuilder(); - if (t != null) { - builder.setTable(ByteString.copyFrom(t)); - } - AccessControlProtos.UserPermissionsRequest request = builder.build(); - AccessControlProtos.UserPermissionsResponse response = - protocol.getUserPermissions(null, request); - List perms = new ArrayList(); - for (AccessControlProtos.UserPermission perm: response.getPermissionList()) { - perms.add(ProtobufUtil.toUserPermission(perm)); - } - return perms; - } - - /** - * Convert a protobuf UserTablePermissions to a - * ListMultimap where key is username. - * - * @param proto the protobuf UserPermission - * @return the converted UserPermission - */ - public static ListMultimap toUserTablePermissions( - AccessControlProtos.UserTablePermissions proto) { - ListMultimap perms = ArrayListMultimap.create(); - AccessControlProtos.UserTablePermissions.UserPermissions userPerm; - - for (int i = 0; i < proto.getPermissionsCount(); i++) { - userPerm = proto.getPermissions(i); - for (int j = 0; j < userPerm.getPermissionsCount(); j++) { - TablePermission tablePerm = toTablePermission(userPerm.getPermissions(j)); - perms.put(userPerm.getUser().toStringUtf8(), tablePerm); - } - } - - return perms; - } - - /** - * Converts a Token instance (with embedded identifier) to the protobuf representation. - * - * @param token the Token instance to copy - * @return the protobuf Token message - */ - public static AuthenticationProtos.Token toToken(Token token) { - AuthenticationProtos.Token.Builder builder = AuthenticationProtos.Token.newBuilder(); - builder.setIdentifier(ByteString.copyFrom(token.getIdentifier())); - builder.setPassword(ByteString.copyFrom(token.getPassword())); - if (token.getService() != null) { - builder.setService(ByteString.copyFromUtf8(token.getService().toString())); - } - return builder.build(); - } - - /** - * Converts a protobuf Token message back into a Token instance. - * - * @param proto the protobuf Token message - * @return the Token instance - */ - public static Token toToken(AuthenticationProtos.Token proto) { - return new Token( - proto.hasIdentifier() ? proto.getIdentifier().toByteArray() : null, - proto.hasPassword() ? proto.getPassword().toByteArray() : null, - AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE, - proto.hasService() ? 
new Text(proto.getService().toStringUtf8()) : null); - } - - /** - * Find the HRegion encoded name based on a region specifier - * - * @param regionSpecifier the region specifier - * @return the corresponding region's encoded name - * @throws DoNotRetryIOException if the specifier type is unsupported - */ - public static String getRegionEncodedName( - final RegionSpecifier regionSpecifier) throws DoNotRetryIOException { - byte[] value = regionSpecifier.getValue().toByteArray(); - RegionSpecifierType type = regionSpecifier.getType(); - switch (type) { - case REGION_NAME: - return HRegionInfo.encodeRegionName(value); - case ENCODED_REGION_NAME: - return Bytes.toString(value); - default: - throw new DoNotRetryIOException( - "Unsupported region specifier type: " + type); - } - } - - public static ScanMetrics toScanMetrics(final byte[] bytes) { - MapReduceProtos.ScanMetrics.Builder builder = MapReduceProtos.ScanMetrics.newBuilder(); - try { - builder.mergeFrom(bytes); - } catch (InvalidProtocolBufferException e) { - //Ignored there are just no key values to add. - } - MapReduceProtos.ScanMetrics pScanMetrics = builder.build(); - ScanMetrics scanMetrics = new ScanMetrics(); - for (HBaseProtos.NameInt64Pair pair : pScanMetrics.getMetricsList()) { - if (pair.hasName() && pair.hasValue()) { - scanMetrics.setCounter(pair.getName(), pair.getValue()); - } - } - return scanMetrics; - } - - public static MapReduceProtos.ScanMetrics toScanMetrics(ScanMetrics scanMetrics) { - MapReduceProtos.ScanMetrics.Builder builder = MapReduceProtos.ScanMetrics.newBuilder(); - Map metrics = scanMetrics.getMetricsMap(); - for (Entry e : metrics.entrySet()) { - HBaseProtos.NameInt64Pair nameInt64Pair = - HBaseProtos.NameInt64Pair.newBuilder() - .setName(e.getKey()) - .setValue(e.getValue()) - .build(); - builder.addMetrics(nameInt64Pair); - } - return builder.build(); - } - - /** - * Unwraps an exception from a protobuf service into the underlying (expected) IOException. - * This method will always throw an exception. - * @param se the {@code ServiceException} instance to convert into an {@code IOException} - */ - public static void toIOException(ServiceException se) throws IOException { - if (se == null) { - throw new NullPointerException("Null service exception passed!"); - } - - Throwable cause = se.getCause(); - if (cause != null && cause instanceof IOException) { - throw (IOException)cause; - } - throw new IOException(se); - } - - public static HBaseProtos.KeyValue toKeyValue(final Cell kv) { - // Doing this is going to kill us if we do it for all data passed. - // St.Ack 20121205 - // TODO: Do a Cell version - HBaseProtos.KeyValue.Builder kvbuilder = HBaseProtos.KeyValue.newBuilder(); - kvbuilder.setRow(ByteString.copyFrom(kv.getRowArray(), kv.getRowOffset(), - kv.getRowLength())); - kvbuilder.setFamily(ByteString.copyFrom(kv.getFamilyArray(), - kv.getFamilyOffset(), kv.getFamilyLength())); - kvbuilder.setQualifier(ByteString.copyFrom(kv.getQualifierArray(), - kv.getQualifierOffset(), kv.getQualifierLength())); - kvbuilder.setKeyType(HBaseProtos.KeyType.valueOf(kv.getTypeByte())); - kvbuilder.setTimestamp(kv.getTimestamp()); - kvbuilder.setValue(ByteString.copyFrom(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); - return kvbuilder.build(); - } - - public static KeyValue toKeyValue(final HBaseProtos.KeyValue kv) { - // Doing this is going to kill us if we do it for all data passed. 
- // St.Ack 20121205 - // TODO: Do a Cell version - return new KeyValue(kv.getRow().toByteArray(), - kv.getFamily().toByteArray(), - kv.getQualifier().toByteArray(), - kv.getTimestamp(), - KeyValue.Type.codeToType((byte)kv.getKeyType().getNumber()), - kv.getValue().toByteArray()); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java deleted file mode 100644 index e332ab1..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ /dev/null @@ -1,1216 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.protobuf; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.UUID; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.Action; -import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Increment; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Row; -import org.apache.hadoop.hbase.client.RowMutations; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.filter.ByteArrayComparable; -import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo; -import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit.FamilyScope; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit.ScopeType; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.LockRowRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiAction; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.ColumnValue; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.ColumnValue.QualifierValue; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.MutateType; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.UnlockRowRequest; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest; -import 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; -import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; -import org.apache.hadoop.hbase.regionserver.wal.HLog; -import org.apache.hadoop.hbase.regionserver.wal.HLogKey; -import org.apache.hadoop.hbase.regionserver.wal.WALEdit; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; - -import com.google.protobuf.ByteString; - -/** - * Helper utility to build protocol buffer requests, - * or build components for protocol buffer requests. - */ -@InterfaceAudience.Private -public final class RequestConverter { - - private RequestConverter() { - } - -// Start utilities for Client - -/** - * Create a new protocol buffer GetRequest to get a row, all columns in a family. - * If there is no such row, return the closest row before it. - * - * @param regionName the name of the region to get - * @param row the row to get - * @param family the column family to get - * should return the immediate row before - * @return a protocol buffer GetReuqest - */ - public static GetRequest buildGetRowOrBeforeRequest( - final byte[] regionName, final byte[] row, final byte[] family) { - GetRequest.Builder builder = GetRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setClosestRowBefore(true); - builder.setRegion(region); - - Column.Builder columnBuilder = Column.newBuilder(); - columnBuilder.setFamily(ByteString.copyFrom(family)); - ClientProtos.Get.Builder getBuilder = - ClientProtos.Get.newBuilder(); - getBuilder.setRow(ByteString.copyFrom(row)); - getBuilder.addColumn(columnBuilder.build()); - builder.setGet(getBuilder.build()); - return builder.build(); - } - - /** - * Create a protocol buffer GetRequest for a client Get - * - * @param regionName the name of the region to get - * @param get the client Get - * @return a protocol buffer GetReuqest - */ - public static GetRequest buildGetRequest(final byte[] regionName, - final Get get) throws IOException { - return buildGetRequest(regionName, get, false); - } - - /** - * Create a protocol buffer GetRequest for a client Get - * - * @param regionName the name of the region to get - * @param get the client Get - * @param existenceOnly indicate if check row existence only - * @return a protocol buffer GetReuqest - */ - public static GetRequest buildGetRequest(final byte[] regionName, - final Get get, final boolean existenceOnly) throws IOException { - GetRequest.Builder builder = GetRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setExistenceOnly(existenceOnly); - builder.setRegion(region); - builder.setGet(ProtobufUtil.toGet(get)); - return builder.build(); - } - - /** - * Create a protocol buffer MutateRequest for a client increment - * - * @param regionName - * @param row - * @param 
family - * @param qualifier - * @param amount - * @param writeToWAL - * @return a mutate request - */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final byte[] row, final byte[] family, - final byte [] qualifier, final long amount, final boolean writeToWAL) { - MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - - Mutate.Builder mutateBuilder = Mutate.newBuilder(); - mutateBuilder.setRow(ByteString.copyFrom(row)); - mutateBuilder.setMutateType(MutateType.INCREMENT); - mutateBuilder.setWriteToWAL(writeToWAL); - ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); - columnBuilder.setFamily(ByteString.copyFrom(family)); - QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); - valueBuilder.setValue(ByteString.copyFrom(Bytes.toBytes(amount))); - valueBuilder.setQualifier(ByteString.copyFrom(qualifier)); - columnBuilder.addQualifierValue(valueBuilder.build()); - mutateBuilder.addColumnValue(columnBuilder.build()); - - builder.setMutate(mutateBuilder.build()); - return builder.build(); - } - - /** - * Create a protocol buffer MutateRequest for a conditioned put - * - * @param regionName - * @param row - * @param family - * @param qualifier - * @param comparator - * @param compareType - * @param put - * @return a mutate request - * @throws IOException - */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final byte[] row, final byte[] family, - final byte [] qualifier, final ByteArrayComparable comparator, - final CompareType compareType, final Put put) throws IOException { - MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - Condition condition = buildCondition( - row, family, qualifier, comparator, compareType); - builder.setMutate(ProtobufUtil.toMutate(MutateType.PUT, put)); - builder.setCondition(condition); - return builder.build(); - } - - /** - * Create a protocol buffer MutateRequest for a conditioned delete - * - * @param regionName - * @param row - * @param family - * @param qualifier - * @param comparator - * @param compareType - * @param delete - * @return a mutate request - * @throws IOException - */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final byte[] row, final byte[] family, - final byte [] qualifier, final ByteArrayComparable comparator, - final CompareType compareType, final Delete delete) throws IOException { - MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - Condition condition = buildCondition( - row, family, qualifier, comparator, compareType); - builder.setMutate(ProtobufUtil.toMutate(MutateType.DELETE, delete)); - builder.setCondition(condition); - return builder.build(); - } - - /** - * Create a protocol buffer MutateRequest for a put - * - * @param regionName - * @param put - * @return a mutate request - * @throws IOException - */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final Put put) throws IOException { - MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - 
builder.setMutate(ProtobufUtil.toMutate(MutateType.PUT, put)); - return builder.build(); - } - - /** - * Create a protocol buffer MutateRequest for an append - * - * @param regionName - * @param append - * @return a mutate request - * @throws IOException - */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final Append append) throws IOException { - MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.setMutate(ProtobufUtil.toMutate(MutateType.APPEND, append)); - return builder.build(); - } - - /** - * Create a protocol buffer MutateRequest for a client increment - * - * @param regionName - * @param increment - * @return a mutate request - */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final Increment increment) { - MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.setMutate(ProtobufUtil.toMutate(increment)); - return builder.build(); - } - - /** - * Create a protocol buffer MutateRequest for a delete - * - * @param regionName - * @param delete - * @return a mutate request - * @throws IOException - */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final Delete delete) throws IOException { - MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.setMutate(ProtobufUtil.toMutate(MutateType.DELETE, delete)); - return builder.build(); - } - - /** - * Create a protocol buffer MultiRequest for a row mutations - * - * @param regionName - * @param rowMutations - * @return a multi request - * @throws IOException - */ - public static MultiRequest buildMultiRequest(final byte[] regionName, - final RowMutations rowMutations) throws IOException { - MultiRequest.Builder builder = MultiRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.setAtomic(true); - for (Mutation mutation: rowMutations.getMutations()) { - MutateType mutateType = null; - if (mutation instanceof Put) { - mutateType = MutateType.PUT; - } else if (mutation instanceof Delete) { - mutateType = MutateType.DELETE; - } else { - throw new DoNotRetryIOException( - "RowMutations supports only put and delete, not " - + mutation.getClass().getName()); - } - Mutate mutate = ProtobufUtil.toMutate(mutateType, mutation); - builder.addAction(MultiAction.newBuilder().setMutate(mutate).build()); - } - return builder.build(); - } - - /** - * Create a protocol buffer ScanRequest for a client Scan - * - * @param regionName - * @param scan - * @param numberOfRows - * @param closeScanner - * @return a scan request - * @throws IOException - */ - public static ScanRequest buildScanRequest(final byte[] regionName, - final Scan scan, final int numberOfRows, - final boolean closeScanner) throws IOException { - ScanRequest.Builder builder = ScanRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setNumberOfRows(numberOfRows); - builder.setCloseScanner(closeScanner); - builder.setRegion(region); - builder.setScan(ProtobufUtil.toScan(scan)); - return builder.build(); - } - - 
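[Reviewer note, not part of the patch: a minimal sketch of how client code might drive the request builders above, using only signatures visible in this file (buildGetRequest(byte[], Get) and buildScanRequest(byte[], Scan, int, boolean)). The region name is a hypothetical placeholder, and the ClientProtos import locations are assumed from the surrounding code.]

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.protobuf.RequestConverter;
    import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
    import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestConverterSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical region name; a real caller would obtain it from an HRegionLocation.
        byte[] regionName = Bytes.toBytes("hypothetical-region-name");

        // Wrap a client Get in a protocol buffer GetRequest aimed at that region.
        Get get = new Get(Bytes.toBytes("row-1"));
        get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
        GetRequest getRequest = RequestConverter.buildGetRequest(regionName, get);

        // Wrap a client Scan in a ScanRequest asking for at most 100 rows and
        // leaving the scanner open for further calls.
        Scan scan = new Scan(Bytes.toBytes("row-0"), Bytes.toBytes("row-9"));
        ScanRequest scanRequest = RequestConverter.buildScanRequest(regionName, scan, 100, false);

        System.out.println(getRequest);
        System.out.println(scanRequest);
      }
    }

[Both builders route the region through buildRegionSpecifier(RegionSpecifierType.REGION_NAME, ...), so the same pattern applies to the Mutate and Multi variants defined above and below.]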
/** - * Create a protocol buffer ScanRequest for a scanner id - * - * @param scannerId - * @param numberOfRows - * @param closeScanner - * @return a scan request - */ - public static ScanRequest buildScanRequest(final long scannerId, - final int numberOfRows, final boolean closeScanner) { - ScanRequest.Builder builder = ScanRequest.newBuilder(); - builder.setNumberOfRows(numberOfRows); - builder.setCloseScanner(closeScanner); - builder.setScannerId(scannerId); - return builder.build(); - } - - /** - * Create a protocol buffer ScanRequest for a scanner id - * - * @param scannerId - * @param numberOfRows - * @param closeScanner - * @param nextCallSeq - * @return a scan request - */ - public static ScanRequest buildScanRequest(final long scannerId, final int numberOfRows, - final boolean closeScanner, final long nextCallSeq) { - ScanRequest.Builder builder = ScanRequest.newBuilder(); - builder.setNumberOfRows(numberOfRows); - builder.setCloseScanner(closeScanner); - builder.setScannerId(scannerId); - builder.setNextCallSeq(nextCallSeq); - return builder.build(); - } - - /** - * Create a protocol buffer LockRowRequest - * - * @param regionName - * @param row - * @return a lock row request - */ - public static LockRowRequest buildLockRowRequest( - final byte[] regionName, final byte[] row) { - LockRowRequest.Builder builder = LockRowRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.addRow(ByteString.copyFrom(row)); - return builder.build(); - } - - /** - * Create a protocol buffer UnlockRowRequest - * - * @param regionName - * @param lockId - * @return a unlock row request - */ - public static UnlockRowRequest buildUnlockRowRequest( - final byte[] regionName, final long lockId) { - UnlockRowRequest.Builder builder = UnlockRowRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.setLockId(lockId); - return builder.build(); - } - - /** - * Create a protocol buffer bulk load request - * - * @param familyPaths - * @param regionName - * @param assignSeqNum - * @return a bulk load request - */ - public static BulkLoadHFileRequest buildBulkLoadHFileRequest( - final List> familyPaths, - final byte[] regionName, boolean assignSeqNum) { - BulkLoadHFileRequest.Builder builder = BulkLoadHFileRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - FamilyPath.Builder familyPathBuilder = FamilyPath.newBuilder(); - for (Pair familyPath: familyPaths) { - familyPathBuilder.setFamily(ByteString.copyFrom(familyPath.getFirst())); - familyPathBuilder.setPath(familyPath.getSecond()); - builder.addFamilyPath(familyPathBuilder.build()); - } - builder.setAssignSeqNum(assignSeqNum); - return builder.build(); - } - - /** - * Create a protocol buffer multi request for a list of actions. - * RowMutations in the list (if any) will be ignored. 
- * - * @param regionName - * @param actions - * @return a multi request - * @throws IOException - */ - public static MultiRequest buildMultiRequest(final byte[] regionName, - final List> actions) throws IOException { - MultiRequest.Builder builder = MultiRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - for (Action action: actions) { - MultiAction.Builder protoAction = MultiAction.newBuilder(); - - Row row = action.getAction(); - if (row instanceof Get) { - protoAction.setGet(ProtobufUtil.toGet((Get)row)); - } else if (row instanceof Put) { - protoAction.setMutate(ProtobufUtil.toMutate(MutateType.PUT, (Put)row)); - } else if (row instanceof Delete) { - protoAction.setMutate(ProtobufUtil.toMutate(MutateType.DELETE, (Delete)row)); - } else if (row instanceof Append) { - protoAction.setMutate(ProtobufUtil.toMutate(MutateType.APPEND, (Append)row)); - } else if (row instanceof Increment) { - protoAction.setMutate(ProtobufUtil.toMutate((Increment)row)); - } else if (row instanceof RowMutations) { - continue; // ignore RowMutations - } else { - throw new DoNotRetryIOException( - "multi doesn't support " + row.getClass().getName()); - } - builder.addAction(protoAction.build()); - } - return builder.build(); - } - -// End utilities for Client -//Start utilities for Admin - - /** - * Create a protocol buffer GetRegionInfoRequest for a given region name - * - * @param regionName the name of the region to get info - * @return a protocol buffer GetRegionInfoRequest - */ - public static GetRegionInfoRequest - buildGetRegionInfoRequest(final byte[] regionName) { - return buildGetRegionInfoRequest(regionName, false); - } - - /** - * Create a protocol buffer GetRegionInfoRequest for a given region name - * - * @param regionName the name of the region to get info - * @param includeCompactionState indicate if the compaction state is requested - * @return a protocol buffer GetRegionInfoRequest - */ - public static GetRegionInfoRequest - buildGetRegionInfoRequest(final byte[] regionName, - final boolean includeCompactionState) { - GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - if (includeCompactionState) { - builder.setCompactionState(includeCompactionState); - } - return builder.build(); - } - - /** - * Create a protocol buffer GetStoreFileRequest for a given region name - * - * @param regionName the name of the region to get info - * @param family the family to get store file list - * @return a protocol buffer GetStoreFileRequest - */ - public static GetStoreFileRequest - buildGetStoreFileRequest(final byte[] regionName, final byte[] family) { - GetStoreFileRequest.Builder builder = GetStoreFileRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.addFamily(ByteString.copyFrom(family)); - return builder.build(); - } - - /** - * Create a protocol buffer GetOnlineRegionRequest - * - * @return a protocol buffer GetOnlineRegionRequest - */ - public static GetOnlineRegionRequest buildGetOnlineRegionRequest() { - return GetOnlineRegionRequest.newBuilder().build(); - } - - /** - * Create a protocol buffer FlushRegionRequest for a given region name - * - * @param regionName the name of the region to get info - * @return a protocol buffer FlushRegionRequest - */ - 
public static FlushRegionRequest - buildFlushRegionRequest(final byte[] regionName) { - FlushRegionRequest.Builder builder = FlushRegionRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - return builder.build(); - } - - /** - * Create a protocol buffer OpenRegionRequest to open a list of regions - * - * @param regionOpenInfos info of a list of regions to open - * @return a protocol buffer OpenRegionRequest - */ - public static OpenRegionRequest - buildOpenRegionRequest(final List> regionOpenInfos) { - OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); - for (Pair regionOpenInfo: regionOpenInfos) { - Integer second = regionOpenInfo.getSecond(); - int versionOfOfflineNode = second == null ? -1 : second.intValue(); - builder.addOpenInfo(buildRegionOpenInfo( - regionOpenInfo.getFirst(), versionOfOfflineNode)); - } - return builder.build(); - } - - /** - * Create a protocol buffer OpenRegionRequest for a given region - * - * @param region the region to open - * @param versionOfOfflineNode that needs to be present in the offline node - * @return a protocol buffer OpenRegionRequest - */ - public static OpenRegionRequest buildOpenRegionRequest( - final HRegionInfo region, final int versionOfOfflineNode) { - OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); - builder.addOpenInfo(buildRegionOpenInfo(region, versionOfOfflineNode)); - return builder.build(); - } - - /** - * Create a CloseRegionRequest for a given region name - * - * @param regionName the name of the region to close - * @param transitionInZK indicator if to transition in ZK - * @return a CloseRegionRequest - */ - public static CloseRegionRequest buildCloseRegionRequest( - final byte[] regionName, final boolean transitionInZK) { - CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.setTransitionInZK(transitionInZK); - return builder.build(); - } - - public static CloseRegionRequest buildCloseRegionRequest( - final byte[] regionName, final int versionOfClosingNode, - ServerName destinationServer, final boolean transitionInZK) { - CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.setVersionOfClosingNode(versionOfClosingNode); - builder.setTransitionInZK(transitionInZK); - if (destinationServer != null){ - builder.setDestinationServer(ProtobufUtil.toServerName( destinationServer) ); - } - return builder.build(); - } - - /** - * Create a CloseRegionRequest for a given encoded region name - * - * @param encodedRegionName the name of the region to close - * @param transitionInZK indicator if to transition in ZK - * @return a CloseRegionRequest - */ - public static CloseRegionRequest - buildCloseRegionRequest(final String encodedRegionName, - final boolean transitionInZK) { - CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.ENCODED_REGION_NAME, - Bytes.toBytes(encodedRegionName)); - builder.setRegion(region); - builder.setTransitionInZK(transitionInZK); - return builder.build(); - } - - /** - * Create a SplitRegionRequest for a given region name - * - * @param regionName the name of the region to split - * @param 
splitPoint the split point - * @return a SplitRegionRequest - */ - public static SplitRegionRequest buildSplitRegionRequest( - final byte[] regionName, final byte[] splitPoint) { - SplitRegionRequest.Builder builder = SplitRegionRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - if (splitPoint != null) { - builder.setSplitPoint(ByteString.copyFrom(splitPoint)); - } - return builder.build(); - } - - /** - * Create a CompactRegionRequest for a given region name - * - * @param regionName the name of the region to get info - * @param major indicator if it is a major compaction - * @return a CompactRegionRequest - */ - public static CompactRegionRequest buildCompactRegionRequest( - final byte[] regionName, final boolean major, final byte [] family) { - CompactRegionRequest.Builder builder = CompactRegionRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.setMajor(major); - if (family != null) { - builder.setFamily(ByteString.copyFrom(family)); - } - return builder.build(); - } - - /** - * Create a new ReplicateWALEntryRequest from a list of HLog entries - * - * @param entries the HLog entries to be replicated - * @return a ReplicateWALEntryRequest - */ - public static ReplicateWALEntryRequest - buildReplicateWALEntryRequest(final HLog.Entry[] entries) { - FamilyScope.Builder scopeBuilder = FamilyScope.newBuilder(); - WALEntry.Builder entryBuilder = WALEntry.newBuilder(); - ReplicateWALEntryRequest.Builder builder = - ReplicateWALEntryRequest.newBuilder(); - for (HLog.Entry entry: entries) { - entryBuilder.clear(); - WALKey.Builder keyBuilder = entryBuilder.getKeyBuilder(); - HLogKey key = entry.getKey(); - keyBuilder.setEncodedRegionName( - ByteString.copyFrom(key.getEncodedRegionName())); - keyBuilder.setTableName(ByteString.copyFrom(key.getTablename())); - keyBuilder.setLogSequenceNumber(key.getLogSeqNum()); - keyBuilder.setWriteTime(key.getWriteTime()); - UUID clusterId = key.getClusterId(); - if (clusterId != null) { - AdminProtos.UUID.Builder uuidBuilder = keyBuilder.getClusterIdBuilder(); - uuidBuilder.setLeastSigBits(clusterId.getLeastSignificantBits()); - uuidBuilder.setMostSigBits(clusterId.getMostSignificantBits()); - } - WALEdit edit = entry.getEdit(); - WALEntry.WALEdit.Builder editBuilder = entryBuilder.getEditBuilder(); - NavigableMap scopes = edit.getScopes(); - if (scopes != null && !scopes.isEmpty()) { - for (Map.Entry scope: scopes.entrySet()) { - scopeBuilder.setFamily(ByteString.copyFrom(scope.getKey())); - ScopeType scopeType = ScopeType.valueOf(scope.getValue().intValue()); - scopeBuilder.setScopeType(scopeType); - editBuilder.addFamilyScope(scopeBuilder.build()); - } - } - List keyValues = edit.getKeyValues(); - for (KeyValue value: keyValues) { - editBuilder.addKeyValueBytes(ByteString.copyFrom( - value.getBuffer(), value.getOffset(), value.getLength())); - } - builder.addEntry(entryBuilder.build()); - } - return builder.build(); - } - - /** - * Create a new RollWALWriterRequest - * - * @return a ReplicateWALEntryRequest - */ - public static RollWALWriterRequest buildRollWALWriterRequest() { - RollWALWriterRequest.Builder builder = RollWALWriterRequest.newBuilder(); - return builder.build(); - } - - /** - * Create a new GetServerInfoRequest - * - * @return a GetServerInfoRequest - */ - public static GetServerInfoRequest buildGetServerInfoRequest() { - 
GetServerInfoRequest.Builder builder = GetServerInfoRequest.newBuilder(); - return builder.build(); - } - - /** - * Create a new StopServerRequest - * - * @param reason the reason to stop the server - * @return a StopServerRequest - */ - public static StopServerRequest buildStopServerRequest(final String reason) { - StopServerRequest.Builder builder = StopServerRequest.newBuilder(); - builder.setReason(reason); - return builder.build(); - } - -//End utilities for Admin - - /** - * Convert a byte array to a protocol buffer RegionSpecifier - * - * @param type the region specifier type - * @param value the region specifier byte array value - * @return a protocol buffer RegionSpecifier - */ - public static RegionSpecifier buildRegionSpecifier( - final RegionSpecifierType type, final byte[] value) { - RegionSpecifier.Builder regionBuilder = RegionSpecifier.newBuilder(); - regionBuilder.setValue(ByteString.copyFrom(value)); - regionBuilder.setType(type); - return regionBuilder.build(); - } - - /** - * Create a protocol buffer Condition - * - * @param row - * @param family - * @param qualifier - * @param comparator - * @param compareType - * @return a Condition - * @throws IOException - */ - private static Condition buildCondition(final byte[] row, - final byte[] family, final byte [] qualifier, - final ByteArrayComparable comparator, - final CompareType compareType) throws IOException { - Condition.Builder builder = Condition.newBuilder(); - builder.setRow(ByteString.copyFrom(row)); - builder.setFamily(ByteString.copyFrom(family)); - builder.setQualifier(ByteString.copyFrom(qualifier)); - builder.setComparator(ProtobufUtil.toComparator(comparator)); - builder.setCompareType(compareType); - return builder.build(); - } - - /** - * Create a protocol buffer AddColumnRequest - * - * @param tableName - * @param column - * @return an AddColumnRequest - */ - public static AddColumnRequest buildAddColumnRequest( - final byte [] tableName, final HColumnDescriptor column) { - AddColumnRequest.Builder builder = AddColumnRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); - builder.setColumnFamilies(column.convert()); - return builder.build(); - } - - /** - * Create a protocol buffer DeleteColumnRequest - * - * @param tableName - * @param columnName - * @return a DeleteColumnRequest - */ - public static DeleteColumnRequest buildDeleteColumnRequest( - final byte [] tableName, final byte [] columnName) { - DeleteColumnRequest.Builder builder = DeleteColumnRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); - builder.setColumnName(ByteString.copyFrom(columnName)); - return builder.build(); - } - - /** - * Create a protocol buffer ModifyColumnRequest - * - * @param tableName - * @param column - * @return an ModifyColumnRequest - */ - public static ModifyColumnRequest buildModifyColumnRequest( - final byte [] tableName, final HColumnDescriptor column) { - ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); - builder.setColumnFamilies(column.convert()); - return builder.build(); - } - - /** - * Create a protocol buffer MoveRegionRequest - * - * @param encodedRegionName - * @param destServerName - * @return A MoveRegionRequest - * @throws DeserializationException - */ - public static MoveRegionRequest buildMoveRegionRequest( - final byte [] encodedRegionName, final byte [] destServerName) throws DeserializationException { - MoveRegionRequest.Builder builder = 
MoveRegionRequest.newBuilder(); - builder.setRegion( - buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME,encodedRegionName)); - if (destServerName != null) { - builder.setDestServerName( - ProtobufUtil.toServerName(new ServerName(Bytes.toString(destServerName)))); - } - return builder.build(); - } - - /** - * Create a protocol buffer AssignRegionRequest - * - * @param regionName - * @return an AssignRegionRequest - */ - public static AssignRegionRequest buildAssignRegionRequest(final byte [] regionName) { - AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); - return builder.build(); - } - - /** - * Creates a protocol buffer UnassignRegionRequest - * - * @param regionName - * @param force - * @return an UnassignRegionRequest - */ - public static UnassignRegionRequest buildUnassignRegionRequest( - final byte [] regionName, final boolean force) { - UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); - builder.setForce(force); - return builder.build(); - } - - /** - * Creates a protocol buffer OfflineRegionRequest - * - * @param regionName - * @return an OfflineRegionRequest - */ - public static OfflineRegionRequest buildOfflineRegionRequest(final byte [] regionName) { - OfflineRegionRequest.Builder builder = OfflineRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); - return builder.build(); - } - - /** - * Creates a protocol buffer DeleteTableRequest - * - * @param tableName - * @return a DeleteTableRequest - */ - public static DeleteTableRequest buildDeleteTableRequest(final byte [] tableName) { - DeleteTableRequest.Builder builder = DeleteTableRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); - return builder.build(); - } - - /** - * Creates a protocol buffer EnableTableRequest - * - * @param tableName - * @return an EnableTableRequest - */ - public static EnableTableRequest buildEnableTableRequest(final byte [] tableName) { - EnableTableRequest.Builder builder = EnableTableRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); - return builder.build(); - } - - /** - * Creates a protocol buffer DisableTableRequest - * - * @param tableName - * @return a DisableTableRequest - */ - public static DisableTableRequest buildDisableTableRequest(final byte [] tableName) { - DisableTableRequest.Builder builder = DisableTableRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); - return builder.build(); - } - - /** - * Creates a protocol buffer CreateTableRequest - * - * @param hTableDesc - * @param splitKeys - * @return a CreateTableRequest - */ - public static CreateTableRequest buildCreateTableRequest( - final HTableDescriptor hTableDesc, final byte [][] splitKeys) { - CreateTableRequest.Builder builder = CreateTableRequest.newBuilder(); - builder.setTableSchema(hTableDesc.convert()); - if (splitKeys != null) { - for (byte [] splitKey : splitKeys) { - builder.addSplitKeys(ByteString.copyFrom(splitKey)); - } - } - return builder.build(); - } - - - /** - * Creates a protocol buffer ModifyTableRequest - * - * @param table - * @param hTableDesc - * @return a ModifyTableRequest - */ - public static ModifyTableRequest buildModifyTableRequest( - final byte [] table, final HTableDescriptor hTableDesc) { - ModifyTableRequest.Builder builder = 
ModifyTableRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(table)); - builder.setTableSchema(hTableDesc.convert()); - return builder.build(); - } - - /** - * Creates a protocol buffer GetSchemaAlterStatusRequest - * - * @param tableName - * @return a GetSchemaAlterStatusRequest - */ - public static GetSchemaAlterStatusRequest buildGetSchemaAlterStatusRequest( - final byte [] tableName) { - GetSchemaAlterStatusRequest.Builder builder = GetSchemaAlterStatusRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); - return builder.build(); - } - - /** - * Creates a protocol buffer GetTableDescriptorsRequest - * - * @param tableNames - * @return a GetTableDescriptorsRequest - */ - public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( - final List tableNames) { - GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder(); - if (tableNames != null) { - for (String str : tableNames) { - builder.addTableNames(str); - } - } - return builder.build(); - } - - /** - * Creates a protocol buffer IsMasterRunningRequest - * - * @return a IsMasterRunningRequest - */ - public static IsMasterRunningRequest buildIsMasterRunningRequest() { - return IsMasterRunningRequest.newBuilder().build(); - } - - /** - * Creates a protocol buffer BalanceRequest - * - * @return a BalanceRequest - */ - public static BalanceRequest buildBalanceRequest() { - return BalanceRequest.newBuilder().build(); - } - - /** - * Creates a protocol buffer SetBalancerRunningRequest - * - * @param on - * @param synchronous - * @return a SetBalancerRunningRequest - */ - public static SetBalancerRunningRequest buildSetBalancerRunningRequest(boolean on, boolean synchronous) { - return SetBalancerRunningRequest.newBuilder().setOn(on).setSynchronous(synchronous).build(); - } - - /** - * Creates a protocol buffer GetClusterStatusRequest - * - * @return A GetClusterStatusRequest - */ - public static GetClusterStatusRequest buildGetClusterStatusRequest() { - return GetClusterStatusRequest.newBuilder().build(); - } - - /** - * Creates a request for running a catalog scan - * @return A {@link CatalogScanRequest} - */ - public static CatalogScanRequest buildCatalogScanRequest() { - return CatalogScanRequest.newBuilder().build(); - } - - /** - * Creates a request for enabling/disabling the catalog janitor - * @return A {@link EnableCatalogJanitorRequest} - */ - public static EnableCatalogJanitorRequest buildEnableCatalogJanitorRequest(boolean enable) { - return EnableCatalogJanitorRequest.newBuilder().setEnable(enable).build(); - } - - /** - * Creates a request for querying the master whether the catalog janitor is enabled - * @return A {@link IsCatalogJanitorEnabledRequest} - */ - public static IsCatalogJanitorEnabledRequest buildIsCatalogJanitorEnabledRequest() { - return IsCatalogJanitorEnabledRequest.newBuilder().build(); - } - - /** - * Creates a request for querying the master the last flushed sequence Id for a region - * @param regionName - * @return A {@link GetLastFlushedSequenceIdRequest} - */ - public static GetLastFlushedSequenceIdRequest buildGetLastFlushedSequenceIdRequest( - byte[] regionName) { - return GetLastFlushedSequenceIdRequest.newBuilder().setRegionName( - ByteString.copyFrom(regionName)).build(); - } - - /** - * Create a request to grant user permissions. 
- * - * @param username the short user name who to grant permissions - * @param table optional table name the permissions apply - * @param family optional column family - * @param qualifier optional qualifier - * @param actions the permissions to be granted - * @return A {@link AccessControlProtos} GrantRequest - */ - public static AccessControlProtos.GrantRequest buildGrantRequest( - String username, byte[] table, byte[] family, byte[] qualifier, - AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder permissionBuilder = - AccessControlProtos.Permission.newBuilder(); - for (AccessControlProtos.Permission.Action a : actions) { - permissionBuilder.addAction(a); - } - if (table != null) { - permissionBuilder.setTable(ByteString.copyFrom(table)); - } - if (family != null) { - permissionBuilder.setFamily(ByteString.copyFrom(family)); - } - if (qualifier != null) { - permissionBuilder.setQualifier(ByteString.copyFrom(qualifier)); - } - - return AccessControlProtos.GrantRequest.newBuilder() - .setPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(permissionBuilder.build()) - ).build(); - } - - /** - * Create a request to revoke user permissions. - * - * @param username the short user name whose permissions to be revoked - * @param table optional table name the permissions apply - * @param family optional column family - * @param qualifier optional qualifier - * @param actions the permissions to be revoked - * @return A {@link AccessControlProtos} RevokeRequest - */ - public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, byte[] table, byte[] family, byte[] qualifier, - AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder permissionBuilder = - AccessControlProtos.Permission.newBuilder(); - for (AccessControlProtos.Permission.Action a : actions) { - permissionBuilder.addAction(a); - } - if (table != null) { - permissionBuilder.setTable(ByteString.copyFrom(table)); - } - if (family != null) { - permissionBuilder.setFamily(ByteString.copyFrom(family)); - } - if (qualifier != null) { - permissionBuilder.setQualifier(ByteString.copyFrom(qualifier)); - } - - return AccessControlProtos.RevokeRequest.newBuilder() - .setPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(permissionBuilder.build()) - ).build(); - } - - /** - * Create a RegionOpenInfo based on given region info and version of offline node - */ - private static RegionOpenInfo buildRegionOpenInfo( - final HRegionInfo region, final int versionOfOfflineNode) { - RegionOpenInfo.Builder builder = RegionOpenInfo.newBuilder(); - builder.setRegion(HRegionInfo.convert(region)); - if (versionOfOfflineNode >= 0) { - builder.setVersionOfOfflineNode(versionOfOfflineNode); - } - return builder.build(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java deleted file mode 100644 index 66c6cf5..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java +++ /dev/null @@ -1,282 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.protobuf; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.ipc.ServerRpcController; -import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ActionResult; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse; -import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; -import org.apache.hadoop.hbase.regionserver.RegionOpeningState; -import org.apache.hadoop.hbase.security.access.UserPermission; -import org.apache.hadoop.util.StringUtils; - -import com.google.protobuf.ByteString; -import com.google.protobuf.RpcController; - -/** - * Helper utility to build protocol buffer responses, - * or retrieve data from protocol buffer responses. 
- */ -@InterfaceAudience.Private -public final class ResponseConverter { - - private ResponseConverter() { - } - -// Start utilities for Client - - /** - * Get the client Results from a protocol buffer ScanResponse - * - * @param response the protocol buffer ScanResponse - * @return the client Results in the response - */ - public static Result[] getResults(final ScanResponse response) { - if (response == null) return null; - int count = response.getResultCount(); - Result[] results = new Result[count]; - for (int i = 0; i < count; i++) { - results[i] = ProtobufUtil.toResult(response.getResult(i)); - } - return results; - } - - /** - * Get the results from a protocol buffer MultiResponse - * - * @param proto the protocol buffer MultiResponse to convert - * @return the results in the MultiResponse - * @throws IOException - */ - public static List getResults( - final ClientProtos.MultiResponse proto) throws IOException { - List results = new ArrayList(); - List resultList = proto.getResultList(); - for (int i = 0, n = resultList.size(); i < n; i++) { - ActionResult result = resultList.get(i); - if (result.hasException()) { - results.add(ProtobufUtil.toException(result.getException())); - } else if (result.hasValue()) { - Object value = ProtobufUtil.toObject(result.getValue()); - if (value instanceof ClientProtos.Result) { - results.add(ProtobufUtil.toResult((ClientProtos.Result)value)); - } else { - results.add(value); - } - } else { - results.add(new Result()); - } - } - return results; - } - - /** - * Wrap a throwable to an action result. - * - * @param t - * @return an action result - */ - public static ActionResult buildActionResult(final Throwable t) { - ActionResult.Builder builder = ActionResult.newBuilder(); - NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder(); - parameterBuilder.setName(t.getClass().getName()); - parameterBuilder.setValue( - ByteString.copyFromUtf8(StringUtils.stringifyException(t))); - builder.setException(parameterBuilder.build()); - return builder.build(); - } - - /** - * Converts the permissions list into a protocol buffer UserPermissionsResponse - */ - public static UserPermissionsResponse buildUserPermissionsResponse( - final List permissions) { - UserPermissionsResponse.Builder builder = UserPermissionsResponse.newBuilder(); - for (UserPermission perm : permissions) { - builder.addPermission(ProtobufUtil.toUserPermission(perm)); - } - return builder.build(); - } - -// End utilities for Client -// Start utilities for Admin - - /** - * Get the list of regions to flush from a RollLogWriterResponse - * - * @param proto the RollLogWriterResponse - * @return the the list of regions to flush - */ - public static byte[][] getRegions(final RollWALWriterResponse proto) { - if (proto == null || proto.getRegionToFlushCount() == 0) return null; - List regions = new ArrayList(); - for (ByteString region: proto.getRegionToFlushList()) { - regions.add(region.toByteArray()); - } - return (byte[][])regions.toArray(); - } - - /** - * Get the list of region info from a GetOnlineRegionResponse - * - * @param proto the GetOnlineRegionResponse - * @return the list of region info - */ - public static List getRegionInfos(final GetOnlineRegionResponse proto) { - if (proto == null || proto.getRegionInfoCount() == 0) return null; - return ProtobufUtil.getRegionInfos(proto); - } - - /** - * Get the region opening state from a OpenRegionResponse - * - * @param proto the OpenRegionResponse - * @return the region opening state - */ - public static RegionOpeningState 
getRegionOpeningState - (final OpenRegionResponse proto) { - if (proto == null || proto.getOpeningStateCount() != 1) return null; - return RegionOpeningState.valueOf( - proto.getOpeningState(0).name()); - } - - /** - * Get a list of region opening state from a OpenRegionResponse - * - * @param proto the OpenRegionResponse - * @return the list of region opening state - */ - public static List getRegionOpeningStateList( - final OpenRegionResponse proto) { - if (proto == null) return null; - List regionOpeningStates = new ArrayList(); - for (int i = 0; i < proto.getOpeningStateCount(); i++) { - regionOpeningStates.add(RegionOpeningState.valueOf( - proto.getOpeningState(i).name())); - } - return regionOpeningStates; - } - - /** - * Check if the region is closed from a CloseRegionResponse - * - * @param proto the CloseRegionResponse - * @return the region close state - */ - public static boolean isClosed - (final CloseRegionResponse proto) { - if (proto == null || !proto.hasClosed()) return false; - return proto.getClosed(); - } - - /** - * A utility to build a GetServerInfoResponse. - * - * @param serverName - * @param webuiPort - * @return the response - */ - public static GetServerInfoResponse buildGetServerInfoResponse( - final ServerName serverName, final int webuiPort) { - GetServerInfoResponse.Builder builder = GetServerInfoResponse.newBuilder(); - ServerInfo.Builder serverInfoBuilder = ServerInfo.newBuilder(); - serverInfoBuilder.setServerName(ProtobufUtil.toServerName(serverName)); - if (webuiPort >= 0) { - serverInfoBuilder.setWebuiPort(webuiPort); - } - builder.setServerInfo(serverInfoBuilder.build()); - return builder.build(); - } - - /** - * A utility to build a GetOnlineRegionResponse. - * - * @param regions - * @return the response - */ - public static GetOnlineRegionResponse buildGetOnlineRegionResponse( - final List regions) { - GetOnlineRegionResponse.Builder builder = GetOnlineRegionResponse.newBuilder(); - for (HRegionInfo region: regions) { - builder.addRegionInfo(HRegionInfo.convert(region)); - } - return builder.build(); - } - - /** - * Creates a response for the catalog scan request - * @return A CatalogScanResponse - */ - public static CatalogScanResponse buildCatalogScanResponse(int numCleaned) { - return CatalogScanResponse.newBuilder().setScanResult(numCleaned).build(); - } - - /** - * Creates a response for the catalog scan request - * @return A EnableCatalogJanitorResponse - */ - public static EnableCatalogJanitorResponse buildEnableCatalogJanitorResponse(boolean prevValue) { - return EnableCatalogJanitorResponse.newBuilder().setPrevValue(prevValue).build(); - } - -// End utilities for Admin - - /** - * Creates a response for the last flushed sequence Id request - * @return A GetLastFlushedSequenceIdResponse - */ - public static GetLastFlushedSequenceIdResponse buildGetLastFlushedSequenceIdResponse( - long seqId) { - return GetLastFlushedSequenceIdResponse.newBuilder().setLastFlushedSequenceId(seqId).build(); - } - - /** - * Stores an exception encountered during RPC invocation so it can be passed back - * through to the client. 
- * @param controller the controller instance provided by the client when calling the service - * @param ioe the exception encountered - */ - public static void setControllerException(RpcController controller, IOException ioe) { - if (controller != null) { - if (controller instanceof ServerRpcController) { - ((ServerRpcController)controller).setFailedOn(ioe); - } else { - controller.setFailed(StringUtils.stringifyException(ioe)); - } - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java deleted file mode 100644 index b3d50c9..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.regionserver; - -public enum BloomType { - /** - * Bloomfilters disabled - */ - NONE, - /** - * Bloom enabled with Table row as Key - */ - ROW, - /** - * Bloom enabled with Table row & column (family+qualifier) as Key - */ - ROWCOL -} \ No newline at end of file diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index dc12b7e..81ca5e6 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -103,14 +103,13 @@ import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler; import org.apache.hadoop.hbase.ipc.HBaseClientRPC; -import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler; import org.apache.hadoop.hbase.ipc.HBaseServerRPC; -import org.apache.hadoop.hbase.ipc.MetricsHBaseServer; import org.apache.hadoop.hbase.ipc.ProtocolSignature; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.ProtobufReplicationUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; @@ -3604,7 +3603,7 @@ public class HRegionServer implements ClientProtocol, if (replicationSinkHandler != null) { checkOpen(); requestCount.increment(); - HLog.Entry[] entries = ProtobufUtil.toHLogEntries(request.getEntryList()); + HLog.Entry[] entries = ProtobufReplicationUtil.toHLogEntries(request.getEntryList()); if (entries != null 
&& entries.length > 0) { replicationSinkHandler.replicateLogEntries(entries); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java deleted file mode 100644 index 3fc7e57..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.DoNotRetryIOException; - -/** - * Thrown if request for nonexistent column family. - */ -@InterfaceAudience.Private -public class NoSuchColumnFamilyException extends DoNotRetryIOException { - private static final long serialVersionUID = -6569952730832331274L; - - /** default constructor */ - public NoSuchColumnFamilyException() { - super(); - } - - /** - * @param message exception message - */ - public NoSuchColumnFamilyException(String message) { - super(message); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionOpeningState.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionOpeningState.java deleted file mode 100644 index 2de11dc..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionOpeningState.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.regionserver; - -import org.apache.hadoop.classification.InterfaceAudience; - -@InterfaceAudience.Private -public enum RegionOpeningState { - - OPENED, - - ALREADY_OPENED, - - FAILED_OPENING; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java deleted file mode 100644 index e8bce88..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * Thrown if the region server log directory exists (which indicates another - * region server is running at the same address) - */ -@InterfaceAudience.Private -public class RegionServerRunningException extends IOException { - private static final long serialVersionUID = 1L << 31 - 1L; - - /** Default Constructor */ - public RegionServerRunningException() { - super(); - } - - /** - * Constructs the exception and supplies a string as the message - * @param s - message - */ - public RegionServerRunningException(String s) { - super(s); - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerStoppedException.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerStoppedException.java deleted file mode 100644 index 11ec072..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerStoppedException.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.regionserver; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * Thrown by the region server when it is in shutting down state. - */ -@SuppressWarnings("serial") -@InterfaceAudience.Private -public class RegionServerStoppedException extends IOException { - - public RegionServerStoppedException(String s) { - super(s); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java deleted file mode 100644 index dfc6aab..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Thrown when a request contains a key which is not part of this region - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class WrongRegionException extends IOException { - private static final long serialVersionUID = 993179627856392526L; - - /** constructor */ - public WrongRegionException() { - super(); - } - - /** - * Constructor - * @param s message - */ - public WrongRegionException(String s) { - super(s); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java deleted file mode 100644 index 8fb8f08..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.regionserver.wal; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * Thrown when we fail close of the write-ahead-log file. - * Package private. Only used inside this package. - */ -@InterfaceAudience.Private -public class FailedLogCloseException extends IOException { - private static final long serialVersionUID = 1759152841462990925L; - - /** - * - */ - public FailedLogCloseException() { - super(); - } - - /** - * @param arg0 - */ - public FailedLogCloseException(String arg0) { - super(arg0); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/OrphanHLogAfterSplitException.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/OrphanHLogAfterSplitException.java deleted file mode 100644 index 640bffa..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/OrphanHLogAfterSplitException.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver.wal; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; - -@InterfaceAudience.Private -public class OrphanHLogAfterSplitException extends IOException { - - /** - * Create this exception without a message - */ - public OrphanHLogAfterSplitException() { - super(); - } - - /** - * Create this exception with a message - * @param message why it failed - */ - public OrphanHLogAfterSplitException(String message) { - super(message); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java deleted file mode 100644 index 2ec760c..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.replication; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.zookeeper.KeeperException; - -/** - * This class acts as a wrapper for all the objects used to identify and - * communicate with remote peers and is responsible for answering to expired - * sessions and re-establishing the ZK connections. - */ -@InterfaceAudience.Private -public class ReplicationPeer implements Abortable, Closeable { - private static final Log LOG = LogFactory.getLog(ReplicationPeer.class); - - private final String clusterKey; - private final String id; - private List regionServers = new ArrayList(0); - private final AtomicBoolean peerEnabled = new AtomicBoolean(); - // Cannot be final since a new object needs to be recreated when session fails - private ZooKeeperWatcher zkw; - private final Configuration conf; - - private PeerStateTracker peerStateTracker; - - /** - * Constructor that takes all the objects required to communicate with the - * specified peer, except for the region server addresses. - * @param conf configuration object to this peer - * @param key cluster key used to locate the peer - * @param id string representation of this peer's identifier - */ - public ReplicationPeer(Configuration conf, String key, - String id) throws IOException { - this.conf = conf; - this.clusterKey = key; - this.id = id; - this.reloadZkWatcher(); - } - - /** - * start a state tracker to check whether this peer is enabled or not - * - * @param zookeeper zk watcher for the local cluster - * @param peerStateNode path to zk node which stores peer state - * @throws KeeperException - */ - public void startStateTracker(ZooKeeperWatcher zookeeper, String peerStateNode) - throws KeeperException { - ReplicationZookeeper.ensurePeerEnabled(zookeeper, peerStateNode); - this.peerStateTracker = new PeerStateTracker(peerStateNode, zookeeper, this); - this.peerStateTracker.start(); - try { - this.readPeerStateZnode(); - } catch (DeserializationException e) { - throw ZKUtil.convert(e); - } - } - - private void readPeerStateZnode() throws DeserializationException { - this.peerEnabled.set(ReplicationZookeeper.isPeerEnabled(this.peerStateTracker.getData(false))); - } - - /** - * Get the cluster key of that peer - * @return string consisting of zk ensemble addresses, client port - * and root znode - */ - public String getClusterKey() { - return clusterKey; - } - - /** - * Get the state of this peer - * @return atomic boolean that holds the status - */ - public AtomicBoolean getPeerEnabled() { - return peerEnabled; - } - - /** - * Get a list of all the addresses of all the region servers - * for this peer cluster - * @return list of addresses - */ - public List getRegionServers() { - return regionServers; - } - - /** - * Set the list of region servers for that peer - * @param regionServers list of addresses for the region servers - */ - public void setRegionServers(List 
regionServers) { - this.regionServers = regionServers; - } - - /** - * Get the ZK connection to this peer - * @return zk connection - */ - public ZooKeeperWatcher getZkw() { - return zkw; - } - - /** - * Get the identifier of this peer - * @return string representation of the id (short) - */ - public String getId() { - return id; - } - - /** - * Get the configuration object required to communicate with this peer - * @return configuration object - */ - public Configuration getConfiguration() { - return conf; - } - - @Override - public void abort(String why, Throwable e) { - LOG.fatal("The ReplicationPeer coresponding to peer " + clusterKey - + " was aborted for the following reason(s):" + why, e); - } - - /** - * Closes the current ZKW (if not null) and creates a new one - * @throws IOException If anything goes wrong connecting - */ - public void reloadZkWatcher() throws IOException { - if (zkw != null) zkw.close(); - zkw = new ZooKeeperWatcher(conf, - "connection to cluster: " + id, this); - } - - @Override - public boolean isAborted() { - // Currently the replication peer is never "Aborted", we just log when the - // abort method is called. - return false; - } - - @Override - public void close() throws IOException { - if (zkw != null){ - zkw.close(); - } - } - - /** - * Tracker for state of this peer - */ - public class PeerStateTracker extends ZooKeeperNodeTracker { - - public PeerStateTracker(String peerStateZNode, ZooKeeperWatcher watcher, - Abortable abortable) { - super(watcher, peerStateZNode, abortable); - } - - @Override - public synchronized void nodeDataChanged(String path) { - if (path.equals(node)) { - super.nodeDataChanged(path); - try { - readPeerStateZnode(); - } catch (DeserializationException e) { - LOG.warn("Failed deserializing the content of " + path, e); - } - } - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java deleted file mode 100644 index 2db7df9..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java +++ /dev/null @@ -1,1104 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
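For readers tracing the replication pieces being moved, here is a hedged usage sketch of the ReplicationPeer class deleted above. The peer id, cluster key and znode path are illustrative, and the class is @InterfaceAudience.Private, so nothing outside HBase should call it directly.

// Illustrative only: how ReplicationPeer gets wired up (normally done by
// ReplicationZookeeper.getPeer()); identifiers below are hypothetical.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.replication.ReplicationPeer;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class ReplicationPeerSketch {
  public static void main(String[] args) throws Exception {
    Configuration peerConf = HBaseConfiguration.create(); // assumed to point at the peer cluster
    ReplicationPeer peer = new ReplicationPeer(peerConf, "zk1.example.com:2181:/hbase", "1");
    // Track .../peers/1/peer-state so enable/disable is picked up without polling.
    ZooKeeperWatcher localZk = new ZooKeeperWatcher(HBaseConfiguration.create(), "peer-sketch", peer);
    peer.startStateTracker(localZk, "/hbase/replication/peers/1/peer-state");
    System.out.println("peer enabled: " + peer.getPeerEnabled().get());
    peer.close();
  }
}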
- */ -package org.apache.hadoop.hbase.replication; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.SortedMap; -import java.util.SortedSet; -import java.util.TreeMap; -import java.util.TreeSet; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.replication.regionserver.Replication; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.KeeperException.ConnectionLossException; -import org.apache.zookeeper.KeeperException.NodeExistsException; -import org.apache.zookeeper.KeeperException.SessionExpiredException; - -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * This class serves as a helper for all things related to zookeeper in - * replication. - *

          - * The layout looks something like this under zookeeper.znode.parent for the
          - * master cluster:
          - * <pre>
          - * replication/
          - *  state      {contains true or false}
          - *  clusterId  {contains a byte}
          - *  peers/
          - *    1/   {contains a full cluster address}
          - *      peer-state  {contains ENABLED or DISABLED}
          - *    2/
          - *    ...
          - *  rs/ {lists all RS that replicate}
          - *    startcode1/ {lists all peer clusters}
          - *      1/ {lists hlogs to process}
          - *        10.10.1.76%3A53488.123456789 {contains nothing or a position}
          - *        10.10.1.76%3A53488.123456790
          - *        ...
          - *      2/
          - *      ...
          - *    startcode2/
          - *    ...
          - * </pre>
          - */ -@InterfaceAudience.Private -public class ReplicationZookeeper implements Closeable{ - private static final Log LOG = - LogFactory.getLog(ReplicationZookeeper.class); - // Name of znode we use to lock when failover - private final static String RS_LOCK_ZNODE = "lock"; - - // Our handle on zookeeper - private final ZooKeeperWatcher zookeeper; - // Map of peer clusters keyed by their id - private Map peerClusters; - // Path to the root replication znode - private String replicationZNode; - // Path to the peer clusters znode - private String peersZNode; - // Path to the znode that contains all RS that replicates - private String rsZNode; - // Path to this region server's name under rsZNode - private String rsServerNameZnode; - // Name node if the replicationState znode - private String replicationStateNodeName; - // Name of zk node which stores peer state. The peer-state znode is under a - // peers' id node; e.g. /hbase/replication/peers/PEER_ID/peer-state - private String peerStateNodeName; - private final Configuration conf; - // Is this cluster replicating at the moment? - private AtomicBoolean replicating; - // The key to our own cluster - private String ourClusterKey; - // Abortable - private Abortable abortable; - private ReplicationStatusTracker statusTracker; - - /** - * ZNode content if enabled state. - */ - // Public so it can be seen by test code. - public static final byte[] ENABLED_ZNODE_BYTES = toByteArray(ZooKeeperProtos.ReplicationState.State.ENABLED); - - /** - * ZNode content if disabled state. - */ - static final byte[] DISABLED_ZNODE_BYTES = toByteArray(ZooKeeperProtos.ReplicationState.State.DISABLED); - - /** - * Constructor used by clients of replication (like master and HBase clients) - * @param conf conf to use - * @param zk zk connection to use - * @throws IOException - */ - public ReplicationZookeeper(final Abortable abortable, final Configuration conf, - final ZooKeeperWatcher zk) throws KeeperException { - this.conf = conf; - this.zookeeper = zk; - this.replicating = new AtomicBoolean(); - setZNodes(abortable); - } - - /** - * Constructor used by region servers, connects to the peer cluster right away. 
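The first constructor above is the one replication admin code uses; a minimal construction sketch, assuming a reachable ZooKeeper ensemble (the watcher name and the throw-on-abort Abortable are illustrative):

// Illustration of the (Abortable, Configuration, ZooKeeperWatcher) constructor above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.replication.ReplicationZookeeper;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class ReplicationZookeeperClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Abortable abortable = new Abortable() {
      @Override public void abort(String why, Throwable e) { throw new RuntimeException(why, e); }
      @Override public boolean isAborted() { return false; }
    };
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "replication-sketch", abortable);
    ReplicationZookeeper repZk = new ReplicationZookeeper(abortable, conf, zkw);
    System.out.println("peers znode: " + repZk.getPeersZNode());
  }
}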
- * - * @param server - * @param replicating atomic boolean to start/stop replication - * @throws IOException - * @throws KeeperException - */ - public ReplicationZookeeper(final Server server, final AtomicBoolean replicating) - throws IOException, KeeperException { - this.abortable = server; - this.zookeeper = server.getZooKeeper(); - this.conf = server.getConfiguration(); - this.replicating = replicating; - setZNodes(server); - - this.peerClusters = new HashMap(); - ZKUtil.createWithParents(this.zookeeper, - ZKUtil.joinZNode(this.replicationZNode, this.replicationStateNodeName)); - this.rsServerNameZnode = ZKUtil.joinZNode(rsZNode, server.getServerName().toString()); - ZKUtil.createWithParents(this.zookeeper, this.rsServerNameZnode); - connectExistingPeers(); - } - - private void setZNodes(Abortable abortable) throws KeeperException { - String replicationZNodeName = conf.get("zookeeper.znode.replication", "replication"); - String peersZNodeName = conf.get("zookeeper.znode.replication.peers", "peers"); - this.peerStateNodeName = conf.get("zookeeper.znode.replication.peers.state", "peer-state"); - this.replicationStateNodeName = conf.get("zookeeper.znode.replication.state", "state"); - String rsZNodeName = conf.get("zookeeper.znode.replication.rs", "rs"); - this.ourClusterKey = ZKUtil.getZooKeeperClusterKey(this.conf); - this.replicationZNode = ZKUtil.joinZNode(this.zookeeper.baseZNode, replicationZNodeName); - this.peersZNode = ZKUtil.joinZNode(replicationZNode, peersZNodeName); - ZKUtil.createWithParents(this.zookeeper, this.peersZNode); - this.rsZNode = ZKUtil.joinZNode(replicationZNode, rsZNodeName); - ZKUtil.createWithParents(this.zookeeper, this.rsZNode); - - // Set a tracker on replicationStateNodeNode - this.statusTracker = new ReplicationStatusTracker(this.zookeeper, abortable); - statusTracker.start(); - readReplicationStateZnode(); - } - - private void connectExistingPeers() throws IOException, KeeperException { - List znodes = ZKUtil.listChildrenNoWatch(this.zookeeper, this.peersZNode); - if (znodes != null) { - for (String z : znodes) { - connectToPeer(z); - } - } - } - - /** - * List this cluster's peers' IDs - * @return list of all peers' identifiers - */ - public List listPeersIdsAndWatch() { - List ids = null; - try { - ids = ZKUtil.listChildrenAndWatchThem(this.zookeeper, this.peersZNode); - } catch (KeeperException e) { - this.abortable.abort("Cannot get the list of peers ", e); - } - return ids; - } - - /** - * Map of this cluster's peers for display. 
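For reference, the znode names read in setZNodes above compose into full paths roughly as follows; this sketch uses the default values and plain string joins in place of ZKUtil.joinZNode:

// Hypothetical illustration of the default replication znode layout derived
// from the configuration keys read in setZNodes.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ReplicationZNodePathsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    String base        = conf.get("zookeeper.znode.parent", "/hbase");
    String replication = base + "/" + conf.get("zookeeper.znode.replication", "replication");
    String peers       = replication + "/" + conf.get("zookeeper.znode.replication.peers", "peers");
    String state       = replication + "/" + conf.get("zookeeper.znode.replication.state", "state");
    String rs          = replication + "/" + conf.get("zookeeper.znode.replication.rs", "rs");
    String peerState   = peers + "/1/" + conf.get("zookeeper.znode.replication.peers.state", "peer-state");
    System.out.println(state);      // /hbase/replication/state
    System.out.println(peerState);  // /hbase/replication/peers/1/peer-state
    System.out.println(rs);         // /hbase/replication/rs
  }
}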
- * @return A map of peer ids to peer cluster keys - */ - public Map listPeers() { - Map peers = new TreeMap(); - List ids = null; - try { - ids = ZKUtil.listChildrenNoWatch(this.zookeeper, this.peersZNode); - for (String id : ids) { - byte[] bytes = ZKUtil.getData(this.zookeeper, ZKUtil.joinZNode(this.peersZNode, id)); - String clusterKey = null; - try { - clusterKey = parsePeerFrom(bytes); - } catch (DeserializationException de) { - LOG.warn("Failed parse of clusterid=" + id + " znode content, continuing."); - continue; - } - peers.put(id, clusterKey); - } - } catch (KeeperException e) { - this.abortable.abort("Cannot get the list of peers ", e); - } - return peers; - } - - /** - * Returns all region servers from given peer - * - * @param peerClusterId (byte) the cluster to interrogate - * @return addresses of all region servers - */ - public List getSlavesAddresses(String peerClusterId) { - if (this.peerClusters.size() == 0) { - return Collections.emptyList(); - } - ReplicationPeer peer = this.peerClusters.get(peerClusterId); - if (peer == null) { - return Collections.emptyList(); - } - - List addresses; - try { - addresses = fetchSlavesAddresses(peer.getZkw()); - } catch (KeeperException ke) { - reconnectPeer(ke, peer); - addresses = Collections.emptyList(); - } - peer.setRegionServers(addresses); - return peer.getRegionServers(); - } - - /** - * Get the list of all the region servers from the specified peer - * @param zkw zk connection to use - * @return list of region server addresses or an empty list if the slave - * is unavailable - */ - private List fetchSlavesAddresses(ZooKeeperWatcher zkw) - throws KeeperException { - return listChildrenAndGetAsServerNames(zkw, zkw.rsZNode); - } - - /** - * Lists the children of the specified znode, retrieving the data of each - * child as a server address. - * - * Used to list the currently online regionservers and their addresses. - * - * Sets no watches at all, this method is best effort. - * - * Returns an empty list if the node has no children. Returns null if the - * parent node itself does not exist. 
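The slave-address listing above boils down to parsing each region server znode child name into a ServerName; a small sketch with a made-up server name:

// Region server znode children have the form "hostname,port,startcode";
// listChildrenAndGetAsServerNames parses each child with ServerName.parseServerName.
import org.apache.hadoop.hbase.ServerName;

public class ServerNameParseSketch {
  public static void main(String[] args) {
    ServerName sn = ServerName.parseServerName("rs1.example.com,60020,1356554400000");
    System.out.println(sn.getHostname() + ":" + sn.getPort() + " started at " + sn.getStartcode());
  }
}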
- * - * @param zkw zookeeper reference - * @param znode node to get children of as addresses - * @return list of data of children of specified znode, empty if no children, - * null if parent does not exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static List listChildrenAndGetAsServerNames( - ZooKeeperWatcher zkw, String znode) - throws KeeperException { - List children = ZKUtil.listChildrenNoWatch(zkw, znode); - if(children == null) { - return Collections.emptyList(); - } - List addresses = new ArrayList(children.size()); - for (String child : children) { - addresses.add(ServerName.parseServerName(child)); - } - return addresses; - } - - /** - * This method connects this cluster to another one and registers it - * in this region server's replication znode - * @param peerId id of the peer cluster - * @throws KeeperException - */ - public boolean connectToPeer(String peerId) - throws IOException, KeeperException { - if (peerClusters == null) { - return false; - } - if (this.peerClusters.containsKey(peerId)) { - return false; - } - ReplicationPeer peer = getPeer(peerId); - if (peer == null) { - return false; - } - this.peerClusters.put(peerId, peer); - ZKUtil.createWithParents(this.zookeeper, ZKUtil.joinZNode( - this.rsServerNameZnode, peerId)); - LOG.info("Added new peer cluster " + peer.getClusterKey()); - return true; - } - - /** - * Helper method to connect to a peer - * @param peerId peer's identifier - * @return object representing the peer - * @throws IOException - * @throws KeeperException - */ - public ReplicationPeer getPeer(String peerId) throws IOException, KeeperException{ - String znode = ZKUtil.joinZNode(this.peersZNode, peerId); - byte [] data = ZKUtil.getData(this.zookeeper, znode); - String otherClusterKey = ""; - try { - otherClusterKey = parsePeerFrom(data); - } catch (DeserializationException e) { - LOG.warn("Failed parse of cluster key from peerId=" + peerId - + ", specifically the content from the following znode: " + znode); - } - if (this.ourClusterKey.equals(otherClusterKey)) { - LOG.debug("Not connecting to " + peerId + " because it's us"); - return null; - } - // Construct the connection to the new peer - Configuration otherConf = new Configuration(this.conf); - try { - ZKUtil.applyClusterKeyToConf(otherConf, otherClusterKey); - } catch (IOException e) { - LOG.error("Can't get peer because:", e); - return null; - } - - ReplicationPeer peer = new ReplicationPeer(otherConf, peerId, - otherClusterKey); - peer.startStateTracker(this.zookeeper, this.getPeerStateNode(peerId)); - return peer; - } - - /** - * Set the new replication state for this cluster - * @param newState - */ - public void setReplicating(boolean newState) throws KeeperException { - ZKUtil.createWithParents(this.zookeeper, - ZKUtil.joinZNode(this.replicationZNode, this.replicationStateNodeName)); - byte[] stateBytes = (newState == true) ? ENABLED_ZNODE_BYTES : DISABLED_ZNODE_BYTES; - ZKUtil.setData(this.zookeeper, - ZKUtil.joinZNode(this.replicationZNode, this.replicationStateNodeName), stateBytes); - } - - /** - * Remove the peer from zookeeper. 
which will trigger the watchers on every - * region server and close their sources - * @param id - * @throws IllegalArgumentException Thrown when the peer doesn't exist - */ - public void removePeer(String id) throws IOException { - try { - if (!peerExists(id)) { - throw new IllegalArgumentException("Cannot remove inexisting peer"); - } - ZKUtil.deleteNodeRecursively(this.zookeeper, - ZKUtil.joinZNode(this.peersZNode, id)); - } catch (KeeperException e) { - throw new IOException("Unable to remove a peer", e); - } - } - - /** - * Add a new peer to this cluster - * @param id peer's identifier - * @param clusterKey ZK ensemble's addresses, client port and root znode - * @throws IllegalArgumentException Thrown when the peer doesn't exist - * @throws IllegalStateException Thrown when a peer already exists, since - * multi-slave isn't supported yet. - */ - public void addPeer(String id, String clusterKey) throws IOException { - try { - if (peerExists(id)) { - throw new IllegalArgumentException("Cannot add existing peer"); - } - ZKUtil.createWithParents(this.zookeeper, this.peersZNode); - ZKUtil.createAndWatch(this.zookeeper, ZKUtil.joinZNode(this.peersZNode, id), - toByteArray(clusterKey)); - // A peer is enabled by default - ZKUtil.createAndWatch(this.zookeeper, getPeerStateNode(id), ENABLED_ZNODE_BYTES); - } catch (KeeperException e) { - throw new IOException("Unable to add peer", e); - } - } - - /** - * @param clusterKey - * @return Serialized protobuf of clusterKey with pb magic prefix - * prepended suitable for use as content of a this.peersZNode; i.e. - * the content of PEER_ID znode under /hbase/replication/peers/PEER_ID - */ - static byte[] toByteArray(final String clusterKey) { - byte[] bytes = ZooKeeperProtos.ReplicationPeer.newBuilder().setClusterkey(clusterKey).build() - .toByteArray(); - return ProtobufUtil.prependPBMagic(bytes); - } - - /** - * @param state - * @return Serialized protobuf of state with pb magic prefix - * prepended suitable for use as content of either the cluster state - * znode -- whether or not we should be replicating kept in - * /hbase/replication/state -- or as content of a peer-state znode - * under a peer cluster id as in - * /hbase/replication/peers/PEER_ID/peer-state. - */ - static byte[] toByteArray(final ZooKeeperProtos.ReplicationState.State state) { - byte[] bytes = ZooKeeperProtos.ReplicationState.newBuilder().setState(state).build() - .toByteArray(); - return ProtobufUtil.prependPBMagic(bytes); - } - - /** - * @param position - * @return Serialized protobuf of position with pb magic prefix - * prepended suitable for use as content of an hlog position in a - * replication queue. - */ - static byte[] toByteArray( - final long position) { - byte[] bytes = ZooKeeperProtos.ReplicationHLogPosition.newBuilder().setPosition(position) - .build().toByteArray(); - return ProtobufUtil.prependPBMagic(bytes); - } - - /** - * @param lockOwner - * @return Serialized protobuf of lockOwner with pb magic prefix - * prepended suitable for use as content of an replication lock during - * region server fail over. - */ - static byte[] lockToByteArray( - final String lockOwner) { - byte[] bytes = ZooKeeperProtos.ReplicationLock.newBuilder().setLockOwner(lockOwner).build() - .toByteArray(); - return ProtobufUtil.prependPBMagic(bytes); - } - - /** - * @param bytes Content of a peer znode. - * @return ClusterKey parsed from the passed bytes. 
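All of the toByteArray helpers above follow the same pattern: serialize a small protobuf message and prepend the HBase PB magic so the parse methods can tell it apart from legacy plain-byte content. A round-trip sketch for the hlog-position variant (the position value is arbitrary):

// Mirrors toByteArray(long)/parseHLogPositionFrom: protobuf payload with a PB
// magic prefix, with Bytes.toLong as the fallback for legacy znode content.
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

public class HLogPositionEncodingSketch {
  public static void main(String[] args) {
    byte[] encoded = ProtobufUtil.prependPBMagic(
        ZooKeeperProtos.ReplicationHLogPosition.newBuilder().setPosition(1234L).build().toByteArray());
    System.out.println("has PB magic: " + ProtobufUtil.isPBMagicPrefix(encoded)); // true
  }
}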
- * @throws DeserializationException - */ - static String parsePeerFrom(final byte[] bytes) throws DeserializationException { - if (ProtobufUtil.isPBMagicPrefix(bytes)) { - int pblen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.ReplicationPeer.Builder builder = ZooKeeperProtos.ReplicationPeer - .newBuilder(); - ZooKeeperProtos.ReplicationPeer peer; - try { - peer = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return peer.getClusterkey(); - } else { - if (bytes.length > 0) { - return Bytes.toString(bytes); - } - return ""; - } - } - - /** - * @param bytes Content of a state znode. - * @return State parsed from the passed bytes. - * @throws DeserializationException - */ - static ZooKeeperProtos.ReplicationState.State parseStateFrom(final byte[] bytes) - throws DeserializationException { - ProtobufUtil.expectPBMagicPrefix(bytes); - int pblen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.ReplicationState.Builder builder = ZooKeeperProtos.ReplicationState - .newBuilder(); - ZooKeeperProtos.ReplicationState state; - try { - state = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); - return state.getState(); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - } - - /** - * @param bytes - Content of a HLog position znode. - * @return long - The current HLog position. - * @throws DeserializationException - */ - static long parseHLogPositionFrom( - final byte[] bytes) throws DeserializationException { - if (ProtobufUtil.isPBMagicPrefix(bytes)) { - int pblen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.ReplicationHLogPosition.Builder builder = ZooKeeperProtos.ReplicationHLogPosition - .newBuilder(); - ZooKeeperProtos.ReplicationHLogPosition position; - try { - position = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return position.getPosition(); - } else { - if (bytes.length > 0) { - return Bytes.toLong(bytes); - } - return 0; - } - } - - /** - * @param bytes - Content of a lock znode. - * @return String - The owner of the lock. 
- * @throws DeserializationException - */ - static String parseLockOwnerFrom( - final byte[] bytes) throws DeserializationException { - if (ProtobufUtil.isPBMagicPrefix(bytes)) { - int pblen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.ReplicationLock.Builder builder = ZooKeeperProtos.ReplicationLock - .newBuilder(); - ZooKeeperProtos.ReplicationLock lock; - try { - lock = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return lock.getLockOwner(); - } else { - if (bytes.length > 0) { - return Bytes.toString(bytes); - } - return ""; - } - } - - private boolean peerExists(String id) throws KeeperException { - return ZKUtil.checkExists(this.zookeeper, - ZKUtil.joinZNode(this.peersZNode, id)) >= 0; - } - - /** - * Enable replication to the peer - * - * @param id peer's identifier - * @throws IllegalArgumentException - * Thrown when the peer doesn't exist - */ - public void enablePeer(String id) throws IOException { - changePeerState(id, ZooKeeperProtos.ReplicationState.State.ENABLED); - LOG.info("peer " + id + " is enabled"); - } - - /** - * Disable replication to the peer - * - * @param id peer's identifier - * @throws IllegalArgumentException - * Thrown when the peer doesn't exist - */ - public void disablePeer(String id) throws IOException { - changePeerState(id, ZooKeeperProtos.ReplicationState.State.DISABLED); - LOG.info("peer " + id + " is disabled"); - } - - private void changePeerState(String id, ZooKeeperProtos.ReplicationState.State state) - throws IOException { - try { - if (!peerExists(id)) { - throw new IllegalArgumentException("peer " + id + " is not registered"); - } - String peerStateZNode = getPeerStateNode(id); - byte[] stateBytes = (state == ZooKeeperProtos.ReplicationState.State.ENABLED) ? ENABLED_ZNODE_BYTES - : DISABLED_ZNODE_BYTES; - if (ZKUtil.checkExists(this.zookeeper, peerStateZNode) != -1) { - ZKUtil.setData(this.zookeeper, peerStateZNode, stateBytes); - } else { - ZKUtil.createAndWatch(zookeeper, peerStateZNode, stateBytes); - } - LOG.info("state of the peer " + id + " changed to " + state.name()); - } catch (KeeperException e) { - throw new IOException("Unable to change state of the peer " + id, e); - } - } - - /** - * Check whether the peer is enabled or not. This method checks the atomic - * boolean of ReplicationPeer locally. - * - * @param id peer identifier - * @return true if the peer is enabled, otherwise false - * @throws IllegalArgumentException - * Thrown when the peer doesn't exist - */ - public boolean getPeerEnabled(String id) { - if (!this.peerClusters.containsKey(id)) { - throw new IllegalArgumentException("peer " + id + " is not registered"); - } - return this.peerClusters.get(id).getPeerEnabled().get(); - } - - private String getPeerStateNode(String id) { - return ZKUtil.joinZNode(this.peersZNode, ZKUtil.joinZNode(id, this.peerStateNodeName)); - } - - /** - * This reads the state znode for replication and sets the atomic boolean - */ - private void readReplicationStateZnode() { - try { - this.replicating.set(getReplication()); - LOG.info("Replication is now " + (this.replicating.get()? - "started" : "stopped")); - } catch (KeeperException e) { - this.abortable.abort("Failed getting data on from " + getRepStateNode(), e); - } - } - - /** - * Get the replication status of this cluster. If the state znode doesn't - * exist it will also create it and set it true. 
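Putting the peer management methods above together, a hedged admin-side sequence; the peer id and cluster key are hypothetical, and production code drives this through ReplicationAdmin rather than calling ReplicationZookeeper directly:

// Illustrative only: managing a peer through the methods above. The cluster
// key format is "zkQuorum:clientPort:znodeParent".
import java.io.IOException;
import org.apache.hadoop.hbase.replication.ReplicationZookeeper;

public class PeerAdminSketch {
  static void managePeer(ReplicationZookeeper repZk) throws IOException {
    repZk.addPeer("1", "zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase");
    repZk.disablePeer("1");   // writes the DISABLED pb blob to .../peers/1/peer-state
    repZk.enablePeer("1");    // writes the ENABLED pb blob back
    repZk.removePeer("1");    // deletes .../peers/1 recursively
  }
}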
- * @return returns true when it's enabled, else false - * @throws KeeperException - */ - public boolean getReplication() throws KeeperException { - byte [] data = this.statusTracker.getData(false); - if (data == null || data.length == 0) { - setReplicating(true); - return true; - } - try { - return isPeerEnabled(data); - } catch (DeserializationException e) { - throw ZKUtil.convert(e); - } - } - - private String getRepStateNode() { - return ZKUtil.joinZNode(this.replicationZNode, this.replicationStateNodeName); - } - - /** - * Add a new log to the list of hlogs in zookeeper - * @param filename name of the hlog's znode - * @param peerId name of the cluster's znode - */ - public void addLogToList(String filename, String peerId) - throws KeeperException { - String znode = ZKUtil.joinZNode(this.rsServerNameZnode, peerId); - znode = ZKUtil.joinZNode(znode, filename); - ZKUtil.createWithParents(this.zookeeper, znode); - } - - /** - * Remove a log from the list of hlogs in zookeeper - * @param filename name of the hlog's znode - * @param clusterId name of the cluster's znode - */ - public void removeLogFromList(String filename, String clusterId) { - try { - String znode = ZKUtil.joinZNode(rsServerNameZnode, clusterId); - znode = ZKUtil.joinZNode(znode, filename); - ZKUtil.deleteNode(this.zookeeper, znode); - } catch (KeeperException e) { - this.abortable.abort("Failed remove from list", e); - } - } - - /** - * Set the current position of the specified cluster in the current hlog - * @param filename filename name of the hlog's znode - * @param clusterId clusterId name of the cluster's znode - * @param position the position in the file - * @throws IOException - */ - public void writeReplicationStatus(String filename, String clusterId, - long position) { - try { - String znode = ZKUtil.joinZNode(this.rsServerNameZnode, clusterId); - znode = ZKUtil.joinZNode(znode, filename); - // Why serialize String of Long and note Long as bytes? 
- ZKUtil.setData(this.zookeeper, znode, toByteArray(position)); - } catch (KeeperException e) { - this.abortable.abort("Writing replication status", e); - } - } - - /** - * Get a list of all the other region servers in this cluster - * and set a watch - * @return a list of server nanes - */ - public List getRegisteredRegionServers() { - List result = null; - try { - result = ZKUtil.listChildrenAndWatchThem( - this.zookeeper, this.zookeeper.rsZNode); - } catch (KeeperException e) { - this.abortable.abort("Get list of registered region servers", e); - } - return result; - } - - /** - * Get the list of the replicators that have queues, they can be alive, dead - * or simply from a previous run - * @return a list of server names - */ - public List getListOfReplicators() { - List result = null; - try { - result = ZKUtil.listChildrenNoWatch(this.zookeeper, rsZNode); - } catch (KeeperException e) { - this.abortable.abort("Get list of replicators", e); - } - return result; - } - - /** - * Get the list of peer clusters for the specified server names - * @param rs server names of the rs - * @return a list of peer cluster - */ - public List getListPeersForRS(String rs) { - String znode = ZKUtil.joinZNode(rsZNode, rs); - List result = null; - try { - result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode); - } catch (KeeperException e) { - this.abortable.abort("Get list of peers for rs", e); - } - return result; - } - - /** - * Get the list of hlogs for the specified region server and peer cluster - * @param rs server names of the rs - * @param id peer cluster - * @return a list of hlogs - */ - public List getListHLogsForPeerForRS(String rs, String id) { - String znode = ZKUtil.joinZNode(rsZNode, rs); - znode = ZKUtil.joinZNode(znode, id); - List result = null; - try { - result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode); - } catch (KeeperException e) { - this.abortable.abort("Get list of hlogs for peer", e); - } - return result; - } - - /** - * Try to set a lock in another server's znode. - * @param znode the server names of the other server - * @return true if the lock was acquired, false in every other cases - */ - public boolean lockOtherRS(String znode) { - try { - String parent = ZKUtil.joinZNode(this.rsZNode, znode); - if (parent.equals(rsServerNameZnode)) { - LOG.warn("Won't lock because this is us, we're dead!"); - return false; - } - String p = ZKUtil.joinZNode(parent, RS_LOCK_ZNODE); - ZKUtil.createAndWatch(this.zookeeper, p, lockToByteArray(rsServerNameZnode)); - } catch (KeeperException e) { - // This exception will pop up if the znode under which we're trying to - // create the lock is already deleted by another region server, meaning - // that the transfer already occurred. 
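The listing helpers above make it possible to walk every replication queue in the cluster; a sketch that assumes an already constructed ReplicationZookeeper instance:

// Sketch of inspecting the replication queues: each replicator RS znode has
// one child per peer, and each peer znode lists the hlogs still to ship.
import java.util.List;
import org.apache.hadoop.hbase.replication.ReplicationZookeeper;

public class QueueDumpSketch {
  static void dumpQueues(ReplicationZookeeper repZk) {
    List<String> replicators = repZk.getListOfReplicators();
    if (replicators == null) return;
    for (String rs : replicators) {
      List<String> peers = repZk.getListPeersForRS(rs);
      if (peers == null) continue;
      for (String peerId : peers) {
        List<String> hlogs = repZk.getListHLogsForPeerForRS(rs, peerId);
        System.out.println(rs + "/" + peerId + " -> " + hlogs);
      }
    }
  }
}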
- // NoNode => transfer is done and znodes are already deleted - // NodeExists => lock znode already created by another RS - if (e instanceof KeeperException.NoNodeException || - e instanceof KeeperException.NodeExistsException) { - LOG.info("Won't transfer the queue," + - " another RS took care of it because of: " + e.getMessage()); - } else { - LOG.info("Failed lock other rs", e); - } - return false; - } - return true; - } - - /** - * This methods copies all the hlogs queues from another region server - * and returns them all sorted per peer cluster (appended with the dead - * server's znode) - * @param znode server names to copy - * @return all hlogs for all peers of that cluster, null if an error occurred - */ - public SortedMap> copyQueuesFromRS(String znode) { - // TODO this method isn't atomic enough, we could start copying and then - // TODO fail for some reason and we would end up with znodes we don't want. - SortedMap> queues = - new TreeMap>(); - try { - String nodePath = ZKUtil.joinZNode(rsZNode, znode); - List clusters = - ZKUtil.listChildrenNoWatch(this.zookeeper, nodePath); - // We have a lock znode in there, it will count as one. - if (clusters == null || clusters.size() <= 1) { - return queues; - } - // The lock isn't a peer cluster, remove it - clusters.remove(RS_LOCK_ZNODE); - for (String cluster : clusters) { - // We add the name of the recovered RS to the new znode, we can even - // do that for queues that were recovered 10 times giving a znode like - // number-startcode-number-otherstartcode-number-anotherstartcode-etc - String newCluster = cluster+"-"+znode; - String newClusterZnode = ZKUtil.joinZNode(rsServerNameZnode, newCluster); - String clusterPath = ZKUtil.joinZNode(nodePath, cluster); - List hlogs = ZKUtil.listChildrenNoWatch(this.zookeeper, clusterPath); - // That region server didn't have anything to replicate for this cluster - if (hlogs == null || hlogs.size() == 0) { - continue; - } - ZKUtil.createNodeIfNotExistsAndWatch(this.zookeeper, newClusterZnode, - HConstants.EMPTY_BYTE_ARRAY); - SortedSet logQueue = new TreeSet(); - queues.put(newCluster, logQueue); - for (String hlog : hlogs) { - String z = ZKUtil.joinZNode(clusterPath, hlog); - byte[] positionBytes = ZKUtil.getData(this.zookeeper, z); - long position = 0; - try { - position = parseHLogPositionFrom(positionBytes); - } catch (DeserializationException e) { - LOG.warn("Failed parse of hlog position from the following znode: " + z); - } - LOG.debug("Creating " + hlog + " with data " + position); - String child = ZKUtil.joinZNode(newClusterZnode, hlog); - // Position doesn't actually change, we are just deserializing it for - // logging, so just use the already serialized version - ZKUtil.createAndWatch(this.zookeeper, child, positionBytes); - logQueue.add(hlog); - } - } - } catch (KeeperException e) { - this.abortable.abort("Copy queues from rs", e); - } - return queues; - } - - /** - * Delete a complete queue of hlogs - * @param peerZnode znode of the peer cluster queue of hlogs to delete - */ - public void deleteSource(String peerZnode, boolean closeConnection) { - try { - ZKUtil.deleteNodeRecursively(this.zookeeper, - ZKUtil.joinZNode(rsServerNameZnode, peerZnode)); - if (closeConnection) { - this.peerClusters.get(peerZnode).getZkw().close(); - this.peerClusters.remove(peerZnode); - } - } catch (KeeperException e) { - this.abortable.abort("Failed delete of " + peerZnode, e); - } - } - - /** - * Recursive deletion of all znodes in specified rs' znode - * @param znode - */ - public void 
deleteRsQueues(String znode) { - String fullpath = ZKUtil.joinZNode(rsZNode, znode); - try { - List clusters = - ZKUtil.listChildrenNoWatch(this.zookeeper, fullpath); - for (String cluster : clusters) { - // We'll delete it later - if (cluster.equals(RS_LOCK_ZNODE)) { - continue; - } - String fullClusterPath = ZKUtil.joinZNode(fullpath, cluster); - ZKUtil.deleteNodeRecursively(this.zookeeper, fullClusterPath); - } - // Finish cleaning up - ZKUtil.deleteNodeRecursively(this.zookeeper, fullpath); - } catch (KeeperException e) { - if (e instanceof KeeperException.NoNodeException || - e instanceof KeeperException.NotEmptyException) { - // Testing a special case where another region server was able to - // create a lock just after we deleted it, but then was also able to - // delete the RS znode before us or its lock znode is still there. - if (e.getPath().equals(fullpath)) { - return; - } - } - this.abortable.abort("Failed delete of " + znode, e); - } - } - - /** - * Delete this cluster's queues - */ - public void deleteOwnRSZNode() { - try { - ZKUtil.deleteNodeRecursively(this.zookeeper, - this.rsServerNameZnode); - } catch (KeeperException e) { - // if the znode is already expired, don't bother going further - if (e instanceof KeeperException.SessionExpiredException) { - return; - } - this.abortable.abort("Failed delete of " + this.rsServerNameZnode, e); - } - } - - /** - * Get the position of the specified hlog in the specified peer znode - * @param peerId znode of the peer cluster - * @param hlog name of the hlog - * @return the position in that hlog - * @throws KeeperException - */ - public long getHLogRepPosition(String peerId, String hlog) - throws KeeperException { - String clusterZnode = ZKUtil.joinZNode(rsServerNameZnode, peerId); - String znode = ZKUtil.joinZNode(clusterZnode, hlog); - byte[] bytes = ZKUtil.getData(this.zookeeper, znode); - try { - return parseHLogPositionFrom(bytes); - } catch (DeserializationException de) { - LOG.warn("Failed parse of HLogPosition for peerId=" + peerId + " and hlog=" + hlog - + "znode content, continuing."); - } - // if we can not parse the position, start at the beginning of the hlog file - // again - return 0; - } - - /** - * Returns the UUID of the provided peer id. Should a connection loss or session - * expiration happen, the ZK handler will be reopened once and if it still doesn't - * work then it will bail and return null. - * @param peerId the peer's ID that will be converted into a UUID - * @return a UUID or null if there's a ZK connection issue - */ - public UUID getPeerUUID(String peerId) { - ReplicationPeer peer = getPeerClusters().get(peerId); - UUID peerUUID = null; - try { - peerUUID = getUUIDForCluster(peer.getZkw()); - } catch (KeeperException ke) { - reconnectPeer(ke, peer); - } - return peerUUID; - } - - /** - * Get the UUID for the provided ZK watcher. 
Doesn't handle any ZK exceptions - * @param zkw watcher connected to an ensemble - * @return the UUID read from zookeeper - * @throws KeeperException - */ - public UUID getUUIDForCluster(ZooKeeperWatcher zkw) throws KeeperException { - return UUID.fromString(ZKClusterId.readClusterIdZNode(zkw)); - } - - private void reconnectPeer(KeeperException ke, ReplicationPeer peer) { - if (ke instanceof ConnectionLossException - || ke instanceof SessionExpiredException) { - LOG.warn( - "Lost the ZooKeeper connection for peer " + peer.getClusterKey(), - ke); - try { - peer.reloadZkWatcher(); - } catch(IOException io) { - LOG.warn( - "Creation of ZookeeperWatcher failed for peer " - + peer.getClusterKey(), io); - } - } - } - - public void registerRegionServerListener(ZooKeeperListener listener) { - this.zookeeper.registerListener(listener); - } - - /** - * Get a map of all peer clusters - * @return map of peer cluster keyed by id - */ - public Map getPeerClusters() { - return this.peerClusters; - } - - /** - * Extracts the znode name of a peer cluster from a ZK path - * @param fullPath Path to extract the id from - * @return the id or an empty string if path is invalid - */ - public static String getZNodeName(String fullPath) { - String[] parts = fullPath.split("/"); - return parts.length > 0 ? parts[parts.length-1] : ""; - } - - /** - * Get this cluster's zk connection - * @return zk connection - */ - public ZooKeeperWatcher getZookeeperWatcher() { - return this.zookeeper; - } - - - /** - * Get the full path to the peers' znode - * @return path to peers in zk - */ - public String getPeersZNode() { - return peersZNode; - } - - @Override - public void close() throws IOException { - if (statusTracker != null) - statusTracker.stop(); - } - - /** - * Utility method to ensure an ENABLED znode is in place; if not present, we - * create it. - * @param zookeeper - * @param path Path to znode to check - * @return True if we created the znode. - * @throws NodeExistsException - * @throws KeeperException - */ - static boolean ensurePeerEnabled(final ZooKeeperWatcher zookeeper, final String path) - throws NodeExistsException, KeeperException { - if (ZKUtil.checkExists(zookeeper, path) == -1) { - ZKUtil.createAndWatch(zookeeper, path, ENABLED_ZNODE_BYTES); - return true; - } - return false; - } - - /** - * @param bytes - * @return True if the passed in bytes are those of a pb - * serialized ENABLED state. 
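A tiny illustration of getZNodeName above, which simply returns the last component of a znode path (the path shown is made up):

import org.apache.hadoop.hbase.replication.ReplicationZookeeper;

public class ZNodeNameSketch {
  public static void main(String[] args) {
    // getZNodeName returns the last path component, here the peer id "1".
    System.out.println(ReplicationZookeeper.getZNodeName("/hbase/replication/peers/1"));
  }
}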
- * @throws DeserializationException - */ - static boolean isPeerEnabled(final byte[] bytes) throws DeserializationException { - ZooKeeperProtos.ReplicationState.State state = parseStateFrom(bytes); - return ZooKeeperProtos.ReplicationState.State.ENABLED == state; - } - - /** - * Tracker for status of the replication - */ - public class ReplicationStatusTracker extends ZooKeeperNodeTracker { - public ReplicationStatusTracker(ZooKeeperWatcher watcher, - Abortable abortable) { - super(watcher, getRepStateNode(), abortable); - } - - @Override - public synchronized void nodeDataChanged(String path) { - if (path.equals(node)) { - super.nodeDataChanged(path); - readReplicationStateZnode(); - } - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index f63e6e7..e400312 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -51,8 +51,8 @@ import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.ProtobufReplicationUtil; import org.apache.hadoop.hbase.regionserver.wal.HLog; -import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.replication.ReplicationZookeeper; @@ -634,8 +634,8 @@ public class ReplicationSource extends Thread } try { AdminProtocol rrs = getRS(); - ProtobufUtil.replicateWALEntry(rrs, - Arrays.copyOf(this.entriesArray, currentNbEntries)); + ProtobufReplicationUtil.replicateWALEntry(rrs, + Arrays.copyOf(this.entriesArray, currentNbEntries)); if (this.lastLoggedPosition != this.repLogReader.getPosition()) { this.manager.logPositionAndCleanOldLogs(this.currentPath, this.peerClusterZnode, this.repLogReader.getPosition(), diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java deleted file mode 100644 index b8c5d3b..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.security; - -import org.apache.hadoop.hbase.DoNotRetryIOException; - -/** - * Exception thrown by access-related methods. 
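Because AccessDeniedException above extends DoNotRetryIOException, clients fail fast instead of retrying; a hedged handling sketch in which the table name, row, and the presence of a secured cluster with the AccessController coprocessor are all assumptions:

// Illustrative handling of AccessDeniedException on a client call; the table
// and row are stand-ins, not part of this patch.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.util.Bytes;

public class AccessDeniedSketch {
  public static void main(String[] args) throws Exception {
    HTable table = new HTable(HBaseConfiguration.create(), "example_table");
    try {
      table.get(new Get(Bytes.toBytes("row1")));
    } catch (AccessDeniedException ade) {
      // DoNotRetryIOException subclass: the client gives up immediately.
      System.err.println("permission denied: " + ade.getMessage());
    } finally {
      table.close();
    }
  }
}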
- */ -public class AccessDeniedException extends DoNotRetryIOException { - private static final long serialVersionUID = 1913879564363001780L; - - public AccessDeniedException() { - super(); - } - - public AccessDeniedException(Class clazz, String s) { - super( "AccessDenied [" + clazz.getName() + "]: " + s); - } - - public AccessDeniedException(String s) { - super(s); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java deleted file mode 100644 index 859ad12..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.security; - -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -import javax.security.auth.callback.Callback; -import javax.security.auth.callback.CallbackHandler; -import javax.security.auth.callback.NameCallback; -import javax.security.auth.callback.PasswordCallback; -import javax.security.auth.callback.UnsupportedCallbackException; -import javax.security.sasl.RealmCallback; -import javax.security.sasl.RealmChoiceCallback; -import javax.security.sasl.Sasl; -import javax.security.sasl.SaslException; -import javax.security.sasl.SaslClient; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.io.WritableUtils; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.AuthMethod; -import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslStatus; -import org.apache.hadoop.security.SaslInputStream; -import org.apache.hadoop.security.SaslOutputStream; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; - -/** - * A utility class that encapsulates SASL logic for RPC client. 
- * Copied from org.apache.hadoop.security - */ -public class HBaseSaslRpcClient { - public static final Log LOG = LogFactory.getLog(HBaseSaslRpcClient.class); - - private final SaslClient saslClient; - - /** - * Create a HBaseSaslRpcClient for an authentication method - * - * @param method - * the requested authentication method - * @param token - * token to use if needed by the authentication method - */ - public HBaseSaslRpcClient(AuthMethod method, - Token token, String serverPrincipal) - throws IOException { - switch (method) { - case DIGEST: - if (LOG.isDebugEnabled()) - LOG.debug("Creating SASL " + AuthMethod.DIGEST.getMechanismName() - + " client to authenticate to service at " + token.getService()); - saslClient = Sasl.createSaslClient(new String[] { AuthMethod.DIGEST - .getMechanismName() }, null, null, HBaseSaslRpcServer.SASL_DEFAULT_REALM, - HBaseSaslRpcServer.SASL_PROPS, new SaslClientCallbackHandler(token)); - break; - case KERBEROS: - if (LOG.isDebugEnabled()) { - LOG - .debug("Creating SASL " + AuthMethod.KERBEROS.getMechanismName() - + " client. Server's Kerberos principal name is " - + serverPrincipal); - } - if (serverPrincipal == null || serverPrincipal.length() == 0) { - throw new IOException( - "Failed to specify server's Kerberos principal name"); - } - String names[] = HBaseSaslRpcServer.splitKerberosName(serverPrincipal); - if (names.length != 3) { - throw new IOException( - "Kerberos principal does not have the expected format: " - + serverPrincipal); - } - saslClient = Sasl.createSaslClient(new String[] { AuthMethod.KERBEROS - .getMechanismName() }, null, names[0], names[1], - HBaseSaslRpcServer.SASL_PROPS, null); - break; - default: - throw new IOException("Unknown authentication method " + method); - } - if (saslClient == null) - throw new IOException("Unable to find SASL client implementation"); - } - - private static void readStatus(DataInputStream inStream) throws IOException { - int status = inStream.readInt(); // read status - if (status != SaslStatus.SUCCESS.state) { - throw new RemoteException(WritableUtils.readString(inStream), - WritableUtils.readString(inStream)); - } - } - - /** - * Do client side SASL authentication with server via the given InputStream - * and OutputStream - * - * @param inS - * InputStream to use - * @param outS - * OutputStream to use - * @return true if connection is set up, or false if needs to switch - * to simple Auth. 
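The constructor above builds either a DIGEST-MD5 client from a delegation token or a GSSAPI client from the server's Kerberos principal, which must have the three-part primary/instance@REALM form. A hedged construction sketch for the Kerberos path; the principal is made up and a Kerberos login is assumed to already be in place:

// Illustrative construction for the KERBEROS branch above; no token is needed
// on this path, so null is passed for it.
import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;
import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.AuthMethod;

public class SaslClientSketch {
  public static void main(String[] args) throws Exception {
    HBaseSaslRpcClient client = new HBaseSaslRpcClient(
        AuthMethod.KERBEROS, null, "hbase/rs1.example.com@EXAMPLE.COM");
    System.out.println("created " + client);
    client.dispose();
  }
}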
- * @throws IOException - */ - public boolean saslConnect(InputStream inS, OutputStream outS) - throws IOException { - DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS)); - DataOutputStream outStream = new DataOutputStream(new BufferedOutputStream( - outS)); - - try { - byte[] saslToken = new byte[0]; - if (saslClient.hasInitialResponse()) - saslToken = saslClient.evaluateChallenge(saslToken); - if (saslToken != null) { - outStream.writeInt(saslToken.length); - outStream.write(saslToken, 0, saslToken.length); - outStream.flush(); - if (LOG.isDebugEnabled()) - LOG.debug("Have sent token of size " + saslToken.length - + " from initSASLContext."); - } - if (!saslClient.isComplete()) { - readStatus(inStream); - int len = inStream.readInt(); - if (len == HBaseSaslRpcServer.SWITCH_TO_SIMPLE_AUTH) { - if (LOG.isDebugEnabled()) - LOG.debug("Server asks us to fall back to simple auth."); - saslClient.dispose(); - return false; - } - saslToken = new byte[len]; - if (LOG.isDebugEnabled()) - LOG.debug("Will read input token of size " + saslToken.length - + " for processing by initSASLContext"); - inStream.readFully(saslToken); - } - - while (!saslClient.isComplete()) { - saslToken = saslClient.evaluateChallenge(saslToken); - if (saslToken != null) { - if (LOG.isDebugEnabled()) - LOG.debug("Will send token of size " + saslToken.length - + " from initSASLContext."); - outStream.writeInt(saslToken.length); - outStream.write(saslToken, 0, saslToken.length); - outStream.flush(); - } - if (!saslClient.isComplete()) { - readStatus(inStream); - saslToken = new byte[inStream.readInt()]; - if (LOG.isDebugEnabled()) - LOG.debug("Will read input token of size " + saslToken.length - + " for processing by initSASLContext"); - inStream.readFully(saslToken); - } - } - if (LOG.isDebugEnabled()) { - LOG.debug("SASL client context established. Negotiated QoP: " - + saslClient.getNegotiatedProperty(Sasl.QOP)); - } - return true; - } catch (IOException e) { - try { - saslClient.dispose(); - } catch (SaslException ignored) { - // ignore further exceptions during cleanup - } - throw e; - } - } - - /** - * Get a SASL wrapped InputStream. Can be called only after saslConnect() has - * been called. - * - * @param in - * the InputStream to wrap - * @return a SASL wrapped InputStream - * @throws IOException - */ - public InputStream getInputStream(InputStream in) throws IOException { - if (!saslClient.isComplete()) { - throw new IOException("Sasl authentication exchange hasn't completed yet"); - } - return new SaslInputStream(in, saslClient); - } - - /** - * Get a SASL wrapped OutputStream. Can be called only after saslConnect() has - * been called. 
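Once saslConnect above succeeds, the raw socket streams have to be swapped for the SASL-wrapped ones returned by the two getters; a usage sketch with an illustrative socket:

// Illustrative client-side handshake and stream wrapping with the methods above.
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;

public class SaslHandshakeSketch {
  static void handshake(HBaseSaslRpcClient client, Socket socket) throws Exception {
    InputStream rawIn = socket.getInputStream();
    OutputStream rawOut = socket.getOutputStream();
    if (client.saslConnect(rawIn, rawOut)) {
      InputStream in = client.getInputStream(rawIn);     // SASL-wrapped
      OutputStream out = client.getOutputStream(rawOut); // SASL-wrapped
      // ... exchange RPC bytes over in/out ...
    } else {
      // Server asked to fall back to simple auth; keep using rawIn/rawOut.
    }
  }
}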
- * - * @param out - * the OutputStream to wrap - * @return a SASL wrapped OutputStream - * @throws IOException - */ - public OutputStream getOutputStream(OutputStream out) throws IOException { - if (!saslClient.isComplete()) { - throw new IOException("Sasl authentication exchange hasn't completed yet"); - } - return new SaslOutputStream(out, saslClient); - } - - /** Release resources used by wrapped saslClient */ - public void dispose() throws SaslException { - saslClient.dispose(); - } - - private static class SaslClientCallbackHandler implements CallbackHandler { - private final String userName; - private final char[] userPassword; - - public SaslClientCallbackHandler(Token token) { - this.userName = HBaseSaslRpcServer.encodeIdentifier(token.getIdentifier()); - this.userPassword = HBaseSaslRpcServer.encodePassword(token.getPassword()); - } - - public void handle(Callback[] callbacks) - throws UnsupportedCallbackException { - NameCallback nc = null; - PasswordCallback pc = null; - RealmCallback rc = null; - for (Callback callback : callbacks) { - if (callback instanceof RealmChoiceCallback) { - continue; - } else if (callback instanceof NameCallback) { - nc = (NameCallback) callback; - } else if (callback instanceof PasswordCallback) { - pc = (PasswordCallback) callback; - } else if (callback instanceof RealmCallback) { - rc = (RealmCallback) callback; - } else { - throw new UnsupportedCallbackException(callback, - "Unrecognized SASL client callback"); - } - } - if (nc != null) { - if (LOG.isDebugEnabled()) - LOG.debug("SASL client callback: setting username: " + userName); - nc.setName(userName); - } - if (pc != null) { - if (LOG.isDebugEnabled()) - LOG.debug("SASL client callback: setting userPassword"); - pc.setPassword(userPassword); - } - if (rc != null) { - if (LOG.isDebugEnabled()) - LOG.debug("SASL client callback: setting realm: " - + rc.getDefaultText()); - rc.setText(rc.getDefaultText()); - } - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java index 09a26ff..9af1cd8 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java @@ -18,14 +18,6 @@ package org.apache.hadoop.hbase.security; -import java.io.ByteArrayInputStream; -import java.io.DataInput; -import java.io.DataInputStream; -import java.io.DataOutput; -import java.io.IOException; -import java.util.Map; -import java.util.TreeMap; - import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.callback.NameCallback; @@ -33,17 +25,13 @@ import javax.security.auth.callback.PasswordCallback; import javax.security.auth.callback.UnsupportedCallbackException; import javax.security.sasl.AuthorizeCallback; import javax.security.sasl.RealmCallback; -import javax.security.sasl.Sasl; -import org.apache.commons.codec.binary.Base64; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ipc.HBaseServer; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.SecretManager.InvalidToken; 
/** @@ -51,126 +39,6 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken; */ public class HBaseSaslRpcServer { public static final Log LOG = LogFactory.getLog(HBaseSaslRpcServer.class); - public static final String SASL_DEFAULT_REALM = "default"; - public static final Map SASL_PROPS = - new TreeMap(); - - public static final int SWITCH_TO_SIMPLE_AUTH = -88; - - public static enum QualityOfProtection { - AUTHENTICATION("auth"), - INTEGRITY("auth-int"), - PRIVACY("auth-conf"); - - public final String saslQop; - - private QualityOfProtection(String saslQop) { - this.saslQop = saslQop; - } - - public String getSaslQop() { - return saslQop; - } - } - - public static void init(Configuration conf) { - QualityOfProtection saslQOP = QualityOfProtection.AUTHENTICATION; - String rpcProtection = conf.get("hbase.rpc.protection", - QualityOfProtection.AUTHENTICATION.name().toLowerCase()); - if (QualityOfProtection.INTEGRITY.name().toLowerCase() - .equals(rpcProtection)) { - saslQOP = QualityOfProtection.INTEGRITY; - } else if (QualityOfProtection.PRIVACY.name().toLowerCase().equals( - rpcProtection)) { - saslQOP = QualityOfProtection.PRIVACY; - } - - SASL_PROPS.put(Sasl.QOP, saslQOP.getSaslQop()); - SASL_PROPS.put(Sasl.SERVER_AUTH, "true"); - } - - static String encodeIdentifier(byte[] identifier) { - return new String(Base64.encodeBase64(identifier)); - } - - static byte[] decodeIdentifier(String identifier) { - return Base64.decodeBase64(identifier.getBytes()); - } - - public static T getIdentifier(String id, - SecretManager secretManager) throws InvalidToken { - byte[] tokenId = decodeIdentifier(id); - T tokenIdentifier = secretManager.createIdentifier(); - try { - tokenIdentifier.readFields(new DataInputStream(new ByteArrayInputStream( - tokenId))); - } catch (IOException e) { - throw (InvalidToken) new InvalidToken( - "Can't de-serialize tokenIdentifier").initCause(e); - } - return tokenIdentifier; - } - - static char[] encodePassword(byte[] password) { - return new String(Base64.encodeBase64(password)).toCharArray(); - } - - /** Splitting fully qualified Kerberos name into parts */ - public static String[] splitKerberosName(String fullName) { - return fullName.split("[/@]"); - } - - public enum SaslStatus { - SUCCESS (0), - ERROR (1); - - public final int state; - private SaslStatus(int state) { - this.state = state; - } - } - - /** Authentication method */ - public static enum AuthMethod { - SIMPLE((byte) 80, "", AuthenticationMethod.SIMPLE), - KERBEROS((byte) 81, "GSSAPI", AuthenticationMethod.KERBEROS), - DIGEST((byte) 82, "DIGEST-MD5", AuthenticationMethod.TOKEN); - - /** The code for this method. */ - public final byte code; - public final String mechanismName; - public final AuthenticationMethod authenticationMethod; - - private AuthMethod(byte code, String mechanismName, - AuthenticationMethod authMethod) { - this.code = code; - this.mechanismName = mechanismName; - this.authenticationMethod = authMethod; - } - - private static final int FIRST_CODE = values()[0].code; - - /** Return the object represented by the code. */ - private static AuthMethod valueOf(byte code) { - final int i = (code & 0xff) - FIRST_CODE; - return i < 0 || i >= values().length ? 
null : values()[i]; - } - - /** Return the SASL mechanism name */ - public String getMechanismName() { - return mechanismName; - } - - /** Read from in */ - public static AuthMethod read(DataInput in) throws IOException { - return valueOf(in.readByte()); - } - - /** Write to out */ - public void write(DataOutput out) throws IOException { - out.write(code); - } - }; /** CallbackHandler for SASL DIGEST-MD5 mechanism */ public static class SaslDigestCallbackHandler implements CallbackHandler { @@ -185,7 +53,7 @@ public class HBaseSaslRpcServer { } private char[] getPassword(TokenIdentifier tokenid) throws InvalidToken { - return encodePassword(secretManager.retrievePassword(tokenid)); + return SaslUtils.encodePassword(secretManager.retrievePassword(tokenid)); } /** {@inheritDoc} */ @@ -210,7 +78,8 @@ public class HBaseSaslRpcServer { } } if (pc != null) { - TokenIdentifier tokenIdentifier = getIdentifier(nc.getDefaultName(), secretManager); + TokenIdentifier tokenIdentifier = SaslUtils.getIdentifier(nc.getDefaultName(), + secretManager); char[] password = getPassword(tokenIdentifier); UserGroupInformation user = null; user = tokenIdentifier.getUser(); // may throw exception @@ -232,7 +101,7 @@ public class HBaseSaslRpcServer { if (ac.isAuthorized()) { if (LOG.isDebugEnabled()) { String username = - getIdentifier(authzid, secretManager).getUser().getUserName(); + SaslUtils.getIdentifier(authzid, secretManager).getUser().getUserName(); LOG.debug("SASL server DIGEST-MD5 callback: setting " + "canonicalized client ID: " + username); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/KerberosInfo.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/KerberosInfo.java deleted file mode 100644 index 5e5e6ee..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/security/KerberosInfo.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.security; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * Indicates Kerberos related information to be used for authorizing connections - * over a given RPC protocol interface. 
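A rough sketch of how this annotation (and the companion TokenInfo annotation deleted just below) is applied to an RPC protocol interface. The interface name and method are hypothetical; the configuration key and token kind mirror values that appear elsewhere in this patch.

    package org.apache.hadoop.hbase.security;

    // Hypothetical secured protocol; serverPrincipal names the configuration key
    // that holds the server's Kerberos principal, and the TokenInfo value is the
    // Token.getKind() string this interface accepts.
    @KerberosInfo(serverPrincipal = "hbase.regionserver.kerberos.principal")
    @TokenInfo("HBASE_AUTH_TOKEN")
    public interface ExampleSecuredProtocol {
      void ping();
    }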
- */ -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.TYPE) -@InterfaceAudience.Private -public @interface KerberosInfo { - /** Key for getting server's Kerberos principal name from Configuration */ - String serverPrincipal(); - String clientPrincipal() default ""; -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/TokenInfo.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/TokenInfo.java deleted file mode 100644 index a652a74..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/security/TokenInfo.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.security; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * Indicates Token related information to be used in authorizing connections - * over a given RPC protocol interface. - */ -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.TYPE) -@InterfaceAudience.Private -public @interface TokenInfo { - /** The type of Token.getKind() to be handled */ - String value(); -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/User.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/User.java deleted file mode 100644 index 2cc3f59..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/security/User.java +++ /dev/null @@ -1,407 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.security; - -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.hbase.util.Methods; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.security.UserGroupInformation; - -import java.io.IOException; -import java.lang.reflect.UndeclaredThrowableException; -import java.security.PrivilegedAction; -import java.security.PrivilegedExceptionAction; - -import org.apache.commons.logging.Log; - -/** - * Wrapper to abstract out usage of user and group information in HBase. - * - *
          - * This class provides a common interface for interacting with user and group - * information across changing APIs in different versions of Hadoop. It only - * provides access to the common set of functionality in - * {@link org.apache.hadoop.security.UserGroupInformation} currently needed by - * HBase, but can be extended as needs change. - *
          - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public abstract class User { - public static final String HBASE_SECURITY_CONF_KEY = - "hbase.security.authentication"; - - private static Log LOG = LogFactory.getLog(User.class); - - protected UserGroupInformation ugi; - - public UserGroupInformation getUGI() { - return ugi; - } - - /** - * Returns the full user name. For Kerberos principals this will include - * the host and realm portions of the principal name. - * @return User full name. - */ - public String getName() { - return ugi.getUserName(); - } - - /** - * Returns the list of groups of which this user is a member. On secure - * Hadoop this returns the group information for the user as resolved on the - * server. For 0.20 based Hadoop, the group names are passed from the client. - */ - public String[] getGroupNames() { - return ugi.getGroupNames(); - } - - /** - * Returns the shortened version of the user name -- the portion that maps - * to an operating system user name. - * @return Short name - */ - public abstract String getShortName(); - - /** - * Executes the given action within the context of this user. - */ - public abstract T runAs(PrivilegedAction action); - - /** - * Executes the given action within the context of this user. - */ - public abstract T runAs(PrivilegedExceptionAction action) - throws IOException, InterruptedException; - - /** - * Requests an authentication token for this user and stores it in the - * user's credentials. - * - * @throws IOException - */ - public abstract void obtainAuthTokenForJob(Configuration conf, Job job) - throws IOException, InterruptedException; - - /** - * Requests an authentication token for this user and stores it in the - * user's credentials. - * - * @throws IOException - */ - public abstract void obtainAuthTokenForJob(JobConf job) - throws IOException, InterruptedException; - - public String toString() { - return ugi.toString(); - } - - /** - * Returns the {@code User} instance within current execution context. - */ - public static User getCurrent() throws IOException { - User user = new SecureHadoopUser(); - if (user.getUGI() == null) { - return null; - } - return user; - } - - /** - * Wraps an underlying {@code UserGroupInformation} instance. - * @param ugi The base Hadoop user - * @return User - */ - public static User create(UserGroupInformation ugi) { - if (ugi == null) { - return null; - } - return new SecureHadoopUser(ugi); - } - - /** - * Generates a new {@code User} instance specifically for use in test code. - * @param name the full username - * @param groups the group names to which the test user will belong - * @return a new User instance - */ - public static User createUserForTesting(Configuration conf, - String name, String[] groups) { - return SecureHadoopUser.createUserForTesting(conf, name, groups); - } - - /** - * Log in the current process using the given configuration keys for the - * credential file and login principal. - * - *
          This is only applicable when - * running on secure Hadoop -- see - * org.apache.hadoop.security.SecurityUtil#login(Configuration,String,String,String). - * On regular Hadoop (without security features), this will safely be ignored. - *
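A minimal sketch of how the User API described above is typically driven at process start: check whether HBase security is enabled, log in from a keytab, then run work inside the user's context. The keytab/principal keys are the region server ones and are only illustrative here; generic signatures stripped from this hunk are assumed to take their usual parameterized forms.

    import java.net.InetAddress;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.security.User;

    public class SecureLoginSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        if (User.isHBaseSecurityEnabled(conf)) {
          // Obtain Kerberos credentials from the configured keytab and principal.
          User.login(conf, "hbase.regionserver.keytab.file",
              "hbase.regionserver.kerberos.principal",
              InetAddress.getLocalHost().getCanonicalHostName());
        }
        User user = User.getCurrent();
        String result = user.runAs(new PrivilegedExceptionAction<String>() {
          public String run() throws Exception {
            // Work executed within the logged-in user's security context.
            return "running as " + User.getCurrent().getShortName();
          }
        });
        System.out.println(result);
      }
    }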
          - * - * @param conf The configuration data to use - * @param fileConfKey Property key used to configure path to the credential file - * @param principalConfKey Property key used to configure login principal - * @param localhost Current hostname to use in any credentials - * @throws IOException underlying exception from SecurityUtil.login() call - */ - public static void login(Configuration conf, String fileConfKey, - String principalConfKey, String localhost) throws IOException { - SecureHadoopUser.login(conf, fileConfKey, principalConfKey, localhost); - } - - /** - * Returns whether or not Kerberos authentication is configured for Hadoop. - * For non-secure Hadoop, this always returns false. - * For secure Hadoop, it will return the value from - * {@code UserGroupInformation.isSecurityEnabled()}. - */ - public static boolean isSecurityEnabled() { - return SecureHadoopUser.isSecurityEnabled(); - } - - /** - * Returns whether or not secure authentication is enabled for HBase. Note that - * HBase security requires HDFS security to provide any guarantees, so this requires that - * both hbase.security.authentication and hadoop.security.authentication - * are set to kerberos. - */ - public static boolean isHBaseSecurityEnabled(Configuration conf) { - return "kerberos".equalsIgnoreCase(conf.get(HBASE_SECURITY_CONF_KEY)) && - "kerberos".equalsIgnoreCase( - conf.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION)); - } - - /* Concrete implementations */ - - /** - * Bridges {@code User} invocations to underlying calls to - * {@link org.apache.hadoop.security.UserGroupInformation} for secure Hadoop - * 0.20 and versions 0.21 and above. - */ - private static class SecureHadoopUser extends User { - private String shortName; - - private SecureHadoopUser() throws IOException { - try { - ugi = (UserGroupInformation) callStatic("getCurrentUser"); - } catch (IOException ioe) { - throw ioe; - } catch (RuntimeException re) { - throw re; - } catch (Exception e) { - throw new UndeclaredThrowableException(e, - "Unexpected exception getting current secure user"); - } - } - - private SecureHadoopUser(UserGroupInformation ugi) { - this.ugi = ugi; - } - - @Override - public String getShortName() { - if (shortName != null) return shortName; - - try { - shortName = (String)call(ugi, "getShortUserName", null, null); - return shortName; - } catch (RuntimeException re) { - throw re; - } catch (Exception e) { - throw new UndeclaredThrowableException(e, - "Unexpected error getting user short name"); - } - } - - @Override - public T runAs(PrivilegedAction action) { - try { - return (T) call(ugi, "doAs", new Class[]{PrivilegedAction.class}, - new Object[]{action}); - } catch (RuntimeException re) { - throw re; - } catch (Exception e) { - throw new UndeclaredThrowableException(e, - "Unexpected exception in runAs()"); - } - } - - @Override - public T runAs(PrivilegedExceptionAction action) - throws IOException, InterruptedException { - try { - return (T) call(ugi, "doAs", - new Class[]{PrivilegedExceptionAction.class}, - new Object[]{action}); - } catch (IOException ioe) { - throw ioe; - } catch (InterruptedException ie) { - throw ie; - } catch (RuntimeException re) { - throw re; - } catch (Exception e) { - throw new UndeclaredThrowableException(e, - "Unexpected exception in runAs(PrivilegedExceptionAction)"); - } - } - - @Override - public void obtainAuthTokenForJob(Configuration conf, Job job) - throws IOException, InterruptedException { - try { - Class c = Class.forName( - 
"org.apache.hadoop.hbase.security.token.TokenUtil"); - Methods.call(c, null, "obtainTokenForJob", - new Class[]{Configuration.class, UserGroupInformation.class, - Job.class}, - new Object[]{conf, ugi, job}); - } catch (ClassNotFoundException cnfe) { - throw new RuntimeException("Failure loading TokenUtil class, " - +"is secure RPC available?", cnfe); - } catch (IOException ioe) { - throw ioe; - } catch (InterruptedException ie) { - throw ie; - } catch (RuntimeException re) { - throw re; - } catch (Exception e) { - throw new UndeclaredThrowableException(e, - "Unexpected error calling TokenUtil.obtainAndCacheToken()"); - } - } - - @Override - public void obtainAuthTokenForJob(JobConf job) - throws IOException, InterruptedException { - try { - Class c = Class.forName( - "org.apache.hadoop.hbase.security.token.TokenUtil"); - Methods.call(c, null, "obtainTokenForJob", - new Class[]{JobConf.class, UserGroupInformation.class}, - new Object[]{job, ugi}); - } catch (ClassNotFoundException cnfe) { - throw new RuntimeException("Failure loading TokenUtil class, " - +"is secure RPC available?", cnfe); - } catch (IOException ioe) { - throw ioe; - } catch (InterruptedException ie) { - throw ie; - } catch (RuntimeException re) { - throw re; - } catch (Exception e) { - throw new UndeclaredThrowableException(e, - "Unexpected error calling TokenUtil.obtainAndCacheToken()"); - } - } - - /** @see User#createUserForTesting(org.apache.hadoop.conf.Configuration, String, String[]) */ - public static User createUserForTesting(Configuration conf, - String name, String[] groups) { - try { - return new SecureHadoopUser( - (UserGroupInformation)callStatic("createUserForTesting", - new Class[]{String.class, String[].class}, - new Object[]{name, groups}) - ); - } catch (RuntimeException re) { - throw re; - } catch (Exception e) { - throw new UndeclaredThrowableException(e, - "Error creating secure test user"); - } - } - - /** - * Obtain credentials for the current process using the configured - * Kerberos keytab file and principal. - * @see User#login(org.apache.hadoop.conf.Configuration, String, String, String) - * - * @param conf the Configuration to use - * @param fileConfKey Configuration property key used to store the path - * to the keytab file - * @param principalConfKey Configuration property key used to store the - * principal name to login as - * @param localhost the local hostname - */ - public static void login(Configuration conf, String fileConfKey, - String principalConfKey, String localhost) throws IOException { - if (isSecurityEnabled()) { - // check for SecurityUtil class - try { - Class c = Class.forName("org.apache.hadoop.security.SecurityUtil"); - Class[] types = new Class[]{ - Configuration.class, String.class, String.class, String.class }; - Object[] args = new Object[]{ - conf, fileConfKey, principalConfKey, localhost }; - Methods.call(c, null, "login", types, args); - } catch (ClassNotFoundException cnfe) { - throw new RuntimeException("Unable to login using " + - "org.apache.hadoop.security.SecurityUtil.login(). SecurityUtil class " + - "was not found! Is this a version of secure Hadoop?", cnfe); - } catch (IOException ioe) { - throw ioe; - } catch (RuntimeException re) { - throw re; - } catch (Exception e) { - throw new UndeclaredThrowableException(e, - "Unhandled exception in User.login()"); - } - } - } - - /** - * Returns the result of {@code UserGroupInformation.isSecurityEnabled()}. 
- */ - public static boolean isSecurityEnabled() { - try { - return (Boolean)callStatic("isSecurityEnabled"); - } catch (RuntimeException re) { - throw re; - } catch (Exception e) { - throw new UndeclaredThrowableException(e, - "Unexpected exception calling UserGroupInformation.isSecurityEnabled()"); - } - } - } - - /* Reflection helper methods */ - private static Object callStatic(String methodName) throws Exception { - return call(null, methodName, null, null); - } - - private static Object callStatic(String methodName, Class[] types, - Object[] args) throws Exception { - return call(null, methodName, types, args); - } - - private static Object call(UserGroupInformation instance, String methodName, - Class[] types, Object[] args) throws Exception { - return Methods.call(UserGroupInformation.class, instance, methodName, types, - args); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java deleted file mode 100644 index a7a3bc9..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.security.access; - -import com.google.common.collect.Maps; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.VersionedWritable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.Arrays; -import java.util.Map; - -/** - * Base permissions instance representing the ability to perform a given set - * of actions. - * - * @see TablePermission - */ -public class Permission extends VersionedWritable { - protected static final byte VERSION = 0; - public enum Action { - READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A'); - - private byte code; - Action(char code) { - this.code = (byte)code; - } - - public byte code() { return code; } - } - - private static Log LOG = LogFactory.getLog(Permission.class); - protected static Map ACTION_BY_CODE = Maps.newHashMap(); - - protected Action[] actions; - - static { - for (Action a : Action.values()) { - ACTION_BY_CODE.put(a.code(), a); - } - } - - /** Empty constructor for Writable implementation. Do not use. */ - public Permission() { - super(); - } - - public Permission(Action... 
assigned) { - if (assigned != null && assigned.length > 0) { - actions = Arrays.copyOf(assigned, assigned.length); - } - } - - public Permission(byte[] actionCodes) { - if (actionCodes != null) { - Action acts[] = new Action[actionCodes.length]; - int j = 0; - for (int i=0; i 0) { - actions = new Action[length]; - for (int i = 0; i < length; i++) { - byte b = in.readByte(); - Action a = ACTION_BY_CODE.get(b); - if (a == null) { - throw new IOException("Unknown action code '"+ - Bytes.toStringBinary(new byte[]{b})+"' in input"); - } - this.actions[i] = a; - } - } else { - actions = new Action[0]; - } - } - - @Override - public void write(DataOutput out) throws IOException { - super.write(out); - out.writeByte(actions != null ? actions.length : 0); - if (actions != null) { - for (Action a: actions) { - out.writeByte(a.code()); - } - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java deleted file mode 100644 index f00c54a..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java +++ /dev/null @@ -1,307 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.security.access; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.util.Bytes; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -/** - * Represents an authorization for access for the given actions, optionally - * restricted to the given column family or column qualifier, over the - * given table. If the family property is null, it implies - * full table access. - */ -public class TablePermission extends Permission { - private static Log LOG = LogFactory.getLog(TablePermission.class); - - private byte[] table; - private byte[] family; - private byte[] qualifier; - - /** Nullary constructor for Writable, do not use */ - public TablePermission() { - super(); - } - - /** - * Create a new permission for the given table and (optionally) column family, - * allowing the given actions. - * @param table the table - * @param family the family, can be null if a global permission on the table - * @param assigned the list of allowed actions - */ - public TablePermission(byte[] table, byte[] family, Action... assigned) { - this(table, family, null, assigned); - } - - /** - * Creates a new permission for the given table, restricted to the given - * column family and qualifer, allowing the assigned actions to be performed. 
- * @param table the table - * @param family the family, can be null if a global permission on the table - * @param assigned the list of allowed actions - */ - public TablePermission(byte[] table, byte[] family, byte[] qualifier, - Action... assigned) { - super(assigned); - this.table = table; - this.family = family; - this.qualifier = qualifier; - } - - /** - * Creates a new permission for the given table, family and column qualifier, - * allowing the actions matching the provided byte codes to be performed. - * @param table the table - * @param family the family, can be null if a global permission on the table - * @param actionCodes the list of allowed action codes - */ - public TablePermission(byte[] table, byte[] family, byte[] qualifier, - byte[] actionCodes) { - super(actionCodes); - this.table = table; - this.family = family; - this.qualifier = qualifier; - } - - public boolean hasTable() { - return table != null; - } - - public byte[] getTable() { - return table; - } - - public boolean hasFamily() { - return family != null; - } - - public byte[] getFamily() { - return family; - } - - public boolean hasQualifier() { - return qualifier != null; - } - - public byte[] getQualifier() { - return qualifier; - } - - /** - * Checks that a given table operation is authorized by this permission - * instance. - * - * @param table the table where the operation is being performed - * @param family the column family to which the operation is restricted, - * if null implies "all" - * @param qualifier the column qualifier to which the action is restricted, - * if null implies "all" - * @param action the action being requested - * @return true if the action within the given scope is allowed - * by this permission, false - */ - public boolean implies(byte[] table, byte[] family, byte[] qualifier, - Action action) { - if (!Bytes.equals(this.table, table)) { - return false; - } - - if (this.family != null && - (family == null || - !Bytes.equals(this.family, family))) { - return false; - } - - if (this.qualifier != null && - (qualifier == null || - !Bytes.equals(this.qualifier, qualifier))) { - return false; - } - - // check actions - return super.implies(action); - } - - /** - * Checks if this permission grants access to perform the given action on - * the given table and key value. - * @param table the table on which the operation is being performed - * @param kv the KeyValue on which the operation is being requested - * @param action the action requested - * @return true if the action is allowed over the given scope - * by this permission, otherwise false - */ - public boolean implies(byte[] table, KeyValue kv, Action action) { - if (!Bytes.equals(this.table, table)) { - return false; - } - - if (family != null && - (Bytes.compareTo(family, 0, family.length, - kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength()) != 0)) { - return false; - } - - if (qualifier != null && - (Bytes.compareTo(qualifier, 0, qualifier.length, - kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength()) != 0)) { - return false; - } - - // check actions - return super.implies(action); - } - - /** - * Returns true if this permission matches the given column - * family at least. This only indicates a partial match against the table - * and column family, however, and does not guarantee that implies() for the - * column same family would return true. In the case of a - * column-qualifier specific permission, for example, implies() would still - * return false. 
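A short sketch of the scoping rules just described: a family-scoped permission implies actions on any qualifier in that family, while a qualifier-scoped one only matches that exact column (though matchesFamily still reports the partial match). Table, family, and qualifier names are illustrative.

    import org.apache.hadoop.hbase.security.access.Permission.Action;
    import org.apache.hadoop.hbase.security.access.TablePermission;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TablePermissionSketch {
      public static void main(String[] args) {
        byte[] table = Bytes.toBytes("orders");
        byte[] family = Bytes.toBytes("info");

        // Family-scoped grant: READ on orders:info, any qualifier.
        TablePermission familyRead =
            new TablePermission(table, family, Action.READ);
        System.out.println(
            familyRead.implies(table, family, Bytes.toBytes("status"), Action.READ));  // true
        System.out.println(
            familyRead.implies(table, family, Bytes.toBytes("status"), Action.WRITE)); // false

        // Qualifier-scoped grant only matches that exact column.
        TablePermission cellRead = new TablePermission(
            table, family, Bytes.toBytes("status"), Action.READ);
        System.out.println(
            cellRead.implies(table, family, Bytes.toBytes("total"), Action.READ));     // false
        System.out.println(cellRead.matchesFamily(table, family, Action.READ));        // true
      }
    }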
- */ - public boolean matchesFamily(byte[] table, byte[] family, Action action) { - if (!Bytes.equals(this.table, table)) { - return false; - } - - if (this.family != null && - (family == null || - !Bytes.equals(this.family, family))) { - return false; - } - - // ignore qualifier - // check actions - return super.implies(action); - } - - /** - * Returns if the given permission matches the given qualifier. - * @param table the table name to match - * @param family the column family to match - * @param qualifier the qualifier name to match - * @param action the action requested - * @return true if the table, family and qualifier match, - * otherwise false - */ - public boolean matchesFamilyQualifier(byte[] table, byte[] family, byte[] qualifier, - Action action) { - if (!matchesFamily(table, family, action)) { - return false; - } else { - if (this.qualifier != null && - (qualifier == null || - !Bytes.equals(this.qualifier, qualifier))) { - return false; - } - } - return super.implies(action); - } - - @Override - public boolean equals(Object obj) { - if (!(obj instanceof TablePermission)) { - return false; - } - TablePermission other = (TablePermission)obj; - - if (!(Bytes.equals(table, other.getTable()) && - ((family == null && other.getFamily() == null) || - Bytes.equals(family, other.getFamily())) && - ((qualifier == null && other.getQualifier() == null) || - Bytes.equals(qualifier, other.getQualifier())) - )) { - return false; - } - - // check actions - return super.equals(other); - } - - @Override - public int hashCode() { - final int prime = 37; - int result = super.hashCode(); - if (table != null) { - result = prime * result + Bytes.hashCode(table); - } - if (family != null) { - result = prime * result + Bytes.hashCode(family); - } - if (qualifier != null) { - result = prime * result + Bytes.hashCode(qualifier); - } - return result; - } - - public String toString() { - StringBuilder str = new StringBuilder("[TablePermission: ") - .append("table=").append(Bytes.toString(table)) - .append(", family=").append(Bytes.toString(family)) - .append(", qualifier=").append(Bytes.toString(qualifier)) - .append(", actions="); - if (actions != null) { - for (int i=0; i 0) - str.append(","); - if (actions[i] != null) - str.append(actions[i].toString()); - else - str.append("NULL"); - } - } - str.append("]"); - - return str.toString(); - } - - @Override - public void readFields(DataInput in) throws IOException { - super.readFields(in); - table = Bytes.readByteArray(in); - if (in.readBoolean()) { - family = Bytes.readByteArray(in); - } - if (in.readBoolean()) { - qualifier = Bytes.readByteArray(in); - } - } - - @Override - public void write(DataOutput out) throws IOException { - super.write(out); - Bytes.writeByteArray(out, table); - out.writeBoolean(family != null); - if (family != null) { - Bytes.writeByteArray(out, family); - } - out.writeBoolean(qualifier != null); - if (qualifier != null) { - Bytes.writeByteArray(out, qualifier); - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java deleted file mode 100644 index fd5b755..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.security.access; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.util.Bytes; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -/** - * Represents an authorization for access over the given table, column family - * plus qualifier, for the given user. - */ -public class UserPermission extends TablePermission { - private static Log LOG = LogFactory.getLog(UserPermission.class); - - private byte[] user; - - /** Nullary constructor for Writable, do not use */ - public UserPermission() { - super(); - } - - /** - * Creates a new instance for the given user. - * @param user the user - * @param assigned the list of allowed actions - */ - public UserPermission(byte[] user, Action... assigned) { - super(null, null, null, assigned); - this.user = user; - } - - /** - * Creates a new instance for the given user, - * matching the actions with the given codes. - * @param user the user - * @param actionCodes the list of allowed action codes - */ - public UserPermission(byte[] user, byte[] actionCodes) { - super(null, null, null, actionCodes); - this.user = user; - } - - /** - * Creates a new instance for the given user, table and column family. - * @param user the user - * @param table the table - * @param family the family, can be null if action is allowed over the entire - * table - * @param assigned the list of allowed actions - */ - public UserPermission(byte[] user, byte[] table, byte[] family, - Action... assigned) { - super(table, family, assigned); - this.user = user; - } - - /** - * Creates a new permission for the given user, table, column family and - * column qualifier. - * @param user the user - * @param table the table - * @param family the family, can be null if action is allowed over the entire - * table - * @param qualifier the column qualifier, can be null if action is allowed - * over the entire column family - * @param assigned the list of allowed actions - */ - public UserPermission(byte[] user, byte[] table, byte[] family, - byte[] qualifier, Action... assigned) { - super(table, family, qualifier, assigned); - this.user = user; - } - - /** - * Creates a new instance for the given user, table, column family and - * qualifier, matching the actions with the given codes. 
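A brief sketch contrasting a global grant with a table-scoped one, using the constructors listed above and the isGlobal() check defined just below. User and table names are placeholders.

    import org.apache.hadoop.hbase.security.access.Permission.Action;
    import org.apache.hadoop.hbase.security.access.UserPermission;
    import org.apache.hadoop.hbase.util.Bytes;

    public class UserPermissionSketch {
      public static void main(String[] args) {
        // Global grant: no table, family or qualifier restriction.
        UserPermission globalAdmin =
            new UserPermission(Bytes.toBytes("alice"), Action.ADMIN);

        // Grant restricted to one table and column family.
        UserPermission tableRead = new UserPermission(Bytes.toBytes("bob"),
            Bytes.toBytes("orders"), Bytes.toBytes("info"), Action.READ);

        System.out.println(globalAdmin.isGlobal());               // true
        System.out.println(tableRead.isGlobal());                 // false
        System.out.println(Bytes.toString(tableRead.getUser()));  // bob
      }
    }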
- * @param user the user - * @param table the table - * @param family the family, can be null if action is allowed over the entire - * table - * @param qualifier the column qualifier, can be null if action is allowed - * over the entire column family - * @param actionCodes the list of allowed action codes - */ - public UserPermission(byte[] user, byte[] table, byte[] family, - byte[] qualifier, byte[] actionCodes) { - super(table, family, qualifier, actionCodes); - this.user = user; - } - - public byte[] getUser() { - return user; - } - - /** - * Returns true if this permission describes a global user permission. - */ - public boolean isGlobal() { - byte[] tableName = getTable(); - return(tableName == null || tableName.length == 0); - } - - @Override - public boolean equals(Object obj) { - if (!(obj instanceof UserPermission)) { - return false; - } - UserPermission other = (UserPermission)obj; - - if ((Bytes.equals(user, other.getUser()) && - super.equals(obj))) { - return true; - } else { - return false; - } - } - - @Override - public int hashCode() { - final int prime = 37; - int result = super.hashCode(); - if (user != null) { - result = prime * result + Bytes.hashCode(user); - } - return result; - } - - public String toString() { - StringBuilder str = new StringBuilder("UserPermission: ") - .append("user=").append(Bytes.toString(user)) - .append(", ").append(super.toString()); - return str.toString(); - } - - @Override - public void readFields(DataInput in) throws IOException { - super.readFields(in); - user = Bytes.readByteArray(in); - } - - @Override - public void write(DataOutput out) throws IOException { - super.write(out); - Bytes.writeByteArray(out, user); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java deleted file mode 100644 index 83f824a..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.security.token; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -import com.google.protobuf.ByteString; -import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.WritableUtils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.TokenIdentifier; - -/** - * Represents the identity information stored in an HBase authentication token. 
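A minimal round-trip sketch for the identifier described here, using the constructor and Writable methods defined just below (the on-wire form is protobuf-encoded internally). The username, key id, and lifetime values are illustrative.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;

    public class TokenIdentifierSketch {
      public static void main(String[] args) throws IOException {
        long now = System.currentTimeMillis();
        AuthenticationTokenIdentifier id =
            new AuthenticationTokenIdentifier("alice", 42, now, now + 86400000L);

        // Serialize through the Writable interface.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        id.write(new DataOutputStream(buf));

        // Read it back into a fresh instance and compare.
        AuthenticationTokenIdentifier copy = new AuthenticationTokenIdentifier();
        copy.readFields(new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray())));

        System.out.println(copy.getUsername());  // alice
        System.out.println(copy.getKeyId());     // 42
        System.out.println(id.equals(copy));     // true
        System.out.println(copy.getKind());      // HBASE_AUTH_TOKEN
      }
    }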
- */ -public class AuthenticationTokenIdentifier extends TokenIdentifier { - public static final Text AUTH_TOKEN_TYPE = new Text("HBASE_AUTH_TOKEN"); - - protected String username; - protected int keyId; - protected long issueDate; - protected long expirationDate; - protected long sequenceNumber; - - public AuthenticationTokenIdentifier() { - } - - public AuthenticationTokenIdentifier(String username) { - this.username = username; - } - - public AuthenticationTokenIdentifier(String username, int keyId, - long issueDate, long expirationDate) { - this.username = username; - this.keyId = keyId; - this.issueDate = issueDate; - this.expirationDate = expirationDate; - } - - @Override - public Text getKind() { - return AUTH_TOKEN_TYPE; - } - - @Override - public UserGroupInformation getUser() { - if (username == null || "".equals(username)) { - return null; - } - return UserGroupInformation.createRemoteUser(username); - } - - public String getUsername() { - return username; - } - - void setUsername(String name) { - this.username = name; - } - - public int getKeyId() { - return keyId; - } - - void setKeyId(int id) { - this.keyId = id; - } - - public long getIssueDate() { - return issueDate; - } - - void setIssueDate(long timestamp) { - this.issueDate = timestamp; - } - - public long getExpirationDate() { - return expirationDate; - } - - void setExpirationDate(long timestamp) { - this.expirationDate = timestamp; - } - - public long getSequenceNumber() { - return sequenceNumber; - } - - void setSequenceNumber(long seq) { - this.sequenceNumber = seq; - } - - public byte[] toBytes() { - AuthenticationProtos.TokenIdentifier.Builder builder = - AuthenticationProtos.TokenIdentifier.newBuilder(); - builder.setKind(AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN); - if (username != null) { - builder.setUsername(ByteString.copyFromUtf8(username)); - } - builder.setIssueDate(issueDate) - .setExpirationDate(expirationDate) - .setKeyId(keyId) - .setSequenceNumber(sequenceNumber); - return builder.build().toByteArray(); - } - - @Override - public void write(DataOutput out) throws IOException { - byte[] pbBytes = toBytes(); - out.writeInt(pbBytes.length); - out.write(pbBytes); - } - - @Override - public void readFields(DataInput in) throws IOException { - int len = in.readInt(); - byte[] inBytes = new byte[len]; - in.readFully(inBytes); - AuthenticationProtos.TokenIdentifier identifier = - AuthenticationProtos.TokenIdentifier.newBuilder().mergeFrom(inBytes).build(); - // sanity check on type - if (!identifier.hasKind() || - identifier.getKind() != AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN) { - throw new IOException("Invalid TokenIdentifier kind from input "+identifier.getKind()); - } - - // copy the field values - if (identifier.hasUsername()) { - username = identifier.getUsername().toStringUtf8(); - } - if (identifier.hasKeyId()) { - keyId = identifier.getKeyId(); - } - if (identifier.hasIssueDate()) { - issueDate = identifier.getIssueDate(); - } - if (identifier.hasExpirationDate()) { - expirationDate = identifier.getExpirationDate(); - } - if (identifier.hasSequenceNumber()) { - sequenceNumber = identifier.getSequenceNumber(); - } - } - - @Override - public boolean equals(Object other) { - if (other == null) { - return false; - } - if (other instanceof AuthenticationTokenIdentifier) { - AuthenticationTokenIdentifier ident = (AuthenticationTokenIdentifier)other; - return sequenceNumber == ident.getSequenceNumber() - && keyId == ident.getKeyId() - && issueDate == ident.getIssueDate() 
- && expirationDate == ident.getExpirationDate() - && (username == null ? ident.getUsername() == null : - username.equals(ident.getUsername())); - } - return false; - } - - @Override - public int hashCode() { - return (int)sequenceNumber; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java deleted file mode 100644 index 6b71f3a..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.security.token; - -import java.util.Collection; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.security.token.TokenSelector; - -public class AuthenticationTokenSelector - implements TokenSelector { - private static Log LOG = LogFactory.getLog(AuthenticationTokenSelector.class); - - public AuthenticationTokenSelector() { - } - - @Override - public Token selectToken(Text serviceName, - Collection> tokens) { - if (serviceName != null) { - for (Token ident : tokens) { - if (serviceName.equals(ident.getService()) && - AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(ident.getKind())) { - if (LOG.isDebugEnabled()) { - LOG.debug("Returning token "+ident); - } - return (Token)ident; - } - } - } - LOG.debug("No matching token found"); - return null; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/Addressing.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/Addressing.java deleted file mode 100644 index caf78f3..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/Addressing.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import java.net.InetSocketAddress; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Utility for network addresses, resolving and naming. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class Addressing { - public static final String VALID_PORT_REGEX = "[\\d]+"; - public static final String HOSTNAME_PORT_SEPARATOR = ":"; - - /** - * @param hostAndPort Formatted as <hostname> ':' <port> - * @return An InetSocketInstance - */ - public static InetSocketAddress createInetSocketAddressFromHostAndPortStr( - final String hostAndPort) { - return new InetSocketAddress(parseHostname(hostAndPort), parsePort(hostAndPort)); - } - - /** - * @param hostname Server hostname - * @param port Server port - * @return Returns a concatenation of hostname and - * port in following - * form: <hostname> ':' <port>. For example, if hostname - * is example.org and port is 1234, this method will return - * example.org:1234 - */ - public static String createHostAndPortStr(final String hostname, final int port) { - return hostname + HOSTNAME_PORT_SEPARATOR + port; - } - - /** - * @param hostAndPort Formatted as <hostname> ':' <port> - * @return The hostname portion of hostAndPort - */ - public static String parseHostname(final String hostAndPort) { - int colonIndex = hostAndPort.lastIndexOf(HOSTNAME_PORT_SEPARATOR); - if (colonIndex < 0) { - throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort); - } - return hostAndPort.substring(0, colonIndex); - } - - /** - * @param hostAndPort Formatted as <hostname> ':' <port> - * @return The port portion of hostAndPort - */ - public static int parsePort(final String hostAndPort) { - int colonIndex = hostAndPort.lastIndexOf(HOSTNAME_PORT_SEPARATOR); - if (colonIndex < 0) { - throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort); - } - return Integer.parseInt(hostAndPort.substring(colonIndex + 1)); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/Classes.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/Classes.java deleted file mode 100644 index 76551d9..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/Classes.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.util; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Utilities for class manipulation. 
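A small sketch of the helpers deleted just below: extendedForName resolves primitive type names that plain Class.forName would reject, and stringify joins class names for logging.

    import org.apache.hadoop.hbase.util.Classes;

    public class ClassesSketch {
      public static void main(String[] args) throws ClassNotFoundException {
        // Class.forName("int") would throw; extendedForName maps primitive
        // names to their Class objects and falls back to Class.forName otherwise.
        Class<?> intType = Classes.extendedForName("int");
        Class<?> longType = Classes.extendedForName("long");
        Class<?> listType = Classes.extendedForName("java.util.ArrayList");

        System.out.println(intType.isPrimitive());  // true
        System.out.println(
            Classes.stringify(new Class[] { intType, longType, listType }));
        // -> int,long,java.util.ArrayList
      }
    }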
- */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class Classes { - - /** - * Equivalent of {@link Class#forName(String)} which also returns classes for - * primitives like boolean, etc. - * - * @param className - * The name of the class to retrieve. Can be either a normal class or - * a primitive class. - * @return The class specified by className - * @throws ClassNotFoundException - * If the requested class can not be found. - */ - public static Class extendedForName(String className) - throws ClassNotFoundException { - Class valueType; - if (className.equals("boolean")) { - valueType = boolean.class; - } else if (className.equals("byte")) { - valueType = byte.class; - } else if (className.equals("short")) { - valueType = short.class; - } else if (className.equals("int")) { - valueType = int.class; - } else if (className.equals("long")) { - valueType = long.class; - } else if (className.equals("float")) { - valueType = float.class; - } else if (className.equals("double")) { - valueType = double.class; - } else if (className.equals("char")) { - valueType = char.class; - } else { - valueType = Class.forName(className); - } - return valueType; - } - - public static String stringify(Class[] classes) { - StringBuilder buf = new StringBuilder(); - if (classes != null) { - for (Class c : classes) { - if (buf.length() > 0) { - buf.append(","); - } - buf.append(c.getName()); - } - } else { - buf.append("NULL"); - } - return buf.toString(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HasThread.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HasThread.java deleted file mode 100644 index dbf9164..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HasThread.java +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import java.lang.Thread.UncaughtExceptionHandler; - -import org.apache.hadoop.classification.InterfaceAudience; - -/** - * Abstract class which contains a Thread and delegates the common Thread - * methods to that instance. - * - * The purpose of this class is to workaround Sun JVM bug #6915621, in which - * something internal to the JDK uses Thread.currentThread() as a monitor - * lock. This can produce deadlocks like HBASE-4367, HBASE-4101, etc. 
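A minimal subclass sketch for the wrapper described above: the worker composes a Thread rather than extending one, and the lifecycle calls (setDaemon, start, interrupt, join) are delegated through the methods defined just below. The loop body and names are placeholders.

    import org.apache.hadoop.hbase.util.HasThread;

    public class HeartbeatSketch extends HasThread {
      private volatile boolean stopped = false;

      @Override
      public void run() {
        while (!stopped && !isInterrupted()) {
          try {
            Thread.sleep(1000);   // periodic work would go here
          } catch (InterruptedException e) {
            break;
          }
        }
      }

      public static void main(String[] args) throws InterruptedException {
        HeartbeatSketch worker = new HeartbeatSketch();
        worker.setDaemon(true);    // delegated to the wrapped Thread
        worker.setName("heartbeat");
        worker.start();
        Thread.sleep(100);
        worker.stopped = true;
        worker.interrupt();
        worker.join();
      }
    }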
- */ -@InterfaceAudience.Private -public abstract class HasThread implements Runnable { - private final Thread thread; - - public HasThread() { - this.thread = new Thread(this); - } - - public HasThread(String name) { - this.thread = new Thread(this, name); - } - - public Thread getThread() { - return thread; - } - - public abstract void run(); - - //// Begin delegation to Thread - - public final String getName() { - return thread.getName(); - } - - public void interrupt() { - thread.interrupt(); - } - - public final boolean isAlive() { - return thread.isAlive(); - } - - public boolean isInterrupted() { - return thread.isInterrupted(); - } - - public final void setDaemon(boolean on) { - thread.setDaemon(on); - } - - public final void setName(String name) { - thread.setName(name); - } - - public final void setPriority(int newPriority) { - thread.setPriority(newPriority); - } - - public void setUncaughtExceptionHandler(UncaughtExceptionHandler eh) { - thread.setUncaughtExceptionHandler(eh); - } - - public void start() { - thread.start(); - } - - public final void join() throws InterruptedException { - thread.join(); - } - - public final void join(long millis, int nanos) throws InterruptedException { - thread.join(millis, nanos); - } - - public final void join(long millis) throws InterruptedException { - thread.join(millis); - } - //// End delegation to Thread -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/Hash.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/Hash.java deleted file mode 100644 index b5addf2..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/Hash.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.util; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; - -/** - * This class represents a common API for hashing functions. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public abstract class Hash { - /** Constant to denote invalid hash type. */ - public static final int INVALID_HASH = -1; - /** Constant to denote {@link JenkinsHash}. */ - public static final int JENKINS_HASH = 0; - /** Constant to denote {@link MurmurHash}. */ - public static final int MURMUR_HASH = 1; - - /** - * This utility method converts String representation of hash function name - * to a symbolic constant. Currently two function types are supported, - * "jenkins" and "murmur". 
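A hedged usage sketch for HasThread (removed above): client code subclasses it and drives the wrapped Thread through the delegating methods, never synchronizing on the Thread itself. The Worker name below is invented.

import org.apache.hadoop.hbase.util.HasThread;

public class Worker extends HasThread {
  public Worker() {
    super("example-worker");   // names the wrapped Thread
  }

  @Override
  public void run() {
    System.out.println("running in " + getName());
  }

  public static void main(String[] args) throws InterruptedException {
    Worker worker = new Worker();
    worker.setDaemon(false);
    worker.start();   // starts the wrapped Thread, not this object directly
    worker.join();
  }
}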
- * @param name hash function name - * @return one of the predefined constants - */ - public static int parseHashType(String name) { - if ("jenkins".equalsIgnoreCase(name)) { - return JENKINS_HASH; - } else if ("murmur".equalsIgnoreCase(name)) { - return MURMUR_HASH; - } else { - return INVALID_HASH; - } - } - - /** - * This utility method converts the name of the configured - * hash type to a symbolic constant. - * @param conf configuration - * @return one of the predefined constants - */ - public static int getHashType(Configuration conf) { - String name = conf.get("hbase.hash.type", "murmur"); - return parseHashType(name); - } - - /** - * Get a singleton instance of hash function of a given type. - * @param type predefined hash type - * @return hash function instance, or null if type is invalid - */ - public static Hash getInstance(int type) { - switch(type) { - case JENKINS_HASH: - return JenkinsHash.getInstance(); - case MURMUR_HASH: - return MurmurHash.getInstance(); - default: - return null; - } - } - - /** - * Get a singleton instance of hash function of a type - * defined in the configuration. - * @param conf current configuration - * @return defined hash type, or null if type is invalid - */ - public static Hash getInstance(Configuration conf) { - int type = getHashType(conf); - return getInstance(type); - } - - /** - * Calculate a hash using all bytes from the input argument, and - * a seed of -1. - * @param bytes input bytes - * @return hash value - */ - public int hash(byte[] bytes) { - return hash(bytes, bytes.length, -1); - } - - /** - * Calculate a hash using all bytes from the input argument, - * and a provided seed value. - * @param bytes input bytes - * @param initval seed value - * @return hash value - */ - public int hash(byte[] bytes, int initval) { - return hash(bytes, 0, bytes.length, initval); - } - - /** - * Calculate a hash using bytes from 0 to length, and - * the provided seed value - * @param bytes input bytes - * @param length length of the valid bytes after offset to consider - * @param initval seed value - * @return hash value - */ - public int hash(byte[] bytes, int length, int initval) { - return hash(bytes, 0, length, initval); - } - - /** - * Calculate a hash using bytes from offset to offset + - * length, and the provided seed value. - * @param bytes input bytes - * @param offset the offset into the array to start consideration - * @param length length of the valid bytes after offset to consider - * @param initval seed value - * @return hash value - */ - public abstract int hash(byte[] bytes, int offset, int length, int initval); -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java deleted file mode 100644 index 2e768ed..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
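Not part of the patch, just a sketch of the Hash factory API shown above; it assumes the default value of hbase.hash.type is still "murmur" and that Bytes remains available alongside these utilities.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Hash;

public class HashExample {
  public static void main(String[] args) {
    byte[] key = Bytes.toBytes("row-0001");

    // Pick an implementation explicitly by symbolic constant...
    Hash jenkins = Hash.getInstance(Hash.JENKINS_HASH);
    int h1 = jenkins.hash(key);                    // whole array, seed -1

    // ...or let the configuration decide ("hbase.hash.type", default "murmur").
    Hash configured = Hash.getInstance(new Configuration());
    int h2 = configured.hash(key, 0, key.length, -1);

    System.out.println(h1 + " " + h2);
  }
}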
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import java.util.Arrays; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * This class encapsulates a byte array and overrides hashCode and equals so - * that it's identity is based on the data rather than the array instance. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class HashedBytes { - - private final byte[] bytes; - private final int hashCode; - - public HashedBytes(byte[] bytes) { - this.bytes = bytes; - hashCode = Bytes.hashCode(bytes); - } - - public byte[] getBytes() { - return bytes; - } - - @Override - public int hashCode() { - return hashCode; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null || getClass() != obj.getClass()) - return false; - HashedBytes other = (HashedBytes) obj; - return Arrays.equals(bytes, other.bytes); - } - - @Override - public String toString() { - return Bytes.toStringBinary(bytes); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java deleted file mode 100644 index 26fda22..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java +++ /dev/null @@ -1,261 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.util; - -import static java.lang.Integer.rotateLeft; - -import java.io.FileInputStream; -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Produces 32-bit hash for hash table lookup. - * - *
- * <pre>lookup3.c, by Bob Jenkins, May 2006, Public Domain.
- *
- * You can use this free for any purpose.  It's in the public domain.
- * It has no warranty.
- * </pre>
          - * - * @see lookup3.c - * @see Hash Functions (and how this - * function compares to others such as CRC, MD?, etc - * @see Has update on the - * Dr. Dobbs Article - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class JenkinsHash extends Hash { - private static final int BYTE_MASK = 0xff; - - private static JenkinsHash _instance = new JenkinsHash(); - - public static Hash getInstance() { - return _instance; - } - - /** - * taken from hashlittle() -- hash a variable-length key into a 32-bit value - * - * @param key the key (the unaligned variable-length array of bytes) - * @param nbytes number of bytes to include in hash - * @param initval can be any integer value - * @return a 32-bit value. Every bit of the key affects every bit of the - * return value. Two keys differing by one or two bits will have totally - * different hash values. - * - *

- * The best hash table sizes are powers of 2. There is no need to do mod
- * a prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask.
- * For example, if you need only 10 bits, do
- * h = (h & hashmask(10));
- * In which case, the hash table should have hashsize(10) elements.
- *
- * If you are hashing n strings byte[][] k, do it like this:
- * for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h);
- *
- * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
- * code any way you wish, private, educational, or commercial. It's free.
- *
          Use for hash table lookup, or anything where one collision in 2^^32 is - * acceptable. Do NOT use for cryptographic purposes. - */ - @Override - @SuppressWarnings("fallthrough") - public int hash(byte[] key, int off, int nbytes, int initval) { - int length = nbytes; - int a, b, c; - a = b = c = 0xdeadbeef + length + initval; - int offset = off; - for (; length > 12; offset += 12, length -= 12) { - a += (key[offset] & BYTE_MASK); - a += ((key[offset + 1] & BYTE_MASK) << 8); - a += ((key[offset + 2] & BYTE_MASK) << 16); - a += ((key[offset + 3] & BYTE_MASK) << 24); - b += (key[offset + 4] & BYTE_MASK); - b += ((key[offset + 5] & BYTE_MASK) << 8); - b += ((key[offset + 6] & BYTE_MASK) << 16); - b += ((key[offset + 7] & BYTE_MASK) << 24); - c += (key[offset + 8] & BYTE_MASK); - c += ((key[offset + 9] & BYTE_MASK) << 8); - c += ((key[offset + 10] & BYTE_MASK) << 16); - c += ((key[offset + 11] & BYTE_MASK) << 24); - - /* - * mix -- mix 3 32-bit values reversibly. - * This is reversible, so any information in (a,b,c) before mix() is - * still in (a,b,c) after mix(). - * - * If four pairs of (a,b,c) inputs are run through mix(), or through - * mix() in reverse, there are at least 32 bits of the output that - * are sometimes the same for one pair and different for another pair. - * - * This was tested for: - * - pairs that differed by one bit, by two bits, in any combination - * of top bits of (a,b,c), or in any combination of bottom bits of - * (a,b,c). - * - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed - * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as - * is commonly produced by subtraction) look like a single 1-bit - * difference. - * - the base values were pseudorandom, all zero but one bit set, or - * all zero plus a counter that starts at zero. - * - * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that - * satisfy this are - * 4 6 8 16 19 4 - * 9 15 3 18 27 15 - * 14 9 3 7 17 3 - * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for - * "differ" defined as + with a one-bit base and a two-bit delta. I - * used http://burtleburtle.net/bob/hash/avalanche.html to choose - * the operations, constants, and arrangements of the variables. - * - * This does not achieve avalanche. There are input bits of (a,b,c) - * that fail to affect some output bits of (a,b,c), especially of a. - * The most thoroughly mixed value is c, but it doesn't really even - * achieve avalanche in c. - * - * This allows some parallelism. Read-after-writes are good at doubling - * the number of bits affected, so the goal of mixing pulls in the - * opposite direction as the goal of parallelism. I did what I could. - * Rotates seem to cost as much as shifts on every machine I could lay - * my hands on, and rotates are much kinder to the top and bottom bits, - * so I used rotates. 
- * - * #define mix(a,b,c) \ - * { \ - * a -= c; a ^= rot(c, 4); c += b; \ - * b -= a; b ^= rot(a, 6); a += c; \ - * c -= b; c ^= rot(b, 8); b += a; \ - * a -= c; a ^= rot(c,16); c += b; \ - * b -= a; b ^= rot(a,19); a += c; \ - * c -= b; c ^= rot(b, 4); b += a; \ - * } - * - * mix(a,b,c); - */ - a -= c; a ^= rotateLeft(c, 4); c += b; - b -= a; b ^= rotateLeft(a, 6); a += c; - c -= b; c ^= rotateLeft(b, 8); b += a; - a -= c; a ^= rotateLeft(c, 16); c += b; - b -= a; b ^= rotateLeft(a, 19); a += c; - c -= b; c ^= rotateLeft(b, 4); b += a; - } - - //-------------------------------- last block: affect all 32 bits of (c) - switch (length) { // all the case statements fall through - case 12: - c += ((key[offset + 11] & BYTE_MASK) << 24); - case 11: - c += ((key[offset + 10] & BYTE_MASK) << 16); - case 10: - c += ((key[offset + 9] & BYTE_MASK) << 8); - case 9: - c += (key[offset + 8] & BYTE_MASK); - case 8: - b += ((key[offset + 7] & BYTE_MASK) << 24); - case 7: - b += ((key[offset + 6] & BYTE_MASK) << 16); - case 6: - b += ((key[offset + 5] & BYTE_MASK) << 8); - case 5: - b += (key[offset + 4] & BYTE_MASK); - case 4: - a += ((key[offset + 3] & BYTE_MASK) << 24); - case 3: - a += ((key[offset + 2] & BYTE_MASK) << 16); - case 2: - a += ((key[offset + 1] & BYTE_MASK) << 8); - case 1: - //noinspection PointlessArithmeticExpression - a += (key[offset + 0] & BYTE_MASK); - break; - case 0: - return c; - } - /* - * final -- final mixing of 3 32-bit values (a,b,c) into c - * - * Pairs of (a,b,c) values differing in only a few bits will usually - * produce values of c that look totally different. This was tested for - * - pairs that differed by one bit, by two bits, in any combination - * of top bits of (a,b,c), or in any combination of bottom bits of - * (a,b,c). - * - * - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed - * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as - * is commonly produced by subtraction) look like a single 1-bit - * difference. - * - * - the base values were pseudorandom, all zero but one bit set, or - * all zero plus a counter that starts at zero. - * - * These constants passed: - * 14 11 25 16 4 14 24 - * 12 14 25 16 4 14 24 - * and these came close: - * 4 8 15 26 3 22 24 - * 10 8 15 26 3 22 24 - * 11 8 15 26 3 22 24 - * - * #define final(a,b,c) \ - * { - * c ^= b; c -= rot(b,14); \ - * a ^= c; a -= rot(c,11); \ - * b ^= a; b -= rot(a,25); \ - * c ^= b; c -= rot(b,16); \ - * a ^= c; a -= rot(c,4); \ - * b ^= a; b -= rot(a,14); \ - * c ^= b; c -= rot(b,24); \ - * } - * - */ - c ^= b; c -= rotateLeft(b, 14); - a ^= c; a -= rotateLeft(c, 11); - b ^= a; b -= rotateLeft(a, 25); - c ^= b; c -= rotateLeft(b, 16); - a ^= c; a -= rotateLeft(c, 4); - b ^= a; b -= rotateLeft(a, 14); - c ^= b; c -= rotateLeft(b, 24); - return c; - } - - /** - * Compute the hash of the specified file - * @param args name of file to compute hash of. 
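Aside (illustrative, not in the patch): the seed parameter is what lets callers chain JenkinsHash over several buffers, the same pattern the file-hashing main() below applies block by block.

import org.apache.hadoop.hbase.util.Hash;
import org.apache.hadoop.hbase.util.JenkinsHash;

public class JenkinsHashChainExample {
  public static void main(String[] args) {
    Hash hash = JenkinsHash.getInstance();

    // Hash two buffers as one logical key by feeding the previous
    // result back in as the seed of the next call.
    byte[] part1 = "row".getBytes();
    byte[] part2 = "family".getBytes();
    int h = hash.hash(part1, 0);   // seed 0
    h = hash.hash(part2, h);       // chain: previous value becomes the seed
    System.out.println(h);
  }
}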
- * @throws IOException e - */ - public static void main(String[] args) throws IOException { - if (args.length != 1) { - System.err.println("Usage: JenkinsHash filename"); - System.exit(-1); - } - FileInputStream in = new FileInputStream(args[0]); - byte[] bytes = new byte[512]; - int value = 0; - JenkinsHash hash = new JenkinsHash(); - for (int length = in.read(bytes); length > 0; length = in.read(bytes)) { - value = hash.hash(bytes, length, value); - } - System.out.println(Math.abs(value)); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/Methods.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/Methods.java deleted file mode 100644 index 8f0a6e3..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/Methods.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.util; - -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.lang.reflect.UndeclaredThrowableException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -@InterfaceAudience.Public -@InterfaceStability.Stable -public class Methods { - private static Log LOG = LogFactory.getLog(Methods.class); - - public static Object call(Class clazz, T instance, String methodName, - Class[] types, Object[] args) throws Exception { - try { - Method m = clazz.getMethod(methodName, types); - return m.invoke(instance, args); - } catch (IllegalArgumentException arge) { - LOG.fatal("Constructed invalid call. class="+clazz.getName()+ - " method=" + methodName + " types=" + Classes.stringify(types), arge); - throw arge; - } catch (NoSuchMethodException nsme) { - throw new IllegalArgumentException( - "Can't find method "+methodName+" in "+clazz.getName()+"!", nsme); - } catch (InvocationTargetException ite) { - // unwrap the underlying exception and rethrow - if (ite.getTargetException() != null) { - if (ite.getTargetException() instanceof Exception) { - throw (Exception)ite.getTargetException(); - } else if (ite.getTargetException() instanceof Error) { - throw (Error)ite.getTargetException(); - } - } - throw new UndeclaredThrowableException(ite, - "Unknown exception invoking "+clazz.getName()+"."+methodName+"()"); - } catch (IllegalAccessException iae) { - throw new IllegalArgumentException( - "Denied access calling "+clazz.getName()+"."+methodName+"()", iae); - } catch (SecurityException se) { - LOG.fatal("SecurityException calling method. 
class="+clazz.getName()+ - " method=" + methodName + " types=" + Classes.stringify(types), se); - throw se; - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java deleted file mode 100644 index 9b498d1..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java +++ /dev/null @@ -1,92 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.util; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * This is a very fast, non-cryptographic hash suitable for general hash-based - * lookup. See http://murmurhash.googlepages.com/ for more details. - * - *

- * The C version of MurmurHash 2.0 found at that site was ported
- * to Java by Andrzej Bialecki (ab at getopt org).
          - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class MurmurHash extends Hash { - private static MurmurHash _instance = new MurmurHash(); - - public static Hash getInstance() { - return _instance; - } - - @Override - public int hash(byte[] data, int offset, int length, int seed) { - int m = 0x5bd1e995; - int r = 24; - - int h = seed ^ length; - - int len_4 = length >> 2; - - for (int i = 0; i < len_4; i++) { - int i_4 = (i << 2) + offset; - int k = data[i_4 + 3]; - k = k << 8; - k = k | (data[i_4 + 2] & 0xff); - k = k << 8; - k = k | (data[i_4 + 1] & 0xff); - k = k << 8; - //noinspection PointlessArithmeticExpression - k = k | (data[i_4 + 0] & 0xff); - k *= m; - k ^= k >>> r; - k *= m; - h *= m; - h ^= k; - } - - // avoid calculating modulo - int len_m = len_4 << 2; - int left = length - len_m; - int i_m = len_m + offset; - - if (left != 0) { - if (left >= 3) { - h ^= data[i_m + 2] << 16; - } - if (left >= 2) { - h ^= data[i_m + 1] << 8; - } - if (left >= 1) { - h ^= data[i_m]; - } - - h *= m; - } - - h ^= h >>> 13; - h *= m; - h ^= h >>> 15; - - return h; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/Pair.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/Pair.java deleted file mode 100644 index ecfc308..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/Pair.java +++ /dev/null @@ -1,135 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.util; - -import java.io.Serializable; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * A generic class for pairs. - * @param - * @param - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class Pair implements Serializable -{ - private static final long serialVersionUID = -3986244606585552569L; - protected T1 first = null; - protected T2 second = null; - - /** - * Default constructor. - */ - public Pair() - { - } - - /** - * Constructor - * @param a operand - * @param b operand - */ - public Pair(T1 a, T2 b) - { - this.first = a; - this.second = b; - } - - /** - * Constructs a new pair, inferring the type via the passed arguments - * @param type for first - * @param type for second - * @param a first element - * @param b second element - * @return a new pair containing the passed arguments - */ - public static Pair newPair(T1 a, T2 b) { - return new Pair(a, b); - } - - /** - * Replace the first element of the pair. - * @param a operand - */ - public void setFirst(T1 a) - { - this.first = a; - } - - /** - * Replace the second element of the pair. 
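Illustrative sketch only: MurmurHash (deleted above) behind the same Hash API, with the seed used to derive independent hash values from one key.

import org.apache.hadoop.hbase.util.Hash;
import org.apache.hadoop.hbase.util.MurmurHash;

public class MurmurHashExample {
  public static void main(String[] args) {
    byte[] data = "hello world".getBytes();

    Hash murmur = MurmurHash.getInstance();
    // Same bytes + same seed is deterministic; changing the seed gives an
    // independent value, the usual trick for multi-probe structures.
    int h1 = murmur.hash(data, 0, data.length, 0);
    int h2 = murmur.hash(data, 0, data.length, 0xe17a1465);
    System.out.println(h1 + " " + h2);
  }
}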
- * @param b operand - */ - public void setSecond(T2 b) - { - this.second = b; - } - - /** - * Return the first element stored in the pair. - * @return T1 - */ - public T1 getFirst() - { - return first; - } - - /** - * Return the second element stored in the pair. - * @return T2 - */ - public T2 getSecond() - { - return second; - } - - private static boolean equals(Object x, Object y) - { - return (x == null && y == null) || (x != null && x.equals(y)); - } - - @Override - @SuppressWarnings("unchecked") - public boolean equals(Object other) - { - return other instanceof Pair && equals(first, ((Pair)other).first) && - equals(second, ((Pair)other).second); - } - - @Override - public int hashCode() - { - if (first == null) - return (second == null) ? 0 : second.hashCode() + 1; - else if (second == null) - return first.hashCode() + 2; - else - return first.hashCode() * 17 + second.hashCode(); - } - - @Override - public String toString() - { - return "{" + getFirst() + "," + getSecond() + "}"; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java deleted file mode 100644 index 2da2a3a..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java +++ /dev/null @@ -1,115 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.util; - -import java.util.Iterator; - -import org.apache.commons.lang.NotImplementedException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * A generic, immutable class for pairs of objects both of type T. - * @param - * @see Pair if Types differ. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class PairOfSameType implements Iterable { - private final T first; - private final T second; - - /** - * Constructor - * @param a operand - * @param b operand - */ - public PairOfSameType(T a, T b) { - this.first = a; - this.second = b; - } - - /** - * Return the first element stored in the pair. - * @return T - */ - public T getFirst() { - return first; - } - - /** - * Return the second element stored in the pair. - * @return T - */ - public T getSecond() { - return second; - } - - private static boolean equals(Object x, Object y) { - return (x == null && y == null) || (x != null && x.equals(y)); - } - - @Override - @SuppressWarnings("unchecked") - public boolean equals(Object other) { - return other instanceof PairOfSameType && - equals(first, ((PairOfSameType)other).first) && - equals(second, ((PairOfSameType)other).second); - } - - @Override - public int hashCode() { - if (first == null) - return (second == null) ? 
0 : second.hashCode() + 1; - else if (second == null) - return first.hashCode() + 2; - else - return first.hashCode() * 17 + second.hashCode(); - } - - @Override - public String toString() { - return "{" + getFirst() + "," + getSecond() + "}"; - } - - @Override - public Iterator iterator() { - return new Iterator() { - private int returned = 0; - - @Override - public boolean hasNext() { - return this.returned < 2; - } - - @Override - public T next() { - if (++this.returned == 1) return getFirst(); - else if (this.returned == 2) return getSecond(); - else throw new IllegalAccessError("this.returned=" + this.returned); - } - - @Override - public void remove() { - throw new NotImplementedException(); - } - }; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java deleted file mode 100644 index 364be66..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java +++ /dev/null @@ -1,451 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * - * The PoolMap maps a key to a collection of values, the elements - * of which are managed by a pool. In effect, that collection acts as a shared - * pool of resources, access to which is closely controlled as per the semantics - * of the pool. - * - *
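For orientation while reviewing (not part of the patch): the Pair and PairOfSameType holders deleted above in typical use; the names below are invented.

import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;

public class PairExample {
  public static void main(String[] args) {
    // Pair holds two values of different types; newPair infers both.
    Pair<String, Integer> hostAndPort = Pair.newPair("example.org", 1234);
    System.out.println(hostAndPort.getFirst() + ":" + hostAndPort.getSecond());

    // PairOfSameType is immutable and iterable over its two elements.
    PairOfSameType<String> halves = new PairOfSameType<String>("left", "right");
    for (String h : halves) {
      System.out.println(h);
    }
  }
}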

          - * In case the size of the pool is set to a non-zero positive number, that is - * used to cap the number of resources that a pool may contain for any given - * key. A size of {@link Integer#MAX_VALUE} is interpreted as an unbounded pool. - *

          - * - * @param - * the type of the key to the resource - * @param - * the type of the resource being pooled - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class PoolMap implements Map { - private PoolType poolType; - - private int poolMaxSize; - - private Map> pools = new ConcurrentHashMap>(); - - public PoolMap(PoolType poolType) { - this.poolType = poolType; - } - - public PoolMap(PoolType poolType, int poolMaxSize) { - this.poolType = poolType; - this.poolMaxSize = poolMaxSize; - } - - @Override - public V get(Object key) { - Pool pool = pools.get(key); - return pool != null ? pool.get() : null; - } - - @Override - public V put(K key, V value) { - Pool pool = pools.get(key); - if (pool == null) { - pools.put(key, pool = createPool()); - } - return pool != null ? pool.put(value) : null; - } - - @SuppressWarnings("unchecked") - @Override - public V remove(Object key) { - Pool pool = pools.remove(key); - if (pool != null) { - remove((K) key, pool.get()); - } - return null; - } - - public boolean remove(K key, V value) { - Pool pool = pools.get(key); - boolean res = false; - if (pool != null) { - res = pool.remove(value); - if (res && pool.size() == 0) { - pools.remove(key); - } - } - return res; - } - - @Override - public Collection values() { - Collection values = new ArrayList(); - for (Pool pool : pools.values()) { - Collection poolValues = pool.values(); - if (poolValues != null) { - values.addAll(poolValues); - } - } - return values; - } - - public Collection values(K key) { - Collection values = new ArrayList(); - Pool pool = pools.get(key); - if (pool != null) { - Collection poolValues = pool.values(); - if (poolValues != null) { - values.addAll(poolValues); - } - } - return values; - } - - - @Override - public boolean isEmpty() { - return pools.isEmpty(); - } - - @Override - public int size() { - return pools.size(); - } - - public int size(K key) { - Pool pool = pools.get(key); - return pool != null ? pool.size() : 0; - } - - @Override - public boolean containsKey(Object key) { - return pools.containsKey(key); - } - - @Override - public boolean containsValue(Object value) { - if (value == null) { - return false; - } - for (Pool pool : pools.values()) { - if (value.equals(pool.get())) { - return true; - } - } - return false; - } - - @Override - public void putAll(Map map) { - for (Map.Entry entry : map.entrySet()) { - put(entry.getKey(), entry.getValue()); - } - } - - @Override - public void clear() { - for (Pool pool : pools.values()) { - pool.clear(); - } - pools.clear(); - } - - @Override - public Set keySet() { - return pools.keySet(); - } - - @Override - public Set> entrySet() { - Set> entries = new HashSet>(); - for (Map.Entry> poolEntry : pools.entrySet()) { - final K poolKey = poolEntry.getKey(); - final Pool pool = poolEntry.getValue(); - if (pool != null) { - for (final V poolValue : pool.values()) { - entries.add(new Map.Entry() { - @Override - public K getKey() { - return poolKey; - } - - @Override - public V getValue() { - return poolValue; - } - - @Override - public V setValue(V value) { - return pool.put(value); - } - }); - } - } - } - return null; - } - - protected interface Pool { - public R get(); - - public R put(R resource); - - public boolean remove(R resource); - - public void clear(); - - public Collection values(); - - public int size(); - } - - public enum PoolType { - Reusable, ThreadLocal, RoundRobin; - - public static PoolType valueOf(String poolTypeName, - PoolType defaultPoolType, PoolType... 
allowedPoolTypes) { - PoolType poolType = PoolType.fuzzyMatch(poolTypeName); - if (poolType != null) { - boolean allowedType = false; - if (poolType.equals(defaultPoolType)) { - allowedType = true; - } else { - if (allowedPoolTypes != null) { - for (PoolType allowedPoolType : allowedPoolTypes) { - if (poolType.equals(allowedPoolType)) { - allowedType = true; - break; - } - } - } - } - if (!allowedType) { - poolType = null; - } - } - return (poolType != null) ? poolType : defaultPoolType; - } - - public static String fuzzyNormalize(String name) { - return name != null ? name.replaceAll("-", "").trim().toLowerCase() : ""; - } - - public static PoolType fuzzyMatch(String name) { - for (PoolType poolType : values()) { - if (fuzzyNormalize(name).equals(fuzzyNormalize(poolType.name()))) { - return poolType; - } - } - return null; - } - } - - protected Pool createPool() { - switch (poolType) { - case Reusable: - return new ReusablePool(poolMaxSize); - case RoundRobin: - return new RoundRobinPool(poolMaxSize); - case ThreadLocal: - return new ThreadLocalPool(); - } - return null; - } - - /** - * The ReusablePool represents a {@link PoolMap.Pool} that builds - * on the {@link LinkedList} class. It essentially allows resources to be - * checked out, at which point it is removed from this pool. When the resource - * is no longer required, it should be returned to the pool in order to be - * reused. - * - *
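A hedged usage sketch for PoolMap as declared above; the Strings below stand in for real pooled resources such as connections.

import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.hbase.util.PoolMap.PoolType;

public class PoolMapExample {
  public static void main(String[] args) {
    // At most two pooled values per key, handed out round-robin.
    PoolMap<String, String> pool = new PoolMap<String, String>(PoolType.RoundRobin, 2);
    pool.put("server-1", "connection-a");
    pool.put("server-1", "connection-b");

    // Once the per-key pool is full, successive gets rotate through its values.
    System.out.println(pool.get("server-1"));
    System.out.println(pool.get("server-1"));
    System.out.println(pool.size("server-1"));   // 2

    // PoolType.valueOf normalizes the name and falls back to the default.
    PoolType type = PoolType.valueOf("thread-local", PoolType.Reusable,
        PoolType.ThreadLocal, PoolType.RoundRobin);
    System.out.println(type);                     // ThreadLocal
  }
}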

          - * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of - * the pool is unbounded. Otherwise, it caps the number of consumers that can - * check out a resource from this pool to the (non-zero positive) value - * specified in {@link #maxSize}. - *

          - * - * @param - * the type of the resource - */ - @SuppressWarnings("serial") - public class ReusablePool extends ConcurrentLinkedQueue implements Pool { - private int maxSize; - - public ReusablePool(int maxSize) { - this.maxSize = maxSize; - - } - - @Override - public R get() { - return poll(); - } - - @Override - public R put(R resource) { - if (super.size() < maxSize) { - add(resource); - } - return null; - } - - @Override - public Collection values() { - return this; - } - } - - /** - * The RoundRobinPool represents a {@link PoolMap.Pool}, which - * stores its resources in an {@link ArrayList}. It load-balances access to - * its resources by returning a different resource every time a given key is - * looked up. - * - *

          - * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of - * the pool is unbounded. Otherwise, it caps the number of resources in this - * pool to the (non-zero positive) value specified in {@link #maxSize}. - *

          - * - * @param - * the type of the resource - * - */ - @SuppressWarnings("serial") - class RoundRobinPool extends CopyOnWriteArrayList implements Pool { - private int maxSize; - private int nextResource = 0; - - public RoundRobinPool(int maxSize) { - this.maxSize = maxSize; - } - - @Override - public R put(R resource) { - if (super.size() < maxSize) { - add(resource); - } - return null; - } - - @Override - public R get() { - if (super.size() < maxSize) { - return null; - } - nextResource %= super.size(); - R resource = get(nextResource++); - return resource; - } - - @Override - public Collection values() { - return this; - } - - } - - /** - * The ThreadLocalPool represents a {@link PoolMap.Pool} that - * builds on the {@link ThreadLocal} class. It essentially binds the resource - * to the thread from which it is accessed. - * - *

          - * Note that the size of the pool is essentially bounded by the number of threads - * that add resources to this pool. - *

          - * - * @param - * the type of the resource - */ - static class ThreadLocalPool extends ThreadLocal implements Pool { - private static final Map, AtomicInteger> poolSizes = new HashMap, AtomicInteger>(); - - public ThreadLocalPool() { - } - - @Override - public R put(R resource) { - R previousResource = get(); - if (previousResource == null) { - AtomicInteger poolSize = poolSizes.get(this); - if (poolSize == null) { - poolSizes.put(this, poolSize = new AtomicInteger(0)); - } - poolSize.incrementAndGet(); - } - this.set(resource); - return previousResource; - } - - @Override - public void remove() { - super.remove(); - AtomicInteger poolSize = poolSizes.get(this); - if (poolSize != null) { - poolSize.decrementAndGet(); - } - } - - @Override - public int size() { - AtomicInteger poolSize = poolSizes.get(this); - return poolSize != null ? poolSize.get() : 0; - } - - @Override - public boolean remove(R resource) { - R previousResource = super.get(); - if (resource != null && resource.equals(previousResource)) { - remove(); - return true; - } else { - return false; - } - } - - @Override - public void clear() { - super.remove(); - } - - @Override - public Collection values() { - List values = new ArrayList(); - values.add(get()); - return values; - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/ProtoUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/ProtoUtil.java deleted file mode 100644 index bdbe7b7..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/ProtoUtil.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.util; - -import java.io.DataInput; -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; - -@InterfaceAudience.Private -public abstract class ProtoUtil { - - /** - * Read a variable length integer in the same format that ProtoBufs encodes. - * @param in the input stream to read from - * @return the integer - * @throws IOException if it is malformed or EOF. - */ - public static int readRawVarint32(DataInput in) throws IOException { - byte tmp = in.readByte(); - if (tmp >= 0) { - return tmp; - } - int result = tmp & 0x7f; - if ((tmp = in.readByte()) >= 0) { - result |= tmp << 7; - } else { - result |= (tmp & 0x7f) << 7; - if ((tmp = in.readByte()) >= 0) { - result |= tmp << 14; - } else { - result |= (tmp & 0x7f) << 14; - if ((tmp = in.readByte()) >= 0) { - result |= tmp << 21; - } else { - result |= (tmp & 0x7f) << 21; - result |= (tmp = in.readByte()) << 28; - if (tmp < 0) { - // Discard upper 32 bits. 
- for (int i = 0; i < 5; i++) { - if (in.readByte() >= 0) { - return result; - } - } - throw new IOException("Malformed varint"); - } - } - } - } - return result; - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java deleted file mode 100644 index 7790362..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import java.util.concurrent.TimeUnit; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; - -@InterfaceAudience.Private -public class RetryCounter { - private static final Log LOG = LogFactory.getLog(RetryCounter.class); - private final int maxRetries; - private int retriesRemaining; - private final int retryIntervalMillis; - private final TimeUnit timeUnit; - - public RetryCounter(int maxRetries, - int retryIntervalMillis, TimeUnit timeUnit) { - this.maxRetries = maxRetries; - this.retriesRemaining = maxRetries; - this.retryIntervalMillis = retryIntervalMillis; - this.timeUnit = timeUnit; - } - - public int getMaxRetries() { - return maxRetries; - } - - /** - * Sleep for a exponentially back off time - * @throws InterruptedException - */ - public void sleepUntilNextRetry() throws InterruptedException { - int attempts = getAttemptTimes(); - long sleepTime = (long) (retryIntervalMillis * Math.pow(2, attempts)); - LOG.info("Sleeping " + sleepTime + "ms before retry #" + attempts + "..."); - timeUnit.sleep(sleepTime); - } - - public boolean shouldRetry() { - return retriesRemaining > 0; - } - - public void useRetry() { - retriesRemaining--; - } - - public int getAttemptTimes() { - return maxRetries-retriesRemaining+1; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/RetryCounterFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/RetryCounterFactory.java deleted file mode 100644 index 59edf96..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/RetryCounterFactory.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
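Reviewer aside, not part of the patch: RetryCounter (deleted above) in its usual retry-loop shape; attempt() is a made-up stand-in for the real operation.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.util.RetryCounter;

public class RetryCounterExample {
  public static void main(String[] args) throws InterruptedException {
    // Three attempts, sleeping 10ms * 2^attempt between them.
    RetryCounter retries = new RetryCounter(3, 10, TimeUnit.MILLISECONDS);
    while (retries.shouldRetry()) {
      if (attempt()) {
        return;
      }
      retries.sleepUntilNextRetry();   // exponential backoff
      retries.useRetry();              // burn one of the remaining retries
    }
    System.err.println("gave up after " + retries.getMaxRetries() + " attempts");
  }

  private static boolean attempt() {
    return false;   // stand-in for the real, possibly failing operation
  }
}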
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.classification.InterfaceAudience; - -@InterfaceAudience.Private -public class RetryCounterFactory { - private final int maxRetries; - private final int retryIntervalMillis; - - public RetryCounterFactory(int maxRetries, int retryIntervalMillis) { - this.maxRetries = maxRetries; - this.retryIntervalMillis = retryIntervalMillis; - } - - public RetryCounter create() { - return new RetryCounter( - maxRetries, retryIntervalMillis, TimeUnit.MILLISECONDS - ); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java deleted file mode 100644 index de84646..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java +++ /dev/null @@ -1,117 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.Stoppable; - -/** - * Sleeper for current thread. - * Sleeps for passed period. Also checks passed boolean and if interrupted, - * will return if the flag is set (rather than go back to sleep until its - * sleep time is up). - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class Sleeper { - private final Log LOG = LogFactory.getLog(this.getClass().getName()); - private final int period; - private final Stoppable stopper; - private static final long MINIMAL_DELTA_FOR_LOGGING = 10000; - - private final Object sleepLock = new Object(); - private boolean triggerWake = false; - - /** - * @param sleep sleep time in milliseconds - * @param stopper When {@link Stoppable#isStopped()} is true, this thread will - * cleanup and exit cleanly. - */ - public Sleeper(final int sleep, final Stoppable stopper) { - this.period = sleep; - this.stopper = stopper; - } - - /** - * Sleep for period. - */ - public void sleep() { - sleep(System.currentTimeMillis()); - } - - /** - * If currently asleep, stops sleeping; if not asleep, will skip the next - * sleep cycle. 
- */ - public void skipSleepCycle() { - synchronized (sleepLock) { - triggerWake = true; - sleepLock.notifyAll(); - } - } - - /** - * Sleep for period adjusted by passed startTime - * @param startTime Time some task started previous to now. Time to sleep - * will be docked current time minus passed startTime. - */ - public void sleep(final long startTime) { - if (this.stopper.isStopped()) { - return; - } - long now = System.currentTimeMillis(); - long waitTime = this.period - (now - startTime); - if (waitTime > this.period) { - LOG.warn("Calculated wait time > " + this.period + - "; setting to this.period: " + System.currentTimeMillis() + ", " + - startTime); - waitTime = this.period; - } - while (waitTime > 0) { - long woke = -1; - try { - synchronized (sleepLock) { - if (triggerWake) break; - sleepLock.wait(waitTime); - } - woke = System.currentTimeMillis(); - long slept = woke - now; - if (slept - this.period > MINIMAL_DELTA_FOR_LOGGING) { - LOG.warn("We slept " + slept + "ms instead of " + this.period + - "ms, this is likely due to a long " + - "garbage collecting pause and it's usually bad, see " + - "http://hbase.apache.org/book.html#trouble.rs.runtime.zkexpired"); - } - } catch(InterruptedException iex) { - // We we interrupted because we're meant to stop? If not, just - // continue ignoring the interruption - if (this.stopper.isStopped()) { - return; - } - } - // Recalculate waitTime. - woke = (woke == -1)? System.currentTimeMillis(): woke; - waitTime = this.period - (woke - startTime); - } - triggerWake = false; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java deleted file mode 100644 index 6e77cb6..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java +++ /dev/null @@ -1,289 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import java.lang.ref.Reference; -import java.lang.ref.ReferenceQueue; -import java.lang.ref.SoftReference; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.LinkedHashSet; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Set; -import java.util.SortedMap; -import java.util.TreeMap; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * A SortedMap implementation that uses Soft Reference values - * internally to make it play well with the GC when in a low-memory - * situation. Use as a cache where you also need SortedMap functionality. 
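Illustrative sketch (not in the patch): Sleeper needs a Stoppable owner; the minimal implementation below assumes the usual stop(String)/isStopped() contract of org.apache.hadoop.hbase.Stoppable.

import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.util.Sleeper;

public class SleeperExample implements Stoppable {
  private volatile boolean stopped = false;

  @Override
  public void stop(String why) {
    stopped = true;
  }

  @Override
  public boolean isStopped() {
    return stopped;
  }

  public static void main(String[] args) {
    SleeperExample stopper = new SleeperExample();
    Sleeper sleeper = new Sleeper(1000, stopper);   // 1s period

    sleeper.sleep();        // sleeps roughly one period
    stopper.stop("done");
    sleeper.sleep();        // returns immediately once the stopper is stopped
  }
}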
- * - * @param key class - * @param value class - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class SoftValueSortedMap implements SortedMap { - private final SortedMap> internalMap; - private final ReferenceQueue rq = new ReferenceQueue(); - private final Object sync; - - /** Constructor */ - public SoftValueSortedMap() { - this(new TreeMap>()); - } - - /** - * Constructor - * @param c comparator - */ - public SoftValueSortedMap(final Comparator c) { - this(new TreeMap>(c)); - } - - /** Internal constructor - * @param original object to wrap and synchronize on - */ - private SoftValueSortedMap(SortedMap> original) { - this(original, original); - } - - /** Internal constructor - * For headMap, tailMap, and subMap support - * @param original object to wrap - * @param sync object to synchronize on - */ - private SoftValueSortedMap(SortedMap> original, Object sync) { - this.internalMap = original; - this.sync = sync; - } - - /** - * Checks soft references and cleans any that have been placed on - * ReferenceQueue. Call if get/put etc. are not called regularly. - * Internally these call checkReferences on each access. - * @return How many references cleared. - */ - @SuppressWarnings("unchecked") - private int checkReferences() { - int i = 0; - for (Reference ref; (ref = this.rq.poll()) != null;) { - i++; - this.internalMap.remove(((SoftValue)ref).key); - } - return i; - } - - public V put(K key, V value) { - synchronized(sync) { - checkReferences(); - SoftValue oldValue = this.internalMap.put(key, - new SoftValue(key, value, this.rq)); - return oldValue == null ? null : oldValue.get(); - } - } - - @Override - public void putAll(Map m) { - throw new RuntimeException("Not implemented"); - } - - public V get(Object key) { - synchronized(sync) { - checkReferences(); - SoftValue value = this.internalMap.get(key); - if (value == null) { - return null; - } - if (value.get() == null) { - this.internalMap.remove(key); - return null; - } - return value.get(); - } - } - - public V remove(Object key) { - synchronized(sync) { - checkReferences(); - SoftValue value = this.internalMap.remove(key); - return value == null ? 
null : value.get(); - } - } - - public boolean containsKey(Object key) { - synchronized(sync) { - checkReferences(); - return this.internalMap.containsKey(key); - } - } - - public boolean containsValue(Object value) { - throw new UnsupportedOperationException("Don't support containsValue!"); - } - - public K firstKey() { - synchronized(sync) { - checkReferences(); - return internalMap.firstKey(); - } - } - - public K lastKey() { - synchronized(sync) { - checkReferences(); - return internalMap.lastKey(); - } - } - - public SoftValueSortedMap headMap(K key) { - synchronized(sync) { - checkReferences(); - return new SoftValueSortedMap(this.internalMap.headMap(key), sync); - } - } - - public SoftValueSortedMap tailMap(K key) { - synchronized(sync) { - checkReferences(); - return new SoftValueSortedMap(this.internalMap.tailMap(key), sync); - } - } - - public SoftValueSortedMap subMap(K fromKey, K toKey) { - synchronized(sync) { - checkReferences(); - return new SoftValueSortedMap(this.internalMap.subMap(fromKey, - toKey), sync); - } - } - - /* - * retrieves the value associated with the greatest key strictly less than - * the given key, or null if there is no such key - * @param key the key we're interested in - */ - public synchronized V lowerValueByKey(K key) { - synchronized(sync) { - checkReferences(); - - Map.Entry> entry = - ((NavigableMap>) this.internalMap).lowerEntry(key); - if (entry==null) { - return null; - } - SoftValue value=entry.getValue(); - if (value==null) { - return null; - } - if (value.get() == null) { - this.internalMap.remove(key); - return null; - } - return value.get(); - } - } - - public boolean isEmpty() { - synchronized(sync) { - checkReferences(); - return this.internalMap.isEmpty(); - } - } - - public int size() { - synchronized(sync) { - checkReferences(); - return this.internalMap.size(); - } - } - - public void clear() { - synchronized(sync) { - checkReferences(); - this.internalMap.clear(); - } - } - - public Set keySet() { - synchronized(sync) { - checkReferences(); - // this is not correct as per SortedMap contract (keySet should be - // modifiable) - // needed here so that another thread cannot modify the keyset - // without locking - return Collections.unmodifiableSet(this.internalMap.keySet()); - } - } - - public Comparator comparator() { - return this.internalMap.comparator(); - } - - public Set> entrySet() { - synchronized(sync) { - checkReferences(); - // this is not correct as per SortedMap contract (entrySet should be - // backed by map) - Set> realEntries = new LinkedHashSet>(); - for (Map.Entry> entry : this.internalMap.entrySet()) { - realEntries.add(entry.getValue()); - } - return realEntries; - } - } - - public Collection values() { - synchronized(sync) { - checkReferences(); - ArrayList hardValues = new ArrayList(); - for (SoftValue softValue : this.internalMap.values()) { - hardValues.add(softValue.get()); - } - return hardValues; - } - } - - private static class SoftValue extends SoftReference implements Map.Entry { - final K key; - - SoftValue(K key, V value, ReferenceQueue q) { - super(value, q); - this.key = key; - } - - public K getKey() { - return this.key; - } - - public V getValue() { - return get(); - } - - public V setValue(V value) { - throw new RuntimeException("Not implemented"); - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/Triple.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/Triple.java deleted file mode 100644 index bb75553..0000000 --- 
hbase-server/src/main/java/org/apache/hadoop/hbase/util/Triple.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.util; - -/** - * Utility class to manage a triple. - */ -public class Triple { - private A first; - private B second; - private C third; - - public Triple(A first, B second, C third) { - this.first = first; - this.second = second; - this.third = third; - } - - public int hashCode() { - int hashFirst = (first != null ? first.hashCode() : 0); - int hashSecond = (second != null ? second.hashCode() : 0); - int hashThird = (third != null ? third.hashCode() : 0); - - return (hashFirst >> 1) ^ hashSecond ^ (hashThird << 1); - } - - public boolean equals(Object obj) { - if (!(obj instanceof Triple)) { - return false; - } - - Triple otherTriple = (Triple) obj; - - if (first != otherTriple.first && (first != null && !(first.equals(otherTriple.first)))) - return false; - if (second != otherTriple.second && (second != null && !(second.equals(otherTriple.second)))) - return false; - if (third != otherTriple.third && (third != null && !(third.equals(otherTriple.third)))) - return false; - - return true; - } - - public String toString() { - return "(" + first + ", " + second + "," + third + " )"; - } - - public A getFirst() { - return first; - } - - public void setFirst(A first) { - this.first = first; - } - - public B getSecond() { - return second; - } - - public void setSecond(B second) { - this.second = second; - } - - public C getThird() { - return third; - } - - public void setThird(C third) { - this.third = third; - } -} - - - diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/Writables.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/Writables.java deleted file mode 100644 index 13157eb..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/Writables.java +++ /dev/null @@ -1,167 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
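For reference, a minimal usage sketch of the Triple holder being removed above; the class, values, and the TripleSketch wrapper below are purely illustrative and assume Triple stays available from its new module location.

import org.apache.hadoop.hbase.util.Triple;

public class TripleSketch {
  public static void main(String[] args) {
    // Field order matches the constructor arguments; the values are made up.
    Triple<String, Integer, Long> t = new Triple<String, Integer, Long>("region-a", 3, 42L);
    System.out.println(t.getFirst() + " / " + t.getSecond() + " / " + t.getThird());
    // equals()/hashCode() are value based, so an identical triple compares equal.
    System.out.println(t.equals(new Triple<String, Integer, Long>("region-a", 3, 42L)));
  }
}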
- */ -package org.apache.hadoop.hbase.util; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.io.DataInputBuffer; -import org.apache.hadoop.io.Writable; - -/** - * Utility class with methods for manipulating Writable objects - */ -@InterfaceAudience.Private -public class Writables { - /** - * @param w writable - * @return The bytes of w gotten by running its - * {@link Writable#write(java.io.DataOutput)} method. - * @throws IOException e - * @see #getWritable(byte[], Writable) - */ - public static byte [] getBytes(final Writable w) throws IOException { - if (w == null) { - throw new IllegalArgumentException("Writable cannot be null"); - } - ByteArrayOutputStream byteStream = new ByteArrayOutputStream(); - DataOutputStream out = new DataOutputStream(byteStream); - try { - w.write(out); - out.close(); - out = null; - return byteStream.toByteArray(); - } finally { - if (out != null) { - out.close(); - } - } - } - - /** - * Put a bunch of Writables as bytes all into the one byte array. - * @param ws writable - * @return The bytes of w gotten by running its - * {@link Writable#write(java.io.DataOutput)} method. - * @throws IOException e - */ - public static byte [] getBytes(final Writable... ws) throws IOException { - List bytes = new ArrayList(); - int size = 0; - for (Writable w: ws) { - byte [] b = getBytes(w); - size += b.length; - bytes.add(b); - } - byte [] result = new byte[size]; - int offset = 0; - for (byte [] b: bytes) { - System.arraycopy(b, 0, result, offset, b.length); - offset += b.length; - } - return result; - } - - /** - * Set bytes into the passed Writable by calling its - * {@link Writable#readFields(java.io.DataInput)}. - * @param bytes serialized bytes - * @param w An empty Writable (usually made by calling the null-arg - * constructor). - * @return The passed Writable after its readFields has been called fed - * by the passed bytes array or IllegalArgumentException - * if passed null or an empty bytes array. - * @throws IOException e - * @throws IllegalArgumentException - */ - public static Writable getWritable(final byte [] bytes, final Writable w) - throws IOException { - return getWritable(bytes, 0, bytes.length, w); - } - - /** - * Set bytes into the passed Writable by calling its - * {@link Writable#readFields(java.io.DataInput)}. - * @param bytes serialized bytes - * @param offset offset into array - * @param length length of data - * @param w An empty Writable (usually made by calling the null-arg - * constructor). - * @return The passed Writable after its readFields has been called fed - * by the passed bytes array or IllegalArgumentException - * if passed null or an empty bytes array. - * @throws IOException e - * @throws IllegalArgumentException - */ - public static Writable getWritable(final byte [] bytes, final int offset, - final int length, final Writable w) - throws IOException { - if (bytes == null || length <=0) { - throw new IllegalArgumentException("Can't build a writable with empty " + - "bytes array"); - } - if (w == null) { - throw new IllegalArgumentException("Writable cannot be null"); - } - DataInputBuffer in = new DataInputBuffer(); - try { - in.reset(bytes, offset, length); - w.readFields(in); - return w; - } finally { - in.close(); - } - } - - /** - * Copy one Writable to another. 
Copies bytes using data streams. - * @param src Source Writable - * @param tgt Target Writable - * @return The target Writable. - * @throws IOException e - */ - public static Writable copyWritable(final Writable src, final Writable tgt) - throws IOException { - return copyWritable(getBytes(src), tgt); - } - - /** - * Copy one Writable to another. Copies bytes using data streams. - * @param bytes Source Writable - * @param tgt Target Writable - * @return The target Writable. - * @throws IOException e - */ - public static Writable copyWritable(final byte [] bytes, final Writable tgt) - throws IOException { - DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes)); - try { - tgt.readFields(dis); - } finally { - dis.close(); - } - return tgt; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java deleted file mode 100644 index 3aba999..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.WatchedEvent; - -/** - * An empty ZooKeeper watcher - */ -@InterfaceAudience.Private -public class EmptyWatcher implements Watcher { - // Used in this package but also by tests so needs to be public - public static EmptyWatcher instance = new EmptyWatcher(); - private EmptyWatcher() {} - - public void process(WatchedEvent event) {} -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java deleted file mode 100644 index 5b1ddbb..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java +++ /dev/null @@ -1,160 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
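A hedged sketch of the getBytes/getWritable round trip described in the Writables javadoc above, using org.apache.hadoop.io.Text as a stand-in Writable; the wrapper class and sample string are not part of the patch.

import java.io.IOException;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;

public class WritablesRoundTripSketch {
  public static void main(String[] args) throws IOException {
    Text original = new Text("hello");
    // Serialize via Writable.write(DataOutput) into a standalone byte[].
    byte[] bytes = Writables.getBytes(original);
    // Rehydrate by feeding the bytes to an empty instance's readFields(DataInput).
    Text copy = (Text) Writables.getWritable(bytes, new Text());
    System.out.println(copy);   // prints "hello"
  }
}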
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import java.io.File; -import java.io.IOException; -import java.io.PrintWriter; -import java.net.InetAddress; -import java.net.NetworkInterface; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Enumeration; -import java.util.List; -import java.util.Properties; -import java.util.Map.Entry; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.util.Strings; -import org.apache.hadoop.net.DNS; -import org.apache.hadoop.util.StringUtils; -import org.apache.zookeeper.server.ServerConfig; -import org.apache.zookeeper.server.ZooKeeperServerMain; -import org.apache.zookeeper.server.quorum.QuorumPeerConfig; -import org.apache.zookeeper.server.quorum.QuorumPeerMain; - -/** - * HBase's version of ZooKeeper's QuorumPeer. When HBase is set to manage - * ZooKeeper, this class is used to start up QuorumPeer instances. By doing - * things in here rather than directly calling to ZooKeeper, we have more - * control over the process. This class uses {@link ZKConfig} to parse the - * zoo.cfg and inject variables from HBase's site.xml configuration in. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class HQuorumPeer { - - /** - * Parse ZooKeeper configuration from HBase XML config and run a QuorumPeer. - * @param args String[] of command line arguments. Not used. - */ - public static void main(String[] args) { - Configuration conf = HBaseConfiguration.create(); - try { - Properties zkProperties = ZKConfig.makeZKProps(conf); - writeMyID(zkProperties); - QuorumPeerConfig zkConfig = new QuorumPeerConfig(); - zkConfig.parseProperties(zkProperties); - - // login the zookeeper server principal (if using security) - ZKUtil.loginServer(conf, "hbase.zookeeper.server.keytab.file", - "hbase.zookeeper.server.kerberos.principal", - zkConfig.getClientPortAddress().getHostName()); - - runZKServer(zkConfig); - } catch (Exception e) { - e.printStackTrace(); - System.exit(-1); - } - } - - private static void runZKServer(QuorumPeerConfig zkConfig) throws UnknownHostException, IOException { - if (zkConfig.isDistributed()) { - QuorumPeerMain qp = new QuorumPeerMain(); - qp.runFromConfig(zkConfig); - } else { - ZooKeeperServerMain zk = new ZooKeeperServerMain(); - ServerConfig serverConfig = new ServerConfig(); - serverConfig.readFrom(zkConfig); - zk.runFromConfig(serverConfig); - } - } - - private static boolean addressIsLocalHost(String address) { - return address.equals("localhost") || address.equals("127.0.0.1"); - } - - static void writeMyID(Properties properties) throws IOException { - long myId = -1; - - Configuration conf = HBaseConfiguration.create(); - String myAddress = Strings.domainNamePointerToHostName(DNS.getDefaultHost( - conf.get("hbase.zookeeper.dns.interface","default"), - conf.get("hbase.zookeeper.dns.nameserver","default"))); - - List ips = new ArrayList(); - - // Add what could be the best (configured) match - ips.add(myAddress.contains(".") ? 
- myAddress : - StringUtils.simpleHostname(myAddress)); - - // For all nics get all hostnames and IPs - Enumeration nics = NetworkInterface.getNetworkInterfaces(); - while(nics.hasMoreElements()) { - Enumeration rawAdrs = - ((NetworkInterface)nics.nextElement()).getInetAddresses(); - while(rawAdrs.hasMoreElements()) { - InetAddress inet = (InetAddress) rawAdrs.nextElement(); - ips.add(StringUtils.simpleHostname(inet.getHostName())); - ips.add(inet.getHostAddress()); - } - } - - for (Entry entry : properties.entrySet()) { - String key = entry.getKey().toString().trim(); - String value = entry.getValue().toString().trim(); - if (key.startsWith("server.")) { - int dot = key.indexOf('.'); - long id = Long.parseLong(key.substring(dot + 1)); - String[] parts = value.split(":"); - String address = parts[0]; - if (addressIsLocalHost(address) || ips.contains(address)) { - myId = id; - break; - } - } - } - - // Set the max session timeout from the provided client-side timeout - properties.setProperty("maxSessionTimeout", - conf.get("zookeeper.session.timeout", "180000")); - - if (myId == -1) { - throw new IOException("Could not find my address: " + myAddress + - " in list of ZooKeeper quorum servers"); - } - - String dataDirStr = properties.get("dataDir").toString().trim(); - File dataDir = new File(dataDirStr); - if (!dataDir.isDirectory()) { - if (!dataDir.mkdirs()) { - throw new IOException("Unable to create data dir " + dataDir); - } - } - - File myIdFile = new File(dataDir, "myid"); - PrintWriter w = new PrintWriter(myIdFile); - w.println(myId); - w.close(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java deleted file mode 100644 index 1fc5629..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java +++ /dev/null @@ -1,181 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.data.Stat; - -/** - * Manages the location of the current active Master for the RegionServer. - *
- * Listens for ZooKeeper events related to the master address. The node - * /master will contain the address of the current master. - * This listener is interested in - * NodeDeleted and NodeCreated events on - * /master. - *
- * Utilizes {@link ZooKeeperNodeTracker} for zk interactions. - *
- * You can get the current master via {@link #getMasterAddress()} or via - * {@link #getMasterAddress(ZooKeeperWatcher)} if you do not have a running - * instance of this Tracker in your context. - *
- * This class also includes utility for interacting with the master znode, for - * writing and reading the znode content. - */ -@InterfaceAudience.Private -public class MasterAddressTracker extends ZooKeeperNodeTracker { - /** - * Construct a master address listener with the specified - * zookeeper reference. - *
          - * This constructor does not trigger any actions, you must call methods - * explicitly. Normally you will just want to execute {@link #start()} to - * begin tracking of the master address. - * - * @param watcher zk reference and watcher - * @param abortable abortable in case of fatal error - */ - public MasterAddressTracker(ZooKeeperWatcher watcher, Abortable abortable) { - super(watcher, watcher.getMasterAddressZNode(), abortable); - } - - /** - * Get the address of the current master if one is available. Returns null - * if no current master. - * @return Server name or null if timed out. - */ - public ServerName getMasterAddress() { - return getMasterAddress(false); - } - - /** - * Get the address of the current master if one is available. Returns null - * if no current master. If refresh is set, try to load the data from ZK again, - * otherwise, cached data will be used. - * - * @param refresh whether to refresh the data by calling ZK directly. - * @return Server name or null if timed out. - */ - public ServerName getMasterAddress(final boolean refresh) { - try { - return ServerName.parseFrom(super.getData(refresh)); - } catch (DeserializationException e) { - LOG.warn("Failed parse", e); - return null; - } - } - - /** - * Get master address. - * Use this instead of {@link #getMasterAddress()} if you do not have an - * instance of this tracker in your context. - * @param zkw ZooKeeperWatcher to use - * @return ServerName stored in the the master address znode or null if no - * znode present. - * @throws KeeperException - * @throws IOException - */ - public static ServerName getMasterAddress(final ZooKeeperWatcher zkw) - throws KeeperException, IOException { - byte [] data = ZKUtil.getData(zkw, zkw.getMasterAddressZNode()); - if (data == null){ - throw new IOException("Can't get master address from ZooKeeper; znode data == null"); - } - try { - return ServerName.parseFrom(data); - } catch (DeserializationException e) { - KeeperException ke = new KeeperException.DataInconsistencyException(); - ke.initCause(e); - throw ke; - } - } - - /** - * Set master address into the master znode or into the backup - * subdirectory of backup masters; switch off the passed in znode - * path. - * @param zkw The ZooKeeperWatcher to use. - * @param znode Where to create the znode; could be at the top level or it - * could be under backup masters - * @param master ServerName of the current master - * @return true if node created, false if not; a watch is set in both cases - * @throws KeeperException - */ - public static boolean setMasterAddress(final ZooKeeperWatcher zkw, - final String znode, final ServerName master) - throws KeeperException { - return ZKUtil.createEphemeralNodeAndWatch(zkw, znode, toByteArray(master)); - } - - /** - * Check if there is a master available. - * @return true if there is a master set, false if not. - */ - public boolean hasMaster() { - return super.getData(false) != null; - } - - /** - * @param sn - * @return Content of the master znode as a serialized pb with the pb - * magic as prefix. 
- */ - static byte [] toByteArray(final ServerName sn) { - ZooKeeperProtos.Master.Builder mbuilder = ZooKeeperProtos.Master.newBuilder(); - HBaseProtos.ServerName.Builder snbuilder = HBaseProtos.ServerName.newBuilder(); - snbuilder.setHostName(sn.getHostname()); - snbuilder.setPort(sn.getPort()); - snbuilder.setStartCode(sn.getStartcode()); - mbuilder.setMaster(snbuilder.build()); - return ProtobufUtil.prependPBMagic(mbuilder.build().toByteArray()); - } - - /** - * delete the master znode if its content is same as the parameter - */ - public static boolean deleteIfEquals(ZooKeeperWatcher zkw, final String content) { - if (content == null){ - throw new IllegalArgumentException("Content must not be null"); - } - - try { - Stat stat = new Stat(); - byte[] data = ZKUtil.getDataNoWatch(zkw, zkw.getMasterAddressZNode(), stat); - ServerName sn = ServerName.parseFrom(data); - if (sn != null && content.equals(sn.toString())) { - return (ZKUtil.deleteNode(zkw, zkw.getMasterAddressZNode(), stat.getVersion())); - } - } catch (KeeperException e) { - LOG.warn("Can't get or delete the master znode", e); - } catch (DeserializationException e) { - LOG.warn("Can't get or delete the master znode", e); - } - - return false; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java deleted file mode 100644 index 02132c7..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.HRegionInfo; - -/** - * Tracks the unassigned zookeeper node used by the META table. - *
          - * If META is already assigned when instantiating this class, you will not - * receive any notification for that assignment. You will receive a - * notification after META has been successfully assigned to a new location. - */ -@InterfaceAudience.Private -public class MetaNodeTracker extends ZooKeeperNodeTracker { - /** - * Creates a meta node tracker. - * @param watcher - * @param abortable - */ - public MetaNodeTracker(final ZooKeeperWatcher watcher, final Abortable abortable) { - super(watcher, ZKUtil.joinZNode(watcher.assignmentZNode, - HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()), abortable); - } - - @Override - public void nodeDeleted(String path) { - super.nodeDeleted(path); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java deleted file mode 100644 index 4b355f7..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ /dev/null @@ -1,598 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.security.SecureRandom; -import java.util.ArrayList; -import java.util.List; -import java.util.Random; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.RetryCounter; -import org.apache.hadoop.hbase.util.RetryCounterFactory; -import org.apache.zookeeper.AsyncCallback; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.ZooKeeper.States; -import org.apache.zookeeper.data.ACL; -import org.apache.zookeeper.data.Stat; - -/** - * A zookeeper that can handle 'recoverable' errors. - * To handle recoverable errors, developers need to realize that there are two - * classes of requests: idempotent and non-idempotent requests. Read requests - * and unconditional sets and deletes are examples of idempotent requests, they - * can be reissued with the same results. - * (Although, the delete may throw a NoNodeException on reissue its effect on - * the ZooKeeper state is the same.) Non-idempotent requests need special - * handling, application and library writers need to keep in mind that they may - * need to encode information in the data or name of znodes to detect - * retries. A simple example is a create that uses a sequence flag. 
- * If a process issues a create("/x-", ..., SEQUENCE) and gets a connection - * loss exception, that process will reissue another - * create("/x-", ..., SEQUENCE) and get back x-111. When the process does a - * getChildren("/"), it sees x-1,x-30,x-109,x-110,x-111, now it could be - * that x-109 was the result of the previous create, so the process actually - * owns both x-109 and x-111. An easy way around this is to use "x-process id-" - * when doing the create. If the process is using an id of 352, before reissuing - * the create it will do a getChildren("/") and see "x-222-1", "x-542-30", - * "x-352-109", x-333-110". The process will know that the original create - * succeeded an the znode it created is "x-352-109". - * @see "http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling" - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class RecoverableZooKeeper { - private static final Log LOG = LogFactory.getLog(RecoverableZooKeeper.class); - // the actual ZooKeeper client instance - private ZooKeeper zk; - private final RetryCounterFactory retryCounterFactory; - // An identifier of this process in the cluster - private final String identifier; - private final byte[] id; - private Watcher watcher; - private int sessionTimeout; - private String quorumServers; - private final Random salter; - - // The metadata attached to each piece of data has the - // format: - // 1-byte constant - // 4-byte big-endian integer (length of next field) - // identifier corresponding uniquely to this process - // It is prepended to the data supplied by the user. - - // the magic number is to be backward compatible - private static final byte MAGIC =(byte) 0XFF; - private static final int MAGIC_SIZE = Bytes.SIZEOF_BYTE; - private static final int ID_LENGTH_OFFSET = MAGIC_SIZE; - private static final int ID_LENGTH_SIZE = Bytes.SIZEOF_INT; - - public RecoverableZooKeeper(String quorumServers, int sessionTimeout, - Watcher watcher, int maxRetries, int retryIntervalMillis) - throws IOException { - this.zk = new ZooKeeper(quorumServers, sessionTimeout, watcher); - this.retryCounterFactory = - new RetryCounterFactory(maxRetries, retryIntervalMillis); - - // the identifier = processID@hostName - this.identifier = ManagementFactory.getRuntimeMXBean().getName(); - LOG.info("The identifier of this process is " + identifier); - this.id = Bytes.toBytes(identifier); - - this.watcher = watcher; - this.sessionTimeout = sessionTimeout; - this.quorumServers = quorumServers; - salter = new SecureRandom(); - } - - public void reconnectAfterExpiration() - throws IOException, InterruptedException { - LOG.info("Closing dead ZooKeeper connection, session" + - " was: 0x"+Long.toHexString(zk.getSessionId())); - zk.close(); - this.zk = new ZooKeeper(this.quorumServers, - this.sessionTimeout, this.watcher); - LOG.info("Recreated a ZooKeeper, session" + - " is: 0x"+Long.toHexString(zk.getSessionId())); - } - - /** - * delete is an idempotent operation. Retry before throwing exception. - * This function will not throw NoNodeException if the path does not - * exist. - */ - public void delete(String path, int version) - throws InterruptedException, KeeperException { - RetryCounter retryCounter = retryCounterFactory.create(); - boolean isRetry = false; // False for first attempt, true for all retries. - while (true) { - try { - zk.delete(path, version); - return; - } catch (KeeperException e) { - switch (e.code()) { - case NONODE: - if (isRetry) { - LOG.info("Node " + path + " already deleted. 
Assuming a " + - "previous attempt succeeded."); - return; - } - LOG.warn("Node " + path + " already deleted, retry=" + isRetry); - throw e; - - case CONNECTIONLOSS: - case SESSIONEXPIRED: - case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "delete"); - break; - - default: - throw e; - } - } - retryCounter.sleepUntilNextRetry(); - retryCounter.useRetry(); - isRetry = true; - } - } - - /** - * exists is an idempotent operation. Retry before throwing exception - * @return A Stat instance - */ - public Stat exists(String path, Watcher watcher) - throws KeeperException, InterruptedException { - RetryCounter retryCounter = retryCounterFactory.create(); - while (true) { - try { - return zk.exists(path, watcher); - } catch (KeeperException e) { - switch (e.code()) { - case CONNECTIONLOSS: - case SESSIONEXPIRED: - case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "exists"); - break; - - default: - throw e; - } - } - retryCounter.sleepUntilNextRetry(); - retryCounter.useRetry(); - } - } - - /** - * exists is an idempotent operation. Retry before throwing exception - * @return A Stat instance - */ - public Stat exists(String path, boolean watch) - throws KeeperException, InterruptedException { - RetryCounter retryCounter = retryCounterFactory.create(); - while (true) { - try { - return zk.exists(path, watch); - } catch (KeeperException e) { - switch (e.code()) { - case CONNECTIONLOSS: - case SESSIONEXPIRED: - case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "exists"); - break; - - default: - throw e; - } - } - retryCounter.sleepUntilNextRetry(); - retryCounter.useRetry(); - } - } - - private void retryOrThrow(RetryCounter retryCounter, KeeperException e, - String opName) throws KeeperException { - LOG.warn("Possibly transient ZooKeeper exception: " + e); - if (!retryCounter.shouldRetry()) { - LOG.error("ZooKeeper " + opName + " failed after " - + retryCounter.getMaxRetries() + " retries"); - throw e; - } - } - - /** - * getChildren is an idempotent operation. Retry before throwing exception - * @return List of children znodes - */ - public List getChildren(String path, Watcher watcher) - throws KeeperException, InterruptedException { - RetryCounter retryCounter = retryCounterFactory.create(); - while (true) { - try { - return zk.getChildren(path, watcher); - } catch (KeeperException e) { - switch (e.code()) { - case CONNECTIONLOSS: - case SESSIONEXPIRED: - case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "getChildren"); - break; - - default: - throw e; - } - } - retryCounter.sleepUntilNextRetry(); - retryCounter.useRetry(); - } - } - - /** - * getChildren is an idempotent operation. Retry before throwing exception - * @return List of children znodes - */ - public List getChildren(String path, boolean watch) - throws KeeperException, InterruptedException { - RetryCounter retryCounter = retryCounterFactory.create(); - while (true) { - try { - return zk.getChildren(path, watch); - } catch (KeeperException e) { - switch (e.code()) { - case CONNECTIONLOSS: - case SESSIONEXPIRED: - case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "getChildren"); - break; - - default: - throw e; - } - } - retryCounter.sleepUntilNextRetry(); - retryCounter.useRetry(); - } - } - - /** - * getData is an idempotent operation. 
Retry before throwing exception - * @return Data - */ - public byte[] getData(String path, Watcher watcher, Stat stat) - throws KeeperException, InterruptedException { - RetryCounter retryCounter = retryCounterFactory.create(); - while (true) { - try { - byte[] revData = zk.getData(path, watcher, stat); - return this.removeMetaData(revData); - } catch (KeeperException e) { - switch (e.code()) { - case CONNECTIONLOSS: - case SESSIONEXPIRED: - case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "getData"); - break; - - default: - throw e; - } - } - retryCounter.sleepUntilNextRetry(); - retryCounter.useRetry(); - } - } - - /** - * getData is an idemnpotent operation. Retry before throwing exception - * @return Data - */ - public byte[] getData(String path, boolean watch, Stat stat) - throws KeeperException, InterruptedException { - RetryCounter retryCounter = retryCounterFactory.create(); - while (true) { - try { - byte[] revData = zk.getData(path, watch, stat); - return this.removeMetaData(revData); - } catch (KeeperException e) { - switch (e.code()) { - case CONNECTIONLOSS: - case SESSIONEXPIRED: - case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "getData"); - break; - - default: - throw e; - } - } - retryCounter.sleepUntilNextRetry(); - retryCounter.useRetry(); - } - } - - /** - * setData is NOT an idempotent operation. Retry may cause BadVersion Exception - * Adding an identifier field into the data to check whether - * badversion is caused by the result of previous correctly setData - * @return Stat instance - */ - public Stat setData(String path, byte[] data, int version) - throws KeeperException, InterruptedException { - RetryCounter retryCounter = retryCounterFactory.create(); - byte[] newData = appendMetaData(data); - boolean isRetry = false; - while (true) { - try { - return zk.setData(path, newData, version); - } catch (KeeperException e) { - switch (e.code()) { - case CONNECTIONLOSS: - case SESSIONEXPIRED: - case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "setData"); - break; - case BADVERSION: - if (isRetry) { - // try to verify whether the previous setData success or not - try{ - Stat stat = new Stat(); - byte[] revData = zk.getData(path, false, stat); - if(Bytes.compareTo(revData, newData) == 0) { - // the bad version is caused by previous successful setData - return stat; - } - } catch(KeeperException keeperException){ - // the ZK is not reliable at this moment. just throwing exception - throw keeperException; - } - } - // throw other exceptions and verified bad version exceptions - default: - throw e; - } - } - retryCounter.sleepUntilNextRetry(); - retryCounter.useRetry(); - isRetry = true; - } - } - - /** - *
- * NONSEQUENTIAL create is idempotent operation. - * Retry before throwing exceptions. - * But this function will not throw the NodeExist exception back to the - * application. - *
- *
- * But SEQUENTIAL is NOT idempotent operation. It is necessary to add - * identifier to the path to verify, whether the previous one is successful - * or not. - *
          - * - * @return Path - */ - public String create(String path, byte[] data, List acl, - CreateMode createMode) - throws KeeperException, InterruptedException { - byte[] newData = appendMetaData(data); - switch (createMode) { - case EPHEMERAL: - case PERSISTENT: - return createNonSequential(path, newData, acl, createMode); - - case EPHEMERAL_SEQUENTIAL: - case PERSISTENT_SEQUENTIAL: - return createSequential(path, newData, acl, createMode); - - default: - throw new IllegalArgumentException("Unrecognized CreateMode: " + - createMode); - } - } - - private String createNonSequential(String path, byte[] data, List acl, - CreateMode createMode) throws KeeperException, InterruptedException { - RetryCounter retryCounter = retryCounterFactory.create(); - boolean isRetry = false; // False for first attempt, true for all retries. - while (true) { - try { - return zk.create(path, data, acl, createMode); - } catch (KeeperException e) { - switch (e.code()) { - case NODEEXISTS: - if (isRetry) { - // If the connection was lost, there is still a possibility that - // we have successfully created the node at our previous attempt, - // so we read the node and compare. - byte[] currentData = zk.getData(path, false, null); - if (currentData != null && - Bytes.compareTo(currentData, data) == 0) { - // We successfully created a non-sequential node - return path; - } - LOG.error("Node " + path + " already exists with " + - Bytes.toStringBinary(currentData) + ", could not write " + - Bytes.toStringBinary(data)); - throw e; - } - LOG.info("Node " + path + " already exists and this is not a " + - "retry"); - throw e; - - case CONNECTIONLOSS: - case SESSIONEXPIRED: - case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "create"); - break; - - default: - throw e; - } - } - retryCounter.sleepUntilNextRetry(); - retryCounter.useRetry(); - isRetry = true; - } - } - - private String createSequential(String path, byte[] data, - List acl, CreateMode createMode) - throws KeeperException, InterruptedException { - RetryCounter retryCounter = retryCounterFactory.create(); - boolean first = true; - String newPath = path+this.identifier; - while (true) { - try { - if (!first) { - // Check if we succeeded on a previous attempt - String previousResult = findPreviousSequentialNode(newPath); - if (previousResult != null) { - return previousResult; - } - } - first = false; - return zk.create(newPath, data, acl, createMode); - } catch (KeeperException e) { - switch (e.code()) { - case CONNECTIONLOSS: - case SESSIONEXPIRED: - case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "create"); - break; - - default: - throw e; - } - } - retryCounter.sleepUntilNextRetry(); - retryCounter.useRetry(); - } - } - - private String findPreviousSequentialNode(String path) - throws KeeperException, InterruptedException { - int lastSlashIdx = path.lastIndexOf('/'); - assert(lastSlashIdx != -1); - String parent = path.substring(0, lastSlashIdx); - String nodePrefix = path.substring(lastSlashIdx+1); - - List nodes = zk.getChildren(parent, false); - List matching = filterByPrefix(nodes, nodePrefix); - for (String node : matching) { - String nodePath = parent + "/" + node; - Stat stat = zk.exists(nodePath, false); - if (stat != null) { - return nodePath; - } - } - return null; - } - - public byte[] removeMetaData(byte[] data) { - if(data == null || data.length == 0) { - return data; - } - // check the magic data; to be backward compatible - byte magic = data[0]; - if(magic != MAGIC) { - return data; - } - - int idLength = Bytes.toInt(data, 
ID_LENGTH_OFFSET); - int dataLength = data.length-MAGIC_SIZE-ID_LENGTH_SIZE-idLength; - int dataOffset = MAGIC_SIZE+ID_LENGTH_SIZE+idLength; - - byte[] newData = new byte[dataLength]; - System.arraycopy(data, dataOffset, newData, 0, dataLength); - return newData; - } - - private byte[] appendMetaData(byte[] data) { - if(data == null || data.length == 0){ - return data; - } - byte[] salt = Bytes.toBytes(salter.nextLong()); - int idLength = id.length + salt.length; - byte[] newData = new byte[MAGIC_SIZE+ID_LENGTH_SIZE+idLength+data.length]; - int pos = 0; - pos = Bytes.putByte(newData, pos, MAGIC); - pos = Bytes.putInt(newData, pos, idLength); - pos = Bytes.putBytes(newData, pos, id, 0, id.length); - pos = Bytes.putBytes(newData, pos, salt, 0, salt.length); - pos = Bytes.putBytes(newData, pos, data, 0, data.length); - return newData; - } - - public long getSessionId() { - return zk.getSessionId(); - } - - public void close() throws InterruptedException { - zk.close(); - } - - public States getState() { - return zk.getState(); - } - - public ZooKeeper getZooKeeper() { - return zk; - } - - public byte[] getSessionPasswd() { - return zk.getSessionPasswd(); - } - - public void sync(String path, AsyncCallback.VoidCallback cb, Object ctx) { - this.zk.sync(path, null, null); - } - - /** - * Filters the given node list by the given prefixes. - * This method is all-inclusive--if any element in the node list starts - * with any of the given prefixes, then it is included in the result. - * - * @param nodes the nodes to filter - * @param prefixes the prefixes to include in the result - * @return list of every element that starts with one of the prefixes - */ - private static List filterByPrefix(List nodes, - String... prefixes) { - List lockChildren = new ArrayList(); - for (String child : nodes){ - for (String prefix : prefixes){ - if (child.startsWith(prefix)){ - lockChildren.add(child); - break; - } - } - } - return lockChildren; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java deleted file mode 100644 index c7145a2..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java +++ /dev/null @@ -1,184 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
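A minimal sketch, not the removed implementation, of the retry-on-transient-error pattern that the RecoverableZooKeeper class above applies to idempotent reads; the retry budget, backoff, and class name are arbitrary choices for illustration. Non-idempotent calls such as sequential creates need the extra identifier-in-the-znode-name check described in the class javadoc and are not covered by this loop.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class IdempotentGetSketch {
  // Reads path, reissuing the request only on transient ZooKeeper errors.
  public static byte[] getDataWithRetry(ZooKeeper zk, String path, int maxRetries)
      throws KeeperException, InterruptedException {
    for (int attempt = 0; ; attempt++) {
      try {
        return zk.getData(path, false, new Stat());  // idempotent, so safe to reissue
      } catch (KeeperException e) {
        switch (e.code()) {
          case CONNECTIONLOSS:
          case SESSIONEXPIRED:
          case OPERATIONTIMEOUT:
            if (attempt >= maxRetries) throw e;      // retry budget exhausted
            Thread.sleep(1000L * (attempt + 1));     // crude linear backoff
            break;
          default:
            throw e;                                 // anything else is not retried here
        }
      }
    }
  }
}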
- */ -package org.apache.hadoop.hbase.zookeeper; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.zookeeper.KeeperException; - -/** - * Tracks the root region server location node in zookeeper. - * Root region location is set by RegionServerServices. - * This class has a watcher on the root location and notices changes. - */ -@InterfaceAudience.Private -public class RootRegionTracker extends ZooKeeperNodeTracker { - /** - * Creates a root region location tracker. - * - *
          After construction, use {@link #start} to kick off tracking. - * - * @param watcher - * @param abortable - */ - public RootRegionTracker(ZooKeeperWatcher watcher, Abortable abortable) { - super(watcher, watcher.rootServerZNode, abortable); - } - - /** - * Checks if the root region location is available. - * @return true if root region location is available, false if not - */ - public boolean isLocationAvailable() { - return super.getData(true) != null; - } - - /** - * Gets the root region location, if available. Does not block. Sets a watcher. - * @return server name or null if we failed to get the data. - * @throws InterruptedException - */ - public ServerName getRootRegionLocation() throws InterruptedException { - try { - return ServerName.parseFrom(super.getData(true)); - } catch (DeserializationException e) { - LOG.warn("Failed parse", e); - return null; - } - } - - /** - * Gets the root region location, if available. Does not block. Does not set - * a watcher (In this regard it differs from {@link #getRootRegionLocation()}. - * @param zkw - * @return server name or null if we failed to get the data. - * @throws KeeperException - */ - public static ServerName getRootRegionLocation(final ZooKeeperWatcher zkw) - throws KeeperException { - try { - return ServerName.parseFrom(ZKUtil.getData(zkw, zkw.rootServerZNode)); - } catch (DeserializationException e) { - throw ZKUtil.convert(e); - } - } - - /** - * Gets the root region location, if available, and waits for up to the - * specified timeout if not immediately available. - * Given the zookeeper notification could be delayed, we will try to - * get the latest data. - * @param timeout maximum time to wait, in millis - * @return server name for server hosting root region formatted as per - * {@link ServerName}, or null if none available - * @throws InterruptedException if interrupted while waiting - */ - public ServerName waitRootRegionLocation(long timeout) - throws InterruptedException { - if (false == checkIfBaseNodeAvailable()) { - String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. " - + "There could be a mismatch with the one configured in the master."; - LOG.error(errorMsg); - throw new IllegalArgumentException(errorMsg); - } - try { - return ServerName.parseFrom(super.blockUntilAvailable(timeout, true)); - } catch (DeserializationException e) { - LOG.warn("Failed parse", e); - return null; - } - } - - /** - * Sets the location of -ROOT- in ZooKeeper to the - * specified server address. - * @param zookeeper zookeeper reference - * @param location The server hosting -ROOT- - * @throws KeeperException unexpected zookeeper exception - */ - public static void setRootLocation(ZooKeeperWatcher zookeeper, - final ServerName location) - throws KeeperException { - LOG.info("Setting ROOT region location in ZooKeeper as " + location); - // Make the RootRegionServer pb and then get its bytes and save this as - // the znode content. - byte [] data = toByteArray(location); - try { - ZKUtil.createAndWatch(zookeeper, zookeeper.rootServerZNode, data); - } catch(KeeperException.NodeExistsException nee) { - LOG.debug("ROOT region location already existed, updated location"); - ZKUtil.setData(zookeeper, zookeeper.rootServerZNode, data); - } - } - - /** - * Build up the znode content. - * @param sn What to put into the znode. - * @return The content of the root-region-server znode - */ - static byte [] toByteArray(final ServerName sn) { - // ZNode content is a pb message preceeded by some pb magic. 
- HBaseProtos.ServerName pbsn = - HBaseProtos.ServerName.newBuilder().setHostName(sn.getHostname()). - setPort(sn.getPort()).setStartCode(sn.getStartcode()).build(); - ZooKeeperProtos.RootRegionServer pbrsr = - ZooKeeperProtos.RootRegionServer.newBuilder().setServer(pbsn).build(); - return ProtobufUtil.prependPBMagic(pbrsr.toByteArray()); - } - - /** - * Deletes the location of -ROOT- in ZooKeeper. - * @param zookeeper zookeeper reference - * @throws KeeperException unexpected zookeeper exception - */ - public static void deleteRootLocation(ZooKeeperWatcher zookeeper) - throws KeeperException { - LOG.info("Unsetting ROOT region location in ZooKeeper"); - try { - // Just delete the node. Don't need any watches. - ZKUtil.deleteNode(zookeeper, zookeeper.rootServerZNode); - } catch(KeeperException.NoNodeException nne) { - // Has already been deleted - } - } - - /** - * Wait until the root region is available. - * @param zkw - * @param timeout - * @return ServerName or null if we timed out. - * @throws InterruptedException - */ - public static ServerName blockUntilAvailable(final ZooKeeperWatcher zkw, - final long timeout) - throws InterruptedException { - byte [] data = ZKUtil.blockUntilAvailable(zkw, zkw.rootServerZNode, timeout); - if (data == null) return null; - try { - return ServerName.parseFrom(data); - } catch (DeserializationException e) { - LOG.warn("Failed parse", e); - return null; - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java deleted file mode 100644 index 8d52341..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.zookeeper; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.ClusterId; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.zookeeper.KeeperException; - -/** - * Publishes and synchronizes a unique identifier specific to a given HBase - * cluster. The stored identifier is read from the file system by the active - * master on startup, and is subsequently available to all watchers (including - * clients). 
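A hedged usage sketch of the static read helpers on the ZooKeeper-facing classes removed in this patch (MasterAddressTracker, RootRegionTracker, ZKClusterId). It assumes a reachable quorum configured through hbase-site.xml; the watcher name is arbitrary and no Abortable is wired in, which is acceptable only for a throwaway sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class ZkLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "zk-lookup-sketch", null);
    try {
      // Each helper reads a pb-magic-prefixed znode and decodes it with a parseFrom call.
      ServerName master = MasterAddressTracker.getMasterAddress(zkw);
      ServerName root = RootRegionTracker.getRootRegionLocation(zkw);
      String clusterId = ZKClusterId.readClusterIdZNode(zkw);
      System.out.println("master=" + master + " -ROOT-=" + root + " clusterId=" + clusterId);
    } finally {
      zkw.close();
    }
  }
}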
- */ -@InterfaceAudience.Private -public class ZKClusterId { - private ZooKeeperWatcher watcher; - private Abortable abortable; - private String id; - - public ZKClusterId(ZooKeeperWatcher watcher, Abortable abortable) { - this.watcher = watcher; - this.abortable = abortable; - } - - public boolean hasId() { - return getId() != null; - } - - public String getId() { - try { - if (id == null) { - id = readClusterIdZNode(watcher); - } - } catch (KeeperException ke) { - abortable.abort("Unexpected exception from ZooKeeper reading cluster ID", - ke); - } - return id; - } - - public static String readClusterIdZNode(ZooKeeperWatcher watcher) - throws KeeperException { - if (ZKUtil.checkExists(watcher, watcher.clusterIdZNode) != -1) { - byte [] data = ZKUtil.getData(watcher, watcher.clusterIdZNode); - if (data != null) { - try { - return ClusterId.parseFrom(data).toString(); - } catch (DeserializationException e) { - throw ZKUtil.convert(e); - } - } - } - return null; - } - - public static void setClusterId(ZooKeeperWatcher watcher, ClusterId id) - throws KeeperException { - ZKUtil.createSetData(watcher, watcher.clusterIdZNode, id.toByteArray()); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java deleted file mode 100644 index 3c74636..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java +++ /dev/null @@ -1,273 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import java.io.IOException; -import java.io.InputStream; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.List; -import java.util.Properties; -import java.util.Map.Entry; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.util.StringUtils; - -/** - * Utility methods for reading, and building the ZooKeeper configuration. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class ZKConfig { - private static final Log LOG = LogFactory.getLog(ZKConfig.class); - - private static final String VARIABLE_START = "${"; - private static final int VARIABLE_START_LENGTH = VARIABLE_START.length(); - private static final String VARIABLE_END = "}"; - private static final int VARIABLE_END_LENGTH = VARIABLE_END.length(); - - /** - * Make a Properties object holding ZooKeeper config. 
- * Parses the corresponding config options from the HBase XML configs - * and generates the appropriate ZooKeeper properties. - * @param conf Configuration to read from. - * @return Properties holding mappings representing ZooKeeper config file. - */ - public static Properties makeZKProps(Configuration conf) { - if (conf.getBoolean(HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG, - false)) { - LOG.warn( - "Parsing ZooKeeper's " + HConstants.ZOOKEEPER_CONFIG_NAME + - " file for ZK properties " + - "has been deprecated. Please instead place all ZK related HBase " + - "configuration under the hbase-site.xml, using prefixes " + - "of the form '" + HConstants.ZK_CFG_PROPERTY_PREFIX + "', and " + - "set property '" + HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG + - "' to false"); - // First check if there is a zoo.cfg in the CLASSPATH. If so, simply read - // it and grab its configuration properties. - ClassLoader cl = HQuorumPeer.class.getClassLoader(); - final InputStream inputStream = - cl.getResourceAsStream(HConstants.ZOOKEEPER_CONFIG_NAME); - if (inputStream != null) { - try { - return parseZooCfg(conf, inputStream); - } catch (IOException e) { - LOG.warn("Cannot read " + HConstants.ZOOKEEPER_CONFIG_NAME + - ", loading from XML files", e); - } - } - } else { - if (LOG.isDebugEnabled()) { - LOG.debug( - "Skipped reading ZK properties file '" + - HConstants.ZOOKEEPER_CONFIG_NAME + - "' since '" + HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG + - "' was not set to true"); - } - } - - // Otherwise, use the configuration options from HBase's XML files. - Properties zkProperties = new Properties(); - - // Directly map all of the hbase.zookeeper.property.KEY properties. - for (Entry entry : conf) { - String key = entry.getKey(); - if (key.startsWith(HConstants.ZK_CFG_PROPERTY_PREFIX)) { - String zkKey = key.substring(HConstants.ZK_CFG_PROPERTY_PREFIX_LEN); - String value = entry.getValue(); - // If the value has variables substitutions, need to do a get. - if (value.contains(VARIABLE_START)) { - value = conf.get(key); - } - zkProperties.put(zkKey, value); - } - } - - // If clientPort is not set, assign the default. - if (zkProperties.getProperty(HConstants.CLIENT_PORT_STR) == null) { - zkProperties.put(HConstants.CLIENT_PORT_STR, - HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT); - } - - // Create the server.X properties. - int peerPort = conf.getInt("hbase.zookeeper.peerport", 2888); - int leaderPort = conf.getInt("hbase.zookeeper.leaderport", 3888); - - final String[] serverHosts = conf.getStrings(HConstants.ZOOKEEPER_QUORUM, - HConstants.LOCALHOST); - for (int i = 0; i < serverHosts.length; ++i) { - String serverHost = serverHosts[i]; - String address = serverHost + ":" + peerPort + ":" + leaderPort; - String key = "server." + i; - zkProperties.put(key, address); - } - - return zkProperties; - } - - /** - * Parse ZooKeeper's zoo.cfg, injecting HBase Configuration variables in. - * This method is used for testing so we can pass our own InputStream. - * @param conf HBaseConfiguration to use for injecting variables. - * @param inputStream InputStream to read from. - * @return Properties parsed from config stream with variables substituted. - * @throws IOException if anything goes wrong parsing config - * @deprecated in 0.96 onwards. HBase will no longer rely on zoo.cfg - * availability. 
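A hedged sketch of the property mapping performed by makeZKProps above, using made-up configuration values: keys under hbase.zookeeper.property.* are copied through with the prefix stripped, and the quorum hosts become server.N entries. It exercises only local string handling and does not contact ZooKeeper.

import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;

public class ZKPropsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical settings for illustration only.
    conf.set("hbase.zookeeper.quorum", "localhost");
    conf.set("hbase.zookeeper.property.clientPort", "2181");
    Properties zkProps = ZKConfig.makeZKProps(conf);
    System.out.println(zkProps.getProperty("clientPort"));   // 2181
    System.out.println(zkProps.getProperty("server.0"));     // localhost:2888:3888
    // The client-facing connect string assembled from the same properties.
    System.out.println(ZKConfig.getZKQuorumServersString(conf));  // localhost:2181
  }
}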
- */ - @Deprecated - public static Properties parseZooCfg(Configuration conf, - InputStream inputStream) throws IOException { - Properties properties = new Properties(); - try { - properties.load(inputStream); - } catch (IOException e) { - final String msg = "fail to read properties from " - + HConstants.ZOOKEEPER_CONFIG_NAME; - LOG.fatal(msg); - throw new IOException(msg, e); - } - for (Entry entry : properties.entrySet()) { - String value = entry.getValue().toString().trim(); - String key = entry.getKey().toString().trim(); - StringBuilder newValue = new StringBuilder(); - int varStart = value.indexOf(VARIABLE_START); - int varEnd = 0; - while (varStart != -1) { - varEnd = value.indexOf(VARIABLE_END, varStart); - if (varEnd == -1) { - String msg = "variable at " + varStart + " has no end marker"; - LOG.fatal(msg); - throw new IOException(msg); - } - String variable = value.substring(varStart + VARIABLE_START_LENGTH, varEnd); - - String substituteValue = System.getProperty(variable); - if (substituteValue == null) { - substituteValue = conf.get(variable); - } - if (substituteValue == null) { - String msg = "variable " + variable + " not set in system property " - + "or hbase configs"; - LOG.fatal(msg); - throw new IOException(msg); - } - - newValue.append(substituteValue); - - varEnd += VARIABLE_END_LENGTH; - varStart = value.indexOf(VARIABLE_START, varEnd); - } - // Special case for 'hbase.cluster.distributed' property being 'true' - if (key.startsWith("server.")) { - boolean mode = conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED); - if (mode == HConstants.CLUSTER_IS_DISTRIBUTED && value.startsWith(HConstants.LOCALHOST)) { - String msg = "The server in zoo.cfg cannot be set to localhost " + - "in a fully-distributed setup because it won't be reachable. " + - "See \"Getting Started\" for more information."; - LOG.fatal(msg); - throw new IOException(msg); - } - } - newValue.append(value.substring(varEnd)); - properties.setProperty(key, newValue.toString()); - } - return properties; - } - - /** - * Return the ZK Quorum servers string given zk properties returned by - * makeZKProps - * @param properties - * @return Quorum servers String - */ - public static String getZKQuorumServersString(Properties properties) { - String clientPort = null; - List servers = new ArrayList(); - - // The clientPort option may come after the server.X hosts, so we need to - // grab everything and then create the final host:port comma separated list. - boolean anyValid = false; - for (Entry property : properties.entrySet()) { - String key = property.getKey().toString().trim(); - String value = property.getValue().toString().trim(); - if (key.equals("clientPort")) { - clientPort = value; - } - else if (key.startsWith("server.")) { - String host = value.substring(0, value.indexOf(':')); - servers.add(host); - try { - //noinspection ResultOfMethodCallIgnored - InetAddress.getByName(host); - anyValid = true; - } catch (UnknownHostException e) { - LOG.warn(StringUtils.stringifyException(e)); - } - } - } - - if (!anyValid) { - LOG.error("no valid quorum servers found in " + HConstants.ZOOKEEPER_CONFIG_NAME); - return null; - } - - if (clientPort == null) { - LOG.error("no clientPort found in " + HConstants.ZOOKEEPER_CONFIG_NAME); - return null; - } - - if (servers.isEmpty()) { - LOG.fatal("No servers were found in provided ZooKeeper configuration. " + - "HBase must have a ZooKeeper cluster configured for its " + - "operation. 
Ensure that you've configured '" + - HConstants.ZOOKEEPER_QUORUM + "' properly."); - return null; - } - - StringBuilder hostPortBuilder = new StringBuilder(); - for (int i = 0; i < servers.size(); ++i) { - String host = servers.get(i); - if (i > 0) { - hostPortBuilder.append(','); - } - hostPortBuilder.append(host); - hostPortBuilder.append(':'); - hostPortBuilder.append(clientPort); - } - - return hostPortBuilder.toString(); - } - - /** - * Return the ZK Quorum servers string given the specified configuration. - * @param conf - * @return Quorum servers - */ - public static String getZKQuorumServersString(Configuration conf) { - return getZKQuorumServersString(makeZKProps(conf)); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java deleted file mode 100644 index eaa8ae7..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java +++ /dev/null @@ -1,368 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.master.AssignmentManager; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.zookeeper.KeeperException; - -/** - * Helper class for table state tracking for use by {@link AssignmentManager}. - * Reads, caches and sets state up in zookeeper. If multiple read/write - * clients, will make for confusion. Read-only clients other than - * AssignmentManager interested in learning table state can use the - * read-only utility methods in {@link ZKTableReadOnly}. - * - *
<p>
          To save on trips to the zookeeper ensemble, internally we cache table - * state. - */ -@InterfaceAudience.Private -public class ZKTable { - // A znode will exist under the table directory if it is in any of the - // following states: {@link TableState#ENABLING} , {@link TableState#DISABLING}, - // or {@link TableState#DISABLED}. If {@link TableState#ENABLED}, there will - // be no entry for a table in zk. Thats how it currently works. - - private static final Log LOG = LogFactory.getLog(ZKTable.class); - private final ZooKeeperWatcher watcher; - - /** - * Cache of what we found in zookeeper so we don't have to go to zk ensemble - * for every query. Synchronize access rather than use concurrent Map because - * synchronization needs to span query of zk. - */ - private final Map cache = - new HashMap(); - - // TODO: Make it so always a table znode. Put table schema here as well as table state. - // Have watcher on table znode so all are notified of state or schema change. - - public ZKTable(final ZooKeeperWatcher zkw) throws KeeperException { - super(); - this.watcher = zkw; - populateTableStates(); - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @throws KeeperException - */ - private void populateTableStates() - throws KeeperException { - synchronized (this.cache) { - List children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode); - if (children == null) return; - for (String child: children) { - ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(this.watcher, child); - if (state != null) this.cache.put(child, state); - } - } - } - - /** - * Sets the specified table as DISABLED in zookeeper. Fails silently if the - * table is already disabled in zookeeper. Sets no watches. - * @param tableName - * @throws KeeperException unexpected zookeeper exception - */ - public void setDisabledTable(String tableName) - throws KeeperException { - synchronized (this.cache) { - if (!isDisablingOrDisabledTable(tableName)) { - LOG.warn("Moving table " + tableName + " state to disabled but was " + - "not first in disabling state: " + this.cache.get(tableName)); - } - setTableState(tableName, ZooKeeperProtos.Table.State.DISABLED); - } - } - - /** - * Sets the specified table as DISABLING in zookeeper. Fails silently if the - * table is already disabled in zookeeper. Sets no watches. - * @param tableName - * @throws KeeperException unexpected zookeeper exception - */ - public void setDisablingTable(final String tableName) - throws KeeperException { - synchronized (this.cache) { - if (!isEnabledOrDisablingTable(tableName)) { - LOG.warn("Moving table " + tableName + " state to disabling but was " + - "not first in enabled state: " + this.cache.get(tableName)); - } - setTableState(tableName, ZooKeeperProtos.Table.State.DISABLING); - } - } - - /** - * Sets the specified table as ENABLING in zookeeper. Fails silently if the - * table is already disabled in zookeeper. Sets no watches. 
- * @param tableName - * @throws KeeperException unexpected zookeeper exception - */ - public void setEnablingTable(final String tableName) - throws KeeperException { - synchronized (this.cache) { - if (!isDisabledOrEnablingTable(tableName)) { - LOG.warn("Moving table " + tableName + " state to enabling but was " + - "not first in disabled state: " + this.cache.get(tableName)); - } - setTableState(tableName, ZooKeeperProtos.Table.State.ENABLING); - } - } - - /** - * Sets the specified table as ENABLING in zookeeper atomically - * If the table is already in ENABLING state, no operation is performed - * @param tableName - * @return if the operation succeeds or not - * @throws KeeperException unexpected zookeeper exception - */ - public boolean checkAndSetEnablingTable(final String tableName) - throws KeeperException { - synchronized (this.cache) { - if (isEnablingTable(tableName)) { - return false; - } - setTableState(tableName, ZooKeeperProtos.Table.State.ENABLING); - return true; - } - } - - /** - * Sets the specified table as ENABLING in zookeeper atomically - * If the table isn't in DISABLED state, no operation is performed - * @param tableName - * @return if the operation succeeds or not - * @throws KeeperException unexpected zookeeper exception - */ - public boolean checkDisabledAndSetEnablingTable(final String tableName) - throws KeeperException { - synchronized (this.cache) { - if (!isDisabledTable(tableName)) { - return false; - } - setTableState(tableName, ZooKeeperProtos.Table.State.ENABLING); - return true; - } - } - - /** - * Sets the specified table as DISABLING in zookeeper atomically - * If the table isn't in ENABLED state, no operation is performed - * @param tableName - * @return if the operation succeeds or not - * @throws KeeperException unexpected zookeeper exception - */ - public boolean checkEnabledAndSetDisablingTable(final String tableName) - throws KeeperException { - synchronized (this.cache) { - if (this.cache.get(tableName) != null && !isEnabledTable(tableName)) { - return false; - } - setTableState(tableName, ZooKeeperProtos.Table.State.DISABLING); - return true; - } - } - - private void setTableState(final String tableName, final ZooKeeperProtos.Table.State state) - throws KeeperException { - String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName); - if (ZKUtil.checkExists(this.watcher, znode) == -1) { - ZKUtil.createAndFailSilent(this.watcher, znode); - } - synchronized (this.cache) { - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - builder.setState(state); - byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); - ZKUtil.setData(this.watcher, znode, data); - this.cache.put(tableName, state); - } - } - - public boolean isDisabledTable(final String tableName) { - return isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED); - } - - public boolean isDisablingTable(final String tableName) { - return isTableState(tableName, ZooKeeperProtos.Table.State.DISABLING); - } - - public boolean isEnablingTable(final String tableName) { - return isTableState(tableName, ZooKeeperProtos.Table.State.ENABLING); - } - - public boolean isEnabledTable(String tableName) { - return isTableState(tableName, ZooKeeperProtos.Table.State.ENABLED); - } - - public boolean isDisablingOrDisabledTable(final String tableName) { - synchronized (this.cache) { - return isDisablingTable(tableName) || isDisabledTable(tableName); - } - } - - public boolean isEnabledOrDisablingTable(final String tableName) { - synchronized 
(this.cache) { - return isEnabledTable(tableName) || isDisablingTable(tableName); - } - } - - public boolean isDisabledOrEnablingTable(final String tableName) { - synchronized (this.cache) { - return isDisabledTable(tableName) || isEnablingTable(tableName); - } - } - - private boolean isTableState(final String tableName, final ZooKeeperProtos.Table.State state) { - synchronized (this.cache) { - ZooKeeperProtos.Table.State currentState = this.cache.get(tableName); - return ZKTableReadOnly.isTableState(currentState, state); - } - } - - /** - * Deletes the table in zookeeper. Fails silently if the - * table is not currently disabled in zookeeper. Sets no watches. - * @param tableName - * @throws KeeperException unexpected zookeeper exception - */ - public void setDeletedTable(final String tableName) - throws KeeperException { - synchronized (this.cache) { - if (this.cache.remove(tableName) == null) { - LOG.warn("Moving table " + tableName + " state to deleted but was " + - "already deleted"); - } - ZKUtil.deleteNodeFailSilent(this.watcher, - ZKUtil.joinZNode(this.watcher.tableZNode, tableName)); - } - } - - /** - * Sets the ENABLED state in the cache and creates or force updates a node to - * ENABLED state for the specified table - * - * @param tableName - * @throws KeeperException - */ - public void setEnabledTable(final String tableName) throws KeeperException { - setTableState(tableName, ZooKeeperProtos.Table.State.ENABLED); - } - - /** - * check if table is present . - * - * @param tableName - * @return true if the table is present - */ - public boolean isTablePresent(final String tableName) { - synchronized (this.cache) { - ZooKeeperProtos.Table.State state = this.cache.get(tableName); - return !(state == null); - } - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @return Set of disabled tables, empty Set if none - */ - public Set getDisabledTables() { - Set disabledTables = new HashSet(); - synchronized (this.cache) { - Set tables = this.cache.keySet(); - for (String table: tables) { - if (isDisabledTable(table)) disabledTables.add(table); - } - } - return disabledTables; - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @return Set of disabled tables, empty Set if none - * @throws KeeperException - */ - public static Set getDisabledTables(ZooKeeperWatcher zkw) - throws KeeperException { - return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLED); - } - - /** - * Gets a list of all the tables set as disabling in zookeeper. - * @return Set of disabling tables, empty Set if none - * @throws KeeperException - */ - public static Set getDisablingTables(ZooKeeperWatcher zkw) - throws KeeperException { - return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLING); - } - - /** - * Gets a list of all the tables set as enabling in zookeeper. - * @return Set of enabling tables, empty Set if none - * @throws KeeperException - */ - public static Set getEnablingTables(ZooKeeperWatcher zkw) - throws KeeperException { - return getAllTables(zkw, ZooKeeperProtos.Table.State.ENABLING); - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @return Set of disabled tables, empty Set if none - * @throws KeeperException - */ - public static Set getDisabledOrDisablingTables(ZooKeeperWatcher zkw) - throws KeeperException { - return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLED, - ZooKeeperProtos.Table.State.DISABLING); - } - - /** - * Gets a list of all the tables of specified states in zookeeper. 
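For context, the static getters above are the read side of this state machine; a caller can snapshot which tables are in a transitional state with a few calls. A sketch only, assuming zkw is an already-connected ZooKeeperWatcher; the helper name reportPendingTables is made up for illustration:

    import java.util.Set;

    import org.apache.hadoop.hbase.zookeeper.ZKTable;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
    import org.apache.zookeeper.KeeperException;

    public class TableStateSketch {
      // Prints the tables whose state znodes mark them as not fully enabled.
      static void reportPendingTables(ZooKeeperWatcher zkw) throws KeeperException {
        Set<String> disabled  = ZKTable.getDisabledTables(zkw);
        Set<String> disabling = ZKTable.getDisablingTables(zkw);
        Set<String> enabling  = ZKTable.getEnablingTables(zkw);
        System.out.println("disabled=" + disabled
            + " disabling=" + disabling + " enabling=" + enabling);
      }
    }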
- * @return Set of tables of specified states, empty Set if none - * @throws KeeperException - */ - static Set getAllTables(final ZooKeeperWatcher zkw, - final ZooKeeperProtos.Table.State... states) throws KeeperException { - Set allTables = new HashSet(); - List children = - ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); - for (String child: children) { - ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(zkw, child); - for (ZooKeeperProtos.Table.State expectedState: states) { - if (state == expectedState) { - allTables.add(child); - break; - } - } - } - return allTables; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableReadOnly.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableReadOnly.java deleted file mode 100644 index a89d793..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableReadOnly.java +++ /dev/null @@ -1,161 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.master.AssignmentManager; -import org.apache.zookeeper.KeeperException; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * Non-instantiable class that provides helper functions for - * clients other than {@link AssignmentManager} for reading the - * state of a table in ZK. - * - *
<p>
          Does not cache state like {@link ZKTable}, actually reads from ZK each call. - */ -public class ZKTableReadOnly { - - private ZKTableReadOnly() {} - - /** - * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLED}. - * This method does not use cache. - * This method is for clients other than {@link AssignmentManager} - * @param zkw - * @param tableName - * @return True if table is enabled. - * @throws KeeperException - */ - public static boolean isDisabledTable(final ZooKeeperWatcher zkw, - final String tableName) - throws KeeperException { - ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); - return isTableState(ZooKeeperProtos.Table.State.DISABLED, state); - } - - /** - * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#ENABLED}. - * This method does not use cache. - * This method is for clients other than {@link AssignmentManager} - * @param zkw - * @param tableName - * @return True if table is enabled. - * @throws KeeperException - */ - public static boolean isEnabledTable(final ZooKeeperWatcher zkw, - final String tableName) - throws KeeperException { - return getTableState(zkw, tableName) == ZooKeeperProtos.Table.State.ENABLED; - } - - /** - * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLING} - * of {@code ZooKeeperProtos.Table.State#DISABLED}. - * This method does not use cache. - * This method is for clients other than {@link AssignmentManager}. - * @param zkw - * @param tableName - * @return True if table is enabled. - * @throws KeeperException - */ - public static boolean isDisablingOrDisabledTable(final ZooKeeperWatcher zkw, - final String tableName) - throws KeeperException { - ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); - return isTableState(ZooKeeperProtos.Table.State.DISABLING, state) || - isTableState(ZooKeeperProtos.Table.State.DISABLED, state); - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @return Set of disabled tables, empty Set if none - * @throws KeeperException - */ - public static Set getDisabledTables(ZooKeeperWatcher zkw) - throws KeeperException { - Set disabledTables = new HashSet(); - List children = - ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); - for (String child: children) { - ZooKeeperProtos.Table.State state = getTableState(zkw, child); - if (state == ZooKeeperProtos.Table.State.DISABLED) disabledTables.add(child); - } - return disabledTables; - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @return Set of disabled tables, empty Set if none - * @throws KeeperException - */ - public static Set getDisabledOrDisablingTables(ZooKeeperWatcher zkw) - throws KeeperException { - Set disabledTables = new HashSet(); - List children = - ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); - for (String child: children) { - ZooKeeperProtos.Table.State state = getTableState(zkw, child); - if (state == ZooKeeperProtos.Table.State.DISABLED || - state == ZooKeeperProtos.Table.State.DISABLING) - disabledTables.add(child); - } - return disabledTables; - } - - static boolean isTableState(final ZooKeeperProtos.Table.State expectedState, - final ZooKeeperProtos.Table.State currentState) { - return currentState != null && currentState.equals(expectedState); - } - - /** - * @param zkw - * @param child - * @return Null or {@link ZooKeeperProtos.Table.State} found in znode. 
- * @throws KeeperException - */ - static ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw, - final String child) - throws KeeperException { - String znode = ZKUtil.joinZNode(zkw.tableZNode, child); - byte [] data = ZKUtil.getData(zkw, znode); - if (data == null || data.length <= 0) return ZooKeeperProtos.Table.State.ENABLED; - try { - ProtobufUtil.expectPBMagicPrefix(data); - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - int magicLen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.Table t = builder.mergeFrom(data, magicLen, data.length - magicLen).build(); - return t.getState(); - } catch (InvalidProtocolBufferException e) { - KeeperException ke = new KeeperException.DataInconsistencyException(); - ke.initCause(e); - throw ke; - } catch (DeserializationException e) { - throw ZKUtil.convert(e); - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java deleted file mode 100644 index b583722..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ /dev/null @@ -1,1425 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.zookeeper; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.PrintWriter; -import java.net.InetSocketAddress; -import java.net.InetAddress; -import java.net.Socket; -import java.util.ArrayList; -import java.util.List; -import java.util.Properties; -import java.util.HashMap; -import java.util.Map; - -import javax.security.auth.login.LoginException; -import javax.security.auth.login.AppConfigurationEntry; -import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag; - -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.authentication.util.KerberosUtil; - -import org.apache.commons.lang.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.DeserializationException; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.zookeeper.AsyncCallback; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.KeeperException.NoNodeException; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooDefs.Ids; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.data.ACL; -import org.apache.zookeeper.data.Stat; -import org.apache.zookeeper.client.ZooKeeperSaslClient; -import org.apache.zookeeper.server.ZooKeeperSaslServer; - -/** - * Internal HBase utility class for ZooKeeper. - * - *
<p>
          Contains only static methods and constants. - * - *
<p>
          Methods all throw {@link KeeperException} if there is an unexpected - * zookeeper exception, so callers of these methods must handle appropriately. - * If ZK is required for the operation, the server will need to be aborted. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class ZKUtil { - private static final Log LOG = LogFactory.getLog(ZKUtil.class); - - // TODO: Replace this with ZooKeeper constant when ZOOKEEPER-277 is resolved. - private static final char ZNODE_PATH_SEPARATOR = '/'; - private static int zkDumpConnectionTimeOut; - - /** - * Creates a new connection to ZooKeeper, pulling settings and ensemble config - * from the specified configuration object using methods from {@link ZKConfig}. - * - * Sets the connection status monitoring watcher to the specified watcher. - * - * @param conf configuration to pull ensemble and other settings from - * @param watcher watcher to monitor connection changes - * @return connection to zookeeper - * @throws IOException if unable to connect to zk or config problem - */ - public static RecoverableZooKeeper connect(Configuration conf, Watcher watcher) - throws IOException { - Properties properties = ZKConfig.makeZKProps(conf); - String ensemble = ZKConfig.getZKQuorumServersString(properties); - return connect(conf, ensemble, watcher); - } - - public static RecoverableZooKeeper connect(Configuration conf, String ensemble, - Watcher watcher) - throws IOException { - return connect(conf, ensemble, watcher, ""); - } - - public static RecoverableZooKeeper connect(Configuration conf, String ensemble, - Watcher watcher, final String descriptor) - throws IOException { - if(ensemble == null) { - throw new IOException("Unable to determine ZooKeeper ensemble"); - } - int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, - HConstants.DEFAULT_ZK_SESSION_TIMEOUT); - LOG.debug(descriptor + " opening connection to ZooKeeper with ensemble (" + - ensemble + ")"); - int retry = conf.getInt("zookeeper.recovery.retry", 3); - int retryIntervalMillis = - conf.getInt("zookeeper.recovery.retry.intervalmill", 1000); - zkDumpConnectionTimeOut = conf.getInt("zookeeper.dump.connection.timeout", - 1000); - return new RecoverableZooKeeper(ensemble, timeout, watcher, - retry, retryIntervalMillis); - } - - /** - * Log in the current zookeeper server process using the given configuration - * keys for the credential file and login principal. - * - *
<p>
          This is only applicable when running on secure HBase. - * On regular HBase (without security features), this will safely be ignored. - *
</p>
          - * - * @param conf The configuration data to use - * @param keytabFileKey Property key used to configure the path to the credential file - * @param userNameKey Property key used to configure the login principal - * @param hostname Current hostname to use in any credentials - * @throws IOException underlying exception from SecurityUtil.login() call - */ - public static void loginServer(Configuration conf, String keytabFileKey, - String userNameKey, String hostname) throws IOException { - login(conf, keytabFileKey, userNameKey, hostname, - ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY, - JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME); - } - - /** - * Log in the current zookeeper client using the given configuration - * keys for the credential file and login principal. - * - *
<p>
          This is only applicable when running on secure HBase. - * On regular HBase (without security features), this will safely be ignored. - *
</p>
          - * - * @param conf The configuration data to use - * @param keytabFileKey Property key used to configure the path to the credential file - * @param userNameKey Property key used to configure the login principal - * @param hostname Current hostname to use in any credentials - * @throws IOException underlying exception from SecurityUtil.login() call - */ - public static void loginClient(Configuration conf, String keytabFileKey, - String userNameKey, String hostname) throws IOException { - login(conf, keytabFileKey, userNameKey, hostname, - ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, - JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME); - } - - /** - * Log in the current process using the given configuration keys for the - * credential file and login principal. - * - *
<p>
          This is only applicable when running on secure HBase. - * On regular HBase (without security features), this will safely be ignored. - *
</p>
          - * - * @param conf The configuration data to use - * @param keytabFileKey Property key used to configure the path to the credential file - * @param userNameKey Property key used to configure the login principal - * @param hostname Current hostname to use in any credentials - * @param loginContextProperty property name to expose the entry name - * @param loginContextName jaas entry name - * @throws IOException underlying exception from SecurityUtil.login() call - */ - private static void login(Configuration conf, String keytabFileKey, - String userNameKey, String hostname, - String loginContextProperty, String loginContextName) - throws IOException { - if (!isSecureZooKeeper(conf)) - return; - - // User has specified a jaas.conf, keep this one as the good one. - // HBASE_OPTS="-Djava.security.auth.login.config=jaas.conf" - if (System.getProperty("java.security.auth.login.config") != null) - return; - - String keytabFilename = conf.get(keytabFileKey); - String principalConfig = conf.get(userNameKey, System.getProperty("user.name")); - String principalName = SecurityUtil.getServerPrincipal(principalConfig, hostname); - - // Initialize the "jaas.conf" for keyTab/principal, - // If keyTab is not specified use the Ticket Cache. - // and set the zookeeper login context name. - JaasConfiguration jaasConf = new JaasConfiguration(loginContextName, - keytabFilename, principalName); - javax.security.auth.login.Configuration.setConfiguration(jaasConf); - System.setProperty(loginContextProperty, loginContextName); - } - - /** - * A JAAS configuration that defines the login modules that we want to use for login. - */ - private static class JaasConfiguration extends javax.security.auth.login.Configuration { - private static final String SERVER_KEYTAB_KERBEROS_CONFIG_NAME = - "zookeeper-server-keytab-kerberos"; - private static final String CLIENT_KEYTAB_KERBEROS_CONFIG_NAME = - "zookeeper-client-keytab-kerberos"; - - private static final Map BASIC_JAAS_OPTIONS = - new HashMap(); - static { - String jaasEnvVar = System.getenv("HBASE_JAAS_DEBUG"); - if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar)) { - BASIC_JAAS_OPTIONS.put("debug", "true"); - } - } - - private static final Map KEYTAB_KERBEROS_OPTIONS = - new HashMap(); - static { - KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true"); - KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true"); - KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true"); - KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS); - } - - private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN = - new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(), - LoginModuleControlFlag.REQUIRED, - KEYTAB_KERBEROS_OPTIONS); - - private static final AppConfigurationEntry[] KEYTAB_KERBEROS_CONF = - new AppConfigurationEntry[]{KEYTAB_KERBEROS_LOGIN}; - - private javax.security.auth.login.Configuration baseConfig; - private final String loginContextName; - private final boolean useTicketCache; - private final String keytabFile; - private final String principal; - - public JaasConfiguration(String loginContextName, String principal) { - this(loginContextName, principal, null, true); - } - - public JaasConfiguration(String loginContextName, String principal, String keytabFile) { - this(loginContextName, principal, keytabFile, keytabFile == null || keytabFile.length() == 0); - } - - private JaasConfiguration(String loginContextName, String principal, - String keytabFile, boolean useTicketCache) { - try { - this.baseConfig = 
javax.security.auth.login.Configuration.getConfiguration(); - } catch (SecurityException e) { - this.baseConfig = null; - } - this.loginContextName = loginContextName; - this.useTicketCache = useTicketCache; - this.keytabFile = keytabFile; - this.principal = principal; - LOG.info("JaasConfiguration loginContextName=" + loginContextName + - " principal=" + principal + " useTicketCache=" + useTicketCache + - " keytabFile=" + keytabFile); - } - - @Override - public AppConfigurationEntry[] getAppConfigurationEntry(String appName) { - if (loginContextName.equals(appName)) { - if (!useTicketCache) { - KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile); - KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true"); - } - KEYTAB_KERBEROS_OPTIONS.put("principal", principal); - KEYTAB_KERBEROS_OPTIONS.put("useTicketCache", useTicketCache ? "true" : "false"); - return KEYTAB_KERBEROS_CONF; - } - if (baseConfig != null) return baseConfig.getAppConfigurationEntry(appName); - return(null); - } - } - - // - // Helper methods - // - - /** - * Join the prefix znode name with the suffix znode name to generate a proper - * full znode name. - * - * Assumes prefix does not end with slash and suffix does not begin with it. - * - * @param prefix beginning of znode name - * @param suffix ending of znode name - * @return result of properly joining prefix with suffix - */ - public static String joinZNode(String prefix, String suffix) { - return prefix + ZNODE_PATH_SEPARATOR + suffix; - } - - /** - * Returns the full path of the immediate parent of the specified node. - * @param node path to get parent of - * @return parent of path, null if passed the root node or an invalid node - */ - public static String getParent(String node) { - int idx = node.lastIndexOf(ZNODE_PATH_SEPARATOR); - return idx <= 0 ? null : node.substring(0, idx); - } - - /** - * Get the name of the current node from the specified fully-qualified path. 
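Since joinZNode, getParent and getNodeName are pure string manipulation, a tiny sketch makes the round trip obvious (paths such as /hbase/table are illustrative values only):

    import org.apache.hadoop.hbase.zookeeper.ZKUtil;

    public class ZNodePathSketch {
      public static void main(String[] args) {
        String tableZNode = ZKUtil.joinZNode("/hbase", "table");  // "/hbase/table"
        String child = ZKUtil.joinZNode(tableZNode, "t1");        // "/hbase/table/t1"
        System.out.println(ZKUtil.getParent(child));              // "/hbase/table"
        System.out.println(ZKUtil.getNodeName(child));            // "t1"
      }
    }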
- * @param path fully-qualified path - * @return name of the current node - */ - public static String getNodeName(String path) { - return path.substring(path.lastIndexOf("/")+1); - } - - /** - * Get the key to the ZK ensemble for this configuration without - * adding a name at the end - * @param conf Configuration to use to build the key - * @return ensemble key without a name - */ - public static String getZooKeeperClusterKey(Configuration conf) { - return getZooKeeperClusterKey(conf, null); - } - - /** - * Get the key to the ZK ensemble for this configuration and append - * a name at the end - * @param conf Configuration to use to build the key - * @param name Name that should be appended at the end if not empty or null - * @return ensemble key with a name (if any) - */ - public static String getZooKeeperClusterKey(Configuration conf, String name) { - String ensemble = conf.get(HConstants.ZOOKEEPER_QUORUM.replaceAll( - "[\\t\\n\\x0B\\f\\r]", "")); - StringBuilder builder = new StringBuilder(ensemble); - builder.append(":"); - builder.append(conf.get(HConstants.ZOOKEEPER_CLIENT_PORT)); - builder.append(":"); - builder.append(conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); - if (name != null && !name.isEmpty()) { - builder.append(","); - builder.append(name); - } - return builder.toString(); - } - - /** - * Apply the settings in the given key to the given configuration, this is - * used to communicate with distant clusters - * @param conf configuration object to configure - * @param key string that contains the 3 required configuratins - * @throws IOException - */ - public static void applyClusterKeyToConf(Configuration conf, String key) - throws IOException{ - String[] parts = transformClusterKey(key); - conf.set(HConstants.ZOOKEEPER_QUORUM, parts[0]); - conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, parts[1]); - conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parts[2]); - } - - /** - * Separate the given key into the three configurations it should contain: - * hbase.zookeeper.quorum, hbase.zookeeper.client.port - * and zookeeper.znode.parent - * @param key - * @return the three configuration in the described order - * @throws IOException - */ - public static String[] transformClusterKey(String key) throws IOException { - String[] parts = key.split(":"); - if (parts.length != 3) { - throw new IOException("Cluster key invalid, the format should be:" + - HConstants.ZOOKEEPER_QUORUM + ":hbase.zookeeper.client.port:" - + HConstants.ZOOKEEPER_ZNODE_PARENT); - } - return parts; - } - - // - // Existence checks and watches - // - - /** - * Watch the specified znode for delete/create/change events. The watcher is - * set whether or not the node exists. If the node already exists, the method - * returns true. If the node does not exist, the method returns false. - * - * @param zkw zk reference - * @param znode path of node to watch - * @return true if znode exists, false if does not exist or error - * @throws KeeperException if unexpected zookeeper exception - */ - public static boolean watchAndCheckExists(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - try { - Stat s = zkw.getRecoverableZooKeeper().exists(znode, zkw); - boolean exists = s != null ? true : false; - if (exists) { - LOG.debug(zkw.prefix("Set watcher on existing znode " + znode)); - } else { - LOG.debug(zkw.prefix(znode+" does not exist. 
Watcher is set.")); - } - return exists; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e); - zkw.keeperException(e); - return false; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e); - zkw.interruptedException(e); - return false; - } - } - - /** - * Check if the specified node exists. Sets no watches. - * - * @param zkw zk reference - * @param znode path of node to watch - * @return version of the node if it exists, -1 if does not exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static int checkExists(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - try { - Stat s = zkw.getRecoverableZooKeeper().exists(znode, null); - return s != null ? s.getVersion() : -1; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to set watcher on znode (" + znode + ")"), e); - zkw.keeperException(e); - return -1; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to set watcher on znode (" + znode + ")"), e); - zkw.interruptedException(e); - return -1; - } - } - - // - // Znode listings - // - - /** - * Lists the children znodes of the specified znode. Also sets a watch on - * the specified znode which will capture a NodeDeleted event on the specified - * znode as well as NodeChildrenChanged if any children of the specified znode - * are created or deleted. - * - * Returns null if the specified node does not exist. Otherwise returns a - * list of children of the specified node. If the node exists but it has no - * children, an empty list will be returned. - * - * @param zkw zk reference - * @param znode path of node to list and watch children of - * @return list of children of the specified node, an empty list if the node - * exists but has no children, and null if the node does not exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static List listChildrenAndWatchForNewChildren( - ZooKeeperWatcher zkw, String znode) - throws KeeperException { - try { - List children = zkw.getRecoverableZooKeeper().getChildren(znode, zkw); - return children; - } catch(KeeperException.NoNodeException ke) { - LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " " + - "because node does not exist (not an error)")); - return null; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to list children of znode " + znode + " "), e); - zkw.keeperException(e); - return null; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to list children of znode " + znode + " "), e); - zkw.interruptedException(e); - return null; - } - } - - /** - * List all the children of the specified znode, setting a watch for children - * changes and also setting a watch on every individual child in order to get - * the NodeCreated and NodeDeleted events. - * @param zkw zookeeper reference - * @param znode node to get children of and watch - * @return list of znode names, null if the node doesn't exist - * @throws KeeperException - */ - public static List listChildrenAndWatchThem(ZooKeeperWatcher zkw, - String znode) throws KeeperException { - List children = listChildrenAndWatchForNewChildren(zkw, znode); - if (children == null) { - return null; - } - for (String child : children) { - watchAndCheckExists(zkw, joinZNode(znode, child)); - } - return children; - } - - /** - * Lists the children of the specified znode without setting any watches. 
- * - * Used to list the currently online regionservers and their addresses. - * - * Sets no watches at all, this method is best effort. - * - * Returns an empty list if the node has no children. Returns null if the - * parent node itself does not exist. - * - * @param zkw zookeeper reference - * @param znode node to get children of as addresses - * @return list of data of children of specified znode, empty if no children, - * null if parent does not exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static List listChildrenNoWatch(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - List children = null; - try { - // List the children without watching - children = zkw.getRecoverableZooKeeper().getChildren(znode, null); - } catch(KeeperException.NoNodeException nne) { - return null; - } catch(InterruptedException ie) { - zkw.interruptedException(ie); - } - return children; - } - - /** - * Simple class to hold a node path and node data. - * @deprecated Unused - */ - @Deprecated - public static class NodeAndData { - private String node; - private byte [] data; - public NodeAndData(String node, byte [] data) { - this.node = node; - this.data = data; - } - public String getNode() { - return node; - } - public byte [] getData() { - return data; - } - @Override - public String toString() { - return node; - } - public boolean isEmpty() { - return (data.length == 0); - } - } - - /** - * Checks if the specified znode has any children. Sets no watches. - * - * Returns true if the node exists and has children. Returns false if the - * node does not exist or if the node does not have any children. - * - * Used during master initialization to determine if the master is a - * failed-over-to master or the first master during initial cluster startup. - * If the directory for regionserver ephemeral nodes is empty then this is - * a cluster startup, if not then it is not cluster startup. - * - * @param zkw zk reference - * @param znode path of node to check for children of - * @return true if node has children, false if not or node does not exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static boolean nodeHasChildren(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - try { - return !zkw.getRecoverableZooKeeper().getChildren(znode, null).isEmpty(); - } catch(KeeperException.NoNodeException ke) { - LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " " + - "because node does not exist (not an error)")); - return false; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e); - zkw.keeperException(e); - return false; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e); - zkw.interruptedException(e); - return false; - } - } - - /** - * Get the number of children of the specified node. - * - * If the node does not exist or has no children, returns 0. - * - * Sets no watches at all. - * - * @param zkw zk reference - * @param znode path of node to count children of - * @return number of children of specified node, 0 if none or parent does not - * exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static int getNumberOfChildren(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - try { - Stat stat = zkw.getRecoverableZooKeeper().exists(znode, null); - return stat == null ? 
0 : stat.getNumChildren(); - } catch(KeeperException e) { - LOG.warn(zkw.prefix("Unable to get children of node " + znode)); - zkw.keeperException(e); - } catch(InterruptedException e) { - zkw.interruptedException(e); - } - return 0; - } - - // - // Data retrieval - // - - /** - * Get znode data. Does not set a watcher. - * @return ZNode data, null if the node does not exist or if there is an - * error. - */ - public static byte [] getData(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - try { - byte [] data = zkw.getRecoverableZooKeeper().getData(znode, null, null); - logRetrievedMsg(zkw, znode, data, false); - return data; - } catch (KeeperException.NoNodeException e) { - LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + - "because node does not exist (not an error)")); - return null; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); - zkw.keeperException(e); - return null; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); - zkw.interruptedException(e); - return null; - } - } - - /** - * Get the data at the specified znode and set a watch. - * - * Returns the data and sets a watch if the node exists. Returns null and no - * watch is set if the node does not exist or there is an exception. - * - * @param zkw zk reference - * @param znode path of node - * @return data of the specified znode, or null - * @throws KeeperException if unexpected zookeeper exception - */ - public static byte [] getDataAndWatch(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - return getDataInternal(zkw, znode, null, true); - } - - /** - * Get the data at the specified znode and set a watch. - * - * Returns the data and sets a watch if the node exists. Returns null and no - * watch is set if the node does not exist or there is an exception. - * - * @param zkw zk reference - * @param znode path of node - * @param stat object to populate the version of the znode - * @return data of the specified znode, or null - * @throws KeeperException if unexpected zookeeper exception - */ - public static byte[] getDataAndWatch(ZooKeeperWatcher zkw, String znode, - Stat stat) throws KeeperException { - return getDataInternal(zkw, znode, stat, true); - } - - private static byte[] getDataInternal(ZooKeeperWatcher zkw, String znode, Stat stat, - boolean watcherSet) - throws KeeperException { - try { - byte [] data = zkw.getRecoverableZooKeeper().getData(znode, zkw, stat); - logRetrievedMsg(zkw, znode, data, watcherSet); - return data; - } catch (KeeperException.NoNodeException e) { - // This log can get pretty annoying when we cycle on 100ms waits. - // Enable trace if you really want to see it. - LOG.trace(zkw.prefix("Unable to get data of znode " + znode + " " + - "because node does not exist (not an error)")); - return null; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); - zkw.keeperException(e); - return null; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); - zkw.interruptedException(e); - return null; - } - } - - /** - * Get the data at the specified znode without setting a watch. - * - * Returns the data if the node exists. Returns null if the node does not - * exist. - * - * Sets the stats of the node in the passed Stat object. Pass a null stat if - * not interested. 
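To make the Stat convention concrete: the caller owns the Stat object, and it is only populated when the znode can actually be read. A small sketch, assuming zkw is a connected ZooKeeperWatcher; the method name readWithVersion is invented for illustration:

    import org.apache.hadoop.hbase.zookeeper.ZKUtil;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.data.Stat;

    public class ReadZNodeSketch {
      // Reads a znode without leaving a watch behind and reports its version.
      static byte[] readWithVersion(ZooKeeperWatcher zkw, String znode) throws KeeperException {
        Stat stat = new Stat();
        byte[] data = ZKUtil.getDataNoWatch(zkw, znode, stat);
        if (data == null) {
          System.out.println(znode + " is missing (or could not be read)");
        } else {
          System.out.println(znode + " version=" + stat.getVersion());
        }
        return data;
      }
    }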
- * - * @param zkw zk reference - * @param znode path of node - * @param stat node status to get if node exists - * @return data of the specified znode, or null if node does not exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static byte [] getDataNoWatch(ZooKeeperWatcher zkw, String znode, - Stat stat) - throws KeeperException { - try { - byte [] data = zkw.getRecoverableZooKeeper().getData(znode, null, stat); - logRetrievedMsg(zkw, znode, data, false); - return data; - } catch (KeeperException.NoNodeException e) { - LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + - "because node does not exist (not necessarily an error)")); - return null; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); - zkw.keeperException(e); - return null; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); - zkw.interruptedException(e); - return null; - } - } - - /** - * Returns the date of child znodes of the specified znode. Also sets a watch on - * the specified znode which will capture a NodeDeleted event on the specified - * znode as well as NodeChildrenChanged if any children of the specified znode - * are created or deleted. - * - * Returns null if the specified node does not exist. Otherwise returns a - * list of children of the specified node. If the node exists but it has no - * children, an empty list will be returned. - * - * @param zkw zk reference - * @param baseNode path of node to list and watch children of - * @return list of data of children of the specified node, an empty list if the node - * exists but has no children, and null if the node does not exist - * @throws KeeperException if unexpected zookeeper exception - * @deprecated Unused - */ - public static List getChildDataAndWatchForNewChildren( - ZooKeeperWatcher zkw, String baseNode) throws KeeperException { - List nodes = - ZKUtil.listChildrenAndWatchForNewChildren(zkw, baseNode); - List newNodes = new ArrayList(); - if (nodes != null) { - for (String node : nodes) { - String nodePath = ZKUtil.joinZNode(baseNode, node); - byte[] data = ZKUtil.getDataAndWatch(zkw, nodePath); - newNodes.add(new NodeAndData(nodePath, data)); - } - } - return newNodes; - } - - /** - * Update the data of an existing node with the expected version to have the - * specified data. - * - * Throws an exception if there is a version mismatch or some other problem. - * - * Sets no watches under any conditions. - * - * @param zkw zk reference - * @param znode - * @param data - * @param expectedVersion - * @throws KeeperException if unexpected zookeeper exception - * @throws KeeperException.BadVersionException if version mismatch - * @deprecated Unused - */ - public static void updateExistingNodeData(ZooKeeperWatcher zkw, String znode, - byte [] data, int expectedVersion) - throws KeeperException { - try { - zkw.getRecoverableZooKeeper().setData(znode, data, expectedVersion); - } catch(InterruptedException ie) { - zkw.interruptedException(ie); - } - } - - // - // Data setting - // - - /** - * Sets the data of the existing znode to be the specified data. Ensures that - * the current data has the specified expected version. - * - *
<p>
          If the node does not exist, a {@link NoNodeException} will be thrown. - * - *
<p>
          If there is a version mismatch, the method returns false. - * - *
<p>
          No watches are set but setting data will trigger other watchers of this - * node. - * - *
<p>
          If there is another problem, a KeeperException will be thrown. - * - * @param zkw zk reference - * @param znode path of node - * @param data data to set for node - * @param expectedVersion version expected when setting data - * @return true if data set, false if version mismatch - * @throws KeeperException if unexpected zookeeper exception - */ - public static boolean setData(ZooKeeperWatcher zkw, String znode, - byte [] data, int expectedVersion) - throws KeeperException, KeeperException.NoNodeException { - try { - return zkw.getRecoverableZooKeeper().setData(znode, data, expectedVersion) != null; - } catch (InterruptedException e) { - zkw.interruptedException(e); - return false; - } - } - - /** - * Set data into node creating node if it doesn't yet exist. - * Does not set watch. - * @param zkw zk reference - * @param znode path of node - * @param data data to set for node - * @throws KeeperException - */ - public static void createSetData(final ZooKeeperWatcher zkw, final String znode, - final byte [] data) - throws KeeperException { - if (checkExists(zkw, znode) == -1) { - ZKUtil.createWithParents(zkw, znode); - } - ZKUtil.setData(zkw, znode, data); - } - - /** - * Sets the data of the existing znode to be the specified data. The node - * must exist but no checks are done on the existing data or version. - * - *
<p>
          If the node does not exist, a {@link NoNodeException} will be thrown. - * - *
<p>
          No watches are set but setting data will trigger other watchers of this - * node. - * - *
<p>
          If there is another problem, a KeeperException will be thrown. - * - * @param zkw zk reference - * @param znode path of node - * @param data data to set for node - * @throws KeeperException if unexpected zookeeper exception - */ - public static void setData(ZooKeeperWatcher zkw, String znode, byte [] data) - throws KeeperException, KeeperException.NoNodeException { - setData(zkw, znode, data, -1); - } - - /** - * Returns whether or not secure authentication is enabled - * (whether hbase.security.authentication is set to - * kerberos. - */ - public static boolean isSecureZooKeeper(Configuration conf) { - // hbase shell need to use: - // -Djava.security.auth.login.config=user-jaas.conf - // since each user has a different jaas.conf - if (System.getProperty("java.security.auth.login.config") != null) - return true; - - // Master & RSs uses hbase.zookeeper.client.* - return "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication")); - } - - private static ArrayList createACL(ZooKeeperWatcher zkw, String node) { - if (isSecureZooKeeper(zkw.getConfiguration())) { - // Certain znodes are accessed directly by the client, - // so they must be readable by non-authenticated clients - if ((node.equals(zkw.baseZNode) == true) || - (node.equals(zkw.rootServerZNode) == true) || - (node.equals(zkw.getMasterAddressZNode()) == true) || - (node.equals(zkw.clusterIdZNode) == true) || - (node.equals(zkw.rsZNode) == true) || - (node.equals(zkw.backupMasterAddressesZNode) == true) || - (node.startsWith(zkw.tableZNode) == true)) { - return ZooKeeperWatcher.CREATOR_ALL_AND_WORLD_READABLE; - } - return Ids.CREATOR_ALL_ACL; - } else { - return Ids.OPEN_ACL_UNSAFE; - } - } - - public static void waitForZKConnectionIfAuthenticating(ZooKeeperWatcher zkw) - throws InterruptedException { - if (isSecureZooKeeper(zkw.getConfiguration())) { - LOG.debug("Waiting for ZooKeeperWatcher to authenticate"); - zkw.saslLatch.await(); - LOG.debug("Done waiting."); - } - } - - // - // Node creation - // - - /** - * - * Set the specified znode to be an ephemeral node carrying the specified - * data. - * - * If the node is created successfully, a watcher is also set on the node. - * - * If the node is not created successfully because it already exists, this - * method will also set a watcher on the node. - * - * If there is another problem, a KeeperException will be thrown. - * - * @param zkw zk reference - * @param znode path of node - * @param data data of node - * @return true if node created, false if not, watch set in both cases - * @throws KeeperException if unexpected zookeeper exception - */ - public static boolean createEphemeralNodeAndWatch(ZooKeeperWatcher zkw, - String znode, byte [] data) - throws KeeperException { - try { - waitForZKConnectionIfAuthenticating(zkw); - zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), - CreateMode.EPHEMERAL); - } catch (KeeperException.NodeExistsException nee) { - if(!watchAndCheckExists(zkw, znode)) { - // It did exist but now it doesn't, try again - return createEphemeralNodeAndWatch(zkw, znode, data); - } - return false; - } catch (InterruptedException e) { - LOG.info("Interrupted", e); - Thread.currentThread().interrupt(); - } - return true; - } - - /** - * Creates the specified znode to be a persistent node carrying the specified - * data. - * - * Returns true if the node was successfully created, false if the node - * already existed. - * - * If the node is created successfully, a watcher is also set on the node. 
- * - * If the node is not created successfully because it already exists, this - * method will also set a watcher on the node but return false. - * - * If there is another problem, a KeeperException will be thrown. - * - * @param zkw zk reference - * @param znode path of node - * @param data data of node - * @return true if node created, false if not, watch set in both cases - * @throws KeeperException if unexpected zookeeper exception - */ - public static boolean createNodeIfNotExistsAndWatch( - ZooKeeperWatcher zkw, String znode, byte [] data) - throws KeeperException { - try { - waitForZKConnectionIfAuthenticating(zkw); - zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), - CreateMode.PERSISTENT); - } catch (KeeperException.NodeExistsException nee) { - try { - zkw.getRecoverableZooKeeper().exists(znode, zkw); - } catch (InterruptedException e) { - zkw.interruptedException(e); - return false; - } - return false; - } catch (InterruptedException e) { - zkw.interruptedException(e); - return false; - } - return true; - } - - /** - * Creates the specified node with the specified data and watches it. - * - *
          Throws an exception if the node already exists. - * - *
          The node created is persistent and open access. - * - *
          Returns the version number of the created node if successful. - * - * @param zkw zk reference - * @param znode path of node to create - * @param data data of node to create - * @return version of node created - * @throws KeeperException if unexpected zookeeper exception - * @throws KeeperException.NodeExistsException if node already exists - */ - public static int createAndWatch(ZooKeeperWatcher zkw, - String znode, byte [] data) - throws KeeperException, KeeperException.NodeExistsException { - try { - waitForZKConnectionIfAuthenticating(zkw); - zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), - CreateMode.PERSISTENT); - return zkw.getRecoverableZooKeeper().exists(znode, zkw).getVersion(); - } catch (InterruptedException e) { - zkw.interruptedException(e); - return -1; - } - } - - /** - * Async creates the specified node with the specified data. - * - *
          Throws an exception if the node already exists. - * - *
          The node created is persistent and open access. - * - * @param zkw zk reference - * @param znode path of node to create - * @param data data of node to create - * @param cb - * @param ctx - * @throws KeeperException if unexpected zookeeper exception - * @throws KeeperException.NodeExistsException if node already exists - */ - public static void asyncCreate(ZooKeeperWatcher zkw, - String znode, byte [] data, final AsyncCallback.StringCallback cb, - final Object ctx) { - try { - waitForZKConnectionIfAuthenticating(zkw); - zkw.getRecoverableZooKeeper().getZooKeeper().create(znode, data, - createACL(zkw, znode), CreateMode.PERSISTENT, cb, ctx); - } catch (InterruptedException e) { - zkw.interruptedException(e); - } - } - - /** - * Creates the specified node, if the node does not exist. Does not set a - * watch and fails silently if the node already exists. - * - * The node created is persistent and open access. - * - * @param zkw zk reference - * @param znode path of node - * @throws KeeperException if unexpected zookeeper exception - */ - public static void createAndFailSilent(ZooKeeperWatcher zkw, - String znode) - throws KeeperException { - try { - RecoverableZooKeeper zk = zkw.getRecoverableZooKeeper(); - waitForZKConnectionIfAuthenticating(zkw); - if (zk.exists(znode, false) == null) { - zk.create(znode, new byte[0], createACL(zkw,znode), - CreateMode.PERSISTENT); - } - } catch(KeeperException.NodeExistsException nee) { - } catch(KeeperException.NoAuthException nee){ - try { - if (null == zkw.getRecoverableZooKeeper().exists(znode, false)) { - // If we failed to create the file and it does not already exist. - throw(nee); - } - } catch (InterruptedException ie) { - zkw.interruptedException(ie); - } - - } catch(InterruptedException ie) { - zkw.interruptedException(ie); - } - } - - /** - * Creates the specified node and all parent nodes required for it to exist. - * - * No watches are set and no errors are thrown if the node already exists. - * - * The nodes created are persistent and open access. - * - * @param zkw zk reference - * @param znode path of node - * @throws KeeperException if unexpected zookeeper exception - */ - public static void createWithParents(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - try { - if(znode == null) { - return; - } - waitForZKConnectionIfAuthenticating(zkw); - zkw.getRecoverableZooKeeper().create(znode, new byte[0], createACL(zkw, znode), - CreateMode.PERSISTENT); - } catch(KeeperException.NodeExistsException nee) { - return; - } catch(KeeperException.NoNodeException nne) { - createWithParents(zkw, getParent(znode)); - createWithParents(zkw, znode); - } catch(InterruptedException ie) { - zkw.interruptedException(ie); - } - } - - // - // Deletes - // - - /** - * Delete the specified node. Sets no watches. Throws all exceptions. - */ - public static void deleteNode(ZooKeeperWatcher zkw, String node) - throws KeeperException { - deleteNode(zkw, node, -1); - } - - /** - * Delete the specified node with the specified version. Sets no watches. - * Throws all exceptions. - */ - public static boolean deleteNode(ZooKeeperWatcher zkw, String node, - int version) - throws KeeperException { - try { - zkw.getRecoverableZooKeeper().delete(node, version); - return true; - } catch(KeeperException.BadVersionException bve) { - return false; - } catch(InterruptedException ie) { - zkw.interruptedException(ie); - return false; - } - } - - /** - * Deletes the specified node. Fails silent if the node does not exist. 
- * @param zkw - * @param node - * @throws KeeperException - */ - public static void deleteNodeFailSilent(ZooKeeperWatcher zkw, String node) - throws KeeperException { - try { - zkw.getRecoverableZooKeeper().delete(node, -1); - } catch(KeeperException.NoNodeException nne) { - } catch(InterruptedException ie) { - zkw.interruptedException(ie); - } - } - - /** - * Delete the specified node and all of its children. - *
          - * If the node does not exist, just returns. - *
          - * Sets no watches. Throws all exceptions besides dealing with deletion of - * children. - */ - public static void deleteNodeRecursively(ZooKeeperWatcher zkw, String node) - throws KeeperException { - try { - List children = ZKUtil.listChildrenNoWatch(zkw, node); - // the node is already deleted, so we just finish - if (children == null) return; - - if(!children.isEmpty()) { - for(String child : children) { - deleteNodeRecursively(zkw, joinZNode(node, child)); - } - } - zkw.getRecoverableZooKeeper().delete(node, -1); - } catch(InterruptedException ie) { - zkw.interruptedException(ie); - } - } - - /** - * Delete all the children of the specified node but not the node itself. - * - * Sets no watches. Throws all exceptions besides dealing with deletion of - * children. - */ - public static void deleteChildrenRecursively(ZooKeeperWatcher zkw, String node) - throws KeeperException { - List children = ZKUtil.listChildrenNoWatch(zkw, node); - if (children == null || children.isEmpty()) return; - for(String child : children) { - deleteNodeRecursively(zkw, joinZNode(node, child)); - } - } - - // - // ZooKeeper cluster information - // - - /** @return String dump of everything in ZooKeeper. */ - public static String dump(ZooKeeperWatcher zkw) { - StringBuilder sb = new StringBuilder(); - try { - sb.append("HBase is rooted at ").append(zkw.baseZNode); - sb.append("\nActive master address: "); - try { - sb.append(MasterAddressTracker.getMasterAddress(zkw)); - } catch (IOException e) { - sb.append("<>"); - } - sb.append("\nBackup master addresses:"); - for (String child : listChildrenNoWatch(zkw, - zkw.backupMasterAddressesZNode)) { - sb.append("\n ").append(child); - } - sb.append("\nRegion server holding ROOT: " + RootRegionTracker.getRootRegionLocation(zkw)); - sb.append("\nRegion servers:"); - for (String child : listChildrenNoWatch(zkw, zkw.rsZNode)) { - sb.append("\n ").append(child); - } - sb.append("\nQuorum Server Statistics:"); - String[] servers = zkw.getQuorum().split(","); - for (String server : servers) { - sb.append("\n ").append(server); - try { - String[] stat = getServerStats(server, ZKUtil.zkDumpConnectionTimeOut); - - if (stat == null) { - sb.append("[Error] invalid quorum server: " + server); - break; - } - - for (String s : stat) { - sb.append("\n ").append(s); - } - } catch (Exception e) { - sb.append("\n ERROR: ").append(e.getMessage()); - } - } - } catch (KeeperException ke) { - sb.append("\nFATAL ZooKeeper Exception!\n"); - sb.append("\n" + ke.getMessage()); - } - return sb.toString(); - } - - /** - * Gets the statistics from the given server. - * - * @param server The server to get the statistics from. - * @param timeout The socket timeout to use. - * @return The array of response strings. - * @throws IOException When the socket communication fails. - */ - public static String[] getServerStats(String server, int timeout) - throws IOException { - String[] sp = server.split(":"); - if (sp == null || sp.length == 0) { - return null; - } - - String host = sp[0]; - int port = sp.length > 1 ? 
Integer.parseInt(sp[1]) - : HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT; - - Socket socket = new Socket(); - InetSocketAddress sockAddr = new InetSocketAddress(host, port); - socket.connect(sockAddr, timeout); - - socket.setSoTimeout(timeout); - PrintWriter out = new PrintWriter(socket.getOutputStream(), true); - BufferedReader in = new BufferedReader(new InputStreamReader( - socket.getInputStream())); - out.println("stat"); - out.flush(); - ArrayList res = new ArrayList(); - while (true) { - String line = in.readLine(); - if (line != null) { - res.add(line); - } else { - break; - } - } - socket.close(); - return res.toArray(new String[res.size()]); - } - - private static void logRetrievedMsg(final ZooKeeperWatcher zkw, - final String znode, final byte [] data, final boolean watcherSet) { - if (!LOG.isDebugEnabled()) return; - LOG.debug(zkw.prefix("Retrieved " + ((data == null)? 0: data.length) + - " byte(s) of data from znode " + znode + - (watcherSet? " and set watcher; ": "; data=") + - (data == null? "null": data.length == 0? "empty": ( - znode.startsWith(zkw.assignmentZNode)? - ZKAssign.toString(data): // We should not be doing this reaching into another class - znode.startsWith(zkw.rootServerZNode)? - getServerNameOrEmptyString(data): - znode.startsWith(zkw.backupMasterAddressesZNode)? - getServerNameOrEmptyString(data): - StringUtils.abbreviate(Bytes.toStringBinary(data), 32))))); - } - - private static String getServerNameOrEmptyString(final byte [] data) { - try { - return ServerName.parseFrom(data).toString(); - } catch (DeserializationException e) { - return ""; - } - } - - /** - * Waits for HBase installation's base (parent) znode to become available. - * @throws IOException on ZK errors - */ - public static void waitForBaseZNode(Configuration conf) throws IOException { - LOG.info("Waiting until the base znode is available"); - String parentZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, - HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - ZooKeeper zk = new ZooKeeper(ZKConfig.getZKQuorumServersString(conf), - conf.getInt(HConstants.ZK_SESSION_TIMEOUT, - HConstants.DEFAULT_ZK_SESSION_TIMEOUT), EmptyWatcher.instance); - - final int maxTimeMs = 10000; - final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS; - - KeeperException keeperEx = null; - try { - try { - for (int attempt = 0; attempt < maxNumAttempts; ++attempt) { - try { - if (zk.exists(parentZNode, false) != null) { - LOG.info("Parent znode exists: " + parentZNode); - keeperEx = null; - break; - } - } catch (KeeperException e) { - keeperEx = e; - } - Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS); - } - } finally { - zk.close(); - } - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } - - if (keeperEx != null) { - throw new IOException(keeperEx); - } - } - - - public static byte[] blockUntilAvailable( - final ZooKeeperWatcher zkw, final String znode, final long timeout) - throws InterruptedException { - if (timeout < 0) throw new IllegalArgumentException(); - if (zkw == null) throw new IllegalArgumentException(); - if (znode == null) throw new IllegalArgumentException(); - - byte[] data = null; - boolean finished = false; - final long endTime = System.currentTimeMillis() + timeout; - while (!finished) { - try { - data = ZKUtil.getData(zkw, znode); - } catch(KeeperException e) { - LOG.warn("Unexpected exception handling blockUntilAvailable", e); - } - - if (data == null && (System.currentTimeMillis() + - HConstants.SOCKET_RETRY_WAIT_MS < endTime)) { - 
Thread.sleep(HConstants.SOCKET_RETRY_WAIT_MS); - } else { - finished = true; - } - } - - return data; - } - - - /** - * Convert a {@link DeserializationException} to a more palatable {@link KeeperException}. - * Used when can't let a {@link DeserializationException} out w/o changing public API. - * @param e Exception to convert - * @return Converted exception - */ - public static KeeperException convert(final DeserializationException e) { - KeeperException ke = new KeeperException.DataInconsistencyException(); - ke.initCause(e); - return ke; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperListener.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperListener.java deleted file mode 100644 index e743e88..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperListener.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - - -/** - * Base class for internal listeners of ZooKeeper events. - * - * The {@link ZooKeeperWatcher} for a process will execute the appropriate - * methods of implementations of this class. In order to receive events from - * the watcher, every listener must register itself via {@link ZooKeeperWatcher#registerListener}. - * - * Subclasses need only override those methods in which they are interested. - * - * Note that the watcher will be blocked when invoking methods in listeners so - * they must not be long-running. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public abstract class ZooKeeperListener { - - // Reference to the zk watcher which also contains configuration and constants - protected ZooKeeperWatcher watcher; - - /** - * Construct a ZooKeeper event listener. - */ - public ZooKeeperListener(ZooKeeperWatcher watcher) { - this.watcher = watcher; - } - - /** - * Called when a new node has been created. - * @param path full path of the new node - */ - public void nodeCreated(String path) { - // no-op - } - - /** - * Called when a node has been deleted - * @param path full path of the deleted node - */ - public void nodeDeleted(String path) { - // no-op - } - - /** - * Called when an existing node has changed data. - * @param path full path of the updated node - */ - public void nodeDataChanged(String path) { - // no-op - } - - /** - * Called when an existing node has a child node added or removed. 
- * @param path full path of the node whose children have changed - */ - public void nodeChildrenChanged(String path) { - // no-op - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java deleted file mode 100644 index 723fd77..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java +++ /dev/null @@ -1,255 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.Abortable; -import org.apache.zookeeper.KeeperException; - -/** - * Tracks the availability and value of a single ZooKeeper node. - * - *
          Utilizes the {@link ZooKeeperListener} interface to get the necessary - * ZooKeeper events related to the node. - * - *
          This is the base class used by trackers in both the Master and - * RegionServers. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { - - static final Log LOG = LogFactory.getLog(ZooKeeperNodeTracker.class); - /** Path of node being tracked */ - protected final String node; - - /** Data of the node being tracked */ - private byte [] data; - - /** Used to abort if a fatal error occurs */ - protected final Abortable abortable; - - private boolean stopped = false; - - /** - * Constructs a new ZK node tracker. - * - *
          After construction, use {@link #start} to kick off tracking. - * - * @param watcher - * @param node - * @param abortable - */ - public ZooKeeperNodeTracker(ZooKeeperWatcher watcher, String node, - Abortable abortable) { - super(watcher); - this.node = node; - this.abortable = abortable; - this.data = null; - } - - /** - * Starts the tracking of the node in ZooKeeper. - * - *
          Use {@link #blockUntilAvailable()} to block until the node is available - * or {@link #getData(boolean)} to get the data of the node if it is available. - */ - public synchronized void start() { - try { - ZKUtil.waitForZKConnectionIfAuthenticating(watcher); - } catch (InterruptedException e) { - throw new IllegalStateException("ZookeeperNodeTracker on " + this.node - + " interuppted while waiting for SASL Authentication", e); - } - this.watcher.registerListener(this); - try { - if(ZKUtil.watchAndCheckExists(watcher, node)) { - byte [] data = ZKUtil.getDataAndWatch(watcher, node); - if(data != null) { - this.data = data; - } else { - // It existed but now does not, try again to ensure a watch is set - LOG.debug("Try starting again because there is no data from " + node); - start(); - } - } - } catch (KeeperException e) { - abortable.abort("Unexpected exception during initialization, aborting", e); - } - } - - public synchronized void stop() { - this.stopped = true; - notifyAll(); - } - - /** - * Gets the data of the node, blocking until the node is available. - * - * @return data of the node - * @throws InterruptedException if the waiting thread is interrupted - */ - public synchronized byte [] blockUntilAvailable() - throws InterruptedException { - return blockUntilAvailable(0, false); - } - - /** - * Gets the data of the node, blocking until the node is available or the - * specified timeout has elapsed. - * - * @param timeout maximum time to wait for the node data to be available, - * n milliseconds. Pass 0 for no timeout. - * @return data of the node - * @throws InterruptedException if the waiting thread is interrupted - */ - public synchronized byte [] blockUntilAvailable(long timeout, boolean refresh) - throws InterruptedException { - if (timeout < 0) throw new IllegalArgumentException(); - boolean notimeout = timeout == 0; - long startTime = System.currentTimeMillis(); - long remaining = timeout; - if (refresh) { - try { - // This does not create a watch if the node does not exists - this.data = ZKUtil.getDataAndWatch(watcher, node); - } catch(KeeperException e) { - // We use to abort here, but in some cases the abort is ignored ( - // (empty Abortable), so it's better to log... - LOG.warn("Unexpected exception handling blockUntilAvailable", e); - abortable.abort("Unexpected exception handling blockUntilAvailable", e); - } - } - boolean nodeExistsChecked = (!refresh ||data!=null); - while (!this.stopped && (notimeout || remaining > 0) && this.data == null) { - if (!nodeExistsChecked) { - try { - nodeExistsChecked = (ZKUtil.checkExists(watcher, node) != -1); - } catch (KeeperException e) { - LOG.warn( - "Got exception while trying to check existence in ZooKeeper" + - " of the node: "+node+", retrying if timeout not reached",e ); - } - - // It did not exists, and now it does. - if (nodeExistsChecked){ - LOG.info("Node "+node+" now exists, resetting a watcher"); - try { - // This does not create a watch if the node does not exists - this.data = ZKUtil.getDataAndWatch(watcher, node); - } catch (KeeperException e) { - LOG.warn("Unexpected exception handling blockUntilAvailable", e); - abortable.abort("Unexpected exception handling blockUntilAvailable", e); - } - } - } - // We expect a notification; but we wait with a - // a timeout to lower the impact of a race condition if any - wait(100); - remaining = timeout - (System.currentTimeMillis() - startTime); - } - return this.data; - } - - /** - * Gets the data of the node. - * - *
          If the node is currently available, the most up-to-date known version of - * the data is returned. If the node is not currently available, null is - * returned. - * @param refresh whether to refresh the data by calling ZK directly. - * @return data of the node, null if unavailable - */ - public synchronized byte [] getData(boolean refresh) { - if (refresh) { - try { - this.data = ZKUtil.getDataAndWatch(watcher, node); - } catch(KeeperException e) { - abortable.abort("Unexpected exception handling getData", e); - } - } - return this.data; - } - - public String getNode() { - return this.node; - } - - @Override - public synchronized void nodeCreated(String path) { - if (!path.equals(node)) return; - try { - byte [] data = ZKUtil.getDataAndWatch(watcher, node); - if (data != null) { - this.data = data; - notifyAll(); - } else { - nodeDeleted(path); - } - } catch(KeeperException e) { - abortable.abort("Unexpected exception handling nodeCreated event", e); - } - } - - @Override - public synchronized void nodeDeleted(String path) { - if(path.equals(node)) { - try { - if(ZKUtil.watchAndCheckExists(watcher, node)) { - nodeCreated(path); - } else { - this.data = null; - } - } catch(KeeperException e) { - abortable.abort("Unexpected exception handling nodeDeleted event", e); - } - } - } - - @Override - public synchronized void nodeDataChanged(String path) { - if(path.equals(node)) { - nodeCreated(path); - } - } - - /** - * Checks if the baseznode set as per the property 'zookeeper.znode.parent' - * exists. - * @return true if baseznode exists. - * false if doesnot exists. - */ - public boolean checkIfBaseNodeAvailable() { - try { - if (ZKUtil.checkExists(watcher, watcher.baseZNode) == -1) { - return false; - } - } catch (KeeperException e) { - abortable - .abort( - "Exception while checking if basenode ("+watcher.baseZNode+ - ") exists in ZooKeeper.", - e); - } - return true; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java deleted file mode 100644 index 128a0d9..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ /dev/null @@ -1,474 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
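For orientation while reading the tracker code above: a minimal sketch, assuming a hypothetical /hbase/example-node path and a caller-supplied Configuration and Abortable, of how the ZooKeeperNodeTracker API was typically driven by callers. It is an illustration only, not code carried by this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class NodeTrackerSketch {
  // Waits for a (hypothetical) znode to appear and returns its data, or null on timeout.
  static byte[] waitForExampleNode(Configuration conf, Abortable abortable) throws Exception {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "tracker-sketch", abortable);
    try {
      // ZooKeeperNodeTracker is abstract but declares no abstract methods, so an
      // anonymous subclass with an empty body is enough for this sketch.
      ZooKeeperNodeTracker tracker =
          new ZooKeeperNodeTracker(zkw, "/hbase/example-node", abortable) { };
      tracker.start();                                        // registers the listener, sets the initial watch
      byte[] data = tracker.blockUntilAvailable(30000, true); // wait up to 30s, refreshing from ZK first
      tracker.stop();
      return data;                                            // null if the node never appeared in time
    } finally {
      zkw.close();
    }
  }
}

The same pattern underlies the concrete trackers referenced in the class comment, such as the master-address and root-region trackers: start() wires the tracker into the watcher's listener list, and blockUntilAvailable() is the blocking read.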
- */ -package org.apache.hadoop.hbase.zookeeper; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.data.ACL; - -/** - * Acts as the single ZooKeeper Watcher. One instance of this is instantiated - * for each Master, RegionServer, and client process. - * - *
          This is the only class that implements {@link Watcher}. Other internal - * classes which need to be notified of ZooKeeper events must register with - * the local instance of this watcher via {@link #registerListener}. - * - *
          This class also holds and manages the connection to ZooKeeper. Code to - * deal with connection related events and exceptions are handled here. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { - private static final Log LOG = LogFactory.getLog(ZooKeeperWatcher.class); - - // Identifier for this watcher (for logging only). It is made of the prefix - // passed on construction and the zookeeper sessionid. - private String identifier; - - // zookeeper quorum - private String quorum; - - // zookeeper connection - private RecoverableZooKeeper recoverableZooKeeper; - - // abortable in case of zk failure - protected Abortable abortable; - - // listeners to be notified - private final List listeners = - new CopyOnWriteArrayList(); - - // Used by ZKUtil:waitForZKConnectionIfAuthenticating to wait for SASL - // negotiation to complete - public CountDownLatch saslLatch = new CountDownLatch(1); - - // node names - - // base znode for this cluster - public String baseZNode; - // znode containing location of server hosting root region - public String rootServerZNode; - // znode containing ephemeral nodes of the regionservers - public String rsZNode; - // znode containing ephemeral nodes of the draining regionservers - public String drainingZNode; - // znode of currently active master - private String masterAddressZNode; - // znode of this master in backup master directory, if not the active master - public String backupMasterAddressesZNode; - // znode containing the current cluster state - public String clusterStateZNode; - // znode used for region transitioning and assignment - public String assignmentZNode; - // znode used for table disabling/enabling - public String tableZNode; - // znode containing the unique cluster ID - public String clusterIdZNode; - // znode used for log splitting work assignment - public String splitLogZNode; - // znode containing the state of the load balancer - public String balancerZNode; - - // Certain ZooKeeper nodes need to be world-readable - public static final ArrayList CREATOR_ALL_AND_WORLD_READABLE = - new ArrayList() { { - add(new ACL(ZooDefs.Perms.READ,ZooDefs.Ids.ANYONE_ID_UNSAFE)); - add(new ACL(ZooDefs.Perms.ALL,ZooDefs.Ids.AUTH_IDS)); - }}; - - private final Configuration conf; - - private final Exception constructorCaller; - - /** - * Instantiate a ZooKeeper connection and watcher. - * @param descriptor Descriptive string that is added to zookeeper sessionid - * and used as identifier for this instance. - * @throws IOException - * @throws ZooKeeperConnectionException - */ - public ZooKeeperWatcher(Configuration conf, String descriptor, - Abortable abortable) throws ZooKeeperConnectionException, IOException { - this(conf, descriptor, abortable, false); - } - /** - * Instantiate a ZooKeeper connection and watcher. - * @param descriptor Descriptive string that is added to zookeeper sessionid - * and used as identifier for this instance. - * @throws IOException - * @throws ZooKeeperConnectionException - */ - public ZooKeeperWatcher(Configuration conf, String descriptor, - Abortable abortable, boolean canCreateBaseZNode) - throws IOException, ZooKeeperConnectionException { - this.conf = conf; - // Capture a stack trace now. Will print it out later if problem so we can - // distingush amongst the myriad ZKWs. 
- try { - throw new Exception("ZKW CONSTRUCTOR STACK TRACE FOR DEBUGGING"); - } catch (Exception e) { - this.constructorCaller = e; - } - this.quorum = ZKConfig.getZKQuorumServersString(conf); - // Identifier will get the sessionid appended later below down when we - // handle the syncconnect event. - this.identifier = descriptor; - this.abortable = abortable; - setNodeNames(conf); - this.recoverableZooKeeper = ZKUtil.connect(conf, quorum, this, descriptor); - if (canCreateBaseZNode) { - createBaseZNodes(); - } - } - - private void createBaseZNodes() throws ZooKeeperConnectionException { - try { - // Create all the necessary "directories" of znodes - ZKUtil.createAndFailSilent(this, baseZNode); - ZKUtil.createAndFailSilent(this, assignmentZNode); - ZKUtil.createAndFailSilent(this, rsZNode); - ZKUtil.createAndFailSilent(this, drainingZNode); - ZKUtil.createAndFailSilent(this, tableZNode); - ZKUtil.createAndFailSilent(this, splitLogZNode); - ZKUtil.createAndFailSilent(this, backupMasterAddressesZNode); - } catch (KeeperException e) { - throw new ZooKeeperConnectionException( - prefix("Unexpected KeeperException creating base node"), e); - } - } - - @Override - public String toString() { - return this.identifier; - } - - /** - * Adds this instance's identifier as a prefix to the passed str - * @param str String to amend. - * @return A new string with this instance's identifier as prefix: e.g. - * if passed 'hello world', the returned string could be - */ - public String prefix(final String str) { - return this.toString() + " " + str; - } - - /** - * Set the local variable node names using the specified configuration. - */ - private void setNodeNames(Configuration conf) { - baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, - HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - rootServerZNode = ZKUtil.joinZNode(baseZNode, - conf.get("zookeeper.znode.rootserver", "root-region-server")); - rsZNode = ZKUtil.joinZNode(baseZNode, - conf.get("zookeeper.znode.rs", "rs")); - drainingZNode = ZKUtil.joinZNode(baseZNode, - conf.get("zookeeper.znode.draining.rs", "draining")); - masterAddressZNode = ZKUtil.joinZNode(baseZNode, - conf.get("zookeeper.znode.master", "master")); - backupMasterAddressesZNode = ZKUtil.joinZNode(baseZNode, - conf.get("zookeeper.znode.backup.masters", "backup-masters")); - clusterStateZNode = ZKUtil.joinZNode(baseZNode, - conf.get("zookeeper.znode.state", "shutdown")); - assignmentZNode = ZKUtil.joinZNode(baseZNode, - conf.get("zookeeper.znode.unassigned", "unassigned")); - tableZNode = ZKUtil.joinZNode(baseZNode, - conf.get("zookeeper.znode.tableEnableDisable", "table")); - clusterIdZNode = ZKUtil.joinZNode(baseZNode, - conf.get("zookeeper.znode.clusterId", "hbaseid")); - splitLogZNode = ZKUtil.joinZNode(baseZNode, - conf.get("zookeeper.znode.splitlog", HConstants.SPLIT_LOGDIR_NAME)); - balancerZNode = ZKUtil.joinZNode(baseZNode, - conf.get("zookeeper.znode.balancer", "balancer")); - } - - /** - * Register the specified listener to receive ZooKeeper events. - * @param listener - */ - public void registerListener(ZooKeeperListener listener) { - listeners.add(listener); - } - - /** - * Register the specified listener to receive ZooKeeper events and add it as - * the first in the list of current listeners. - * @param listener - */ - public void registerListenerFirst(ZooKeeperListener listener) { - listeners.add(0, listener); - } - - /** - * Get the connection to ZooKeeper. 
- * @return connection reference to zookeeper - */ - public RecoverableZooKeeper getRecoverableZooKeeper() { - return recoverableZooKeeper; - } - - public void reconnectAfterExpiration() throws IOException, InterruptedException { - recoverableZooKeeper.reconnectAfterExpiration(); - } - - /** - * Get the quorum address of this instance. - * @return quorum string of this zookeeper connection instance - */ - public String getQuorum() { - return quorum; - } - - /** - * Method called from ZooKeeper for events and connection status. - *
          - * Valid events are passed along to listeners. Connection status changes - * are dealt with locally. - */ - @Override - public void process(WatchedEvent event) { - LOG.debug(prefix("Received ZooKeeper Event, " + - "type=" + event.getType() + ", " + - "state=" + event.getState() + ", " + - "path=" + event.getPath())); - - switch(event.getType()) { - - // If event type is NONE, this is a connection status change - case None: { - connectionEvent(event); - break; - } - - // Otherwise pass along to the listeners - - case NodeCreated: { - for(ZooKeeperListener listener : listeners) { - listener.nodeCreated(event.getPath()); - } - break; - } - - case NodeDeleted: { - for(ZooKeeperListener listener : listeners) { - listener.nodeDeleted(event.getPath()); - } - break; - } - - case NodeDataChanged: { - for(ZooKeeperListener listener : listeners) { - listener.nodeDataChanged(event.getPath()); - } - break; - } - - case NodeChildrenChanged: { - for(ZooKeeperListener listener : listeners) { - listener.nodeChildrenChanged(event.getPath()); - } - break; - } - } - } - - // Connection management - - /** - * Called when there is a connection-related event via the Watcher callback. - *
          - * If Disconnected or Expired, this should shutdown the cluster. But, since - * we send a KeeperException.SessionExpiredException along with the abort - * call, it's possible for the Abortable to catch it and try to create a new - * session with ZooKeeper. This is what the client does in HCM. - *
          - * @param event - */ - private void connectionEvent(WatchedEvent event) { - switch(event.getState()) { - case SyncConnected: - // Now, this callback can be invoked before the this.zookeeper is set. - // Wait a little while. - long finished = System.currentTimeMillis() + - this.conf.getLong("hbase.zookeeper.watcher.sync.connected.wait", 2000); - while (System.currentTimeMillis() < finished) { - Threads.sleep(1); - if (this.recoverableZooKeeper != null) break; - } - if (this.recoverableZooKeeper == null) { - LOG.error("ZK is null on connection event -- see stack trace " + - "for the stack trace when constructor was called on this zkw", - this.constructorCaller); - throw new NullPointerException("ZK is null"); - } - this.identifier = this.identifier + "-0x" + - Long.toHexString(this.recoverableZooKeeper.getSessionId()); - // Update our identifier. Otherwise ignore. - LOG.debug(this.identifier + " connected"); - break; - - case SaslAuthenticated: - if (ZKUtil.isSecureZooKeeper(this.conf)) { - // We are authenticated, clients can proceed. - saslLatch.countDown(); - } - break; - - case AuthFailed: - if (ZKUtil.isSecureZooKeeper(this.conf)) { - // We could not be authenticated, but clients should proceed anyway. - // Only access to znodes that require SASL authentication will be - // denied. The client may never need to access them. - saslLatch.countDown(); - } - break; - - // Abort the server if Disconnected or Expired - case Disconnected: - LOG.debug(prefix("Received Disconnected from ZooKeeper, ignoring")); - break; - - case Expired: - if (ZKUtil.isSecureZooKeeper(this.conf)) { - // We consider Expired equivalent to AuthFailed for this - // connection. Authentication is never going to complete. The - // client should proceed to do cleanup. - saslLatch.countDown(); - } - String msg = prefix(this.identifier + " received expired from " + - "ZooKeeper, aborting"); - // TODO: One thought is to add call to ZooKeeperListener so say, - // ZooKeeperNodeTracker can zero out its data values. - if (this.abortable != null) this.abortable.abort(msg, - new KeeperException.SessionExpiredException()); - break; - - case ConnectedReadOnly: - break; - - default: - throw new IllegalStateException("Received event is not valid."); - } - } - - /** - * Forces a synchronization of this ZooKeeper client connection. - *
          - * Executing this method before running other methods will ensure that the - * subsequent operations are up-to-date and consistent as of the time that - * the sync is complete. - *
          - * This is used for compareAndSwap type operations where we need to read the - * data of an existing node and delete or transition that node, utilizing the - * previously read version and data. We want to ensure that the version read - * is up-to-date from when we begin the operation. - */ - public void sync(String path) { - this.recoverableZooKeeper.sync(path, null, null); - } - - /** - * Handles KeeperExceptions in client calls. - *
          - * This may be temporary but for now this gives one place to deal with these. - *
          - * TODO: Currently this method rethrows the exception to let the caller handle - *
          - * @param ke - * @throws KeeperException - */ - public void keeperException(KeeperException ke) - throws KeeperException { - LOG.error(prefix("Received unexpected KeeperException, re-throwing exception"), ke); - throw ke; - } - - /** - * Handles InterruptedExceptions in client calls. - *
          - * This may be temporary but for now this gives one place to deal with these. - *
          - * TODO: Currently, this method does nothing. - * Is this ever expected to happen? Do we abort or can we let it run? - * Maybe this should be logged as WARN? It shouldn't happen? - *
          - * @param ie - */ - public void interruptedException(InterruptedException ie) { - LOG.debug(prefix("Received InterruptedException, doing nothing here"), ie); - // At least preserver interrupt. - Thread.currentThread().interrupt(); - // no-op - } - - /** - * Close the connection to ZooKeeper. - * - * @throws InterruptedException - */ - public void close() { - try { - if (recoverableZooKeeper != null) { - recoverableZooKeeper.close(); - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - - public Configuration getConfiguration() { - return conf; - } - - @Override - public void abort(String why, Throwable e) { - this.abortable.abort(why, e); - } - - @Override - public boolean isAborted() { - return this.abortable.isAborted(); - } - - /** - * @return Path to the currently active master. - */ - public String getMasterAddressZNode() { - return this.masterAddressZNode; - } -} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java deleted file mode 100644 index 8c5abba..0000000 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java +++ /dev/null @@ -1,168 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
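Taken together with the ZKUtil helpers earlier in this patch, a typical caller-side sequence looked roughly like the sketch below: one ZooKeeperWatcher per process, persistent parent znodes created up front, and an ephemeral member znode published with a watch. The paths and the "zkutil-sketch" identifier are illustrative assumptions, not values from this change.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class ZKUtilSketch {
  static void publishEphemeralMember(Configuration conf, Abortable abortable) throws Exception {
    // One watcher per process; it owns the connection and dispatches events to listeners.
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "zkutil-sketch", abortable);
    try {
      // Ensure the (assumed) parent path exists: persistent, silent if already present.
      ZKUtil.createWithParents(zkw, "/hbase/example-members");
      // Publish an ephemeral child and leave a watch on it; returns false if it
      // already existed, and a watch is set in both cases.
      boolean created = ZKUtil.createEphemeralNodeAndWatch(
          zkw, "/hbase/example-members/member-1", Bytes.toBytes("hello"));
      System.out.println("created new member znode: " + created);
      // Clean up without caring whether the node is still there.
      ZKUtil.deleteNodeFailSilent(zkw, "/hbase/example-members/member-1");
    } finally {
      zkw.close();
    }
  }
}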
- */ - -package org.apache.hadoop.hbase.client; - -import java.util.Arrays; - -import org.apache.hadoop.hbase.SmallTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.junit.Assert; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(SmallTests.class) -public class TestAttributes { - @Test - public void testPutAttributes() { - Put put = new Put(new byte [] {}); - Assert.assertTrue(put.getAttributesMap().isEmpty()); - Assert.assertNull(put.getAttribute("absent")); - - put.setAttribute("absent", null); - Assert.assertTrue(put.getAttributesMap().isEmpty()); - Assert.assertNull(put.getAttribute("absent")); - - // adding attribute - put.setAttribute("attribute1", Bytes.toBytes("value1")); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), put.getAttribute("attribute1"))); - Assert.assertEquals(1, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), put.getAttributesMap().get("attribute1"))); - - // overriding attribute value - put.setAttribute("attribute1", Bytes.toBytes("value12")); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), put.getAttribute("attribute1"))); - Assert.assertEquals(1, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), put.getAttributesMap().get("attribute1"))); - - // adding another attribute - put.setAttribute("attribute2", Bytes.toBytes("value2")); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), put.getAttribute("attribute2"))); - Assert.assertEquals(2, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), put.getAttributesMap().get("attribute2"))); - - // removing attribute - put.setAttribute("attribute2", null); - Assert.assertNull(put.getAttribute("attribute2")); - Assert.assertEquals(1, put.getAttributesMap().size()); - Assert.assertNull(put.getAttributesMap().get("attribute2")); - - // removing non-existed attribute - put.setAttribute("attribute2", null); - Assert.assertNull(put.getAttribute("attribute2")); - Assert.assertEquals(1, put.getAttributesMap().size()); - Assert.assertNull(put.getAttributesMap().get("attribute2")); - - // removing another attribute - put.setAttribute("attribute1", null); - Assert.assertNull(put.getAttribute("attribute1")); - Assert.assertTrue(put.getAttributesMap().isEmpty()); - Assert.assertNull(put.getAttributesMap().get("attribute1")); - } - - - @Test - public void testDeleteAttributes() { - Delete del = new Delete(new byte [] {}); - Assert.assertTrue(del.getAttributesMap().isEmpty()); - Assert.assertNull(del.getAttribute("absent")); - - del.setAttribute("absent", null); - Assert.assertTrue(del.getAttributesMap().isEmpty()); - Assert.assertNull(del.getAttribute("absent")); - - // adding attribute - del.setAttribute("attribute1", Bytes.toBytes("value1")); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), del.getAttribute("attribute1"))); - Assert.assertEquals(1, del.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), del.getAttributesMap().get("attribute1"))); - - // overriding attribute value - del.setAttribute("attribute1", Bytes.toBytes("value12")); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), del.getAttribute("attribute1"))); - Assert.assertEquals(1, del.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), del.getAttributesMap().get("attribute1"))); - - // adding another attribute - del.setAttribute("attribute2", Bytes.toBytes("value2")); - 
Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), del.getAttribute("attribute2"))); - Assert.assertEquals(2, del.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), del.getAttributesMap().get("attribute2"))); - - // removing attribute - del.setAttribute("attribute2", null); - Assert.assertNull(del.getAttribute("attribute2")); - Assert.assertEquals(1, del.getAttributesMap().size()); - Assert.assertNull(del.getAttributesMap().get("attribute2")); - - // removing non-existed attribute - del.setAttribute("attribute2", null); - Assert.assertNull(del.getAttribute("attribute2")); - Assert.assertEquals(1, del.getAttributesMap().size()); - Assert.assertNull(del.getAttributesMap().get("attribute2")); - - // removing another attribute - del.setAttribute("attribute1", null); - Assert.assertNull(del.getAttribute("attribute1")); - Assert.assertTrue(del.getAttributesMap().isEmpty()); - Assert.assertNull(del.getAttributesMap().get("attribute1")); - } - - @Test - public void testGetId() { - Get get = new Get(null); - Assert.assertNull("Make sure id is null if unset", get.toMap().get("id")); - get.setId("myId"); - Assert.assertEquals("myId", get.toMap().get("id")); - } - - @Test - public void testAppendId() { - Append append = new Append(Bytes.toBytes("testRow")); - Assert.assertNull("Make sure id is null if unset", append.toMap().get("id")); - append.setId("myId"); - Assert.assertEquals("myId", append.toMap().get("id")); - } - - @Test - public void testDeleteId() { - Delete delete = new Delete(new byte [] {}); - Assert.assertNull("Make sure id is null if unset", delete.toMap().get("id")); - delete.setId("myId"); - Assert.assertEquals("myId", delete.toMap().get("id")); - } - - @Test - public void testPutId() { - Put put = new Put(new byte [] {}); - Assert.assertNull("Make sure id is null if unset", put.toMap().get("id")); - put.setId("myId"); - Assert.assertEquals("myId", put.toMap().get("id")); - } - - @Test - public void testScanId() { - Scan scan = new Scan(); - Assert.assertNull("Make sure id is null if unset", scan.toMap().get("id")); - scan.setId("myId"); - Assert.assertEquals("myId", scan.toMap().get("id")); - } - -} - diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGet.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGet.java deleted file mode 100644 index 3d01d1b..0000000 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGet.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Set; - -import org.apache.hadoop.hbase.SmallTests; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.util.Bytes; -import org.junit.Assert; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -// TODO: cover more test cases -@Category(SmallTests.class) -public class TestGet { - @Test - public void testAttributesSerialization() throws IOException { - Get get = new Get(Bytes.toBytes("row")); - get.setAttribute("attribute1", Bytes.toBytes("value1")); - get.setAttribute("attribute2", Bytes.toBytes("value2")); - get.setAttribute("attribute3", Bytes.toBytes("value3")); - - ClientProtos.Get getProto = ProtobufUtil.toGet(get); - - Get get2 = ProtobufUtil.toGet(getProto); - Assert.assertNull(get2.getAttribute("absent")); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get2.getAttribute("attribute1"))); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get2.getAttribute("attribute2"))); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value3"), get2.getAttribute("attribute3"))); - Assert.assertEquals(3, get2.getAttributesMap().size()); - } - - @Test - public void testGetAttributes() { - Get get = new Get(null); - Assert.assertTrue(get.getAttributesMap().isEmpty()); - Assert.assertNull(get.getAttribute("absent")); - - get.setAttribute("absent", null); - Assert.assertTrue(get.getAttributesMap().isEmpty()); - Assert.assertNull(get.getAttribute("absent")); - - // adding attribute - get.setAttribute("attribute1", Bytes.toBytes("value1")); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get.getAttribute("attribute1"))); - Assert.assertEquals(1, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get.getAttributesMap().get("attribute1"))); - - // overriding attribute value - get.setAttribute("attribute1", Bytes.toBytes("value12")); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), get.getAttribute("attribute1"))); - Assert.assertEquals(1, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), get.getAttributesMap().get("attribute1"))); - - // adding another attribute - get.setAttribute("attribute2", Bytes.toBytes("value2")); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get.getAttribute("attribute2"))); - Assert.assertEquals(2, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get.getAttributesMap().get("attribute2"))); - - // removing attribute - get.setAttribute("attribute2", null); - Assert.assertNull(get.getAttribute("attribute2")); - Assert.assertEquals(1, get.getAttributesMap().size()); - Assert.assertNull(get.getAttributesMap().get("attribute2")); - - // removing non-existed attribute - get.setAttribute("attribute2", null); - Assert.assertNull(get.getAttribute("attribute2")); - Assert.assertEquals(1, get.getAttributesMap().size()); - Assert.assertNull(get.getAttributesMap().get("attribute2")); - - // removing another attribute - get.setAttribute("attribute1", null); - Assert.assertNull(get.getAttribute("attribute1")); - Assert.assertTrue(get.getAttributesMap().isEmpty()); - Assert.assertNull(get.getAttributesMap().get("attribute1")); - } - - @Test - public void testNullQualifier() { - Get get = new Get(null); - byte[] family = Bytes.toBytes("family"); - get.addColumn(family, null); - 
Set qualifiers = get.getFamilyMap().get(family); - Assert.assertEquals(1, qualifiers.size()); - } -} - diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java deleted file mode 100644 index 2abd527..0000000 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java +++ /dev/null @@ -1,372 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -import org.apache.hadoop.hbase.SmallTests; -import org.junit.Test; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hbase.filter.BinaryComparator; -import org.apache.hadoop.hbase.filter.ColumnCountGetFilter; -import org.apache.hadoop.hbase.filter.ColumnPaginationFilter; -import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; -import org.apache.hadoop.hbase.filter.ColumnRangeFilter; -import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; -import org.apache.hadoop.hbase.filter.DependentColumnFilter; -import org.apache.hadoop.hbase.filter.FamilyFilter; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.filter.FilterList; -import org.apache.hadoop.hbase.filter.FilterList.Operator; -import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; -import org.apache.hadoop.hbase.filter.InclusiveStopFilter; -import org.apache.hadoop.hbase.filter.KeyOnlyFilter; -import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter; -import org.apache.hadoop.hbase.filter.PageFilter; -import org.apache.hadoop.hbase.filter.PrefixFilter; -import org.apache.hadoop.hbase.filter.QualifierFilter; -import org.apache.hadoop.hbase.filter.RowFilter; -import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; -import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; -import org.apache.hadoop.hbase.filter.SkipFilter; -import org.apache.hadoop.hbase.filter.TimestampsFilter; -import org.apache.hadoop.hbase.filter.ValueFilter; -import org.apache.hadoop.hbase.filter.WhileMatchFilter; -import org.apache.hadoop.hbase.util.Bytes; - -import org.codehaus.jackson.map.ObjectMapper; -import org.junit.experimental.categories.Category; - -/** - * Run tests that use the functionality of the Operation superclass for - * Puts, Gets, Deletes, Scans, and MultiPuts. 
- */ -@Category(SmallTests.class) -public class TestOperation { - private static byte [] ROW = Bytes.toBytes("testRow"); - private static byte [] FAMILY = Bytes.toBytes("testFamily"); - private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); - private static byte [] VALUE = Bytes.toBytes("testValue"); - - private static ObjectMapper mapper = new ObjectMapper(); - - private static List TS_LIST = Arrays.asList(2L, 3L, 5L); - private static TimestampsFilter TS_FILTER = new TimestampsFilter(TS_LIST); - private static String STR_TS_FILTER = - TS_FILTER.getClass().getSimpleName() + " (3/3): [2, 3, 5]"; - - private static List L_TS_LIST = - Arrays.asList(0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L); - private static TimestampsFilter L_TS_FILTER = - new TimestampsFilter(L_TS_LIST); - private static String STR_L_TS_FILTER = - L_TS_FILTER.getClass().getSimpleName() + " (5/11): [0, 1, 2, 3, 4]"; - - private static String COL_NAME_1 = "col1"; - private static ColumnPrefixFilter COL_PRE_FILTER = - new ColumnPrefixFilter(COL_NAME_1.getBytes()); - private static String STR_COL_PRE_FILTER = - COL_PRE_FILTER.getClass().getSimpleName() + " " + COL_NAME_1; - - private static String COL_NAME_2 = "col2"; - private static ColumnRangeFilter CR_FILTER = new ColumnRangeFilter( - COL_NAME_1.getBytes(), true, COL_NAME_2.getBytes(), false); - private static String STR_CR_FILTER = CR_FILTER.getClass().getSimpleName() - + " [" + COL_NAME_1 + ", " + COL_NAME_2 + ")"; - - private static int COL_COUNT = 9; - private static ColumnCountGetFilter CCG_FILTER = - new ColumnCountGetFilter(COL_COUNT); - private static String STR_CCG_FILTER = - CCG_FILTER.getClass().getSimpleName() + " " + COL_COUNT; - - private static int LIMIT = 3; - private static int OFFSET = 4; - private static ColumnPaginationFilter CP_FILTER = - new ColumnPaginationFilter(LIMIT, OFFSET); - private static String STR_CP_FILTER = CP_FILTER.getClass().getSimpleName() - + " (" + LIMIT + ", " + OFFSET + ")"; - - private static String STOP_ROW_KEY = "stop"; - private static InclusiveStopFilter IS_FILTER = - new InclusiveStopFilter(STOP_ROW_KEY.getBytes()); - private static String STR_IS_FILTER = - IS_FILTER.getClass().getSimpleName() + " " + STOP_ROW_KEY; - - private static String PREFIX = "prefix"; - private static PrefixFilter PREFIX_FILTER = - new PrefixFilter(PREFIX.getBytes()); - private static String STR_PREFIX_FILTER = "PrefixFilter " + PREFIX; - - private static byte[][] PREFIXES = { - "0".getBytes(), "1".getBytes(), "2".getBytes()}; - private static MultipleColumnPrefixFilter MCP_FILTER = - new MultipleColumnPrefixFilter(PREFIXES); - private static String STR_MCP_FILTER = - MCP_FILTER.getClass().getSimpleName() + " (3/3): [0, 1, 2]"; - - private static byte[][] L_PREFIXES = { - "0".getBytes(), "1".getBytes(), "2".getBytes(), "3".getBytes(), - "4".getBytes(), "5".getBytes(), "6".getBytes(), "7".getBytes()}; - private static MultipleColumnPrefixFilter L_MCP_FILTER = - new MultipleColumnPrefixFilter(L_PREFIXES); - private static String STR_L_MCP_FILTER = - L_MCP_FILTER.getClass().getSimpleName() + " (5/8): [0, 1, 2, 3, 4]"; - - private static int PAGE_SIZE = 9; - private static PageFilter PAGE_FILTER = new PageFilter(PAGE_SIZE); - private static String STR_PAGE_FILTER = - PAGE_FILTER.getClass().getSimpleName() + " " + PAGE_SIZE; - - private static SkipFilter SKIP_FILTER = new SkipFilter(L_TS_FILTER); - private static String STR_SKIP_FILTER = - SKIP_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER; - - private static WhileMatchFilter 
WHILE_FILTER = - new WhileMatchFilter(L_TS_FILTER); - private static String STR_WHILE_FILTER = - WHILE_FILTER.getClass().getSimpleName() + " " + STR_L_TS_FILTER; - - private static KeyOnlyFilter KEY_ONLY_FILTER = new KeyOnlyFilter(); - private static String STR_KEY_ONLY_FILTER = - KEY_ONLY_FILTER.getClass().getSimpleName(); - - private static FirstKeyOnlyFilter FIRST_KEY_ONLY_FILTER = - new FirstKeyOnlyFilter(); - private static String STR_FIRST_KEY_ONLY_FILTER = - FIRST_KEY_ONLY_FILTER.getClass().getSimpleName(); - - private static CompareOp CMP_OP = CompareOp.EQUAL; - private static byte[] CMP_VALUE = "value".getBytes(); - private static BinaryComparator BC = new BinaryComparator(CMP_VALUE); - private static DependentColumnFilter DC_FILTER = - new DependentColumnFilter(FAMILY, QUALIFIER, true, CMP_OP, BC); - private static String STR_DC_FILTER = String.format( - "%s (%s, %s, %s, %s, %s)", DC_FILTER.getClass().getSimpleName(), - Bytes.toStringBinary(FAMILY), Bytes.toStringBinary(QUALIFIER), true, - CMP_OP.name(), Bytes.toStringBinary(BC.getValue())); - - private static FamilyFilter FAMILY_FILTER = new FamilyFilter(CMP_OP, BC); - private static String STR_FAMILY_FILTER = - FAMILY_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; - - private static QualifierFilter QUALIFIER_FILTER = - new QualifierFilter(CMP_OP, BC); - private static String STR_QUALIFIER_FILTER = - QUALIFIER_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; - - private static RowFilter ROW_FILTER = new RowFilter(CMP_OP, BC); - private static String STR_ROW_FILTER = - ROW_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; - - private static ValueFilter VALUE_FILTER = new ValueFilter(CMP_OP, BC); - private static String STR_VALUE_FILTER = - VALUE_FILTER.getClass().getSimpleName() + " (EQUAL, value)"; - - private static SingleColumnValueFilter SCV_FILTER = - new SingleColumnValueFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); - private static String STR_SCV_FILTER = String.format("%s (%s, %s, %s, %s)", - SCV_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), - Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), - Bytes.toStringBinary(CMP_VALUE)); - - private static SingleColumnValueExcludeFilter SCVE_FILTER = - new SingleColumnValueExcludeFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); - private static String STR_SCVE_FILTER = String.format("%s (%s, %s, %s, %s)", - SCVE_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), - Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), - Bytes.toStringBinary(CMP_VALUE)); - - private static FilterList AND_FILTER_LIST = new FilterList( - Operator.MUST_PASS_ALL, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, - CR_FILTER)); - private static String STR_AND_FILTER_LIST = String.format( - "%s AND (3/3): [%s, %s, %s]", AND_FILTER_LIST.getClass().getSimpleName(), - STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); - - private static FilterList OR_FILTER_LIST = new FilterList( - Operator.MUST_PASS_ONE, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, - CR_FILTER)); - private static String STR_OR_FILTER_LIST = String.format( - "%s OR (3/3): [%s, %s, %s]", AND_FILTER_LIST.getClass().getSimpleName(), - STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); - - private static FilterList L_FILTER_LIST = new FilterList( - Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER, COL_PRE_FILTER, - CCG_FILTER, CP_FILTER, PREFIX_FILTER, PAGE_FILTER)); - private static String STR_L_FILTER_LIST = String.format( - "%s AND (5/8): [%s, %s, %s, %s, %s]", - 
L_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, - STR_CR_FILTER, STR_COL_PRE_FILTER, STR_CCG_FILTER, STR_CP_FILTER); - - private static Filter[] FILTERS = { - TS_FILTER, // TimestampsFilter - L_TS_FILTER, // TimestampsFilter - COL_PRE_FILTER, // ColumnPrefixFilter - CP_FILTER, // ColumnPaginationFilter - CR_FILTER, // ColumnRangeFilter - CCG_FILTER, // ColumnCountGetFilter - IS_FILTER, // InclusiveStopFilter - PREFIX_FILTER, // PrefixFilter - PAGE_FILTER, // PageFilter - SKIP_FILTER, // SkipFilter - WHILE_FILTER, // WhileMatchFilter - KEY_ONLY_FILTER, // KeyOnlyFilter - FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter - MCP_FILTER, // MultipleColumnPrefixFilter - L_MCP_FILTER, // MultipleColumnPrefixFilter - DC_FILTER, // DependentColumnFilter - FAMILY_FILTER, // FamilyFilter - QUALIFIER_FILTER, // QualifierFilter - ROW_FILTER, // RowFilter - VALUE_FILTER, // ValueFilter - SCV_FILTER, // SingleColumnValueFilter - SCVE_FILTER, // SingleColumnValueExcludeFilter - AND_FILTER_LIST, // FilterList - OR_FILTER_LIST, // FilterList - L_FILTER_LIST, // FilterList - }; - - private static String[] FILTERS_INFO = { - STR_TS_FILTER, // TimestampsFilter - STR_L_TS_FILTER, // TimestampsFilter - STR_COL_PRE_FILTER, // ColumnPrefixFilter - STR_CP_FILTER, // ColumnPaginationFilter - STR_CR_FILTER, // ColumnRangeFilter - STR_CCG_FILTER, // ColumnCountGetFilter - STR_IS_FILTER, // InclusiveStopFilter - STR_PREFIX_FILTER, // PrefixFilter - STR_PAGE_FILTER, // PageFilter - STR_SKIP_FILTER, // SkipFilter - STR_WHILE_FILTER, // WhileMatchFilter - STR_KEY_ONLY_FILTER, // KeyOnlyFilter - STR_FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter - STR_MCP_FILTER, // MultipleColumnPrefixFilter - STR_L_MCP_FILTER, // MultipleColumnPrefixFilter - STR_DC_FILTER, // DependentColumnFilter - STR_FAMILY_FILTER, // FamilyFilter - STR_QUALIFIER_FILTER, // QualifierFilter - STR_ROW_FILTER, // RowFilter - STR_VALUE_FILTER, // ValueFilter - STR_SCV_FILTER, // SingleColumnValueFilter - STR_SCVE_FILTER, // SingleColumnValueExcludeFilter - STR_AND_FILTER_LIST, // FilterList - STR_OR_FILTER_LIST, // FilterList - STR_L_FILTER_LIST, // FilterList - }; - - static { - assertEquals("The sizes of static arrays do not match: " - + "[FILTERS: %d <=> FILTERS_INFO: %d]", - FILTERS.length, FILTERS_INFO.length); - } - - /** - * Test the client Operations' JSON encoding to ensure that produced JSON is - * parseable and that the details are present and not corrupted. - * @throws IOException - */ - @Test - public void testOperationJSON() - throws IOException { - // produce a Scan Operation - Scan scan = new Scan(ROW); - scan.addColumn(FAMILY, QUALIFIER); - // get its JSON representation, and parse it - String json = scan.toJSON(); - Map parsedJSON = mapper.readValue(json, HashMap.class); - // check for the row - assertEquals("startRow incorrect in Scan.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("startRow")); - // check for the family and the qualifier. 
- List familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); - assertNotNull("Family absent in Scan.toJSON()", familyInfo); - assertEquals("Qualifier absent in Scan.toJSON()", 1, familyInfo.size()); - assertEquals("Qualifier incorrect in Scan.toJSON()", - Bytes.toStringBinary(QUALIFIER), - familyInfo.get(0)); - - // produce a Get Operation - Get get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - // get its JSON representation, and parse it - json = get.toJSON(); - parsedJSON = mapper.readValue(json, HashMap.class); - // check for the row - assertEquals("row incorrect in Get.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); - // check for the family and the qualifier. - familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); - assertNotNull("Family absent in Get.toJSON()", familyInfo); - assertEquals("Qualifier absent in Get.toJSON()", 1, familyInfo.size()); - assertEquals("Qualifier incorrect in Get.toJSON()", - Bytes.toStringBinary(QUALIFIER), - familyInfo.get(0)); - - // produce a Put operation - Put put = new Put(ROW); - put.add(FAMILY, QUALIFIER, VALUE); - // get its JSON representation, and parse it - json = put.toJSON(); - parsedJSON = mapper.readValue(json, HashMap.class); - // check for the row - assertEquals("row absent in Put.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); - // check for the family and the qualifier. - familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); - assertNotNull("Family absent in Put.toJSON()", familyInfo); - assertEquals("KeyValue absent in Put.toJSON()", 1, familyInfo.size()); - Map kvMap = (Map) familyInfo.get(0); - assertEquals("Qualifier incorrect in Put.toJSON()", - Bytes.toStringBinary(QUALIFIER), - kvMap.get("qualifier")); - assertEquals("Value length incorrect in Put.toJSON()", - VALUE.length, kvMap.get("vlen")); - - // produce a Delete operation - Delete delete = new Delete(ROW); - delete.deleteColumn(FAMILY, QUALIFIER); - // get its JSON representation, and parse it - json = delete.toJSON(); - parsedJSON = mapper.readValue(json, HashMap.class); - // check for the row - assertEquals("row absent in Delete.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); - // check for the family and the qualifier. - familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); - assertNotNull("Family absent in Delete.toJSON()", familyInfo); - assertEquals("KeyValue absent in Delete.toJSON()", 1, familyInfo.size()); - kvMap = (Map) familyInfo.get(0); - assertEquals("Qualifier incorrect in Delete.toJSON()", - Bytes.toStringBinary(QUALIFIER), kvMap.get("qualifier")); - } - -} - diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java deleted file mode 100644 index 49cfcdc..0000000 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.hbase.SmallTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(SmallTests.class) -/** - * Addresses HBASE-6047 - * We test put.has call with all of its polymorphic magic - */ -public class TestPutDotHas { - - public static final byte[] ROW_01 = Bytes.toBytes("row-01"); - public static final byte[] QUALIFIER_01 = Bytes.toBytes("qualifier-01"); - public static final byte[] VALUE_01 = Bytes.toBytes("value-01"); - public static final byte[] FAMILY_01 = Bytes.toBytes("family-01"); - public static final long TS = 1234567L; - public Put put = new Put(ROW_01); - - @Before - public void setUp() { - put.add(FAMILY_01, QUALIFIER_01, TS, VALUE_01); - } - - @Test - public void testHasIgnoreValueIgnoreTS() { - Assert.assertTrue(put.has(FAMILY_01, QUALIFIER_01)); - Assert.assertFalse(put.has(QUALIFIER_01, FAMILY_01)); - } - - @Test - public void testHasIgnoreValue() { - Assert.assertTrue(put.has(FAMILY_01, QUALIFIER_01, TS)); - Assert.assertFalse(put.has(FAMILY_01, QUALIFIER_01, TS + 1)); - } - - @Test - public void testHasIgnoreTS() { - Assert.assertTrue(put.has(FAMILY_01, QUALIFIER_01, VALUE_01)); - Assert.assertFalse(put.has(FAMILY_01, VALUE_01, QUALIFIER_01)); - } - - @Test - public void testHas() { - Assert.assertTrue(put.has(FAMILY_01, QUALIFIER_01, TS, VALUE_01)); - // Bad TS - Assert.assertFalse(put.has(FAMILY_01, QUALIFIER_01, TS + 1, VALUE_01)); - // Bad Value - Assert.assertFalse(put.has(FAMILY_01, QUALIFIER_01, TS, QUALIFIER_01)); - // Bad Family - Assert.assertFalse(put.has(QUALIFIER_01, QUALIFIER_01, TS, VALUE_01)); - // Bad Qual - Assert.assertFalse(put.has(FAMILY_01, FAMILY_01, TS, VALUE_01)); - } -} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScan.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScan.java deleted file mode 100644 index 9565764..0000000 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScan.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Set; - -import org.apache.hadoop.hbase.SmallTests; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.util.Bytes; -import org.junit.Assert; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -// TODO: cover more test cases -@Category(SmallTests.class) -public class TestScan { - @Test - public void testAttributesSerialization() throws IOException { - Scan scan = new Scan(); - scan.setAttribute("attribute1", Bytes.toBytes("value1")); - scan.setAttribute("attribute2", Bytes.toBytes("value2")); - scan.setAttribute("attribute3", Bytes.toBytes("value3")); - - ClientProtos.Scan scanProto = ProtobufUtil.toScan(scan); - - Scan scan2 = ProtobufUtil.toScan(scanProto); - - Assert.assertNull(scan2.getAttribute("absent")); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan2.getAttribute("attribute1"))); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan2.getAttribute("attribute2"))); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value3"), scan2.getAttribute("attribute3"))); - Assert.assertEquals(3, scan2.getAttributesMap().size()); - } - - @Test - public void testScanAttributes() { - Scan scan = new Scan(); - Assert.assertTrue(scan.getAttributesMap().isEmpty()); - Assert.assertNull(scan.getAttribute("absent")); - - scan.setAttribute("absent", null); - Assert.assertTrue(scan.getAttributesMap().isEmpty()); - Assert.assertNull(scan.getAttribute("absent")); - - // adding attribute - scan.setAttribute("attribute1", Bytes.toBytes("value1")); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan.getAttribute("attribute1"))); - Assert.assertEquals(1, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan.getAttributesMap().get("attribute1"))); - - // overriding attribute value - scan.setAttribute("attribute1", Bytes.toBytes("value12")); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), scan.getAttribute("attribute1"))); - Assert.assertEquals(1, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), scan.getAttributesMap().get("attribute1"))); - - // adding another attribute - scan.setAttribute("attribute2", Bytes.toBytes("value2")); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan.getAttribute("attribute2"))); - Assert.assertEquals(2, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan.getAttributesMap().get("attribute2"))); - - // removing attribute - scan.setAttribute("attribute2", null); - Assert.assertNull(scan.getAttribute("attribute2")); - Assert.assertEquals(1, scan.getAttributesMap().size()); - Assert.assertNull(scan.getAttributesMap().get("attribute2")); - - // removing non-existed attribute - scan.setAttribute("attribute2", null); - Assert.assertNull(scan.getAttribute("attribute2")); - Assert.assertEquals(1, scan.getAttributesMap().size()); - Assert.assertNull(scan.getAttributesMap().get("attribute2")); - - // removing another attribute - scan.setAttribute("attribute1", null); - Assert.assertNull(scan.getAttribute("attribute1")); - Assert.assertTrue(scan.getAttributesMap().isEmpty()); - Assert.assertNull(scan.getAttributesMap().get("attribute1")); - } - - @Test - public void testNullQualifier() { - Scan scan = new Scan(); - byte[] family = Bytes.toBytes("family"); - 
scan.addColumn(family, null); - Set qualifiers = scan.getFamilyMap().get(family); - Assert.assertEquals(1, qualifiers.size()); - } -} - diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java index b1da9c5..cc510af 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java @@ -34,12 +34,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex; -import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.io.BytesWritable; @@ -168,7 +168,7 @@ public class CreateRandomStoreFile { BLOOM_FILTER_OPTION)); } - int blockSize = HFile.DEFAULT_BLOCKSIZE; + int blockSize = HConstants.DEFAULT_BLOCKSIZE; if (cmdLine.hasOption(BLOCK_SIZE_OPTION)) blockSize = Integer.valueOf(cmdLine.getOptionValue(BLOCK_SIZE_OPTION)); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index 7ccd7a8..264ec55 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.SmallTests; @@ -49,7 +50,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder; -import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; @@ -925,7 +925,7 @@ public class TestStoreFile extends HBaseTestCase { dataBlockEncoderAlgo); cacheConf = new CacheConfig(conf); StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs, - HFile.DEFAULT_BLOCKSIZE) + HConstants.DEFAULT_BLOCKSIZE) .withFilePath(path) .withDataBlockEncoder(dataBlockEncoder) .withMaxKeyCount(2000) -- 1.7.10.2 (Apple Git-33)
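
Editorial note, not part of the patch: the relocated TestOperation asserts that a client Operation's toJSON() output is parseable and preserves row, family, and qualifier details. A minimal sketch of that round trip, assuming the same 0.95-era client API and codehaus Jackson ObjectMapper the deleted test imports:

import java.util.Map;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.codehaus.jackson.map.ObjectMapper;

public class ScanJsonCheck {
  public static void main(String[] args) throws Exception {
    byte[] row = Bytes.toBytes("testRow");
    byte[] family = Bytes.toBytes("testFamily");
    byte[] qualifier = Bytes.toBytes("testQualifier");

    // Build a Scan and ask for its JSON representation, as the deleted test does.
    Scan scan = new Scan(row);
    scan.addColumn(family, qualifier);
    String json = scan.toJSON();

    // Parse it back with Jackson and spot-check that the start row survived.
    Map<?, ?> parsed = new ObjectMapper().readValue(json, Map.class);
    System.out.println("startRow matches: "
        + Bytes.toStringBinary(row).equals(parsed.get("startRow")));
  }
}

The same pattern applies to Get, Put, and Delete, whose "families" maps the test inspects in the hunks above.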
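
Similarly, TestPutDotHas (HBASE-6047) covers the four Put.has overloads, which narrow the match from family/qualifier only down to family, qualifier, timestamp, and value together. A small sketch under the same client API:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutHasCheck {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("row-01");
    byte[] family = Bytes.toBytes("family-01");
    byte[] qualifier = Bytes.toBytes("qualifier-01");
    byte[] value = Bytes.toBytes("value-01");
    long ts = 1234567L;

    Put put = new Put(row);
    put.add(family, qualifier, ts, value);

    // Each overload adds one more constraint to the match.
    System.out.println(put.has(family, qualifier));             // true
    System.out.println(put.has(family, qualifier, ts));         // true
    System.out.println(put.has(family, qualifier, value));      // true
    System.out.println(put.has(family, qualifier, ts, value));  // true
    System.out.println(put.has(family, qualifier, ts + 1));     // false: wrong timestamp
  }
}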
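
TestScan, also moving to the client module, checks that Scan attributes survive the protobuf round trip and that setting an attribute to null removes it. A condensed sketch of that behavior, assuming ProtobufUtil and ClientProtos as imported by the deleted test:

import java.util.Arrays;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanAttributeRoundTrip {
  public static void main(String[] args) throws Exception {
    Scan scan = new Scan();
    scan.setAttribute("attribute1", Bytes.toBytes("value1"));

    // Convert to the protobuf message and back, as the deleted test does.
    ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
    Scan restored = ProtobufUtil.toScan(proto);

    // The attribute survives the round trip; a null value removes it again.
    System.out.println(Arrays.equals(Bytes.toBytes("value1"),
        restored.getAttribute("attribute1")));
    restored.setAttribute("attribute1", null);
    System.out.println(restored.getAttributesMap().isEmpty());
  }
}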
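
Finally, the two server-side hunks (CreateRandomStoreFile and TestStoreFile) swap HFile.DEFAULT_BLOCKSIZE for HConstants.DEFAULT_BLOCKSIZE, so callers no longer depend on HFile for the constant. A rough sketch of the patched WriterBuilder usage; the terminal build() call and the output path are assumptions for illustration, not taken from the hunks:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.StoreFile;

public class WriterBlockSizeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);

    // Block size now comes from HConstants rather than HFile.
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, new CacheConfig(conf), fs,
        HConstants.DEFAULT_BLOCKSIZE)
        .withFilePath(new Path("/tmp/example-storefile")) // hypothetical path
        .build(); // assumed builder terminal call
    writer.close();
  }
}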