./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java: * Constructor for standard NamespaceInstanceResource. n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java: * Constructor for querying namespace table list via NamespaceInstanceResource. n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java: * Constructor nn
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return the HTTP response code n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return the HTTP response code n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return the HTTP response code n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java: * @return a Response object with response detail n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java: * Constructor n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java: * Constructor nnnnnn
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java: * @return reference to self for convenience n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java: * Constructor n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java: * Constructor n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java: * Constructor n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java: * @param namespaceName the namespace name. n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java: * @param namespaceName the namespace name. n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java: * Constructor n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java: * Constructor nn
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java: * Constructor nnn
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java: * Constructor from KeyValue n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java: * Constructor nnn
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java: * Constructor nnnn
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java: * @param admin the administrative API n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java: * @return the filter n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java: * @return the JSON representation of the filter n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java: * @param scan the scan specification n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java: * Constructor n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java: * Constructor nn
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java: * Constructor nn
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java: * Constructor n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java: * Constructor nn
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java: * Returns the method. n
./hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java: * @param userProvider the login user provider n
./hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java: * number of rows returned should be equal to the limit n
./hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java: * Constructor nn
./hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java: * Constructor nnn
./hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java: * Use this to project the dispersion scores nnn
./hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java: * size 3. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java: * This method is the main processing loop to be implemented by the various subclasses. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java: * Submit the event to the queue for handling. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java: * n
./hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java: * @param regionDir {@link Path} cloned dir n
./hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java: * @param region {@link HRegion} cloned n
./hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java: * Copy the snapshot files for a snapshot scanner, discards meta changes. nnnnnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java: * Called after releasing read lock in {@link Region#closeRegionOperation()}. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java: * @param ctx the environment to interact with the framework and master n
./hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java: * @param ctx the environment to interact with the framework and master n
./hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java: * @param ctx the environment to interact with the framework and master n
./hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java: * @param ctx the environment to interact with the framework and master n
./hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java: * @param ctx the environment to interact with the framework and master nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java: * @return true if bypaas coprocessor execution, false if not. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java: * @return procId the ID of the clone snapshot procedure n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java: * duplicated RPC n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java: * @return procId the ID of the restore snapshot procedure n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java: * placeSecondaryAndTertiaryRS n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java: * Print the assignment plan to the system output stream n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java: * Update the assignment plan to all the region servers nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java: * @param newPlan - new assignment plan n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java: * All Draining RSs will be tracked after this method is called. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java: * set new value for number of regions in transition. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java: * rit.metrics.threshold.time. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java: * update the timestamp for oldest region in transition metrics. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java: * update the duration metrics of region is transition n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java: * @param splitRow the split point n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java: * @param user the user n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java: * @param user the user n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java: * This will be called before update META step as part of split table region procedure. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java: * * @param user the user n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java: * @param user the user n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java: * @param user the user n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java: * @param user the user n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java: * @param user the user n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java: * @param user the user n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java: * @param user the user n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java: * @param user the user n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java: * @param env MasterProcedureEnv n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @param tableName Name of table to check. nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @param tableName The table name nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @param preserveSplits True if the splits should be preserved nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @param descriptor The updated table descriptor nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @param tableName The table name nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @param tableName The table name nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @param column The column definition nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @param descriptor The updated column definition nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @param columnName The column name nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @return procedure Id n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @return procedure Id n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @return true if aborted, false if procedure already completed or does not exist n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @return procedure list n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @return lock list n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @param name namespace name nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * @return table names n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * HFile resulting from a major compaction exists n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java: * no HFile resulting from a major compaction exists n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java: * to be updated Pair.getSecond is the total number of regions of the table n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java: * empty, all are requested. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java: * @param req GetTableNamesRequest nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java: * the 'hbase.master.wait.on.regionservers.timeout' is reached n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java: * @return whether the table passes the necessary checks n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java: * @param state the procedure state nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java: * @param tableName the target table n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java: * @param env MasterProcedureEnv n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java: * @param state the procedure state nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java: * @param state the procedure state nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java: * @param snapshot snapshot to restore from n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java: * @param env MasterProcedureEnv n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java: * @param env MasterProcedureEnv n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java: * @param env MasterProcedureEnv n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java: * @param env MasterProcedureEnv nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java: * @param env MasterProcedureEnv nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java: * @param env MasterProcedureEnv n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java: * @param env MasterProcedureEnv n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java: * @param env MasterProcedureEnv n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java: * Make sure the directories under rootDir have good permissions. Create if necessary. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java: * permissions. Create it if necessary. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java: * @param conf the HBase configuration n
./hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java: * @return cumulative size of the logfiles split n
./hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java: * an empty list n
./hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java: * close any outside readers of the file n
./hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java: * Move the file to the given destination n * @return true on success n
./hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java: * Support method to init constants such as timeout. Mostly required for UTs. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java: * scenario the SplitLogManager resubmits all orphan and UNASSIGNED tasks at startup. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java: * that has been put up n
./hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java: * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java: * Constructor. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java: * @param noRegionServers Count of regionservers to start. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java: * @param noRegionServers Count of regionservers to start. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java: * @param noRegionServers Count of regionservers to start. nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileName.java: * @param fileName file name. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileName.java: * @param fileName file name. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java: * @return A opened mob file. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java: * @return The store file scanner. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java: * @return The cell in the mob file. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java: * @return The cell in the mob file. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java: * Opens the underlying reader. It's not thread-safe. Use MobFileCache.openFile() instead. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java: * thread-safe. Use MobFileCache.closeFile() instead. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java: * @return An instance of the MobFile. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java: * @param throughputController A controller to avoid flush too fast. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java: * source. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java: * generated. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java: * We need atleast 20% of heap left out for other RS functions. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java: * Retrieve global memstore configured size as percentage of total heap. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java: * Retrieve configured size for on heap block cache as percentage of total heap. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java: * @param uncompressed true if the block should be finished. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java: * records, and tuples of block offset, on-disk block size, and the first key for each entry. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java: * an {@link HFile} block. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java: * Constructs a cache configuration copied from the specified configuration. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/UncompressedBlockSizePredicator.java: * @param uncompressed true if the block should be finished. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java: * finish encoding of a block. nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java: * Encodes a KeyValue. nnnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java: * stuff for the encoded block. It must be called at the end of block encoding. nnnnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java: * @return reader a basic way to load blocks n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java: * nextIndexedKey. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java: * the array of all keys being searched, or -1 otherwise n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java: * @param numEntries the number of root-level index entries n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java: * @return the buffered input stream or wrapped byte input stream n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java: * @param numEntries the number of root-level index entries n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java: * @return position at which we entered the root-level index. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java: * @param description a short description of the index being written. Used in a log message. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java: * iterations n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java: * index format is used. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java: * @return T the deserialized object. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java: * next() will return false when it is called. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java: * position i; and 1, such that c[i] < cell, and scanner is left in position i. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java: * there may also NOT be a c[i+1], and next() will return false (EOF). n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java: * value are undefined. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java: * @return Returns false if you are at the end otherwise true if more in file. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java: * File IO engine is always able to support persistent storage for the cache n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java: * @param offset The offset in the file where the first byte to be written n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java: * Sync the data to file after writing n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java: * File IO engine is always able to support persistent storage for the cache n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java: * @param offset The offset in the file where the first byte to be written n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java: * Sync the data to file after writing n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java: * @param offset The offset in the IO engine where the first byte to be written n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java: * @param offset the offset in the IO engine where the first byte to be written n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java: * Sync the data to IOEngine after writing n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java: * Get the IOEngine from the IO engine name nnn * @return the IOEngine n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java: * Only used in test n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java: * Memory IO engine is always unable to support persistent storage for the cache n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java: * @param realCacheSize cached data size statistics for bucket cache n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PreviousBlockCompressionRatePredicator.java: * @param uncompressed true if the block should be finished. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java: * Get the statistics for this block cache. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java: * @param splitRow This is row we are splitting around. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java: * n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java: * n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java: * Read a Reference from FileSystem. nn * @return New Reference made from passed p n
./hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java: * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java: * Gets the status of the region server. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java: * Gets the health report of the region server. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java: * @param newName new rsgroup name n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * Open all Stores. nn * @return Highest sequenceId found out in a Store. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * @return The HDFS blocks distribution for the given region. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * Do preparation for pending compaction. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * @param majorCompaction True to force a major compaction regardless of thresholds n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * @return Next sequence number unassociated with any actual edit. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * provided current timestamp. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * Check the collection of families for valid timestamps n * @param now current timestamp n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * @return True if file was zero-length (and if so, we'll delete it in here). n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * @return A locked RowLock. The lock is exclusive and already aqquired. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * @return final path to be used for actual loading n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * @param srcPath path of HFile n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * @param srcPath path of HFile n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * @return new HRegion n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * @return new HRegion n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * @return new HRegion n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * @return new HRegion n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java: * rowsToLock is sorted in order to avoid deadlocks. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java: * if there is a problem. nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java: * @param user the user n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java: * @return Scanner to use (cannot be null!) n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java: * @param user the user n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java: * @return Scanner to use (cannot be null!) n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java: * Invoked before a memstore flush n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java: * Invoked after a memstore flush n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java: * Invoked after a memstore flush n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java: * @return whether more rows are available for the scanner or not n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java: * @param map Map of CF to List of file paths for the final loaded files n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java: * null otherwise n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java: * @return The reader to use n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java: * @param comparator comparator n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java: * @param force forcefully stop all running tasks n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java: * no regions are responsible for the given snapshot. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java: * This attempts to cancel out all pending and in progress tasks (interruptions issues) n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java: * @param columns which columns we are scanning n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java: * Closes the scanner and releases any resources it has allocated n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java: * replicas to keep up to date with the primary region files. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java: * Size of the Hfile n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java: * Sets the region coprocessor env. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java: * NonSyncTimeRangeTracker in timeRangeTrackerBytes. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java: * remove the store file readers for store files no longer available. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java: * whether it succeeded or failed. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java: * @param op The operation is about to be taken on the region n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java: * Closes the region operation lock. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java: * the try block of {@link #startRegionOperation(Operation)} n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java: * Perform one or more append operations on a row. n * @return result of the operation n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java: * exceptionMessage if any. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java: * Deletes the specified cells/row. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java: * Perform one or more increment operations on a row. n * @return result of the operation n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java: * null n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java: * rowsToLock is sorted in order to avoid deadlocks. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java: * Puts some data in the table. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java: * true if the p is a file n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java: * chunks n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java: * Execute a list of mutations. nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java: * @param request the request n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java: * @param request the request n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java: * @param request the request n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java: * @param request the request n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java: * @param request the request n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java: * @param request the request n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java: * @param request the request n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java: * @param request the get request n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java: * @param request the multi request n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java: * @param request the scan request n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java: * getter for calculated percent complete n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java: * Step the MVCC forward on to a new read/write basis. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java: * Mutations. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java: * Sets the walEdit for the operation(Mutation) at the specified position. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java: * n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java: * nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java: * Register a MemstoreFlushListener n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java: * Sets the global memstore limit to a new size. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java: * Create the RegionSplitPolicy configured for the given table. nn * @return a RegionSplitPolicy n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShipperListener.java: * The action that needs to be performed before {@link Shipper#shipped()} is performed n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableOnlineRegions.java: * Add to online regions. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java: * @return whether compaction is required n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java: * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java: * Abort the snapshot preparation. Drops the snapshot if any. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java: * nn * @return false if not found or if k is after the end. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * @return true if family contains reference files n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * @return true if region has reference file n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * @return The new {@link Path} of the committed file n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * @return The new {@link Path} of the to be committed file n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * @return The {@link Path} of the committed file n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * @return The destination {@link Path} of the bulk loaded file n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * @param regionInfo daughter {@link RegionInfo} n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * @param mergedRegion {@link RegionInfo} n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * Commit a merged region, making it ready for use. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * n * @return Content of the file we write out to the filesystem under a region n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * Creates a directory. Assumes the user has already checked for this directory existence. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * whether the directory exists or not, and returns true if it exists. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * Renames a directory. Assumes the user has already checked for this directory existence. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * * @return true if rename is successful. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * Deletes a directory. Assumes the user has already checked for this directory existence. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * * @return true if the directory is deleted. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java: * true if it exists. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OperationStatus.java: * n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OperationStatus.java: * n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OperationStatus.java: * n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java: * register. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java: * Retrieve the MatchCode for the next row or column n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java: * information to tracker. It is currently implemented for ExplicitColumnTracker. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java: * We can never know a-priori if we are done, so always return false. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java: * Set the row when there is change in row n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java: * @param cell - current cell to check if deleted by a previously seen delete n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java: * @param primaryReplica true if this is a store file for primary replica, otherwise false. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java: * @param evictOnClose whether to evict blocks belonging to this file n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java: * Delete this file n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java: * @return true if scanner has values left, false if the underlying data is empty n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java: * @param cell the cell to be added n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FavoredNodesForRegion.java: * Used to update the favored nodes mapping when required. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScan.java: * @param scan - original scan object n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java: * nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java: * Constructor n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java: * @param columns which columns we are scanning n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java: * @param isParallelSeek true if using parallel seek n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java: * Check whether scan as expected order nnnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java: * false if end of scanner n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java: * @param kv the KeyValue on which the operation is being requested n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java: * @return The writer for the mob file. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java: * @return The writer for the mob file. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java: * @return The writer for the mob file. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java: * @return The writer for the mob file. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java: * @param targetPath The directory path where the source file is renamed to. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java: * @return The cell found in the mob file. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java: * @return The cell found in the mob file. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java: * @return The cell found in the mob file. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java: * @return The found cell. Null if there's no such a cell. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java: * write,checkEmpty parameter could be true. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java: * @param txid the new transaction id n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java: * @return true if a valid trailer is present n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java: * @param filenum to use n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java: * file-number n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java: * Constructor. This KeyValueHeap will handle closing of passed in KeyValueScanners. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java: * Constructor. nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java: * Constructor n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java: * n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java: * @return true if KeyValues exist at or after specified key, false if not n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java: * '-Dhbase.shutdown.hook=false' to test turning off the running of shutdown hooks. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java: * Register a FlushRequestListener n
./hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java: * Sets the global memstore limit to a new size. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java: * @return True if deleted the region directory. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java: * @return whether we're in safe mode n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java: * Check whether dfs is in safemode. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java: * nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java: * folders such as .logs, .oldlogs, .corrupt folders. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java: * @return List of paths to valid region directories in table dir. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java: * @return List of paths to valid family directories in region dir. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java: * @throws IOException When scanning the directory fails. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java: * @throws IOException When scanning the directory fails. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java: * hbase or hdfs. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java: * Check if short circuit read buffer size is set and if not, set it to hbase value. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java: * hdfs. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java: * This is a map from start key to regions with the same start key. Invariant: This always have n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java: * Check all files in a column family dir. n * column family directory n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java: * Check all files in a mob column family dir. n * mob column family directory n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java: * @param regionDir The mob region directory n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java: * Check all column families in a region dir. n * region directory n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java: * Check all the regiondirs in the specified tableDir n * path to a table n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java: * Print a human readable summary of hfile quarantining operations. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java: * families n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java: * n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java: * n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java: * n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java: * Load the list of disabled tables in ZK into local set. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java: * for each table and checks if there are missing, repeated or overlapping ones. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java: * @throws IOException from HBaseFsckRepair functions nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java: * actual error would have been reported previously. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java: * Main program nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java: * Increases the given byte size of a Bloom filter until it can be folded by the given factor. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java: * keyCount is exponentially greater than maxKeys. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java: * @param task {@link RegionFillTask} custom code to populate region after creation n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java: * @param task {@link RegionFillTask} custom code to populate region after creation n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java: * @param task {@link RegionFillTask} custom code to populate region after creation n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java: * @param task {@link RegionFillTask} custom code to edit the region n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java: * Bloom information from the cell is retrieved nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java: * Adds the last bloom key to the HFile Writer as part of StorefileWriter close. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java: * Check that the configured key provider can be loaded and initialized, or throw an exception. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java: * nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java: * modified. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java: * @return an instance of the correct type of Bloom filter n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java: * Path of file where regions will be written to during unloading/read from during loading n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java: * Creates a RollingStatCalculator with given number of rolling periods. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java: * Inserts given data value to array of data values to be considered for statistics calculation n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java: * Update the statistics after removing the given data value n
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java: * nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java: * that info is in META. nnnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadMonitoring.java: * Print all of the thread's information and stack traces. nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java: * Initialize and start threads necessary to connect an implementation's rpc mechanisms. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java: * @param root name of the root directory in zk to print n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java: * the {@link Subprocedure#acquireBarrier} step. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java: * {@link Subprocedure#insideBarrier} method. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java: * Call back triggered by an individual member upon successful local barrier acquisition n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java: * release nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java: * is thrown the procedure may or not have run cleanup to trigger the completion latch yet. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java: * @return data returned from procedure members upon successfully completing subprocedure. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java: * Check if the entire procedure has globally completed, or has been aborted. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java: * Shutdown the thread pools and release rpc resources n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java: * but can still be used to execute a procedure on all members and to propagate any exceptions. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java: * @return the data the subprocedure wants to return to coordinator side. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java: * created. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java: * Method to cancel the Subprocedure by injecting an exception from and external source. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java: * Wait for the reached global barrier notification. Package visibility for testing nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java: * Waits until the entire procedure has globally completed, or has been aborted. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java: * sent here) nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java: * @return true if successfully, false if bailed due to timeout. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java: * @param rss Region Server service interface n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java: * @param force forcefully stop all running tasks n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java: * @param force forcefully stop all running tasks n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java: * regions. Empty list is returned if no regions. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java: * @return true on success, false otherwise n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java: * tasks itself. An ongoing HRegion.flush() should not be interrupted (see HBASE-13877). n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java: * @param master Master service interface nnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java: * @param desc Procedure description n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java: * @return data returned from the procedure execution, null if no data n
./hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java: * @return true if the specified procedure is finished successfully n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java: * Main nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java: * Convience method to change metrics when a batch of operations are applied. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java: * Get the count of the failed bathes n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java: * Get the Age of Last Applied Op n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java: * Gets the time stamp from when the Sink was initialized. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java: * Gets the total number of OPs delivered to this sink. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java: * Get replication Sink Metrics n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java: * @param walGroup which group we are getting n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java: * Get AgeOfLastShippedOp n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java: * Get the sizeOfLogQueue n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java: * Get the value of uncleanlyClosedWAL counter n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java: * Get the timestampsOfLastShippedOp, if there are multiple groups, return the latest one n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java: * Get the slave peer ID n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java: * Returns the amount of memory in bytes used in this RegionServer by edits pending replication. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java: * A private method used to re-establish a zookeeper session with a peer cluster. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java: * @param hri An instance of RegionInfo n
./hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java: * Process region merge event. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java: * get NamedQueue Provider to add different logs to ringbuffer n
./hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java: * @return true if we proceed the call fully, false otherwise. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java: * is still some work to do. In this case, we expect the caller to delay us. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java: * n
./hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java: * (connection, etc) to send. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java: * Refresh authentication manager policy. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java: * optional one for any Rpc call. n
./hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java: * Set up cell block codecs n
./hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java: * @return Returns -1 if failure (and caller will close connection), else zero or more. nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java: * n
./hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java: * Gets the edit n
./hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java: * Gets the key n
./hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java: * @param clusterIds the clusters that have consumed the change(used in Replication) nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java: * HRegionInfo#getEncodedNameAsBytes(). nnnn
./hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java: * @return the modified visibility expression in the form of byte[] n
./hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java: * details n
./hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java: * Reads back User auth data written to zookeeper. n * @return User auth details n
./hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java: * Write a table's access controls to the permissions mirror in zookeeper nn
./hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java: * Delete the acl notify node of table n
./hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java: * Last modification logical time n
./hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java: * @return false if cell ACLs failed to grant access, true otherwise n
./hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java: * Create the ACL table n
./hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java: * @param ctx the context n
./hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java: * @param ctx the context n
./hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java: * @param user User name n
./hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java: * @return number of keyvalues over all rows in the table n
./hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionServerCoprocessor.java: * a RegionServerServices instance. Assert the opposite too. Do it to RegionServerCoprocessors. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java: * Tests overriding compaction handling via coprocessor hooks n
./hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreMasterCoprocessor.java: * a MasterServices instance. Assert the opposite too. Do it to MasterCoprocessors. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java: * do a single put that is bypassed by a RegionObserver n
./hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java: * Test various multiput operations. If the column family is 'test', then bypass is invoked. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java: * a RegionServerServices instance. Assert the opposite too. Do it to RegionCoprocessors. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticBalancerJmxMetrics.java: * Read the attributes from Hadoop->HBase->Master->Balancer in JMX n
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java: * Start up a mini cluster and put a small table of many empty regions into it. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java: * @return Count of regions in meta table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java: * 'aaa' for first region). Actual value is the row name. nnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailoverBalancerPersistence.java: * next master takes over n
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailoverBalancerPersistence.java: * Kill the master and wait for a new active master to show up n * @return the new active master n
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java: * directly on ZK. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java: * @return false if test should fail otherwise true n
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java: * Start up a mini cluster and put a small table of empty regions into it. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java: * the number regions on the primary region server are expected nnnnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java: * @param expectedNum the expected number of assigned regions n
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java: * Verify all the online region servers has been updated to the latest assignment plan nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java: * @return the number of regions are assigned to the primary region server n
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java: * @param regionNum number of regions to create n
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java: * Use this method to set what a scanner will reply as we next through nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java: * Test that we correctly archive all the storefiles when a region is deleted n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java: * Test read only tables n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java: * Test basic lock function - requestLock, await, unlock. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java: * throw NotSevingRegionException which clears the meta cache. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java: * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java: * Test snapshotting a table that is offline n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java: * the restored table's original metadata n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java: * scenario. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java: * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java: * Calls non-functional test methods. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java: * timestamps/version. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java: * Test TimestampsFilter in the presence of version deletes. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java: * ClientScanner does not get an exception and also sees all the data. nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java: * 2772, it reused the same scanner id. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java: * 3686, it would skip rows in the scan. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java: * and the scan times out on the server but not the client. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java: * disable codec or cipher or both. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java: * @param cipher "none", "aes" n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java: * @return elapsed time. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java: * nnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return Fully qualified path for the default hbase root dir n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return Fully qualified path for the default hbase root dir n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return Fully qualified path to hbase root dir n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return Fully qualified path to hbase root dir n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return Fully qualified path to hbase root dir n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * Flushes all caches in the mini hbase cluster n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * Flushes all caches in the mini hbase cluster n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * Compact all regions in the mini hbase cluster n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * Compact all of a table's reagion in the mini hbase cluster n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * Create a table. nn * @return A Table instance for the created table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * Create a table. nn * @return A Table instance for the created table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * Create a table. nn * @return A Table instance for the created table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * Create a table with multiple regions. nnn * @return A Table instance for the created table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * Create a table. nn * @return A Table instance for the created table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * Create a table with multiple regions. nn * @return A Table instance for the created table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * Table instance for the created table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * Create a table. nnn * @return A Table instance for the created table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return A Table instance for the created table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return created hregion n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} when done. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return HTable to that new table n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return Count of rows loaded. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return Count of rows loaded. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return Count of rows loaded. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return Count of rows loaded. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return Count of rows loaded. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @return Count of rows loaded. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * @param table The table to find the region. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java: * Wait until no regions in transition. (time limit 15min) n
./hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java: * Writes HStoreKey and ImmutableBytes data to passed writer and then closes it. n * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java: * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java: * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java: * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java: * Test empty HFile. Test all features work reasonably when hfile is empty of entries. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java: * test none codecs n
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java: * Test encoding. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java: * does not use the getXXXArray() API n
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java: * Simple tester. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java: * to check if looks good when midKey on a leaf index block boundary n
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java: * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java: * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java: * Test non-error case just works. nnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java: * the WriterThread. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java: * back and process it. nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java: * Do Cache full exception nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java: * for example BlockIndex which is tested in TestHFile since it is a non public class n
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java: * top of the file while we are at it. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java: * Test whether the decompression of first key is implemented correctly. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java: * Test that the configurations in the CompactionConfiguration class change properly. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java: * @param length Length of data in buffer. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java: * Display statistics of different compression algorithms. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java: * @param stateSource get the state by Admin or Master nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java: * A simple test which verifies the 3 possible states when scanning across snapshot. nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java: * Test memstore snapshots n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java: * Test getNextRow from memstore n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java: * keyvalues are deleted from the memstore. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java: * @return Count of rows found. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java: * boundary condition in which case binary sort gives insertion point as length of the array n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java: * Test memstore snapshot happening while scanning. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java: * A simple test which verifies the 3 possible states when scanning across snapshot. nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java: * Test memstore snapshots n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java: * Test getNextRow from memstore n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java: * keyvalues are deleted from the memstore. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java: * memstore. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java: * shouldFlush returns false. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java: * @return How many rows we added. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java: * @return How many rows we added. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java: * Setup n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java: * Getting data from memstore n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java: * Getting MOB data from files n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java: * Getting the reference data from files n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java: * Getting data from memstore and files n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java: * Getting data from memstore and files n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java: * Writes HStoreKey and ImmutableBytes data to passed writer and then closes it. nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java: * WAL. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java: * does not execute flush after n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java: * Test that I can use the max flushed sequence id after the close. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java: * conditions except abort check so we ensure 2 flushes for region close." n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java: * skipped because flush does not do anything if memstoreSize is not larger than 0." n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java: * This method tests https://issues.apache.org/jira/browse/HBASE-2516. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java: * Short.MAX_VALUE. See HBASE-13329. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java: * during initialization n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java: * Test case to check increment function with memstore flushing n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java: * Test case to check append function with memstore flushing n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java: * Test case to check put function with memstore flushing for same row, same ts n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java: * Assert first value in the passed region is firstValue. n * n * n * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java: * Test utility for building a NavigableSet for scanners. nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java: * this case it doesnt. TODO this comment makes no sense above. Appears to do the right thing. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java: * org.apache.hadoop.hbase.ZooKeeperConnectionException n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java: * explicitly specified column qualifiers. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java: * wildcard-inferred column qualifiers. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java: * Run compaction and flushing memstore Assert deletes get cleaned up. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java: * basically works. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java: * Get whole row. OOME happened in StoreScanner.next(..). n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRequestsPerSecondMetric.java: * which result into negative value. nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java: * (enable/disable table, etc) makes the region replicas readable. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java: * @return true in case of success n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java: * Test that WAL is rolled when all data nodes in the pipeline have been restarted. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java: * Reads the WAL with and without WALTrailer. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java: * returns false from ProtoBufLogReader. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java: * exception if we do). Comparison is based on the timestamp present in the wal name. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java: * flush on rolling the wal. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java: * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java: * and trigger NPE. nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java: * down after registering with master. Test will TIMEOUT if an error!!!! n
./hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java: * {@link #startHBase()}. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java: * @param table table constraint n
./hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java: * after test nnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java: * closed after test n
./hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java: * @param tablename The name of a table to be created. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java: * legitimate hfile and return it. nn * @return Path of a flushed hfile. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java: * @return Path of a flushed hfile. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java: * @param fs filessystem on which the archiving took place n
./hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java: * copy of the files in the archive directory (and the original files). n
./hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java: * Mutation n
./hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java: * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java: * preading with hedged reads enabled using code taken from hdfs TestPread. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java: * and splitkeys if the table does not already exist. nnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java: * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java: * if the assignment or regions is pretty balanced. nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java: * combining partial results into complete results n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java: * Ensure that we only see Results marked as partial when the allowPartial flag is set n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java: * results into complete results. A variety of scan configurations can be tested n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java: * Order of cells in partial results matches the ordering of cells from complete results n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java: * to next on the scanner. Test a variety of different sizes for correctness n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java: * calculation n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java: * want the server to return exactly numberOfCells cells n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java: * Test the method {@link Result#createCompleteResult(Iterable)} n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java: * be seen. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java: * Verifies that result contains all the key values within expKvList. Fails the test otherwise nnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java: * Compares two results and fails the test if the results are different nnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java: * result sizes (all of which are less than the size necessary to fetch an entire row) n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java: * href="https://issues.apache.org/jira/browse/HADOOP-1784">hadoop-1784. nnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java: * timestamp. nnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java: * Run test scanning different timestamps. nnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java: * Assert that the scan returns only values < timestamp. nn * @return Count of items scanned. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java: * Put values. nnnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java: * @return true on success, false otherwise n
./hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java: * our counting Table. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java: * Insert a whole batch of entries n
./hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java: * Insert a mix of puts and deletes n
./hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java: * Insert to 2 different tables n
./hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java: * Insert then do different types of deletes n
./hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java: * that creates it. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java: * and check the updates are replicated. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java: * Little test to check we can basically convert list of a list of KVs into a CellScanner n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java: * Test the overridden functionality of filterCell(Cell) n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java: * Test "must pass one" n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java: * Test "must pass all" n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java: * Test list ordering n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java: * same result as the 'prefix' only result. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java: * result as the inclusive stop result. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java: * Test serialization n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java: * Test filterCell logic. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java: * but the scan total nnnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java: * Test that the filter correctly drops rows without a corresponding timestamp n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java: * Tests identification of the stop row n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java: * Tests serialization n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java: * {@link Filter#filterRow()} method. See HBASE-2258. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java: * 0.96+ code base. See HBASE-10366 n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java: * {@link Filter#filterRowKey(Cell)} method. See HBASE-2258. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java: * {@link Filter#filterCell(Cell)} method. See HBASE-2258. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java: * test page size filter n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java: * Test filter serialization n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRandomRowFilter.java: * Tests basics n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRandomRowFilter.java: * Tests serialization n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInclusiveStopFilter.java: * Tests identification of the stop row n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInclusiveStopFilter.java: * Tests serialization n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java: * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java: * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java: * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java: * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java: * mainly for testing serialization nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java: * Tests serialization n
./hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java: * @param wals may not be null n * @return Count of edits. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java: * System.exit. nnn
./hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java: * never be open n
./hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java: * with {@link AbstractFSWALProvider#WAL_FILE_NAME_DELIMITER} n
./hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java: * Just write multiple logs then split. Before fix for HADOOP-2283, this would fail. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java: * Test new HDFS-265 sync. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java: * Test that we can visit entries before they are appended n
./hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java: * used by TestDefaultWALProviderWithHLogKey n
./hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java: * helper method to simulate region flush for a WAL. nn
./hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java: * n
./hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java: * Write to a log file with three concurrent threads and verifying all data is written. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALMethods.java: * moved-aside files. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUsersOperationsWithSecureHadoop.java: * hbase.regionserver.keytab.file n
./hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java: * Test various combinations of Server and Client configuration for Crypto AES. n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java: * Test RegionInfo serialization n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java: * size n
./hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java: * Flush and log stats on flush nnn
./hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java: * Get configuration n
./hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java: * After adding data to the table start a mr job to nnn
./hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java: * @return StringBuilder filled with references if any. n
./hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java: * the {@link org.apache.hadoop.hbase.client.Admin} connection. n
./hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java: * source cluster. This assumes that the tables have been setup via setupTablesAndReplication. n
./hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java: * @param expectedNumNodes the number of nodes we are expecting to see in the sink cluster n
./hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java: * Returns a ClusterStatus for this HBase cluster n
./hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java: * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers n
./hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java: * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers n
./hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java: * Fetches the int at the given index. Does not change position of the underlying ByteBuffers n
./hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java: * Fetches the short at the given index. Does not change position of the underlying ByteBuffers n
./hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java: * Fetches the long at the given index. Does not change position of the underlying ByteBuffers n
./hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java: * advances the position of the MBB by the length of the byte[]. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java: * Jumps the current position of this MBB by specified length. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java: * Jumps back the current position of this MBB by specified length. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java: * @return the encrypting wrapper n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java: * @return the encrypting wrapper n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java: * @return the decrypting wrapper n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java: * @return the decrypting wrapper n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java: * Set the secret key n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java: * Set the initialization vector n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java: * Create a stream for encryption n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java: * Set the secret key n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java: * Set the initialization vector n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java: * Create a stream for decryption n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java: * Initialize the key provider n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java: * @param src plaintext nnnn
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java: * @param src plaintext nnnnn
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java: * @param in plaintext nn
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java: * @param in plaintet nnn
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java: * nnnnnn
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java: * Decrypt a block of ciphertext from a stream given a context and IV nnnnnnn
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java: * Decrypt a stream of ciphertext given a decryptor nnnnn
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java: * Decrypt a stream of ciphertext given a context and IV nnnnnn
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java: * @param iv the initialization vector, can be null n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java: * @param iv the initialization vector, can be null n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java: * @param dict the dictionary whose contents are to written n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java: * @param dict the dictionary whose contents are to written n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java: * @param cell Cell to write out n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java: * sent over the network. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java: * @param length Length of all tag bytes n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java: * @param length Length of all tag bytes n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java: * @param length Length of all tag bytes n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java: * @return bytes count read from source to uncompress all tags. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java: * This flips the underlying BB so be sure to use it _last_! n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java: * @param offset starting offset of the output array n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java: * @param offset starting offset of the output array n
./hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java: * @return Size of compressed data in bytes. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java: * @return a copy of the given designated byte array nn
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java: * Fill given array with zeros at the specified position. nnn
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java: * of 2 n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java: * Return the first element stored in the pair. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java: * Return the second element stored in the pair. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java: * Cannot rely on enum ordinals . They change if item is removed or moved. Do our own codes. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java: * Measure elapsed time. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java: * Return a lock for the given key. The lock is already locked. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java: * Return the first element stored in the pair. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java: * Return the second element stored in the pair. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java: * n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java: * @param capacity the size of a new byte[]. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java: * @param bytes the array to wrap. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java: * @param val the value to store n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java: * @param val the value to store n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java: * @param val the value to store n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java: * @return value in bytes n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java: * @return - the URI's to string format n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java: * Limits the byte range upto a specified value. Limit cannot be greater than capacity nn
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java: * Return the current limit n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java: * @param index index of bar n
./hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java: * @return a CoprocessorClassLoader for the coprocessor jar path n
./hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java: * @return how many bytes are written. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java: * @return String password or default password n
./hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java: * Remove all internal elements from builder. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java: * new OffheapKeyOnlyKeyValue. nnn
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * @param rlength the row length n
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * nn
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * timestamp><1 byte type> nnn
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * @return the cloned cell n
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * @return the total bytes written n
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * @param rlength the row length n
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * @param flength the family length n
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * @param qlength the qualifier length n
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * @param qlength the qualifier length n
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * @param vlength the value length n
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * @param tagsLength the tag length n
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * Create a Cell that is smaller than all other possible Cells for the given Cell's row. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * @param cell - cell n
./hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java: * Create a Cell that is larger than all other possible Cells for the given Cell's row. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java: * the described order n
./hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java: * Write int length followed by array bytes. nnnn
./hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java: * thrown if EOF is reached prematurely. Does not return null. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java: * Write int length followed by array bytes. nnnn
./hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java: * position to the start of the next KeyValue. Does not allocate a new array or copy data. nnn
./hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java: * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable. nn
./hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java: * useful marking a stream as done. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java: * we will return null which can be useful marking a stream as done. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java: * stream as done. n
./hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java: * @param ugi The base Hadoop user n
./hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java: * @param ugi The base Hadoop user n
./hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java: * truststore and is more applicable to general use. nnn
./hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java: * @return the serialized bytes of the BCFKS trust store. nn
./hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java: * @return the serialized bytes of the BCFKS key store. nn
./hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java: * Call directly from client such as hbase shell n
./hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java: * n
./hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java: * nnn * @return value of type T n
./hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java: * returns the maximum value for this type T n
./hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java: * nnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java: * nn
./hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java: * provides casting opportunity between the data types. nn
./hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java: * two <S> values as it is not needed as of now. nnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java: * passed message bytes (used on the server side). n
./hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java: * the opposite of {@link #castToReturnType(Object)} nn
./hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorException.java: * Constructor with a Class object and exception message. nn
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java: * Disable a table. The table has to be in enabled state for it to be disabled. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java: * Stop the designated regionserver. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java: * @param state convert from n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java: * Covert to PB version of State n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java: * Table name for state n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java: * Covert to PB version of TableState n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java: * @param tableState convert from n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java: * created. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java: * n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java: * to be immutable). n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java: * @param row We make a local copy of this passed in row. nn
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java: * @return this for invocation chaining n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java: * Get the name of the table n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java: * something gets set nn
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java: * Gets the TimeRange used for this increment. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java: * @throws IOException if invalid time range n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java: * n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java: * We make a copy of the passed in row key to keep local. nnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java: * We make a copy of the passed in row key to keep local. nnnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java: * @param value column value n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java: * @param value column value n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java: * @param value column value n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java: * encoding failure. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java: * Get the mob compact partition policy for this family n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java: * Get current table name of the region n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java: * @param split set split status n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java: * @param offLine Set online/offline status. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java: * @throws IOException if a remote or network exception occurs n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java: * @throws IOException if not a mob column family or if a remote or network exception occurs n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java: * @throws IOException if a remote or network exception occurs n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java: * @throws IOException if not a mob column family or if a remote or network exception occurs n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java: * @throws IOException if a remote or network exception occurs n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java: * @throws IOException if a remote or network exception occurs n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java: * @param maxStamp maximum timestamp value, exclusive n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java: * Gets the TimeRange used for this append. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java: * @param rowArray Makes a copy out of this buffer. nn
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java: * @param value value to append to specified column n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java: * Copy-constructor n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java: * Create a Get operation for the specified row. nnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java: * Create a Get operation for the specified row. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java: * Method for retrieving the get's row n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java: * Method for retrieving the get's TimeRange n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java: * Method for retrieving the get's familyMap n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java: * and aggregation by debugging, logging, and administration tools. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java: * @param maxCols a limit on the number of columns output prior to truncation n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * @param family family name n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * @param qualifier column qualifier n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * @see #readVersions(int) n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * @see #readVersions(int) n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.) n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.) n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * Get all available versions. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * @param versions specified number of versions for each column n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * @param familyMap map of family to qualifier n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * Getting the familyMap n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * @param reversed if true, scan will be backward order n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * and aggregation by debugging, logging, and administration tools. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * @param maxCols a limit on the number of columns output prior to truncation n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * @param limit the limit of rows for this scan n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * set {@code readType} to {@link ReadType#PREAD}. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java: * example, we will always use pread if this is a get scan. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java: * Removes the non-default replicas from the passed regions collection n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java: * n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java: * Sets the authorizations to be used by this Query n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java: * Expert: This is an advanced API exposed. Only use it if you know what you are doing n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java: * @param maxStamp maximum timestamp value, exclusive n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java: * nn
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java: * nn
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java: * (We never return null). n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java: * Get the name of the table n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java: * @return An instance of {@link ModifyableTableDescriptor} made from bytes n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java: * If choreService has not been created yet, create the ChoreService. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java: * @param maxCols a limit on the number of columns output prior to truncation n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java: * Set the durability for this mutation n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java: * Method for retrieving the put's familyMap n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java: * Method for retrieving the delete's row n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java: * Method for retrieving the timestamp. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java: * Sets the visibility expression associated with cells in this Mutation. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java: * converted client CellVisibility n
./hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java: * @param ttl the TTL desired for the result of the mutation, in milliseconds n
./hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java: * n
./hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java: * n
./hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java: * n
./hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java: * @throws IOException e n
./hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java: * @throws IOException e n
./hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java: * n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @return the converted client Get n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @return A client Put. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @return A client Put. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @return the converted client Delete n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @return the converted client Delete n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @return the converted Mutation n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @return the converted protocol buffer Scan n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @return the converted client Scan n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @return a protocol buffer Get n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * Create a protocol buffer Mutate based on a client Mutation nn * @return a protobuf'd Mutation n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * protobuf'd Mutation n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * protobuf'd Mutation n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @return the converted client Result n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * DeleteType n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @return The type. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * A helper to close a region given a region name using admin protocol. nnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * A helper to open a region using admin protocol. nnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * a list of online region info n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * converted client CellVisibility n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * converted client Authorizations n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @param in Inputsream with delimited protobuf data n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @param size known size of protobuf data n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @param in InputStream containing protobuf data n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @param bs ByteString containing the n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @param b byte array n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @param b byte array nnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @param state the protobuf CompactionState n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * @param state the protobuf CompactionState n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java: * instance. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java: * @return the results that were in the MultiResponse (a Result or an Exception). n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java: * @return the results that were in the MultiResponse (a Result or an Exception). n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java: * Create Results from the cells using the cells meta data. nnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java: * @return a mutate request n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java: * @return a multi request n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java: * Create a protocol buffer MutateRequest for a put nn * @return a mutate request n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java: * Create a protocol buffer MutateRequest for an append nn * @return a mutate request n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java: * Create a protocol buffer MutateRequest for a delete nn * @return a mutate request n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java: * Create a protocol buffer ScanRequest for a client Scan nnnn * @return a scan request n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java: * RowMutations/CheckAndMutate within the original list of actions n
./hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java: * expiration is turned on n
./hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java: * @param path the name of the full path which includes baseZNode. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java: * @param znode the name of the znode, does not include baseZNode n
./hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java: * @return translated exception n
./hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java: * n
./hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java: * nn
./hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java: * returned buffer has been flipped and is ready for reading. Use limit to find total size. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java: * @return the transformed cell. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java: * Constructor for protobuf deserialization only. nnnnnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java: * Create a new filter with a specified chance for a row to be included. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java: * Set the chance that a row is included. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java: * Default implementation so that writers of custom filters aren't forced to implement. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java: * Constructor for protobuf deserialization only. nnnnnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java: * Constructor for filter that retains only the specified timestamps in the list. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java: * is assumed. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java: * Get the operator. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java: * Get the filters. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java: * n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java: * @return the encrypted key bytes n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java: * @return the raw key bytes nn
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java: * @param fallbackAllowed does the client allow fallback to simple authentication n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java: * @param rpcProtection the protection level ("authentication", "integrity" or "privacy") n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java: * @return true if cell visibility features are supported and enabled, false otherwise n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java: * Utility method for adding label to the system. nnnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java: * Utility method for adding labels to the system. nnnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java: * Sets given labels globally authorized for the user. nnnnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java: * @return labels The list of visibility labels defined in the system. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java: * Removes given labels from user's globally authorized list of labels. nnnnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java: * @param userName User name, if empty then all user permissions will be retrieved. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java: * @param userName User name, if empty then all user permissions will be retrieved. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java: * @param userName User name, if empty then all user permissions will be retrieved. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java: * @return true if authorization is supported and enabled, false otherwise n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java: * @return true if cell authorization is supported and enabled, false otherwise n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java: * later granted permissions will override previous granted permissions. nnnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java: * @param connection The Connection instance to use nnnnnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java: * @param connection The Connection instance to use nnnn
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java: * @return List of UserPermissions n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java: * @return true if connection is set up, or false if needs to switch to simple Auth. n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java: * @return a SASL wrapped InputStream n
./hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java: * @return a SASL wrapped OutputStream n
./hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java: * Test for verifying that the timestamp in delete object is being honored. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java: * Wait for the namenode. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java: * @param numRegionServers initial number of region servers to start. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java: * @param numRegionServers initial number of region servers to start. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java: * ports on each cluster start. nn
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java: * @return A new fs instance if we are up on DistributedFileSystem. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java: * Wait for the specified master to stop. Removes this thread from list of running threads. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java: * @return true if an active master becomes available. false if there are no masters left. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java: * Call flushCache on all regions on all participating regionservers. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java: * Call flushCache on all regions of the specified table. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return True if we removed the test dirs n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return True if we removed child n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Shuts down instance created by call to {@link #startMiniDFSCluster(int)} or does nothing. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return Fully qualified path for the default hbase root dir n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return Fully qualified path for the default hbase root dir n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return Fully qualified path to hbase root dir n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return Fully qualified path to hbase root dir n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return Fully qualified path to hbase root dir n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Create a table. nn * @return A Table instance for the created table. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Create a table. nn * @return A Table instance for the created table. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Create a table. nn * @return A Table instance for the created table. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Create a table with multiple regions. nnn * @return A Table instance for the created table. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Create a table. nn * @return A Table instance for the created table. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Create a table with multiple regions. nn * @return A Table instance for the created table. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Table instance for the created table. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Create a table. nnn * @return A Table instance for the created table. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return A Table instance for the created table. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return created hregion n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return HTable to that new table n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return Count of rows loaded. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return Count of rows loaded. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return Count of rows loaded. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return Count of rows loaded. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return Count of rows loaded. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return Count of rows loaded. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * nnn * @return list of region info for regions added to meta n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Find any other region server which is different from the one identified by parameter n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return region server that holds it, null if the row doesn't exist nn
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Expire the Master's session n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @param table The table to find the region. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @param table Table to wait on. nn
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @param timeoutMillis Time to wait on it being marked enabled. nn
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @param table Table to wait on. nn
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @param timeoutMillis Time to wait on it being marked disabled. nn
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return true if we started some servers n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return true if we started some servers n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @return A new configuration instance with a different user set into it. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @param stream A DFSClient.DFSOutputStream. nnnnn
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @param destServer destination server of the region nn
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @param tableName the table name n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Wait until all system table's regions get assigned n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @param timeout timeout, in milliseconds n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * @param timeout How long to wait. n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Wait until no regions in transition. (time limit 15min) n
./hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java: * Wait until labels is ready in VisibilityLabelsCache. nn
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java: * @return a jar file that contains the class. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java: * @return a jar file that contains the class, or null. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java: * @return A list of TableSplit, the size of the list is n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java: * @param tableName The {@link TableName} of the table to process. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java: * {@link #initializeTable(Connection, TableName)}. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java: * should be the special token TsvParser.ROWKEY_COLUMN_SPEC n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java: * Pair of row key offset and length. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java: * handling its own custom params. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java: * Handles common parameter initialization that a subclass might want to leverage. nn
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java: * Open a TableHash.Reader starting at the first hash at or after the given key. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java: * @param multiTableDescriptors Table descriptor and region locator pairs n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java: * contains exactly one element, startRow and stopRow are set to the scan. nn
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java: * handling its own custom params. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java: * Handles common parameter initialization that a subclass might want to leverage. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java: * @return Return the list of splits extracted from the scans/snapshots pushed to conf n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java: * @return the snapshot name -> list<scan> mapping pushed to configuration n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java: * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY}) nnn
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java: * @return the directories into which snapshots have been restored from n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java: * handling its own custom params. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java: * Handles common parameter initialization that a subclass might want to leverage. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java: * @return created Cell n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java: * @param vlength value length n * @return created Cell n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java: * Pass the key, value to reduce nnnnn
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java: * nnnn
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java: * @param tableName The {@link TableName} of the table to process. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java: * Allows subclasses to set the {@link Filter} to be used. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java: * {@link #initializeTable(Connection, TableName)}. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java: * Restart from survivable exceptions by creating a new scanner. nn
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java: * Build the scanner. Not done in constructor to allow for extension. n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java: * @return true if there was more data n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java: * Constructor nnnn
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java: * No aggregation, output pairs of (key, record) nnnnn
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java: * nn
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java: * n * @return the JobConf n
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java: * nn
./hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java: * reduce. If any of the grouping columns are not found in the value, the record is skipped. nnnnn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java: * Setup a table with two rows and values. n * @return A Table instance for the created table. n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java: * created table. n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java: * Create table data and run tests on specified htable using the o.a.h.hbase.mapreduce API. nnn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java: * Create a table that IOE's on first scanner next call n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java: * Create a table that throws a NotServingRegionException on first scanner next call n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java: * Run test assuming no errors using newer mapreduce api nn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java: * Run test assuming Scanner IOException failure using newer mapreduce api nn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java: * Run test assuming Scanner IOException failure using newer mapreduce api nn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java: * Tests a MR scan using specific start and stop rows. nnn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java: * Confirm ImportTsv via data in online table. n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java: * Pass the key, and reversed value to reduce nnnn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java: * Verify scan counters are emitted from the job nn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java: * successfully nnn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java: * successfully nnn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java: * Test simple replication case with column mapping n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java: * Test export hbase:meta table n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java: * Test import data from 0.94 exported file n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java: * @return the number of keyvalues found n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java: * Test a map/reduce against a multi-region table n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java: * Test a case when no column was specified in command line arguments. n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java: * Test a case when the column specified in command line arguments is exclusive for few rows. n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java: * contains colons. n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java: * row. n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java: * also a row range filter is specified n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java: * Test a case when a range is specified with single range of start-end keys n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java: * Test a case when a range is specified with single range with end key only n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java: * Test a case when a range is specified with two ranges where one range is with end key only n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java: * Test a case when a range is specified with multiple ranges of start-end keys n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java: * filled, another two are not n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java: * Test a case when the timerange is specified with --starttime and --endtime options n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java: * @param expectedCount the expected row count (result of map reduce job). n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java: * one. nn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java: * family compression map is correctly serialized into and deserialized from configuration n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java: * family bloom type map is correctly serialized into and deserialized from configuration n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java: * family block size map is correctly serialized into and deserialized from configuration n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java: * configuration n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java: * @return KV count in the given hfile n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java: * only do one Connection when doing getSplits even if a MultiTableInputFormat. n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java: * @return KV count in the given hfile n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java: * Pass the key, and reversed value to reduce nnnn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java: * Test multithreadedTableMapper map/reduce against a multi-region table nnn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java: * @param msg status message n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java: * @param cmd Command to run. n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java: * @return Directory that contains file written whose name is JOB_INPUT_FILENAME n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java: * @return Elapsed time. n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java: * @return A Table instance for the created table. n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java: * created table. n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java: * Create table data and run tests on specified htable using the o.a.h.hbase.mapred API. nn
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java: * Create a table that IOE's on first scanner next call n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java: * Create a table that throws a DoNotRetryIOException on first scanner next call n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java: * Run test assuming no errors using mapred api. n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java: * Run test assuming Scanner IOException failure using mapred api, n
./hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java: * Run test assuming Scanner IOException failure using mapred api, n
./hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java: * getData is an idempotent operation. Retry before throwing exception n
./hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java: * getData is an idempotent operation. Retry before throwing exception n
./hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java: * n
./hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java: * @param payload byte array of payload. n
./hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java: * Padding numbers to make comparison of sort order easier in a for loop n * The number to pad. n