diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 34a37f1..90359d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -1,20 +1,12 @@ /* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ package org.apache.hadoop.hbase.regionserver; @@ -204,51 +196,50 @@ private static final Log LOG = LogFactory.getLog(HRegion.class); public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY = - "hbase.hregion.scan.loadColumnFamiliesOnDemand"; + "hbase.hregion.scan.loadColumnFamiliesOnDemand"; /** - * Longest time we'll wait on a sequenceid. - * Sequenceid comes up out of the WAL subsystem. WAL subsystem can go bad or a test might use - * it without cleaning up previous usage properly; generally, a WAL roll is needed. The timeout - * is for a latch in WALKey. There is no global accounting of outstanding WALKeys; intentionally - * to avoid contention, but it makes it so if an abort or problem, we could be stuck waiting - * on the WALKey latch. Revisit. + * Longest time we'll wait on a sequenceid. Sequenceid comes up out of the WAL subsystem. WAL + * subsystem can go bad or a test might use it without cleaning up previous usage properly; + * generally, a WAL roll is needed. The timeout is for a latch in WALKey. There is no global + * accounting of outstanding WALKeys; intentionally to avoid contention, but it makes it so if an + * abort or problem, we could be stuck waiting on the WALKey latch. Revisit. */ private final int maxWaitForSeqId; private static final String MAX_WAIT_FOR_SEQ_ID_KEY = "hbase.hregion.max.wait.for.sequenceid.ms"; private static final int DEFAULT_MAX_WAIT_FOR_SEQ_ID = 30000; /** - * This is the global default value for durability. All tables/mutations not - * defining a durability or using USE_DEFAULT will default to this value. 
+ * This is the global default value for durability. All tables/mutations not defining a durability + * or using USE_DEFAULT will default to this value. */ private static final Durability DEFAULT_DURABILITY = Durability.SYNC_WAL; final AtomicBoolean closed = new AtomicBoolean(false); - /* Closing can take some time; use the closing flag if there is stuff we don't - * want to do while in closing state; e.g. like offer this region up to the - * master as a region to close if the carrying regionserver is overloaded. - * Once set, it is never cleared. + /* + * Closing can take some time; use the closing flag if there is stuff we don't want to do while in + * closing state; e.g. like offer this region up to the master as a region to close if the + * carrying regionserver is overloaded. Once set, it is never cleared. */ final AtomicBoolean closing = new AtomicBoolean(false); /** - * The max sequence id of flushed data on this region. There is no edit in memory that is - * less that this sequence id. + * The max sequence id of flushed data on this region. There is no edit in memory that is less + * that this sequence id. */ private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM; /** - * Record the sequence id of last flush operation. Can be in advance of - * {@link #maxFlushedSeqId} when flushing a single column family. In this case, - * {@link #maxFlushedSeqId} will be older than the oldest edit in memory. + * Record the sequence id of last flush operation. Can be in advance of {@link #maxFlushedSeqId} + * when flushing a single column family. In this case, {@link #maxFlushedSeqId} will be older than + * the oldest edit in memory. */ private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM; /** - * The sequence id of the last replayed open region event from the primary region. This is used - * to skip entries before this due to the possibility of replay edits coming out of order from + * The sequence id of the last replayed open region event from the primary region. This is used to + * skip entries before this due to the possibility of replay edits coming out of order from * replication. */ protected volatile long lastReplayedOpenRegionSeqId = -1L; @@ -266,8 +257,8 @@ private final ConcurrentHashMap lockedRows = new ConcurrentHashMap(); - protected final Map stores = new ConcurrentSkipListMap( - Bytes.BYTES_RAWCOMPARATOR); + protected final Map stores = + new ConcurrentSkipListMap(Bytes.BYTES_RAWCOMPARATOR); // TODO: account for each registered handler in HeapSize computation private Map coprocessorServiceHandlers = Maps.newHashMap(); @@ -282,7 +273,7 @@ final Counter checkAndMutateChecksPassed = new Counter(); final Counter checkAndMutateChecksFailed = new Counter(); - //Number of requests + // Number of requests final Counter readRequestsCount = new Counter(); final Counter writeRequestsCount = new Counter(); @@ -332,14 +323,18 @@ private long openSeqNum = HConstants.NO_SEQNUM; /** - * The default setting for whether to enable on-demand CF loading for - * scan requests to this region. Requests can override it. + * The default setting for whether to enable on-demand CF loading for scan requests to this + * region. Requests can override it. */ private boolean isLoadingCfsOnDemandDefault = false; private final AtomicInteger majorInProgress = new AtomicInteger(0); private final AtomicInteger minorInProgress = new AtomicInteger(0); + /** + * use only for test case to create dirty HRegion. 
+ */ + private boolean skipRowChecking = false; // // Context: During replay we want to ensure that we do not lose any data. So, we // have to be conservative in how we replay wals. For each store, we calculate @@ -362,19 +357,26 @@ private volatile Optional configurationManager; /** - * @return The smallest mvcc readPoint across all the scanners in this - * region. Writes older than this readPoint, are included in every - * read operation. + * set this flag to true to skip row range check and used for test case only. + * @param skipRowChecking true:skip row checking false:default not + */ + public void setSkipRowChecking(boolean skipRowChecking) { + this.skipRowChecking = skipRowChecking; + } + + /** + * @return The smallest mvcc readPoint across all the scanners in this region. Writes older than + * this readPoint, are included in every read operation. */ public long getSmallestReadPoint() { long minimumReadPoint; // We need to ensure that while we are calculating the smallestReadPoint // no new RegionScanners can grab a readPoint that we are unaware of. // We achieve this by synchronizing on the scannerReadPoints object. - synchronized(scannerReadPoints) { + synchronized (scannerReadPoints) { minimumReadPoint = mvcc.getReadPoint(); - for (Long readPoint: this.scannerReadPoints.values()) { + for (Long readPoint : this.scannerReadPoints.values()) { if (readPoint < minimumReadPoint) { minimumReadPoint = readPoint; } @@ -384,8 +386,7 @@ } /* - * Data structure of write state flags used coordinating flushes, - * compactions and closes. + * Data structure of write state flags used coordinating flushes, compactions and closes. */ static class WriteState { // Set while a memstore flush is happening. @@ -404,7 +405,6 @@ /** * Set flags that make this region read-only. - * * @param onOff flip value for region r/o setting */ synchronized void setReadOnly(final boolean onOff) { @@ -424,15 +424,14 @@ this.readsEnabled = readsEnabled; } - static final long HEAP_SIZE = ClassSize.align( - ClassSize.OBJECT + 5 * Bytes.SIZEOF_BOOLEAN); + static final long HEAP_SIZE = ClassSize.align(ClassSize.OBJECT + 5 * Bytes.SIZEOF_BOOLEAN); } /** * Objects from this class are created when flushing to describe all the different states that - * that method ends up in. The Result enum describes those states. The sequence id should only - * be specified if the flush was successful, and the failure message should only be specified - * if it didn't flush. + * that method ends up in. The Result enum describes those states. The sequence id should only be + * specified if the flush was successful, and the failure message should only be specified if it + * didn't flush. */ public static class FlushResultImpl implements FlushResult { final Result result; @@ -445,12 +444,12 @@ * null. * @param result Expecting FLUSHED_NO_COMPACTION_NEEDED or FLUSHED_COMPACTION_NEEDED. * @param flushSequenceId Generated sequence id that comes right after the edits in the - * memstores. + * memstores. */ FlushResultImpl(Result result, long flushSequenceId) { this(result, flushSequenceId, null, false); - assert result == Result.FLUSHED_NO_COMPACTION_NEEDED || result == Result - .FLUSHED_COMPACTION_NEEDED; + assert result == Result.FLUSHED_NO_COMPACTION_NEEDED + || result == Result.FLUSHED_COMPACTION_NEEDED; } /** @@ -470,7 +469,7 @@ * @param failureReason Reason why we couldn't flush, or null. 
*/ FlushResultImpl(Result result, long flushSequenceId, String failureReason, - boolean wroteFlushMarker) { + boolean wroteFlushMarker) { this.result = result; this.flushSequenceId = flushSequenceId; this.failureReason = failureReason; @@ -478,14 +477,14 @@ } /** - * Convenience method, the equivalent of checking if result is - * FLUSHED_NO_COMPACTION_NEEDED or FLUSHED_NO_COMPACTION_NEEDED. + * Convenience method, the equivalent of checking if result is FLUSHED_NO_COMPACTION_NEEDED or + * FLUSHED_NO_COMPACTION_NEEDED. * @return true if the memstores were flushed, else false. */ @Override public boolean isFlushSucceeded() { - return result == Result.FLUSHED_NO_COMPACTION_NEEDED || result == Result - .FLUSHED_COMPACTION_NEEDED; + return result == Result.FLUSHED_NO_COMPACTION_NEEDED + || result == Result.FLUSHED_COMPACTION_NEEDED; } /** @@ -499,10 +498,9 @@ @Override public String toString() { - return new StringBuilder() - .append("flush result:").append(result).append(", ") - .append("failureReason:").append(failureReason).append(",") - .append("flush seq id").append(flushSequenceId).toString(); + return new StringBuilder().append("flush result:").append(result).append(", ") + .append("failureReason:").append(failureReason).append(",").append("flush seq id") + .append(flushSequenceId).toString(); } @Override @@ -529,21 +527,17 @@ } /** Constructs a successful prepare flush result */ - PrepareFlushResult( - TreeMap storeFlushCtxs, - TreeMap> committedFiles, - TreeMap storeFlushableSize, long startTime, long flushSeqId, - long flushedSeqId, long totalFlushableSize) { - this(null, storeFlushCtxs, committedFiles, storeFlushableSize, startTime, - flushSeqId, flushedSeqId, totalFlushableSize); + PrepareFlushResult(TreeMap storeFlushCtxs, + TreeMap> committedFiles, TreeMap storeFlushableSize, + long startTime, long flushSeqId, long flushedSeqId, long totalFlushableSize) { + this(null, storeFlushCtxs, committedFiles, storeFlushableSize, startTime, flushSeqId, + flushedSeqId, totalFlushableSize); } - private PrepareFlushResult( - FlushResult result, - TreeMap storeFlushCtxs, - TreeMap> committedFiles, - TreeMap storeFlushableSize, long startTime, long flushSeqId, - long flushedSeqId, long totalFlushableSize) { + private PrepareFlushResult(FlushResult result, + TreeMap storeFlushCtxs, + TreeMap> committedFiles, TreeMap storeFlushableSize, + long startTime, long flushSeqId, long flushedSeqId, long totalFlushableSize) { this.result = result; this.storeFlushCtxs = storeFlushCtxs; this.committedFiles = committedFiles; @@ -599,22 +593,20 @@ private final boolean regionStatsEnabled; /** - * HRegion constructor. This constructor should only be used for testing and - * extensions. Instances of HRegion should be instantiated with the - * {@link HRegion#createHRegion} or {@link HRegion#openHRegion} method. - * - * @param tableDir qualified path of directory where region should be located, - * usually the table directory. - * @param wal The WAL is the outbound log for any updates to the HRegion - * The wal file is a logfile from the previous execution that's - * custom-computed for this HRegion. The HRegionServer computes and sorts the - * appropriate wal info for this HRegion. If there is a previous wal file - * (implying that the HRegion has been written-to before), then read it from - * the supplied path. + * HRegion constructor. This constructor should only be used for testing and extensions. 
Instances + * of HRegion should be instantiated with the {@link HRegion#createHRegion} or + * {@link HRegion#openHRegion} method. + * @param tableDir qualified path of directory where region should be located, usually the table + * directory. + * @param wal The WAL is the outbound log for any updates to the HRegion The wal file is a logfile + * from the previous execution that's custom-computed for this HRegion. The HRegionServer + * computes and sorts the appropriate wal info for this HRegion. If there is a previous + * wal file (implying that the HRegion has been written-to before), then read it from the + * supplied path. * @param fs is the filesystem. * @param confParam is global configuration settings. - * @param regionInfo - HRegionInfo that describes the region - * is new), then read them from the supplied path. + * @param regionInfo - HRegionInfo that describes the region is new), then read them from the + * supplied path. * @param htd the table descriptor * @param rsServices reference to {@link RegionServerServices} or null * @deprecated Use other constructors. @@ -622,24 +614,22 @@ @Deprecated @VisibleForTesting public HRegion(final Path tableDir, final WAL wal, final FileSystem fs, - final Configuration confParam, final HRegionInfo regionInfo, - final HTableDescriptor htd, final RegionServerServices rsServices) { - this(new HRegionFileSystem(confParam, fs, tableDir, regionInfo), - wal, confParam, htd, rsServices); + final Configuration confParam, final HRegionInfo regionInfo, final HTableDescriptor htd, + final RegionServerServices rsServices) { + this(new HRegionFileSystem(confParam, fs, tableDir, regionInfo), wal, confParam, htd, + rsServices); } /** - * HRegion constructor. This constructor should only be used for testing and - * extensions. Instances of HRegion should be instantiated with the - * {@link HRegion#createHRegion} or {@link HRegion#openHRegion} method. - * + * HRegion constructor. This constructor should only be used for testing and extensions. Instances + * of HRegion should be instantiated with the {@link HRegion#createHRegion} or + * {@link HRegion#openHRegion} method. * @param fs is the filesystem. - * @param wal The WAL is the outbound log for any updates to the HRegion - * The wal file is a logfile from the previous execution that's - * custom-computed for this HRegion. The HRegionServer computes and sorts the - * appropriate wal info for this HRegion. If there is a previous wal file - * (implying that the HRegion has been written-to before), then read it from - * the supplied path. + * @param wal The WAL is the outbound log for any updates to the HRegion The wal file is a logfile + * from the previous execution that's custom-computed for this HRegion. The HRegionServer + * computes and sorts the appropriate wal info for this HRegion. If there is a previous + * wal file (implying that the HRegion has been written-to before), then read it from the + * supplied path. * @param confParam is global configuration settings. 
* @param htd the table descriptor * @param rsServices reference to {@link RegionServerServices} or null @@ -659,19 +649,17 @@ // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor this.baseConf = confParam; - this.conf = new CompoundConfiguration() - .add(confParam) - .addStringMap(htd.getConfiguration()) - .addBytesMap(htd.getValues()); - this.flushCheckInterval = conf.getInt(MEMSTORE_PERIODIC_FLUSH_INTERVAL, - DEFAULT_CACHE_FLUSH_INTERVAL); + this.conf = new CompoundConfiguration().add(confParam).addStringMap(htd.getConfiguration()) + .addBytesMap(htd.getValues()); + this.flushCheckInterval = + conf.getInt(MEMSTORE_PERIODIC_FLUSH_INTERVAL, DEFAULT_CACHE_FLUSH_INTERVAL); this.flushPerChanges = conf.getLong(MEMSTORE_FLUSH_PER_CHANGES, DEFAULT_FLUSH_PER_CHANGES); if (this.flushPerChanges > MAX_FLUSH_PER_CHANGES) { - throw new IllegalArgumentException(MEMSTORE_FLUSH_PER_CHANGES + " can not exceed " - + MAX_FLUSH_PER_CHANGES); + throw new IllegalArgumentException( + MEMSTORE_FLUSH_PER_CHANGES + " can not exceed " + MAX_FLUSH_PER_CHANGES); } - this.rowLockWaitDuration = conf.getInt("hbase.rowlock.wait.duration", - DEFAULT_ROWLOCK_WAIT_DURATION); + this.rowLockWaitDuration = + conf.getInt("hbase.rowlock.wait.duration", DEFAULT_ROWLOCK_WAIT_DURATION); this.maxWaitForSeqId = conf.getInt(MAX_WAIT_FOR_SEQ_ID_KEY, DEFAULT_MAX_WAIT_FOR_SEQ_ID); this.isLoadingCfsOnDemandDefault = conf.getBoolean(LOAD_CFS_ON_DEMAND_CONFIG_KEY, true); @@ -681,36 +669,31 @@ setHTableSpecificConf(); this.scannerReadPoints = new ConcurrentHashMap(); - this.busyWaitDuration = conf.getLong( - "hbase.busy.wait.duration", DEFAULT_BUSY_WAIT_DURATION); + this.busyWaitDuration = conf.getLong("hbase.busy.wait.duration", DEFAULT_BUSY_WAIT_DURATION); this.maxBusyWaitMultiplier = conf.getInt("hbase.busy.wait.multiplier.max", 2); if (busyWaitDuration * maxBusyWaitMultiplier <= 0L) { - throw new IllegalArgumentException("Invalid hbase.busy.wait.duration (" - + busyWaitDuration + ") or hbase.busy.wait.multiplier.max (" - + maxBusyWaitMultiplier + "). Their product should be positive"); + throw new IllegalArgumentException("Invalid hbase.busy.wait.duration (" + busyWaitDuration + + ") or hbase.busy.wait.multiplier.max (" + maxBusyWaitMultiplier + + "). Their product should be positive"); } this.maxBusyWaitDuration = conf.getLong("hbase.ipc.client.call.purge.timeout", 2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT); /* - * timestamp.slop provides a server-side constraint on the timestamp. This - * assumes that you base your TS around currentTimeMillis(). In this case, - * throw an error to the user if the user-specified TS is newer than now + - * slop. LATEST_TIMESTAMP == don't use this functionality + * timestamp.slop provides a server-side constraint on the timestamp. This assumes that you base + * your TS around currentTimeMillis(). In this case, throw an error to the user if the + * user-specified TS is newer than now + slop. LATEST_TIMESTAMP == don't use this functionality */ - this.timestampSlop = conf.getLong( - "hbase.hregion.keyvalue.timestamp.slop.millisecs", - HConstants.LATEST_TIMESTAMP); + this.timestampSlop = conf.getLong("hbase.hregion.keyvalue.timestamp.slop.millisecs", + HConstants.LATEST_TIMESTAMP); /** - * Timeout for the process time in processRowsWithLocks(). - * Use -1 to switch off time bound. + * Timeout for the process time in processRowsWithLocks(). Use -1 to switch off time bound. 
*/ - this.rowProcessorTimeout = conf.getLong( - "hbase.hregion.row.processor.timeout", DEFAULT_ROW_PROCESSOR_TIMEOUT); - this.durability = htd.getDurability() == Durability.USE_DEFAULT - ? DEFAULT_DURABILITY - : htd.getDurability(); + this.rowProcessorTimeout = + conf.getLong("hbase.hregion.row.processor.timeout", DEFAULT_ROW_PROCESSOR_TIMEOUT); + this.durability = + htd.getDurability() == Durability.USE_DEFAULT ? DEFAULT_DURABILITY : htd.getDurability(); if (rsServices != null) { this.rsAccounting = this.rsServices.getRegionServerAccounting(); // don't initialize coprocessors if not running within a regionserver @@ -735,16 +718,14 @@ } // by default, we allow writes against a region when it's in recovering - this.disallowWritesInRecovering = - conf.getBoolean(HConstants.DISALLOW_WRITES_IN_RECOVERING, - HConstants.DEFAULT_DISALLOW_WRITES_IN_RECOVERING_CONFIG); + this.disallowWritesInRecovering = conf.getBoolean(HConstants.DISALLOW_WRITES_IN_RECOVERING, + HConstants.DEFAULT_DISALLOW_WRITES_IN_RECOVERING_CONFIG); configurationManager = Optional.absent(); // disable stats tracking system tables, but check the config for everything else - this.regionStatsEnabled = htd.getTableName().getNamespaceAsString().equals( - NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR) ? - false : - conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, + this.regionStatsEnabled = htd.getTableName().getNamespaceAsString() + .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR) ? false + : conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE); } @@ -757,15 +738,14 @@ HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE); } this.memstoreFlushSize = flushSize; - this.blockingMemStoreSize = this.memstoreFlushSize * - conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, - HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER); + this.blockingMemStoreSize = + this.memstoreFlushSize * conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, + HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER); } /** - * Initialize this region. - * Used only by tests and SplitTransaction to reopen the region. - * You should use createHRegion() or openHRegion() + * Initialize this region. Used only by tests and SplitTransaction to reopen the region. You + * should use createHRegion() or openHRegion() * @return What the next sequence (edit) id should be. * @throws IOException e * @deprecated use HRegion.createHRegion() or HRegion.openHRegion() @@ -777,7 +757,6 @@ /** * Initialize this region. - * * @param reporter Tickle every so often if initialize is taking a while. * @return What the next sequence (edit) id should be. * @throws IOException e @@ -792,8 +771,8 @@ // nextSeqid will be -1 if the initialization fails. // At least it will be 0 otherwise. if (nextSeqId == -1) { - status.abort("Exception during region " + getRegionInfo().getRegionNameAsString() + - " initialization."); + status.abort("Exception during region " + getRegionInfo().getRegionNameAsString() + + " initialization."); } } } @@ -834,8 +813,8 @@ if (this.writestate.writesEnabled) { status.setStatus("Cleaning up detritus from prior splits"); - // Get rid of any splits or merges that were lost in-progress. Clean out - // these directories here on open. We may be opening a region that was + // Get rid of any splits or merges that were lost in-progress. Clean out + // these directories here on open. We may be opening a region that was // being split but we crashed in the middle of it all. 
fs.cleanupAnySplitDetritus(); fs.cleanupMergesDir(); @@ -848,7 +827,7 @@ this.flushPolicy = FlushPolicyFactory.create(this, conf); long lastFlushTime = EnvironmentEdgeManager.currentTime(); - for (Store store: stores.values()) { + for (Store store : stores.values()) { this.lastStoreFlushTimeMap.put(store, lastFlushTime); } @@ -860,14 +839,15 @@ // is opened before recovery completes. So we add a safety bumper to avoid new sequence number // overlaps used sequence numbers if (this.writestate.writesEnabled) { - nextSeqid = WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(), this.fs - .getRegionDir(), nextSeqid, (this.recovering ? (this.flushPerChanges + 10000000) : 1)); + nextSeqid = + WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(), this.fs.getRegionDir(), + nextSeqid, (this.recovering ? (this.flushPerChanges + 10000000) : 1)); } else { nextSeqid++; } - LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() + - "; next sequenceid=" + nextSeqid); + LOG.info( + "Onlined " + this.getRegionInfo().getShortNameToLog() + "; next sequenceid=" + nextSeqid); // A region can be reopened if failed a split; reset flags this.closing.set(false); @@ -890,7 +870,7 @@ * @throws IOException */ private long initializeStores(final CancelableProgressable reporter, MonitoredTask status) - throws IOException { + throws IOException { // Load in all the HStores. long maxSeqId = -1; @@ -900,9 +880,9 @@ if (!htableDescriptor.getFamilies().isEmpty()) { // initialize the thread pool for opening stores in parallel. ThreadPoolExecutor storeOpenerThreadPool = - getStoreOpenAndCloseThreadPool("StoreOpener-" + this.getRegionInfo().getShortNameToLog()); + getStoreOpenAndCloseThreadPool("StoreOpener-" + this.getRegionInfo().getShortNameToLog()); CompletionService completionService = - new ExecutorCompletionService(storeOpenerThreadPool); + new ExecutorCompletionService(storeOpenerThreadPool); // initialize each store in parallel for (final HColumnDescriptor family : htableDescriptor.getFamilies()) { @@ -922,8 +902,7 @@ this.stores.put(store.getFamily().getName(), store); long storeMaxSequenceId = store.getMaxSequenceId(); - maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), - storeMaxSequenceId); + maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), storeMaxSequenceId); if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) { maxSeqId = storeMaxSequenceId; } @@ -934,7 +913,7 @@ } allStoresOpened = true; } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw (InterruptedIOException) new InterruptedIOException().initCause(e); } catch (ExecutionException e) { throw new IOException(e.getCause()); } finally { @@ -968,12 +947,12 @@ */ private NavigableMap> getStoreFiles() { NavigableMap> allStoreFiles = - new TreeMap>(Bytes.BYTES_COMPARATOR); - for (Store store: getStores()) { + new TreeMap>(Bytes.BYTES_COMPARATOR); + for (Store store : getStores()) { Collection storeFiles = store.getStorefiles(); if (storeFiles == null) continue; List storeFileNames = new ArrayList(); - for (StoreFile storeFile: storeFiles) { + for (StoreFile storeFile : storeFiles) { storeFileNames.add(storeFile.getPath()); } allStoreFiles.put(store.getFamily().getName(), storeFileNames); @@ -983,9 +962,9 @@ private void writeRegionOpenMarker(WAL wal, long openSeqId) throws IOException { Map> storeFiles = getStoreFiles(); - RegionEventDescriptor regionOpenDesc = ProtobufUtil.toRegionEventDescriptor( - RegionEventDescriptor.EventType.REGION_OPEN, 
getRegionInfo(), openSeqId, - getRegionServerServices().getServerName(), storeFiles); + RegionEventDescriptor regionOpenDesc = + ProtobufUtil.toRegionEventDescriptor(RegionEventDescriptor.EventType.REGION_OPEN, + getRegionInfo(), openSeqId, getRegionServerServices().getServerName(), storeFiles); WALUtil.writeRegionEventMarker(wal, getTableDesc(), getRegionInfo(), regionOpenDesc, mvcc); } @@ -1017,15 +996,13 @@ @Override public HDFSBlocksDistribution getHDFSBlocksDistribution() { - HDFSBlocksDistribution hdfsBlocksDistribution = - new HDFSBlocksDistribution(); + HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); synchronized (this.stores) { for (Store store : this.stores.values()) { Collection storeFiles = store.getStorefiles(); if (storeFiles == null) continue; for (StoreFile sf : storeFiles) { - HDFSBlocksDistribution storeFileBlocksDistribution = - sf.getHDFSBlockDistribution(); + HDFSBlocksDistribution storeFileBlocksDistribution = sf.getHDFSBlockDistribution(); hdfsBlocksDistribution.add(storeFileBlocksDistribution); } } @@ -1057,13 +1034,13 @@ * @throws IOException */ public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf, - final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath) - throws IOException { + final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath) + throws IOException { HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); FileSystem fs = tablePath.getFileSystem(conf); HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo); - for (HColumnDescriptor family: tableDescriptor.getFamilies()) { + for (HColumnDescriptor family : tableDescriptor.getFamilies()) { Collection storeFiles = regionFs.getStoreFiles(family.getNameAsString()); if (storeFiles == null) continue; for (StoreFileInfo storeFileInfo : storeFiles) { @@ -1078,8 +1055,7 @@ } /** - * Increase the size of mem store in this region and the size of global mem - * store + * Increase the size of mem store in this region and the size of global mem store * @return the size of memstore in this region */ public long addAndGetGlobalMemstoreSize(long memStoreSize) { @@ -1095,8 +1071,7 @@ } /** - * @return Instance of {@link RegionServerServices} used by this HRegion. - * Can be null. + * @return Instance of {@link RegionServerServices} used by this HRegion. Can be null. */ RegionServerServices getRegionServerServices() { return this.rsServices; @@ -1179,15 +1154,15 @@ boolean wasRecovering = this.recovering; // before we flip the recovering switch (enabling reads) we should write the region open // event to WAL if needed - if (wal != null && getRegionServerServices() != null && !writestate.readOnly - && wasRecovering && !newState) { + if (wal != null && getRegionServerServices() != null && !writestate.readOnly && wasRecovering + && !newState) { // force a flush only if region replication is set up for this region. Otherwise no need. boolean forceFlush = getTableDesc().getRegionReplication() > 1; // force a flush first - MonitoredTask status = TaskMonitor.get().createStatus( - "Flushing region " + this + " because recovery is finished"); + MonitoredTask status = TaskMonitor.get() + .createStatus("Flushing region " + this + " because recovery is finished"); try { if (forceFlush) { internalFlushcache(status); @@ -1205,13 +1180,15 @@ // We cannot rethrow this exception since we are being called from the zk thread. The // region has already opened. 
In this case we log the error, but continue LOG.warn(getRegionInfo().getEncodedName() + " : was not able to write region opening " - + "event to WAL, continueing", e); + + "event to WAL, continueing", + e); } } catch (IOException ioe) { // Distributed log replay semantics does not necessarily require a flush, since the replayed // data is already written again in the WAL. So failed flush should be fine. LOG.warn(getRegionInfo().getEncodedName() + " : was not able to flush " - + "event to WAL, continueing", ioe); + + "event to WAL, continueing", + ioe); } finally { status.cleanup(); } @@ -1258,49 +1235,45 @@ } public boolean areWritesEnabled() { - synchronized(this.writestate) { + synchronized (this.writestate) { return this.writestate.writesEnabled; } } - public MultiVersionConcurrencyControl getMVCC() { - return mvcc; - } + public MultiVersionConcurrencyControl getMVCC() { + return mvcc; + } - @Override - public long getMaxFlushedSeqId() { - return maxFlushedSeqId; - } + @Override + public long getMaxFlushedSeqId() { + return maxFlushedSeqId; + } - @Override - public long getReadpoint(IsolationLevel isolationLevel) { - if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) { - // This scan can read even uncommitted transactions - return Long.MAX_VALUE; - } - return mvcc.getReadPoint(); - } + @Override + public long getReadpoint(IsolationLevel isolationLevel) { + if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) { + // This scan can read even uncommitted transactions + return Long.MAX_VALUE; + } + return mvcc.getReadPoint(); + } - @Override - public boolean isLoadingCfsOnDemandDefault() { - return this.isLoadingCfsOnDemandDefault; - } + @Override + public boolean isLoadingCfsOnDemandDefault() { + return this.isLoadingCfsOnDemandDefault; + } /** - * Close down this HRegion. Flush the cache, shut down each HStore, don't - * service any more calls. - * - *

This method could take some time to execute, so don't call it from a - * time-sensitive thread. - * - * @return Vector of all the storage files that the HRegion's component - * HStores make use of. It's a list of all HStoreFile objects. Returns empty - * vector if already closed and null if judged that it should not close. - * + * Close down this HRegion. Flush the cache, shut down each HStore, don't service any more calls. + *

+ * This method could take some time to execute, so don't call it from a time-sensitive thread. + * @return Vector of all the storage files that the HRegion's component HStores make use of. It's + * a list of all HStoreFile objects. Returns empty vector if already closed and null if + * judged that it should not close. * @throws IOException e - * @throws DroppedSnapshotException Thrown when replay of wal is required - * because a Snapshot was not properly persisted. The region is put in closing mode, and the - * caller MUST abort after this. + * @throws DroppedSnapshotException Thrown when replay of wal is required because a Snapshot was + * not properly persisted. The region is put in closing mode, and the caller MUST abort + * after this. */ public Map> close() throws IOException { return close(false); @@ -1317,8 +1290,7 @@ public static final int SYSTEM_CACHE_FLUSH_INTERVAL = 300000; // 5 minutes /** Conf key to force a flush if there are already enough changes for one region in memstore */ - public static final String MEMSTORE_FLUSH_PER_CHANGES = - "hbase.regionserver.flush.per.changes"; + public static final String MEMSTORE_FLUSH_PER_CHANGES = "hbase.regionserver.flush.per.changes"; public static final long DEFAULT_FLUSH_PER_CHANGES = 30000000; // 30 millions /** * The following MAX_FLUSH_PER_CHANGES is large enough because each KeyValue has 20+ bytes @@ -1327,28 +1299,23 @@ public static final long MAX_FLUSH_PER_CHANGES = 1000000000; // 1G /** - * Close down this HRegion. Flush the cache unless abort parameter is true, - * Shut down each HStore, don't service any more calls. - * - * This method could take some time to execute, so don't call it from a - * time-sensitive thread. - * + * Close down this HRegion. Flush the cache unless abort parameter is true, Shut down each HStore, + * don't service any more calls. This method could take some time to execute, so don't call it + * from a time-sensitive thread. * @param abort true if server is aborting (only during testing) - * @return Vector of all the storage files that the HRegion's component - * HStores make use of. It's a list of HStoreFile objects. Can be null if - * we are not to close at this time or we are already closed. - * + * @return Vector of all the storage files that the HRegion's component HStores make use of. It's + * a list of HStoreFile objects. Can be null if we are not to close at this time or we are + * already closed. * @throws IOException e - * @throws DroppedSnapshotException Thrown when replay of wal is required - * because a Snapshot was not properly persisted. The region is put in closing mode, and the - * caller MUST abort after this. + * @throws DroppedSnapshotException Thrown when replay of wal is required because a Snapshot was + * not properly persisted. The region is put in closing mode, and the caller MUST abort + * after this. */ public Map> close(final boolean abort) throws IOException { // Only allow one thread to close at a time. Serialize them so dual // threads attempting to close will run up against each other. - MonitoredTask status = TaskMonitor.get().createStatus( - "Closing region " + this + - (abort ? " due to abort" : "")); + MonitoredTask status = + TaskMonitor.get().createStatus("Closing region " + this + (abort ? 
" due to abort" : "")); status.setStatus("Waiting for close lock"); try { @@ -1368,8 +1335,8 @@ this.closing.set(closing); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK_EXCEPTION_PATH", - justification="I think FindBugs is confused") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UL_UNRELEASED_LOCK_EXCEPTION_PATH", + justification = "I think FindBugs is confused") private Map> doClose(final boolean abort, MonitoredTask status) throws IOException { if (isClosed()) { @@ -1427,12 +1394,11 @@ if (actualFlushes > 5) { // If we tried 5 times and are unable to clear memory, abort // so we do not lose data - throw new DroppedSnapshotException("Failed clearing memory after " + - actualFlushes + " attempts on region: " + - Bytes.toStringBinary(getRegionInfo().getRegionName())); + throw new DroppedSnapshotException( + "Failed clearing memory after " + actualFlushes + " attempts on region: " + + Bytes.toStringBinary(getRegionInfo().getRegionName())); } - LOG.info("Running extra flush, " + actualFlushes + - " (carrying snapshot?) " + this); + LOG.info("Running extra flush, " + actualFlushes + " (carrying snapshot?) " + this); } internalFlushcache(status); } catch (IOException ioe) { @@ -1440,42 +1406,42 @@ synchronized (writestate) { writestate.writesEnabled = true; } - // Have to throw to upper layers. I can't abort server from here. + // Have to throw to upper layers. I can't abort server from here. throw ioe; } } } Map> result = - new TreeMap>(Bytes.BYTES_COMPARATOR); + new TreeMap>(Bytes.BYTES_COMPARATOR); if (!stores.isEmpty()) { // initialize the thread pool for closing stores in parallel. - ThreadPoolExecutor storeCloserThreadPool = - getStoreOpenAndCloseThreadPool("StoreCloserThread-" + - getRegionInfo().getRegionNameAsString()); + ThreadPoolExecutor storeCloserThreadPool = getStoreOpenAndCloseThreadPool( + "StoreCloserThread-" + getRegionInfo().getRegionNameAsString()); CompletionService>> completionService = - new ExecutorCompletionService>>(storeCloserThreadPool); + new ExecutorCompletionService>>( + storeCloserThreadPool); // close each store in parallel for (final Store store : stores.values()) { long flushableSize = store.getFlushableSize(); if (!(abort || flushableSize == 0 || writestate.readOnly)) { if (getRegionServerServices() != null) { - getRegionServerServices().abort("Assertion failed while closing store " - + getRegionInfo().getRegionNameAsString() + " " + store - + ". flushableSize expected=0, actual= " + flushableSize - + ". Current memstoreSize=" + getMemstoreSize() + ". Maybe a coprocessor " - + "operation failed and left the memstore in a partially updated state.", null); + getRegionServerServices().abort( + "Assertion failed while closing store " + getRegionInfo().getRegionNameAsString() + + " " + store + ". flushableSize expected=0, actual= " + flushableSize + + ". Current memstoreSize=" + getMemstoreSize() + ". 
Maybe a coprocessor " + + "operation failed and left the memstore in a partially updated state.", + null); } } - completionService - .submit(new Callable>>() { - @Override - public Pair> call() throws IOException { - return new Pair>( - store.getFamily().getName(), store.close()); - } - }); + completionService.submit(new Callable>>() { + @Override + public Pair> call() throws IOException { + return new Pair>(store.getFamily().getName(), + store.close()); + } + }); } try { for (int i = 0; i < stores.size(); i++) { @@ -1489,7 +1455,7 @@ familyFiles.addAll(storeFiles.getSecond()); } } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw (InterruptedIOException) new InterruptedIOException().initCause(e); } catch (ExecutionException e) { throw new IOException(e.getCause()); } finally { @@ -1541,7 +1507,7 @@ try { while (writestate.compacting.get() > 0 || writestate.flushing) { LOG.debug("waiting for " + writestate.compacting + " compactions" - + (writestate.flushing ? " & cache flush" : "") + " to complete for region " + this); + + (writestate.flushing ? " & cache flush" : "") + " to complete for region " + this); try { writestate.wait(); } catch (InterruptedException iex) { @@ -1558,22 +1524,17 @@ } } - protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool( - final String threadNamePrefix) { + protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool(final String threadNamePrefix) { int numStores = Math.max(1, this.htableDescriptor.getFamilies().size()); - int maxThreads = Math.min(numStores, - conf.getInt(HConstants.HSTORE_OPEN_AND_CLOSE_THREADS_MAX, - HConstants.DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX)); + int maxThreads = Math.min(numStores, conf.getInt(HConstants.HSTORE_OPEN_AND_CLOSE_THREADS_MAX, + HConstants.DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX)); return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix); } - protected ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool( - final String threadNamePrefix) { + protected ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool(final String threadNamePrefix) { int numStores = Math.max(1, this.htableDescriptor.getFamilies().size()); - int maxThreads = Math.max(1, - conf.getInt(HConstants.HSTORE_OPEN_AND_CLOSE_THREADS_MAX, - HConstants.DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX) - / numStores); + int maxThreads = Math.max(1, conf.getInt(HConstants.HSTORE_OPEN_AND_CLOSE_THREADS_MAX, + HConstants.DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX) / numStores); return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix); } @@ -1590,12 +1551,12 @@ }); } - /** - * @return True if its worth doing a flush before we put up the close flag. - */ + /** + * @return True if its worth doing a flush before we put up the close flag. + */ private boolean worthPreFlushing() { - return this.memstoreSize.get() > - this.conf.getLong("hbase.hregion.preclose.flush.size", 1024 * 1024 * 5); + return this.memstoreSize.get() > this.conf.getLong("hbase.hregion.preclose.flush.size", + 1024 * 1024 * 5); } ////////////////////////////////////////////////////////////////////////////// @@ -1620,10 +1581,10 @@ } /** - * A split takes the config from the parent region & passes it to the daughter - * region's constructor. If 'conf' was passed, you would end up using the HTD - * of the parent region in addition to the new daughter HTD. Pass 'baseConf' - * to the daughter regions to avoid this tricky dedupe problem. 
+ * A split takes the config from the parent region & passes it to the daughter region's + * constructor. If 'conf' was passed, you would end up using the HTD of the parent region in + * addition to the new daughter HTD. Pass 'baseConf' to the daughter regions to avoid this tricky + * dedupe problem. * @return Configuration object */ Configuration getBaseConf() { @@ -1642,8 +1603,8 @@ @Override public long getEarliestFlushTimeForAllStores() { - return lastStoreFlushTimeMap.isEmpty() ? Long.MAX_VALUE : Collections.min(lastStoreFlushTimeMap - .values()); + return lastStoreFlushTimeMap.isEmpty() ? Long.MAX_VALUE + : Collections.min(lastStoreFlushTimeMap.values()); } @Override @@ -1679,9 +1640,9 @@ // Subtract - 1 to go earlier than the current oldest, unflushed edit in memstore; this will // give us a sequence id that is for sure flushed. We want edit replay to start after this // sequence id in this region. If NO_SEQNUM, use the regions maximum flush id. - long csid = (earliest == HConstants.NO_SEQNUM)? lastFlushOpSeqIdLocal: earliest - 1; - regionLoadBldr.addStoreCompleteSequenceId(StoreSequenceId. - newBuilder().setFamilyName(ByteString.copyFrom(familyName)).setSequenceId(csid).build()); + long csid = (earliest == HConstants.NO_SEQNUM) ? lastFlushOpSeqIdLocal : earliest - 1; + regionLoadBldr.addStoreCompleteSequenceId(StoreSequenceId.newBuilder() + .setFamilyName(ByteString.copyFrom(familyName)).setSequenceId(csid).build()); } return regionLoadBldr.setCompleteSequenceId(getMaxFlushedSeqId()); } @@ -1740,9 +1701,8 @@ } /** - * This is a helper function that compact all the stores synchronously - * It is used by utilities and testing - * + * This is a helper function that compact all the stores synchronously It is used by utilities and + * testing * @throws IOException e */ public void compactStores() throws IOException { @@ -1755,14 +1715,12 @@ } /** - * This is a helper function that compact the given store - * It is used by utilities and testing - * + * This is a helper function that compact the given store It is used by utilities and testing * @throws IOException e */ @VisibleForTesting - void compactStore(byte[] family, CompactionThroughputController throughputController) - throws IOException { + void compactStore(byte[] family, CompactionThroughputController throughputController) + throws IOException { Store s = getStore(family); CompactionContext compaction = s.requestCompaction(); if (compaction != null) { @@ -1771,16 +1729,11 @@ } /* - * Called by compaction thread and after region is opened to compact the - * HStores if necessary. - * - *

This operation could block for a long time, so don't call it from a - * time-sensitive thread. - * - * Note that no locking is necessary at this level because compaction only - * conflicts with a region split, and that cannot happen because the region - * server does them sequentially and not in parallel. - * + * Called by compaction thread and after region is opened to compact the HStores if necessary. + *

This operation could block for a long time, so don't call it from a time-sensitive thread. + * Note that no locking is necessary at this level because compaction only conflicts with a region + * split, and that cannot happen because the region server does them sequentially and not in + * parallel. * @param compaction Compaction details, obtained by requestCompaction() * @param throughputController * @return whether the compaction completed @@ -1802,78 +1755,37 @@ MonitoredTask status = null; boolean requestNeedsCancellation = true; /* - * We are trying to remove / relax the region read lock for compaction. - * Let's see what are the potential race conditions among the operations (user scan, - * region split, region close and region bulk load). - * - * user scan ---> region read lock - * region split --> region close first --> region write lock - * region close --> region write lock - * region bulk load --> region write lock - * - * read lock is compatible with read lock. ---> no problem with user scan/read - * region bulk load does not cause problem for compaction (no consistency problem, store lock - * will help the store file accounting). - * They can run almost concurrently at the region level. - * - * The only remaining race condition is between the region close and compaction. - * So we will evaluate, below, how region close intervenes with compaction if compaction does - * not acquire region read lock. - * - * Here are the steps for compaction: - * 1. obtain list of StoreFile's - * 2. create StoreFileScanner's based on list from #1 - * 3. perform compaction and save resulting files under tmp dir - * 4. swap in compacted files - * - * #1 is guarded by store lock. This patch does not change this --> no worse or better - * For #2, we obtain smallest read point (for region) across all the Scanners (for both default - * compactor and stripe compactor). - * The read points are for user scans. Region keeps the read points for all currently open - * user scanners. - * Compaction needs to know the smallest read point so that during re-write of the hfiles, - * it can remove the mvcc points for the cells if their mvccs are older than the smallest - * since they are not needed anymore. - * This will not conflict with compaction. - * For #3, it can be performed in parallel to other operations. - * For #4 bulk load and compaction don't conflict with each other on the region level - * (for multi-family atomicy). - * Region close and compaction are guarded pretty well by the 'writestate'. - * In HRegion#doClose(), we have : - * synchronized (writestate) { - * // Disable compacting and flushing by background threads for this - * // region. - * canFlush = !writestate.readOnly; - * writestate.writesEnabled = false; - * LOG.debug("Closing " + this + ": disabling compactions & flushes"); - * waitForFlushesAndCompactions(); - * } - * waitForFlushesAndCompactions() would wait for writestate.compacting to come down to 0. - * and in HRegion.compact() - * try { - * synchronized (writestate) { - * if (writestate.writesEnabled) { - * wasStateSet = true; - * ++writestate.compacting; - * } else { - * String msg = "NOT compacting region " + this + ". 
Writes disabled."; - * LOG.info(msg); - * status.abort(msg); - * return false; - * } - * } - * Also in compactor.performCompaction(): - * check periodically to see if a system stop is requested - * if (closeCheckInterval > 0) { - * bytesWritten += len; - * if (bytesWritten > closeCheckInterval) { - * bytesWritten = 0; - * if (!store.areWritesEnabled()) { - * progress.cancel(); - * return false; - * } - * } - * } + * We are trying to remove / relax the region read lock for compaction. Let's see what are the + * potential race conditions among the operations (user scan, region split, region close and + * region bulk load). user scan ---> region read lock region split --> region close first --> + * region write lock region close --> region write lock region bulk load --> region write lock + * read lock is compatible with read lock. ---> no problem with user scan/read region bulk load + * does not cause problem for compaction (no consistency problem, store lock will help the store + * file accounting). They can run almost concurrently at the region level. The only remaining + * race condition is between the region close and compaction. So we will evaluate, below, how + * region close intervenes with compaction if compaction does not acquire region read lock. Here + * are the steps for compaction: 1. obtain list of StoreFile's 2. create StoreFileScanner's + * based on list from #1 3. perform compaction and save resulting files under tmp dir 4. swap in + * compacted files #1 is guarded by store lock. This patch does not change this --> no worse or + * better For #2, we obtain smallest read point (for region) across all the Scanners (for both + * default compactor and stripe compactor). The read points are for user scans. Region keeps the + * read points for all currently open user scanners. Compaction needs to know the smallest read + * point so that during re-write of the hfiles, it can remove the mvcc points for the cells if + * their mvccs are older than the smallest since they are not needed anymore. This will not + * conflict with compaction. For #3, it can be performed in parallel to other operations. For #4 + * bulk load and compaction don't conflict with each other on the region level (for multi-family + * atomicy). Region close and compaction are guarded pretty well by the 'writestate'. In + * HRegion#doClose(), we have : synchronized (writestate) { // Disable compacting and flushing + * by background threads for this // region. canFlush = !writestate.readOnly; + * writestate.writesEnabled = false; LOG.debug("Closing " + this + + * ": disabling compactions & flushes"); waitForFlushesAndCompactions(); } + * waitForFlushesAndCompactions() would wait for writestate.compacting to come down to 0. and in + * HRegion.compact() try { synchronized (writestate) { if (writestate.writesEnabled) { + * wasStateSet = true; ++writestate.compacting; } else { String msg = "NOT compacting region " + + * this + ". Writes disabled."; LOG.info(msg); status.abort(msg); return false; } } Also in + * compactor.performCompaction(): check periodically to see if a system stop is requested if + * (closeCheckInterval > 0) { bytesWritten += len; if (bytesWritten > closeCheckInterval) { + * bytesWritten = 0; if (!store.areWritesEnabled()) { progress.cancel(); return false; } } } */ try { byte[] cf = Bytes.toBytes(store.getColumnFamilyName()); @@ -1905,7 +1817,7 @@ } } LOG.info("Starting compaction on " + store + " in region " + this - + (compaction.getRequest().isOffPeak()?" 
as an off-peak compaction":"")); + + (compaction.getRequest().isOffPeak() ? " as an off-peak compaction" : "")); doRegionCompactionPrep(); try { status.setStatus("Compacting store " + store); @@ -1943,26 +1855,22 @@ } /** - * Flush the cache. - * - * When this method is called the cache will be flushed unless: + * Flush the cache. When this method is called the cache will be flushed unless: *

- *   <li>the cache is empty</li>
- *   <li>the region is closed.</li>
- *   <li>a flush is already in progress</li>
- *   <li>writes are disabled</li>
+ * <li>the cache is empty</li>
+ * <li>the region is closed.</li>
+ * <li>a flush is already in progress</li>
+ * <li>writes are disabled</li>
 * </ol>
- *
- * <p>This method may block for some time, so it should not be called from a
- * time-sensitive thread.
+ * <p>
+ * This method may block for some time, so it should not be called from a time-sensitive thread. * @param forceFlushAllStores whether we want to flush all stores * @param writeFlushRequestWalMarker whether to write the flush request marker to WAL * @return whether the flush is success and whether the region needs compacting - * * @throws IOException general io exceptions - * @throws DroppedSnapshotException Thrown when replay of wal is required - * because a Snapshot was not properly persisted. The region is put in closing mode, and the - * caller MUST abort after this. + * @throws DroppedSnapshotException Thrown when replay of wal is required because a Snapshot was + * not properly persisted. The region is put in closing mode, and the caller MUST abort + * after this. */ public FlushResult flushcache(boolean forceFlushAllStores, boolean writeFlushRequestWalMarker) throws IOException { @@ -1998,13 +1906,11 @@ this.writestate.flushing = true; } else { if (LOG.isDebugEnabled()) { - LOG.debug("NOT flushing memstore for region " + this - + ", flushing=" + writestate.flushing + ", writesEnabled=" - + writestate.writesEnabled); + LOG.debug("NOT flushing memstore for region " + this + ", flushing=" + + writestate.flushing + ", writesEnabled=" + writestate.writesEnabled); } String msg = "Not flushing since " - + (writestate.flushing ? "already flushing" - : "writes not enabled"); + + (writestate.flushing ? "already flushing" : "writes not enabled"); status.abort(msg); return new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false); } @@ -2013,8 +1919,8 @@ try { Collection specificStoresToFlush = forceFlushAllStores ? stores.values() : flushPolicy.selectStoresToFlush(); - FlushResult fs = internalFlushcache(specificStoresToFlush, - status, writeFlushRequestWalMarker); + FlushResult fs = + internalFlushcache(specificStoresToFlush, status, writeFlushRequestWalMarker); if (coprocessorHost != null) { status.setStatus("Running post-flush coprocessor hooks"); @@ -2048,9 +1954,9 @@ store.getFamily().getName()) - 1; if (earliest > 0 && earliest + flushPerChanges < mvcc.getReadPoint()) { if (LOG.isDebugEnabled()) { - LOG.debug("Flush column family " + store.getColumnFamilyName() + " of " + - getRegionInfo().getEncodedName() + " because unflushed sequenceid=" + earliest + - " is > " + this.flushPerChanges + " from current=" + mvcc.getReadPoint()); + LOG.debug("Flush column family " + store.getColumnFamilyName() + " of " + + getRegionInfo().getEncodedName() + " because unflushed sequenceid=" + earliest + + " is > " + this.flushPerChanges + " from current=" + mvcc.getReadPoint()); } return true; } @@ -2060,9 +1966,9 @@ long now = EnvironmentEdgeManager.currentTime(); if (store.timeOfOldestEdit() < now - this.flushCheckInterval) { if (LOG.isDebugEnabled()) { - LOG.debug("Flush column family: " + store.getColumnFamilyName() + " of " + - getRegionInfo().getEncodedName() + " because time of oldest edit=" + - store.timeOfOldestEdit() + " is > " + this.flushCheckInterval + " from now =" + now); + LOG.debug("Flush column family: " + store.getColumnFamilyName() + " of " + + getRegionInfo().getEncodedName() + " because time of oldest edit=" + + store.timeOfOldestEdit() + " is > " + this.flushCheckInterval + " from now =" + now); } return true; } @@ -2076,25 +1982,25 @@ whyFlush.setLength(0); // This is a rough measure. 
if (this.maxFlushedSeqId > 0 - && (this.maxFlushedSeqId + this.flushPerChanges < this.mvcc.getReadPoint())) { + && (this.maxFlushedSeqId + this.flushPerChanges < this.mvcc.getReadPoint())) { whyFlush.append("more than max edits, " + this.flushPerChanges + ", since last flush"); return true; } long modifiedFlushCheckInterval = flushCheckInterval; - if (getRegionInfo().isSystemTable() && - getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) { + if (getRegionInfo().isSystemTable() + && getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) { modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; } - if (modifiedFlushCheckInterval <= 0) { //disabled + if (modifiedFlushCheckInterval <= 0) { // disabled return false; } long now = EnvironmentEdgeManager.currentTime(); - //if we flushed in the recent past, we don't need to do again now + // if we flushed in the recent past, we don't need to do again now if ((now - getEarliestFlushTimeForAllStores() < modifiedFlushCheckInterval)) { return false; } - //since we didn't flush in the recent past, flush now if certain conditions - //are met. Return true on first such memstore hit. + // since we didn't flush in the recent past, flush now if certain conditions + // are met. Return true on first such memstore hit. for (Store s : getStores()) { if (s.timeOfOldestEdit() < now - modifiedFlushCheckInterval) { // we have an old enough edit in the memstore, flush @@ -2107,58 +2013,46 @@ /** * Flushing all stores. - * * @see #internalFlushcache(Collection, MonitoredTask, boolean) */ - private FlushResult internalFlushcache(MonitoredTask status) - throws IOException { + private FlushResult internalFlushcache(MonitoredTask status) throws IOException { return internalFlushcache(stores.values(), status, false); } /** * Flushing given stores. - * * @see #internalFlushcache(WAL, long, Collection, MonitoredTask, boolean) */ private FlushResult internalFlushcache(final Collection storesToFlush, MonitoredTask status, boolean writeFlushWalMarker) throws IOException { - return internalFlushcache(this.wal, HConstants.NO_SEQNUM, storesToFlush, - status, writeFlushWalMarker); + return internalFlushcache(this.wal, HConstants.NO_SEQNUM, storesToFlush, status, + writeFlushWalMarker); } /** - * Flush the memstore. Flushing the memstore is a little tricky. We have a lot - * of updates in the memstore, all of which have also been written to the wal. - * We need to write those updates in the memstore out to disk, while being - * able to process reads/writes as much as possible during the flush - * operation. + * Flush the memstore. Flushing the memstore is a little tricky. We have a lot of updates in the + * memstore, all of which have also been written to the wal. We need to write those updates in the + * memstore out to disk, while being able to process reads/writes as much as possible during the + * flush operation. *
<p>
- * This method may block for some time. Every time you call it, we up the - * regions sequence id even if we don't flush; i.e. the returned region id - * will be at least one larger than the last edit applied to this region. The - * returned id does not refer to an actual edit. The returned id can be used - * for say installing a bulk loaded file just ahead of the last hfile that was - * the result of this flush, etc. - * - * @param wal - * Null if we're NOT to go via wal. - * @param myseqid - * The seqid to use if wal is null writing out flush - * file. - * @param storesToFlush - * The list of stores to flush. + * This method may block for some time. Every time you call it, we up the regions sequence id even + * if we don't flush; i.e. the returned region id will be at least one larger than the last edit + * applied to this region. The returned id does not refer to an actual edit. The returned id can + * be used for say installing a bulk loaded file just ahead of the last hfile that was the result + * of this flush, etc. + * @param wal Null if we're NOT to go via wal. + * @param myseqid The seqid to use if wal is null writing out flush file. + * @param storesToFlush The list of stores to flush. * @return object describing the flush's state - * @throws IOException - * general io exceptions - * @throws DroppedSnapshotException - * Thrown when replay of wal is required because a Snapshot was not - * properly persisted. + * @throws IOException general io exceptions + * @throws DroppedSnapshotException Thrown when replay of wal is required because a Snapshot was + * not properly persisted. */ protected FlushResult internalFlushcache(final WAL wal, final long myseqid, final Collection storesToFlush, MonitoredTask status, boolean writeFlushWalMarker) throws IOException { - PrepareFlushResult result - = internalPrepareFlushCache(wal, myseqid, storesToFlush, status, writeFlushWalMarker); + PrepareFlushResult result = + internalPrepareFlushCache(wal, myseqid, storesToFlush, status, writeFlushWalMarker); if (result.result == null) { return internalFlushCacheAndCommit(wal, status, result, storesToFlush); } else { @@ -2166,11 +2060,11 @@ } } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DLS_DEAD_LOCAL_STORE", - justification="FindBugs seems confused about trxId") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DLS_DEAD_LOCAL_STORE", + justification = "FindBugs seems confused about trxId") protected PrepareFlushResult internalPrepareFlushCache(final WAL wal, final long myseqid, final Collection storesToFlush, MonitoredTask status, boolean writeFlushWalMarker) - throws IOException { + throws IOException { if (this.rsServices != null && this.rsServices.isAborted()) { // Don't flush when server aborting, it's unsafe throw new IOException("Aborting flush because server is aborted..."); @@ -2193,11 +2087,9 @@ if (wal != null) { writeEntry = mvcc.begin(); long flushOpSeqId = writeEntry.getWriteNumber(); - FlushResult flushResult = new FlushResultImpl( - FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, - flushOpSeqId, - "Nothing to flush", - writeFlushRequestMarkerToWAL(wal, writeFlushWalMarker)); + FlushResult flushResult = + new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, flushOpSeqId, + "Nothing to flush", writeFlushRequestMarkerToWAL(wal, writeFlushWalMarker)); // TODO: Lets see if we hang here, if there is a scenario where an outstanding reader // with a read point is in advance of this write point. 
mvcc.completeAndWait(writeEntry); @@ -2205,11 +2097,9 @@ return new PrepareFlushResult(flushResult, myseqid); } else { return new PrepareFlushResult( - new FlushResultImpl( - FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, - "Nothing to flush", - false), - myseqid); + new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, + "Nothing to flush", false), + myseqid); } } } finally { @@ -2225,18 +2115,18 @@ StringBuilder perCfExtras = null; if (!isAllFamilies(storesToFlush)) { perCfExtras = new StringBuilder(); - for (Store store: storesToFlush) { + for (Store store : storesToFlush) { perCfExtras.append("; ").append(store.getColumnFamilyName()); perCfExtras.append("=").append(StringUtils.byteDesc(store.getMemStoreSize())); } } - LOG.info("Flushing " + + storesToFlush.size() + "/" + stores.size() + - " column families, memstore=" + StringUtils.byteDesc(this.memstoreSize.get()) + - ((perCfExtras != null && perCfExtras.length() > 0)? perCfExtras.toString(): "") + - ((wal != null) ? "" : "; WAL is null, using passed sequenceid=" + myseqid)); + LOG.info("Flushing " + +storesToFlush.size() + "/" + stores.size() + + " column families, memstore=" + StringUtils.byteDesc(this.memstoreSize.get()) + + ((perCfExtras != null && perCfExtras.length() > 0) ? perCfExtras.toString() : "") + + ((wal != null) ? "" : "; WAL is null, using passed sequenceid=" + myseqid)); } // Stop updates while we snapshot the memstore of all of these regions' stores. We only have - // to do this for a moment. It is quick. We also set the memstore size to zero here before we + // to do this for a moment. It is quick. We also set the memstore size to zero here before we // allow updates again so its value will represent the size of the updates received // during flush @@ -2245,21 +2135,20 @@ status.setStatus("Obtaining lock to block concurrent updates"); // block waiting for the lock for internal flush this.updatesLock.writeLock().lock(); - status.setStatus("Preparing to flush by snapshotting stores in " + - getRegionInfo().getEncodedName()); + status.setStatus( + "Preparing to flush by snapshotting stores in " + getRegionInfo().getEncodedName()); long totalFlushableSizeOfFlushableStores = 0; Set flushedFamilyNames = new HashSet(); - for (Store store: storesToFlush) { + for (Store store : storesToFlush) { flushedFamilyNames.add(store.getFamily().getName()); } - TreeMap storeFlushCtxs - = new TreeMap(Bytes.BYTES_COMPARATOR); - TreeMap> committedFiles = new TreeMap>( - Bytes.BYTES_COMPARATOR); - TreeMap storeFlushableSize - = new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap storeFlushCtxs = + new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap> committedFiles = + new TreeMap>(Bytes.BYTES_COMPARATOR); + TreeMap storeFlushableSize = new TreeMap(Bytes.BYTES_COMPARATOR); // The sequence id of this flush operation which is used to log FlushMarker and pass to // createFlushContext to use as the store file's sequence id. It can be in advance of edits // still in the memstore, edits that are in other column families yet to be flushed. @@ -2275,20 +2164,18 @@ try { if (wal != null) { Long earliestUnflushedSequenceIdForTheRegion = - wal.startCacheFlush(encodedRegionName, flushedFamilyNames); + wal.startCacheFlush(encodedRegionName, flushedFamilyNames); if (earliestUnflushedSequenceIdForTheRegion == null) { // This should never happen. This is how startCacheFlush signals flush cannot proceed. 
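// Editor's outline (a sketch, not part of the patch) of the two-phase flush that
// internalPrepareFlushCache / internalFlushCacheAndCommit implement: snapshot the selected
// memstores while holding the updates write lock, then write out and commit the store files
// without blocking new writes. Names follow the patch; WAL markers, sequence id accounting
// and error handling are omitted here.
this.updatesLock.writeLock().lock();
try {
  for (Store store : storesToFlush) {
    storeFlushCtxs.put(store.getFamily().getName(), store.createFlushContext(flushOpSeqId));
  }
  for (StoreFlushContext ctx : storeFlushCtxs.values()) {
    ctx.prepare(); // snapshot this store's memstore
  }
} finally {
  this.updatesLock.writeLock().unlock();
}
for (StoreFlushContext ctx : storeFlushCtxs.values()) {
  ctx.flushCache(status); // write the snapshot to a temporary store file
  ctx.commit(status);     // move the file into place and clear the snapshot
}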
String msg = this.getRegionInfo().getEncodedName() + " flush aborted; WAL closing."; status.setStatus(msg); return new PrepareFlushResult( - new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false), - myseqid); + new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false), myseqid); } flushOpSeqId = getNextSequenceId(wal); // Back up 1, minus 1 from oldest sequence id in memstore to get last 'flushed' edit - flushedSeqId = - earliestUnflushedSequenceIdForTheRegion.longValue() == HConstants.NO_SEQNUM? - flushOpSeqId: earliestUnflushedSequenceIdForTheRegion.longValue() - 1; + flushedSeqId = earliestUnflushedSequenceIdForTheRegion.longValue() == HConstants.NO_SEQNUM + ? flushOpSeqId : earliestUnflushedSequenceIdForTheRegion.longValue() - 1; } else { // use the provided sequence Id as WAL is not being used for this flush. flushedSeqId = flushOpSeqId = myseqid; @@ -2306,8 +2193,8 @@ FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, getRegionInfo(), flushOpSeqId, committedFiles); // no sync. Sync is below where we do not hold the updates lock - trxId = WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), - desc, false, mvcc); + trxId = WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), desc, false, + mvcc); } // Prepare flush (take a snapshot) @@ -2320,11 +2207,11 @@ try { FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH, getRegionInfo(), flushOpSeqId, committedFiles); - WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), - desc, false, mvcc); + WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), desc, false, + mvcc); } catch (Throwable t) { - LOG.warn("Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" + - StringUtils.stringifyException(t)); + LOG.warn("Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" + + StringUtils.stringifyException(t)); // ignore this since we will be aborting the RS with DSE. 
} } @@ -2335,8 +2222,8 @@ } finally { this.updatesLock.writeLock().unlock(); } - String s = "Finished memstore snapshotting " + this + - ", syncing WAL and waiting on mvcc, flushsize=" + totalFlushableSizeOfFlushableStores; + String s = "Finished memstore snapshotting " + this + + ", syncing WAL and waiting on mvcc, flushsize=" + totalFlushableSizeOfFlushableStores; status.setStatus(s); if (LOG.isTraceEnabled()) LOG.trace(s); // sync unflushed WAL changes @@ -2388,23 +2275,22 @@ FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.CANNOT_FLUSH, getRegionInfo(), -1, new TreeMap>(Bytes.BYTES_COMPARATOR)); try { - WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), - desc, true, mvcc); + WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), desc, true, mvcc); return true; } catch (IOException e) { LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Received exception while trying to write the flush request to wal", e); + + "Received exception while trying to write the flush request to wal", + e); } } return false; } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", - justification="Intentional; notify is about completed flush") - protected FlushResult internalFlushCacheAndCommit( - final WAL wal, MonitoredTask status, final PrepareFlushResult prepareResult, - final Collection storesToFlush) - throws IOException { + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY", + justification = "Intentional; notify is about completed flush") + protected FlushResult internalFlushCacheAndCommit(final WAL wal, MonitoredTask status, + final PrepareFlushResult prepareResult, final Collection storesToFlush) + throws IOException { // prepare flush context is carried via PrepareFlushResult TreeMap storeFlushCtxs = prepareResult.storeFlushCtxs; @@ -2424,7 +2310,7 @@ // be part of the current running servers state. boolean compactionRequested = false; try { - // A. Flush memstore to all the HStores. + // A. Flush memstore to all the HStores. // Keep running vector of all store files that includes both old and the // just-made new flush store file. The new flushed file is still in the // tmp directory. @@ -2459,8 +2345,7 @@ // write flush marker to WAL. If fail, we should throw DroppedSnapshotException FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.COMMIT_FLUSH, getRegionInfo(), flushOpSeqId, committedFiles); - WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), - desc, true, mvcc); + WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), desc, true, mvcc); } } catch (Throwable t) { // An exception here means that the snapshot was not persisted. @@ -2473,17 +2358,17 @@ try { FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH, getRegionInfo(), flushOpSeqId, committedFiles); - WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), - desc, false, mvcc); + WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), desc, false, mvcc); } catch (Throwable ex) { - LOG.warn(getRegionInfo().getEncodedName() + " : " - + "failed writing ABORT_FLUSH marker to WAL", ex); + LOG.warn( + getRegionInfo().getEncodedName() + " : " + "failed writing ABORT_FLUSH marker to WAL", + ex); // ignore this since we will be aborting the RS with DSE. 
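// Editor's summary of the flush-marker protocol used above (the calls themselves appear in
// this patch): START_FLUSH is written to the WAL before the memstores are snapshotted, and
// the flush later ends with either COMMIT_FLUSH once the new store files are committed, or
// ABORT_FLUSH if the snapshot is lost (in which case the server aborts with
// DroppedSnapshotException). Replicas and WAL replay use these markers to track flush state.
FlushDescriptor start = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH,
    getRegionInfo(), flushOpSeqId, committedFiles);
WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), start, false, mvcc);
// ... snapshot memstores, flush them to temporary files, commit the files ...
FlushDescriptor commit = ProtobufUtil.toFlushDescriptor(FlushAction.COMMIT_FLUSH,
    getRegionInfo(), flushOpSeqId, committedFiles);
WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), commit, true, mvcc);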
} wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes()); } - DroppedSnapshotException dse = new DroppedSnapshotException("region: " + - Bytes.toStringBinary(getRegionInfo().getRegionName())); + DroppedSnapshotException dse = new DroppedSnapshotException( + "region: " + Bytes.toStringBinary(getRegionInfo().getRegionName())); dse.initCause(t); status.abort("Flush failed: " + StringUtils.stringifyException(t)); @@ -2507,7 +2392,7 @@ } // Record latest flush time - for (Store store: storesToFlush) { + for (Store store : storesToFlush) { this.lastStoreFlushTimeMap.put(store, startTime); } @@ -2522,20 +2407,17 @@ long time = EnvironmentEdgeManager.currentTime() - startTime; long memstoresize = this.memstoreSize.get(); - String msg = "Finished memstore flush of ~" - + StringUtils.byteDesc(totalFlushableSizeOfFlushableStores) + "/" - + totalFlushableSizeOfFlushableStores + ", currentsize=" - + StringUtils.byteDesc(memstoresize) + "/" + memstoresize - + " for region " + this + " in " + time + "ms, sequenceid=" - + flushOpSeqId + ", compaction requested=" + compactionRequested - + ((wal == null) ? "; wal=null" : ""); + String msg = + "Finished memstore flush of ~" + StringUtils.byteDesc(totalFlushableSizeOfFlushableStores) + + "/" + totalFlushableSizeOfFlushableStores + ", currentsize=" + + StringUtils.byteDesc(memstoresize) + "/" + memstoresize + " for region " + this + + " in " + time + "ms, sequenceid=" + flushOpSeqId + ", compaction requested=" + + compactionRequested + ((wal == null) ? "; wal=null" : ""); LOG.info(msg); status.setStatus(msg); - return new FlushResultImpl(compactionRequested ? - FlushResult.Result.FLUSHED_COMPACTION_NEEDED : - FlushResult.Result.FLUSHED_NO_COMPACTION_NEEDED, - flushOpSeqId); + return new FlushResultImpl(compactionRequested ? FlushResult.Result.FLUSHED_COMPACTION_NEEDED + : FlushResult.Result.FLUSHED_NO_COMPACTION_NEEDED, flushOpSeqId); } /** @@ -2562,7 +2444,7 @@ @Override public RegionScanner getScanner(Scan scan) throws IOException { - return getScanner(scan, true); + return getScanner(scan, true); } @Override @@ -2611,14 +2493,14 @@ @Override public void prepareDelete(Delete delete) throws IOException { // Check to see if this is a deleteRow insert - if(delete.getFamilyCellMap().isEmpty()){ - for(byte [] family : this.htableDescriptor.getFamiliesKeys()){ + if (delete.getFamilyCellMap().isEmpty()) { + for (byte[] family : this.htableDescriptor.getFamiliesKeys()) { // Don't eat the timestamp delete.addFamily(family, delete.getTimeStamp()); } } else { - for(byte [] family : delete.getFamilyCellMap().keySet()) { - if(family == null) { + for (byte[] family : delete.getFamilyCellMap().keySet()) { + if (family == null) { throw new NoSuchColumnFamilyException("Empty family is invalid"); } checkFamily(family); @@ -2643,15 +2525,15 @@ /** * Row needed by below method. */ - private static final byte [] FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly"); + private static final byte[] FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly"); /** * This is used only by unit tests. Not required to be a public API. * @param familyMap map of family to edits for the given family. 
* @throws IOException */ - void delete(NavigableMap> familyMap, - Durability durability) throws IOException { + void delete(NavigableMap> familyMap, Durability durability) + throws IOException { Delete delete = new Delete(FOR_UNIT_TESTS_ONLY); delete.setFamilyCellMap(familyMap); delete.setDurability(durability); @@ -2669,10 +2551,10 @@ Map kvCount = new TreeMap(Bytes.BYTES_COMPARATOR); int listSize = cells.size(); - for (int i=0; i < listSize; i++) { + for (int i = 0; i < listSize; i++) { Cell cell = cells.get(i); - // Check if time is LATEST, change to time of most recent addition if so - // This is expensive. + // Check if time is LATEST, change to time of most recent addition if so + // This is expensive. if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP && CellUtil.isDeleteType(cell)) { byte[] qual = CellUtil.cloneQualifier(cell); if (qual == null) qual = HConstants.EMPTY_BYTE_ARRAY; @@ -2689,8 +2571,8 @@ get.setMaxVersions(count); get.addColumn(family, qual); if (coprocessorHost != null) { - if (!coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell, - byteNow, get)) { + if (!coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell, byteNow, + get)) { updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow); } } else { @@ -2723,9 +2605,9 @@ public void put(Put put) throws IOException { checkReadOnly(); - // Do a rough check that we have resources to accept a write. The check is + // Do a rough check that we have resources to accept a write. The check is // 'rough' in that between the resource check and the call to obtain a - // read lock, resources may run out. For now, the thought is that this + // read lock, resources may run out. For now, the thought is that this // will be extremely rare; we'll deal with it when it happens. checkResources(); startRegionOperation(Operation.PUT); @@ -2738,9 +2620,8 @@ } /** - * Struct-like class that tracks the progress of a batch operation, - * accumulating status codes and tracking the index at which processing - * is proceeding. + * Struct-like class that tracks the progress of a batch operation, accumulating status codes and + * tracking the index at which processing is proceeding. */ private abstract static class BatchOperationInProgress { T[] operations; @@ -2756,11 +2637,16 @@ } public abstract Mutation getMutation(int index); + public abstract long getNonceGroup(int index); + public abstract long getNonce(int index); + /** This method is potentially expensive and should only be used for non-replay CP path. */ public abstract Mutation[] getMutationsForCoprocs(); + public abstract boolean isInReplay(); + public abstract long getReplaySequenceId(); public boolean isDone() { @@ -2771,6 +2657,7 @@ private static class MutationBatch extends BatchOperationInProgress { private long nonceGroup; private long nonce; + public MutationBatch(Mutation[] operations, long nonceGroup, long nonce) { super(operations); this.nonceGroup = nonceGroup; @@ -2810,6 +2697,7 @@ private static class ReplayBatch extends BatchOperationInProgress { private long replaySeqId = 0; + public ReplayBatch(MutationReplay[] operations, long seqId) { super(operations); this.replaySeqId = seqId; @@ -2851,8 +2739,8 @@ public OperationStatus[] batchMutate(Mutation[] mutations, long nonceGroup, long nonce) throws IOException { // As it stands, this is used for 3 things - // * batchMutate with single mutation - put/delete, separate or from checkAndMutate. - // * coprocessor calls (see ex. BulkDeleteEndpoint). 
+ // * batchMutate with single mutation - put/delete, separate or from checkAndMutate. + // * coprocessor calls (see ex. BulkDeleteEndpoint). // So nonces are not really ever used by HBase. They could be by coprocs, and checkAnd... return batchMutate(new MutationBatch(mutations, nonceGroup, nonce)); } @@ -2869,9 +2757,9 @@ // if it is a secondary replica we should ignore these entries silently // since they are coming out of order if (LOG.isTraceEnabled()) { - LOG.trace(getRegionInfo().getEncodedName() + " : " - + "Skipping " + mutations.length + " mutations with replaySeqId=" + replaySeqId - + " which is < than lastReplayedOpenRegionSeqId=" + lastReplayedOpenRegionSeqId); + LOG.trace(getRegionInfo().getEncodedName() + " : " + "Skipping " + mutations.length + + " mutations with replaySeqId=" + replaySeqId + + " which is < than lastReplayedOpenRegionSeqId=" + lastReplayedOpenRegionSeqId); for (MutationReplay mut : mutations) { LOG.trace(getRegionInfo().getEncodedName() + " : Skipping : " + mut.mutation); } @@ -2887,11 +2775,11 @@ } /** - * Perform a batch of mutations. - * It supports only Put and Delete mutations and will ignore other types passed. + * Perform a batch of mutations. It supports only Put and Delete mutations and will ignore other + * types passed. * @param batchOp contains the list of mutations - * @return an array of OperationStatus which internally contains the - * OperationStatusCode and the exceptionMessage if any. + * @return an array of OperationStatus which internally contains the OperationStatusCode and the + * exceptionMessage if any. * @throws IOException */ OperationStatus[] batchMutate(BatchOperationInProgress batchOp) throws IOException { @@ -2924,13 +2812,11 @@ return batchOp.retCodeDetails; } - - private void doPreMutationHook(BatchOperationInProgress batchOp) - throws IOException { + private void doPreMutationHook(BatchOperationInProgress batchOp) throws IOException { /* Run coprocessor pre hook outside of locks to avoid deadlock */ WALEdit walEdit = new WALEdit(); if (coprocessorHost != null) { - for (int i = 0 ; i < batchOp.operations.length; i++) { + for (int i = 0; i < batchOp.operations.length; i++) { Mutation m = batchOp.getMutation(i); if (m instanceof Put) { if (coprocessorHost.prePut((Put) m, walEdit, m.getDurability())) { @@ -2969,11 +2855,11 @@ boolean isInReplay = batchOp.isInReplay(); // variable to note if all Put items are for the same CF -- metrics related boolean putsCfSetConsistent = true; - //The set of columnFamilies first seen for Put. + // The set of columnFamilies first seen for Put. Set putsCfSet = null; // variable to note if all Delete items are for the same CF -- metrics related boolean deletesCfSetConsistent = true; - //The set of columnFamilies first seen for Delete. + // The set of columnFamilies first seen for Delete. 
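// Editor's illustration (client-side view, not part of the patch) of the batch path noted
// above: a mixed list of Puts and Deletes is ultimately serviced by the region through
// batchMutate(). Assumes an org.apache.hadoop.hbase.client.Table named "table" obtained
// elsewhere.
List<Row> actions = new ArrayList<Row>();
actions.add(new Put(Bytes.toBytes("row1"))
    .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1")));
actions.add(new Delete(Bytes.toBytes("row2")));
Object[] results = new Object[actions.size()];
table.batch(actions, results); // per-operation outcomes are reported in "results"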
Set deletesCfSet = null; long currentNonceGroup = HConstants.NO_NONCE, currentNonce = HConstants.NO_NONCE; @@ -3010,8 +2896,8 @@ familyMaps[lastIndexExclusive] = familyMap; // skip anything that "ran" already - if (batchOp.retCodeDetails[lastIndexExclusive].getOperationStatusCode() - != OperationStatusCode.NOT_RUN) { + if (batchOp.retCodeDetails[lastIndexExclusive] + .getOperationStatusCode() != OperationStatusCode.NOT_RUN) { lastIndexExclusive++; continue; } @@ -3031,20 +2917,20 @@ checkRow(mutation.getRow(), "doMiniBatchMutation"); } catch (NoSuchColumnFamilyException nscf) { LOG.warn("No such column family in batch mutation", nscf); - batchOp.retCodeDetails[lastIndexExclusive] = new OperationStatus( - OperationStatusCode.BAD_FAMILY, nscf.getMessage()); + batchOp.retCodeDetails[lastIndexExclusive] = + new OperationStatus(OperationStatusCode.BAD_FAMILY, nscf.getMessage()); lastIndexExclusive++; continue; } catch (FailedSanityCheckException fsce) { LOG.warn("Batch Mutation did not pass sanity check", fsce); - batchOp.retCodeDetails[lastIndexExclusive] = new OperationStatus( - OperationStatusCode.SANITY_CHECK_FAILURE, fsce.getMessage()); + batchOp.retCodeDetails[lastIndexExclusive] = + new OperationStatus(OperationStatusCode.SANITY_CHECK_FAILURE, fsce.getMessage()); lastIndexExclusive++; continue; } catch (WrongRegionException we) { LOG.warn("Batch mutation had a row that does not belong to this region", we); - batchOp.retCodeDetails[lastIndexExclusive] = new OperationStatus( - OperationStatusCode.SANITY_CHECK_FAILURE, we.getMessage()); + batchOp.retCodeDetails[lastIndexExclusive] = + new OperationStatus(OperationStatusCode.SANITY_CHECK_FAILURE, we.getMessage()); lastIndexExclusive++; continue; } @@ -3055,8 +2941,9 @@ try { rowLock = getRowLock(mutation.getRow(), true); } catch (IOException ioe) { - LOG.warn("Failed getting lock in batch put, row=" - + Bytes.toStringBinary(mutation.getRow()), ioe); + LOG.warn( + "Failed getting lock in batch put, row=" + Bytes.toStringBinary(mutation.getRow()), + ioe); } if (rowLock == null) { // We failed to grab another lock @@ -3075,15 +2962,15 @@ if (putsCfSet == null) { putsCfSet = mutation.getFamilyCellMap().keySet(); } else { - putsCfSetConsistent = putsCfSetConsistent - && mutation.getFamilyCellMap().keySet().equals(putsCfSet); + putsCfSetConsistent = + putsCfSetConsistent && mutation.getFamilyCellMap().keySet().equals(putsCfSet); } } else { if (deletesCfSet == null) { deletesCfSet = mutation.getFamilyCellMap().keySet(); } else { - deletesCfSetConsistent = deletesCfSetConsistent - && mutation.getFamilyCellMap().keySet().equals(deletesCfSet); + deletesCfSetConsistent = + deletesCfSetConsistent && mutation.getFamilyCellMap().keySet().equals(deletesCfSet); } } } @@ -3103,8 +2990,8 @@ // ---------------------------------- for (int i = firstIndex; !isInReplay && i < lastIndexExclusive; i++) { // skip invalid - if (batchOp.retCodeDetails[i].getOperationStatusCode() - != OperationStatusCode.NOT_RUN) continue; + if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.NOT_RUN) + continue; Mutation mutation = batchOp.getMutation(i); if (mutation instanceof Put) { @@ -3123,8 +3010,9 @@ // calling the pre CP hook for batch mutation if (!isInReplay && coprocessorHost != null) { MiniBatchOperationInProgress miniBatchOp = - new MiniBatchOperationInProgress(batchOp.getMutationsForCoprocs(), - batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive); + new 
MiniBatchOperationInProgress(batchOp.getMutationsForCoprocs(), + batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, + lastIndexExclusive); if (coprocessorHost.preBatchMutate(miniBatchOp)) return 0L; } @@ -3161,10 +3049,10 @@ // txid should always increase, so having the one from the last call is ok. // we use HLogKey here instead of WALKey directly to support legacy coprocessors. walKey = new ReplayHLogKey(this.getRegionInfo().getEncodedNameAsBytes(), - this.htableDescriptor.getTableName(), now, m.getClusterIds(), - currentNonceGroup, currentNonce, mvcc); - txid = this.wal.append(this.htableDescriptor, this.getRegionInfo(), walKey, - walEdit, true); + this.htableDescriptor.getTableName(), now, m.getClusterIds(), currentNonceGroup, + currentNonce, mvcc); + txid = + this.wal.append(this.htableDescriptor, this.getRegionInfo(), walKey, walEdit, true); walEdit = new WALEdit(isInReplay); walKey = null; } @@ -3189,17 +3077,17 @@ if (isInReplay) { // use wal key from the original walKey = new ReplayHLogKey(this.getRegionInfo().getEncodedNameAsBytes(), - this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, - mutation.getClusterIds(), currentNonceGroup, currentNonce, mvcc); + this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, + mutation.getClusterIds(), currentNonceGroup, currentNonce, mvcc); long replaySeqId = batchOp.getReplaySequenceId(); walKey.setOrigLogSeqNum(replaySeqId); } if (walEdit.size() > 0) { if (!isInReplay) { - // we use HLogKey here instead of WALKey directly to support legacy coprocessors. - walKey = new HLogKey(this.getRegionInfo().getEncodedNameAsBytes(), - this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, - mutation.getClusterIds(), currentNonceGroup, currentNonce, mvcc); + // we use HLogKey here instead of WALKey directly to support legacy coprocessors. 
+ walKey = new HLogKey(this.getRegionInfo().getEncodedNameAsBytes(), + this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, + mutation.getClusterIds(), currentNonceGroup, currentNonce, mvcc); } txid = this.wal.append(this.htableDescriptor, this.getRegionInfo(), walKey, walEdit, true); } @@ -3228,8 +3116,7 @@ // ---------------------------------- long addedSize = 0; for (int i = firstIndex; i < lastIndexExclusive; i++) { - if (batchOp.retCodeDetails[i].getOperationStatusCode() - != OperationStatusCode.NOT_RUN) { + if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.NOT_RUN) { continue; } doRollBackMemstore = true; // If we have a failure, we need to clean what we wrote @@ -3256,8 +3143,9 @@ // calling the post CP hook for batch mutation if (!isInReplay && coprocessorHost != null) { MiniBatchOperationInProgress miniBatchOp = - new MiniBatchOperationInProgress(batchOp.getMutationsForCoprocs(), - batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive); + new MiniBatchOperationInProgress(batchOp.getMutationsForCoprocs(), + batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, + lastIndexExclusive); coprocessorHost.postBatchMutate(miniBatchOp); } @@ -3272,7 +3160,7 @@ mvcc.advanceTo(mvccNum); } - for (int i = firstIndex; i < lastIndexExclusive; i ++) { + for (int i = firstIndex; i < lastIndexExclusive; i++) { if (batchOp.retCodeDetails[i] == OperationStatus.NOT_RUN) { batchOp.retCodeDetails[i] = OperationStatus.SUCCESS; } @@ -3285,8 +3173,7 @@ if (!isInReplay && coprocessorHost != null) { for (int i = firstIndex; i < lastIndexExclusive; i++) { // only for successful puts - if (batchOp.retCodeDetails[i].getOperationStatusCode() - != OperationStatusCode.SUCCESS) { + if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.SUCCESS) { continue; } Mutation m = batchOp.getMutation(i); @@ -3304,7 +3191,7 @@ // if the wal sync was unsuccessful, remove keys from memstore if (doRollBackMemstore) { for (int j = 0; j < familyMaps.length; j++) { - for(List cells:familyMaps[j].values()) { + for (List cells : familyMaps[j].values()) { rollbackMemstore(cells); } } @@ -3358,34 +3245,30 @@ } /** - * Returns effective durability from the passed durability and - * the table descriptor. + * Returns effective durability from the passed durability and the table descriptor. */ protected Durability getEffectiveDurability(Durability d) { return d == Durability.USE_DEFAULT ? this.durability : d; } - //TODO, Think that gets/puts and deletes should be refactored a bit so that - //the getting of the lock happens before, so that you would just pass it into - //the methods. So in the case of checkAndMutate you could just do lockRow, - //get, put, unlockRow or something + // TODO, Think that gets/puts and deletes should be refactored a bit so that + // the getting of the lock happens before, so that you would just pass it into + // the methods. 
So in the case of checkAndMutate you could just do lockRow, + // get, put, unlockRow or something @Override - public boolean checkAndMutate(byte [] row, byte [] family, byte [] qualifier, - CompareOp compareOp, ByteArrayComparable comparator, Mutation w, - boolean writeToWAL) - throws IOException{ + public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + ByteArrayComparable comparator, Mutation w, boolean writeToWAL) throws IOException { checkReadOnly(); - //TODO, add check for value length or maybe even better move this to the - //client if this becomes a global setting + // TODO, add check for value length or maybe even better move this to the + // client if this becomes a global setting checkResources(); boolean isPut = w instanceof Put; if (!isPut && !(w instanceof Delete)) - throw new org.apache.hadoop.hbase.DoNotRetryIOException("Action must " + - "be Put or Delete"); + throw new org.apache.hadoop.hbase.DoNotRetryIOException("Action must " + "be Put or Delete"); if (!Bytes.equals(row, w.getRow())) { - throw new org.apache.hadoop.hbase.DoNotRetryIOException("Action's " + - "getRow must match the passed row"); + throw new org.apache.hadoop.hbase.DoNotRetryIOException( + "Action's " + "getRow must match the passed row"); } startRegionOperation(); @@ -3402,11 +3285,11 @@ if (this.getCoprocessorHost() != null) { Boolean processed = null; if (w instanceof Put) { - processed = this.getCoprocessorHost().preCheckAndPutAfterRowLock(row, family, - qualifier, compareOp, comparator, (Put) w); + processed = this.getCoprocessorHost().preCheckAndPutAfterRowLock(row, family, qualifier, + compareOp, comparator, (Put) w); } else if (w instanceof Delete) { processed = this.getCoprocessorHost().preCheckAndDeleteAfterRowLock(row, family, - qualifier, compareOp, comparator, (Delete) w); + qualifier, compareOp, comparator, (Delete) w); } if (processed != null) { return processed; @@ -3414,14 +3297,12 @@ } List result = get(get, false); - boolean valueIsNull = comparator.getValue() == null || - comparator.getValue().length == 0; + boolean valueIsNull = comparator.getValue() == null || comparator.getValue().length == 0; boolean matches = false; long cellTs = 0; if (result.size() == 0 && valueIsNull) { matches = true; - } else if (result.size() > 0 && result.get(0).getValueLength() == 0 && - valueIsNull) { + } else if (result.size() > 0 && result.get(0).getValueLength() == 0 && valueIsNull) { matches = true; cellTs = result.get(0).getTimestamp(); } else if (result.size() == 1 && !valueIsNull) { @@ -3451,7 +3332,7 @@ throw new RuntimeException("Unknown Compare op " + compareOp.name()); } } - //If matches put the new put or delete the new delete + // If matches put the new put or delete the new delete if (matches) { // We have acquired the row lock already. If the system clock is NOT monotonically // non-decreasing (see HBASE-14070) we should make sure that the mutation has a @@ -3483,18 +3364,17 @@ } } - //TODO, Think that gets/puts and deletes should be refactored a bit so that - //the getting of the lock happens before, so that you would just pass it into - //the methods. So in the case of checkAndMutate you could just do lockRow, - //get, put, unlockRow or something + // TODO, Think that gets/puts and deletes should be refactored a bit so that + // the getting of the lock happens before, so that you would just pass it into + // the methods. 
So in the case of checkAndMutate you could just do lockRow, + // get, put, unlockRow or something @Override - public boolean checkAndRowMutate(byte [] row, byte [] family, byte [] qualifier, - CompareOp compareOp, ByteArrayComparable comparator, RowMutations rm, - boolean writeToWAL) throws IOException { + public boolean checkAndRowMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + ByteArrayComparable comparator, RowMutations rm, boolean writeToWAL) throws IOException { checkReadOnly(); - //TODO, add check for value length or maybe even better move this to the - //client if this becomes a global setting + // TODO, add check for value length or maybe even better move this to the + // client if this becomes a global setting checkResources(); startRegionOperation(); @@ -3510,14 +3390,12 @@ try { List result = get(get, false); - boolean valueIsNull = comparator.getValue() == null || - comparator.getValue().length == 0; + boolean valueIsNull = comparator.getValue() == null || comparator.getValue().length == 0; boolean matches = false; long cellTs = 0; if (result.size() == 0 && valueIsNull) { matches = true; - } else if (result.size() > 0 && result.get(0).getValueLength() == 0 && - valueIsNull) { + } else if (result.size() > 0 && result.get(0).getValueLength() == 0 && valueIsNull) { matches = true; cellTs = result.get(0).getTimestamp(); } else if (result.size() == 1 && !valueIsNull) { @@ -3547,7 +3425,7 @@ throw new RuntimeException("Unknown Compare op " + compareOp.name()); } } - //If matches put the new put or delete the new delete + // If matches put the new put or delete the new delete if (matches) { // We have acquired the row lock already. If the system clock is NOT monotonically // non-decreasing (see HBASE-14070) we should make sure that the mutation has a @@ -3583,7 +3461,7 @@ private void doBatchMutate(Mutation mutation) throws IOException { // Currently this is only called for puts and deletes, so no nonces. - OperationStatus[] batchMutate = this.batchMutate(new Mutation[]{mutation}); + OperationStatus[] batchMutate = this.batchMutate(new Mutation[] { mutation }); if (batchMutate[0].getOperationStatusCode().equals(OperationStatusCode.SANITY_CHECK_FAILURE)) { throw new FailedSanityCheckException(batchMutate[0].getExceptionMsg()); } else if (batchMutate[0].getOperationStatusCode().equals(OperationStatusCode.BAD_FAMILY)) { @@ -3593,30 +3471,26 @@ /** * Complete taking the snapshot on the region. Writes the region info and adds references to the - * working snapshot directory. - * - * TODO for api consistency, consider adding another version with no {@link ForeignExceptionSnare} - * arg. (In the future other cancellable HRegion methods could eventually add a - * {@link ForeignExceptionSnare}, or we could do something fancier). - * + * working snapshot directory. TODO for api consistency, consider adding another version with no + * {@link ForeignExceptionSnare} arg. (In the future other cancellable HRegion methods could + * eventually add a {@link ForeignExceptionSnare}, or we could do something fancier). * @param desc snapshot description object - * @param exnSnare ForeignExceptionSnare that captures external exceptions in case we need to - * bail out. This is allowed to be null and will just be ignored in that case. + * @param exnSnare ForeignExceptionSnare that captures external exceptions in case we need to bail + * out. This is allowed to be null and will just be ignored in that case. 
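// Editor's illustration (client-side view) of the checkAndMutate semantics discussed above:
// the mutation must target the same row as the check, and a null or empty expected value
// matches a missing cell. Assumes a Table named "table" obtained elsewhere.
Put put = new Put(Bytes.toBytes("row1"))
    .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("state"), Bytes.toBytes("done"));
boolean applied = table.checkAndPut(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
    Bytes.toBytes("state"), CompareFilter.CompareOp.EQUAL, Bytes.toBytes("pending"), put);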
* @throws IOException if there is an external or internal error causing the snapshot to fail */ - public void addRegionToSnapshot(SnapshotDescription desc, - ForeignExceptionSnare exnSnare) throws IOException { + public void addRegionToSnapshot(SnapshotDescription desc, ForeignExceptionSnare exnSnare) + throws IOException { Path rootDir = FSUtils.getRootDir(conf); Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir); - SnapshotManifest manifest = SnapshotManifest.create(conf, getFilesystem(), - snapshotDir, desc, exnSnare); + SnapshotManifest manifest = + SnapshotManifest.create(conf, getFilesystem(), snapshotDir, desc, exnSnare); manifest.addRegion(this); // The regionserver holding the first region of the table is responsible for taking the // manifest of the mob dir. - if (!Bytes.equals(getRegionInfo().getStartKey(), HConstants.EMPTY_START_ROW)) - return; + if (!Bytes.equals(getRegionInfo().getStartKey(), HConstants.EMPTY_START_ROW)) return; // if any cf's have is mob enabled, add the "mob region" to the manifest. List stores = getStores(); @@ -3635,7 +3509,7 @@ @Override public void updateCellTimestamps(final Iterable> cellItr, final byte[] now) throws IOException { - for (List cells: cellItr) { + for (List cells : cellItr) { if (cells == null) continue; assert cells instanceof RandomAccess; int listSize = cells.size(); @@ -3658,7 +3532,7 @@ // From this point we know we have some work to do - for (Map.Entry> e: familyMap.entrySet()) { + for (Map.Entry> e : familyMap.entrySet()) { List cells = e.getValue(); assert cells instanceof RandomAccess; int listSize = cells.size(); @@ -3692,11 +3566,9 @@ } /* - * Check if resources to support an update. - * - * We throw RegionTooBusyException if above memstore limit - * and expect client to retry using some kind of backoff - */ + * Check if resources to support an update. We throw RegionTooBusyException if above memstore + * limit and expect client to retry using some kind of backoff + */ private void checkResources() throws RegionTooBusyException { // If catalog region, do not impose resource constraints or block updates. if (this.getRegionInfo().isMetaRegion()) return; @@ -3704,13 +3576,14 @@ if (this.memstoreSize.get() > this.blockingMemStoreSize) { blockedRequestsCount.increment(); requestFlush(); - throw new RegionTooBusyException("Above memstore limit, " + - "regionName=" + (this.getRegionInfo() == null ? "unknown" : - this.getRegionInfo().getRegionNameAsString()) + - ", server=" + (this.getRegionServerServices() == null ? "unknown" : - this.getRegionServerServices().getServerName()) + - ", memstoreSize=" + memstoreSize.get() + - ", blockingMemStoreSize=" + blockingMemStoreSize); + throw new RegionTooBusyException("Above memstore limit, " + "regionName=" + + (this.getRegionInfo() == null ? "unknown" + : this.getRegionInfo().getRegionNameAsString()) + + ", server=" + + (this.getRegionServerServices() == null ? "unknown" + : this.getRegionServerServices().getServerName()) + + ", memstoreSize=" + memstoreSize.get() + ", blockingMemStoreSize=" + + blockingMemStoreSize); } } @@ -3726,25 +3599,24 @@ protected void checkReadsEnabled() throws IOException { if (!this.writestate.readsEnabled) { throw new IOException(getRegionInfo().getEncodedName() - + ": The region's reads are disabled. Cannot serve the request"); + + ": The region's reads are disabled. 
Cannot serve the request"); } } public void setReadsEnabled(boolean readsEnabled) { - if (readsEnabled && !this.writestate.readsEnabled) { - LOG.info(getRegionInfo().getEncodedName() + " : Enabling reads for region."); + if (readsEnabled && !this.writestate.readsEnabled) { + LOG.info(getRegionInfo().getEncodedName() + " : Enabling reads for region."); } this.writestate.setReadsEnabled(readsEnabled); } /** - * Add updates first to the wal and then add values to memstore. - * Warning: Assumption is caller has lock on passed in row. + * Add updates first to the wal and then add values to memstore. Warning: Assumption is caller has + * lock on passed in row. * @param edits Cell updates by column * @throws IOException */ - private void put(final byte [] row, byte [] family, List edits) - throws IOException { + private void put(final byte[] row, byte[] family, List edits) throws IOException { NavigableMap> familyMap; familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); @@ -3755,19 +3627,16 @@ } /** - * Atomically apply the given map of family->edits to the memstore. - * This handles the consistency control on its own, but the caller - * should already have locked updatesLock.readLock(). This also does - * not check the families for validity. - * + * Atomically apply the given map of family->edits to the memstore. This handles the consistency + * control on its own, but the caller should already have locked updatesLock.readLock(). This also + * does not check the families for validity. * @param familyMap Map of kvs per family * @param mvccNum The MVCC for this transaction. * @param isInReplay true when adding replayed KVs into memstore - * @return the additional memory usage of the memstore caused by the - * new entries. + * @return the additional memory usage of the memstore caused by the new entries. */ - private long applyFamilyMapToMemstore(Map> familyMap, - long mvccNum, boolean isInReplay) throws IOException { + private long applyFamilyMapToMemstore(Map> familyMap, long mvccNum, + boolean isInReplay) throws IOException { long size = 0; for (Map.Entry> e : familyMap.entrySet()) { @@ -3776,7 +3645,7 @@ assert cells instanceof RandomAccess; Store store = getStore(family); int listSize = cells.size(); - for (int i=0; i < listSize; i++) { + for (int i = 0; i < listSize; i++) { Cell cell = cells.get(i); if (cell.getSequenceId() == 0 || isInReplay) { CellUtil.setSequenceId(cell, mvccNum); @@ -3785,13 +3654,13 @@ } } - return size; - } + return size; + } /** - * Remove all the keys listed in the map from the memstore. This method is - * called when a Put/Delete has updated memstore but subsequently fails to update - * the wal. This method is then invoked to rollback the memstore. + * Remove all the keys listed in the map from the memstore. This method is called when a + * Put/Delete has updated memstore but subsequently fails to update the wal. This method is then + * invoked to rollback the memstore. 
*/ private void rollbackMemstore(List memstoreCells) { int kvsRolledback = 0; @@ -3816,8 +3685,7 @@ * During replay, there could exist column families which are removed between region server * failure and replay */ - private void removeNonExistentColumnFamilyForReplay( - final Map> familyMap) { + private void removeNonExistentColumnFamilyForReplay(final Map> familyMap) { List nonExistentList = null; for (byte[] family : familyMap.keySet()) { if (!this.htableDescriptor.hasFamily(family)) { @@ -3845,31 +3713,30 @@ long maxTs = now + timestampSlop; for (List kvs : familyMap.values()) { assert kvs instanceof RandomAccess; - int listSize = kvs.size(); - for (int i=0; i < listSize; i++) { + int listSize = kvs.size(); + for (int i = 0; i < listSize; i++) { Cell cell = kvs.get(i); // see if the user-side TS is out of range. latest = server-side long ts = cell.getTimestamp(); if (ts != HConstants.LATEST_TIMESTAMP && ts > maxTs) { - throw new FailedSanityCheckException("Timestamp for KV out of range " - + cell + " (too.new=" + timestampSlop + ")"); + throw new FailedSanityCheckException( + "Timestamp for KV out of range " + cell + " (too.new=" + timestampSlop + ")"); } } } } /** - * Append the given map of family->edits to a WALEdit data structure. - * This does not write to the WAL itself. + * Append the given map of family->edits to a WALEdit data structure. This does not write to the + * WAL itself. * @param familyMap map of family->edits * @param walEdit the destination entry to append into */ - private void addFamilyMapToWALEdit(Map> familyMap, - WALEdit walEdit) { + private void addFamilyMapToWALEdit(Map> familyMap, WALEdit walEdit) { for (List edits : familyMap.values()) { assert edits instanceof RandomAccess; int listSize = edits.size(); - for (int i=0; i < listSize; i++) { + for (int i = 0; i < listSize; i++) { Cell cell = edits.get(i); walEdit.add(cell); } @@ -3902,42 +3769,35 @@ } /** - * Read the edits put under this region by wal splitting process. Put - * the recovered edits back up into this region. - * - *
<p>
We can ignore any wal message that has a sequence ID that's equal to or - * lower than minSeqId. (Because we know such messages are already - * reflected in the HFiles.) - * - *
<p>
While this is running we are putting pressure on memory yet we are - * outside of our usual accounting because we are not yet an onlined region - * (this stuff is being run as part of Region initialization). This means - * that if we're up against global memory limits, we'll not be flagged to flush - * because we are not online. We can't be flushed by usual mechanisms anyways; - * we're not yet online so our relative sequenceids are not yet aligned with - * WAL sequenceids -- not till we come up online, post processing of split - * edits. - * - *
<p>
But to help relieve memory pressure, at least manage our own heap size - * flushing if are in excess of per-region limits. Flushing, though, we have - * to be careful and avoid using the regionserver/wal sequenceid. Its running - * on a different line to whats going on in here in this region context so if we - * crashed replaying these edits, but in the midst had a flush that used the - * regionserver wal with a sequenceid in excess of whats going on in here - * in this region and with its split editlogs, then we could miss edits the - * next time we go to recover. So, we have to flush inline, using seqids that - * make sense in a this single region context only -- until we online. - * - * @param maxSeqIdInStores Any edit found in split editlogs needs to be in excess of - * the maxSeqId for the store to be applied, else its skipped. - * @return the sequence id of the last edit added to this region out of the - * recovered edits log or minSeqId if nothing added from editlogs. + * Read the edits put under this region by wal splitting process. Put the recovered edits back up + * into this region. + *
<p>
+ * We can ignore any wal message that has a sequence ID that's equal to or lower than minSeqId. + * (Because we know such messages are already reflected in the HFiles.) + *
<p>
+ * While this is running we are putting pressure on memory yet we are outside of our usual + * accounting because we are not yet an onlined region (this stuff is being run as part of Region + * initialization). This means that if we're up against global memory limits, we'll not be flagged + * to flush because we are not online. We can't be flushed by usual mechanisms anyways; we're not + * yet online so our relative sequenceids are not yet aligned with WAL sequenceids -- not till we + * come up online, post processing of split edits. + *
<p>
+ * But to help relieve memory pressure, at least manage our own heap size flushing if are in + * excess of per-region limits. Flushing, though, we have to be careful and avoid using the + * regionserver/wal sequenceid. Its running on a different line to whats going on in here in this + * region context so if we crashed replaying these edits, but in the midst had a flush that used + * the regionserver wal with a sequenceid in excess of whats going on in here in this region and + * with its split editlogs, then we could miss edits the next time we go to recover. So, we have + * to flush inline, using seqids that make sense in a this single region context only -- until we + * online. + * @param maxSeqIdInStores Any edit found in split editlogs needs to be in excess of the maxSeqId + * for the store to be applied, else its skipped. + * @return the sequence id of the last edit added to this region out of the recovered edits log or + * minSeqId if nothing added from editlogs. * @throws IOException */ - protected long replayRecoveredEditsIfAny(final Path regiondir, - Map maxSeqIdInStores, - final CancelableProgressable reporter, final MonitoredTask status) - throws IOException { + protected long replayRecoveredEditsIfAny(final Path regiondir, Map maxSeqIdInStores, + final CancelableProgressable reporter, final MonitoredTask status) throws IOException { long minSeqIdForTheRegion = -1; for (Long maxSeqIdInStore : maxSeqIdInStores.values()) { if (maxSeqIdInStore < minSeqIdForTheRegion || minSeqIdForTheRegion == -1) { @@ -3949,13 +3809,13 @@ FileSystem fs = this.fs.getFileSystem(); NavigableSet files = WALSplitter.getSplitEditFilesSorted(fs, regiondir); if (LOG.isDebugEnabled()) { - LOG.debug("Found " + (files == null ? 0 : files.size()) - + " recovered edits file(s) under " + regiondir); + LOG.debug("Found " + (files == null ? 0 : files.size()) + " recovered edits file(s) under " + + regiondir); } if (files == null || files.isEmpty()) return seqid; - for (Path edits: files) { + for (Path edits : files) { if (edits == null || !fs.exists(edits)) { LOG.warn("Null or non-existent edits file: " + edits); continue; @@ -3968,8 +3828,8 @@ if (maxSeqId <= minSeqIdForTheRegion) { if (LOG.isDebugEnabled()) { String msg = "Maximum sequenceid for this wal is " + maxSeqId - + " and minimum sequenceid for the region is " + minSeqIdForTheRegion - + ", skipped the whole file, path=" + edits; + + " and minimum sequenceid for the region is " + minSeqIdForTheRegion + + ", skipped the whole file, path=" + edits; LOG.debug(msg); } continue; @@ -3980,21 +3840,17 @@ // if seqId is greater seqid = Math.max(seqid, replayRecoveredEdits(edits, maxSeqIdInStores, reporter)); } catch (IOException e) { - boolean skipErrors = conf.getBoolean( - HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS, - conf.getBoolean( - "hbase.skip.errors", - HConstants.DEFAULT_HREGION_EDITS_REPLAY_SKIP_ERRORS)); + boolean skipErrors = conf.getBoolean(HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS, conf + .getBoolean("hbase.skip.errors", HConstants.DEFAULT_HREGION_EDITS_REPLAY_SKIP_ERRORS)); if (conf.get("hbase.skip.errors") != null) { - LOG.warn( - "The property 'hbase.skip.errors' has been deprecated. Please use " + - HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS + " instead."); + LOG.warn("The property 'hbase.skip.errors' has been deprecated. 
Please use " + + HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS + " instead."); } if (skipErrors) { Path p = WALSplitter.moveAsideBadEditsFile(fs, edits); - LOG.error(HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS - + "=true so continuing. Renamed " + edits + - " as " + p, e); + LOG.error(HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS + "=true so continuing. Renamed " + + edits + " as " + p, + e); } else { throw e; } @@ -4009,20 +3865,20 @@ // Then we added some edits to memory. Flush and cleanup split edit files. internalFlushcache(null, seqid, stores.values(), status, false); } - // Now delete the content of recovered edits. We're done w/ them. + // Now delete the content of recovered edits. We're done w/ them. if (files.size() > 0 && this.conf.getBoolean("hbase.region.archive.recovered.edits", false)) { // For debugging data loss issues! // If this flag is set, make use of the hfile archiving by making recovered.edits a fake // column family. Have to fake out file type too by casting our recovered.edits as storefiles String fakeFamilyName = WALSplitter.getRegionDirRecoveredEditsDir(regiondir).getName(); Set fakeStoreFiles = new HashSet(files.size()); - for (Path file: files) { - fakeStoreFiles.add(new StoreFile(getRegionFileSystem().getFileSystem(), file, this.conf, - null, null)); + for (Path file : files) { + fakeStoreFiles + .add(new StoreFile(getRegionFileSystem().getFileSystem(), file, this.conf, null, null)); } getRegionFileSystem().removeStoreFiles(fakeFamilyName, fakeStoreFiles); } else { - for (Path file: files) { + for (Path file : files) { if (!fs.delete(file, false)) { LOG.error("Failed delete of " + file); } else { @@ -4035,16 +3891,15 @@ /* * @param edits File of recovered edits. - * @param maxSeqIdInStores Maximum sequenceid found in each store. Edits in wal - * must be larger than this to be replayed for each store. + * @param maxSeqIdInStores Maximum sequenceid found in each store. Edits in wal must be larger + * than this to be replayed for each store. * @param reporter - * @return the sequence id of the last edit added to this region out of the - * recovered edits log or minSeqId if nothing added from editlogs. + * @return the sequence id of the last edit added to this region out of the recovered edits log or + * minSeqId if nothing added from editlogs. * @throws IOException */ - private long replayRecoveredEdits(final Path edits, - Map maxSeqIdInStores, final CancelableProgressable reporter) - throws IOException { + private long replayRecoveredEdits(final Path edits, Map maxSeqIdInStores, + final CancelableProgressable reporter) throws IOException { String msg = "Replaying edits from " + edits; LOG.info(msg); MonitoredTask status = TaskMonitor.get().createStatus(msg); @@ -4087,11 +3942,10 @@ intervalEdits = 0; long cur = EnvironmentEdgeManager.currentTime(); if (lastReport + period <= cur) { - status.setStatus("Replaying edits..." + - " skipped=" + skippedEdits + - " edits=" + editsCount); + status.setStatus( + "Replaying edits..." + " skipped=" + skippedEdits + " edits=" + editsCount); // Timeout reached - if(!reporter.progress()) { + if (!reporter.progress()) { msg = "Progressable reporter failed, stopping replay"; LOG.warn(msg); status.abort(msg); @@ -4109,14 +3963,13 @@ if (currentEditSeqId > key.getLogSeqNum()) { // when this condition is true, it means we have a serious defect because we need to // maintain increasing SeqId for WAL edits per region - LOG.error(getRegionInfo().getEncodedName() + " : " - + "Found decreasing SeqId. 
PreId=" + currentEditSeqId + " key=" + key - + "; edit=" + val); + LOG.error(getRegionInfo().getEncodedName() + " : " + "Found decreasing SeqId. PreId=" + + currentEditSeqId + " key=" + key + "; edit=" + val); } else { currentEditSeqId = key.getLogSeqNum(); } - currentReplaySeqId = (key.getOrigLogSeqNum() > 0) ? - key.getOrigLogSeqNum() : currentEditSeqId; + currentReplaySeqId = + (key.getOrigLogSeqNum() > 0) ? key.getOrigLogSeqNum() : currentEditSeqId; // Start coprocessor replay here. The coprocessor is for each WALEdit // instead of a KeyValue. @@ -4130,21 +3983,21 @@ boolean checkRowWithinBoundary = false; // Check this edit is for this region. if (!Bytes.equals(key.getEncodedRegionName(), - this.getRegionInfo().getEncodedNameAsBytes())) { + this.getRegionInfo().getEncodedNameAsBytes())) { checkRowWithinBoundary = true; } boolean flush = false; - for (Cell cell: val.getCells()) { + for (Cell cell : val.getCells()) { // Check this edit is for me. Also, guard against writing the special // METACOLUMN info such as HBASE::CACHEFLUSH entries if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) { // if region names don't match, skipp replaying compaction marker if (!checkRowWithinBoundary) { - //this is a special edit, we should handle it + // this is a special edit, we should handle it CompactionDescriptor compaction = WALEdit.getCompaction(cell); if (compaction != null) { - //replay the compaction + // replay the compaction replayWALCompactionMarker(compaction, false, true, Long.MAX_VALUE); } } @@ -4156,21 +4009,20 @@ store = getStore(cell); } if (store == null) { - // This should never happen. Perhaps schema was changed between + // This should never happen. Perhaps schema was changed between // crash and redeploy? LOG.warn("No family for " + cell); skippedEdits++; continue; } - if (checkRowWithinBoundary && !rowIsInRange(this.getRegionInfo(), - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) { + if (checkRowWithinBoundary && !rowIsInRange(this.getRegionInfo(), cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength())) { LOG.warn("Row of " + cell + " is not within region boundary"); skippedEdits++; continue; } // Now, figure if we should skip this edit. - if (key.getLogSeqNum() <= maxSeqIdInStores.get(store.getFamily() - .getName())) { + if (key.getLogSeqNum() <= maxSeqIdInStores.get(store.getFamily().getName())) { skippedEdits++; continue; } @@ -4192,9 +4044,9 @@ } } catch (EOFException eof) { Path p = WALSplitter.moveAsideBadEditsFile(fs, edits); - msg = "Encountered EOF. Most likely due to Master failure during " + - "wal splitting, so we have this data in another edit. " + - "Continuing, but renaming " + edits + " as " + p; + msg = "Encountered EOF. Most likely due to Master failure during " + + "wal splitting, so we have this data in another edit. " + "Continuing, but renaming " + + edits + " as " + p; LOG.warn(msg, eof); status.abort(msg); } catch (IOException ioe) { @@ -4202,42 +4054,38 @@ // then this problem is idempotent and retrying won't help if (ioe.getCause() instanceof ParseException) { Path p = WALSplitter.moveAsideBadEditsFile(fs, edits); - msg = "File corruption encountered! " + - "Continuing, but renaming " + edits + " as " + p; + msg = "File corruption encountered! " + "Continuing, but renaming " + edits + " as " + p; LOG.warn(msg, ioe); status.setStatus(msg); } else { status.abort(StringUtils.stringifyException(ioe)); // other IO errors may be transient (bad network connection, - // checksum exception on one datanode, etc). 
throw & retry + // checksum exception on one datanode, etc). throw & retry throw ioe; } } if (reporter != null && !reported_once) { reporter.progress(); } - msg = "Applied " + editsCount + ", skipped " + skippedEdits + - ", firstSequenceIdInLog=" + firstSeqIdInLog + - ", maxSequenceIdInLog=" + currentEditSeqId + ", path=" + edits; + msg = "Applied " + editsCount + ", skipped " + skippedEdits + ", firstSequenceIdInLog=" + + firstSeqIdInLog + ", maxSequenceIdInLog=" + currentEditSeqId + ", path=" + edits; status.markComplete(msg); LOG.debug(msg); return currentEditSeqId; } finally { status.cleanup(); if (reader != null) { - reader.close(); + reader.close(); } } } /** - * Call to complete a compaction. Its for the case where we find in the WAL a compaction - * that was not finished. We could find one recovering a WAL after a regionserver crash. - * See HBASE-2331. + * Call to complete a compaction. Its for the case where we find in the WAL a compaction that was + * not finished. We could find one recovering a WAL after a regionserver crash. See HBASE-2331. */ void replayWALCompactionMarker(CompactionDescriptor compaction, boolean pickCompactionFiles, - boolean removeFiles, long replaySeqId) - throws IOException { + boolean removeFiles, long replaySeqId) throws IOException { try { checkTargetRegion(compaction.getEncodedRegionName().toByteArray(), "Compaction marker from WAL ", compaction); @@ -4251,27 +4099,26 @@ synchronized (writestate) { if (replaySeqId < lastReplayedOpenRegionSeqId) { - LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Skipping replaying compaction event :" + TextFormat.shortDebugString(compaction) - + " because its sequence id " + replaySeqId + " is smaller than this regions " - + "lastReplayedOpenRegionSeqId of " + lastReplayedOpenRegionSeqId); + LOG.warn(getRegionInfo().getEncodedName() + " : " + "Skipping replaying compaction event :" + + TextFormat.shortDebugString(compaction) + " because its sequence id " + replaySeqId + + " is smaller than this regions " + "lastReplayedOpenRegionSeqId of " + + lastReplayedOpenRegionSeqId); return; } if (replaySeqId < lastReplayedCompactionSeqId) { - LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Skipping replaying compaction event :" + TextFormat.shortDebugString(compaction) - + " because its sequence id " + replaySeqId + " is smaller than this regions " - + "lastReplayedCompactionSeqId of " + lastReplayedCompactionSeqId); + LOG.warn(getRegionInfo().getEncodedName() + " : " + "Skipping replaying compaction event :" + + TextFormat.shortDebugString(compaction) + " because its sequence id " + replaySeqId + + " is smaller than this regions " + "lastReplayedCompactionSeqId of " + + lastReplayedCompactionSeqId); return; } else { lastReplayedCompactionSeqId = replaySeqId; } if (LOG.isDebugEnabled()) { - LOG.debug(getRegionInfo().getEncodedName() + " : " - + "Replaying compaction marker " + TextFormat.shortDebugString(compaction) - + " with seqId=" + replaySeqId + " and lastReplayedOpenRegionSeqId=" - + lastReplayedOpenRegionSeqId); + LOG.debug(getRegionInfo().getEncodedName() + " : " + "Replaying compaction marker " + + TextFormat.shortDebugString(compaction) + " with seqId=" + replaySeqId + + " and lastReplayedOpenRegionSeqId=" + lastReplayedOpenRegionSeqId); } startRegionOperation(Operation.REPLAY_EVENT); @@ -4289,7 +4136,8 @@ LOG.warn(getRegionInfo().getEncodedName() + " : " + "At least one of the store files in compaction: " + TextFormat.shortDebugString(compaction) - + " doesn't exist any more. 
Skip loading the file(s)", ex); + + " doesn't exist any more. Skip loading the file(s)", + ex); } finally { closeRegionOperation(Operation.REPLAY_EVENT); } @@ -4297,16 +4145,15 @@ } void replayWALFlushMarker(FlushDescriptor flush, long replaySeqId) throws IOException { - checkTargetRegion(flush.getEncodedRegionName().toByteArray(), - "Flush marker from WAL ", flush); + checkTargetRegion(flush.getEncodedRegionName().toByteArray(), "Flush marker from WAL ", flush); if (ServerRegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) { return; // if primary nothing to do } if (LOG.isDebugEnabled()) { - LOG.debug(getRegionInfo().getEncodedName() + " : " - + "Replaying flush marker " + TextFormat.shortDebugString(flush)); + LOG.debug(getRegionInfo().getEncodedName() + " : " + "Replaying flush marker " + + TextFormat.shortDebugString(flush)); } startRegionOperation(Operation.REPLAY_EVENT); // use region close lock to guard against close @@ -4326,9 +4173,9 @@ replayWALFlushCannotFlushMarker(flush, replaySeqId); break; default: - LOG.warn(getRegionInfo().getEncodedName() + " : " + - "Received a flush event with unknown action, ignoring. " + - TextFormat.shortDebugString(flush)); + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received a flush event with unknown action, ignoring. " + + TextFormat.shortDebugString(flush)); break; } @@ -4338,12 +4185,13 @@ } } - /** Replay the flush marker from primary region by creating a corresponding snapshot of - * the store memstores, only if the memstores do not have a higher seqId from an earlier wal - * edit (because the events may be coming out of order). + /** + * Replay the flush marker from primary region by creating a corresponding snapshot of the store + * memstores, only if the memstores do not have a higher seqId from an earlier wal edit (because + * the events may be coming out of order). */ @VisibleForTesting - PrepareFlushResult replayWALFlushStartMarker(FlushDescriptor flush) throws IOException { + PrepareFlushResult replayWALFlushStartMarker(FlushDescriptor flush) throws IOException { long flushSeqId = flush.getFlushSequenceNumber(); HashSet storesToFlush = new HashSet(); @@ -4352,8 +4200,8 @@ Store store = getStore(family); if (store == null) { LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Received a flush start marker from primary, but the family is not found. Ignoring" - + " StoreFlushDescriptor:" + TextFormat.shortDebugString(storeFlush)); + + "Received a flush start marker from primary, but the family is not found. Ignoring" + + " StoreFlushDescriptor:" + TextFormat.shortDebugString(storeFlush)); continue; } storesToFlush.add(store); @@ -4366,8 +4214,8 @@ synchronized (writestate) { try { if (flush.getFlushSequenceNumber() < lastReplayedOpenRegionSeqId) { - LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Skipping replaying flush event :" + TextFormat.shortDebugString(flush) + LOG.warn(getRegionInfo().getEncodedName() + " : " + "Skipping replaying flush event :" + + TextFormat.shortDebugString(flush) + " because its sequence id is smaller than this regions lastReplayedOpenRegionSeqId " + " of " + lastReplayedOpenRegionSeqId); return null; @@ -4382,27 +4230,27 @@ // we can just snapshot our memstores and continue as normal. // invoke prepareFlushCache. 
Send null as wal since we do not want the flush events in wal - PrepareFlushResult prepareResult = internalPrepareFlushCache(null, - flushSeqId, storesToFlush, status, false); + PrepareFlushResult prepareResult = + internalPrepareFlushCache(null, flushSeqId, storesToFlush, status, false); if (prepareResult.result == null) { // save the PrepareFlushResult so that we can use it later from commit flush this.writestate.flushing = true; this.prepareFlushResult = prepareResult; status.markComplete("Flush prepare successful"); if (LOG.isDebugEnabled()) { - LOG.debug(getRegionInfo().getEncodedName() + " : " - + " Prepared flush with seqId:" + flush.getFlushSequenceNumber()); + LOG.debug(getRegionInfo().getEncodedName() + " : " + " Prepared flush with seqId:" + + flush.getFlushSequenceNumber()); } } else { // special case empty memstore. We will still save the flush result in this case, since // our memstore ie empty, but the primary is still flushing - if (prepareResult.getResult().getResult() == - FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY) { + if (prepareResult.getResult() + .getResult() == FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY) { this.writestate.flushing = true; this.prepareFlushResult = prepareResult; if (LOG.isDebugEnabled()) { LOG.debug(getRegionInfo().getEncodedName() + " : " - + " Prepared empty flush with seqId:" + flush.getFlushSequenceNumber()); + + " Prepared empty flush with seqId:" + flush.getFlushSequenceNumber()); } } status.abort("Flush prepare failed with " + prepareResult.result); @@ -4414,23 +4262,23 @@ if (flush.getFlushSequenceNumber() == this.prepareFlushResult.flushOpSeqId) { // They define the same flush. Log and continue. LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Received a flush prepare marker with the same seqId: " + - + flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + + "Received a flush prepare marker with the same seqId: " + + +flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + prepareFlushResult.flushOpSeqId + ". Ignoring"); // ignore } else if (flush.getFlushSequenceNumber() < this.prepareFlushResult.flushOpSeqId) { // We received a flush with a smaller seqNum than what we have prepared. We can only // ignore this prepare flush request. LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Received a flush prepare marker with a smaller seqId: " + - + flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + + "Received a flush prepare marker with a smaller seqId: " + + +flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + prepareFlushResult.flushOpSeqId + ". Ignoring"); // ignore } else { // We received a flush with a larger seqNum than what we have prepared LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Received a flush prepare marker with a larger seqId: " + - + flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + + "Received a flush prepare marker with a larger seqId: " + + +flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + prepareFlushResult.flushOpSeqId + ". Ignoring"); // We do not have multiple active snapshots in the memstore or a way to merge current // memstore snapshot with the contents and resnapshot for now. 
We cannot take @@ -4454,9 +4302,9 @@ } @VisibleForTesting - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", - justification="Intentional; post memstore flush") - void replayWALFlushCommitMarker(FlushDescriptor flush) throws IOException { + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY", + justification = "Intentional; post memstore flush") + void replayWALFlushCommitMarker(FlushDescriptor flush) throws IOException { MonitoredTask status = TaskMonitor.get().createStatus("Committing flush " + this); // check whether we have the memstore snapshot with the corresponding seqId. Replay to @@ -4466,10 +4314,10 @@ synchronized (writestate) { try { if (flush.getFlushSequenceNumber() < lastReplayedOpenRegionSeqId) { - LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Skipping replaying flush event :" + TextFormat.shortDebugString(flush) - + " because its sequence id is smaller than this regions lastReplayedOpenRegionSeqId " - + " of " + lastReplayedOpenRegionSeqId); + LOG.warn(getRegionInfo().getEncodedName() + " : " + "Skipping replaying flush event :" + + TextFormat.shortDebugString(flush) + + " because its sequence id is smaller than this regions lastReplayedOpenRegionSeqId " + + " of " + lastReplayedOpenRegionSeqId); return; } @@ -4499,7 +4347,7 @@ + "Received a flush commit marker with smaller seqId: " + flush.getFlushSequenceNumber() + " than what we have prepared with seqId: " + prepareFlushResult.flushOpSeqId + ". Picking up new file, but not dropping" - +" prepared memstore snapshot"); + + " prepared memstore snapshot"); replayFlushInStores(flush, prepareFlushResult, false); // snapshot is not dropped, so memstore sizes should not be decremented @@ -4512,9 +4360,9 @@ // If not, we will drop all the memstore edits and the snapshot as well. LOG.warn(getRegionInfo().getEncodedName() + " : " + "Received a flush commit marker with larger seqId: " - + flush.getFlushSequenceNumber() + " than what we have prepared with seqId: " + - prepareFlushResult.flushOpSeqId + ". Picking up new file and dropping prepared" - +" memstore snapshot"); + + flush.getFlushSequenceNumber() + " than what we have prepared with seqId: " + + prepareFlushResult.flushOpSeqId + ". Picking up new file and dropping prepared" + + " memstore snapshot"); replayFlushInStores(flush, prepareFlushResult, true); @@ -4535,9 +4383,9 @@ // a previous flush we will not enable reads now. this.setReadsEnabled(true); } else { - LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Received a flush commit marker with seqId:" + flush.getFlushSequenceNumber() - + ", but no previous prepared snapshot was found"); + LOG.warn( + getRegionInfo().getEncodedName() + " : " + "Received a flush commit marker with seqId:" + + flush.getFlushSequenceNumber() + ", but no previous prepared snapshot was found"); // There is no corresponding prepare snapshot from before. // We will pick up the new flushed file replayFlushInStores(flush, null, false); @@ -4558,9 +4406,9 @@ } catch (FileNotFoundException ex) { LOG.warn(getRegionInfo().getEncodedName() + " : " + "At least one of the store files in flush: " + TextFormat.shortDebugString(flush) - + " doesn't exist any more. Skip loading the file(s)", ex); - } - finally { + + " doesn't exist any more. 
Skip loading the file(s)", + ex); + } finally { status.cleanup(); writestate.notifyAll(); } @@ -4582,8 +4430,7 @@ * @throws IOException */ private void replayFlushInStores(FlushDescriptor flush, PrepareFlushResult prepareFlushResult, - boolean dropMemstoreSnapshot) - throws IOException { + boolean dropMemstoreSnapshot) throws IOException { for (StoreFlushDescriptor storeFlush : flush.getStoreFlushesList()) { byte[] family = storeFlush.getFamilyName().toByteArray(); Store store = getStore(family); @@ -4605,8 +4452,8 @@ if (ctx == null) { LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Unexpected: flush commit marker received from store " - + Bytes.toString(family) + " but no associated flush context. Ignoring"); + + "Unexpected: flush commit marker received from store " + Bytes.toString(family) + + " but no associated flush context. Ignoring"); continue; } @@ -4618,8 +4465,8 @@ } /** - * Drops the memstore contents after replaying a flush descriptor or region open event replay - * if the memstore edits have seqNums smaller than the given seq id + * Drops the memstore contents after replaying a flush descriptor or region open event replay if + * the memstore edits have seqNums smaller than the given seq id * @throws IOException */ private long dropMemstoreContentsForSeqId(long seqId, Store store) throws IOException { @@ -4631,8 +4478,8 @@ if (seqId >= currentSeqId) { // then we can drop the memstore contents since everything is below this seqId LOG.info(getRegionInfo().getEncodedName() + " : " - + "Dropping memstore contents as well since replayed flush seqId: " - + seqId + " is greater than current seqId:" + currentSeqId); + + "Dropping memstore contents as well since replayed flush seqId: " + seqId + + " is greater than current seqId:" + currentSeqId); // Prepare flush (take a snapshot) and then abort (drop the snapshot) if (store == null) { @@ -4644,8 +4491,8 @@ } } else { LOG.info(getRegionInfo().getEncodedName() + " : " - + "Not dropping memstore contents since replayed flush seqId: " - + seqId + " is smaller than current seqId:" + currentSeqId); + + "Not dropping memstore contents since replayed flush seqId: " + seqId + + " is smaller than current seqId:" + currentSeqId); } } finally { this.updatesLock.writeLock().unlock(); @@ -4671,10 +4518,10 @@ private void replayWALFlushCannotFlushMarker(FlushDescriptor flush, long replaySeqId) { synchronized (writestate) { if (this.lastReplayedOpenRegionSeqId > replaySeqId) { - LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Skipping replaying flush event :" + TextFormat.shortDebugString(flush) - + " because its sequence id " + replaySeqId + " is smaller than this regions " - + "lastReplayedOpenRegionSeqId of " + lastReplayedOpenRegionSeqId); + LOG.warn(getRegionInfo().getEncodedName() + " : " + "Skipping replaying flush event :" + + TextFormat.shortDebugString(flush) + " because its sequence id " + replaySeqId + + " is smaller than this regions " + "lastReplayedOpenRegionSeqId of " + + lastReplayedOpenRegionSeqId); return; } @@ -4688,13 +4535,13 @@ } @VisibleForTesting - PrepareFlushResult getPrepareFlushResult() { + PrepareFlushResult getPrepareFlushResult() { return prepareFlushResult; } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", - justification="Intentional; cleared the memstore") - void replayWALRegionEventMarker(RegionEventDescriptor regionEvent) throws IOException { + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY", + justification = "Intentional; cleared the 
memstore") + void replayWALRegionEventMarker(RegionEventDescriptor regionEvent) throws IOException { checkTargetRegion(regionEvent.getEncodedRegionName().toByteArray(), "RegionEvent marker from WAL ", regionEvent); @@ -4709,15 +4556,15 @@ return; } if (regionEvent.getEventType() != EventType.REGION_OPEN) { - LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Unknown region event received, ignoring :" - + TextFormat.shortDebugString(regionEvent)); + LOG.warn( + getRegionInfo().getEncodedName() + " : " + "Unknown region event received, ignoring :" + + TextFormat.shortDebugString(regionEvent)); return; } if (LOG.isDebugEnabled()) { - LOG.debug(getRegionInfo().getEncodedName() + " : " - + "Replaying region open event marker " + TextFormat.shortDebugString(regionEvent)); + LOG.debug(getRegionInfo().getEncodedName() + " : " + "Replaying region open event marker " + + TextFormat.shortDebugString(regionEvent)); } // we will use writestate as a coarse-grain lock for all the replay events @@ -4731,10 +4578,10 @@ if (this.lastReplayedOpenRegionSeqId <= regionEvent.getLogSequenceNumber()) { this.lastReplayedOpenRegionSeqId = regionEvent.getLogSequenceNumber(); } else { - LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Skipping replaying region event :" + TextFormat.shortDebugString(regionEvent) - + " because its sequence id is smaller than this regions lastReplayedOpenRegionSeqId " - + " of " + lastReplayedOpenRegionSeqId); + LOG.warn(getRegionInfo().getEncodedName() + " : " + "Skipping replaying region event :" + + TextFormat.shortDebugString(regionEvent) + + " because its sequence id is smaller than this regions lastReplayedOpenRegionSeqId " + + " of " + lastReplayedOpenRegionSeqId); return; } @@ -4756,9 +4603,9 @@ try { store.refreshStoreFiles(storeFiles); // replace the files with the new ones } catch (FileNotFoundException ex) { - LOG.warn(getRegionInfo().getEncodedName() + " : " - + "At least one of the store files: " + storeFiles - + " doesn't exist any more. Skip loading the file(s)", ex); + LOG.warn(getRegionInfo().getEncodedName() + " : " + "At least one of the store files: " + + storeFiles + " doesn't exist any more. Skip loading the file(s)", + ex); continue; } if (store.getMaxSequenceId() != storeSeqId) { @@ -4769,8 +4616,8 @@ if (writestate.flushing) { // only drop memstore snapshots if they are smaller than last flush for the store if (this.prepareFlushResult.flushOpSeqId <= regionEvent.getLogSequenceNumber()) { - StoreFlushContext ctx = this.prepareFlushResult.storeFlushCtxs == null ? - null : this.prepareFlushResult.storeFlushCtxs.get(family); + StoreFlushContext ctx = this.prepareFlushResult.storeFlushCtxs == null ? 
null + : this.prepareFlushResult.storeFlushCtxs.get(family); if (ctx != null) { long snapshotSize = store.getFlushableSize(); ctx.abort(); @@ -4819,8 +4666,8 @@ } if (LOG.isDebugEnabled()) { - LOG.debug(getRegionInfo().getEncodedName() + " : " - + "Replaying bulkload event marker " + TextFormat.shortDebugString(bulkLoadEvent)); + LOG.debug(getRegionInfo().getEncodedName() + " : " + "Replaying bulkload event marker " + + TextFormat.shortDebugString(bulkLoadEvent)); } // check if multiple families involved boolean multipleFamilies = false; @@ -4847,8 +4694,7 @@ // smaller than this seqId if (bulkLoadEvent.getBulkloadSeqNum() >= 0 && this.lastReplayedOpenRegionSeqId >= bulkLoadEvent.getBulkloadSeqNum()) { - LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Skipping replaying bulkload event :" + LOG.warn(getRegionInfo().getEncodedName() + " : " + "Skipping replaying bulkload event :" + TextFormat.shortDebugString(bulkLoadEvent) + " because its sequence id is smaller than this region's lastReplayedOpenRegionSeqId" + " =" + lastReplayedOpenRegionSeqId); @@ -4862,8 +4708,8 @@ Store store = getStore(family); if (store == null) { LOG.warn(getRegionInfo().getEncodedName() + " : " - + "Received a bulk load marker from primary, but the family is not found. " - + "Ignoring. StoreDescriptor:" + storeDescriptor); + + "Received a bulk load marker from primary, but the family is not found. " + + "Ignoring. StoreDescriptor:" + storeDescriptor); continue; } @@ -4873,11 +4719,11 @@ try { storeFileInfo = fs.getStoreFileInfo(Bytes.toString(family), storeFile); store.bulkLoadHFile(storeFileInfo); - } catch(FileNotFoundException ex) { + } catch (FileNotFoundException ex) { LOG.warn(getRegionInfo().getEncodedName() + " : " - + ((storeFileInfo != null) ? storeFileInfo.toString() : - (new Path(Bytes.toString(family), storeFile)).toString()) - + " doesn't exist any more. Skip loading the file"); + + ((storeFileInfo != null) ? storeFileInfo.toString() + : (new Path(Bytes.toString(family), storeFile)).toString()) + + " doesn't exist any more. Skip loading the file"); } } } @@ -4897,8 +4743,8 @@ if (writestate.flushing) { boolean canDrop = true; if (prepareFlushResult.storeFlushCtxs != null) { - for (Entry entry - : prepareFlushResult.storeFlushCtxs.entrySet()) { + for (Entry entry : prepareFlushResult.storeFlushCtxs + .entrySet()) { Store store = getStore(entry.getKey()); if (store == null) { continue; @@ -4924,8 +4770,8 @@ return refreshStoreFiles(false); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", - justification="Notify is about post replay. Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY", + justification = "Notify is about post replay. Intentional") protected boolean refreshStoreFiles(boolean force) throws IOException { if (!force && ServerRegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) { return false; // if primary nothing to do @@ -4962,8 +4808,8 @@ if (writestate.flushing) { // only drop memstore snapshots if they are smaller than last flush for the store if (this.prepareFlushResult.flushOpSeqId <= storeSeqId) { - StoreFlushContext ctx = this.prepareFlushResult.storeFlushCtxs == null ? - null : this.prepareFlushResult.storeFlushCtxs.get(store.getFamily().getName()); + StoreFlushContext ctx = this.prepareFlushResult.storeFlushCtxs == null ? 
null + : this.prepareFlushResult.storeFlushCtxs.get(store.getFamily().getName()); if (ctx != null) { long snapshotSize = store.getFlushableSize(); ctx.abort(); @@ -4984,11 +4830,10 @@ dropPrepareFlushIfPossible(); // advance the mvcc read point so that the new flushed files are visible. - // either greater than flush seq number or they were already picked up via flush. - for (Store s : getStores()) { - mvcc.advanceTo(s.getMaxMemstoreTS()); - } - + // either greater than flush seq number or they were already picked up via flush. + for (Store s : getStores()) { + mvcc.advanceTo(s.getMaxMemstoreTS()); + } // smallestSeqIdInStores is the seqId that we have a corresponding hfile for. We can safely // skip all edits that are to be replayed in the future with that has a smaller seqId @@ -5022,8 +4867,9 @@ } } - /** Checks whether the given regionName is either equal to our region, or that - * the regionName is the primary region to our corresponding range for the secondary replica. + /** + * Checks whether the given regionName is either equal to our region, or that the regionName is + * the primary region to our corresponding range for the secondary replica. */ private void checkTargetRegion(byte[] encodedRegionName, String exceptionMsg, Object payload) throws WrongRegionException { @@ -5031,15 +4877,14 @@ return; } - if (!RegionReplicaUtil.isDefaultReplica(this.getRegionInfo()) && - Bytes.equals(encodedRegionName, - this.fs.getRegionInfoForFS().getEncodedNameAsBytes())) { + if (!RegionReplicaUtil.isDefaultReplica(this.getRegionInfo()) + && Bytes.equals(encodedRegionName, this.fs.getRegionInfoForFS().getEncodedNameAsBytes())) { return; } - throw new WrongRegionException(exceptionMsg + payload - + " targetted for region " + Bytes.toStringBinary(encodedRegionName) - + " does not match this region: " + this.getRegionInfo()); + throw new WrongRegionException( + exceptionMsg + payload + " targetted for region " + Bytes.toStringBinary(encodedRegionName) + + " does not match this region: " + this.getRegionInfo()); } /** @@ -5074,8 +4919,7 @@ protected HStore instantiateHStore(final HColumnDescriptor family) throws IOException { if (family.isMobEnabled()) { if (HFile.getFormatVersion(this.conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) { - throw new IOException("A minimum HFile version of " - + HFile.MIN_FORMAT_VERSION_WITH_TAGS + throw new IOException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS + " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY + " accordingly."); } @@ -5090,14 +4934,13 @@ } /** - * Return HStore instance. Does not do any copy: as the number of store is limited, we - * iterate on the list. + * Return HStore instance. Does not do any copy: as the number of store is limited, we iterate on + * the list. 
*/ private Store getStore(Cell cell) { for (Map.Entry famStore : stores.entrySet()) { - if (Bytes.equals( - cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), - famStore.getKey(), 0, famStore.getKey().length)) { + if (Bytes.equals(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), + famStore.getKey(), 0, famStore.getKey().length)) { return famStore.getValue(); } } @@ -5113,19 +4956,18 @@ } @Override - public List getStoreFileList(final byte [][] columns) - throws IllegalArgumentException { + public List getStoreFileList(final byte[][] columns) throws IllegalArgumentException { List storeFileNames = new ArrayList(); - synchronized(closeLock) { - for(byte[] column : columns) { + synchronized (closeLock) { + for (byte[] column : columns) { Store store = this.stores.get(column); if (store == null) { - throw new IllegalArgumentException("No column family : " + - new String(column) + " available"); + throw new IllegalArgumentException( + "No column family : " + new String(column) + " available"); } Collection storeFiles = store.getStorefiles(); if (storeFiles == null) continue; - for (StoreFile storeFile: storeFiles) { + for (StoreFile storeFile : storeFiles) { storeFileNames.add(storeFile.getPath().toString()); } @@ -5140,16 +4982,14 @@ ////////////////////////////////////////////////////////////////////////////// /** Make sure this is a valid row for the HRegion */ - void checkRow(final byte [] row, String op) throws IOException { - if (!rowIsInRange(getRegionInfo(), row)) { - throw new WrongRegionException("Requested row out of range for " + - op + " on HRegion " + this + ", startKey='" + - Bytes.toStringBinary(getRegionInfo().getStartKey()) + "', getEndKey()='" + - Bytes.toStringBinary(getRegionInfo().getEndKey()) + "', row='" + - Bytes.toStringBinary(row) + "'"); + void checkRow(final byte[] row, String op) throws IOException { + if (!rowIsInRange(getRegionInfo(), row)) { + throw new WrongRegionException("Requested row out of range for " + op + " on HRegion " + this + + ", startKey='" + Bytes.toStringBinary(getRegionInfo().getStartKey()) + + "', getEndKey()='" + Bytes.toStringBinary(getRegionInfo().getEndKey()) + "', row='" + + Bytes.toStringBinary(row) + "'"); } } - /** * Get an exclusive ( write lock ) lock on a given row. @@ -5162,14 +5002,12 @@ } /** - * - * Get a row lock for the specified row. All locks are reentrant. - * - * Before calling this function make sure that a region operation has already been - * started (the calling thread has already acquired the region-close-guard lock). + * Get a row lock for the specified row. All locks are reentrant. Before calling this function + * make sure that a region operation has already been started (the calling thread has already + * acquired the region-close-guard lock). * @param row The row actions will be performed against - * @param readLock is the lock reader or writer. True indicates that a non-exlcusive - * lock is requested + * @param readLock is the lock reader or writer. True indicates that a non-exclusive lock is + * requested */ public RowLock getRowLock(byte[] row, boolean readLock) throws IOException { // Make sure the row is inside of this region before getting the lock for it. @@ -5184,7 +5022,8 @@ // If we're tracing start a span to show how long this took. 
if (Trace.isTracing()) { traceScope = Trace.startSpan("HRegion.getRowLock"); - traceScope.getSpan().addTimelineAnnotation("Getting a " + (readLock?"readLock":"writeLock")); + traceScope.getSpan() + .addTimelineAnnotation("Getting a " + (readLock ? "readLock" : "writeLock")); } try { @@ -5263,6 +5102,7 @@ Lock l = readWriteLock.writeLock(); return getRowLock(l); } + RowLockImpl newReadLock() { Lock l = readWriteLock.readLock(); return getRowLock(l); @@ -5283,10 +5123,10 @@ long c = count.decrementAndGet(); if (c <= 0) { synchronized (lock) { - if (count.get() <= 0 ){ + if (count.get() <= 0) { usable.set(false); RowLockContext removed = lockedRows.remove(row); - assert removed == this: "we should never remove a different context"; + assert removed == this : "we should never remove a different context"; } } } @@ -5294,11 +5134,8 @@ @Override public String toString() { - return "RowLockContext{" + - "row=" + row + - ", readWriteLock=" + readWriteLock + - ", count=" + count + - '}'; + return "RowLockContext{" + "row=" + row + ", readWriteLock=" + readWriteLock + ", count=" + + count + '}'; } } @@ -5331,17 +5168,12 @@ @Override public String toString() { - return "RowLockImpl{" + - "context=" + context + - ", lock=" + lock + - '}'; + return "RowLockImpl{" + "context=" + context + ", lock=" + lock + '}'; } } /** - * Determines whether multiple column families are present - * Precondition: familyPaths is not null - * + * Determines whether multiple column families are present Precondition: familyPaths is not null * @param familyPaths List of (column family, hfilePath) */ private static boolean hasMultipleColumnFamilies(Collection> familyPaths) { @@ -5371,7 +5203,7 @@ this.writeRequestsCount.increment(); // There possibly was a split that happened between when the split keys - // were gathered and before the HRegion's write lock was taken. We need + // were gathered and before the HRegion's write lock was taken. We need // to validate the HFile region before attempting to bulk load all of them List ioes = new ArrayList(); List> failures = new ArrayList>(); @@ -5412,25 +5244,26 @@ .append(p.getSecond()); } // problem when validating - LOG.warn("There was a recoverable bulk load failure likely due to a" + - " split. These (family, HFile) pairs were not loaded: " + list); + LOG.warn("There was a recoverable bulk load failure likely due to a" + + " split. These (family, HFile) pairs were not loaded: " + list); return false; } // We need to assign a sequential ID that's in between two memstores in order to preserve // the guarantee that all the edits lower than the highest sequential ID from all the - // HFiles are flushed on disk. See HBASE-10958. The sequence id returned when we flush is + // HFiles are flushed on disk. See HBASE-10958. The sequence id returned when we flush is // guaranteed to be one beyond the file made when we flushed (or if nothing to flush, it is // a sequence id that we can be sure is beyond the last hfile written). if (assignSeqId) { FlushResult fs = flushcache(true, false); if (fs.isFlushSucceeded()) { - seqId = ((FlushResultImpl)fs).flushSequenceId; + seqId = ((FlushResultImpl) fs).flushSequenceId; } else if (fs.getResult() == FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY) { - seqId = ((FlushResultImpl)fs).flushSequenceId; + seqId = ((FlushResultImpl) fs).flushSequenceId; } else { - throw new IOException("Could not bulk load with an assigned sequential ID because the "+ - "flush didn't run. 
Reason for not flushing: " + ((FlushResultImpl)fs).failureReason); + throw new IOException("Could not bulk load with an assigned sequential ID because the " + + "flush didn't run. Reason for not flushing: " + + ((FlushResultImpl) fs).failureReason); } } @@ -5445,7 +5278,7 @@ } Path commitedStoreFile = store.bulkLoadHFile(finalPath, seqId); - if(storeFiles.containsKey(familyName)) { + if (storeFiles.containsKey(familyName)) { storeFiles.get(familyName).add(commitedStoreFile); } else { List storeFileNames = new ArrayList(); @@ -5460,14 +5293,16 @@ // cannot recover from since it is likely a failed HDFS operation. // TODO Need a better story for reverting partial failures due to HDFS. - LOG.error("There was a partial failure due to IO when attempting to" + - " load " + Bytes.toString(p.getFirst()) + " : " + p.getSecond(), ioe); + LOG.error("There was a partial failure due to IO when attempting to" + " load " + + Bytes.toString(p.getFirst()) + " : " + p.getSecond(), + ioe); if (bulkLoadListener != null) { try { bulkLoadListener.failedBulkLoad(familyName, path); } catch (Exception ex) { - LOG.error("Error while calling failedBulkLoad for family " + - Bytes.toString(familyName) + " with path " + path, ex); + LOG.error("Error while calling failedBulkLoad for family " + + Bytes.toString(familyName) + " with path " + path, + ex); } } throw ioe; @@ -5479,11 +5314,11 @@ if (wal != null && !storeFiles.isEmpty()) { // write a bulk load event when not all hfiles are loaded try { - WALProtos.BulkLoadDescriptor loadDescriptor = ProtobufUtil.toBulkLoadDescriptor( - this.getRegionInfo().getTable(), - ByteStringer.wrap(this.getRegionInfo().getEncodedNameAsBytes()), storeFiles, seqId); + WALProtos.BulkLoadDescriptor loadDescriptor = + ProtobufUtil.toBulkLoadDescriptor(this.getRegionInfo().getTable(), + ByteStringer.wrap(this.getRegionInfo().getEncodedNameAsBytes()), storeFiles, seqId); WALUtil.writeBulkLoadMarkerAndSync(wal, this.htableDescriptor, getRegionInfo(), - loadDescriptor, mvcc); + loadDescriptor, mvcc); } catch (IOException ioe) { if (this.rsServices != null) { // Have to abort region server because some hfiles has been loaded but we can't write @@ -5500,7 +5335,7 @@ @Override public boolean equals(Object o) { return o instanceof HRegion && Bytes.equals(getRegionInfo().getRegionName(), - ((HRegion) o).getRegionInfo().getRegionName()); + ((HRegion) o).getRegionInfo().getRegionName()); } @Override @@ -5519,12 +5354,15 @@ class RegionScannerImpl implements RegionScanner, org.apache.hadoop.hbase.ipc.RpcCallback { // Package local for testability KeyValueHeap storeHeap = null; - /** Heap of key-values that are not essential for the provided filters and are thus read - * on demand, if on-demand column family loading is enabled.*/ + /** + * Heap of key-values that are not essential for the provided filters and are thus read on + * demand, if on-demand column family loading is enabled. + */ KeyValueHeap joinedHeap = null; /** - * If the joined heap data gathering is interrupted due to scan limits, this will - * contain the row for which we are populating the values.*/ + * If the joined heap data gathering is interrupted due to scan limits, this will contain the + * row for which we are populating the values. 
+ */ protected Cell joinedContinuationRow = null; private boolean filterClosed = false; @@ -5549,8 +5387,7 @@ } RegionScannerImpl(Scan scan, List additionalScanners, HRegion region, - boolean copyCellsFromSharedMem) - throws IOException { + boolean copyCellsFromSharedMem) throws IOException { this.region = region; this.maxResultSize = scan.getMaxResultSize(); if (scan.hasFilter()) { @@ -5564,11 +5401,24 @@ * scanner context that can be used to enforce the batch limit in the event that a * ScannerContext is not specified during an invocation of next/nextRaw */ - defaultScannerContext = ScannerContext.newBuilder() - .setBatchLimit(scan.getBatch()).build(); + defaultScannerContext = ScannerContext.newBuilder().setBatchLimit(scan.getBatch()).build(); - if (Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW) && !scan.isGetScan()) { - this.stopRow = null; + // To resolve HBASE-15097,we should set the 'stopRow' exactly for the current region scan. + if (!scan.isReversed()) { + if (Bytes.equals(this.region.getRegionInfo().getEndKey(), HConstants.EMPTY_END_ROW)) { + if (Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) { + this.stopRow = null; + } else { + this.stopRow = scan.getStopRow(); + } + } else { + if (Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW) + || Bytes.compareTo(scan.getStopRow(), this.getRegionInfo().getEndKey()) >= 0) { + this.stopRow = this.region.getRegionInfo().getEndKey(); + } else { + this.stopRow = scan.getStopRow(); + } + } } else { this.stopRow = scan.getStopRow(); } @@ -5579,7 +5429,7 @@ // synchronize on scannerReadPoints so that nobody calculates // getSmallestReadPoint, before scannerReadPoints is updated. IsolationLevel isolationLevel = scan.getIsolationLevel(); - synchronized(scannerReadPoints) { + synchronized (scannerReadPoints) { this.readPt = getReadpoint(isolationLevel); scannerReadPoints.put(this, this.readPt); } @@ -5601,7 +5451,7 @@ throw handleFileNotFound(e); } if (this.filter == null || !scan.doLoadColumnFamiliesOnDemand() - || this.filter.isFamilyEssential(entry.getKey())) { + || this.filter.isFamilyEssential(entry.getKey())) { scanners.add(scanner); } else { joinedScanners.add(scanner); @@ -5612,8 +5462,7 @@ } protected void initializeKVHeap(List scanners, - List joinedScanners, HRegion region) - throws IOException { + List joinedScanners, HRegion region) throws IOException { this.storeHeap = new KeyValueHeap(scanners, comparator); if (!joinedScanners.isEmpty()) { this.joinedHeap = new KeyValueHeap(joinedScanners, comparator); @@ -5637,7 +5486,6 @@ /** * Reset both the filter and the old filter. - * * @throws IOException in case a filter raises an I/O exception. */ protected void resetFilters() throws IOException { @@ -5647,19 +5495,18 @@ } @Override - public boolean next(List outResults) - throws IOException { + public boolean next(List outResults) throws IOException { // apply the batching limit by default return next(outResults, defaultScannerContext); } @Override public synchronized boolean next(List outResults, ScannerContext scannerContext) - throws IOException { + throws IOException { if (this.filterClosed) { - throw new UnknownScannerException("Scanner was closed (timed out?) " + - "after we renewed it. Could be caused by a very slow scanner " + - "or a lengthy garbage collection"); + throw new UnknownScannerException("Scanner was closed (timed out?) " + + "after we renewed it. 
Could be caused by a very slow scanner " + + "or a lengthy garbage collection"); } startRegionOperation(Operation.SCAN); readRequestsCount.increment(); @@ -5733,10 +5580,10 @@ * @return true if more cells exist after this batch, false if scanner is done */ private boolean populateFromJoinedHeap(List results, ScannerContext scannerContext) - throws IOException { + throws IOException { assert joinedContinuationRow != null; - boolean moreValues = populateResult(results, this.joinedHeap, scannerContext, - joinedContinuationRow); + boolean moreValues = + populateResult(results, this.joinedHeap, scannerContext, joinedContinuationRow); if (!scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { // We are done with this row, reset the continuation. @@ -5778,12 +5625,12 @@ if (scannerContext.checkBatchLimit(limitScope)) { return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues(); } else if (scannerContext.checkSizeLimit(limitScope)) { - ScannerContext.NextState state = - moreCellsInRow? NextState.SIZE_LIMIT_REACHED_MID_ROW: NextState.SIZE_LIMIT_REACHED; + ScannerContext.NextState state = moreCellsInRow ? NextState.SIZE_LIMIT_REACHED_MID_ROW + : NextState.SIZE_LIMIT_REACHED; return scannerContext.setScannerState(state).hasMoreValues(); } else if (scannerContext.checkTimeLimit(limitScope)) { - ScannerContext.NextState state = - moreCellsInRow? NextState.TIME_LIMIT_REACHED_MID_ROW: NextState.TIME_LIMIT_REACHED; + ScannerContext.NextState state = moreCellsInRow ? NextState.TIME_LIMIT_REACHED_MID_ROW + : NextState.TIME_LIMIT_REACHED; return scannerContext.setScannerState(state).hasMoreValues(); } } while (moreCellsInRow); @@ -5858,9 +5705,8 @@ long afterTime = rpcCall.disconnectSince(); if (afterTime >= 0) { throw new CallerDisconnectedException( - "Aborting on region " + getRegionInfo().getRegionNameAsString() + ", call " + - this + " after " + afterTime + " ms, since " + - "caller disconnected"); + "Aborting on region " + getRegionInfo().getRegionNameAsString() + ", call " + this + + " after " + afterTime + " ms, since " + "caller disconnected"); } } @@ -6033,8 +5879,7 @@ * @return true when the joined heap may have data for the current row * @throws IOException */ - private boolean joinedHeapMayHaveData(Cell currentRowCell) - throws IOException { + private boolean joinedHeapMayHaveData(Cell currentRowCell) throws IOException { Cell nextJoinedKv = joinedHeap.peek(); boolean matchCurrentRow = nextJoinedKv != null && CellUtil.matchingRow(nextJoinedKv, currentRowCell); @@ -6045,9 +5890,8 @@ if (!matchCurrentRow) { Cell firstOnCurrentRow = CellUtil.createFirstOnRow(currentRowCell); boolean seekSuccessful = this.joinedHeap.requestSeek(firstOnCurrentRow, true, true); - matchAfterSeek = - seekSuccessful && joinedHeap.peek() != null - && CellUtil.matchingRow(joinedHeap.peek(), currentRowCell); + matchAfterSeek = seekSuccessful && joinedHeap.peek() != null + && CellUtil.matchingRow(joinedHeap.peek(), currentRowCell); } return matchCurrentRow || matchAfterSeek; @@ -6063,8 +5907,7 @@ private boolean filterRow() throws IOException { // when hasFilterRow returns true, filter.filterRow() will be called automatically inside // filterRowCells(List kvs) so we skip that scenario here. 
- return filter != null && (!filter.hasFilterRow()) - && filter.filterRow(); + return filter != null && (!filter.hasFilterRow()) && filter.filterRow(); } private boolean filterRowKey(Cell current) throws IOException { @@ -6072,23 +5915,21 @@ } protected boolean nextRow(ScannerContext scannerContext, Cell curRowCell) throws IOException { - assert this.joinedContinuationRow == null: "Trying to go to next row during joinedHeap read."; + assert this.joinedContinuationRow == null : "Trying to go to next row during joinedHeap read."; Cell next; - while ((next = this.storeHeap.peek()) != null && - CellUtil.matchingRow(next, curRowCell)) { + while ((next = this.storeHeap.peek()) != null && CellUtil.matchingRow(next, curRowCell)) { this.storeHeap.next(MOCKED_LIST); } resetFilters(); // Calling the hook in CP which allows it to do a fast forward return this.region.getCoprocessorHost() == null - || this.region.getCoprocessorHost() - .postScannerFilterRow(this, curRowCell); + || this.region.getCoprocessorHost().postScannerFilterRow(this, curRowCell); } protected boolean isStopRow(Cell currentRowCell) { - return currentRowCell == null - || (stopRow != null && comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length) >= isScan); + return currentRowCell == null || (stopRow != null + && comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length) >= isScan); } @Override @@ -6143,13 +5984,13 @@ LOG.error("unable to refresh store files", e); abortRegionServer(msg); return new NotServingRegionException( - getRegionInfo().getRegionNameAsString() + " is closing"); + getRegionInfo().getRegionNameAsString() + " is closing"); } } private void abortRegionServer(String msg) throws IOException { if (rsServices instanceof HRegionServer) { - ((HRegionServer)rsServices).abort(msg); + ((HRegionServer) rsServices).abort(msg); } throw new UnsupportedOperationException("not able to abort RS after: " + msg); } @@ -6174,35 +6015,32 @@ // Utility methods /** - * A utility method to create new instances of HRegion based on the - * {@link HConstants#REGION_IMPL} configuration property. - * @param tableDir qualified path of directory where region should be located, - * usually the table directory. - * @param wal The WAL is the outbound log for any updates to the HRegion - * The wal file is a logfile from the previous execution that's - * custom-computed for this HRegion. The HRegionServer computes and sorts the - * appropriate wal info for this HRegion. If there is a previous file - * (implying that the HRegion has been written-to before), then read it from - * the supplied path. + * A utility method to create new instances of HRegion based on the {@link HConstants#REGION_IMPL} + * configuration property. + * @param tableDir qualified path of directory where region should be located, usually the table + * directory. + * @param wal The WAL is the outbound log for any updates to the HRegion The wal file is a logfile + * from the previous execution that's custom-computed for this HRegion. The HRegionServer + * computes and sorts the appropriate wal info for this HRegion. If there is a previous + * file (implying that the HRegion has been written-to before), then read it from the + * supplied path. * @param fs is the filesystem. * @param conf is global configuration settings. - * @param regionInfo - HRegionInfo that describes the region - * is new), then read them from the supplied path. + * @param regionInfo - HRegionInfo that describes the region is new), then read them from the + * supplied path. 
* @param htd the table descriptor * @return the new instance */ - static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, - Configuration conf, HRegionInfo regionInfo, final HTableDescriptor htd, - RegionServerServices rsServices) { + static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration conf, + HRegionInfo regionInfo, final HTableDescriptor htd, RegionServerServices rsServices) { try { @SuppressWarnings("unchecked") Class regionClass = (Class) conf.getClass(HConstants.REGION_IMPL, HRegion.class); Constructor c = - regionClass.getConstructor(Path.class, WAL.class, FileSystem.class, - Configuration.class, HRegionInfo.class, HTableDescriptor.class, - RegionServerServices.class); + regionClass.getConstructor(Path.class, WAL.class, FileSystem.class, Configuration.class, + HRegionInfo.class, HTableDescriptor.class, RegionServerServices.class); return c.newInstance(tableDir, wal, fs, conf, regionInfo, htd, rsServices); } catch (Throwable e) { @@ -6213,7 +6051,6 @@ /** * Convenience method creating new HRegions. Used by createTable. - * * @param info Info for region to create. * @param rootDir Root directory for HBase instance * @param wal shared WAL @@ -6222,12 +6059,10 @@ * @throws IOException */ public static HRegion createHRegion(final HRegionInfo info, final Path rootDir, - final Configuration conf, final HTableDescriptor hTableDescriptor, - final WAL wal, final boolean initialize) - throws IOException { - LOG.info("creating HRegion " + info.getTable().getNameAsString() - + " HTD == " + hTableDescriptor + " RootDir = " + rootDir + - " Table name == " + info.getTable().getNameAsString()); + final Configuration conf, final HTableDescriptor hTableDescriptor, final WAL wal, + final boolean initialize) throws IOException { + LOG.info("creating HRegion " + info.getTable().getNameAsString() + " HTD == " + hTableDescriptor + + " RootDir = " + rootDir + " Table name == " + info.getTable().getNameAsString()); FileSystem fs = FileSystem.get(conf); Path tableDir = FSUtils.getTableDir(rootDir, info.getTable()); HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, info); @@ -6237,29 +6072,22 @@ } public static HRegion createHRegion(final HRegionInfo info, final Path rootDir, - final Configuration conf, - final HTableDescriptor hTableDescriptor, - final WAL wal) - throws IOException { + final Configuration conf, final HTableDescriptor hTableDescriptor, final WAL wal) + throws IOException { return createHRegion(info, rootDir, conf, hTableDescriptor, wal, true); } - /** * Open a Region. * @param info Info for region to be opened. - * @param wal WAL for region to use. This method will call - * WAL#setSequenceNumber(long) passing the result of the call to - * HRegion#getMinSequenceId() to ensure the wal id is properly kept - * up. HRegionStore does this every time it opens a new region. + * @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long) passing the + * result of the call to HRegion#getMinSequenceId() to ensure the wal id is properly kept + * up. HRegionStore does this every time it opens a new region. 
* @return new HRegion - * * @throws IOException */ - public static HRegion openHRegion(final HRegionInfo info, - final HTableDescriptor htd, final WAL wal, - final Configuration conf) - throws IOException { + public static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd, + final WAL wal, final Configuration conf) throws IOException { return openHRegion(info, htd, wal, conf, null, null); } @@ -6267,22 +6095,18 @@ * Open a Region. * @param info Info for region to be opened * @param htd the table descriptor - * @param wal WAL for region to use. This method will call - * WAL#setSequenceNumber(long) passing the result of the call to - * HRegion#getMinSequenceId() to ensure the wal id is properly kept - * up. HRegionStore does this every time it opens a new region. + * @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long) passing the + * result of the call to HRegion#getMinSequenceId() to ensure the wal id is properly kept + * up. HRegionStore does this every time it opens a new region. * @param conf The Configuration object to use. * @param rsServices An interface we can request flushes against. * @param reporter An interface we can report progress against. * @return new HRegion - * * @throws IOException */ - public static HRegion openHRegion(final HRegionInfo info, - final HTableDescriptor htd, final WAL wal, final Configuration conf, - final RegionServerServices rsServices, - final CancelableProgressable reporter) - throws IOException { + public static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd, + final WAL wal, final Configuration conf, final RegionServerServices rsServices, + final CancelableProgressable reporter) throws IOException { return openHRegion(FSUtils.getRootDir(conf), info, htd, wal, conf, rsServices, reporter); } @@ -6291,17 +6115,15 @@ * @param rootDir Root directory for HBase instance * @param info Info for region to be opened. * @param htd the table descriptor - * @param wal WAL for region to use. This method will call - * WAL#setSequenceNumber(long) passing the result of the call to - * HRegion#getMinSequenceId() to ensure the wal id is properly kept - * up. HRegionStore does this every time it opens a new region. + * @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long) passing the + * result of the call to HRegion#getMinSequenceId() to ensure the wal id is properly kept + * up. HRegionStore does this every time it opens a new region. * @param conf The Configuration object to use. * @return new HRegion * @throws IOException */ public static HRegion openHRegion(Path rootDir, final HRegionInfo info, - final HTableDescriptor htd, final WAL wal, final Configuration conf) - throws IOException { + final HTableDescriptor htd, final WAL wal, final Configuration conf) throws IOException { return openHRegion(rootDir, info, htd, wal, conf, null, null); } @@ -6310,10 +6132,9 @@ * @param rootDir Root directory for HBase instance * @param info Info for region to be opened. * @param htd the table descriptor - * @param wal WAL for region to use. This method will call - * WAL#setSequenceNumber(long) passing the result of the call to - * HRegion#getMinSequenceId() to ensure the wal id is properly kept - * up. HRegionStore does this every time it opens a new region. + * @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long) passing the + * result of the call to HRegion#getMinSequenceId() to ensure the wal id is properly kept + * up. 
HRegionStore does this every time it opens a new region. * @param conf The Configuration object to use. * @param rsServices An interface we can request flushes against. * @param reporter An interface we can report progress against. @@ -6322,9 +6143,8 @@ */ public static HRegion openHRegion(final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final WAL wal, final Configuration conf, - final RegionServerServices rsServices, - final CancelableProgressable reporter) - throws IOException { + final RegionServerServices rsServices, final CancelableProgressable reporter) + throws IOException { FileSystem fs = null; if (rsServices != null) { fs = rsServices.getFileSystem(); @@ -6342,16 +6162,15 @@ * @param rootDir Root directory for HBase instance * @param info Info for region to be opened. * @param htd the table descriptor - * @param wal WAL for region to use. This method will call - * WAL#setSequenceNumber(long) passing the result of the call to - * HRegion#getMinSequenceId() to ensure the wal id is properly kept - * up. HRegionStore does this every time it opens a new region. + * @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long) passing the + * result of the call to HRegion#getMinSequenceId() to ensure the wal id is properly kept + * up. HRegionStore does this every time it opens a new region. * @return new HRegion * @throws IOException */ public static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final WAL wal) - throws IOException { + throws IOException { return openHRegion(conf, fs, rootDir, info, htd, wal, null, null); } @@ -6362,10 +6181,9 @@ * @param rootDir Root directory for HBase instance * @param info Info for region to be opened. * @param htd the table descriptor - * @param wal WAL for region to use. This method will call - * WAL#setSequenceNumber(long) passing the result of the call to - * HRegion#getMinSequenceId() to ensure the wal id is properly kept - * up. HRegionStore does this every time it opens a new region. + * @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long) passing the + * result of the call to HRegion#getMinSequenceId() to ensure the wal id is properly kept + * up. HRegionStore does this every time it opens a new region. * @param rsServices An interface we can request flushes against. * @param reporter An interface we can report progress against. * @return new HRegion @@ -6374,7 +6192,7 @@ public static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final WAL wal, final RegionServerServices rsServices, final CancelableProgressable reporter) - throws IOException { + throws IOException { Path tableDir = FSUtils.getTableDir(rootDir, info.getTable()); return openHRegion(conf, fs, rootDir, tableDir, info, htd, wal, rsServices, reporter); } @@ -6386,10 +6204,9 @@ * @param rootDir Root directory for HBase instance * @param info Info for region to be opened. * @param htd the table descriptor - * @param wal WAL for region to use. This method will call - * WAL#setSequenceNumber(long) passing the result of the call to - * HRegion#getMinSequenceId() to ensure the wal id is properly kept - * up. HRegionStore does this every time it opens a new region. + * @param wal WAL for region to use. 
This method will call WAL#setSequenceNumber(long) passing the + * result of the call to HRegion#getMinSequenceId() to ensure the wal id is properly kept + * up. HRegionStore does this every time it opens a new region. * @param rsServices An interface we can request flushes against. * @param reporter An interface we can report progress against. * @return new HRegion @@ -6397,9 +6214,8 @@ */ public static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final Path tableDir, final HRegionInfo info, final HTableDescriptor htd, - final WAL wal, final RegionServerServices rsServices, - final CancelableProgressable reporter) - throws IOException { + final WAL wal, final RegionServerServices rsServices, final CancelableProgressable reporter) + throws IOException { if (info == null) throw new NullPointerException("Passed region info is null"); if (LOG.isDebugEnabled()) { LOG.debug("Opening region: " + info); @@ -6407,7 +6223,6 @@ HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, rsServices); return r.openHRegion(reporter); } - /** * Useful when reopening a closed region (normally for unit tests) @@ -6420,23 +6235,21 @@ throws IOException { HRegionFileSystem regionFs = other.getRegionFileSystem(); HRegion r = newHRegion(regionFs.getTableDir(), other.getWAL(), regionFs.getFileSystem(), - other.baseConf, other.getRegionInfo(), other.getTableDesc(), null); + other.baseConf, other.getRegionInfo(), other.getTableDesc(), null); return r.openHRegion(reporter); } public static Region openHRegion(final Region other, final CancelableProgressable reporter) - throws IOException { - return openHRegion((HRegion)other, reporter); + throws IOException { + return openHRegion((HRegion) other, reporter); } /** - * Open HRegion. - * Calls initialize and sets sequenceId. + * Open HRegion. Calls initialize and sets sequenceId. * @return Returns this * @throws IOException */ - protected HRegion openHRegion(final CancelableProgressable reporter) - throws IOException { + protected HRegion openHRegion(final CancelableProgressable reporter) throws IOException { // Refuse to open the region if we are missing local compression support checkCompressionCodecs(); // Refuse to open the region if encryption configuration is incorrect or @@ -6446,8 +6259,7 @@ checkClassLoading(); this.openSeqNum = initialize(reporter); this.mvcc.advanceTo(openSeqNum); - if (wal != null && getRegionServerServices() != null && !writestate.readOnly - && !recovering) { + if (wal != null && getRegionServerServices() != null && !writestate.readOnly && !recovering) { // Only write the region open event marker to WAL if (1) we are not read-only // (2) dist log replay is off or we are not recovering. 
In case region is // recovering, the open event will be written at setRecovering(false) @@ -6456,11 +6268,9 @@ return this; } - public static void warmupHRegion(final HRegionInfo info, - final HTableDescriptor htd, final WAL wal, final Configuration conf, - final RegionServerServices rsServices, - final CancelableProgressable reporter) - throws IOException { + public static void warmupHRegion(final HRegionInfo info, final HTableDescriptor htd, + final WAL wal, final Configuration conf, final RegionServerServices rsServices, + final CancelableProgressable reporter) throws IOException { if (info == null) throw new NullPointerException("Passed region info is null"); @@ -6484,16 +6294,15 @@ r.close(); } - private void checkCompressionCodecs() throws IOException { - for (HColumnDescriptor fam: this.htableDescriptor.getColumnFamilies()) { + for (HColumnDescriptor fam : this.htableDescriptor.getColumnFamilies()) { CompressionTest.testCompression(fam.getCompressionType()); CompressionTest.testCompression(fam.getCompactionCompressionType()); } } private void checkEncryption() throws IOException { - for (HColumnDescriptor fam: this.htableDescriptor.getColumnFamilies()) { + for (HColumnDescriptor fam : this.htableDescriptor.getColumnFamilies()) { EncryptionTest.testEncryption(conf, fam.getEncryptionType(), fam.getEncryptionKey()); } } @@ -6514,7 +6323,7 @@ // Create the daughter HRegion instance HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getWAL(), fs.getFileSystem(), - this.getBaseConf(), hri, this.getTableDesc(), rsServices); + this.getBaseConf(), hri, this.getTableDesc(), rsServices); r.readRequestsCount.set(this.getReadRequestsCount() / 2); r.writeRequestsCount.set(this.getWriteRequestsCount() / 2); return r; @@ -6526,13 +6335,11 @@ * @return merged HRegion * @throws IOException */ - HRegion createMergedRegionFromMerges(final HRegionInfo mergedRegionInfo, - final HRegion region_b) throws IOException { - HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getWAL(), - fs.getFileSystem(), this.getBaseConf(), mergedRegionInfo, - this.getTableDesc(), this.rsServices); - r.readRequestsCount.set(this.getReadRequestsCount() - + region_b.getReadRequestsCount()); + HRegion createMergedRegionFromMerges(final HRegionInfo mergedRegionInfo, final HRegion region_b) + throws IOException { + HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getWAL(), fs.getFileSystem(), + this.getBaseConf(), mergedRegionInfo, this.getTableDesc(), this.rsServices); + r.readRequestsCount.set(this.getReadRequestsCount() + region_b.getReadRequestsCount()); r.writeRequestsCount.set(this.getWriteRequestsCount() + region_b.getWriteRequestsCount()); @@ -6541,13 +6348,10 @@ } /** - * Inserts a new region's meta information into the passed - * meta region. Used by the HMaster bootstrap code adding - * new table to hbase:meta table. - * + * Inserts a new region's meta information into the passed meta region. Used by the + * HMaster bootstrap code adding new table to hbase:meta table. 
* @param meta hbase:meta HRegion to be updated * @param r HRegion to add to meta - * * @throws IOException */ // TODO remove since only test and merge use this @@ -6557,19 +6361,16 @@ byte[] row = r.getRegionInfo().getRegionName(); final long now = EnvironmentEdgeManager.currentTime(); final List cells = new ArrayList(2); - cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER, now, - r.getRegionInfo().toByteArray())); + cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, now, + r.getRegionInfo().toByteArray())); // Set into the root table the version of the meta table. - cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, - HConstants.META_VERSION_QUALIFIER, now, - Bytes.toBytes(HConstants.META_VERSION))); + cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER, now, + Bytes.toBytes(HConstants.META_VERSION))); meta.put(row, HConstants.CATALOG_FAMILY, cells); } /** * Computes the Path of the HRegion - * * @param tabledir qualified path for table * @param name ENCODED region name * @return Path of HRegion directory @@ -6582,7 +6383,6 @@ /** * Computes the Path of the HRegion - * * @param rootdir qualified path of HBase root directory * @param info HRegionInfo for the region * @return qualified path of region directory @@ -6591,42 +6391,34 @@ @Deprecated @VisibleForTesting public static Path getRegionDir(final Path rootdir, final HRegionInfo info) { - return new Path( - FSUtils.getTableDir(rootdir, info.getTable()), info.getEncodedName()); + return new Path(FSUtils.getTableDir(rootdir, info.getTable()), info.getEncodedName()); } /** - * Determines if the specified row is within the row range specified by the - * specified HRegionInfo - * + * Determines if the specified row is within the row range specified by the specified HRegionInfo * @param info HRegionInfo that specifies the row range * @param row row to be checked * @return true if the row is within the range specified by the HRegionInfo */ - public static boolean rowIsInRange(HRegionInfo info, final byte [] row) { - return ((info.getStartKey().length == 0) || - (Bytes.compareTo(info.getStartKey(), row) <= 0)) && - ((info.getEndKey().length == 0) || - (Bytes.compareTo(info.getEndKey(), row) > 0)); + public static boolean rowIsInRange(HRegionInfo info, final byte[] row) { + return ((info.getStartKey().length == 0) || (Bytes.compareTo(info.getStartKey(), row) <= 0)) + && ((info.getEndKey().length == 0) || (Bytes.compareTo(info.getEndKey(), row) > 0)); } - public static boolean rowIsInRange(HRegionInfo info, final byte [] row, final int offset, + public static boolean rowIsInRange(HRegionInfo info, final byte[] row, final int offset, final short length) { - return ((info.getStartKey().length == 0) || - (Bytes.compareTo(info.getStartKey(), 0, info.getStartKey().length, - row, offset, length) <= 0)) && - ((info.getEndKey().length == 0) || - (Bytes.compareTo(info.getEndKey(), 0, info.getEndKey().length, row, offset, length) > 0)); + return ((info.getStartKey().length == 0) || (Bytes + .compareTo(info.getStartKey(), 0, info.getStartKey().length, row, offset, length) <= 0)) + && ((info.getEndKey().length == 0) || (Bytes.compareTo(info.getEndKey(), 0, + info.getEndKey().length, row, offset, length) > 0)); } /** - * Merge two HRegions. The regions must be adjacent and must not overlap. - * + * Merge two HRegions. The regions must be adjacent and must not overlap. 
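A minimal standalone sketch of the range check that the reformatted rowIsInRange performs above — an empty start key means unbounded below, an empty end key means unbounded above, otherwise startKey <= row < endKey. This is a hypothetical helper for illustration only, not part of this patch; it uses only org.apache.hadoop.hbase.util.Bytes:

  // Hypothetical helper mirroring HRegion.rowIsInRange (illustration only).
  static boolean inRange(byte[] startKey, byte[] endKey, byte[] row) {
    boolean aboveStart = (startKey.length == 0) || (Bytes.compareTo(startKey, row) <= 0);
    boolean belowEnd = (endKey.length == 0) || (Bytes.compareTo(endKey, row) > 0);
    return aboveStart && belowEnd;
  }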
* @return new merged HRegion * @throws IOException */ - public static HRegion mergeAdjacent(final HRegion srcA, final HRegion srcB) - throws IOException { + public static HRegion mergeAdjacent(final HRegion srcA, final HRegion srcB) throws IOException { HRegion a = srcA; HRegion b = srcB; @@ -6637,15 +6429,13 @@ throw new IOException("Cannot merge two regions with null start key"); } // A's start key is null but B's isn't. Assume A comes before B - } else if ((srcB.getRegionInfo().getStartKey() == null) || - (Bytes.compareTo(srcA.getRegionInfo().getStartKey(), - srcB.getRegionInfo().getStartKey()) > 0)) { + } else if ((srcB.getRegionInfo().getStartKey() == null) || (Bytes + .compareTo(srcA.getRegionInfo().getStartKey(), srcB.getRegionInfo().getStartKey()) > 0)) { a = srcB; b = srcA; } - if (!(Bytes.compareTo(a.getRegionInfo().getEndKey(), - b.getRegionInfo().getStartKey()) == 0)) { + if (!(Bytes.compareTo(a.getRegionInfo().getEndKey(), b.getRegionInfo().getStartKey()) == 0)) { throw new IOException("Cannot merge non-adjacent regions"); } return merge(a, b); @@ -6653,7 +6443,6 @@ /** * Merge two regions whether they are adjacent or not. - * * @param a region a * @param b region b * @return new merged region @@ -6686,19 +6475,17 @@ throw new IOException("Unable to merge regions " + a + " and " + b); } HRegionInfo mergedRegionInfo = rmt.getMergedRegionInfo(); - LOG.info("starting merge of regions: " + a + " and " + b - + " into new region " + mergedRegionInfo.getRegionNameAsString() - + " with start key <" - + Bytes.toStringBinary(mergedRegionInfo.getStartKey()) - + "> and end key <" + LOG.info("starting merge of regions: " + a + " and " + b + " into new region " + + mergedRegionInfo.getRegionNameAsString() + " with start key <" + + Bytes.toStringBinary(mergedRegionInfo.getStartKey()) + "> and end key <" + Bytes.toStringBinary(mergedRegionInfo.getEndKey()) + ">"); HRegion dstRegion; try { - dstRegion = (HRegion)rmt.execute(null, null); + dstRegion = (HRegion) rmt.execute(null, null); } catch (IOException ioe) { rmt.rollback(null, null); - throw new IOException("Failed merging region " + a + " and " + b - + ", and successfully rolled back"); + throw new IOException( + "Failed merging region " + a + " and " + b + ", and successfully rolled back"); } dstRegion.compact(true); @@ -6733,15 +6520,15 @@ return Result.create(results, get.isCheckExistenceOnly() ? !results.isEmpty() : null, stale); } - void prepareGet(final Get get) throws IOException, NoSuchColumnFamilyException { + void prepareGet(final Get get) throws IOException, NoSuchColumnFamilyException { checkRow(get.getRow(), "Get"); // Verify families are all valid if (get.hasFamilies()) { - for (byte [] family: get.familySet()) { + for (byte[] family : get.familySet()) { checkFamily(family); } } else { // Adding all families to scanner - for (byte[] family: this.htableDescriptor.getFamiliesKeys()) { + for (byte[] family : this.htableDescriptor.getFamiliesKeys()) { get.addFamily(family); } } @@ -6766,8 +6553,7 @@ scanner = getScanner(scan); scanner.next(results); } finally { - if (scanner != null) - scanner.close(); + if (scanner != null) scanner.close(); } // post-get CP hook @@ -6799,29 +6585,29 @@ } /** - * Perform atomic mutations within the region w/o nonces. - * See {@link #mutateRowsWithLocks(Collection, Collection, long, long)} + * Perform atomic mutations within the region w/o nonces. 
See + * {@link #mutateRowsWithLocks(Collection, Collection, long, long)} */ - public void mutateRowsWithLocks(Collection mutations, - Collection rowsToLock) throws IOException { + public void mutateRowsWithLocks(Collection mutations, Collection rowsToLock) + throws IOException { mutateRowsWithLocks(mutations, rowsToLock, HConstants.NO_NONCE, HConstants.NO_NONCE); } /** * Perform atomic mutations within the region. - * @param mutations The list of mutations to perform. - * mutations can contain operations for multiple rows. - * Caller has to ensure that all rows are contained in this region. + * @param mutations The list of mutations to perform. mutations can contain + * operations for multiple rows. Caller has to ensure that all rows are contained in this + * region. * @param rowsToLock Rows to lock * @param nonceGroup Optional nonce group of the operation (client Id) - * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence") - * If multiple rows are locked care should be taken that - * rowsToLock is sorted in order to avoid deadlocks. + * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence") If + * multiple rows are locked care should be taken that rowsToLock is sorted + * in order to avoid deadlocks. * @throws IOException */ @Override - public void mutateRowsWithLocks(Collection mutations, - Collection rowsToLock, long nonceGroup, long nonce) throws IOException { + public void mutateRowsWithLocks(Collection mutations, Collection rowsToLock, + long nonceGroup, long nonce) throws IOException { MultiRowMutationProcessor proc = new MultiRowMutationProcessor(mutations, rowsToLock); processRowsWithLocks(proc, -1, nonceGroup, nonce); } @@ -6834,29 +6620,28 @@ return null; } ClientProtos.RegionLoadStats.Builder stats = ClientProtos.RegionLoadStats.newBuilder(); - stats.setMemstoreLoad((int) (Math.min(100, (this.memstoreSize.get() * 100) / this - .memstoreFlushSize))); - stats.setHeapOccupancy((int)rsServices.getHeapMemoryManager().getHeapOccupancyPercent()*100); - stats.setCompactionPressure((int)rsServices.getCompactionPressure()*100 > 100 ? 100 : - (int)rsServices.getCompactionPressure()*100); + stats.setMemstoreLoad( + (int) (Math.min(100, (this.memstoreSize.get() * 100) / this.memstoreFlushSize))); + stats.setHeapOccupancy((int) rsServices.getHeapMemoryManager().getHeapOccupancyPercent() * 100); + stats.setCompactionPressure((int) rsServices.getCompactionPressure() * 100 > 100 ? 
100 + : (int) rsServices.getCompactionPressure() * 100); return stats.build(); } @Override - public void processRowsWithLocks(RowProcessor processor) throws IOException { - processRowsWithLocks(processor, rowProcessorTimeout, HConstants.NO_NONCE, - HConstants.NO_NONCE); + public void processRowsWithLocks(RowProcessor processor) throws IOException { + processRowsWithLocks(processor, rowProcessorTimeout, HConstants.NO_NONCE, HConstants.NO_NONCE); } @Override - public void processRowsWithLocks(RowProcessor processor, long nonceGroup, long nonce) + public void processRowsWithLocks(RowProcessor processor, long nonceGroup, long nonce) throws IOException { processRowsWithLocks(processor, rowProcessorTimeout, nonceGroup, nonce); } @Override - public void processRowsWithLocks(RowProcessor processor, long timeout, - long nonceGroup, long nonce) throws IOException { + public void processRowsWithLocks(RowProcessor processor, long timeout, long nonceGroup, + long nonce) throws IOException { for (byte[] row : processor.getRowsToLock()) { checkRow(row, "processRowsWithLocks"); @@ -6880,8 +6665,7 @@ if (processor.readOnly()) { try { long now = EnvironmentEdgeManager.currentTime(); - doProcessRowWithTimeout( - processor, now, this, null, null, timeout); + doProcessRowWithTimeout(processor, now, this, null, null, timeout); processor.postProcess(this, walEdit, true); } finally { closeRegionOperation(); @@ -6913,9 +6697,8 @@ long now = EnvironmentEdgeManager.currentTime(); try { // 4. Let the processor scan the rows, generate mutations and add - // waledits - doProcessRowWithTimeout( - processor, now, this, mutations, walEdit, timeout); + // waledits + doProcessRowWithTimeout(processor, now, this, mutations, walEdit, timeout); if (!mutations.isEmpty()) { @@ -6927,12 +6710,12 @@ if (!walEdit.isEmpty()) { // we use HLogKey here instead of WALKey directly to support legacy coprocessors. walKey = new HLogKey(this.getRegionInfo().getEncodedNameAsBytes(), - this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, - processor.getClusterIds(), nonceGroup, nonce, mvcc); - txid = this.wal.append(this.htableDescriptor, this.getRegionInfo(), - walKey, walEdit, false); + this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, + processor.getClusterIds(), nonceGroup, nonce, mvcc); + txid = this.wal.append(this.htableDescriptor, this.getRegionInfo(), walKey, walEdit, + false); } - if(walKey == null){ + if (walKey == null) { // since we use wal sequence Id as mvcc, for SKIP_WAL changes we need a "faked" WALEdit // to get a sequence id assigned which is done by FSWALEntry#stampRegionSequenceId walKey = this.appendEmptyEdit(this.wal); @@ -6941,8 +6724,6 @@ // 7. Start mvcc transaction writeEntry = walKey.getWriteEntry(); mvccNum = walKey.getSequenceId(); - - // 8. Apply to memstore for (Mutation m : mutations) { @@ -6983,9 +6764,9 @@ // memstore rollback such as append and doMiniBatchMutation. Currently it is a little // different. Make them all share same code! if (!mutations.isEmpty() && !walSyncSuccessful) { - LOG.warn("Wal sync failed. Roll back " + mutations.size() + - " memstore keyvalues for row(s):" + StringUtils.byteToHexString( - processor.getRowsToLock().iterator().next()) + "..."); + LOG.warn( + "Wal sync failed. 
Roll back " + mutations.size() + " memstore keyvalues for row(s):" + + StringUtils.byteToHexString(processor.getRowsToLock().iterator().next()) + "..."); for (Mutation m : mutations) { for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); @@ -7013,57 +6794,49 @@ } finally { closeRegionOperation(); - if (!mutations.isEmpty() && - isFlushSize(this.addAndGetGlobalMemstoreSize(addedSize))) { + if (!mutations.isEmpty() && isFlushSize(this.addAndGetGlobalMemstoreSize(addedSize))) { requestFlush(); } } } - private void doProcessRowWithTimeout(final RowProcessor processor, - final long now, - final HRegion region, - final List mutations, - final WALEdit walEdit, - final long timeout) throws IOException { + private void doProcessRowWithTimeout(final RowProcessor processor, final long now, + final HRegion region, final List mutations, final WALEdit walEdit, + final long timeout) throws IOException { // Short circuit the no time bound case. if (timeout < 0) { try { processor.process(now, region, mutations, walEdit); } catch (IOException e) { - LOG.warn("RowProcessor:" + processor.getClass().getName() + - " throws Exception on row(s):" + - Bytes.toStringBinary( - processor.getRowsToLock().iterator().next()) + "...", e); + LOG.warn("RowProcessor:" + processor.getClass().getName() + " throws Exception on row(s):" + + Bytes.toStringBinary(processor.getRowsToLock().iterator().next()) + "...", + e); throw e; } return; } // Case with time bound - FutureTask task = - new FutureTask(new Callable() { - @Override - public Void call() throws IOException { - try { - processor.process(now, region, mutations, walEdit); - return null; - } catch (IOException e) { - LOG.warn("RowProcessor:" + processor.getClass().getName() + - " throws Exception on row(s):" + - Bytes.toStringBinary( - processor.getRowsToLock().iterator().next()) + "...", e); - throw e; - } + FutureTask task = new FutureTask(new Callable() { + @Override + public Void call() throws IOException { + try { + processor.process(now, region, mutations, walEdit); + return null; + } catch (IOException e) { + LOG.warn("RowProcessor:" + processor.getClass().getName() + " throws Exception on row(s):" + + Bytes.toStringBinary(processor.getRowsToLock().iterator().next()) + "...", + e); + throw e; } - }); + } + }); rowProcessorExecutor.execute(task); try { task.get(timeout, TimeUnit.MILLISECONDS); } catch (TimeoutException te) { - LOG.error("RowProcessor timeout:" + timeout + " ms on row(s):" + - Bytes.toStringBinary(processor.getRowsToLock().iterator().next()) + - "..."); + LOG.error("RowProcessor timeout:" + timeout + " ms on row(s):" + + Bytes.toStringBinary(processor.getRowsToLock().iterator().next()) + "..."); throw new IOException(te); } catch (Exception e) { throw new IOException(e); @@ -7075,9 +6848,10 @@ */ private static List carryForwardTags(final Cell cell, final List tags) { if (cell.getTagsLength() <= 0) return tags; - List newTags = tags == null? new ArrayList(): /*Append Tags*/tags; + List newTags = tags == null ? new ArrayList() : /* Append Tags */tags; Iterator i = CellUtil.tagsIterator(cell); - while (i.hasNext()) newTags.add(i.next()); + while (i.hasNext()) + newTags.add(i.next()); return newTags; } @@ -7085,9 +6859,8 @@ * Run a Get against passed in store on passed row, etc. * @return Get result. 
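The time-bounded branch of doProcessRowWithTimeout reformatted above wraps the processor call in a FutureTask and waits on it with a deadline. A self-contained sketch of that pattern, with hypothetical names and plain java.util.concurrent (no HBase types), is:

  import java.util.concurrent.Callable;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.FutureTask;
  import java.util.concurrent.TimeUnit;
  import java.util.concurrent.TimeoutException;

  final class TimeBoundedCallSketch {
    // Sketch only: a negative timeout short-circuits to a direct call,
    // as in the region code above.
    static void runWithTimeout(final Runnable work, long timeoutMs, ExecutorService pool)
        throws Exception {
      if (timeoutMs < 0) {
        work.run();
        return;
      }
      FutureTask<Void> task = new FutureTask<Void>(new Callable<Void>() {
        @Override
        public Void call() {
          work.run();
          return null;
        }
      });
      pool.execute(task);
      try {
        // Throws TimeoutException if the work does not finish in time.
        task.get(timeoutMs, TimeUnit.MILLISECONDS);
      } catch (TimeoutException te) {
        throw new java.io.IOException("processor timed out after " + timeoutMs + " ms", te);
      }
    }
  }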
*/ - private List doGet(final Store store, final byte [] row, - final Map.Entry> family, final TimeRange tr) - throws IOException { + private List doGet(final Store store, final byte[] row, + final Map.Entry> family, final TimeRange tr) throws IOException { // Sort the cells so that they match the order that they // appear in the Get results. Otherwise, we won't be able to // find the existing values if the cells are not specified @@ -7144,7 +6917,7 @@ mvcc.await(); if (this.coprocessorHost != null) { Result r = this.coprocessorHost.preAppendAfterRowLock(mutate); - if (r!= null) { + if (r != null) { return r; } } @@ -7166,8 +6939,7 @@ for (Cell cell : family.getValue()) { Cell newCell; Cell oldCell = null; - if (idx < results.size() - && CellUtil.matchingQualifier(results.get(idx), cell)) { + if (idx < results.size() && CellUtil.matchingQualifier(results.get(idx), cell)) { oldCell = results.get(idx); long ts = Math.max(now, oldCell.getTimestamp()); @@ -7181,7 +6953,7 @@ if (mutate.getTTL() != Long.MAX_VALUE) { // Add the new TTL tag newTags.add( - new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(mutate.getTTL()))); + new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(mutate.getTTL()))); } // Rebuild tags @@ -7190,22 +6962,19 @@ // allocate an empty cell once newCell = new KeyValue(row.length, cell.getFamilyLength(), cell.getQualifierLength(), ts, KeyValue.Type.Put, - oldCell.getValueLength() + cell.getValueLength(), - tagBytes.length); + oldCell.getValueLength() + cell.getValueLength(), tagBytes.length); // copy in row, family, and qualifier - System.arraycopy(cell.getRowArray(), cell.getRowOffset(), - newCell.getRowArray(), newCell.getRowOffset(), cell.getRowLength()); + System.arraycopy(cell.getRowArray(), cell.getRowOffset(), newCell.getRowArray(), + newCell.getRowOffset(), cell.getRowLength()); System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(), - newCell.getFamilyArray(), newCell.getFamilyOffset(), - cell.getFamilyLength()); + newCell.getFamilyArray(), newCell.getFamilyOffset(), cell.getFamilyLength()); System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), newCell.getQualifierArray(), newCell.getQualifierOffset(), cell.getQualifierLength()); // copy in the value CellUtil.copyValueTo(oldCell, newCell.getValueArray(), newCell.getValueOffset()); System.arraycopy(cell.getValueArray(), cell.getValueOffset(), - newCell.getValueArray(), - newCell.getValueOffset() + oldCell.getValueLength(), + newCell.getValueArray(), newCell.getValueOffset() + oldCell.getValueLength(), cell.getValueLength()); // Copy in tag data System.arraycopy(tagBytes, 0, newCell.getTagsArray(), newCell.getTagsOffset(), @@ -7220,7 +6989,7 @@ if (mutate.getTTL() != Long.MAX_VALUE) { List newTags = new ArrayList(1); newTags.add( - new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(mutate.getTTL()))); + new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(mutate.getTTL()))); // Add the new TTL tag newCell = new TagRewriteCell(cell, TagUtil.fromList(newTags)); } else { @@ -7231,7 +7000,7 @@ // Give coprocessors a chance to update the new cell if (coprocessorHost != null) { newCell = coprocessorHost.postMutationBeforeWAL(RegionObserver.MutationType.APPEND, - mutate, oldCell, newCell); + mutate, oldCell, newCell); } kvs.add(newCell); @@ -7244,7 +7013,7 @@ } } - //store the kvs to the temporary memstore before writing WAL + // store the kvs to the temporary memstore before writing WAL tempMemstore.put(store, kvs); } @@ -7255,15 +7024,11 @@ // cluster. 
A slave cluster receives the final value (not the delta) // as a Put. // we use HLogKey here instead of WALKey directly to support legacy coprocessors. - walKey = new HLogKey( - getRegionInfo().getEncodedNameAsBytes(), - this.htableDescriptor.getTableName(), - WALKey.NO_SEQUENCE_ID, - nonceGroup, - nonce, + walKey = new HLogKey(getRegionInfo().getEncodedNameAsBytes(), + this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, nonceGroup, nonce, mvcc); txid = - this.wal.append(this.htableDescriptor, getRegionInfo(), walKey, walEdits, true); + this.wal.append(this.htableDescriptor, getRegionInfo(), walKey, walEdits, true); } else { recordMutationWithoutWal(mutate.getFamilyCellMap()); } @@ -7276,7 +7041,6 @@ // now start my own transaction writeEntry = walKey.getWriteEntry(); - // Actually write to Memstore now if (!tempMemstore.isEmpty()) { for (Map.Entry> entry : tempMemstore.entrySet()) { @@ -7287,7 +7051,7 @@ size += store.upsert(entry.getValue(), getSmallestReadPoint()); } else { // otherwise keep older versions around - for (Cell cell: entry.getValue()) { + for (Cell cell : entry.getValue()) { CellUtil.setSequenceId(cell, writeEntry.getWriteNumber()); size += store.add(cell); doRollBackMemstore = true; @@ -7310,7 +7074,7 @@ rowLock = null; } // sync the transaction log outside the rowlock - if(txid != 0){ + if (txid != 0) { syncOrDefer(txid, durability); } doRollBackMemstore = false; @@ -7354,10 +7118,9 @@ // TODO: St.Ack 20150907 @Override - public Result increment(Increment mutation, long nonceGroup, long nonce) - throws IOException { + public Result increment(Increment mutation, long nonceGroup, long nonce) throws IOException { Operation op = Operation.INCREMENT; - byte [] row = mutation.getRow(); + byte[] row = mutation.getRow(); checkRow(row, op.toString()); checkFamilies(mutation.getFamilyCellMap().keySet()); boolean flush = false; @@ -7396,7 +7159,7 @@ } long now = EnvironmentEdgeManager.currentTime(); // Process each family - for (Map.Entry> family: mutation.getFamilyCellMap().entrySet()) { + for (Map.Entry> family : mutation.getFamilyCellMap().entrySet()) { Store store = stores.get(family.getKey()); List kvs = new ArrayList(family.getValue().size()); @@ -7423,7 +7186,7 @@ if (idx < results.size() && CellUtil.matchingQualifier(results.get(idx), cell)) { c = results.get(idx); ts = Math.max(now, c.getTimestamp()); - if(c.getValueLength() == Bytes.SIZEOF_LONG) { + if (c.getValueLength() == Bytes.SIZEOF_LONG) { amount += CellUtil.getValueAsLong(c); } else { // throw DoNotRetryIOException instead of IllegalArgumentException @@ -7444,21 +7207,17 @@ // Add the TTL tag if the mutation carried one if (mutation.getTTL() != Long.MAX_VALUE) { newTags.add( - new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(mutation.getTTL()))); + new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(mutation.getTTL()))); } - Cell newKV = new KeyValue(row, 0, row.length, - family.getKey(), 0, family.getKey().length, - q, 0, q.length, - ts, - KeyValue.Type.Put, - val, 0, val.length, - newTags); + Cell newKV = + new KeyValue(row, 0, row.length, family.getKey(), 0, family.getKey().length, q, 0, + q.length, ts, KeyValue.Type.Put, val, 0, val.length, newTags); // Give coprocessors a chance to update the new cell if (coprocessorHost != null) { - newKV = coprocessorHost.postMutationBeforeWAL( - RegionObserver.MutationType.INCREMENT, mutation, c, newKV); + newKV = coprocessorHost.postMutationBeforeWAL(RegionObserver.MutationType.INCREMENT, + mutation, c, newKV); } allKVs.add(newKV); @@ -7475,7 +7234,7 @@ } 
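For readers following the increment path reformatted above: the existing cell is treated as a counter only when its value is exactly Bytes.SIZEOF_LONG bytes, and the delta is added to that value. A hypothetical standalone sketch of that combine step (not the patched code, which throws DoNotRetryIOException on a bad length):

  // Illustration only: combine an optional existing counter cell with a delta.
  static byte[] combine(Cell existing, long amount) {
    long current = 0L;
    if (existing != null) {
      if (existing.getValueLength() != Bytes.SIZEOF_LONG) {
        // HRegion.increment() throws DoNotRetryIOException here.
        throw new IllegalArgumentException("Existing value is not a long");
      }
      current = CellUtil.getValueAsLong(existing);
    }
    return Bytes.toBytes(current + amount);
  }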
} - //store the kvs to the temporary memstore before writing WAL + // store the kvs to the temporary memstore before writing WAL if (!kvs.isEmpty()) { tempMemstore.put(store, kvs); } @@ -7489,13 +7248,10 @@ // as a Put. // we use HLogKey here instead of WALKey directly to support legacy coprocessors. walKey = new HLogKey(this.getRegionInfo().getEncodedNameAsBytes(), - this.htableDescriptor.getTableName(), - WALKey.NO_SEQUENCE_ID, - nonceGroup, - nonce, + this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, nonceGroup, nonce, mvcc); - txid = this.wal.append(this.htableDescriptor, this.getRegionInfo(), - walKey, walEdits, true); + txid = this.wal.append(this.htableDescriptor, this.getRegionInfo(), walKey, walEdits, + true); } else { recordMutationWithoutWal(mutation.getFamilyCellMap()); } @@ -7536,7 +7292,7 @@ rowLock = null; } // sync the transaction log outside the rowlock - if(txid != 0){ + if (txid != 0) { syncOrDefer(txid, durability); } doRollBackMemstore = false; @@ -7546,7 +7302,7 @@ } // if the wal sync was unsuccessful, remove keys from memstore if (doRollBackMemstore) { - for(List cells: tempMemstore.values()) { + for (List cells : tempMemstore.values()) { rollbackMemstore(cells); } if (writeEntry != null) mvcc.complete(writeEntry); @@ -7560,7 +7316,7 @@ } if (flush) { - // Request a cache flush. Do it outside update lock. + // Request a cache flush. Do it outside update lock. requestFlush(); } return mutation.isReturnResults() ? Result.create(allKVs) : null; @@ -7570,37 +7326,31 @@ // New HBASE-880 Helpers // - void checkFamily(final byte [] family) - throws NoSuchColumnFamilyException { + void checkFamily(final byte[] family) throws NoSuchColumnFamilyException { if (!this.htableDescriptor.hasFamily(family)) { - throw new NoSuchColumnFamilyException("Column family " + - Bytes.toString(family) + " does not exist in region " + this - + " in table " + this.htableDescriptor); + throw new NoSuchColumnFamilyException("Column family " + Bytes.toString(family) + + " does not exist in region " + this + " in table " + this.htableDescriptor); } } - public static final long FIXED_OVERHEAD = ClassSize.align( - ClassSize.OBJECT + - ClassSize.ARRAY + - 44 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT + - (14 * Bytes.SIZEOF_LONG) + - 5 * Bytes.SIZEOF_BOOLEAN); + public static final long FIXED_OVERHEAD = + ClassSize.align(ClassSize.OBJECT + ClassSize.ARRAY + 44 * ClassSize.REFERENCE + + 3 * Bytes.SIZEOF_INT + (14 * Bytes.SIZEOF_LONG) + 5 * Bytes.SIZEOF_BOOLEAN); // woefully out of date - currently missing: // 1 x HashMap - coprocessorServiceHandlers // 6 x Counter - numMutationsWithoutWAL, dataInMemoryWithoutWAL, - // checkAndMutateChecksPassed, checkAndMutateChecksFailed, readRequestsCount, - // writeRequestsCount + // checkAndMutateChecksPassed, checkAndMutateChecksFailed, readRequestsCount, + // writeRequestsCount // 1 x HRegion$WriteState - writestate // 1 x RegionCoprocessorHost - coprocessorHost // 1 x RegionSplitPolicy - splitPolicy // 1 x MetricsRegion - metricsRegion // 1 x MetricsRegionWrapperImpl - metricsRegionWrapper - public static final long DEEP_OVERHEAD = FIXED_OVERHEAD + - ClassSize.OBJECT + // closeLock + public static final long DEEP_OVERHEAD = FIXED_OVERHEAD + ClassSize.OBJECT + // closeLock (2 * ClassSize.ATOMIC_BOOLEAN) + // closed, closing (3 * ClassSize.ATOMIC_LONG) + // memStoreSize, numPutsWithoutWAL, dataInMemoryWithoutWAL - (2 * ClassSize.CONCURRENT_HASHMAP) + // lockedRows, scannerReadPoints + (2 * ClassSize.CONCURRENT_HASHMAP) + // lockedRows, 
scannerReadPoints WriteState.HEAP_SIZE + // writestate ClassSize.CONCURRENT_SKIPLISTMAP + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + // stores (2 * ClassSize.REENTRANT_LOCK) + // lock, updatesLock @@ -7621,14 +7371,13 @@ /* * This method calls System.exit. - * @param message Message to print out. May be null. + * @param message Message to print out. May be null. */ private static void printUsageAndExit(final String message) { if (message != null && message.length() > 0) System.out.println(message); System.out.println("Usage: HRegion CATALOG_TABLE_DIR [major_compact]"); System.out.println("Options:"); - System.out.println(" major_compact Pass this option to major compact " + - "passed region."); + System.out.println(" major_compact Pass this option to major compact " + "passed region."); System.out.println("Default outputs scan of passed region."); System.exit(1); } @@ -7640,17 +7389,16 @@ */ Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType(); if (coprocessorServiceHandlers.containsKey(serviceDesc.getFullName())) { - LOG.error("Coprocessor service " + serviceDesc.getFullName() + - " already registered, rejecting request from " + instance - ); + LOG.error("Coprocessor service " + serviceDesc.getFullName() + + " already registered, rejecting request from " + instance); return false; } coprocessorServiceHandlers.put(serviceDesc.getFullName(), instance); if (LOG.isDebugEnabled()) { - LOG.debug("Registered coprocessor service: region=" + - Bytes.toStringBinary(getRegionInfo().getRegionName()) + - " service=" + serviceDesc.getFullName()); + LOG.debug("Registered coprocessor service: region=" + + Bytes.toStringBinary(getRegionInfo().getRegionName()) + " service=" + + serviceDesc.getFullName()); } return true; } @@ -7661,9 +7409,8 @@ String serviceName = call.getServiceName(); String methodName = call.getMethodName(); if (!coprocessorServiceHandlers.containsKey(serviceName)) { - throw new UnknownProtocolException(null, - "No registered coprocessor service found for name "+serviceName+ - " in region "+Bytes.toStringBinary(getRegionInfo().getRegionName())); + throw new UnknownProtocolException(null, "No registered coprocessor service found for name " + + serviceName + " in region " + Bytes.toStringBinary(getRegionInfo().getRegionName())); } Service service = coprocessorServiceHandlers.get(serviceName); @@ -7671,8 +7418,8 @@ Descriptors.MethodDescriptor methodDesc = serviceDesc.findMethodByName(methodName); if (methodDesc == null) { throw new UnknownProtocolException(service.getClass(), - "Unknown method "+methodName+" called on service "+serviceName+ - " in region "+Bytes.toStringBinary(getRegionInfo().getRegionName())); + "Unknown method " + methodName + " called on service " + serviceName + " in region " + + Bytes.toStringBinary(getRegionInfo().getRegionName())); } Message.Builder builder = service.getRequestPrototype(methodDesc).newBuilderForType(); @@ -7707,23 +7454,19 @@ } /* - * Process table. - * Do major compaction or list content. + * Process table. Do major compaction or list content. * @throws IOException */ - private static void processTable(final FileSystem fs, final Path p, - final WALFactory walFactory, final Configuration c, - final boolean majorCompact) - throws IOException { + private static void processTable(final FileSystem fs, final Path p, final WALFactory walFactory, + final Configuration c, final boolean majorCompact) throws IOException { HRegion region; FSTableDescriptors fst = new FSTableDescriptors(c); // Currently expects tables have one region only. 
if (FSUtils.getTableName(p).equals(TableName.META_TABLE_NAME)) { - final WAL wal = walFactory.getMetaWAL( - HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes()); - region = HRegion.newHRegion(p, wal, fs, c, - HRegionInfo.FIRST_META_REGIONINFO, - fst.get(TableName.META_TABLE_NAME), null); + final WAL wal = + walFactory.getMetaWAL(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes()); + region = HRegion.newHRegion(p, wal, fs, c, HRegionInfo.FIRST_META_REGIONINFO, + fst.get(TableName.META_TABLE_NAME), null); } else { throw new IOException("Not a known catalog table: " + p.toString()); } @@ -7783,15 +7526,14 @@ } /** - * Return the splitpoint. null indicates the region isn't splittable - * If the splitpoint isn't explicitly specified, it will go over the stores - * to find the best splitpoint. Currently the criteria of best splitpoint - * is based on the size of the store. + * Return the splitpoint. null indicates the region isn't splittable If the splitpoint isn't + * explicitly specified, it will go over the stores to find the best splitpoint. Currently the + * criteria of best splitpoint is based on the size of the store. */ public byte[] checkSplit() { // Can't split META - if (this.getRegionInfo().isMetaTable() || - TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { + if (this.getRegionInfo().isMetaTable() + || TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { if (shouldForceSplit()) { LOG.warn("Cannot split meta region in HBase 0.20 and above"); } @@ -7832,7 +7574,6 @@ return count; } - /** @return the coprocessor host */ @Override public RegionCoprocessorHost getCoprocessorHost() { @@ -7850,11 +7591,11 @@ } @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", - justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SF_SWITCH_FALLTHROUGH", + justification = "Intentional") public void startRegionOperation(Operation op) throws IOException { switch (op) { - case GET: // read operations + case GET: // read operations case SCAN: checkReadsEnabled(); case INCREMENT: // write operations @@ -7866,10 +7607,10 @@ case BATCH_MUTATE: case COMPACT_REGION: // when a region is in recovering state, no read, split or merge is allowed - if (isRecovering() && (this.disallowWritesInRecovering || - (op != Operation.PUT && op != Operation.DELETE && op != Operation.BATCH_MUTATE))) { - throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + - " is recovering; cannot take reads"); + if (isRecovering() && (this.disallowWritesInRecovering + || (op != Operation.PUT && op != Operation.DELETE && op != Operation.BATCH_MUTATE))) { + throw new RegionInRecoveryException( + getRegionInfo().getRegionNameAsString() + " is recovering; cannot take reads"); } break; default: @@ -7905,8 +7646,8 @@ } /** - * Closes the lock. This needs to be called in the finally block corresponding - * to the try block of {@link #startRegionOperation(Operation)} + * Closes the lock. This needs to be called in the finally block corresponding to the try block of + * {@link #startRegionOperation(Operation)} * @throws IOException */ public void closeRegionOperation(Operation operation) throws IOException { @@ -7917,10 +7658,9 @@ } /** - * This method needs to be called before any public call that reads or - * modifies stores in bulk. It has to be called just before a try. 
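The contract spelled out in the startRegionOperation/closeRegionOperation javadoc above looks like this at a call site. A hedged outline only, assuming Operation is the nested Region.Operation enum used elsewhere in this class:

  // Caller-side sketch of the start/close pairing (illustration only).
  region.startRegionOperation(Region.Operation.GET);
  try {
    // ... perform the read against the region ...
  } finally {
    region.closeRegionOperation(Region.Operation.GET);
  }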
- * #closeBulkRegionOperation needs to be called in the try's finally block - * Acquires a writelock and checks if the region is closing or closed. + * This method needs to be called before any public call that reads or modifies stores in bulk. It + * has to be called just before a try. #closeBulkRegionOperation needs to be called in the try's + * finally block Acquires a writelock and checks if the region is closing or closed. * @throws NotServingRegionException when the region is closing or closed * @throws RegionTooBusyException if failed to get the lock in time * @throws InterruptedIOException if interrupted while waiting for a lock @@ -7940,30 +7680,30 @@ } /** - * Closes the lock. This needs to be called in the finally block corresponding - * to the try block of #startRegionOperation + * Closes the lock. This needs to be called in the finally block corresponding to the try block of + * #startRegionOperation */ - private void closeBulkRegionOperation(){ + private void closeBulkRegionOperation() { if (lock.writeLock().isHeldByCurrentThread()) lock.writeLock().unlock(); else lock.readLock().unlock(); } /** - * Update counters for number of puts without wal and the size of possible data loss. - * These information are exposed by the region server metrics. + * Update counters for number of puts without wal and the size of possible data loss. These + * information are exposed by the region server metrics. */ - private void recordMutationWithoutWal(final Map> familyMap) { + private void recordMutationWithoutWal(final Map> familyMap) { numMutationsWithoutWAL.increment(); if (numMutationsWithoutWAL.get() <= 1) { - LOG.info("writing data to region " + this + - " with WAL disabled. Data may be lost in the event of a crash."); + LOG.info("writing data to region " + this + + " with WAL disabled. Data may be lost in the event of a crash."); } long mutationSize = 0; - for (List cells: familyMap.values()) { + for (List cells : familyMap.values()) { assert cells instanceof RandomAccess; int listSize = cells.size(); - for (int i=0; i < listSize; i++) { + for (int i = 0; i < listSize; i++) { Cell cell = cells.get(i); // TODO we need include tags length also here. mutationSize += KeyValueUtil.keyLength(cell) + cell.getValueLength(); @@ -7973,28 +7713,26 @@ dataInMemoryWithoutWAL.add(mutationSize); } - private void lock(final Lock lock) - throws RegionTooBusyException, InterruptedIOException { + private void lock(final Lock lock) throws RegionTooBusyException, InterruptedIOException { lock(lock, 1); } /** - * Try to acquire a lock. Throw RegionTooBusyException - * if failed to get the lock in time. Throw InterruptedIOException - * if interrupted while waiting for the lock. + * Try to acquire a lock. Throw RegionTooBusyException if failed to get the lock in time. Throw + * InterruptedIOException if interrupted while waiting for the lock. */ private void lock(final Lock lock, final int multiplier) throws RegionTooBusyException, InterruptedIOException { try { final long waitTime = Math.min(maxBusyWaitDuration, - busyWaitDuration * Math.min(multiplier, maxBusyWaitMultiplier)); + busyWaitDuration * Math.min(multiplier, maxBusyWaitMultiplier)); if (!lock.tryLock(waitTime, TimeUnit.MILLISECONDS)) { throw new RegionTooBusyException( - "failed to get a lock in " + waitTime + " ms. " + - "regionName=" + (this.getRegionInfo() == null ? "unknown" : - this.getRegionInfo().getRegionNameAsString()) + - ", server=" + (this.getRegionServerServices() == null ? 
"unknown" : - this.getRegionServerServices().getServerName())); + "failed to get a lock in " + waitTime + " ms. " + "regionName=" + + (this.getRegionInfo() == null ? "unknown" + : this.getRegionInfo().getRegionNameAsString()) + + ", server=" + (this.getRegionServerServices() == null ? "unknown" + : this.getRegionServerServices().getServerName())); } } catch (InterruptedException ie) { LOG.info("Interrupted while waiting for a lock"); @@ -8005,8 +7743,7 @@ } /** - * Calls sync with the given transaction ID if the region's table is not - * deferring it. + * Calls sync with the given transaction ID if the region's table is not deferring it. * @param txid should sync up to which transaction * @throws IOException If anything goes wrong with DFS */ @@ -8014,7 +7751,7 @@ if (this.getRegionInfo().isMetaRegion()) { this.wal.sync(txid); } else { - switch(durability) { + switch (durability) { case USE_DEFAULT: // do what table defaults to if (shouldSyncWAL()) { @@ -8042,7 +7779,7 @@ * Check whether we should sync the wal from the table's durability settings */ private boolean shouldSyncWAL() { - return durability.ordinal() > Durability.ASYNC_WAL.ordinal(); + return durability.ordinal() > Durability.ASYNC_WAL.ordinal(); } /** @@ -8072,12 +7809,13 @@ }; /** - * Facility for dumping and compacting catalog tables. - * Only does catalog tables since these are only tables we for sure know - * schema on. For usage run: + * Facility for dumping and compacting catalog tables. Only does catalog tables since these are + * only tables we for sure know schema on. For usage run: + * *
     * <pre>
     *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
     * </pre>
+ * * @throws IOException */ public static void main(String[] args) throws IOException { @@ -8103,10 +7841,10 @@ try { processTable(fs, tableDir, wals, c, majorCompact); } finally { - wals.close(); - // TODO: is this still right? - BlockCache bc = new CacheConfig(c).getBlockCache(); - if (bc != null) bc.shutdown(); + wals.close(); + // TODO: is this still right? + BlockCache bc = new CacheConfig(c).getBlockCache(); + if (bc != null) bc.shutdown(); } } @@ -8132,7 +7870,7 @@ : (hasMinor ? CompactionState.MINOR : CompactionState.NONE)); } - public void reportCompactionRequestStart(boolean isMajor){ + public void reportCompactionRequestStart(boolean isMajor) { (isMajor ? majorInProgress : minorInProgress).incrementAndGet(); } @@ -8156,7 +7894,6 @@ return this.mvcc.getReadPoint(); } - /** * Append a faked WALEdit in order to get a long sequence number and wal syncer will just ignore * the WALEdit append later. @@ -8167,11 +7904,10 @@ private WALKey appendEmptyEdit(final WAL wal) throws IOException { // we use HLogKey here instead of WALKey directly to support legacy coprocessors. @SuppressWarnings("deprecation") - WALKey key = new HLogKey(getRegionInfo().getEncodedNameAsBytes(), - getRegionInfo().getTable(), WALKey.NO_SEQUENCE_ID, 0, null, - HConstants.NO_NONCE, HConstants.NO_NONCE, getMVCC()); + WALKey key = new HLogKey(getRegionInfo().getEncodedNameAsBytes(), getRegionInfo().getTable(), + WALKey.NO_SEQUENCE_ID, 0, null, HConstants.NO_NONCE, HConstants.NO_NONCE, getMVCC()); - // Call append but with an empty WALEdit. The returned sequence id will not be associated + // Call append but with an empty WALEdit. The returned sequence id will not be associated // with any edit and we can be sure it went in after all outstanding appends. try { wal.append(getTableDesc(), getRegionInfo(), key, WALEdit.EMPTY_WALEDIT, false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index cef92a6..9305dcd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -1,23 +1,14 @@ /** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ package org.apache.hadoop.hbase.regionserver; - import static org.apache.hadoop.hbase.HBaseTestingUtility.COLUMNS; import static org.apache.hadoop.hbase.HBaseTestingUtility.FIRST_CHAR; @@ -74,6 +65,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; @@ -96,7 +88,6 @@ import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Append; @@ -182,26 +173,25 @@ import com.google.protobuf.ByteString; /** - * Basic stand-alone testing of HRegion. No clusters! - * - * A lot of the meta information for an HRegion now lives inside other HRegions - * or in the HBaseMaster, so only basic testing is possible. + * Basic stand-alone testing of HRegion. No clusters! A lot of the meta information for an HRegion + * now lives inside other HRegions or in the HBaseMaster, so only basic testing is possible. */ -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) @SuppressWarnings("deprecation") public class TestHRegion { // Do not spin up clusters in here. If you need to spin up a cluster, do it // over in TestHRegionOnCluster. private static final Log LOG = LogFactory.getLog(TestHRegion.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static final String COLUMN_FAMILY = "MyCF"; - private static final byte [] COLUMN_FAMILY_BYTES = Bytes.toBytes(COLUMN_FAMILY); + private static final byte[] COLUMN_FAMILY_BYTES = Bytes.toBytes(COLUMN_FAMILY); HRegion region = null; - // Do not run unit tests in parallel (? Why not? It don't work? Why not? St.Ack) + // Do not run unit tests in parallel (? Why not? It don't work? Why not? St.Ack) private static HBaseTestingUtility TEST_UTIL; - public static Configuration CONF ; + public static Configuration CONF; private String dir; private static FileSystem FILESYSTEM; private final int MAX_VERSIONS = 2; @@ -217,8 +207,8 @@ protected final byte[] row = Bytes.toBytes("rowA"); protected final byte[] row2 = Bytes.toBytes("rowB"); - protected final MetricsAssertHelper metricsAssertHelper = CompatibilitySingletonFactory - .getInstance(MetricsAssertHelper.class); + protected final MetricsAssertHelper metricsAssertHelper = + CompatibilitySingletonFactory.getInstance(MetricsAssertHelper.class); @Before public void setup() throws IOException { @@ -245,25 +235,25 @@ * Test that I can use the max flushed sequence id after the close. 
* @throws IOException */ - @Test (timeout = 100000) + @Test(timeout = 100000) public void testSequenceId() throws IOException { HRegion region = initHRegion(tableName, name.getMethodName(), CONF, COLUMN_FAMILY_BYTES); assertEquals(HConstants.NO_SEQNUM, region.getMaxFlushedSeqId()); // Weird. This returns 0 if no store files or no edits. Afraid to change it. - assertEquals(0, (long)region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES)); + assertEquals(0, (long) region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES)); region.close(); assertEquals(HConstants.NO_SEQNUM, region.getMaxFlushedSeqId()); - assertEquals(0, (long)region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES)); + assertEquals(0, (long) region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES)); // Open region again. region = initHRegion(tableName, name.getMethodName(), CONF, COLUMN_FAMILY_BYTES); - byte [] value = Bytes.toBytes(name.getMethodName()); + byte[] value = Bytes.toBytes(name.getMethodName()); // Make a random put against our cf. Put put = new Put(value); put.addColumn(COLUMN_FAMILY_BYTES, null, value); region.put(put); // No flush yet so init numbers should still be in place. assertEquals(HConstants.NO_SEQNUM, region.getMaxFlushedSeqId()); - assertEquals(0, (long)region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES)); + assertEquals(0, (long) region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES)); region.flush(true); long max = region.getMaxFlushedSeqId(); region.close(); @@ -271,21 +261,20 @@ } /** - * Test for Bug 2 of HBASE-10466. - * "Bug 2: Conditions for the first flush of region close (so-called pre-flush) If memstoreSize - * is smaller than a certain value, or when region close starts a flush is ongoing, the first - * flush is skipped and only the second flush takes place. However, two flushes are required in - * case previous flush fails and leaves some data in snapshot. The bug could cause loss of data - * in current memstore. The fix is removing all conditions except abort check so we ensure 2 - * flushes for region close." + * Test for Bug 2 of HBASE-10466. "Bug 2: Conditions for the first flush of region close + * (so-called pre-flush) If memstoreSize is smaller than a certain value, or when region close + * starts a flush is ongoing, the first flush is skipped and only the second flush takes place. + * However, two flushes are required in case previous flush fails and leaves some data in + * snapshot. The bug could cause loss of data in current memstore. The fix is removing all + * conditions except abort check so we ensure 2 flushes for region close." * @throws IOException */ - @Test (timeout=60000) + @Test(timeout = 60000) public void testCloseCarryingSnapshot() throws IOException { HRegion region = initHRegion(tableName, name.getMethodName(), CONF, COLUMN_FAMILY_BYTES); Store store = region.getStore(COLUMN_FAMILY_BYTES); // Get some random bytes. - byte [] value = Bytes.toBytes(name.getMethodName()); + byte[] value = Bytes.toBytes(name.getMethodName()); // Make a random put against our cf. Put put = new Put(value); put.addColumn(COLUMN_FAMILY_BYTES, null, value); @@ -296,22 +285,21 @@ // Second put something in current memstore put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value); region.put(put); - // Close with something in memstore and something in the snapshot. Make sure all is cleared. + // Close with something in memstore and something in the snapshot. Make sure all is cleared. 
region.close(); assertEquals(0, region.getMemstoreSize()); HBaseTestingUtility.closeRegionAndWAL(region); } - - /* - * This test is for verifying memstore snapshot size is correctly updated in case of rollback - * See HBASE-10845 + * This test is for verifying memstore snapshot size is correctly updated in case of rollback See + * HBASE-10845 */ - @Test (timeout=60000) + @Test(timeout = 60000) public void testMemstoreSnapshotSize() throws IOException { class MyFaultyFSLog extends FaultyFSLog { StoreFlushContext storeFlushCtx; + public MyFaultyFSLog(FileSystem fs, Path rootDir, String logName, Configuration conf) throws IOException { super(fs, rootDir, logName, conf); @@ -331,12 +319,12 @@ FileSystem fs = FileSystem.get(CONF); Path rootDir = new Path(dir + "testMemstoreSnapshotSize"); MyFaultyFSLog faultyLog = new MyFaultyFSLog(fs, rootDir, "testMemstoreSnapshotSize", CONF); - HRegion region = initHRegion(tableName, null, null, name.getMethodName(), - CONF, false, Durability.SYNC_WAL, faultyLog, COLUMN_FAMILY_BYTES); + HRegion region = initHRegion(tableName, null, null, name.getMethodName(), CONF, false, + Durability.SYNC_WAL, faultyLog, COLUMN_FAMILY_BYTES); Store store = region.getStore(COLUMN_FAMILY_BYTES); // Get some random bytes. - byte [] value = Bytes.toBytes(name.getMethodName()); + byte[] value = Bytes.toBytes(name.getMethodName()); faultyLog.setStoreFlushCtx(store.createFlushContext(12345)); Put put = new Put(value); @@ -369,8 +357,8 @@ final Configuration walConf = new Configuration(conf); FSUtils.setRootDir(walConf, logDir); return (new WALFactory(walConf, - Collections.singletonList(new MetricsWAL()), callingMethod)) - .getWAL(tableName.toBytes(), tableName.getNamespace()); + Collections. singletonList(new MetricsWAL()), callingMethod)) + .getWAL(tableName.toBytes(), tableName.getNamespace()); } /** @@ -381,13 +369,13 @@ FileSystem fs = FileSystem.get(CONF); Path rootDir = new Path(dir + "testMemstoreSizeWithFlushCanceling"); FSHLog hLog = new FSHLog(fs, rootDir, "testMemstoreSizeWithFlushCanceling", CONF); - HRegion region = initHRegion(tableName, null, null, name.getMethodName(), - CONF, false, Durability.SYNC_WAL, hLog, COLUMN_FAMILY_BYTES); + HRegion region = initHRegion(tableName, null, null, name.getMethodName(), CONF, false, + Durability.SYNC_WAL, hLog, COLUMN_FAMILY_BYTES); Store store = region.getStore(COLUMN_FAMILY_BYTES); assertEquals(0, region.getMemstoreSize()); // Put some value and make sure flush could be completed normally - byte [] value = Bytes.toBytes(name.getMethodName()); + byte[] value = Bytes.toBytes(name.getMethodName()); Put put = new Put(value); put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value); region.put(put); @@ -400,8 +388,8 @@ // save normalCPHost and replaced by mockedCPHost, which will cancel flush requests RegionCoprocessorHost normalCPHost = region.getCoprocessorHost(); RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class); - when(mockedCPHost.preFlush(Mockito.isA(HStore.class), Mockito.isA(InternalScanner.class))). - thenReturn(null); + when(mockedCPHost.preFlush(Mockito.isA(HStore.class), Mockito.isA(InternalScanner.class))) + .thenReturn(null); region.setCoprocessorHost(mockedCPHost); region.put(put); region.flush(true); @@ -417,21 +405,20 @@ } /** - * Test we do not lose data if we fail a flush and then close. - * Part of HBase-10466. 
Tests the following from the issue description: - * "Bug 1: Wrong calculation of HRegion.memstoreSize: When a flush fails, data to be flushed is - * kept in each MemStore's snapshot and wait for next flush attempt to continue on it. But when - * the next flush succeeds, the counter of total memstore size in HRegion is always deduced by - * the sum of current memstore sizes instead of snapshots left from previous failed flush. This - * calculation is problematic that almost every time there is failed flush, HRegion.memstoreSize - * gets reduced by a wrong value. If region flush could not proceed for a couple cycles, the size - * in current memstore could be much larger than the snapshot. It's likely to drift memstoreSize - * much smaller than expected. In extreme case, if the error accumulates to even bigger than - * HRegion's memstore size limit, any further flush is skipped because flush does not do anything - * if memstoreSize is not larger than 0." + * Test we do not lose data if we fail a flush and then close. Part of HBase-10466. Tests the + * following from the issue description: "Bug 1: Wrong calculation of HRegion.memstoreSize: When a + * flush fails, data to be flushed is kept in each MemStore's snapshot and wait for next flush + * attempt to continue on it. But when the next flush succeeds, the counter of total memstore size + * in HRegion is always deduced by the sum of current memstore sizes instead of snapshots left + * from previous failed flush. This calculation is problematic that almost every time there is + * failed flush, HRegion.memstoreSize gets reduced by a wrong value. If region flush could not + * proceed for a couple cycles, the size in current memstore could be much larger than the + * snapshot. It's likely to drift memstoreSize much smaller than expected. In extreme case, if the + * error accumulates to even bigger than HRegion's memstore size limit, any further flush is + * skipped because flush does not do anything if memstoreSize is not larger than 0." * @throws Exception */ - @Test (timeout=60000) + @Test(timeout = 60000) public void testFlushSizeAccounting() throws Exception { final Configuration conf = HBaseConfiguration.create(CONF); final String callingMethod = name.getMethodName(); @@ -439,7 +426,7 @@ // Only retry once. conf.setInt("hbase.hstore.flush.retries.number", 1); final User user = - User.createUserForTesting(conf, this.name.getMethodName(), new String[]{"foo"}); + User.createUserForTesting(conf, this.name.getMethodName(), new String[] { "foo" }); // Inject our faulty LocalFileSystem conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class); user.runAs(new PrivilegedExceptionAction() { @@ -448,15 +435,15 @@ // Make sure it worked (above is sensitive to caching details in hadoop core) FileSystem fs = FileSystem.get(conf); Assert.assertEquals(FaultyFileSystem.class, fs.getClass()); - FaultyFileSystem ffs = (FaultyFileSystem)fs; + FaultyFileSystem ffs = (FaultyFileSystem) fs; HRegion region = null; try { // Initialize region region = initHRegion(tableName, null, null, callingMethod, conf, false, - Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); + Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); long size = region.getMemstoreSize(); Assert.assertEquals(0, size); - // Put one item into memstore. Measure the size of one item in memstore. + // Put one item into memstore. Measure the size of one item in memstore. 
Put p1 = new Put(row); p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null)); region.put(p1); @@ -472,16 +459,16 @@ } // Make it so all writes succeed from here on out ffs.fault.set(false); - // Check sizes. Should still be the one entry. + // Check sizes. Should still be the one entry. Assert.assertEquals(sizeOfOnePut, region.getMemstoreSize()); // Now add two entries so that on this next flush that fails, we can see if we // subtract the right amount, the snapshot size only. Put p2 = new Put(row); - p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[])null)); - p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[])null)); + p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[]) null)); + p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[]) null)); region.put(p2); Assert.assertEquals(sizeOfOnePut * 3, region.getMemstoreSize()); - // Do a successful flush. It will clear the snapshot only. Thats how flushes work. + // Do a successful flush. It will clear the snapshot only. Thats how flushes work. // If already a snapshot, we clear it else we move the memstore to be snapshot and flush // it region.flush(true); @@ -496,7 +483,7 @@ FileSystem.closeAllForUGI(user.getUGI()); } - @Test (timeout=60000) + @Test(timeout = 60000) public void testCloseWithFailingFlush() throws Exception { final Configuration conf = HBaseConfiguration.create(CONF); final String callingMethod = name.getMethodName(); @@ -504,7 +491,7 @@ // Only retry once. conf.setInt("hbase.hstore.flush.retries.number", 1); final User user = - User.createUserForTesting(conf, this.name.getMethodName(), new String[]{"foo"}); + User.createUserForTesting(conf, this.name.getMethodName(), new String[] { "foo" }); // Inject our faulty LocalFileSystem conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class); user.runAs(new PrivilegedExceptionAction() { @@ -513,17 +500,17 @@ // Make sure it worked (above is sensitive to caching details in hadoop core) FileSystem fs = FileSystem.get(conf); Assert.assertEquals(FaultyFileSystem.class, fs.getClass()); - FaultyFileSystem ffs = (FaultyFileSystem)fs; + FaultyFileSystem ffs = (FaultyFileSystem) fs; HRegion region = null; try { // Initialize region region = initHRegion(tableName, null, null, callingMethod, conf, false, - Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); + Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); long size = region.getMemstoreSize(); Assert.assertEquals(0, size); - // Put one item into memstore. Measure the size of one item in memstore. + // Put one item into memstore. Measure the size of one item in memstore. Put p1 = new Put(row); - p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[])null)); + p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null)); region.put(p1); // Manufacture an outstanding snapshot -- fake a failed flush by doing prepare step only. Store store = region.getStore(COLUMN_FAMILY_BYTES); @@ -531,8 +518,8 @@ storeFlushCtx.prepare(); // Now add two entries to the foreground memstore. Put p2 = new Put(row); - p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[])null)); - p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[])null)); + p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[]) null)); + p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[]) null)); region.put(p2); // Now try close on top of a failing flush. 
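      // At this point the region holds both an outstanding snapshot (from the prepare()-only
      // flush manufactured above) and two fresh edits in the active memstore, and the injected
      // FaultyFileSystem is meant to make the flush attempted during close fail; the close below
      // therefore has to cope with both portions of the memstore without losing track of their
      // sizes.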
region.close(); @@ -653,10 +640,10 @@ long time = System.nanoTime(); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes - .toBytes(i))); - writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time, - HConstants.DEFAULT_CLUSTER_ID), edit)); + edit.add( + new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i))); + writer.append(new WAL.Entry( + new HLogKey(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit)); writer.close(); } @@ -706,10 +693,10 @@ long time = System.nanoTime(); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes - .toBytes(i))); - writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time, - HConstants.DEFAULT_CLUSTER_ID), edit)); + edit.add( + new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i))); + writer.append(new WAL.Entry( + new HLogKey(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit)); writer.close(); } @@ -801,20 +788,19 @@ WALEdit edit = null; if (i == maxSeqId) { edit = WALEdit.createCompaction(region.getRegionInfo(), - CompactionDescriptor.newBuilder() - .setTableName(ByteString.copyFrom(tableName.getName())) - .setFamilyName(ByteString.copyFrom(regionName)) - .setEncodedRegionName(ByteString.copyFrom(regionName)) - .setStoreHomeDirBytes(ByteString.copyFrom(Bytes.toBytes(regiondir.toString()))) - .setRegionName(ByteString.copyFrom(region.getRegionInfo().getRegionName())) - .build()); + CompactionDescriptor.newBuilder().setTableName(ByteString.copyFrom(tableName.getName())) + .setFamilyName(ByteString.copyFrom(regionName)) + .setEncodedRegionName(ByteString.copyFrom(regionName)) + .setStoreHomeDirBytes(ByteString.copyFrom(Bytes.toBytes(regiondir.toString()))) + .setRegionName(ByteString.copyFrom(region.getRegionInfo().getRegionName())) + .build()); } else { edit = new WALEdit(); - edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes - .toBytes(i))); + edit.add( + new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i))); } - writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time, - HConstants.DEFAULT_CLUSTER_ID), edit)); + writer.append(new WAL.Entry( + new HLogKey(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit)); writer.close(); } @@ -842,6 +828,7 @@ testRecoveredEditsReplayCompaction(false); testRecoveredEditsReplayCompaction(true); } + public void testRecoveredEditsReplayCompaction(boolean mismatchedRegionName) throws Exception { String method = name.getMethodName(); TableName tableName = TableName.valueOf(method); @@ -885,23 +872,23 @@ assertNotNull(errorMsg, files); assertEquals(errorMsg, 1, files.length); // move the file inside region dir - Path newFile = region.getRegionFileSystem().commitStoreFile(Bytes.toString(family), - files[0].getPath()); + Path newFile = + region.getRegionFileSystem().commitStoreFile(Bytes.toString(family), files[0].getPath()); byte[] encodedNameAsBytes = this.region.getRegionInfo().getEncodedNameAsBytes(); - byte[] fakeEncodedNameAsBytes = new byte [encodedNameAsBytes.length]; - for (int i=0; i < encodedNameAsBytes.length; i++) { + byte[] fakeEncodedNameAsBytes = new byte[encodedNameAsBytes.length]; + for (int i = 0; i < encodedNameAsBytes.length; i++) { // Mix the byte array to have a new encodedName fakeEncodedNameAsBytes[i] = (byte) (encodedNameAsBytes[i] + 1); } - 
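    // The descriptor written below records the compaction in the recovered edits. When
    // mismatchedRegionName is true it deliberately carries the perturbed encoded name built
    // above (fakeEncodedNameAsBytes), the idea being that replay on region open should recognize
    // the marker is not for this region and skip it rather than fail.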
CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(this.region - .getRegionInfo(), mismatchedRegionName ? fakeEncodedNameAsBytes : null, family, - storeFiles, Lists.newArrayList(newFile), - region.getRegionFileSystem().getStoreDir(Bytes.toString(family))); + CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor( + this.region.getRegionInfo(), mismatchedRegionName ? fakeEncodedNameAsBytes : null, family, + storeFiles, Lists.newArrayList(newFile), + region.getRegionFileSystem().getStoreDir(Bytes.toString(family))); WALUtil.writeCompactionMarker(region.getWAL(), this.region.getTableDesc(), - this.region.getRegionInfo(), compactionDescriptor, region.getMVCC()); + this.region.getRegionInfo(), compactionDescriptor, region.getMVCC()); Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir); @@ -911,9 +898,9 @@ long time = System.nanoTime(); - writer.append(new WAL.Entry(new HLogKey(regionName, tableName, 10, time, - HConstants.DEFAULT_CLUSTER_ID), WALEdit.createCompaction(region.getRegionInfo(), - compactionDescriptor))); + writer.append( + new WAL.Entry(new HLogKey(regionName, tableName, 10, time, HConstants.DEFAULT_CLUSTER_ID), + WALEdit.createCompaction(region.getRegionInfo(), compactionDescriptor))); writer.close(); // close the region now, and reopen again @@ -962,8 +949,8 @@ final WALFactory wals = new WALFactory(walConf, null, method); final WAL wal = wals.getWAL(tableName.getName(), tableName.getNamespace()); - this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, wal, family); + this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, + method, CONF, false, Durability.USE_DEFAULT, wal, family); try { Path regiondir = region.getRegionFileSystem().getRegionDir(); FileSystem fs = region.getRegionFileSystem().getFileSystem(); @@ -1010,14 +997,14 @@ } lastFlushSeqId = flushDesc.getFlushSequenceNumber(); assertArrayEquals(regionName, flushDesc.getEncodedRegionName().toByteArray()); - assertEquals(1, flushDesc.getStoreFlushesCount()); //only one store + assertEquals(1, flushDesc.getStoreFlushesCount()); // only one store StoreFlushDescriptor storeFlushDesc = flushDesc.getStoreFlushes(0); assertArrayEquals(family, storeFlushDesc.getFamilyName().toByteArray()); assertEquals("family", storeFlushDesc.getStoreHomeDir()); if (flushDesc.getAction() == FlushAction.START_FLUSH) { assertEquals(0, storeFlushDesc.getFlushOutputCount()); } else { - assertEquals(1, storeFlushDesc.getFlushOutputCount()); //only one file from flush + assertEquals(1, storeFlushDesc.getFlushOutputCount()); // only one file from flush assertTrue(storeFiles.contains(storeFlushDesc.getFlushOutput(0))); } @@ -1050,7 +1037,6 @@ } } - // close the region now, and reopen again region.close(); region = HRegion.openHRegion(region, null); @@ -1071,12 +1057,14 @@ class IsFlushWALMarker extends ArgumentMatcher { volatile FlushAction[] actions; + public IsFlushWALMarker(FlushAction... actions) { this.actions = actions; } + @Override public boolean matches(Object edit) { - List cells = ((WALEdit)edit).getCells(); + List cells = ((WALEdit) edit).getCells(); if (cells.isEmpty()) { return false; } @@ -1098,13 +1086,14 @@ } return false; } + public IsFlushWALMarker set(FlushAction... 
actions) { this.actions = actions; return this; } } - @Test (timeout=60000) + @Test(timeout = 60000) public void testFlushMarkersWALFail() throws Exception { // test the cases where the WAL append for flush markers fail. String method = name.getMethodName(); @@ -1118,10 +1107,10 @@ FSUtils.setRootDir(walConf, logDir); // Make up a WAL that we can manipulate at append time. class FailAppendFlushMarkerWAL extends FSHLog { - volatile FlushAction [] flushActions = null; + volatile FlushAction[] flushActions = null; public FailAppendFlushMarkerWAL(FileSystem fs, Path root, String logDir, Configuration conf) - throws IOException { + throws IOException { super(fs, root, logDir, conf); } @@ -1145,7 +1134,7 @@ if (WALEdit.isMetaEditFamily(cells.get(0))) { FlushDescriptor desc = WALEdit.getFlushDescriptor(cells.get(0)); if (desc != null) { - for (FlushAction flushAction: flushActions) { + for (FlushAction flushAction : flushActions) { if (desc.getAction().equals(flushAction)) { throw new IOException("Failed to append flush marker! " + flushAction); } @@ -1162,11 +1151,10 @@ }; } } - FailAppendFlushMarkerWAL wal = - new FailAppendFlushMarkerWAL(FileSystem.get(walConf), FSUtils.getRootDir(walConf), - getName(), walConf); - this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, wal, family); + FailAppendFlushMarkerWAL wal = new FailAppendFlushMarkerWAL(FileSystem.get(walConf), + FSUtils.getRootDir(walConf), getName(), walConf); + this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, + method, CONF, false, Durability.USE_DEFAULT, wal, family); try { int i = 0; Put put = new Put(Bytes.toBytes(i)); @@ -1175,7 +1163,7 @@ region.put(put); // 1. Test case where START_FLUSH throws exception - wal.flushActions = new FlushAction [] {FlushAction.START_FLUSH}; + wal.flushActions = new FlushAction[] { FlushAction.START_FLUSH }; // start cache flush will throw exception try { @@ -1193,18 +1181,18 @@ wal.close(); // 2. Test case where START_FLUSH succeeds but COMMIT_FLUSH will throw exception - wal.flushActions = new FlushAction [] {FlushAction.COMMIT_FLUSH}; + wal.flushActions = new FlushAction[] { FlushAction.COMMIT_FLUSH }; wal = new FailAppendFlushMarkerWAL(FileSystem.get(walConf), FSUtils.getRootDir(walConf), - getName(), walConf); + getName(), walConf); - this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, wal, family); + this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, + method, CONF, false, Durability.USE_DEFAULT, wal, family); region.put(put); // 3. Test case where ABORT_FLUSH will throw exception. // Even if ABORT_FLUSH throws exception, we should not fail with IOE, but continue with // DroppedSnapshotException. Below COMMMIT_FLUSH will cause flush to abort - wal.flushActions = new FlushAction [] {FlushAction.COMMIT_FLUSH, FlushAction.ABORT_FLUSH}; + wal.flushActions = new FlushAction[] { FlushAction.COMMIT_FLUSH, FlushAction.ABORT_FLUSH }; try { region.flush(true); @@ -1280,8 +1268,8 @@ } /* - * Thread that does get on single row until 'done' flag is flipped. If an - * exception causes us to fail, it records it. + * Thread that does get on single row until 'done' flag is flipped. If an exception causes us to + * fail, it records it. 
*/ class GetTillDoneOrException extends Thread { private final Get g; @@ -1289,7 +1277,8 @@ private final AtomicInteger count; private Exception e; - GetTillDoneOrException(final int i, final byte[] r, final AtomicBoolean d, final AtomicInteger c) { + GetTillDoneOrException(final int i, final byte[] r, final AtomicBoolean d, + final AtomicInteger c) { super("getter." + i); this.g = new Get(r); this.done = d; @@ -1333,23 +1322,23 @@ putRows(this.region, 3, value2, keyPrefix3); System.out.println("Checking values for key: " + keyPrefix1); assertEquals("Got back incorrect number of rows from scan", 3, - getNumberOfRows(keyPrefix1, value2, this.region)); + getNumberOfRows(keyPrefix1, value2, this.region)); System.out.println("Checking values for key: " + keyPrefix2); assertEquals("Got back incorrect number of rows from scan", 3, - getNumberOfRows(keyPrefix2, value2, this.region)); + getNumberOfRows(keyPrefix2, value2, this.region)); System.out.println("Checking values for key: " + keyPrefix3); assertEquals("Got back incorrect number of rows from scan", 3, - getNumberOfRows(keyPrefix3, value2, this.region)); + getNumberOfRows(keyPrefix3, value2, this.region)); deleteColumns(this.region, value2, keyPrefix1); deleteColumns(this.region, value2, keyPrefix2); deleteColumns(this.region, value2, keyPrefix3); System.out.println("Starting important checks....."); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix1, 0, - getNumberOfRows(keyPrefix1, value2, this.region)); + getNumberOfRows(keyPrefix1, value2, this.region)); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix2, 0, - getNumberOfRows(keyPrefix2, value2, this.region)); + getNumberOfRows(keyPrefix2, value2, this.region)); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix3, 0, - getNumberOfRows(keyPrefix3, value2, this.region)); + getNumberOfRows(keyPrefix3, value2, this.region)); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region = null; @@ -1364,7 +1353,7 @@ Append append = new Append(Bytes.toBytes("somerow")); append.setDurability(Durability.SKIP_WAL); append.add(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"), - Bytes.toBytes("somevalue")); + Bytes.toBytes("somevalue")); try { region.append(append); } catch (IOException e) { @@ -1402,10 +1391,8 @@ List results = new ArrayList(); do { more = scanner.next(results); - if (results != null && !results.isEmpty()) - count++; - else - break; + if (results != null && !results.isEmpty()) count++; + else break; Delete delete = new Delete(CellUtil.cloneRow(results.get(0))); delete.addColumn(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2")); r.delete(delete); @@ -1421,10 +1408,8 @@ boolean more = false; do { more = resultScanner.next(results); - if (results != null && !results.isEmpty()) - numberOfResults++; - else - break; + if (results != null && !results.isEmpty()) numberOfResults++; + else break; for (Cell kv : results) { System.out.println("kv=" + kv.toString() + ", " + Bytes.toString(CellUtil.cloneValue(kv))); } @@ -1521,7 +1506,7 @@ assertEquals(10, codes.length); for (int i = 0; i < 10; i++) { assertEquals((i == 5) ? 
OperationStatusCode.BAD_FAMILY : OperationStatusCode.SUCCESS, - codes[i].getOperationStatusCode()); + codes[i].getOperationStatusCode()); } metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 2, source); @@ -1555,9 +1540,9 @@ RowLock rowLock3 = region.getRowLock(Bytes.toBytes("row_3")); RowLock rowLock4 = region.getRowLock(Bytes.toBytes("row_3"), true); - MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(CONF); - final AtomicReference retFromThread = new AtomicReference(); + final AtomicReference retFromThread = + new AtomicReference(); final CountDownLatch startingPuts = new CountDownLatch(1); final CountDownLatch startingClose = new CountDownLatch(1); TestThread putter = new TestThread(ctx) { @@ -1571,10 +1556,10 @@ ctx.addThread(putter); ctx.startThreads(); - // Now attempt to close the region from another thread. Prior to HBASE-12565 + // Now attempt to close the region from another thread. Prior to HBASE-12565 // this would cause the in-progress batchMutate operation to to fail with // exception because it use to release and re-acquire the close-guard lock - // between batches. Caller then didn't get status indicating which writes succeeded. + // between batches. Caller then didn't get status indicating which writes succeeded. // We now expect this thread to block until the batchMutate call finishes. Thread regionCloseThread = new TestThread(ctx) { @Override @@ -1611,7 +1596,7 @@ OperationStatus[] codes = retFromThread.get(); for (int i = 0; i < codes.length; i++) { assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY : OperationStatusCode.SUCCESS, - codes[i].getOperationStatusCode()); + codes[i].getOperationStatusCode()); } rowLock4.release(); } finally { @@ -1690,8 +1675,8 @@ put.addColumn(fam1, qf1, emptyVal); // checkAndPut with empty value - boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator( - emptyVal), put, true); + boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, + new BinaryComparator(emptyVal), put, true); assertTrue(res); // Putting data in key @@ -1700,25 +1685,25 @@ // checkAndPut with correct value res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(emptyVal), - put, true); + put, true); assertTrue(res); // not empty anymore res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(emptyVal), - put, true); + put, true); assertFalse(res); Delete delete = new Delete(row1); delete.addColumn(fam1, qf1); res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(emptyVal), - delete, true); + delete, true); assertFalse(res); put = new Put(row1); put.addColumn(fam1, qf1, val2); // checkAndPut with correct value - res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(val1), - put, true); + res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(val1), put, + true); assertTrue(res); // checkAndDelete with correct value @@ -1726,20 +1711,20 @@ delete.addColumn(fam1, qf1); delete.addColumn(fam1, qf1); res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(val2), - delete, true); + delete, true); assertTrue(res); delete = new Delete(row1); res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(emptyVal), - delete, true); + delete, true); assertTrue(res); // checkAndPut looking for a null value put = new Put(row1); put.addColumn(fam1, qf1, val1); - res = region - .checkAndMutate(row1, fam1, qf1, 
CompareOp.EQUAL, new NullComparator(), put, true); + res = + region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new NullComparator(), put, true); assertTrue(res); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -1765,15 +1750,15 @@ region.put(put); // checkAndPut with wrong value - boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator( - val2), put, true); + boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, + new BinaryComparator(val2), put, true); assertEquals(false, res); // checkAndDelete with wrong value Delete delete = new Delete(row1); delete.addFamily(fam1); - res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(val2), - put, true); + res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(val2), put, + true); assertEquals(false, res); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -1798,15 +1783,15 @@ region.put(put); // checkAndPut with correct value - boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator( - val1), put, true); + boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, + new BinaryComparator(val1), put, true); assertEquals(true, res); // checkAndDelete with correct value Delete delete = new Delete(row1); delete.addColumn(fam1, qf1); res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(val1), - delete, true); + delete, true); assertEquals(true, res); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -1835,31 +1820,31 @@ // Test CompareOp.LESS: original = val3, compare with val3, fail boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.LESS, - new BinaryComparator(val3), put, true); + new BinaryComparator(val3), put, true); assertEquals(false, res); // Test CompareOp.LESS: original = val3, compare with val4, fail - res = region.checkAndMutate(row1, fam1, qf1, CompareOp.LESS, - new BinaryComparator(val4), put, true); + res = region.checkAndMutate(row1, fam1, qf1, CompareOp.LESS, new BinaryComparator(val4), put, + true); assertEquals(false, res); // Test CompareOp.LESS: original = val3, compare with val2, // succeed (now value = val2) put = new Put(row1); put.addColumn(fam1, qf1, val2); - res = region.checkAndMutate(row1, fam1, qf1, CompareOp.LESS, - new BinaryComparator(val2), put, true); + res = region.checkAndMutate(row1, fam1, qf1, CompareOp.LESS, new BinaryComparator(val2), put, + true); assertEquals(true, res); // Test CompareOp.LESS_OR_EQUAL: original = val2, compare with val3, fail res = region.checkAndMutate(row1, fam1, qf1, CompareOp.LESS_OR_EQUAL, - new BinaryComparator(val3), put, true); + new BinaryComparator(val3), put, true); assertEquals(false, res); // Test CompareOp.LESS_OR_EQUAL: original = val2, compare with val2, // succeed (value still = val2) res = region.checkAndMutate(row1, fam1, qf1, CompareOp.LESS_OR_EQUAL, - new BinaryComparator(val2), put, true); + new BinaryComparator(val2), put, true); assertEquals(true, res); // Test CompareOp.LESS_OR_EQUAL: original = val2, compare with val1, @@ -1867,41 +1852,41 @@ put = new Put(row1); put.addColumn(fam1, qf1, val3); res = region.checkAndMutate(row1, fam1, qf1, CompareOp.LESS_OR_EQUAL, - new BinaryComparator(val1), put, true); + new BinaryComparator(val1), put, true); assertEquals(true, res); // Test CompareOp.GREATER: original = val3, compare with val3, fail - res = region.checkAndMutate(row1, fam1, qf1, CompareOp.GREATER, - new 
BinaryComparator(val3), put, true); + res = region.checkAndMutate(row1, fam1, qf1, CompareOp.GREATER, new BinaryComparator(val3), + put, true); assertEquals(false, res); // Test CompareOp.GREATER: original = val3, compare with val2, fail - res = region.checkAndMutate(row1, fam1, qf1, CompareOp.GREATER, - new BinaryComparator(val2), put, true); + res = region.checkAndMutate(row1, fam1, qf1, CompareOp.GREATER, new BinaryComparator(val2), + put, true); assertEquals(false, res); // Test CompareOp.GREATER: original = val3, compare with val4, // succeed (now value = val2) put = new Put(row1); put.addColumn(fam1, qf1, val2); - res = region.checkAndMutate(row1, fam1, qf1, CompareOp.GREATER, - new BinaryComparator(val4), put, true); + res = region.checkAndMutate(row1, fam1, qf1, CompareOp.GREATER, new BinaryComparator(val4), + put, true); assertEquals(true, res); // Test CompareOp.GREATER_OR_EQUAL: original = val2, compare with val1, fail res = region.checkAndMutate(row1, fam1, qf1, CompareOp.GREATER_OR_EQUAL, - new BinaryComparator(val1), put, true); + new BinaryComparator(val1), put, true); assertEquals(false, res); // Test CompareOp.GREATER_OR_EQUAL: original = val2, compare with val2, // succeed (value still = val2) res = region.checkAndMutate(row1, fam1, qf1, CompareOp.GREATER_OR_EQUAL, - new BinaryComparator(val2), put, true); + new BinaryComparator(val2), put, true); assertEquals(true, res); // Test CompareOp.GREATER_OR_EQUAL: original = val2, compare with val3, succeed res = region.checkAndMutate(row1, fam1, qf1, CompareOp.GREATER_OR_EQUAL, - new BinaryComparator(val3), put, true); + new BinaryComparator(val3), put, true); assertEquals(true, res); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -1936,8 +1921,8 @@ put.add(kv); // checkAndPut with wrong value - boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator( - val1), put, true); + boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, + new BinaryComparator(val1), put, true); assertEquals(true, res); Get get = new Get(row1); @@ -1964,8 +1949,8 @@ Put put = new Put(row2); put.addColumn(fam1, qual1, value1); try { - region.checkAndMutate(row, fam1, qual1, CompareOp.EQUAL, - new BinaryComparator(value2), put, false); + region.checkAndMutate(row, fam1, qual1, CompareOp.EQUAL, new BinaryComparator(value2), put, + false); fail(); } catch (org.apache.hadoop.hbase.DoNotRetryIOException expected) { // expected exception. 
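    // checkAndMutate only accepts a mutation whose row equals the row being checked, which is why
    // the call above with a Put built for row2 is expected to surface DoNotRetryIOException.
    // A minimal sketch of the valid form, reusing the same local variables (row, fam1, qual1,
    // value1) and the same call shape used throughout these tests:
    //   Put ok = new Put(row);
    //   ok.addColumn(fam1, qual1, value1);
    //   boolean applied = region.checkAndMutate(row, fam1, qual1, CompareOp.EQUAL,
    //     new BinaryComparator(value1), ok, false);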
@@ -2014,8 +1999,8 @@ delete.addColumn(fam1, qf1); delete.addColumn(fam2, qf1); delete.addColumn(fam1, qf3); - boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator( - val2), delete, true); + boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, + new BinaryComparator(val2), delete, true); assertEquals(true, res); Get get = new Get(row1); @@ -2031,7 +2016,7 @@ delete = new Delete(row1); delete.addFamily(fam2); res = region.checkAndMutate(row1, fam2, qf1, CompareOp.EQUAL, new BinaryComparator(emptyVal), - delete, true); + delete, true); assertEquals(true, res); get = new Get(row1); @@ -2042,7 +2027,7 @@ // Row delete delete = new Delete(row1); res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(val1), - delete, true); + delete, true); assertEquals(true, res); get = new Get(row1); r = region.get(get); @@ -2106,8 +2091,8 @@ // testing existing family byte[] family = fam2; try { - NavigableMap> deleteMap = new TreeMap>( - Bytes.BYTES_COMPARATOR); + NavigableMap> deleteMap = + new TreeMap>(Bytes.BYTES_COMPARATOR); deleteMap.put(family, kvs); region.delete(deleteMap, Durability.SYNC_WAL); } catch (Exception e) { @@ -2118,8 +2103,8 @@ boolean ok = false; family = fam4; try { - NavigableMap> deleteMap = new TreeMap>( - Bytes.BYTES_COMPARATOR); + NavigableMap> deleteMap = + new TreeMap>(Bytes.BYTES_COMPARATOR); deleteMap.put(family, kvs); region.delete(deleteMap, Durability.SYNC_WAL); } catch (Exception e) { @@ -2240,8 +2225,7 @@ } /** - * Tests that the special LATEST_TIMESTAMP option for puts gets replaced by - * the actual timestamp + * Tests that the special LATEST_TIMESTAMP option for puts gets replaced by the actual timestamp */ @Test public void testPutWithLatestTS() throws IOException { @@ -2266,7 +2250,7 @@ Cell kv = result.rawCells()[0]; LOG.info("Got: " + kv); assertTrue("LATEST_TIMESTAMP was not replaced with real timestamp", - kv.getTimestamp() != HConstants.LATEST_TIMESTAMP); + kv.getTimestamp() != HConstants.LATEST_TIMESTAMP); // Check same with WAL enabled (historically these took different // code paths, so check both) @@ -2282,7 +2266,7 @@ kv = result.rawCells()[0]; LOG.info("Got: " + kv); assertTrue("LATEST_TIMESTAMP was not replaced with real timestamp", - kv.getTimestamp() != HConstants.LATEST_TIMESTAMP); + kv.getTimestamp() != HConstants.LATEST_TIMESTAMP); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region = null; @@ -2291,9 +2275,8 @@ } /** - * Tests that there is server-side filtering for invalid timestamp upper - * bound. Note that the timestamp lower bound is automatically handled for us - * by the TTL field. + * Tests that there is server-side filtering for invalid timestamp upper bound. Note that the + * timestamp lower bound is automatically handled for us by the TTL field. */ @Test public void testPutWithTsSlop() throws IOException { @@ -2311,7 +2294,7 @@ region.put(new Put(row).addColumn(fam, Bytes.toBytes("qual"), Bytes.toBytes("value"))); // TS out of range. 
should error region.put(new Put(row).addColumn(fam, Bytes.toBytes("qual"), - System.currentTimeMillis() + 2000, Bytes.toBytes("value"))); + System.currentTimeMillis() + 2000, Bytes.toBytes("value"))); fail("Expected IOE for TS out of configured timerange"); } catch (FailedSanityCheckException ioe) { LOG.debug("Received expected exception", ioe); @@ -2443,8 +2426,8 @@ kvs.add(new KeyValue(row1, fam1, col2, null)); kvs.add(new KeyValue(row1, fam1, col3, null)); - NavigableMap> deleteMap = new TreeMap>( - Bytes.BYTES_COMPARATOR); + NavigableMap> deleteMap = + new TreeMap>(Bytes.BYTES_COMPARATOR); deleteMap.put(fam1, kvs); region.delete(deleteMap, Durability.SYNC_WAL); @@ -2596,8 +2579,8 @@ Path oldRegion2 = subregions[1].getRegionFileSystem().getRegionDir(); long startTime = System.currentTimeMillis(); region = HRegion.mergeAdjacent(subregions[0], subregions[1]); - LOG.info("Merge regions elapsed time: " - + ((System.currentTimeMillis() - startTime) / 1000.0)); + LOG.info( + "Merge regions elapsed time: " + ((System.currentTimeMillis() - startTime) / 1000.0)); FILESYSTEM.delete(oldRegion1, true); FILESYSTEM.delete(oldRegion2, true); FILESYSTEM.delete(oldRegionPath, true); @@ -2618,10 +2601,8 @@ } /** - * @param parent - * Region to split. - * @param midkey - * Key to split around. + * @param parent Region to split. + * @param midkey Key to split around. * @return The Regions we created. * @throws IOException */ @@ -2638,22 +2619,22 @@ result = st.execute(null, null); } catch (IOException ioe) { try { - LOG.info("Running rollback of failed split of " + - parent.getRegionInfo().getRegionNameAsString() + "; " + ioe.getMessage()); + LOG.info("Running rollback of failed split of " + + parent.getRegionInfo().getRegionNameAsString() + "; " + ioe.getMessage()); st.rollback(null, null); - LOG.info("Successful rollback of failed split of " + - parent.getRegionInfo().getRegionNameAsString()); + LOG.info("Successful rollback of failed split of " + + parent.getRegionInfo().getRegionNameAsString()); return null; } catch (RuntimeException e) { // If failed rollback, kill this server to avoid having a hole in table. - LOG.info("Failed rollback of failed split of " + - parent.getRegionInfo().getRegionNameAsString() + " -- aborting server", e); + LOG.info("Failed rollback of failed split of " + + parent.getRegionInfo().getRegionNameAsString() + " -- aborting server", + e); } - } - finally { + } finally { parent.clearSplit(); } - return new HRegion[] { (HRegion)result.getFirst(), (HRegion)result.getSecond() }; + return new HRegion[] { (HRegion) result.getFirst(), (HRegion) result.getSecond() }; } // //////////////////////////////////////////////////////////////////////////// @@ -2756,7 +2737,6 @@ /** * This method tests https://issues.apache.org/jira/browse/HBASE-2516. 
- * * @throws IOException */ @Test @@ -2782,8 +2762,9 @@ } catch (NotServingRegionException e) { // this is the correct exception that is expected } catch (IOException e) { - fail("Got wrong type of exception - should be a NotServingRegionException, but was an IOException: " - + e.getMessage()); + fail( + "Got wrong type of exception - should be a NotServingRegionException, but was an IOException: " + + e.getMessage()); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -2980,7 +2961,8 @@ } @Test - public void testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions() throws IOException { + public void testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions() + throws IOException { byte[] row1 = Bytes.toBytes("row1"); byte[] fam1 = Bytes.toBytes("fam1"); byte[][] families = { fam1 }; @@ -3184,6 +3166,87 @@ } @Test + public void testScanner_StopRow() throws IOException { + InternalScanner s = null; + + byte[] row1 = Bytes.toBytes("row111"); + byte[] row2 = Bytes.toBytes("row222"); + byte[] row3 = Bytes.toBytes("row333"); + byte[] row4 = Bytes.toBytes("row444"); + byte[] row5 = Bytes.toBytes("row555"); + byte[] row6 = Bytes.toBytes("row666"); + byte[] row7 = Bytes.toBytes("row777"); + + byte[] col1 = Bytes.toBytes("Pub111"); + + byte[] family = Bytes.toBytes("testFamily"); + this.region = initHRegion(tableName, row1, row6, getName(), CONF, false, family); + this.region.setSkipRowChecking(true); + try { + + Put put = new Put(row1); + put.addColumn(family, col1, Bytes.toBytes(10L)); + region.put(put); + + put = new Put(row2); + put.addColumn(family, col1, Bytes.toBytes(15L)); + region.put(put); + + put = new Put(row3); + put.addColumn(family, col1, Bytes.toBytes(20L)); + region.put(put); + + put = new Put(row4); + put.addColumn(family, col1, Bytes.toBytes(30L)); + region.put(put); + + put = new Put(row5); + put.addColumn(family, col1, Bytes.toBytes(40L)); + region.put(put); + + Scan scan = new Scan(row1, row5); + scan.setMaxVersions(); + scan.addColumn(family, col1); + s = region.getScanner(scan); + List results = new ArrayList(); + boolean hasMore = s.next(results); + while (hasMore) { + hasMore = s.next(results); + } + s.close(); + + assertEquals(4, results.size()); + + put = new Put(row6); + put.addColumn(family, col1, Bytes.toBytes(40L)); + region.put(put); + + put = new Put(row7); + put.addColumn(family, col1, Bytes.toBytes(40L)); + region.put(put); + + scan = new Scan(row1); + scan.setMaxVersions(); + scan.addColumn(family, col1); + s = region.getScanner(scan); + results.clear(); + hasMore = s.next(results); + while (hasMore) { + hasMore = s.next(results); + } + s.close(); + + assertEquals(5, results.size()); + } finally { + if (s != null) { + s.close(); + } + HBaseTestingUtility.closeRegionAndWAL(this.region); + this.region = null; + } + } + + @Test public void testScanner_StopRow1542() throws IOException { byte[] family = Bytes.toBytes("testFamily"); this.region = initHRegion(tableName, getName(), CONF, family); @@ -3311,9 +3374,7 @@ } /** - * Added for HBASE-5416 - * - * Here we test scan optimization when only subset of CFs are used in filter + * Added for HBASE-5416 Here we test scan optimization when only subset of CFs are used in filter * conditions. */ @Test @@ -3380,9 +3441,7 @@ } /** - * HBASE-5416 - * - * Test case when scan limits amount of KVs returned on each next() call. + * HBASE-5416 Test case when scan limits amount of KVs returned on each next() call. 
*/ @Test public void testScanner_JoinedScannersWithLimits() throws IOException { @@ -3414,6 +3473,7 @@ public ReturnCode filterKeyValue(Cell ignored) throws IOException { return ReturnCode.INCLUDE; } + @Override public boolean isFamilyEssential(byte[] name) { return Bytes.equals(name, cf_first); @@ -3458,16 +3518,12 @@ while (true) { boolean more = s.next(results, scannerContext); if ((index >> 1) < 5) { - if (index % 2 == 0) - assertEquals(results.size(), 3); - else - assertEquals(results.size(), 1); - } else - assertEquals(results.size(), 1); + if (index % 2 == 0) assertEquals(results.size(), 3); + else assertEquals(results.size(), 1); + } else assertEquals(results.size(), 1); results.clear(); index++; - if (!more) - break; + if (!more) break; } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -3476,8 +3532,8 @@ } /** - * Write an HFile block full with Cells whose qualifier that are identical between - * 0 and Short.MAX_VALUE. See HBASE-13329. + * Write an HFile block full with Cells whose qualifier that are identical between 0 and + * Short.MAX_VALUE. See HBASE-13329. * @throws Exception */ @Test @@ -3486,12 +3542,12 @@ TableName tableName = TableName.valueOf(method); byte[] family = Bytes.toBytes("family"); this.region = initHRegion(tableName, method, CONF, family); - byte[] q = new byte[Short.MAX_VALUE+2]; - Arrays.fill(q, 0, q.length-1, (byte)42); - for (byte i=0; i<10; i++) { + byte[] q = new byte[Short.MAX_VALUE + 2]; + Arrays.fill(q, 0, q.length - 1, (byte) 42); + for (byte i = 0; i < 10; i++) { Put p = new Put(Bytes.toBytes("row")); // qualifiers that differ past Short.MAX_VALUE - q[q.length-1]=i; + q[q.length - 1] = i; p.addColumn(family, q, q); region.put(p); } @@ -3499,12 +3555,12 @@ HBaseTestingUtility.closeRegionAndWAL(this.region); this.region = null; } + // //////////////////////////////////////////////////////////////////////////// // Split test // //////////////////////////////////////////////////////////////////////////// /** * Splits twice and verifies getting from each of the split regions. - * * @throws Exception */ @Test @@ -3664,7 +3720,7 @@ HStore storeMock = Mockito.mock(HStore.class); when(storeMock.hasReferences()).thenReturn(true); when(storeMock.getFamily()).thenReturn(new HColumnDescriptor("cf")); - when(storeMock.close()).thenReturn(ImmutableList.of()); + when(storeMock.close()).thenReturn(ImmutableList. of()); when(storeMock.getColumnFamilyName()).thenReturn("cf"); region.stores.put(Bytes.toBytes(storeMock.getColumnFamilyName()), storeMock); assertTrue(region.hasReferences()); @@ -3702,14 +3758,10 @@ } /** - * Flushes the cache in a thread while scanning. The tests verify that the - * scan is coherent - e.g. the returned results are always of the same or - * later update as the previous results. - * - * @throws IOException - * scan / compact - * @throws InterruptedException - * thread join + * Flushes the cache in a thread while scanning. The tests verify that the scan is coherent - e.g. + * the returned results are always of the same or later update as the previous results. + * @throws IOException scan / compact + * @throws InterruptedException thread join */ @Test public void testFlushCacheWhileScanning() throws IOException, InterruptedException { @@ -3830,13 +3882,10 @@ } /** - * Writes very wide records and scans for the latest every time.. Flushes and - * compacts the region every now and then to keep things realistic. 
- * - * @throws IOException - * by flush / scan / compaction - * @throws InterruptedException - * when joining threads + * Writes very wide records and scans for the latest every time.. Flushes and compacts the region + * every now and then to keep things realistic. + * @throws IOException by flush / scan / compaction + * @throws InterruptedException when joining threads */ @Test public void testWritesWhileScanning() throws IOException, InterruptedException { @@ -3890,7 +3939,7 @@ assertEquals("i=" + i, expectedCount, res.size()); long timestamp = res.get(0).getTimestamp(); assertTrue("Timestamps were broke: " + timestamp + " prev: " + prevTimestamp, - timestamp >= prevTimestamp); + timestamp >= prevTimestamp); prevTimestamp = timestamp; } } @@ -3997,13 +4046,10 @@ } /** - * Writes very wide records and gets the latest row every time.. Flushes and - * compacts the region aggressivly to catch issues. - * - * @throws IOException - * by flush / scan / compaction - * @throws InterruptedException - * when joining threads + * Writes very wide records and gets the latest row every time.. Flushes and compacts the region + * aggressivly to catch issues. + * @throws IOException by flush / scan / compaction + * @throws InterruptedException when joining threads */ @Test public void testWritesWhileGetting() throws Exception { @@ -4020,7 +4066,6 @@ for (int i = 0; i < numQualifiers; i++) { qualifiers[i] = Bytes.toBytes("qual" + i); } - String method = "testWritesWhileGetting"; // This test flushes constantly and can cause many files to be created, @@ -4099,8 +4144,7 @@ } } } finally { - if (putThread != null) - putThread.done(); + if (putThread != null) putThread.done(); region.flush(true); @@ -4118,8 +4162,8 @@ @Test public void testHolesInMeta() throws Exception { byte[] family = Bytes.toBytes("family"); - this.region = initHRegion(tableName, Bytes.toBytes("x"), Bytes.toBytes("z"), method, CONF, - false, family); + this.region = + initHRegion(tableName, Bytes.toBytes("x"), Bytes.toBytes("z"), method, CONF, false, family); try { byte[] rowNotServed = Bytes.toBytes("a"); Get g = new Get(rowNotServed); @@ -4161,10 +4205,12 @@ Scan idxScan = new Scan(); idxScan.addFamily(family); - idxScan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, Arrays. asList( - new SingleColumnValueFilter(family, qual1, CompareOp.GREATER_OR_EQUAL, - new BinaryComparator(Bytes.toBytes(0L))), new SingleColumnValueFilter(family, qual1, - CompareOp.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes(3L)))))); + idxScan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, + Arrays. asList( + new SingleColumnValueFilter(family, qual1, CompareOp.GREATER_OR_EQUAL, + new BinaryComparator(Bytes.toBytes(0L))), + new SingleColumnValueFilter(family, qual1, CompareOp.LESS_OR_EQUAL, + new BinaryComparator(Bytes.toBytes(3L)))))); InternalScanner scanner = region.getScanner(idxScan); List res = new ArrayList(); @@ -4283,9 +4329,8 @@ } /** - * Testcase to cover bug-fix for HBASE-2823 Ensures correct delete when - * issuing delete row on columns with bloom filter set to row+col - * (BloomType.ROWCOL) + * Testcase to cover bug-fix for HBASE-2823 Ensures correct delete when issuing delete row on + * columns with bloom filter set to row+col (BloomType.ROWCOL) */ @Test public void testDeleteRowWithBloomFilter() throws IOException { @@ -4328,11 +4373,11 @@ @Test public void testgetHDFSBlocksDistribution() throws Exception { HBaseTestingUtility htu = new HBaseTestingUtility(); - // Why do we set the block size in this test? 
If we set it smaller than the kvs, then we'll + // Why do we set the block size in this test? If we set it smaller than the kvs, then we'll // break up the file in to more pieces that can be distributed across the three nodes and we // won't be able to have the condition this test asserts; that at least one node has // a copy of all replicas -- if small block size, then blocks are spread evenly across the - // the three nodes. hfilev3 with tags seems to put us over the block size. St.Ack. + // the three nodes. hfilev3 with tags seems to put us over the block size. St.Ack. // final int DEFAULT_BLOCK_SIZE = 1024; // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE); htu.getConfiguration().setInt("dfs.replication", 2); @@ -4356,8 +4401,8 @@ put.addColumn(fam2, col, (long) 1, Bytes.toBytes("test2")); ht.put(put); - HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName())) - .get(0); + HRegion firstRegion = + htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName())).get(0); firstRegion.flush(true); HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution(); @@ -4367,7 +4412,7 @@ // weight will be equal to the unique block weight. long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight(); StringBuilder sb = new StringBuilder(); - for (String host: blocksDistribution1.getTopHosts()) { + for (String host : blocksDistribution1.getTopHosts()) { if (sb.length() > 0) sb.append(", "); sb.append(host); sb.append("="); @@ -4376,15 +4421,15 @@ String topHost = blocksDistribution1.getTopHosts().get(0); long topHostWeight = blocksDistribution1.getWeight(topHost); - String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" + - topHostWeight + ", topHost=" + topHost + "; " + sb.toString(); + String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" + topHostWeight + + ", topHost=" + topHost + "; " + sb.toString(); LOG.info(msg); assertTrue(msg, uniqueBlocksWeight1 == topHostWeight); // use the static method to compute the value, it should be the same. // static method is used by load balancer or other components HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution( - htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo()); + htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo()); long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight(); assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2); @@ -4398,9 +4443,8 @@ } /** - * Testcase to check state of region initialization task set to ABORTED or not - * if any exceptions during initialization - * + * Testcase to check state of region initialization task set to ABORTED or not if any exceptions + * during initialization * @throws Exception */ @Test @@ -4425,7 +4469,7 @@ if (!(monitoredTask instanceof MonitoredRPCHandler) && monitoredTask.getDescription().contains(region.toString())) { assertTrue("Region state should be ABORTED.", - monitoredTask.getState().equals(MonitoredTask.State.ABORTED)); + monitoredTask.getState().equals(MonitoredTask.State.ABORTED)); break; } } @@ -4435,8 +4479,8 @@ } /** - * Verifies that the .regioninfo file is written on region creation and that - * is recreated if missing during region opening. + * Verifies that the .regioninfo file is written on region creation and that is recreated if + * missing during region opening. 
*/ @Test public void testRegionInfoFileCreation() throws IOException { @@ -4457,7 +4501,7 @@ // Verify that the .regioninfo file is present assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir", - fs.exists(regionInfoFile)); + fs.exists(regionInfoFile)); // Try to open the region region = HRegion.openHRegion(rootDir, hri, htd, null, CONF); @@ -4466,21 +4510,21 @@ // Verify that the .regioninfo file is still there assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir", - fs.exists(regionInfoFile)); + fs.exists(regionInfoFile)); // Remove the .regioninfo file and verify is recreated on region open fs.delete(regionInfoFile, true); assertFalse(HRegionFileSystem.REGION_INFO_FILE + " should be removed from the region dir", - fs.exists(regionInfoFile)); + fs.exists(regionInfoFile)); region = HRegion.openHRegion(rootDir, hri, htd, null, CONF); -// region = TEST_UTIL.openHRegion(hri, htd); + // region = TEST_UTIL.openHRegion(hri, htd); assertEquals(regionDir, region.getRegionFileSystem().getRegionDir()); HBaseTestingUtility.closeRegionAndWAL(region); // Verify that the .regioninfo file is still there assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir", - fs.exists(new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE))); + fs.exists(new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE))); } /** @@ -4786,19 +4830,17 @@ FSUtils.setRootDir(walConf, logDir); final WALFactory wals = new WALFactory(walConf, null, UUID.randomUUID().toString()); final WAL wal = spy(wals.getWAL(tableName.getName(), tableName.getNamespace())); - this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, method, conf, false, tableDurability, wal, - new byte[][] { family }); + this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, + method, conf, false, tableDurability, wal, new byte[][] { family }); Put put = new Put(Bytes.toBytes("r1")); put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("v1")); put.setDurability(mutationDurability); region.put(put); - //verify append called or not - verify(wal, expectAppend ? times(1) : never()) - .append((HTableDescriptor)any(), (HRegionInfo)any(), (WALKey)any(), - (WALEdit)any(), Mockito.anyBoolean()); + // verify append called or not + verify(wal, expectAppend ? 
times(1) : never()).append((HTableDescriptor) any(), + (HRegionInfo) any(), (WALKey) any(), (WALEdit) any(), Mockito.anyBoolean()); // verify sync called or not if (expectSync || expectSyncFromLogSyncer) { @@ -4817,7 +4859,7 @@ } }); } else { - //verify(wal, never()).sync(anyLong()); + // verify(wal, never()).sync(anyLong()); verify(wal, never()).sync(); } @@ -4832,9 +4874,8 @@ Path rootDir = new Path(dir + "testRegionReplicaSecondary"); FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir); - byte[][] families = new byte[][] { - Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") - }; + byte[][] families = + new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; byte[] cq = Bytes.toBytes("cq"); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testRegionReplicaSecondary")); for (byte[] family : families) { @@ -4842,18 +4883,16 @@ } long time = System.currentTimeMillis(); - HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(), - HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, - false, time, 0); - HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(), - HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, - false, time, 1); + HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, false, time, 0); + HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, false, time, 1); HRegion primaryRegion = null, secondaryRegion = null; try { - primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri, - rootDir, TEST_UTIL.getConfiguration(), htd); + primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri, rootDir, + TEST_UTIL.getConfiguration(), htd); // load some data putData(primaryRegion, 0, 1000, cq, families); @@ -4882,9 +4921,8 @@ Path rootDir = new Path(dir + "testRegionReplicaSecondary"); FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir); - byte[][] families = new byte[][] { - Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") - }; + byte[][] families = + new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; byte[] cq = Bytes.toBytes("cq"); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testRegionReplicaSecondary")); for (byte[] family : families) { @@ -4892,18 +4930,16 @@ } long time = System.currentTimeMillis(); - HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(), - HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, - false, time, 0); - HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(), - HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, - false, time, 1); + HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, false, time, 0); + HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, false, time, 1); HRegion primaryRegion = null, secondaryRegion = null; try { - primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri, - rootDir, TEST_UTIL.getConfiguration(), htd); + primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri, rootDir, + TEST_UTIL.getConfiguration(), htd); // load some data putData(primaryRegion, 0, 1000, cq, families); @@ -4934,7 +4970,7 @@ Configuration confForWAL = new Configuration(conf); confForWAL.set(HConstants.HBASE_DIR, rootDir.toString()); return new WALFactory(confForWAL, - Collections.singletonList(new 
MetricsWAL()), + Collections. singletonList(new MetricsWAL()), "hregion-" + RandomStringUtils.randomNumeric(8)); } @@ -4943,9 +4979,8 @@ Path rootDir = new Path(dir + "testRegionReplicaSecondary"); FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir); - byte[][] families = new byte[][] { - Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") - }; + byte[][] families = + new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; byte[] cq = Bytes.toBytes("cq"); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testRegionReplicaSecondary")); for (byte[] family : families) { @@ -4953,18 +4988,16 @@ } long time = System.currentTimeMillis(); - HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(), - HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, - false, time, 0); - HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(), - HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, - false, time, 1); + HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, false, time, 0); + HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, false, time, 1); HRegion primaryRegion = null, secondaryRegion = null; try { - primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri, - rootDir, TEST_UTIL.getConfiguration(), htd); + primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri, rootDir, + TEST_UTIL.getConfiguration(), htd); // load some data putData(primaryRegion, 0, 1000, cq, families); @@ -4978,7 +5011,8 @@ // move the file of the primary region to the archive, simulating a compaction Collection storeFiles = primaryRegion.getStore(families[0]).getStorefiles(); primaryRegion.getRegionFileSystem().removeStoreFiles(Bytes.toString(families[0]), storeFiles); - Collection storeFileInfos = primaryRegion.getRegionFileSystem().getStoreFiles(families[0]); + Collection storeFileInfos = + primaryRegion.getRegionFileSystem().getStoreFiles(families[0]); Assert.assertTrue(storeFileInfos == null || storeFileInfos.size() == 0); verifyData(secondaryRegion, 0, 1000, cq, families); @@ -4992,17 +5026,18 @@ } } - private void putData(int startRow, int numRows, byte[] qf, byte[]... families) throws IOException { + private void putData(int startRow, int numRows, byte[] qf, byte[]... families) + throws IOException { putData(this.region, startRow, numRows, qf, families); } - private void putData(HRegion region, - int startRow, int numRows, byte[] qf, byte[]... families) throws IOException { + private void putData(HRegion region, int startRow, int numRows, byte[] qf, byte[]... families) + throws IOException { putData(region, Durability.SKIP_WAL, startRow, numRows, qf, families); } - static void putData(HRegion region, Durability durability, - int startRow, int numRows, byte[] qf, byte[]... families) throws IOException { + static void putData(HRegion region, Durability durability, int startRow, int numRows, byte[] qf, + byte[]... families) throws IOException { for (int i = startRow; i < startRow + numRows; i++) { Put put = new Put(Bytes.toBytes("" + i)); put.setDurability(durability); @@ -5045,13 +5080,9 @@ /* * Assert first value in the passed region is firstValue. 
- * * @param r - * * @param fs - * * @param firstValue - * * @throws IOException */ private void assertScan(final HRegion r, final byte[] fs, final byte[] firstValue) @@ -5142,8 +5173,8 @@ * @param conf * @param families * @throws IOException - * @return A region on which you must call - * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. + * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} + * when done. */ private static HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf, byte[]... families) throws IOException { @@ -5157,8 +5188,8 @@ * @param isReadOnly * @param families * @throws IOException - * @return A region on which you must call - * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. + * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} + * when done. */ private static HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families) throws IOException { @@ -5167,12 +5198,12 @@ public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families) - throws IOException { + throws IOException { Path logDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log"); HRegionInfo hri = new HRegionInfo(tableName, startKey, stopKey); final WAL wal = HBaseTestingUtility.createWal(conf, logDir, hri); return initHRegion(tableName, startKey, stopKey, callingMethod, conf, isReadOnly, - Durability.SYNC_WAL, wal, families); + Durability.SYNC_WAL, wal, families); } /** @@ -5184,36 +5215,34 @@ * @param isReadOnly * @param families * @throws IOException - * @return A region on which you must call - * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. + * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} + * when done. */ public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, - String callingMethod, Configuration conf, boolean isReadOnly, Durability durability, - WAL wal, byte[]... families) throws IOException { - return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, - isReadOnly, durability, wal, families); + String callingMethod, Configuration conf, boolean isReadOnly, Durability durability, WAL wal, + byte[]... families) throws IOException { + return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, isReadOnly, durability, wal, + families); } /** - * Assert that the passed in Cell has expected contents for the specified row, - * column & timestamp. + * Assert that the passed in Cell has expected contents for the specified row, column & timestamp. 
*/ private void checkOneCell(Cell kv, byte[] cf, int rowIdx, int colIdx, long ts) { String ctx = "rowIdx=" + rowIdx + "; colIdx=" + colIdx + "; ts=" + ts; assertEquals("Row mismatch which checking: " + ctx, "row:" + rowIdx, - Bytes.toString(CellUtil.cloneRow(kv))); + Bytes.toString(CellUtil.cloneRow(kv))); assertEquals("ColumnFamily mismatch while checking: " + ctx, Bytes.toString(cf), - Bytes.toString(CellUtil.cloneFamily(kv))); + Bytes.toString(CellUtil.cloneFamily(kv))); assertEquals("Column qualifier mismatch while checking: " + ctx, "column:" + colIdx, - Bytes.toString(CellUtil.cloneQualifier(kv))); + Bytes.toString(CellUtil.cloneQualifier(kv))); assertEquals("Timestamp mismatch while checking: " + ctx, ts, kv.getTimestamp()); assertEquals("Value mismatch while checking: " + ctx, "value-version-" + ts, - Bytes.toString(CellUtil.cloneValue(kv))); + Bytes.toString(CellUtil.cloneValue(kv))); } - @Test (timeout=60000) - public void testReverseScanner_FromMemStore_SingleCF_Normal() - throws IOException { + @Test(timeout = 60000) + public void testReverseScanner_FromMemStore_SingleCF_Normal() throws IOException { byte[] rowC = Bytes.toBytes("rowC"); byte[] rowA = Bytes.toBytes("rowA"); byte[] rowB = Bytes.toBytes("rowB"); @@ -5225,8 +5254,7 @@ this.region = initHRegion(tableName, method, families); try { KeyValue kv1 = new KeyValue(rowC, cf, col, ts, KeyValue.Type.Put, null); - KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, KeyValue.Type.Put, - null); + KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, KeyValue.Type.Put, null); KeyValue kv2 = new KeyValue(rowA, cf, col, ts, KeyValue.Type.Put, null); KeyValue kv3 = new KeyValue(rowB, cf, col, ts, KeyValue.Type.Put, null); Put put = null; @@ -5248,20 +5276,20 @@ List currRow = new ArrayList(); boolean hasNext = scanner.next(currRow); assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowB, 0, rowB.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowA, 0, rowA.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowA, 0, rowA.length)); assertFalse(hasNext); scanner.close(); } finally { @@ -5270,9 +5298,8 @@ } } - @Test (timeout=60000) - public void testReverseScanner_FromMemStore_SingleCF_LargerKey() - throws IOException { + @Test(timeout = 60000) + public void testReverseScanner_FromMemStore_SingleCF_LargerKey() throws IOException { byte[] rowC = Bytes.toBytes("rowC"); byte[] rowA = Bytes.toBytes("rowA"); byte[] rowB = Bytes.toBytes("rowB"); @@ -5285,8 +5312,7 @@ this.region = initHRegion(tableName, method, families); try { KeyValue kv1 = new KeyValue(rowC, cf, col, ts, KeyValue.Type.Put, null); - KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, 
KeyValue.Type.Put, - null); + KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, KeyValue.Type.Put, null); KeyValue kv2 = new KeyValue(rowA, cf, col, ts, KeyValue.Type.Put, null); KeyValue kv3 = new KeyValue(rowB, cf, col, ts, KeyValue.Type.Put, null); Put put = null; @@ -5308,20 +5334,20 @@ InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(currRow); assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowB, 0, rowB.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowA, 0, rowA.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowA, 0, rowA.length)); assertFalse(hasNext); scanner.close(); } finally { @@ -5330,9 +5356,8 @@ } } - @Test (timeout=60000) - public void testReverseScanner_FromMemStore_SingleCF_FullScan() - throws IOException { + @Test(timeout = 60000) + public void testReverseScanner_FromMemStore_SingleCF_FullScan() throws IOException { byte[] rowC = Bytes.toBytes("rowC"); byte[] rowA = Bytes.toBytes("rowA"); byte[] rowB = Bytes.toBytes("rowB"); @@ -5344,8 +5369,7 @@ this.region = initHRegion(tableName, method, families); try { KeyValue kv1 = new KeyValue(rowC, cf, col, ts, KeyValue.Type.Put, null); - KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, KeyValue.Type.Put, - null); + KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, KeyValue.Type.Put, null); KeyValue kv2 = new KeyValue(rowA, cf, col, ts, KeyValue.Type.Put, null); KeyValue kv3 = new KeyValue(rowB, cf, col, ts, KeyValue.Type.Put, null); Put put = null; @@ -5365,20 +5389,20 @@ InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowB, 0, rowB.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowA, 0, rowA.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), 
+ currRow.get(0).getRowLength(), rowA, 0, rowA.length)); assertFalse(hasNext); scanner.close(); } finally { @@ -5387,7 +5411,7 @@ } } - @Test (timeout=60000) + @Test(timeout = 60000) public void testReverseScanner_moreRowsMayExistAfter() throws IOException { // case for "INCLUDE_AND_SEEK_NEXT_ROW & SEEK_NEXT_ROW" endless loop byte[] rowA = Bytes.toBytes("rowA"); @@ -5436,20 +5460,20 @@ InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowD, 0, rowD.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowD, 0, rowD.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowB, 0, rowB.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); assertFalse(hasNext); scanner.close(); @@ -5460,8 +5484,8 @@ scanner = region.getScanner(scan); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowD, 0, rowD.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowD, 0, rowD.length)); scanner.close(); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -5469,7 +5493,7 @@ } } - @Test (timeout=60000) + @Test(timeout = 60000) public void testReverseScanner_smaller_blocksize() throws IOException { // case to ensure no conflict with HFile index optimization byte[] rowA = Bytes.toBytes("rowA"); @@ -5520,20 +5544,20 @@ InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowD, 0, rowD.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowD, 0, rowD.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowB, 0, rowB.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + 
currRow.get(0).getRowLength(), rowB, 0, rowB.length)); assertFalse(hasNext); scanner.close(); @@ -5544,8 +5568,8 @@ scanner = region.getScanner(scan); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowD, 0, rowD.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowD, 0, rowD.length)); scanner.close(); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -5553,9 +5577,8 @@ } } - @Test (timeout=60000) - public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs1() - throws IOException { + @Test(timeout = 60000) + public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs1() throws IOException { byte[] row0 = Bytes.toBytes("row0"); // 1 kv byte[] row1 = Bytes.toBytes("row1"); // 2 kv byte[] row2 = Bytes.toBytes("row2"); // 4 kv @@ -5575,38 +5598,22 @@ this.region = initHRegion(tableName, method, conf, families); try { // kv naming style: kv(row number) totalKvCountInThisRow seq no - KeyValue kv0_1_1 = new KeyValue(row0, cf1, col, ts, KeyValue.Type.Put, - null); - KeyValue kv1_2_1 = new KeyValue(row1, cf2, col, ts, KeyValue.Type.Put, - null); - KeyValue kv1_2_2 = new KeyValue(row1, cf1, col, ts + 1, - KeyValue.Type.Put, null); - KeyValue kv2_4_1 = new KeyValue(row2, cf2, col, ts, KeyValue.Type.Put, - null); - KeyValue kv2_4_2 = new KeyValue(row2, cf1, col, ts, KeyValue.Type.Put, - null); - KeyValue kv2_4_3 = new KeyValue(row2, cf3, col, ts, KeyValue.Type.Put, - null); - KeyValue kv2_4_4 = new KeyValue(row2, cf1, col, ts + 4, - KeyValue.Type.Put, null); - KeyValue kv3_2_1 = new KeyValue(row3, cf2, col, ts, KeyValue.Type.Put, - null); - KeyValue kv3_2_2 = new KeyValue(row3, cf1, col, ts + 4, - KeyValue.Type.Put, null); - KeyValue kv4_5_1 = new KeyValue(row4, cf1, col, ts, KeyValue.Type.Put, - null); - KeyValue kv4_5_2 = new KeyValue(row4, cf3, col, ts, KeyValue.Type.Put, - null); - KeyValue kv4_5_3 = new KeyValue(row4, cf3, col, ts + 5, - KeyValue.Type.Put, null); - KeyValue kv4_5_4 = new KeyValue(row4, cf2, col, ts, KeyValue.Type.Put, - null); - KeyValue kv4_5_5 = new KeyValue(row4, cf1, col, ts + 3, - KeyValue.Type.Put, null); - KeyValue kv5_2_1 = new KeyValue(row5, cf2, col, ts, KeyValue.Type.Put, - null); - KeyValue kv5_2_2 = new KeyValue(row5, cf3, col, ts, KeyValue.Type.Put, - null); + KeyValue kv0_1_1 = new KeyValue(row0, cf1, col, ts, KeyValue.Type.Put, null); + KeyValue kv1_2_1 = new KeyValue(row1, cf2, col, ts, KeyValue.Type.Put, null); + KeyValue kv1_2_2 = new KeyValue(row1, cf1, col, ts + 1, KeyValue.Type.Put, null); + KeyValue kv2_4_1 = new KeyValue(row2, cf2, col, ts, KeyValue.Type.Put, null); + KeyValue kv2_4_2 = new KeyValue(row2, cf1, col, ts, KeyValue.Type.Put, null); + KeyValue kv2_4_3 = new KeyValue(row2, cf3, col, ts, KeyValue.Type.Put, null); + KeyValue kv2_4_4 = new KeyValue(row2, cf1, col, ts + 4, KeyValue.Type.Put, null); + KeyValue kv3_2_1 = new KeyValue(row3, cf2, col, ts, KeyValue.Type.Put, null); + KeyValue kv3_2_2 = new KeyValue(row3, cf1, col, ts + 4, KeyValue.Type.Put, null); + KeyValue kv4_5_1 = new KeyValue(row4, cf1, col, ts, KeyValue.Type.Put, null); + KeyValue kv4_5_2 = new KeyValue(row4, cf3, col, ts, KeyValue.Type.Put, null); + KeyValue kv4_5_3 = new KeyValue(row4, cf3, col, ts + 5, KeyValue.Type.Put, null); + KeyValue kv4_5_4 = new KeyValue(row4, cf2, col, ts, KeyValue.Type.Put, null); + KeyValue kv4_5_5 = new 
KeyValue(row4, cf1, col, ts + 3, KeyValue.Type.Put, null); + KeyValue kv5_2_1 = new KeyValue(row5, cf2, col, ts, KeyValue.Type.Put, null); + KeyValue kv5_2_2 = new KeyValue(row5, cf3, col, ts, KeyValue.Type.Put, null); // hfiles(cf1/cf2) :"row1"(1 kv) / "row2"(1 kv) / "row4"(2 kv) Put put = null; put = new Put(row1); @@ -5669,49 +5676,49 @@ // "row4" takes 2 next() calls since batch=3 hasNext = scanner.next(currRow); assertEquals(3, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row4, 0, row4.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row4, 0, row4.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow.get(0).getRowLength(), row4, 0, - row4.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row4, 0, row4.length)); assertTrue(hasNext); // 2. scan out "row3" (2 kv) currRow.clear(); hasNext = scanner.next(currRow); assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row3, 0, row3.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row3, 0, row3.length)); assertTrue(hasNext); // 3. scan out "row2" (4 kvs) // "row2" takes 2 next() calls since batch=3 currRow.clear(); hasNext = scanner.next(currRow); assertEquals(3, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row2, 0, row2.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row2, 0, row2.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row2, 0, row2.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row2, 0, row2.length)); assertTrue(hasNext); // 4. scan out "row1" (2 kv) currRow.clear(); hasNext = scanner.next(currRow); assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row1, 0, row1.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row1, 0, row1.length)); assertTrue(hasNext); // 5. 
scan out "row0" (1 kv) currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row0, 0, row0.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row0, 0, row0.length)); assertFalse(hasNext); scanner.close(); @@ -5721,9 +5728,8 @@ } } - @Test (timeout=60000) - public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs2() - throws IOException { + @Test(timeout = 60000) + public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs2() throws IOException { byte[] row1 = Bytes.toBytes("row1"); byte[] row2 = Bytes.toBytes("row2"); byte[] row3 = Bytes.toBytes("row3"); @@ -5772,26 +5778,26 @@ List currRow = new ArrayList(); boolean hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row4, 0, row4.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row4, 0, row4.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row3, 0, row3.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row3, 0, row3.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row2, 0, row2.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row2, 0, row2.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row1, 0, row1.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row1, 0, row1.length)); assertFalse(hasNext); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -5802,10 +5808,10 @@ /** * Test for HBASE-14497: Reverse Scan threw StackOverflow caused by readPt checking */ - @Test (timeout = 60000) + @Test(timeout = 60000) public void testReverseScanner_StackOverflow() throws IOException { byte[] cf1 = Bytes.toBytes("CF1"); - byte[][] families = {cf1}; + byte[][] families = { cf1 }; byte[] col = Bytes.toBytes("C"); String method = this.getName(); HBaseConfiguration conf = new HBaseConfiguration(); @@ -5827,7 +5833,7 @@ // create one storefile contains many rows will be skipped // to check StoreFileScanner.seekToPreviousRow for (int i = 10000; i < 20000; i++) { - Put p = new Put(Bytes.toBytes(""+i)); + Put p = new Put(Bytes.toBytes("" + i)); p.addColumn(cf1, col, Bytes.toBytes("" + i)); region.put(p); } @@ -5836,7 +5842,7 @@ // create one memstore contains many rows will be skipped // to check MemStoreScanner.seekToPreviousRow for (int i = 10000; i < 20000; i++) { - Put p = new Put(Bytes.toBytes(""+i)); + Put p = new Put(Bytes.toBytes("" + i)); p.addColumn(cf1, col, Bytes.toBytes("" + i)); region.put(p); } @@ 
-5857,41 +5863,41 @@ } } - @Test (timeout=60000) + @Test(timeout = 60000) public void testSplitRegionWithReverseScan() throws IOException { TableName tableName = TableName.valueOf("testSplitRegionWithReverseScan"); - byte [] qualifier = Bytes.toBytes("qualifier"); + byte[] qualifier = Bytes.toBytes("qualifier"); Configuration hc = initSplit(); int numRows = 3; - byte [][] families = {fam1}; + byte[][] families = { fam1 }; - //Setting up region + // Setting up region String method = this.getName(); this.region = initHRegion(tableName, method, hc, families); - //Put data in region + // Put data in region int startRow = 100; putData(startRow, numRows, qualifier, families); int splitRow = startRow + numRows; putData(splitRow, numRows, qualifier, families); region.flush(true); - HRegion [] regions = null; + HRegion[] regions = null; try { regions = splitRegion(region, Bytes.toBytes("" + splitRow)); - //Opening the regions returned. + // Opening the regions returned. for (int i = 0; i < regions.length; i++) { regions[i] = HRegion.openHRegion(regions[i], null); } - //Verifying that the region has been split + // Verifying that the region has been split assertEquals(2, regions.length); - //Verifying that all data is still there and that data is in the right - //place + // Verifying that all data is still there and that data is in the right + // place verifyData(regions[0], startRow, numRows, qualifier, families); verifyData(regions[1], splitRow, numRows, qualifier, families); - //fire the reverse scan1: top range, and larger than the last row + // fire the reverse scan1: top range, and larger than the last row Scan scan = new Scan(Bytes.toBytes(String.valueOf(startRow + 10 * numRows))); scan.setReversed(true); InternalScanner scanner = regions[1].getScanner(scan); @@ -5904,10 +5910,10 @@ currRow.get(0).getRowLength()), verify + ""); verify--; currRow.clear(); - } while(more); + } while (more); assertEquals(verify, startRow + numRows - 1); scanner.close(); - //fire the reverse scan2: top range, and equals to the last row + // fire the reverse scan2: top range, and equals to the last row scan = new Scan(Bytes.toBytes(String.valueOf(startRow + 2 * numRows - 1))); scan.setReversed(true); scanner = regions[1].getScanner(scan); @@ -5918,10 +5924,10 @@ currRow.get(0).getRowLength()), verify + ""); verify--; currRow.clear(); - } while(more); + } while (more); assertEquals(verify, startRow + numRows - 1); scanner.close(); - //fire the reverse scan3: bottom range, and larger than the last row + // fire the reverse scan3: bottom range, and larger than the last row scan = new Scan(Bytes.toBytes(String.valueOf(startRow + numRows))); scan.setReversed(true); scanner = regions[0].getScanner(scan); @@ -5932,10 +5938,10 @@ currRow.get(0).getRowLength()), verify + ""); verify--; currRow.clear(); - } while(more); + } while (more); assertEquals(verify, 99); scanner.close(); - //fire the reverse scan4: bottom range, and equals to the last row + // fire the reverse scan4: bottom range, and equals to the last row scan = new Scan(Bytes.toBytes(String.valueOf(startRow + numRows - 1))); scan.setReversed(true); scanner = regions[0].getScanner(scan); @@ -5946,7 +5952,7 @@ currRow.get(0).getRowLength()), verify + ""); verify--; currRow.clear(); - } while(more); + } while (more); assertEquals(verify, startRow - 1); scanner.close(); } finally { @@ -5986,18 +5992,16 @@ final ServerName serverName = ServerName.valueOf("testOpenRegionWrittenToWAL", 100, 42); final RegionServerServices rss = 
spy(TEST_UTIL.createMockRegionServerService(serverName)); - HTableDescriptor htd - = new HTableDescriptor(TableName.valueOf("testOpenRegionWrittenToWAL")); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testOpenRegionWrittenToWAL")); htd.addFamily(new HColumnDescriptor(fam1)); htd.addFamily(new HColumnDescriptor(fam2)); - HRegionInfo hri = new HRegionInfo(htd.getTableName(), - HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY); + HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY); // open the region w/o rss and wal and flush some files - HRegion region = - HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL - .getConfiguration(), htd); + HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), + TEST_UTIL.getConfiguration(), htd); assertNotNull(region); // create a file in fam1 for the region before opening in OpenRegionHandler @@ -6012,11 +6016,11 @@ when(rss.getWAL((HRegionInfo) any())).thenReturn(wal); try { - region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), - TEST_UTIL.getConfiguration(), rss, null); + region = + HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null); - verify(wal, times(1)).append((HTableDescriptor)any(), (HRegionInfo)any(), (WALKey)any() - , editCaptor.capture(), anyBoolean()); + verify(wal, times(1)).append((HTableDescriptor) any(), (HRegionInfo) any(), (WALKey) any(), + editCaptor.capture(), anyBoolean()); WALEdit edit = editCaptor.getValue(); assertNotNull(edit); @@ -6029,8 +6033,8 @@ assertEquals(RegionEventDescriptor.EventType.REGION_OPEN, desc.getEventType()); assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName())); - assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(), - hri.getEncodedNameAsBytes())); + assertTrue( + Bytes.equals(desc.getEncodedRegionName().toByteArray(), hri.getEncodedNameAsBytes())); assertTrue(desc.getLogSequenceNumber() > 0); assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer())); assertEquals(2, desc.getStoresCount()); @@ -6054,10 +6058,11 @@ // Helper for test testOpenRegionWrittenToWALForLogReplay static class HRegionWithSeqId extends HRegion { public HRegionWithSeqId(final Path tableDir, final WAL wal, final FileSystem fs, - final Configuration confParam, final HRegionInfo regionInfo, - final HTableDescriptor htd, final RegionServerServices rsServices) { + final Configuration confParam, final HRegionInfo regionInfo, final HTableDescriptor htd, + final RegionServerServices rsServices) { super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices); } + @Override protected long getNextSequenceId(WAL wal) throws IOException { return 42; @@ -6079,30 +6084,29 @@ Store store = region.getStore(fam1); Collection storefiles = store.getStorefiles(); for (StoreFile sf : storefiles) { - assertFalse("Tags should not be present " - ,sf.getReader().getHFileReader().getFileContext().isIncludesTags()); + assertFalse("Tags should not be present ", + sf.getReader().getHFileReader().getFileContext().isIncludesTags()); } } @Test public void testOpenRegionWrittenToWALForLogReplay() throws Exception { // similar to the above test but with distributed log replay - final ServerName serverName = ServerName.valueOf("testOpenRegionWrittenToWALForLogReplay", - 100, 42); + final ServerName serverName = + ServerName.valueOf("testOpenRegionWrittenToWALForLogReplay", 100, 42); final RegionServerServices rss = 
spy(TEST_UTIL.createMockRegionServerService(serverName)); - HTableDescriptor htd - = new HTableDescriptor(TableName.valueOf("testOpenRegionWrittenToWALForLogReplay")); + HTableDescriptor htd = + new HTableDescriptor(TableName.valueOf("testOpenRegionWrittenToWALForLogReplay")); htd.addFamily(new HColumnDescriptor(fam1)); htd.addFamily(new HColumnDescriptor(fam2)); - HRegionInfo hri = new HRegionInfo(htd.getTableName(), - HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY); + HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY); // open the region w/o rss and wal and flush some files - HRegion region = - HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL - .getConfiguration(), htd); + HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), + TEST_UTIL.getConfiguration(), htd); assertNotNull(region); // create a file in fam1 for the region before opening in OpenRegionHandler @@ -6124,21 +6128,20 @@ try { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); conf.set(HConstants.REGION_IMPL, HRegionWithSeqId.class.getName()); - region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), - conf, rss, null); + region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), conf, rss, null); // verify that we have not appended region open event to WAL because this region is still // recovering - verify(wal, times(0)).append((HTableDescriptor)any(), (HRegionInfo)any(), (WALKey)any() - , editCaptor.capture(), anyBoolean()); + verify(wal, times(0)).append((HTableDescriptor) any(), (HRegionInfo) any(), (WALKey) any(), + editCaptor.capture(), anyBoolean()); // not put the region out of recovering state new FinishRegionRecoveringHandler(rss, region.getRegionInfo().getEncodedName(), "/foo") - .prepare().process(); + .prepare().process(); // now we should have put the entry - verify(wal, times(1)).append((HTableDescriptor)any(), (HRegionInfo)any(), (WALKey)any() - , editCaptor.capture(), anyBoolean()); + verify(wal, times(1)).append((HTableDescriptor) any(), (HRegionInfo) any(), (WALKey) any(), + editCaptor.capture(), anyBoolean()); WALEdit edit = editCaptor.getValue(); assertNotNull(edit); @@ -6151,8 +6154,8 @@ assertEquals(RegionEventDescriptor.EventType.REGION_OPEN, desc.getEventType()); assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName())); - assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(), - hri.getEncodedNameAsBytes())); + assertTrue( + Bytes.equals(desc.getEncodedRegionName().toByteArray(), hri.getEncodedNameAsBytes())); assertTrue(desc.getLogSequenceNumber() > 0); assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer())); assertEquals(2, desc.getStoresCount()); @@ -6174,25 +6177,26 @@ } /** - * Utility method to setup a WAL mock. - * Needs to do the bit where we close latch on the WALKey on append else test hangs. + * Utility method to setup a WAL mock. Needs to do the bit where we close latch on the WALKey on + * append else test hangs. * @return * @throws IOException */ private WAL mockWAL() throws IOException { WAL wal = mock(WAL.class); - Mockito.when(wal.append((HTableDescriptor)Mockito.any(), (HRegionInfo)Mockito.any(), - (WALKey)Mockito.any(), (WALEdit)Mockito.any(), Mockito.anyBoolean())). 
- thenAnswer(new Answer() { - @Override - public Long answer(InvocationOnMock invocation) throws Throwable { - WALKey key = invocation.getArgumentAt(2, WALKey.class); - MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin(); - key.setWriteEntry(we); - return 1L; - } + Mockito + .when(wal.append((HTableDescriptor) Mockito.any(), (HRegionInfo) Mockito.any(), + (WALKey) Mockito.any(), (WALEdit) Mockito.any(), Mockito.anyBoolean())) + .thenAnswer(new Answer() { + @Override + public Long answer(InvocationOnMock invocation) throws Throwable { + WALKey key = invocation.getArgumentAt(2, WALKey.class); + MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin(); + key.setWriteEntry(we); + return 1L; + } - }); + }); return wal; } @@ -6201,13 +6205,12 @@ final ServerName serverName = ServerName.valueOf("testCloseRegionWrittenToWAL", 100, 42); final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName)); - HTableDescriptor htd - = new HTableDescriptor(TableName.valueOf("testOpenRegionWrittenToWAL")); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testOpenRegionWrittenToWAL")); htd.addFamily(new HColumnDescriptor(fam1)); htd.addFamily(new HColumnDescriptor(fam2)); - final HRegionInfo hri = new HRegionInfo(htd.getTableName(), - HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY); + final HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY); ArgumentCaptor editCaptor = ArgumentCaptor.forClass(WALEdit.class); @@ -6215,16 +6218,15 @@ WAL wal = mockWAL(); when(rss.getWAL((HRegionInfo) any())).thenReturn(wal); - // open a region first so that it can be closed later - region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), - TEST_UTIL.getConfiguration(), rss, null); + region = + HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null); // close the region region.close(false); // 2 times, one for region open, the other close region - verify(wal, times(2)).append((HTableDescriptor)any(), (HRegionInfo)any(), (WALKey)any(), + verify(wal, times(2)).append((HTableDescriptor) any(), (HRegionInfo) any(), (WALKey) any(), editCaptor.capture(), anyBoolean()); WALEdit edit = editCaptor.getAllValues().get(1); @@ -6238,8 +6240,8 @@ assertEquals(RegionEventDescriptor.EventType.REGION_CLOSE, desc.getEventType()); assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName())); - assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(), - hri.getEncodedNameAsBytes())); + assertTrue( + Bytes.equals(desc.getEncodedRegionName().toByteArray(), hri.getEncodedNameAsBytes())); assertTrue(desc.getLogSequenceNumber() > 0); assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer())); assertEquals(2, desc.getStoresCount()); @@ -6258,13 +6260,13 @@ /** * Test RegionTooBusyException thrown when region is busy */ - @Test (timeout=24000) + @Test(timeout = 24000) public void testRegionTooBusy() throws IOException { String method = "testRegionTooBusy"; TableName tableName = TableName.valueOf(method); byte[] family = Bytes.toBytes("family"); - long defaultBusyWaitDuration = CONF.getLong("hbase.busy.wait.duration", - HRegion.DEFAULT_BUSY_WAIT_DURATION); + long defaultBusyWaitDuration = + CONF.getLong("hbase.busy.wait.duration", HRegion.DEFAULT_BUSY_WAIT_DURATION); CONF.setLong("hbase.busy.wait.duration", 1000); region = initHRegion(tableName, method, CONF, family); final AtomicBoolean stopped = new AtomicBoolean(true); @@ -6327,24 +6329,25 @@ 
Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS); - HRegion region = HBaseTestingUtility.createRegionAndWAL(new HRegionInfo(htd.getTableName(), - HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY), - TEST_UTIL.getDataTestDir(), conf, htd); + HRegion region = HBaseTestingUtility.createRegionAndWAL( + new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY), + TEST_UTIL.getDataTestDir(), conf, htd); assertNotNull(region); try { long now = EnvironmentEdgeManager.currentTime(); // Add a cell that will expire in 5 seconds via cell TTL - region.put(new Put(row).add(new KeyValue(row, fam1, q1, now, - HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] { - // TTL tags specify ts in milliseconds - new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) } ))); + region.put(new Put(row).add(new KeyValue(row, fam1, q1, now, HConstants.EMPTY_BYTE_ARRAY, + new ArrayBackedTag[] { + // TTL tags specify ts in milliseconds + new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) }))); // Add a cell that will expire after 10 seconds via family setting region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY)); // Add a cell that will expire in 15 seconds via cell TTL - region.put(new Put(row).add(new KeyValue(row, fam1, q3, now + 10000 - 1, - HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] { - // TTL tags specify ts in milliseconds - new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) } ))); + region.put( + new Put(row).add(new KeyValue(row, fam1, q3, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY, + new ArrayBackedTag[] { + // TTL tags specify ts in milliseconds + new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) }))); // Add a cell that will expire in 20 seconds via family setting region.put(new Put(row).addColumn(fam1, q4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY)); @@ -6483,12 +6486,12 @@ c = result.getColumnLatestCell(fam1, qual1); assertEquals(c.getTimestamp(), 10L); - byte[] expected = new byte[qual1.length*2]; + byte[] expected = new byte[qual1.length * 2]; System.arraycopy(qual1, 0, expected, 0, qual1.length); System.arraycopy(qual1, 0, expected, qual1.length, qual1.length); - assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), - expected, 0, expected.length)); + assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), expected, 0, + expected.length)); } @Test @@ -6517,8 +6520,8 @@ c = result.getColumnLatestCell(fam1, qual1); assertEquals(c.getTimestamp(), 10L); - assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), - qual2, 0, qual2.length)); + assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), qual2, 0, + qual2.length)); } @Test(timeout = 60000) @@ -6531,17 +6534,14 @@ CONF.setInt("hbase.rowlock.wait.duration", 1000); final HRegion region = initHRegion(tableName, a, c, name.getMethodName(), CONF, false, fam1); - Mutation[] mutations = new Mutation[] { - new Put(a).addImmutable(fam1, null, null), + Mutation[] mutations = new Mutation[] { new Put(a).addImmutable(fam1, null, null), new Put(c).addImmutable(fam1, null, null), // this is outside the region boundary - new Put(b).addImmutable(fam1, null, null), - }; + new Put(b).addImmutable(fam1, null, null), }; OperationStatus[] status = region.batchMutate(mutations); assertEquals(status[0].getOperationStatusCode(), 
OperationStatusCode.SUCCESS); assertEquals(status[1].getOperationStatusCode(), OperationStatusCode.SANITY_CHECK_FAILURE); assertEquals(status[2].getOperationStatusCode(), OperationStatusCode.SUCCESS); - // test with a row lock held for a long time final CountDownLatch obtainedRowLock = new CountDownLatch(1); @@ -6564,10 +6564,8 @@ Future f2 = exec.submit(new Callable() { @Override public Void call() throws Exception { - Mutation[] mutations = new Mutation[] { - new Put(a).addImmutable(fam1, null, null), - new Put(b).addImmutable(fam1, null, null), - }; + Mutation[] mutations = new Mutation[] { new Put(a).addImmutable(fam1, null, null), + new Put(b).addImmutable(fam1, null, null), }; // this will wait for the row lock, and it will eventually succeed OperationStatus[] status = region.batchMutate(mutations); @@ -6606,19 +6604,18 @@ p.addColumn(fam1, qual1, qual2); RowMutations rm = new RowMutations(row); rm.add(p); - region.checkAndRowMutate(row, fam1, qual1, CompareOp.EQUAL, new BinaryComparator(qual1), - rm, false); + region.checkAndRowMutate(row, fam1, qual1, CompareOp.EQUAL, new BinaryComparator(qual1), rm, + false); result = region.get(new Get(row)); c = result.getColumnLatestCell(fam1, qual1); assertEquals(c.getTimestamp(), 10L); - assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), - qual2, 0, qual2.length)); + assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), qual2, 0, + qual2.length)); } - static HRegion initHRegion(TableName tableName, String callingMethod, - byte[]... families) throws IOException { - return initHRegion(tableName, callingMethod, HBaseConfiguration.create(), - families); + static HRegion initHRegion(TableName tableName, String callingMethod, byte[]... families) + throws IOException { + return initHRegion(tableName, callingMethod, HBaseConfiguration.create(), families); } }
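For reviewers skimming what is largely a mechanical reformatting, a few standalone sketches of the patterns the touched tests rely on follow. All of them use only APIs that appear in this patch, and every class, method, and variable name in the sketches is illustrative rather than part of the change. First, the load-then-verify pattern behind the putData overloads: write rows named after their index, then read them back with Get and compare values.

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutVerifySketch {

  /** Write numRows rows named "startRow", "startRow+1", ... into every given family. */
  static void loadRows(HRegion region, int startRow, int numRows, byte[] qf, byte[]... families)
      throws IOException {
    for (int i = startRow; i < startRow + numRows; i++) {
      Put put = new Put(Bytes.toBytes("" + i));
      // Tests typically skip the WAL for speed; the production default is SYNC_WAL.
      put.setDurability(Durability.SKIP_WAL);
      for (byte[] family : families) {
        put.addColumn(family, qf, Bytes.toBytes("" + i));
      }
      region.put(put);
    }
  }

  /** Read the same rows back and check that each family holds the row name as its value. */
  static void checkRows(HRegion region, int startRow, int numRows, byte[] qf, byte[]... families)
      throws IOException {
    for (int i = startRow; i < startRow + numRows; i++) {
      byte[] row = Bytes.toBytes("" + i);
      Result result = region.get(new Get(row));
      for (byte[] family : families) {
        Cell cell = result.getColumnLatestCell(family, qf);
        assertNotNull("no cell for row " + i, cell);
        assertTrue(Bytes.equals(CellUtil.cloneValue(cell), row));
      }
    }
  }
}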
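The reversed-scan tests all follow one shape: the row handed to the Scan constructor is the upper bound, setReversed(true) flips the direction, and InternalScanner.next(List<Cell>) returns one row per call unless a batch limit makes wide rows span several calls. A minimal sketch of that loop:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.util.Bytes;

public final class ReverseScanSketch {

  /** Collect row keys in the order a reversed scan returns them (highest first). */
  static List<String> rowsInReverse(HRegion region, byte[] startRow) throws IOException {
    Scan scan = new Scan(startRow); // for a reversed scan this is the highest row to return
    scan.setReversed(true);
    InternalScanner scanner = region.getScanner(scan);
    List<String> rows = new ArrayList<String>();
    try {
      List<Cell> currRow = new ArrayList<Cell>();
      boolean more;
      do {
        more = scanner.next(currRow);
        if (!currRow.isEmpty()) {
          rows.add(Bytes.toString(CellUtil.cloneRow(currRow.get(0))));
        }
        currRow.clear();
      } while (more);
    } finally {
      scanner.close();
    }
    return rows;
  }
}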
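The Javadoc on mockWAL() explains why a plain WAL mock is not enough: append() must complete the latch on the WALKey or the appending code hangs waiting for a sequence id. Lifted out of the diff context, the helper amounts to the following, assuming Mockito 1.x (getArgumentAt) and the five-argument WAL.append signature used throughout this patch:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALKey;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public final class MockWalSketch {

  /**
   * A WAL mock whose append() completes the MVCC write entry on the WALKey.
   * Against a plain mock the entry is never set, and anything waiting on the
   * key's sequence-id latch blocks until the test times out.
   */
  static WAL newMockWal() throws IOException {
    WAL wal = mock(WAL.class);
    when(wal.append((HTableDescriptor) Mockito.any(), (HRegionInfo) Mockito.any(),
        (WALKey) Mockito.any(), (WALEdit) Mockito.any(), Mockito.anyBoolean()))
            .thenAnswer(new Answer<Long>() {
              @Override
              public Long answer(InvocationOnMock invocation) throws Throwable {
                WALKey key = invocation.getArgumentAt(2, WALKey.class);
                // Start an MVCC transaction and hand its write entry to the key so the
                // caller waiting on the sequence id can move on.
                MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin();
                key.setWriteEntry(we);
                return 1L; // pretend sequence id
              }
            });
    return wal;
  }
}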
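The cell-TTL hunk mixes per-cell TTLs, carried as a tag on the KeyValue, with a family-level TTL. The per-cell variant is the less familiar one; a sketch of just that piece, using the same KeyValue constructor and TTL tag type as the test (and, as in the test, assuming an HFile format version that supports tags):

import java.io.IOException;

import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public final class CellTtlSketch {

  /** Build a Put whose single (empty-valued) cell carries a per-cell TTL, in milliseconds. */
  static Put putWithCellTtl(byte[] row, byte[] family, byte[] qualifier, long ttlMs)
      throws IOException {
    long now = EnvironmentEdgeManager.currentTime();
    // The TTL tag payload is the time-to-live in milliseconds, encoded as a long.
    KeyValue kv = new KeyValue(row, family, qualifier, now, HConstants.EMPTY_BYTE_ARRAY,
        new ArrayBackedTag[] { new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttlMs)) });
    return new Put(row).add(kv);
  }
}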
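The batch-mutation hunk asserts that a Put whose row falls outside the region's key range does not fail the whole batch: in-range operations succeed and the stray one is reported as SANITY_CHECK_FAILURE. Condensed into a sketch of that contract:

import java.io.IOException;

import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.OperationStatus;

public final class BatchBoundarySketch {

  /**
   * Submit one in-range and one out-of-range Put in a single batch. Per the test's
   * expectation, the result array reports SUCCESS for the first and
   * SANITY_CHECK_FAILURE for the second, rather than throwing for the whole batch.
   */
  static boolean outOfRangePutIsRejected(HRegion region, byte[] family, byte[] inRangeRow,
      byte[] outOfRangeRow) throws IOException {
    Mutation[] mutations = new Mutation[] {
        new Put(inRangeRow).addImmutable(family, null, null),
        new Put(outOfRangeRow).addImmutable(family, null, null) };
    OperationStatus[] status = region.batchMutate(mutations);
    return status[0].getOperationStatusCode() == OperationStatusCode.SUCCESS
        && status[1].getOperationStatusCode() == OperationStatusCode.SANITY_CHECK_FAILURE;
  }
}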
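Finally, the checkAndRowMutate hunk drives the region-level compare-then-mutate call directly. A sketch built from the same arguments the test passes, with the trailing boolean forwarded as false exactly as the test does:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.regionserver.HRegion;

public final class CheckAndRowMutateSketch {

  /**
   * Apply the RowMutations only if the current value at (family, qualifier) equals
   * expected; returns whether the mutations were applied.
   */
  static boolean putIfEquals(HRegion region, byte[] row, byte[] family, byte[] qualifier,
      byte[] expected, byte[] newValue) throws IOException {
    Put put = new Put(row);
    put.addColumn(family, qualifier, newValue);
    RowMutations mutations = new RowMutations(row);
    mutations.add(put);
    // The final boolean is passed through as false, matching the test's call.
    return region.checkAndRowMutate(row, family, qualifier, CompareOp.EQUAL,
        new BinaryComparator(expected), mutations, false);
  }
}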