diff --git conf/hbase-env.sh conf/hbase-env.sh
index da53a27..2874a58 100644
--- conf/hbase-env.sh
+++ conf/hbase-env.sh
@@ -56,7 +56,7 @@ export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
# Uncomment below if you intend to use the EXPERIMENTAL off heap cache.
-# export HBASE_OPTS="$HBASE_OPTS -XX:MaxDirectMemorySize="
+export HBASE_OPTS="$HBASE_OPTS -XX:MaxDirectMemorySize=2g"
# Set hbase.offheapcache.percentage in hbase-site.xml to a nonzero value.
diff --git conf/hbase-site.xml conf/hbase-site.xml
index af4c300..d2c4bbb 100644
--- conf/hbase-site.xml
+++ conf/hbase-site.xml
@@ -22,4 +22,39 @@
*/
-->
* NOTE:- This option might result in data loss if the region server crashes
- * before these deferred edits in memory are flushed onto the filesystem.
+ * before these deferred edits in memory are flushed onto the filesystem.
*
- * This is not an absolute value and might vary. Assume that a single row exceeds
+ * This is not an absolute value and might vary. Assume that a single row exceeds
* the maxFileSize then the storeFileSize will be greater than maxFileSize since
- * a single row cannot be split across multiple regions
+ * a single row cannot be split across multiple regions
*
diff --git src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
index 1f1bc2b..23d7077 100644
--- src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
+++ src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
@@ -26,7 +26,6 @@ import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
-import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -49,7 +48,7 @@ import org.apache.hadoop.hbase.util.Writables;
*
* Note that during concurrent region splits, the scanner might not see
* META changes across rows (for parent and daughter entries) consistently.
- * see HBASE-5986, and {@link BlockingMetaScannerVisitor} for details. -ROOT- or
- * .META. , is the table is read only, the maximum size of the memstore,
+ * all the column families, is the table a catalog table, -ROOT- or
+ * .META. , is the table read only, the maximum size of the memstore,
* when the region split should occur, coprocessors associated with it etc...
*/
public class HTableDescriptor implements WritableComparable .META. or -ROOT-
- *
+ *
* @see #isMetaRegion()
*/
public static final String IS_META = "IS_META";
@@ -130,7 +129,7 @@ public class HTableDescriptor implements WritableComparable -ROOT- region.
- *
- * @return true if this is a -ROOT- region
+ *
+ * @return true if this is a -ROOT- region
*/
public boolean isRootRegion() {
if (this.root == null) {
@@ -285,11 +284,11 @@ public class HTableDescriptor implements WritableComparable -ROOT- region. This is used internally by the
- * HTableDescriptor constructors
- *
- * @param isRoot true if this is the -ROOT- region
+ * INTERNAL Used to denote if the current table represents
+ * -ROOT- region. This is used internally by the
+ * HTableDescriptor constructors
+ *
+ * @param isRoot true if this is the -ROOT- region
*/
protected void setRootRegion(boolean isRoot) {
// TODO: Make the value a boolean rather than String of boolean.
@@ -298,10 +297,10 @@ public class HTableDescriptor implements WritableComparable -ROOT- or .META.
- * region.
- *
- * @return true if this is either a -ROOT- or .META.
- * region
+ * region.
+ *
+ * @return true if this is either a -ROOT- or .META.
+ * region
*/
public boolean isMetaRegion() {
if (this.meta == null) {
@@ -326,31 +325,31 @@ public class HTableDescriptor implements WritableComparable -ROOT- or .META. region. This is used
- * internally by the HTableDescriptor constructors
- *
- * @param isMeta true if its either -ROOT- or
- * .META. region
+ * INTERNAL Used to denote if the current table represents
+ * -ROOT- or .META. region. This is used
+ * internally by the HTableDescriptor constructors
+ *
+ * @param isMeta true if its either -ROOT- or
+ * .META. region
*/
protected void setMetaRegion(boolean isMeta) {
values.put(IS_META_KEY, isMeta? TRUE: FALSE);
}
- /**
- * Checks if the table is a .META. table
- *
+ /**
+ * Checks if the table is a .META. table
+ *
* @return true if table is .META. region.
*/
public boolean isMetaTable() {
return isMetaRegion() && !isRootRegion();
}
-
+
/**
- * Checks of the tableName being passed represents either
+   * Checks if the tableName being passed represents either
* -ROOT- or .META.
- *
- * @return true if a tablesName is either -ROOT-
+ *
+   * @return true if the tableName is either -ROOT-
* or .META.
*/
public static boolean isMetaTable(final byte [] tableName) {
@@ -379,7 +378,7 @@ public class HTableDescriptor implements WritableComparable hbase.regionserver.optionallogflushinterval and not flushed
* for every edit.
*
* Under some scenarios, the ROOT region can be opened twice, so it seemed online
* in two regionserver at the same time.
- * If the ROOT region has been assigned, so the operation can be canceled.
+   * If the ROOT region has been assigned, the operation can be canceled.
* @throws InterruptedException
* @throws IOException
* @throws KeeperException
@@ -151,7 +151,7 @@ public class ServerShutdownHandler extends EventHandler {
}
}
}
-
+
/**
* @return True if the server we are processing was carrying Also has utility for keeping up the table descriptors tableinfo file.
* The table schema file is kept under the table directory in the filesystem.
* It has a {@link #TABLEINFO_NAME} prefix and then a suffix that is the
@@ -157,7 +157,7 @@ public class FSTableDescriptors implements TableDescriptors {
return cachedtdm.getTableDescriptor();
}
}
-
+
TableDescriptorModtime tdmt = null;
try {
tdmt = getTableDescriptorModtime(this.fs, this.rootdir, tablename);
@@ -168,7 +168,7 @@ public class FSTableDescriptors implements TableDescriptors {
LOG.debug("Exception during readTableDecriptor. Current table name = "
+ tablename, ioe);
}
-
+
if (tdmt == null) {
LOG.warn("The following folder is in HBase's root directory and " +
"doesn't contain a table descriptor, " +
@@ -235,7 +235,7 @@ public class FSTableDescriptors implements TableDescriptors {
/**
* Checks if -ROOT-
*/
@@ -441,6 +441,8 @@ public class ServerShutdownHandler extends EventHandler {
if (MetaReader.getRegion(catalogTracker, hri.getRegionName()) == null) {
return false;
}
+      // After HBASE-7721, we do not need this, but we keep it since in 0.94 during
+      // a rolling restart this might still be needed.
fixupDaughters(result, assignmentManager, catalogTracker);
return false;
}
@@ -508,7 +510,7 @@ public class ServerShutdownHandler extends EventHandler {
* Daughter could have been split over on regionserver before a run of the
* catalogJanitor had chance to clear reference from parent.
* @param daughter Daughter region to search for.
- * @throws IOException
+ * @throws IOException
*/
private static boolean isDaughterMissing(final CatalogTracker catalogTracker,
final HRegionInfo daughter) throws IOException {
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 1d42fea..7f44125 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -66,12 +66,12 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.HealthCheckChore;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HealthCheckChore;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterAddressTracker;
import org.apache.hadoop.hbase.NotServingRegionException;
@@ -172,7 +172,6 @@ import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.KeeperException;
import org.codehaus.jackson.map.ObjectMapper;
-import org.joda.time.field.MillisDurationField;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
@@ -246,7 +245,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
RpcServer rpcServer;
// Server to handle client requests.
- private HBaseServer server;
+ private HBaseServer server;
private final InetSocketAddress isa;
@@ -264,7 +263,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
/** region server process name */
public static final String REGIONSERVER = "regionserver";
-
+
/** region server configuration name */
public static final String REGIONSERVER_CONF = "regionserver_conf";
@@ -1515,10 +1514,10 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
.getCompactionQueueSize());
this.metrics.flushQueueSize.set(cacheFlusher
.getFlushQueueSize());
- this.metrics.updatesBlockedSeconds.update(updatesBlockedMs > 0 ?
+ this.metrics.updatesBlockedSeconds.update(updatesBlockedMs > 0 ?
updatesBlockedMs/1000: 0);
final long updatesBlockedMsHigherWater = cacheFlusher.getUpdatesBlockedMsHighWater().get();
- this.metrics.updatesBlockedSecondsHighWater.update(updatesBlockedMsHigherWater > 0 ?
+ this.metrics.updatesBlockedSecondsHighWater.update(updatesBlockedMsHigherWater > 0 ?
updatesBlockedMsHigherWater/1000: 0);
BlockCache blockCache = cacheConfig.getBlockCache();
@@ -1637,7 +1636,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
this.splitLogWorker = new SplitLogWorker(this.zooKeeper,
this.getConfiguration(), this.getServerName().toString());
splitLogWorker.start();
-
+
}
/**
@@ -1723,12 +1722,10 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
}
@Override
- public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct,
- final boolean daughter)
+ public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
throws KeeperException, IOException {
checkOpen();
- LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString() +
- ", daughter=" + daughter);
+ LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString());
// Do checks to see if we need to compact (references or too many files)
for (Store s : r.getStores().values()) {
if (s.hasReferences() || s.needsCompaction()) {
@@ -1743,17 +1740,11 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
MetaEditor.updateMetaLocation(ct, r.getRegionInfo(),
this.serverNameFromMasterPOV);
} else {
- if (daughter) {
- // If daughter of a split, update whole row, not just location.
- MetaEditor.addDaughter(ct, r.getRegionInfo(),
- this.serverNameFromMasterPOV);
- } else {
- MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
- this.serverNameFromMasterPOV);
- }
+ MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
+ this.serverNameFromMasterPOV);
}
LOG.info("Done with post open deploy task for region=" +
- r.getRegionNameAsString() + ", daughter=" + daughter);
+ r.getRegionNameAsString());
}
@@ -2316,7 +2307,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
int nCF = columnFamilies.size();
return region.getStoreFileList(columnFamilies.toArray(new byte[nCF][]));
}
-
+
/**
* Flushes the given region
*/
@@ -2355,7 +2346,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
}
return region.getLastFlushTime();
}
-
+
/**
*
* @param regionName
@@ -3043,7 +3034,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
*/
protected boolean closeRegion(HRegionInfo region, final boolean abort,
final boolean zk, final int versionOfClosingNode) {
-
+
HRegion actualRegion = this.getFromOnlineRegions(region.getEncodedName());
if (actualRegion != null && actualRegion.getCoprocessorHost() != null) {
try {
@@ -3282,7 +3273,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
public boolean removeFromOnlineRegions(final String encodedName) {
HRegion toReturn = null;
toReturn = this.onlineRegions.remove(encodedName);
-
+
//Clear all of the dynamic metrics as they are now probably useless.
//This is a clear because dynamic metrics could include metrics per cf and
//per hfile. Figuring out which cfs, hfiles, and regions are still relevant to
@@ -3601,7 +3592,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
try {
if (action instanceof Delete || action instanceof Put) {
- mutations.add(a);
+ mutations.add(a);
} else if (action instanceof Get) {
response.add(regionName, originalIndex,
get(regionName, (Get)action));
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index 1c66704..86774cd 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -59,15 +59,13 @@ public interface RegionServerServices extends OnlineRegions {
/**
* Tasks to perform after region open to complete deploy of region on
* regionserver
- *
+ *
* @param r Region to open.
* @param ct Instance of {@link CatalogTracker}
- * @param daughter True if this is daughter of a split
* @throws KeeperException
* @throws IOException
*/
- public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct,
- final boolean daughter)
+ public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
throws KeeperException, IOException;
/**
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
index 058393d..51fd7c8 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
@@ -202,7 +202,7 @@ public class SplitTransaction {
private static IOException closedByOtherException = new IOException(
"Failed to close region: already closed by another thread");
-
+
/**
* Prepare the regions and region files.
* @param server Hosting server instance. Can be null when testing (won't try
@@ -259,7 +259,7 @@ public class SplitTransaction {
}
createSplitDir(this.parent.getFilesystem(), this.splitdir);
this.journal.add(JournalEntry.CREATE_SPLIT_DIR);
-
+
List.tableinfo exists for given table
- *
+ *
* @param fs file system
* @param rootdir root directory of HBase installation
* @param tableName name of table
@@ -473,7 +473,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @param tableDir
* @param status
* @return Descriptor file or null if we failed write.
- * @throws IOException
+ * @throws IOException
*/
private static Path writeTableDescriptor(final FileSystem fs,
final HTableDescriptor hTableDescriptor, final Path tableDir,
@@ -543,7 +543,7 @@ public class FSTableDescriptors implements TableDescriptors {
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
- *
+ *
* @param htableDescriptor
* @param conf
*/
@@ -557,7 +557,7 @@ public class FSTableDescriptors implements TableDescriptors {
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
- *
+ *
* @param htableDescriptor
* @param conf
* @param forceCreation True if we are to overwrite existing file.
@@ -587,7 +587,7 @@ public class FSTableDescriptors implements TableDescriptors {
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
- *
+ *
* @param fs
* @param htableDescriptor
* @param rootdir
@@ -620,8 +620,10 @@ public class FSTableDescriptors implements TableDescriptors {
LOG.info("Current tableInfoPath = " + status.getPath());
if (!forceCreation) {
if (fs.exists(status.getPath()) && status.getLen() > 0) {
- LOG.info("TableInfo already exists.. Skipping creation");
- return false;
+ if (getTableDescriptor(fs, status.getPath().getParent()).equals(htableDescriptor)) {
+ LOG.info("TableInfo already exists.. Skipping creation");
+ return false;
+ }
}
}
}
diff --git src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java
index 0e0b422..f701765 100644
--- src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java
+++ src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java
@@ -187,9 +187,7 @@ public class TestMasterCoprocessorExceptionWithAbort {
// (namely the part that shows the set of loaded coprocessors).
// In this test, there is only a single coprocessor (BuggyMasterObserver).
assertTrue(master.getLoadedCoprocessors().
- equals("[" +
- TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName() +
- "]"));
+ contains(TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName()));
CreateTableThread createTableThread = new CreateTableThread(UTIL);
diff --git src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java
index d7e0f65..77da489 100644
--- src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java
+++ src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java
@@ -20,11 +20,21 @@
package org.apache.hadoop.hbase.coprocessor;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
import java.io.IOException;
-import java.io.InterruptedIOException;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -36,8 +46,6 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import static org.junit.Assert.*;
-
/**
* Tests unhandled exceptions thrown by coprocessors running on master.
* Expected result is that the master will remove the buggy coprocessor from
@@ -174,7 +182,7 @@ public class TestMasterCoprocessorExceptionWithRemove {
// In this test, there is only a single coprocessor (BuggyMasterObserver).
String coprocessorName =
BuggyMasterObserver.class.getName();
- assertTrue(master.getLoadedCoprocessors().equals("[" + coprocessorName + "]"));
+ assertTrue(master.getLoadedCoprocessors().contains(coprocessorName));
HTableDescriptor htd1 = new HTableDescriptor(TEST_TABLE1);
htd1.addFamily(new HColumnDescriptor(TEST_FAMILY1));
@@ -202,7 +210,7 @@ public class TestMasterCoprocessorExceptionWithRemove {
masterTracker.masterZKNodeWasDeleted);
String loadedCoprocessors = master.getLoadedCoprocessors();
- assertTrue(loadedCoprocessors.equals("[" + coprocessorName + "]"));
+ assertTrue(loadedCoprocessors.contains(coprocessorName));
// Verify that BuggyMasterObserver has been removed due to its misbehavior
// by creating another table: should not have a problem this time.
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index 4e62a1f..3187d78 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -117,7 +117,7 @@ public class TestEndToEndSplitTransaction {
// 3. finish phase II
// note that this replicates some code from SplitTransaction
// 2nd daughter first
- server.postOpenDeployTasks(regions.getSecond(), server.getCatalogTracker(), true);
+ server.postOpenDeployTasks(regions.getSecond(), server.getCatalogTracker());
// Add to online regions
server.addToOnlineRegions(regions.getSecond());
// THIS is the crucial point:
@@ -127,7 +127,7 @@ public class TestEndToEndSplitTransaction {
assertTrue(test(con, tableName, lastRow, server));
// first daughter second
- server.postOpenDeployTasks(regions.getFirst(), server.getCatalogTracker(), true);
+ server.postOpenDeployTasks(regions.getFirst(), server.getCatalogTracker());
// Add to online regions
server.addToOnlineRegions(regions.getFirst());
assertTrue(test(con, tableName, firstRow, server));
diff --git src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java
index f115fb2..96c2c95 100644
--- src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java
+++ src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java
@@ -47,7 +47,7 @@ import org.apache.zookeeper.KeeperException;
public class MockRegionServerServices implements RegionServerServices {
private final Map