- PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowResult);
- HRegionInfo splitA = daughters.getFirst();
- HRegionInfo splitB = daughters.getSecond();
-
- HTable metaTable = getMetaTable();
- long start = System.currentTimeMillis();
- Result resultA = getRegionResultBlocking(metaTable, blockingTimeout,
- splitA.getRegionName());
- if (resultA != null) {
- processRow(resultA);
- daughterRegions.add(splitA.getRegionName());
- } else {
- throw new RegionOfflineException("Split daughter region " +
- splitA.getRegionNameAsString() + " cannot be found in META.");
- }
- long rem = blockingTimeout - (System.currentTimeMillis() - start);
-
- Result resultB = getRegionResultBlocking(metaTable, rem,
- splitB.getRegionName());
- if (resultB != null) {
- processRow(resultB);
- daughterRegions.add(splitB.getRegionName());
- } else {
- throw new RegionOfflineException("Split daughter region " +
- splitB.getRegionNameAsString() + " cannot be found in META.");
- }
- }
-
- return processRowInternal(rowResult);
- }
-
- private Result getRegionResultBlocking(HTable metaTable, long timeout, byte[] regionName)
- throws IOException {
- if (LOG.isDebugEnabled()) {
- LOG.debug("blocking until region is in META: " + Bytes.toStringBinary(regionName));
- }
- long start = System.currentTimeMillis();
- while (System.currentTimeMillis() - start < timeout) {
- Get get = new Get(regionName);
- Result result = metaTable.get(get);
- HRegionInfo info = getHRegionInfo(result);
- if (info != null) {
- return result;
- }
- try {
- Thread.sleep(10);
- } catch (InterruptedException ex) {
- Thread.currentThread().interrupt();
- break;
- }
+ //skip over offline and split regions
+ if (!(info.isOffline() || info.isSplit())) {
+ return processRowInternal(rowResult);
}
- return null;
+ return true;
}
}
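
The replacement visitor above drops the blocking wait for daughter rows: offline/split parents are simply skipped, since the atomic META update introduced below guarantees the daughters are already present. For context, a minimal sketch of how the resulting visit() plausibly reads once the removed code is gone (reconstructed from the surviving fragments above; not itself part of the patch):

    @Override
    public boolean visit(Result rowResult) throws IOException {
      HRegionInfo info = HRegionInfo.getHRegionInfo(rowResult);
      if (info == null) {
        return true; // row carries no region info; keep scanning
      }
      //skip over offline and split regions
      if (!(info.isOffline() || info.isSplit())) {
        return processRowInternal(rowResult);
      }
      return true; // split parent: the daughters appear as their own rows
    }
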
@@ -465,7 +383,7 @@ public class MetaScanner {
* does not guarantee ordered traversal of meta entries, and can block until the
* META entries for daughters are available during splits.
*/
- public static abstract class TableMetaScannerVisitor extends BlockingMetaScannerVisitor {
+ public static abstract class TableMetaScannerVisitor extends DefaultMetaScannerVisitor {
private byte[] tableName;
public TableMetaScannerVisitor(Configuration conf, byte[] tableName) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
index 90ccee6..8aab7c6 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
@@ -34,8 +34,15 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.MutateType;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiMutateRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutation.MultiRowMutationService;
import org.apache.hadoop.hbase.util.Bytes;
+import com.google.protobuf.ServiceException;
+
/**
* Writes region and assignment information to .META..
* TODO: Put MetaReader and MetaEditor together; doesn't make sense having
@@ -246,32 +253,6 @@ public class MetaEditor {
}
/**
- * Offline parent in meta.
- * Used when splitting.
- * @param catalogTracker
- * @param parent
- * @param a Split daughter region A
- * @param b Split daughter region B
- * @throws NotAllMetaRegionsOnlineException
- * @throws IOException
- */
- public static void offlineParentInMeta(CatalogTracker catalogTracker,
- HRegionInfo parent, final HRegionInfo a, final HRegionInfo b)
- throws NotAllMetaRegionsOnlineException, IOException {
- HRegionInfo copyOfParent = new HRegionInfo(parent);
- copyOfParent.setOffline(true);
- copyOfParent.setSplit(true);
- HTable meta = MetaReader.getMetaHTable(catalogTracker);
- try {
- addRegionToMeta(meta, copyOfParent, a, b);
- LOG.info("Offlined parent region " + parent.getRegionNameAsString() +
- " in META");
- } finally {
- meta.close();
- }
- }
-
- /**
* Adds a daughter region entry to meta.
* @param regionInfo the region to put
* @param sn the location of the region
@@ -291,6 +272,60 @@ public class MetaEditor {
}
/**
+ * Splits the region into two in an atomic operation. Offlines the parent
+ * region with the information that it is split into two, and also adds
+ * the daughter regions, with their location pre-set to the given server so
+ * that they appear assigned there even before they are opened (openSeqNum = 1).
+ * @param catalogTracker the catalog tracker
+ * @param parent the parent region which is split
+ * @param splitA Split daughter region A
+ * @param splitB Split daughter region B
+ * @param sn the server name of the region server hosting the parent region
+ */
+ public static void splitRegion(final CatalogTracker catalogTracker,
+ HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
+ ServerName sn) throws IOException {
+ HTable meta = MetaReader.getMetaHTable(catalogTracker);
+ try {
+ HRegionInfo copyOfParent = new HRegionInfo(parent);
+ copyOfParent.setOffline(true);
+ copyOfParent.setSplit(true);
+
+ //Put for parent
+ Put putParent = makePutFromRegionInfo(copyOfParent);
+ addDaughtersToPut(putParent, splitA, splitB);
+
+ //Puts for daughters
+ Put putA = makePutFromRegionInfo(splitA);
+ Put putB = makePutFromRegionInfo(splitB);
+
+ addLocation(putA, sn, 1); //these are new regions, openSeqNum = 1 is fine.
+ addLocation(putB, sn, 1);
+
+ byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
+ multiPut(meta, tableRow, putParent, putA, putB);
+ } finally {
+ meta.close(); // release the META table handle even if the mutation fails
+ }
+ }
+
+ /**
+ * Performs an atomic multi-Put operation against the given table.
+ */
+ private static void multiPut(HTable table, byte[] row, Put... puts) throws IOException {
+ CoprocessorRpcChannel channel = table.coprocessorService(row);
+ MultiMutateRequest.Builder mmrBuilder = MultiMutateRequest.newBuilder();
+ for (Put put : puts) {
+ mmrBuilder.addMutationRequest(ProtobufUtil.toMutate(MutateType.PUT, put));
+ }
+
+ MultiRowMutationService.BlockingInterface service =
+ MultiRowMutationService.newBlockingStub(channel);
+ try {
+ service.mutateRows(null, mmrBuilder.build());
+ } catch (ServiceException ex) {
+ throw ProtobufUtil.toIOException(ex);
+ }
+ }
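
The multiPut() helper above funnels all the Puts through the MultiRowMutationService coprocessor endpoint, so the parent and both daughter rows commit in a single atomic operation on the region hosting them. The same endpoint can be exercised from client code against any table that loads the endpoint implementation; a hedged sketch (table, family, and row names are hypothetical, and all rows must fall inside one region):

    HTable table = new HTable(conf, "t1"); // hypothetical table with the endpoint loaded
    Put p1 = new Put(Bytes.toBytes("row1"));
    p1.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v1"));
    Put p2 = new Put(Bytes.toBytes("row2")); // must be in the same region as row1
    p2.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v2"));

    CoprocessorRpcChannel channel = table.coprocessorService(Bytes.toBytes("row1"));
    MultiMutateRequest.Builder mmr = MultiMutateRequest.newBuilder();
    mmr.addMutationRequest(ProtobufUtil.toMutate(MutateType.PUT, p1));
    mmr.addMutationRequest(ProtobufUtil.toMutate(MutateType.PUT, p2));
    MultiRowMutationService.BlockingInterface service =
        MultiRowMutationService.newBlockingStub(channel);
    try {
      service.mutateRows(null, mmr.build()); // both rows commit, or neither does
    } catch (ServiceException ex) {
      throw ProtobufUtil.toIOException(ex);
    } finally {
      table.close();
    }
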
+
+
+ /**
* Updates the location of the specified META region in ROOT to be the
* specified server hostname and startcode.
*
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 9719012..8f9f158 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -27,7 +27,6 @@ import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -752,10 +751,6 @@ Server {
this.balancer.setClusterStatus(getClusterStatus());
- // Fixing up missing daughters if any
- status.setStatus("Fixing up missing daughters");
- fixupDaughters(status);
-
if (!masterRecovery) {
// Start balancer and meta catalog janitor after meta and regions have
// been assigned.
@@ -943,41 +938,6 @@ Server {
}
}
- void fixupDaughters(final MonitoredTask status) throws IOException {
- final Map<HRegionInfo, Result> offlineSplitParents =
- new HashMap<HRegionInfo, Result>();
- // This visitor collects offline split parents in the .META. table
- MetaReader.Visitor visitor = new MetaReader.Visitor() {
- @Override
- public boolean visit(Result r) throws IOException {
- if (r == null || r.isEmpty()) return true;
- HRegionInfo info =
- HRegionInfo.getHRegionInfo(r);
- if (info == null) return true; // Keep scanning
- if (info.isOffline() && info.isSplit()) {
- offlineSplitParents.put(info, r);
- }
- // Returning true means "keep scanning"
- return true;
- }
- };
- // Run full scan of .META. catalog table passing in our custom visitor
- MetaReader.fullScan(this.catalogTracker, visitor);
- // Now work on our list of found parents. See if any we can clean up.
- int fixups = 0;
- for (Map.Entry<HRegionInfo, Result> e : offlineSplitParents.entrySet()) {
- ServerName sn = HRegionInfo.getServerName(e.getValue());
- if (!serverManager.isServerDead(sn)) { // Otherwise, let SSH take care of it
- fixups += ServerShutdownHandler.fixupDaughters(
- e.getValue(), assignmentManager, catalogTracker);
- }
- }
- if (fixups != 0) {
- LOG.info("Scanned the catalog and fixed up " + fixups +
- " missing daughter region(s)");
- }
- }
-
/**
* Split a server's log and expire it if we find it is one of the online
* servers.
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 5ee7575..c81c2e4 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -445,7 +445,8 @@ public class MasterFileSystem {
bootstrap(rd, c);
}
- // Create tableinfo-s for ROOT and META if not already there.
+ // Create tableinfo-s for ROOT and META if not already there. This also updates the
+ // descriptors if they are from older versions.
FSTableDescriptors.createTableDescriptor(fs, rd, HTableDescriptor.ROOT_TABLEDESC, false);
FSTableDescriptors.createTableDescriptor(fs, rd, HTableDescriptor.META_TABLEDESC, false);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
index fdd5f96..f663120 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
@@ -27,12 +27,10 @@ import java.util.NavigableMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.executor.EventHandler;
@@ -43,8 +41,6 @@ import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;
@@ -310,12 +306,9 @@ public class ServerShutdownHandler extends EventHandler {
return false;
}
if (hri.isOffline() && hri.isSplit()) {
- LOG.debug("Offlined and split region " + hri.getRegionNameAsString() +
- "; checking daughter presence");
- if (MetaReader.getRegion(catalogTracker, hri.getRegionName()) == null) {
- return false;
- }
- fixupDaughters(result, assignmentManager, catalogTracker);
+ //HBASE-7721: Split parent and daughters are inserted into META as an atomic operation.
+ //If the meta scanner saw the parent split, then it should see the daughters as assigned
+ //to the dead server. We don't have to do anything.
return false;
}
boolean disabling = assignmentManager.getZKTable().isDisablingTable(
@@ -327,124 +320,4 @@ public class ServerShutdownHandler extends EventHandler {
}
return true;
}
-
- /**
- * Check that daughter regions are up in .META. and if not, add them.
- * @param result The contents of the parent row in .META.
- * @param assignmentManager
- * @param catalogTracker
- * @return the number of daughters missing and fixed
- * @throws IOException
- */
- public static int fixupDaughters(final Result result,
- final AssignmentManager assignmentManager,
- final CatalogTracker catalogTracker)
- throws IOException {
- PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(result);
- int fixedA = fixupDaughter(result, daughters.getFirst(),
- assignmentManager, catalogTracker);
- int fixedB = fixupDaughter(result, daughters.getSecond(),
- assignmentManager, catalogTracker);
- return fixedA + fixedB;
- }
-
- /**
- * Check individual daughter is up in .META.; fixup if its not.
- * @param result The contents of the parent row in .META. - not used
- * @param daughter Which daughter to check for.
- * @return 1 if the daughter is missing and fixed. Otherwise 0
- * @throws IOException
- */
- static int fixupDaughter(final Result result, HRegionInfo daughter,
- final AssignmentManager assignmentManager,
- final CatalogTracker catalogTracker)
- throws IOException {
- if (daughter == null) return 0;
- if (isDaughterMissing(catalogTracker, daughter)) {
- LOG.info("Fixup; missing daughter " + daughter.getRegionNameAsString());
- MetaEditor.addDaughter(catalogTracker, daughter, null, HConstants.NO_SEQNUM);
-
- // TODO: Log WARN if the regiondir does not exist in the fs. If its not
- // there then something wonky about the split -- things will keep going
- // but could be missing references to parent region.
-
- // And assign it.
- assignmentManager.assign(daughter, true, true);
- return 1;
- } else {
- LOG.debug("Daughter " + daughter.getRegionNameAsString() + " present");
- }
- return 0;
- }
-
- /**
- * Look for presence of the daughter OR of a split of the daughter in .META.
- * Daughter could have been split over on regionserver before a run of the
- * catalogJanitor had chance to clear reference from parent.
- * @param daughter Daughter region to search for.
- * @throws IOException
- */
- private static boolean isDaughterMissing(final CatalogTracker catalogTracker,
- final HRegionInfo daughter) throws IOException {
- FindDaughterVisitor visitor = new FindDaughterVisitor(daughter);
- // Start the scan at what should be the daughter's row in the .META.
- // We will either 1., find the daughter or some derivative split of the
- // daughter (will have same table name and start row at least but will sort
- // after because has larger regionid -- the regionid is timestamp of region
- // creation), OR, we will not find anything with same table name and start
- // row. If the latter, then assume daughter missing and do fixup.
- byte [] startrow = daughter.getRegionName();
- MetaReader.fullScan(catalogTracker, visitor, startrow);
- return !visitor.foundDaughter();
- }
-
- /**
- * Looks for daughter. Sets a flag if daughter or some progeny of daughter
- * is found up in .META..
- */
- static class FindDaughterVisitor implements MetaReader.Visitor {
- private final HRegionInfo daughter;
- private boolean found = false;
-
- FindDaughterVisitor(final HRegionInfo daughter) {
- this.daughter = daughter;
- }
-
- /**
- * @return True if we found a daughter region during our visiting.
- */
- boolean foundDaughter() {
- return this.found;
- }
-
- @Override
- public boolean visit(Result r) throws IOException {
- HRegionInfo hri =
- HRegionInfo.getHRegionInfo(r);
- if (hri == null) {
- LOG.warn("No serialized HRegionInfo in " + r);
- return true;
- }
- byte [] value = r.getValue(HConstants.CATALOG_FAMILY,
- HConstants.SERVER_QUALIFIER);
- // See if daughter is assigned to some server
- if (value == null) return false;
-
- // Now see if we have gone beyond the daughter's startrow.
- if (!Bytes.equals(daughter.getTableName(),
- hri.getTableName())) {
- // We fell into another table. Stop scanning.
- return false;
- }
- // If our start rows do not compare, move on.
- if (!Bytes.equals(daughter.getStartKey(), hri.getStartKey())) {
- return false;
- }
- // Else, table name and start rows compare. It means that the daughter
- // or some derivative split of the daughter is up in .META. Daughter
- // exists.
- this.found = true;
- return false;
- }
- }
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index fe65aa0..e43ef66 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1688,12 +1688,10 @@ public class HRegionServer implements ClientProtocol,
}
@Override
- public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct,
- final boolean daughter)
+ public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
throws KeeperException, IOException {
checkOpen();
- LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString() +
- ", daughter=" + daughter);
+ LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString());
// Do checks to see if we need to compact (references or too many files)
for (Store s : r.getStores().values()) {
if (s.hasReferences() || s.needsCompaction()) {
@@ -1714,17 +1712,11 @@ public class HRegionServer implements ClientProtocol,
MetaEditor.updateMetaLocation(ct, r.getRegionInfo(),
this.serverNameFromMasterPOV, openSeqNum);
} else {
- if (daughter) {
- // If daughter of a split, update whole row, not just location.
- MetaEditor.addDaughter(ct, r.getRegionInfo(),
- this.serverNameFromMasterPOV, openSeqNum);
- } else {
- MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
- this.serverNameFromMasterPOV, openSeqNum);
- }
+ MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
+ this.serverNameFromMasterPOV, openSeqNum);
}
LOG.info("Done with post open deploy task for region=" +
- r.getRegionNameAsString() + ", daughter=" + daughter);
+ r.getRegionNameAsString());
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index 2aaa299..6dc6ae0 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -40,7 +40,7 @@ public interface RegionServerServices extends OnlineRegions {
*/
public boolean isStopping();
- /** @return the HLog for a particular region. Pass null for getting the
+ /** @return the HLog for a particular region. Pass null for getting the
* default (common) WAL */
public HLog getWAL(HRegionInfo regionInfo) throws IOException;
@@ -62,15 +62,13 @@ public interface RegionServerServices extends OnlineRegions {
/**
* Tasks to perform after region open to complete deploy of region on
* regionserver
- *
+ *
* @param r Region to open.
* @param ct Instance of {@link CatalogTracker}
- * @param daughter True if this is daughter of a split
* @throws KeeperException
* @throws IOException
*/
- public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct,
- final boolean daughter)
+ public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
throws KeeperException, IOException;
/**
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
index efd9d77..eb3ed79 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
@@ -222,7 +222,7 @@ public class SplitTransaction {
if (this.parent.getCoprocessorHost() != null) {
this.parent.getCoprocessorHost().preSplit();
}
-
+
// Coprocessor callback
if (this.parent.getCoprocessorHost() != null) {
this.parent.getCoprocessorHost().preSplit(this.splitrow);
@@ -288,7 +288,7 @@ public class SplitTransaction {
throw new IOException(exceptionToThrow);
}
-
+
if (hstoreFilesToSplit.size() == 0) {
String errorMsg = "No store files to split for the region "+this.parent.getRegionInfo();
LOG.error(errorMsg);
@@ -335,10 +335,14 @@ public class SplitTransaction {
// HBase-4562).
this.journal.add(JournalEntry.PONR);
- // Edit parent in meta. Offlines parent region and adds splita and splitb.
+ // Edit parent in meta. Offlines parent region and adds splita and splitb
+ // as an atomic update. See HBASE-7721. This update to META is the commit
+ // point that determines whether the region is split or not in case of
+ // failures. If it succeeds, the master will roll the split forward; if
+ // not, the master will roll it back and re-assign the parent region.
if (!testing) {
- MetaEditor.offlineParentInMeta(server.getCatalogTracker(),
- this.parent.getRegionInfo(), a.getRegionInfo(), b.getRegionInfo());
+ MetaEditor.splitRegion(server.getCatalogTracker(), parent.getRegionInfo(),
+ a.getRegionInfo(), b.getRegionInfo(), server.getServerName());
}
return new PairOfSameType<HRegion>(a, b);
}
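
Because the atomic splitRegion() call above is the commit point, recovery after a crash can decide roll-forward vs. rollback purely from the parent's row in META. A hedged sketch of that decision (illustrative only; MetaReader.getRegion is the same lookup this patch keeps using in ServerShutdownHandler):

    Pair<HRegionInfo, ServerName> parentRow =
        MetaReader.getRegion(catalogTracker, parent.getRegionName());
    if (parentRow != null && parentRow.getFirst().isOffline()
        && parentRow.getFirst().isSplit()) {
      // The META edit committed: roll forward. The daughter rows are
      // guaranteed to exist, so the master only needs to (re)assign them.
    } else {
      // The META edit never happened: roll back and re-assign the parent.
    }
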
@@ -388,10 +392,10 @@ public class SplitTransaction {
if (services != null) {
try {
// add 2nd daughter first (see HBASE-4335)
- services.postOpenDeployTasks(b, server.getCatalogTracker(), true);
+ services.postOpenDeployTasks(b, server.getCatalogTracker());
// Should add it to OnlineRegions
services.addToOnlineRegions(b);
- services.postOpenDeployTasks(a, server.getCatalogTracker(), true);
+ services.postOpenDeployTasks(a, server.getCatalogTracker());
services.addToOnlineRegions(a);
} catch (KeeperException ke) {
throw new IOException(ke);
@@ -733,7 +737,7 @@ public class SplitTransaction {
if (this.parent.getCoprocessorHost() != null) {
this.parent.getCoprocessorHost().preRollBackSplit();
}
-
+
boolean result = true;
FileSystem fs = this.parent.getFilesystem();
ListIterator<JournalEntry> iterator =
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
index 4cd8659..b0aa045 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;
@@ -314,7 +313,7 @@ public class OpenRegionHandler extends EventHandler {
public void run() {
try {
this.services.postOpenDeployTasks(this.region,
- this.server.getCatalogTracker(), false);
+ this.server.getCatalogTracker());
} catch (Exception e) {
LOG.warn("Exception running postOpenDeployTasks; region=" +
this.region.getRegionInfo().getEncodedName(), e);
@@ -442,7 +441,7 @@ public class OpenRegionHandler extends EventHandler {
// Instantiate the region. This also periodically tickles our zk OPENING
// state so master doesn't timeout this region in transition.
region = HRegion.openHRegion(this.regionInfo, this.htd,
- this.rsServices.getWAL(this.regionInfo),
+ this.rsServices.getWAL(this.regionInfo),
this.server.getConfiguration(),
this.rsServices,
new CancelableProgressable() {
@@ -487,7 +486,7 @@ public class OpenRegionHandler extends EventHandler {
* @param encodedName Name of the znode file (Region encodedName is the znode
* name).
* @param versionOfOfflineNode - version Of OfflineNode that needs to be compared
- * before changing the node's state from OFFLINE
+ * before changing the node's state from OFFLINE
* @return True if successful transition.
*/
boolean transitionZookeeperOfflineToOpening(final String encodedName,
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 7bfeacf..1364b30 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -54,7 +54,7 @@ import com.google.common.primitives.Ints;
* passed filesystem. It expects descriptors to be in a file under the
* table's directory in FS. Can be read-only -- i.e. does not modify
* the filesystem or can be read and write.
- *
+ *
* Also has utility for keeping up the table descriptors tableinfo file.
* The table schema file is kept under the table directory in the filesystem.
* It has a {@link #TABLEINFO_NAME} prefix and then a suffix that is the
@@ -163,7 +163,7 @@ public class FSTableDescriptors implements TableDescriptors {
return cachedtdm.getTableDescriptor();
}
}
-
+
TableDescriptorModtime tdmt = null;
try {
tdmt = getTableDescriptorModtime(this.fs, this.rootdir, tablename);
@@ -174,7 +174,7 @@ public class FSTableDescriptors implements TableDescriptors {
LOG.debug("Exception during readTableDecriptor. Current table name = "
+ tablename, ioe);
}
-
+
if (tdmt == null) {
LOG.warn("The following folder is in HBase's root directory and " +
"doesn't contain a table descriptor, " +
@@ -241,7 +241,7 @@ public class FSTableDescriptors implements TableDescriptors {
/**
* Checks if .tableinfo exists for given table
- *
+ *
* @param fs file system
* @param rootdir root directory of HBase installation
* @param tableName name of table
@@ -488,7 +488,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @param tableDir
* @param status
* @return Descriptor file or null if we failed write.
- * @throws IOException
+ * @throws IOException
*/
private static Path writeTableDescriptor(final FileSystem fs,
final HTableDescriptor hTableDescriptor, final Path tableDir,
@@ -554,7 +554,7 @@ public class FSTableDescriptors implements TableDescriptors {
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
- *
+ *
* @param htableDescriptor
* @param conf
*/
@@ -568,7 +568,7 @@ public class FSTableDescriptors implements TableDescriptors {
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
- *
+ *
* @param htableDescriptor
* @param conf
* @param forceCreation True if we are to overwrite existing file.
@@ -597,7 +597,7 @@ public class FSTableDescriptors implements TableDescriptors {
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
- *
+ *
* @param fs
* @param htableDescriptor
* @param rootdir
@@ -630,8 +630,10 @@ public class FSTableDescriptors implements TableDescriptors {
LOG.info("Current tableInfoPath = " + status.getPath());
if (!forceCreation) {
if (fs.exists(status.getPath()) && status.getLen() > 0) {
- LOG.info("TableInfo already exists.. Skipping creation");
- return false;
+ if (getTableDescriptor(fs, status.getPath().getParent()).equals(htableDescriptor)) {
+ LOG.info("TableInfo already exists.. Skipping creation");
+ return false;
+ }
}
}
}
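
With this change, createTableDescriptor() with forceCreation == false skips the write only when the on-disk descriptor equals the new one; a differing (typically older) descriptor gets rewritten, which is what lets master startup upgrade the ROOT and META descriptors. A hedged sketch of the resulting semantics (mirrors the new test added at the end of this patch; names are illustrative):

    HTableDescriptor htd = new HTableDescriptor("t");                  // hypothetical table
    FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false); // true: written
    FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false); // false: identical, skipped
    htd.setValue(Bytes.toBytes("k"), Bytes.toBytes("v"));              // descriptor now differs
    FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false); // true: rewritten
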
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index c06e28e..05f6326 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -88,7 +88,7 @@ public abstract class FSUtils {
public static FSUtils getInstance(FileSystem fs, Configuration conf) {
String scheme = fs.getUri().getScheme();
if (scheme == null) {
- LOG.warn("Could not find scheme for uri " +
+ LOG.warn("Could not find scheme for uri " +
fs.getUri() + ", default to hdfs");
scheme = "hdfs";
}
@@ -134,7 +134,7 @@ public abstract class FSUtils {
* use the default block size
* not track progress
*
- *
+ *
* @param fs {@link FileSystem} on which to write the file
* @param path {@link Path} to the file to write
* @return output stream to the created file
@@ -154,7 +154,7 @@ public abstract class FSUtils {
* use the default block size
* not track progress
*
- *
+ *
* @param fs {@link FileSystem} on which to write the file
* @param path {@link Path} to the file to write
* @param perm
@@ -174,7 +174,7 @@ public abstract class FSUtils {
/**
* Get the file permissions specified in the configuration, if they are
* enabled.
- *
+ *
* @param fs filesystem that the file will be created on.
* @param conf configuration to read for determining if permissions are
* enabled and which to use
@@ -266,11 +266,11 @@ public abstract class FSUtils {
}
/**
- * Check whether dfs is in safemode.
+ * Check whether dfs is in safemode.
* @param conf
* @throws IOException
*/
- public static void checkDfsSafeMode(final Configuration conf)
+ public static void checkDfsSafeMode(final Configuration conf)
throws IOException {
boolean isInSafeMode = false;
FileSystem fs = FileSystem.get(conf);
@@ -282,7 +282,7 @@ public abstract class FSUtils {
throw new IOException("File system is in safemode, it can't be written now");
}
}
-
+
/**
* Verifies current version of file system
*
@@ -373,7 +373,7 @@ public abstract class FSUtils {
* @param message if true, issues a message on System.out
*
* @throws IOException e
- * @throws DeserializationException
+ * @throws DeserializationException
*/
public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
throws IOException, DeserializationException {
@@ -390,7 +390,7 @@ public abstract class FSUtils {
* @param retries number of times to retry
*
* @throws IOException e
- * @throws DeserializationException
+ * @throws DeserializationException
*/
public static void checkVersion(FileSystem fs, Path rootdir,
boolean message, int wait, int retries)
@@ -555,7 +555,7 @@ public abstract class FSUtils {
/**
* @param cid
- * @throws IOException
+ * @throws IOException
*/
private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
final ClusterId cid)
@@ -725,9 +725,9 @@ public abstract class FSUtils {
* @param fs file system
* @param status file status of the file
* @param start start position of the portion
- * @param length length of the portion
+ * @param length length of the portion
* @return The HDFS blocks distribution
- */
+ */
static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
final FileSystem fs, FileStatus status, long start, long length)
throws IOException {
@@ -739,12 +739,12 @@ public abstract class FSUtils {
long len = bl.getLength();
blocksDistribution.addHostsAndBlockWeight(hosts, len);
}
-
+
return blocksDistribution;
}
-
-
+
+
/**
* Runs through the hbase rootdir and checks all stores have only
* one file in them -- that is, they've been major compacted. Looks
@@ -813,7 +813,7 @@ public abstract class FSUtils {
*
* @param master The master defining the HBase root and file system.
* @return A map for each table and its percentage.
- *
+ *
* @throws IOException When scanning the directory fails.
*/
public static Map<String, Integer> getTableFragmentation(
@@ -1005,7 +1005,7 @@ public abstract class FSUtils {
isValid = fs.getFileStatus(p).isDir();
}
} catch (IOException e) {
- LOG.warn("An error occurred while verifying if [" + p.toString() +
+ LOG.warn("An error occurred while verifying if [" + p.toString() +
"] is a valid directory. Returning 'not valid' and continuing.", e);
}
return isValid;
@@ -1057,7 +1057,7 @@ public abstract class FSUtils {
}
/**
- * Recover file lease. Used when a file might be suspect
+ * Recover file lease. Used when a file might be suspect
* to be had been left open by another process.
* @param fs FileSystem handle
* @param p Path of file to recover lease
@@ -1066,7 +1066,7 @@ public abstract class FSUtils {
*/
public abstract void recoverFileLease(final FileSystem fs, final Path p,
Configuration conf) throws IOException;
-
+
/**
* @param fs
* @param rootdir
@@ -1233,10 +1233,10 @@ public abstract class FSUtils {
throws IOException {
return getRootDir(conf).getFileSystem(conf);
}
-
+
/**
- * Runs through the HBase rootdir and creates a reverse lookup map for
- * table StoreFile names to the full Path.
+ * Runs through the HBase rootdir and creates a reverse lookup map for
+ * table StoreFile names to the full Path.
*
* Example...
* Key = 3944417774205889744
@@ -1251,17 +1251,17 @@ public abstract class FSUtils {
final FileSystem fs, final Path hbaseRootDir)
throws IOException {
Map<String, Path> map = new HashMap<String, Path>();
-
- // if this method looks similar to 'getTableFragmentation' that is because
+
+ // if this method looks similar to 'getTableFragmentation' that is because
// it was borrowed from it.
-
+
DirFilter df = new DirFilter(fs);
// presumes any directory under hbase.rootdir is a table
FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
for (FileStatus tableDir : tableDirs) {
// Skip the .log and other non-table directories. All others should be tables.
// Inside a table, there are compaction.dir directories to skip. Otherwise, all else
- // should be regions.
+ // should be regions.
Path d = tableDir.getPath();
if (HConstants.HBASE_NON_TABLE_DIRS.contains(d.getName())) {
continue;
@@ -1283,17 +1283,17 @@ public abstract class FSUtils {
Path sf = sfStatus.getPath();
map.put( sf.getName(), sf);
}
-
+
}
}
}
return map;
}
-
+
/**
* Calls fs.listStatus() and treats FileNotFoundException as non-fatal
* This accommodates differences between hadoop versions
- *
+ *
* @param fs file system
* @param dir directory
* @param filter path filter
@@ -1326,7 +1326,7 @@ public abstract class FSUtils {
/**
* Calls fs.delete() and returns the value returned by the fs.delete()
- *
+ *
* @param fs
* @param path
* @param recursive
@@ -1340,7 +1340,7 @@ public abstract class FSUtils {
/**
* Calls fs.exists(). Checks if the specified path exists
- *
+ *
* @param fs
* @param path
* @return the value returned by fs.exists()
@@ -1352,7 +1352,7 @@ public abstract class FSUtils {
/**
* Throw an exception if an action is not permitted by a user on a file.
- *
+ *
* @param ugi
* the user
* @param file
@@ -1401,7 +1401,7 @@ public abstract class FSUtils {
/**
* Recursive helper to log the state of the FS
- *
+ *
* @see #logFileSystemState(FileSystem, Path, Log)
*/
private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java
index dd8b5e3..3818e47 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java
@@ -195,9 +195,7 @@ public class TestMasterCoprocessorExceptionWithAbort {
// (namely the part that shows the set of loaded coprocessors).
// In this test, there is only a single coprocessor (BuggyMasterObserver).
assertTrue(master.getLoadedCoprocessors().
- equals("[" +
- TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName() +
- "]"));
+ contains(TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName()));
CreateTableThread createTableThread = new CreateTableThread(UTIL);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java
index 4f57ced..138a567 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java
@@ -19,11 +19,21 @@
package org.apache.hadoop.hbase.coprocessor;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
import java.io.IOException;
-import java.io.InterruptedIOException;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -35,8 +45,6 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import static org.junit.Assert.*;
-
/**
* Tests unhandled exceptions thrown by coprocessors running on master.
* Expected result is that the master will remove the buggy coprocessor from
@@ -173,7 +181,7 @@ public class TestMasterCoprocessorExceptionWithRemove {
// In this test, there is only a single coprocessor (BuggyMasterObserver).
String coprocessorName =
BuggyMasterObserver.class.getName();
- assertTrue(master.getLoadedCoprocessors().equals("[" + coprocessorName + "]"));
+ assertTrue(master.getLoadedCoprocessors().contains(coprocessorName));
HTableDescriptor htd1 = new HTableDescriptor(TEST_TABLE1);
htd1.addFamily(new HColumnDescriptor(TEST_FAMILY1));
@@ -201,7 +209,7 @@ public class TestMasterCoprocessorExceptionWithRemove {
masterTracker.masterZKNodeWasDeleted);
String loadedCoprocessors = master.getLoadedCoprocessors();
- assertTrue(loadedCoprocessors.equals("[" + coprocessorName + "]"));
+ assertTrue(loadedCoprocessors.contains(coprocessorName));
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index ae601eb..a10e96f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -291,7 +291,7 @@ class MockRegionServer implements AdminProtocol, ClientProtocol, RegionServerSer
}
@Override
- public void postOpenDeployTasks(HRegion r, CatalogTracker ct, boolean daughter)
+ public void postOpenDeployTasks(HRegion r, CatalogTracker ct)
throws KeeperException, IOException {
// TODO Auto-generated method stub
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index a05dbbd..86fd34d 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -118,7 +118,7 @@ public class TestEndToEndSplitTransaction {
// 3. finish phase II
// note that this replicates some code from SplitTransaction
// 2nd daughter first
- server.postOpenDeployTasks(regions.getSecond(), server.getCatalogTracker(), true);
+ server.postOpenDeployTasks(regions.getSecond(), server.getCatalogTracker());
// Add to online regions
server.addToOnlineRegions(regions.getSecond());
// THIS is the crucial point:
@@ -128,7 +128,7 @@ public class TestEndToEndSplitTransaction {
assertTrue(test(con, tableName, lastRow, server));
// first daughter second
- server.postOpenDeployTasks(regions.getFirst(), server.getCatalogTracker(), true);
+ server.postOpenDeployTasks(regions.getFirst(), server.getCatalogTracker());
// Add to online regions
server.addToOnlineRegions(regions.getFirst());
assertTrue(test(con, tableName, firstRow, server));
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index a0c4eb5..bddedc6 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -34,25 +34,25 @@ import java.util.concurrent.CountDownLatch;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LargeTests;
-import org.apache.hadoop.hbase.exceptions.MasterNotRunningException;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.RegionTransition;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.exceptions.UnknownRegionException;
-import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.exceptions.MasterNotRunningException;
+import org.apache.hadoop.hbase.exceptions.UnknownRegionException;
+import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
@@ -339,59 +339,6 @@ public class TestSplitTransactionOnCluster {
}
/**
- * Messy test that simulates case where SplitTransactions fails to add one
- * of the daughters up into the .META. table before crash. We're testing
- * fact that the shutdown handler will fixup the missing daughter region
- * adding it back into .META.
- * @throws IOException
- * @throws InterruptedException
- */
- @Test (timeout = 300000) public void testShutdownSimpleFixup()
- throws IOException, InterruptedException, ServiceException {
- final byte [] tableName = Bytes.toBytes("testShutdownSimpleFixup");
-
- // Create table then get the single region for our new table.
- HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
- List<HRegion> regions = cluster.getRegions(tableName);
- HRegionInfo hri = getAndCheckSingleTableRegion(regions);
-
- int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);
-
- // Turn off balancer so it doesn't cut in and mess up our placements.
- this.admin.setBalancerRunning(false, true);
- // Turn off the meta scanner so it don't remove parent on us.
- cluster.getMaster().setCatalogJanitorEnabled(false);
- try {
- // Add a bit of load up into the table so splittable.
- TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY);
- // Get region pre-split.
- HRegionServer server = cluster.getRegionServer(tableRegionIndex);
- printOutRegions(server, "Initial regions: ");
- int regionCount = ProtobufUtil.getOnlineRegions(server).size();
- // Now split.
- split(hri, server, regionCount);
- // Get daughters
- List<HRegion> daughters = checkAndGetDaughters(tableName);
- // Remove one of the daughters from .META. to simulate failed insert of
- // daughter region up into .META.
- removeDaughterFromMeta(daughters.get(0).getRegionName());
- // Now crash the server
- cluster.abortRegionServer(tableRegionIndex);
- waitUntilRegionServerDead();
- awaitDaughters(tableName, daughters.size());
- // Assert daughters are online.
- regions = cluster.getRegions(tableName);
- for (HRegion r: regions) {
- assertTrue(daughters.contains(r));
- }
- } finally {
- admin.setBalancerRunning(true, false);
- cluster.getMaster().setCatalogJanitorEnabled(true);
- t.close();
- }
- }
-
- /**
* Test that if daughter split on us, we won't do the shutdown handler fixup
* just because we can't find the immediate daughter of an offlined parent.
* @throws IOException
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java
index 32f49a0..efcbe81 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java
@@ -47,7 +47,7 @@ import org.apache.zookeeper.KeeperException;
public class MockRegionServerServices implements RegionServerServices {
private final Map<String, HRegion> regions = new HashMap<String, HRegion>();
private boolean stopping = false;
- private final ConcurrentSkipListMap<byte[], Boolean> rit =
+ private final ConcurrentSkipListMap<byte[], Boolean> rit =
new ConcurrentSkipListMap<byte[], Boolean>(Bytes.BYTES_COMPARATOR);
private HFileSystem hfs = null;
private ZooKeeperWatcher zkw = null;
@@ -86,7 +86,7 @@ public class MockRegionServerServices implements RegionServerServices {
}
@Override
- public void postOpenDeployTasks(HRegion r, CatalogTracker ct, boolean daughter)
+ public void postOpenDeployTasks(HRegion r, CatalogTracker ct)
throws KeeperException, IOException {
addToOnlineRegions(r);
}
@@ -125,7 +125,7 @@ public class MockRegionServerServices implements RegionServerServices {
public ZooKeeperWatcher getZooKeeper() {
return zkw;
}
-
+
public RegionServerAccounting getRegionServerAccounting() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index fd3e483..a65436c 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -17,7 +17,12 @@
*/
package org.apache.hadoop.hbase.util;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
@@ -28,7 +33,12 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.exceptions.TableExistsException;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -266,5 +276,22 @@ public class TestFSTableDescriptors {
}
}
+ @Test
+ public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException {
+ Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready");
+ HTableDescriptor htd = new HTableDescriptor("testCreateTableDescriptorUpdatesIfThereExistsAlready");
+ FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+ assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
+ assertFalse(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
+ htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
+ assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd)); // re-creates since the descriptor changed
+ Path tableDir = FSUtils.getTablePath(testdir, htd.getName());
+ Path tmpTableDir = new Path(tableDir, ".tmp");
+ FileStatus[] statuses = fs.listStatus(tmpTableDir);
+ assertTrue(statuses.length == 0);
+
+ assertEquals(htd, FSTableDescriptors.getTableDescriptor(fs, tableDir));
+ }
+
}