diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index 2cef3df..f554f2a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -328,7 +328,7 @@ public class HMobStore extends HStore { public List compact(CompactionContext compaction) throws IOException { // If it's major compaction, try to find whether there's a sweeper is running // If yes, change the major compaction to a minor one. - if (compaction.getRequest().isMajor()) { + if (compaction.getRequest().isAllFiles()) { // Use the Zookeeper to coordinate. // 1. Acquire a operation lock. // 1.1. If no, convert the major compaction to a minor one and continue the compaction. @@ -342,26 +342,27 @@ public class HMobStore extends HStore { try { zk = MobZookeeper.newInstance(region.getBaseConf(), compactionName); } catch (KeeperException e) { - LOG.error("Cannot connect to the zookeeper, ready to perform the minor compaction instead", - e); - // change the major compaction into a minor one - compaction.getRequest().setIsMajor(false, false); + LOG.error("Cannot connect to the zookeeper, forcing the delete markers to be retained", e); + compaction.getRequest().forceRetainDeleteMarkers(); return super.compact(compaction); } - boolean major = false; + boolean keepDeleteMarkers = true; + boolean majorCompactNodeAdded = false; try { // try to acquire the operation lock. if (zk.lockColumnFamily(getTableName().getNameAsString(), getFamily().getNameAsString())) { try { LOG.info("Obtain the lock for the store[" + this - + "], ready to perform the major compaction"); + + "], forcing the delete markers to be retained"); // check the sweeping node to find out whether the sweeping is in progress. 
boolean hasSweeper = zk.isSweeperZNodeExist(getTableName().getNameAsString(), getFamily().getNameAsString()); if (!hasSweeper) { // if not, add a child to the major compaction node of this store. - major = zk.addMajorCompactionZNode(getTableName().getNameAsString(), getFamily() - .getNameAsString(), compactionName); + majorCompactNodeAdded = zk.addMajorCompactionZNode(getTableName().getNameAsString(), + getFamily().getNameAsString(), compactionName); + // If we failed to add the major compact node, go with keep delete markers mode. + keepDeleteMarkers = !majorCompactNodeAdded; } } catch (Exception e) { LOG.error("Fail to handle the Zookeeper", e); @@ -371,17 +372,14 @@ public class HMobStore extends HStore { } } try { - if (major) { - return super.compact(compaction); - } else { - LOG.warn("Cannot obtain the lock or a sweep tool is running on this store[" - + this + "], ready to perform the minor compaction instead"); - // change the major compaction into a minor one - compaction.getRequest().setIsMajor(false, false); - return super.compact(compaction); + if (keepDeleteMarkers) { + LOG.warn("Cannot obtain the lock or a sweep tool is running on this store[" + this + + "], forcing the delete markers to be retained"); + compaction.getRequest().forceRetainDeleteMarkers(); } + return super.compact(compaction); } finally { - if (major) { + if (majorCompactNodeAdded) { try { zk.deleteMajorCompactionZNode(getTableName().getNameAsString(), getFamily() .getNameAsString(), compactionName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java index 0fc64d2..76085fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java @@ -59,6 +59,8 @@ public class 
CompactionRequest implements Comparable { private String storeName = ""; private long totalSize = -1L; + private Boolean retainDeleteMarkers = null; + /** * This ctor should be used by coprocessors that want to subclass CompactionRequest. */ @@ -200,6 +202,23 @@ public class CompactionRequest implements Comparable { : (isMajor ? DisplayCompactionType.MAJOR : DisplayCompactionType.ALL_FILES); } + /** + * Forcefully setting that this compaction has to retain the delete markers in the new compacted + * file, whatever be the type of the compaction.
+ * Note: By default HBase drops delete markers when the compaction is on all files. + */ + public void forceRetainDeleteMarkers() { + this.retainDeleteMarkers = Boolean.TRUE; + } + + /** + * @return Whether the compaction has to retain the delete markers or not. + */ + public boolean isRetainDeleteMarkers() { + return (this.retainDeleteMarkers != null) ? this.retainDeleteMarkers.booleanValue() + : isAllFiles(); + } + @Override public String toString() { String fsList = Joiner.on(", ").join( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java index 8056dd0..be859c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java @@ -59,8 +59,8 @@ public class DefaultCompactor extends Compactor { InternalScanner scanner = null; try { /* Include deletes, unless we are doing a compaction of all files */ - ScanType scanType = - request.isAllFiles() ? ScanType.COMPACT_DROP_DELETES : ScanType.COMPACT_RETAIN_DELETES; + ScanType scanType = request.isRetainDeleteMarkers() ? ScanType.COMPACT_RETAIN_DELETES + : ScanType.COMPACT_DROP_DELETES; scanner = preCreateCoprocScanner(request, scanType, fd.earliestPutTs, scanners); if (scanner == null) { scanner = createScanner(store, scanners, scanType, smallestReadPoint, fd.earliestPutTs);