diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index c863b26..6a16903 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -62,20 +62,22 @@ public class HFileArchiver {
   /**
    * Cleans up all the files for a HRegion by archiving the HFiles to the
    * archive directory
+   * @param conf the configuration to use
    * @param fs the file system object
    * @param info HRegionInfo for region to be deleted
    * @throws IOException
    */
-  public static void archiveRegion(FileSystem fs, HRegionInfo info)
+  public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info)
       throws IOException {
-    Path rootDir = FSUtils.getRootDir(fs.getConf());
-    archiveRegion(fs, rootDir, HTableDescriptor.getTableDir(rootDir, info.getTableName()),
+    Path rootDir = FSUtils.getRootDir(conf);
+    archiveRegion(conf, fs, rootDir, HTableDescriptor.getTableDir(rootDir, info.getTableName()),
       HRegion.getRegionDir(rootDir, info));
   }

   /**
    * Remove an entire region from the table directory via archiving the region's hfiles.
+   * @param conf the configuration to use
    * @param fs {@link FileSystem} from which to remove the region
    * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
    *          the archive path)
@@ -85,7 +87,7 @@ public class HFileArchiver {
    *           operations could not complete.
    * @throws IOException if the request cannot be completed
    */
-  public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
+  public static boolean archiveRegion(Configuration conf, FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
       throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("ARCHIVING region " + regionDir.toString());
@@ -104,7 +106,7 @@ public class HFileArchiver {
     // make sure the regiondir lives under the tabledir
     Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
-    Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(fs.getConf(), tableDir, regionDir);
+    Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(conf, tableDir, regionDir);

     LOG.debug("Have an archive directory, preparing to move files");
     FileStatusConverter getAsFile = new FileStatusConverter(fs);
@@ -180,16 +182,16 @@ public class HFileArchiver {

   /**
    * Remove the store files, either by archiving them or outright deletion
+   * @param conf {@link Configuration} to examine to determine the archive directory
    * @param fs the filesystem where the store files live
    * @param parent Parent region hosting the store files
-   * @param conf {@link Configuration} to examine to determine the archive directory
    * @param family the family hosting the store files
    * @param compactedFiles files to be disposed of. No further reading of these files should be
    *          attempted; otherwise likely to cause an {@link IOException}
    * @throws IOException if the files could not be correctly disposed.
    */
-  public static void archiveStoreFiles(FileSystem fs, HRegion parent,
-      Configuration conf, byte[] family, Collection<StoreFile> compactedFiles) throws IOException {
+  public static void archiveStoreFiles(Configuration conf, FileSystem fs, HRegion parent,
+      byte[] family, Collection<StoreFile> compactedFiles) throws IOException {

     // sometimes in testing, we don't have rss, so we need to check for that
     if (fs == null) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index c6cadf1..176d9f1 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -245,7 +245,7 @@ class CatalogJanitor extends Chore {
       }
       FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
       LOG.debug("Archiving parent region:" + parent);
-      HFileArchiver.archiveRegion(fs, parent);
+      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
       MetaEditor.deleteRegion(this.server.getCatalogTracker(), parent);
       result = true;
     }
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 398b66e..b9945d3 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -251,13 +251,13 @@ public class MasterFileSystem {
       }
     } while (retrySplitting);
   }
-  
+
   public void splitLog(final ServerName serverName) throws IOException {
     List<ServerName> serverNames = new ArrayList<ServerName>();
     serverNames.add(serverName);
     splitLog(serverNames);
   }
-  
+
   public void splitLog(final List<ServerName> serverNames) throws IOException {
     long splitTime = 0, splitLogSize = 0;
     List<Path> logDirs = new ArrayList<Path>();
@@ -293,7 +293,7 @@ public class MasterFileSystem {
     // splitLogLock ensures that dead region servers' logs are processed
     // one at a time
     this.splitLogLock.lock();
-    try {      
+    try {
       HLogSplitter splitter = HLogSplitter.createLogSplitter(
         conf, rootdir, logDir, oldLogDir, this.fs);
       try {
@@ -443,7 +443,7 @@ public class MasterFileSystem {

   public void deleteRegion(HRegionInfo region) throws IOException {
-    HFileArchiver.archiveRegion(fs, region);
+    HFileArchiver.archiveRegion(conf, fs, region);
   }

   public void deleteTable(byte[] tableName) throws IOException {
@@ -481,7 +481,7 @@ public class MasterFileSystem {

   /**
    * Create new HTableDescriptor in HDFS.
-   * 
+   *
    * @param htableDescriptor
    */
   public void createTableDescriptor(HTableDescriptor htableDescriptor)
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index f6d24f4..d373cb9 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -62,7 +62,6 @@ import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;

-import com.google.protobuf.*;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -102,11 +101,11 @@ import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.coprocessor.Exec;
 import org.apache.hadoop.hbase.client.coprocessor.ExecResult;
+import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterWrapper;
 import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -117,6 +116,7 @@ import org.apache.hadoop.hbase.ipc.HBaseServer;
 import org.apache.hadoop.hbase.ipc.RpcCallContext;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl.WriteEntry;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -143,8 +143,11 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.MutableClassToInstanceMap;
-
-import static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Message;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;

 /**
  * HRegion stores data for a certain region of a table. It stores all columns
@@ -2599,7 +2602,7 @@ public class HRegion implements HeapSize { // , Writable{
     p.setWriteToWAL(true);
     doBatchMutate(p, lid);
   }
-  
+
   /**
    * Atomically apply the given map of family->edits to the memstore.
    * This handles the consistency control on its own, but the caller
@@ -2793,7 +2796,7 @@ public class HRegion implements HeapSize { // , Writable{
       }
     }
     long seqid = minSeqIdForTheRegion;
-    
+
     NavigableSet<Path> files = HLogUtil.getSplitEditFilesSorted(fs, regiondir);
     if (files == null || files.isEmpty()) return seqid;
@@ -3411,7 +3414,7 @@ public class HRegion implements HeapSize { // , Writable{
     } else {
       this.filter = null;
     }
-    
+
     this.batch = scan.getBatch();
     if (Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) {
       this.stopRow = null;
@@ -3541,7 +3544,7 @@ public class HRegion implements HeapSize { // , Writable{
           if (filter != null && filter.hasFilterRow()) {
             filter.filterRow(results);
           }
-          
+
           return false;
         } else if (filterRowKey(currentRow)) {
           nextRow(currentRow);
@@ -4188,10 +4191,10 @@ public class HRegion implements HeapSize { // , Writable{
     }

     // delete out the 'A' region
-    HFileArchiver.archiveRegion(fs, FSUtils.getRootDir(a.getBaseConf()), a.getTableDir(),
+    HFileArchiver.archiveRegion(a.getBaseConf(), fs, FSUtils.getRootDir(a.getBaseConf()), a.getTableDir(),
       a.getRegionDir());
     // delete out the 'B' region
-    HFileArchiver.archiveRegion(fs, FSUtils.getRootDir(b.getBaseConf()), b.getTableDir(),
+    HFileArchiver.archiveRegion(b.getBaseConf(), fs, FSUtils.getRootDir(b.getBaseConf()), b.getTableDir(),
       b.getRegionDir());

     LOG.info("merge completed. New region is " + dstRegion);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 04883c7..a283cbb 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -19,13 +19,11 @@ package org.apache.hadoop.hbase.regionserver;

 import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.NavigableSet;
-import java.util.Random;
 import java.util.SortedSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
@@ -64,7 +62,10 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
 import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.regionserver.compactions.*;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionPolicy;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -1314,7 +1315,7 @@

       // let the archive util decide if we should archive or delete the files
       LOG.debug("Removing store files after compaction...");
-      HFileArchiver.archiveStoreFiles(this.fs, this.region, this.conf,
+      HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.region,
         this.family.getName(), compactedFiles);

     } catch (IOException e) {
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
index e1dd0c5..3bb7c82 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
@@ -132,7 +132,7 @@ public class TestHFileArchiving {

     // now attempt to depose the region
     Path regionDir = HRegion.getRegionDir(region.getTableDir().getParent(), region.getRegionInfo());
-    HFileArchiver.archiveRegion(fs, region.getRegionInfo());
+    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());

     // check for the existence of the archive directory and some files in it
     Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region);
@@ -192,7 +192,7 @@ public class TestHFileArchiving {
     }

     // then archive the region
-    HFileArchiver.archiveRegion(fs, region.getRegionInfo());
+    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());

     // and check to make sure the region directory got deleted
     assertFalse("Region directory (" + regionDir + "), still exists.", fs.exists(regionDir));
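
For reviewers tracking the API change: every touched call site follows the same migration, with the `Configuration` moved to the front of the argument list so the archive directory is resolved from an explicitly supplied conf rather than from `fs.getConf()`. A minimal caller sketch of the new `archiveRegion` signature follows; the class name, method name, and setup are illustrative, not part of the patch:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.backup.HFileArchiver;

// Hypothetical caller illustrating the post-patch calling convention.
public class ArchiveRegionExample {
  public static void archive(HRegionInfo info) throws IOException {
    // The conf is now passed explicitly; the FileSystem no longer needs to
    // carry the HBase configuration for the archive path to be computed.
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);

    // Before this patch: HFileArchiver.archiveRegion(fs, info);
    // After this patch:  the Configuration is the first argument.
    HFileArchiver.archiveRegion(conf, fs, info);
  }
}
```

The same conf-first reordering applies to `archiveStoreFiles(conf, fs, parent, family, compactedFiles)`, as seen at the `HStore` call site above.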