Index: src/test/org/apache/hadoop/hbase/TestHRegion.java
===================================================================
--- src/test/org/apache/hadoop/hbase/TestHRegion.java	(revision 638708)
+++ src/test/org/apache/hadoop/hbase/TestHRegion.java	(working copy)
@@ -594,7 +594,7 @@
     Path oldRegion1 = subregions[0].getRegionDir();
     Path oldRegion2 = subregions[1].getRegionDir();
     startTime = System.currentTimeMillis();
-    r = HRegion.closeAndMerge(subregions[0], subregions[1]);
+    r = HRegion.mergeAdjacent(subregions[0], subregions[1]);
     region = new HRegionIncommon(r);
     System.out.println("Merge regions elapsed time: " +
       ((System.currentTimeMillis() - startTime) / 1000.0));
Index: src/test/org/apache/hadoop/hbase/util/TestMigrate.java
===================================================================
--- src/test/org/apache/hadoop/hbase/util/TestMigrate.java	(revision 638708)
+++ src/test/org/apache/hadoop/hbase/util/TestMigrate.java	(working copy)
@@ -96,11 +96,11 @@
 
       // this path is for running test with ant
 
-      "../../../../../src/contrib/hbase/src/testdata/HADOOP-2478-testdata.zip")
+      "../../../src/testdata/HADOOP-2478-testdata.zip")
 
       // and this path is for when you want to run inside eclipse
 
-      /*"src/contrib/hbase/src/testdata/HADOOP-2478-testdata.zip")*/
+      /*"src/testdata/HADOOP-2478-testdata.zip")*/
     );
     ZipInputStream zip = new ZipInputStream(hs);
@@ -177,13 +177,12 @@
       return;
     }
     for (int i = 0; i < stats.length; i++) {
-      String relativePath =
-        stats[i].getPath().toString().substring(rootdirlength);
+      String path = stats[i].getPath().toString();
       if (stats[i].isDir()) {
-        System.out.println("d " + relativePath);
+        System.out.println("d " + path);
         listPaths(fs, stats[i].getPath(), rootdirlength);
       } else {
-        System.out.println("f " + relativePath + " size=" + stats[i].getLen());
+        System.out.println("f " + path + " size=" + stats[i].getLen());
       }
     }
   }
Index: src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
===================================================================
--- src/test/org/apache/hadoop/hbase/util/TestMergeTool.java	(revision 0)
+++ src/test/org/apache/hadoop/hbase/util/TestMergeTool.java	(revision 0)
@@ -0,0 +1,317 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.util.ToolRunner;
+
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HLog;
+import org.apache.hadoop.hbase.HRegion;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+
+/** Test the stand-alone merge tool that can merge arbitrary regions */
+public class TestMergeTool extends HBaseTestCase {
+  static final Log LOG = LogFactory.getLog(TestMergeTool.class);
+  protected static final Text COLUMN_NAME = new Text("contents:");
+  private final HTableDescriptor desc;
+  private final Text[][] rows;
+  private final HRegionInfo[] sourceRegions = new HRegionInfo[5];
+  private final HRegion[] regions = new HRegion[5];
+  private Path rootdir = null;
+  private MiniDFSCluster dfsCluster = null;
+  private FileSystem fs;
+
+  /** constructor */
+  public TestMergeTool() {
+    super();
+
+    // Create table description
+
+    this.desc = new HTableDescriptor("TestMergeTool");
+    this.desc.addFamily(new HColumnDescriptor(COLUMN_NAME.toString()));
+
+    /*
+     * Create the HRegionInfos for the regions.
+     */
+
+    // Region 0 will contain the key range [row_0200,row_0300)
+    sourceRegions[0] =
+      new HRegionInfo(this.desc, new Text("row_0200"), new Text("row_0300"));
+
+    // Region 1 will contain the key range [row_0250,row_0400) and overlaps
+    // with Region 0
+    sourceRegions[1] =
+      new HRegionInfo(this.desc, new Text("row_0250"), new Text("row_0400"));
+
+    // Region 2 will contain the key range [row_0100,row_0200) and is adjacent
+    // to Region 0 or the region resulting from the merge of Regions 0 and 1
+    sourceRegions[2] =
+      new HRegionInfo(this.desc, new Text("row_0100"), new Text("row_0200"));
+
+    // Region 3 will contain the key range [row_0500,row_0600) and is not
+    // adjacent to any of Regions 0, 1, 2 or the merged result of any or all
+    // of those regions
+    sourceRegions[3] =
+      new HRegionInfo(this.desc, new Text("row_0500"), new Text("row_0600"));
+
+    // Region 4 will have empty start and end keys and overlaps all regions.
+    sourceRegions[4] =
+      new HRegionInfo(this.desc, HConstants.EMPTY_TEXT, HConstants.EMPTY_TEXT);
+
+    /*
+     * Now create some row keys
+     */
+    this.rows = new Text[5][];
+    this.rows[0] = new Text[] { new Text("row_0210"), new Text("row_0280") };
+    this.rows[1] = new Text[] { new Text("row_0260"), new Text("row_0350") };
+    this.rows[2] = new Text[] { new Text("row_0110"), new Text("row_0175") };
+    this.rows[3] = new Text[] { new Text("row_0525"), new Text("row_0560") };
+    this.rows[4] = new Text[] { new Text("row_0050"), new Text("row_1000") };
+
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public void setUp() throws Exception {
+    // Start up dfs
+    this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    this.fs = this.dfsCluster.getFileSystem();
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.rootdir = this.fs.getHomeDirectory();
+    this.conf.set(HConstants.HBASE_DIR, this.rootdir.toString());
+
+    // Note: we must call super.setUp after starting the mini cluster or
+    // we will end up with a local file system
+
+    super.setUp();
+
+    try {
+      /*
+       * Create the regions we will merge
+       */
+      for (int i = 0; i < sourceRegions.length; i++) {
+        regions[i] =
+          HRegion.createHRegion(this.sourceRegions[i], this.rootdir, this.conf);
+        /*
+         * Insert data
+         */
+        for (int j = 0; j < rows[i].length; j++) {
+          BatchUpdate b = new BatchUpdate();
+          Text row = rows[i][j];
+          long id = b.startUpdate(row);
+          b.put(id, COLUMN_NAME,
+            new ImmutableBytesWritable(
+              row.getBytes(), 0, row.getLength()
+            ).get()
+          );
+          regions[i].batchUpdate(HConstants.LATEST_TIMESTAMP, b);
+        }
+      }
+      // Create root region
+      HRegion root = HRegion.createHRegion(HRegionInfo.rootRegionInfo,
+        this.rootdir, this.conf);
+      // Create meta region
+      HRegion meta = HRegion.createHRegion(HRegionInfo.firstMetaRegionInfo,
+        this.rootdir, this.conf);
+      // Insert meta into root region
+      HRegion.addRegionToMETA(root, meta);
+      // Insert the regions we created into the meta
+      for(int i = 0; i < regions.length; i++) {
+        HRegion.addRegionToMETA(meta, regions[i]);
+      }
+      // Close root and meta regions
+      root.close();
+      root.getLog().closeAndDelete();
+      meta.close();
+      meta.getLog().closeAndDelete();
+
+    } catch (Exception e) {
+      StaticTestEnvironment.shutdownDfs(dfsCluster);
+      throw e;
+    }
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
+    StaticTestEnvironment.shutdownDfs(dfsCluster);
+  }
+
+  /** @throws Exception */
+  public void testMergeTool() throws Exception {
+    // First verify we can read the rows from the source regions and that they
+    // contain the right data.
+    for (int i = 0; i < regions.length; i++) {
+      for (int j = 0; j < rows[i].length; j++) {
+        byte[] bytes = regions[i].get(rows[i][j], COLUMN_NAME);
+        assertNotNull(bytes);
+        Text value = new Text(bytes);
+        assertTrue(value.equals(rows[i][j]));
+      }
+      // Close the region and delete the log
+      regions[i].close();
+      regions[i].getLog().closeAndDelete();
+    }
+
+    // Create a log that we can reuse when we need to open regions
+
+    HLog log = new HLog(this.fs,
+      new Path("/tmp", HConstants.HREGION_LOGDIR_NAME + "_" +
+        System.currentTimeMillis()
+      ),
+      this.conf, null
+    );
+    try {
+      /*
+       * Merge Region 0 and Region 1
+       */
+      LOG.info("\n\nmerging regions 0 and 1\n\n");
+      Merge merger = new Merge(this.conf);
+      ToolRunner.run(merger,
+        new String[] {
+          this.desc.getName().toString(),
+          this.sourceRegions[0].getRegionName().toString(),
+          this.sourceRegions[1].getRegionName().toString()
+        }
+      );
+      HRegionInfo mergedInfo = merger.getMergedHRegionInfo();
+
+      // Now verify that we can read all the rows from regions 0, 1
+      // in the new merged region.
+      HRegion merged =
+        HRegion.openHRegion(mergedInfo, this.rootdir, log, this.conf);
+
+      for (int i = 0; i < 2 ; i++) {
+        for (int j = 0; j < rows[i].length; j++) {
+          byte[] bytes = merged.get(rows[i][j], COLUMN_NAME);
+          assertNotNull(rows[i][j].toString(), bytes);
+          Text value = new Text(bytes);
+          assertTrue(value.equals(rows[i][j]));
+        }
+      }
+      merged.close();
+      LOG.info("\n\nverified merge of regions 0 and 1\n\n");
+      /*
+       * Merge the result of merging regions 0 and 1 with region 2
+       */
+      LOG.info("\n\nmerging regions 0+1 and 2\n\n");
+      merger = new Merge(this.conf);
+      ToolRunner.run(merger,
+        new String[] {
+          this.desc.getName().toString(),
+          mergedInfo.getRegionName().toString(),
+          this.sourceRegions[2].getRegionName().toString()
+        }
+      );
+      mergedInfo = merger.getMergedHRegionInfo();
+//      System.err.println(mergedInfo.toString());
+
+      // Now verify that we can read all the rows from regions 0, 1 and 2
+      // in the new merged region.
+
+      merged = HRegion.openHRegion(mergedInfo, this.rootdir, log, this.conf);
+
+      for (int i = 0; i < 3 ; i++) {
+        for (int j = 0; j < rows[i].length; j++) {
+          byte[] bytes = merged.get(rows[i][j], COLUMN_NAME);
+          assertNotNull(bytes);
+          Text value = new Text(bytes);
+          assertTrue(value.equals(rows[i][j]));
+        }
+      }
+      merged.close();
+      LOG.info("\n\nverified merge of regions 0+1 and 2\n\n");
+      /*
+       * Merge the result of merging regions 0, 1 and 2 with region 3
+       */
+      LOG.info("\n\nmerging regions 0+1+2 and 3\n\n");
+      merger = new Merge(this.conf);
+      ToolRunner.run(merger,
+        new String[] {
+          this.desc.getName().toString(),
+          mergedInfo.getRegionName().toString(),
+          this.sourceRegions[3].getRegionName().toString()
+        }
+      );
+      mergedInfo = merger.getMergedHRegionInfo();
+
+      // Now verify that we can read all the rows from regions 0, 1, 2 and 3
+      // in the new merged region.
+
+      merged = HRegion.openHRegion(mergedInfo, this.rootdir, log, this.conf);
+
+      for (int i = 0; i < 4 ; i++) {
+        for (int j = 0; j < rows[i].length; j++) {
+          byte[] bytes = merged.get(rows[i][j], COLUMN_NAME);
+          assertNotNull(bytes);
+          Text value = new Text(bytes);
+          assertTrue(value.equals(rows[i][j]));
+        }
+      }
+      merged.close();
+      LOG.info("\n\nverified merge of regions 0+1+2 and 3\n\n");
+      /*
+       * Merge the result of merging regions 0, 1, 2 and 3 with region 4
+       */
+      LOG.info("\n\nmerging regions 0+1+2+3 and 4\n\n");
+      merger = new Merge(this.conf);
+      ToolRunner.run(merger,
+        new String[] {
+          this.desc.getName().toString(),
+          mergedInfo.getRegionName().toString(),
+          this.sourceRegions[4].getRegionName().toString()
+        }
+      );
+      mergedInfo = merger.getMergedHRegionInfo();
+
+      // Now verify that we can read all the rows from the new merged region.
+
+      merged = HRegion.openHRegion(mergedInfo, this.rootdir, log, this.conf);
+
+      for (int i = 0; i < rows.length ; i++) {
+        for (int j = 0; j < rows[i].length; j++) {
+          byte[] bytes = merged.get(rows[i][j], COLUMN_NAME);
+          assertNotNull(bytes);
+          Text value = new Text(bytes);
+          assertTrue(value.equals(rows[i][j]));
+        }
+      }
+      merged.close();
+      LOG.info("\n\nverified merge of regions 0+1+2+3 and 4\n\n");
+
+    } finally {
+      log.closeAndDelete();
+    }
+  }
+}
Index: src/java/org/apache/hadoop/hbase/HStoreFile.java
===================================================================
--- src/java/org/apache/hadoop/hbase/HStoreFile.java	(revision 638708)
+++ src/java/org/apache/hadoop/hbase/HStoreFile.java	(working copy)
@@ -1,890 +1,844 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
-import org.onelab.filter.Filter;
-import org.onelab.filter.Key;
-
-
-/**
- * A HStore data file.  HStores usually have one or more of these files.  They
- * are produced by flushing the memcache to disk.
- *
- * Each HStore maintains a bunch of different data files. The filename is a
- * mix of the parent dir, the region name, the column name, and a file
- * identifier. The name may also be a reference to a store file located
- * elsewhere. This class handles all that path-building stuff for you.
- *
- * An HStoreFile usually tracks 4 things: its parent dir, the region
- * identifier, the column family, and the file identifier.  If you know those
- * four things, you know how to obtain the right HStoreFile.  HStoreFiles may
- * also refernce store files in another region serving either from
- * the top-half of the remote file or from the bottom-half.  Such references
- * are made fast splitting regions.
- *
- * Plain HStoreFiles are named for a randomly generated id as in:
- * 1278437856009925445 A file by this name is made in both the
- * mapfiles and info subdirectories of a
- * HStore columnfamily directoy: E.g. If the column family is 'anchor:', then
- * under the region directory there is a subdirectory named 'anchor' within
- * which is a 'mapfiles' and 'info' subdirectory. In each will be found a
- * file named something like 1278437856009925445, one to hold the
- * data in 'mapfiles' and one under 'info' that holds the sequence id for this
- * store file.
- *
- *
- * References to store files located over in some other region look like
- * this:
- * 1278437856009925445.hbaserepository,qAReLZD-OyQORZWq_vqR1k==,959247014679548184:
- * i.e. an id followed by the name of the referenced region. The data
- * ('mapfiles') of HStoreFile references are empty. The accompanying
- * info file contains the
- * midkey, the id of the remote store we're referencing and whether we're
- * to serve the top or bottom region of the remote store file. Note, a region
- * is not splitable if it has instances of store file references (References
- * are cleaned up by compactions).
- *
- *
- * When merging or splitting HRegions, we might want to modify one of the
- * params for an HStoreFile (effectively moving it elsewhere).
- */
-public class HStoreFile implements HConstants {
- static final Log LOG = LogFactory.getLog(HStoreFile.class.getName());
- static final byte INFO_SEQ_NUM = 0;
- static final String HSTORE_DATFILE_DIR = "mapfiles";
- static final String HSTORE_INFO_DIR = "info";
- static final String HSTORE_FILTER_DIR = "filter";
-
- /**
- * For split HStoreFiles, specifies if the file covers the lower half or
- * the upper half of the key range
- */
- public static enum Range {
- /** HStoreFile contains upper half of key range */
- top,
- /** HStoreFile contains lower half of key range */
- bottom
- }
-
- private final static Random rand = new Random();
-
- private final Path basedir;
- private final String encodedRegionName;
- private final Text colFamily;
- private final long fileId;
- private final HBaseConfiguration conf;
- private final FileSystem fs;
- private final Reference reference;
-
- /**
- * Constructor that fully initializes the object
- * @param conf Configuration object
- * @param basedir qualified path that is parent of region directory
- * @param encodedRegionName file name friendly name of the region
- * @param colFamily name of the column family
- * @param fileId file identifier
- * @param ref Reference to another HStoreFile.
- * @throws IOException
- */
- HStoreFile(HBaseConfiguration conf, FileSystem fs, Path basedir,
- String encodedRegionName, Text colFamily, long fileId,
- final Reference ref) throws IOException {
- this.conf = conf;
- this.fs = fs;
- this.basedir = basedir;
- this.encodedRegionName = encodedRegionName;
- this.colFamily = new Text(colFamily);
-
- long id = fileId;
- if (id == -1) {
- Path mapdir = HStoreFile.getMapDir(basedir, encodedRegionName, colFamily);
- Path testpath = null;
- do {
- id = Math.abs(rand.nextLong());
- testpath = new Path(mapdir, createHStoreFilename(id, null));
- } while(fs.exists(testpath));
- }
- this.fileId = id;
-
- // If a reference, construction does not write the pointer files. Thats
- // done by invocations of writeReferenceFiles(hsf, fs). Happens at fast
- // split time.
- this.reference = ref;
- }
-
- /** @return the region name */
- boolean isReference() {
- return reference != null;
- }
-
- Reference getReference() {
- return reference;
- }
-
- String getEncodedRegionName() {
- return encodedRegionName;
- }
-
- /** @return the column family */
- Text getColFamily() {
- return colFamily;
- }
-
- /** @return the file identifier */
- long getFileId() {
- return fileId;
- }
-
- // Build full filenames from those components
-
- /** @return path for MapFile */
- Path getMapFilePath() {
- if (isReference()) {
- return getMapFilePath(encodedRegionName, fileId,
- reference.getEncodedRegionName());
- }
- return getMapFilePath(encodedRegionName, fileId, null);
- }
-
- private Path getMapFilePath(final Reference r) {
- if (r == null) {
- return getMapFilePath();
- }
- return getMapFilePath(r.getEncodedRegionName(), r.getFileId(), null);
- }
-
- private Path getMapFilePath(final String encodedName, final long fid,
- final String ern) {
- return new Path(HStoreFile.getMapDir(basedir, encodedName, colFamily),
- createHStoreFilename(fid, ern));
- }
-
- /** @return path for info file */
- Path getInfoFilePath() {
- if (isReference()) {
- return getInfoFilePath(encodedRegionName, fileId,
- reference.getEncodedRegionName());
-
- }
- return getInfoFilePath(encodedRegionName, fileId, null);
- }
-
- private Path getInfoFilePath(final String encodedName, final long fid,
- final String ern) {
- return new Path(HStoreFile.getInfoDir(basedir, encodedName, colFamily),
- createHStoreFilename(fid, ern));
- }
-
- // File handling
-
- /*
- * Split by making two new store files that reference top and bottom regions
- * of original store file.
- * @param midKey
- * @param dstA
- * @param dstB
- * @param fs
- * @param c
- * @throws IOException
- *
- * @param midKey the key which will be the starting key of the second region
- * @param dstA the file which will contain keys from the start of the source
- * @param dstB the file which will contain keys from midKey to end of source
- * @param fs file system
- * @param c configuration
- * @throws IOException
- */
- void splitStoreFile(final HStoreFile dstA, final HStoreFile dstB,
- final FileSystem fs)
- throws IOException {
- dstA.writeReferenceFiles(fs);
- dstB.writeReferenceFiles(fs);
- }
-
- void writeReferenceFiles(final FileSystem fs)
- throws IOException {
- createOrFail(fs, getMapFilePath());
- writeSplitInfo(fs);
- }
-
- /*
- * If reference, create and write the remote store file id, the midkey and
- * whether we're going against the top file region of the referent out to
- * the info file.
- * @param p Path to info file.
- * @param hsf
- * @param fs
- * @throws IOException
- */
- private void writeSplitInfo(final FileSystem fs) throws IOException {
- Path p = getInfoFilePath();
- if (fs.exists(p)) {
- throw new IOException("File already exists " + p.toString());
- }
- FSDataOutputStream out = fs.create(p);
- try {
- reference.write(out);
- } finally {
- out.close();
- }
- }
-
- private void createOrFail(final FileSystem fs, final Path p)
- throws IOException {
- if (fs.exists(p)) {
- throw new IOException("File already exists " + p.toString());
- }
- if (!fs.createNewFile(p)) {
- throw new IOException("Failed create of " + p);
- }
- }
-
- /**
- * Merges the contents of the given source HStoreFiles into a single new one.
- *
- * @param srcFiles files to be merged
- * @param fs file system
- * @param conf configuration object
- * @throws IOException
- */
-  void mergeStoreFiles(List<HStoreFile> srcFiles, FileSystem fs,
-      Configuration conf) throws IOException {
...
-
-  /**
-   * Renames the mapfiles and info directories under the passed
-   * hsf directory.
-   * @param fs
-   * @param hsf
-   * @return True if succeeded.
-   * @throws IOException
-   */
-  public boolean rename(final FileSystem fs, final HStoreFile hsf)
-  throws IOException {
-    Path src = getMapFilePath();
-    if (!fs.exists(src)) {
-      throw new FileNotFoundException(src.toString());
-    }
-    boolean success = fs.rename(src, hsf.getMapFilePath());
-    if (!success) {
-      LOG.warn("Failed rename of " + src + " to " + hsf.getMapFilePath());
-    } else {
-      src = getInfoFilePath();
-      if (!fs.exists(src)) {
-        throw new FileNotFoundException(src.toString());
-      }
-      success = fs.rename(src, hsf.getInfoFilePath());
-      if (!success) {
-        LOG.warn("Failed rename of " + src + " to " + hsf.getInfoFilePath());
-      }
-    }
-    return success;
-  }
-
-  /**
-   * Get reader for the store file map file.
-   * Client is responsible for closing file when done.
-   * @param fs
-   * @param bloomFilter If null, no filtering is done.
-   * @return MapFile.Reader
-   * @throws IOException
-   */
-  public synchronized MapFile.Reader getReader(final FileSystem fs,
-      final Filter bloomFilter)
-  throws IOException {
-
-    if (isReference()) {
-      return new HStoreFile.HalfMapFileReader(fs,
-          getMapFilePath(reference).toString(), conf,
-          reference.getFileRegion(), reference.getMidkey(), bloomFilter);
-    }
-    return new BloomFilterMapFile.Reader(fs, getMapFilePath().toString(),
-        conf, bloomFilter);
-  }
-
-  /**
-   * Get a store file writer.
-   * Client is responsible for closing file when done.
-   * @param fs
-   * @param compression Pass SequenceFile.CompressionType.NONE
-   * for none.
-   * @param bloomFilter If null, no filtering is done.
-   * @return MapFile.Writer
-   * @throws IOException
-   */
-  public MapFile.Writer getWriter(final FileSystem fs,
-      final SequenceFile.CompressionType compression,
-      final Filter bloomFilter)
-  throws IOException {
-    if (isReference()) {
-      throw new IOException("Illegal Access: Cannot get a writer on a" +
-        "HStoreFile reference");
-    }
-    return new BloomFilterMapFile.Writer(conf, fs,
-      getMapFilePath().toString(), HStoreKey.class,
-      ImmutableBytesWritable.class, compression, bloomFilter);
-  }
-
-  /**
-   * @return Length of the store map file.  If a reference, size is
-   * approximation.
-   * @throws IOException
-   */
-  public long length() throws IOException {
-    Path p = new Path(getMapFilePath(reference), MapFile.DATA_FILE_NAME);
-    long l = p.getFileSystem(conf).getFileStatus(p).getLen();
-    return (isReference())? l / 2: l;
-  }
-
-  /** {@inheritDoc} */
-  @Override
-  public String toString() {
-    return encodedRegionName + "/" + colFamily + "/" + fileId +
-      (isReference()? "/" + reference.toString(): "");
-  }
-
-  /**
-   * Custom bloom filter key maker.
-   * @param key
-   * @return Key made of bytes of row and column only.
-   * @throws IOException
-   */
-  static Key getBloomFilterKey(WritableComparable key)
-  throws IOException {
-    HStoreKey hsk = (HStoreKey)key;
-    byte [] bytes = null;
-    try {
-      bytes = (hsk.getRow().toString() + hsk.getColumn().toString()).
-        getBytes(UTF8_ENCODING);
-    } catch (UnsupportedEncodingException e) {
-      throw new IOException(e.toString());
-    }
-    return new Key(bytes);
-  }
-
-  static boolean isTopFileRegion(final Range r) {
-    return r.equals(Range.top);
-  }
-
-  private static String createHStoreFilename(final long fid,
-      final String encodedRegionName) {
-    return Long.toString(fid) +
-      ((encodedRegionName != null) ? "." + encodedRegionName : "");
-  }
-
-  static Path getMapDir(Path dir, String encodedRegionName, Text colFamily) {
-    return new Path(dir, new Path(encodedRegionName,
-        new Path(colFamily.toString(), HSTORE_DATFILE_DIR)));
-  }
-
-  /** @return the info directory path */
-  static Path getInfoDir(Path dir, String encodedRegionName, Text colFamily) {
-    return new Path(dir, new Path(encodedRegionName,
-        new Path(colFamily.toString(), HSTORE_INFO_DIR)));
-  }
-
-  /** @return the bloom filter directory path */
-  static Path getFilterDir(Path dir, String encodedRegionName, Text colFamily) {
-    return new Path(dir, new Path(encodedRegionName,
-        new Path(colFamily.toString(), HSTORE_FILTER_DIR)));
-  }
-
-  /*
-   * Data structure to hold reference to a store file over in another region.
-   */
-  static class Reference implements Writable {
-    private String encodedRegionName;
-    private long fileid;
-    private Range region;
-    private HStoreKey midkey;
-
-    Reference(final String ern, final long fid, final HStoreKey m,
-        final Range fr) {
-      this.encodedRegionName = ern;
-      this.fileid = fid;
-      this.region = fr;
-      this.midkey = m;
-    }
-
-    Reference() {
-      this(null, -1, null, Range.bottom);
-    }
-
-    long getFileId() {
-      return fileid;
-    }
-
-    Range getFileRegion() {
-      return region;
-    }
-
-    HStoreKey getMidkey() {
-      return midkey;
-    }
-
-    String getEncodedRegionName() {
-      return encodedRegionName;
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public String toString() {
-      return encodedRegionName + "/" + fileid + "/" + region;
-    }
-
-    // Make it serializable.
-
-    /** {@inheritDoc} */
-    public void write(DataOutput out) throws IOException {
-      out.writeUTF(encodedRegionName);
-      out.writeLong(fileid);
-      // Write true if we're doing top of the file.
-      out.writeBoolean(isTopFileRegion(region));
-      midkey.write(out);
-    }
-
-    /** {@inheritDoc} */
-    public void readFields(DataInput in) throws IOException {
-      encodedRegionName = in.readUTF();
-      fileid = in.readLong();
-      boolean tmp = in.readBoolean();
-      // If true, set region to top.
-      region = tmp? Range.top: Range.bottom;
-      midkey = new HStoreKey();
-      midkey.readFields(in);
-    }
-  }
-
-  /**
-   * Hbase customizations of MapFile.
-   */
-  static class HbaseMapFile extends MapFile {
-
-    static class HbaseReader extends MapFile.Reader {
-
-      /**
-       * @param fs
-       * @param dirName
-       * @param conf
-       * @throws IOException
-       */
-      public HbaseReader(FileSystem fs, String dirName, Configuration conf)
-      throws IOException {
-        super(fs, dirName, conf);
-        // Force reading of the mapfile index by calling midKey.
-        // Reading the index will bring the index into memory over
-        // here on the client and then close the index file freeing
-        // up socket connection and resources in the datanode.
-        // Usually, the first access on a MapFile.Reader will load the
-        // index force the issue in HStoreFile MapFiles because an
-        // access may not happen for some time; meantime we're
-        // using up datanode resources.  See HADOOP-2341.
-        midKey();
-      }
-    }
-
-    static class HbaseWriter extends MapFile.Writer {
-      /**
-       * @param conf
-       * @param fs
-       * @param dirName
-       * @param keyClass
-       * @param valClass
-       * @param compression
-       * @throws IOException
-       */
-      public HbaseWriter(Configuration conf, FileSystem fs, String dirName,
...
-
-  /**
-   * This file is not splitable.  Calls to {@link #midKey()} return null.
-   */
- static class HalfMapFileReader extends BloomFilterMapFile.Reader {
- private final boolean top;
- private final WritableComparable midkey;
- private boolean firstNextCall = true;
-
- HalfMapFileReader(final FileSystem fs, final String dirName,
- final Configuration conf, final Range r,
- final WritableComparable midKey)
- throws IOException {
- this(fs, dirName, conf, r, midKey, null);
- }
-
- HalfMapFileReader(final FileSystem fs, final String dirName,
- final Configuration conf, final Range r,
- final WritableComparable midKey, final Filter filter)
- throws IOException {
- super(fs, dirName, conf, filter);
- top = isTopFileRegion(r);
- midkey = midKey;
- }
-
- @SuppressWarnings("unchecked")
- private void checkKey(final WritableComparable key)
- throws IOException {
- if (top) {
- if (key.compareTo(midkey) < 0) {
- throw new IOException("Illegal Access: Key is less than midKey of " +
- "backing mapfile");
- }
- } else if (key.compareTo(midkey) >= 0) {
- throw new IOException("Illegal Access: Key is greater than or equal " +
- "to midKey of backing mapfile");
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public synchronized void finalKey(WritableComparable key)
- throws IOException {
- if (top) {
- super.finalKey(key);
- } else {
- reset();
- Writable value = new ImmutableBytesWritable();
- WritableComparable k = super.getClosest(midkey, value, true);
- ByteArrayOutputStream byteout = new ByteArrayOutputStream();
- DataOutputStream out = new DataOutputStream(byteout);
- k.write(out);
- ByteArrayInputStream bytein =
- new ByteArrayInputStream(byteout.toByteArray());
- DataInputStream in = new DataInputStream(bytein);
- key.readFields(in);
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public synchronized Writable get(WritableComparable key, Writable val)
- throws IOException {
- checkKey(key);
- return super.get(key, val);
- }
-
- /** {@inheritDoc} */
- @SuppressWarnings("unchecked")
- @Override
- public synchronized WritableComparable getClosest(WritableComparable key,
- Writable val)
- throws IOException {
- WritableComparable closest = null;
- if (top) {
- // If top, the lowest possible key is midkey. Do not have to check
- // what comes back from super getClosest. Will return exact match or
- // greater.
- closest = (key.compareTo(this.midkey) < 0)?
- this.midkey: super.getClosest(key, val);
- } else {
- // We're serving bottom of the file.
- if (key.compareTo(this.midkey) < 0) {
- // Check key is within range for bottom.
- closest = super.getClosest(key, val);
- // midkey was made against largest store file at time of split. Smaller
- // store files could have anything in them. Check return value is
- // not beyond the midkey (getClosest returns exact match or next
- // after).
- if (closest != null && closest.compareTo(this.midkey) >= 0) {
- // Don't let this value out.
- closest = null;
- }
- }
- // Else, key is > midkey so let out closest = null.
- }
- return closest;
- }
-
- /** {@inheritDoc} */
- @SuppressWarnings("unused")
- @Override
- public synchronized WritableComparable midKey() throws IOException {
- // Returns null to indicate file is not splitable.
- return null;
- }
-
- /** {@inheritDoc} */
- @SuppressWarnings("unchecked")
- @Override
- public synchronized boolean next(WritableComparable key, Writable val)
- throws IOException {
- if (firstNextCall) {
- firstNextCall = false;
- if (this.top) {
- // Seek to midkey. Midkey may not exist in this file. That should be
- // fine. Then we'll either be positioned at end or start of file.
- WritableComparable nearest = getClosest(midkey, val);
- // Now copy the mid key into the passed key.
- if (nearest != null) {
- Writables.copyWritable(nearest, key);
- return true;
- }
- return false;
- }
- }
- boolean result = super.next(key, val);
- if (!top && key.compareTo(midkey) >= 0) {
- result = false;
- }
- return result;
- }
-
- /** {@inheritDoc} */
- @Override
- public synchronized void reset() throws IOException {
- if (top) {
- firstNextCall = true;
- seek(midkey);
- return;
- }
- super.reset();
- }
-
- /** {@inheritDoc} */
- @Override
- public synchronized boolean seek(WritableComparable key)
- throws IOException {
- checkKey(key);
- return super.seek(key);
- }
- }
-}
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.io.MapFile;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+import org.onelab.filter.Filter;
+import org.onelab.filter.Key;
+
+
+/**
+ * A HStore data file. HStores usually have one or more of these files. They
+ * are produced by flushing the memcache to disk.
+ *
+ * Each HStore maintains a bunch of different data files. The filename is a
+ * mix of the parent dir, the region name, the column name, and a file
+ * identifier. The name may also be a reference to a store file located
+ * elsewhere. This class handles all that path-building stuff for you.
+ *
+ * An HStoreFile usually tracks 4 things: its parent dir, the region
+ * identifier, the column family, and the file identifier. If you know those
+ * four things, you know how to obtain the right HStoreFile. HStoreFiles may
+ * also reference store files in another region, serving either from
+ * the top-half of the remote file or from the bottom-half. Such references
+ * are made at fast split time.
+ *
+ * Plain HStoreFiles are named for a randomly generated id as in:
+ * 1278437856009925445. A file by this name is made in both the
+ * mapfiles and info subdirectories of a
+ * HStore columnfamily directory: e.g. if the column family is 'anchor:', then
+ * under the region directory there is a subdirectory named 'anchor' within
+ * which is a 'mapfiles' and 'info' subdirectory. In each will be found a
+ * file named something like 1278437856009925445, one to hold the
+ * data in 'mapfiles' and one under 'info' that holds the sequence id for this
+ * store file.
+ *
+ * References to store files located over in some other region look like
+ * this:
+ * 1278437856009925445.hbaserepository,qAReLZD-OyQORZWq_vqR1k==,959247014679548184:
+ * i.e. an id followed by the name of the referenced region. The data
+ * ('mapfiles') of HStoreFile references are empty. The accompanying
+ * info file contains the
+ * midkey, the id of the remote store we're referencing and whether we're
+ * to serve the top or bottom region of the remote store file. Note, a region
+ * is not splitable if it has instances of store file references (References
+ * are cleaned up by compactions).
+ *
+ * When merging or splitting HRegions, we might want to modify one of the
+ * params for an HStoreFile (effectively moving it elsewhere).
+ */
+public class HStoreFile implements HConstants {
+ static final Log LOG = LogFactory.getLog(HStoreFile.class.getName());
+ static final byte INFO_SEQ_NUM = 0;
+ static final String HSTORE_DATFILE_DIR = "mapfiles";
+ static final String HSTORE_INFO_DIR = "info";
+ static final String HSTORE_FILTER_DIR = "filter";
+
+ /**
+ * For split HStoreFiles, specifies if the file covers the lower half or
+ * the upper half of the key range
+ */
+ public static enum Range {
+ /** HStoreFile contains upper half of key range */
+ top,
+ /** HStoreFile contains lower half of key range */
+ bottom
+ }
+
+ private final static Random rand = new Random();
+
+ private final Path basedir;
+ private final String encodedRegionName;
+ private final Text colFamily;
+ private final long fileId;
+ private final HBaseConfiguration conf;
+ private final FileSystem fs;
+ private final Reference reference;
+
+ /**
+ * Constructor that fully initializes the object
+ * @param conf Configuration object
+ * @param basedir qualified path that is parent of region directory
+ * @param encodedRegionName file name friendly name of the region
+ * @param colFamily name of the column family
+ * @param fileId file identifier
+ * @param ref Reference to another HStoreFile.
+ * @throws IOException
+ */
+ HStoreFile(HBaseConfiguration conf, FileSystem fs, Path basedir,
+ String encodedRegionName, Text colFamily, long fileId,
+ final Reference ref) throws IOException {
+ this.conf = conf;
+ this.fs = fs;
+ this.basedir = basedir;
+ this.encodedRegionName = encodedRegionName;
+ this.colFamily = new Text(colFamily);
+
+ long id = fileId;
+ if (id == -1) {
+ Path mapdir = HStoreFile.getMapDir(basedir, encodedRegionName, colFamily);
+ Path testpath = null;
+ do {
+ id = Math.abs(rand.nextLong());
+ testpath = new Path(mapdir, createHStoreFilename(id, null));
+ } while(fs.exists(testpath));
+ }
+ this.fileId = id;
+
+    // If a reference, construction does not write the pointer files. That's
+ // done by invocations of writeReferenceFiles(hsf, fs). Happens at fast
+ // split time.
+ this.reference = ref;
+ }
+
+  /** @return true if this file is a reference to another HStoreFile */
+ boolean isReference() {
+ return reference != null;
+ }
+
+ Reference getReference() {
+ return reference;
+ }
+
+ String getEncodedRegionName() {
+ return encodedRegionName;
+ }
+
+ /** @return the column family */
+ Text getColFamily() {
+ return colFamily;
+ }
+
+ /** @return the file identifier */
+ long getFileId() {
+ return fileId;
+ }
+
+ // Build full filenames from those components
+
+ /** @return path for MapFile */
+ Path getMapFilePath() {
+ if (isReference()) {
+ return getMapFilePath(encodedRegionName, fileId,
+ reference.getEncodedRegionName());
+ }
+ return getMapFilePath(encodedRegionName, fileId, null);
+ }
+
+ private Path getMapFilePath(final Reference r) {
+ if (r == null) {
+ return getMapFilePath();
+ }
+ return getMapFilePath(r.getEncodedRegionName(), r.getFileId(), null);
+ }
+
+ private Path getMapFilePath(final String encodedName, final long fid,
+ final String ern) {
+ return new Path(HStoreFile.getMapDir(basedir, encodedName, colFamily),
+ createHStoreFilename(fid, ern));
+ }
+
+ /** @return path for info file */
+ Path getInfoFilePath() {
+ if (isReference()) {
+ return getInfoFilePath(encodedRegionName, fileId,
+ reference.getEncodedRegionName());
+
+ }
+ return getInfoFilePath(encodedRegionName, fileId, null);
+ }
+
+ private Path getInfoFilePath(final String encodedName, final long fid,
+ final String ern) {
+ return new Path(HStoreFile.getInfoDir(basedir, encodedName, colFamily),
+ createHStoreFilename(fid, ern));
+ }
+
+ // File handling
+
+  /*
+   * Split by making two new store files that reference the top and bottom
+   * regions of the original store file.
+   * @param dstA the file which will contain keys from the start of the source
+   * @param dstB the file which will contain keys from midKey to the end of
+   * the source
+   * @param fs file system
+   * @throws IOException
+   */
+ void splitStoreFile(final HStoreFile dstA, final HStoreFile dstB,
+ final FileSystem fs)
+ throws IOException {
+ dstA.writeReferenceFiles(fs);
+ dstB.writeReferenceFiles(fs);
+ }
+
+ void writeReferenceFiles(final FileSystem fs)
+ throws IOException {
+ createOrFail(fs, getMapFilePath());
+ writeSplitInfo(fs);
+ }
+
+ /*
+ * If reference, create and write the remote store file id, the midkey and
+ * whether we're going against the top file region of the referent out to
+ * the info file.
+ * @param p Path to info file.
+ * @param hsf
+ * @param fs
+ * @throws IOException
+ */
+ private void writeSplitInfo(final FileSystem fs) throws IOException {
+ Path p = getInfoFilePath();
+ if (fs.exists(p)) {
+ throw new IOException("File already exists " + p.toString());
+ }
+ FSDataOutputStream out = fs.create(p);
+ try {
+ reference.write(out);
+ } finally {
+ out.close();
+ }
+ }
+
+ private void createOrFail(final FileSystem fs, final Path p)
+ throws IOException {
+ if (fs.exists(p)) {
+ throw new IOException("File already exists " + p.toString());
+ }
+ if (!fs.createNewFile(p)) {
+ throw new IOException("Failed create of " + p);
+ }
+ }
+
+ /**
+ * Reads in an info file
+ *
+ * @param fs file system
+ * @return The sequence id contained in the info file
+ * @throws IOException
+ */
+ long loadInfo(FileSystem fs) throws IOException {
+ Path p = null;
+ if (isReference()) {
+ p = getInfoFilePath(reference.getEncodedRegionName(),
+ reference.getFileId(), null);
+ } else {
+ p = getInfoFilePath();
+ }
+ DataInputStream in = new DataInputStream(fs.open(p));
+ try {
+ byte flag = in.readByte();
+ if(flag == INFO_SEQ_NUM) {
+ return in.readLong();
+ }
+ throw new IOException("Cannot process log file: " + p);
+ } finally {
+ in.close();
+ }
+ }
+
+ /**
+ * Writes the file-identifier to disk
+ *
+ * @param fs file system
+ * @param infonum file id
+ * @throws IOException
+ */
+ void writeInfo(FileSystem fs, long infonum) throws IOException {
+ Path p = getInfoFilePath();
+ FSDataOutputStream out = fs.create(p);
+ try {
+ out.writeByte(INFO_SEQ_NUM);
+ out.writeLong(infonum);
+ } finally {
+ out.close();
+ }
+ }
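For reference, the info file that writeInfo produces and loadInfo parses is nothing more than a one-byte flag followed by the sequence id as a long. A minimal, self-contained sketch of that two-field layout using plain java.io (the class and file names here are invented for illustration):

import java.io.*;

public class InfoFileDemo {
  static final byte INFO_SEQ_NUM = 0;

  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("hstore-info", null);
    // Write the flag byte, then the sequence id, as writeInfo does.
    DataOutputStream out = new DataOutputStream(new FileOutputStream(f));
    try {
      out.writeByte(INFO_SEQ_NUM);
      out.writeLong(959247014679548184L);
    } finally {
      out.close();
    }
    // Read it back the way loadInfo does: check the flag, then readLong.
    DataInputStream in = new DataInputStream(new FileInputStream(f));
    try {
      if (in.readByte() != INFO_SEQ_NUM) {
        throw new IOException("Cannot process info file: " + f);
      }
      System.out.println("sequence id: " + in.readLong());
    } finally {
      in.close();
    }
  }
}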
+
+ /**
+ * Delete store map files.
+ * @throws IOException
+ */
+ public void delete() throws IOException {
+ fs.delete(getMapFilePath());
+ fs.delete(getInfoFilePath());
+ }
+
+ /**
+ * Renames the mapfiles and info directories under the passed
+   * hsf directory.
+   * @param fs
+   * @param hsf
+   * @return True if succeeded.
+   * @throws IOException
+   */
+  public boolean rename(final FileSystem fs, final HStoreFile hsf)
+  throws IOException {
+    Path src = getMapFilePath();
+    if (!fs.exists(src)) {
+      throw new FileNotFoundException(src.toString());
+    }
+    boolean success = fs.rename(src, hsf.getMapFilePath());
+    if (!success) {
+      LOG.warn("Failed rename of " + src + " to " + hsf.getMapFilePath());
+    } else {
+      src = getInfoFilePath();
+      if (!fs.exists(src)) {
+        throw new FileNotFoundException(src.toString());
+      }
+      success = fs.rename(src, hsf.getInfoFilePath());
+      if (!success) {
+        LOG.warn("Failed rename of " + src + " to " + hsf.getInfoFilePath());
+      }
+    }
+    return success;
+  }
+
+  /**
+   * Get reader for the store file map file.
+   * Client is responsible for closing file when done.
+   * @param fs
+   * @param bloomFilter If null, no filtering is done.
+   * @return MapFile.Reader
+   * @throws IOException
+   */
+  public synchronized MapFile.Reader getReader(final FileSystem fs,
+      final Filter bloomFilter)
+  throws IOException {
+
+    if (isReference()) {
+      return new HStoreFile.HalfMapFileReader(fs,
+          getMapFilePath(reference).toString(), conf,
+          reference.getFileRegion(), reference.getMidkey(), bloomFilter);
+    }
+    return new BloomFilterMapFile.Reader(fs, getMapFilePath().toString(),
+        conf, bloomFilter);
+  }
+
+  /**
+   * Get a store file writer.
+   * Client is responsible for closing file when done.
+   * @param fs
+   * @param compression Pass SequenceFile.CompressionType.NONE
+   * for none.
+   * @param bloomFilter If null, no filtering is done.
+   * @return MapFile.Writer
+   * @throws IOException
+   */
+  public MapFile.Writer getWriter(final FileSystem fs,
+      final SequenceFile.CompressionType compression,
+      final Filter bloomFilter)
+  throws IOException {
+    if (isReference()) {
+      throw new IOException("Illegal Access: Cannot get a writer on a " +
+        "HStoreFile reference");
+    }
+    return new BloomFilterMapFile.Writer(conf, fs,
+      getMapFilePath().toString(), HStoreKey.class,
+      ImmutableBytesWritable.class, compression, bloomFilter);
+  }
+
+  /**
+   * @return Length of the store map file.  If a reference, size is
+   * approximation.
+   * @throws IOException
+   */
+  public long length() throws IOException {
+    Path p = new Path(getMapFilePath(reference), MapFile.DATA_FILE_NAME);
+    long l = p.getFileSystem(conf).getFileStatus(p).getLen();
+    return (isReference())? l / 2: l;
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public String toString() {
+    return encodedRegionName + "/" + colFamily + "/" + fileId +
+      (isReference()? "/" + reference.toString(): "");
+  }
+
+  /**
+   * Custom bloom filter key maker.
+   * @param key
+   * @return Key made of bytes of row and column only.
+   * @throws IOException
+   */
+  static Key getBloomFilterKey(WritableComparable key)
+  throws IOException {
+    HStoreKey hsk = (HStoreKey)key;
+    byte [] bytes = null;
+    try {
+      bytes = (hsk.getRow().toString() + hsk.getColumn().toString()).
+        getBytes(UTF8_ENCODING);
+    } catch (UnsupportedEncodingException e) {
+      throw new IOException(e.toString());
+    }
+    return new Key(bytes);
+  }
+
+  static boolean isTopFileRegion(final Range r) {
+    return r.equals(Range.top);
+  }
+
+  private static String createHStoreFilename(final long fid,
+      final String encodedRegionName) {
+    return Long.toString(fid) +
+      ((encodedRegionName != null) ? "." + encodedRegionName : "");
+  }
+
+  static Path getMapDir(Path dir, String encodedRegionName, Text colFamily) {
+    return new Path(dir, new Path(encodedRegionName,
+        new Path(colFamily.toString(), HSTORE_DATFILE_DIR)));
+  }
+
+  /** @return the info directory path */
+  static Path getInfoDir(Path dir, String encodedRegionName, Text colFamily) {
+    return new Path(dir, new Path(encodedRegionName,
+        new Path(colFamily.toString(), HSTORE_INFO_DIR)));
+  }
+
+  /** @return the bloom filter directory path */
+  static Path getFilterDir(Path dir, String encodedRegionName, Text colFamily) {
+    return new Path(dir, new Path(encodedRegionName,
+        new Path(colFamily.toString(), HSTORE_FILTER_DIR)));
+  }
+
+  /*
+   * Data structure to hold reference to a store file over in another region.
+   */
+  static class Reference implements Writable {
+    private String encodedRegionName;
+    private long fileid;
+    private Range region;
+    private HStoreKey midkey;
+
+    Reference(final String ern, final long fid, final HStoreKey m,
+        final Range fr) {
+      this.encodedRegionName = ern;
+      this.fileid = fid;
+      this.region = fr;
+      this.midkey = m;
+    }
+
+    Reference() {
+      this(null, -1, null, Range.bottom);
+    }
+
+    long getFileId() {
+      return fileid;
+    }
+
+    Range getFileRegion() {
+      return region;
+    }
+
+    HStoreKey getMidkey() {
+      return midkey;
+    }
+
+    String getEncodedRegionName() {
+      return encodedRegionName;
+    }
+
+    /** {@inheritDoc} */
+    @Override
+    public String toString() {
+      return encodedRegionName + "/" + fileid + "/" + region;
+    }
+
+    // Make it serializable.
+
+    /** {@inheritDoc} */
+    public void write(DataOutput out) throws IOException {
+      out.writeUTF(encodedRegionName);
+      out.writeLong(fileid);
+      // Write true if we're doing top of the file.
+      out.writeBoolean(isTopFileRegion(region));
+      midkey.write(out);
+    }
+
+    /** {@inheritDoc} */
+    public void readFields(DataInput in) throws IOException {
+      encodedRegionName = in.readUTF();
+      fileid = in.readLong();
+      boolean tmp = in.readBoolean();
+      // If true, set region to top.
+      region = tmp? Range.top: Range.bottom;
+      midkey = new HStoreKey();
+      midkey.readFields(in);
+    }
+  }
+
+  /**
+   * Hbase customizations of MapFile.
+   */
+  static class HbaseMapFile extends MapFile {
+
+    static class HbaseReader extends MapFile.Reader {
+
+      /**
+       * @param fs
+       * @param dirName
+       * @param conf
+       * @throws IOException
+       */
+      public HbaseReader(FileSystem fs, String dirName, Configuration conf)
+      throws IOException {
+        super(fs, dirName, conf);
+        // Force reading of the mapfile index by calling midKey.
+        // Reading the index will bring the index into memory over
+        // here on the client and then close the index file freeing
+        // up socket connection and resources in the datanode.
+        // Usually, the first access on a MapFile.Reader will load the
+        // index; we force the issue in HStoreFile MapFiles because an
+        // access may not happen for some time; meantime we're
+        // using up datanode resources.  See HADOOP-2341.
+        midKey();
+      }
+    }
+
+    static class HbaseWriter extends MapFile.Writer {
+      /**
+       * @param conf
+       * @param fs
+       * @param dirName
+       * @param keyClass
+       * @param valClass
+       * @param compression
+       * @throws IOException
+       */
+      public HbaseWriter(Configuration conf, FileSystem fs, String dirName,
...
+
+  /**
+ * This file is not splitable. Calls to {@link #midKey()} return null.
+ */
+ static class HalfMapFileReader extends BloomFilterMapFile.Reader {
+ private final boolean top;
+ private final WritableComparable midkey;
+ private boolean firstNextCall = true;
+
+ HalfMapFileReader(final FileSystem fs, final String dirName,
+ final Configuration conf, final Range r,
+ final WritableComparable midKey)
+ throws IOException {
+ this(fs, dirName, conf, r, midKey, null);
+ }
+
+ HalfMapFileReader(final FileSystem fs, final String dirName,
+ final Configuration conf, final Range r,
+ final WritableComparable midKey, final Filter filter)
+ throws IOException {
+ super(fs, dirName, conf, filter);
+ top = isTopFileRegion(r);
+ midkey = midKey;
+ }
+
+ @SuppressWarnings("unchecked")
+ private void checkKey(final WritableComparable key)
+ throws IOException {
+ if (top) {
+ if (key.compareTo(midkey) < 0) {
+ throw new IOException("Illegal Access: Key is less than midKey of " +
+ "backing mapfile");
+ }
+ } else if (key.compareTo(midkey) >= 0) {
+ throw new IOException("Illegal Access: Key is greater than or equal " +
+ "to midKey of backing mapfile");
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public synchronized void finalKey(WritableComparable key)
+ throws IOException {
+ if (top) {
+ super.finalKey(key);
+ } else {
+ reset();
+ Writable value = new ImmutableBytesWritable();
+ WritableComparable k = super.getClosest(midkey, value, true);
+ ByteArrayOutputStream byteout = new ByteArrayOutputStream();
+ DataOutputStream out = new DataOutputStream(byteout);
+ k.write(out);
+ ByteArrayInputStream bytein =
+ new ByteArrayInputStream(byteout.toByteArray());
+ DataInputStream in = new DataInputStream(bytein);
+ key.readFields(in);
+ }
+ }
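The finalKey override above copies one Writable into another by round-tripping it through a byte stream, since Writables have no generic clone. A stand-alone sketch of the same idiom using org.apache.hadoop.io.Text (assumes a Hadoop io jar on the classpath; the class name is mine):

import java.io.*;
import org.apache.hadoop.io.Text;

public class WritableCopyDemo {
  public static void main(String[] args) throws IOException {
    Text src = new Text("row_0500");
    Text dst = new Text();
    // Serialize src into a byte buffer ...
    ByteArrayOutputStream byteout = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(byteout);
    src.write(out);
    // ... and deserialize into dst, exactly as finalKey does above.
    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(byteout.toByteArray()));
    dst.readFields(in);
    System.out.println("copied key: " + dst); // prints row_0500
  }
}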
+
+ /** {@inheritDoc} */
+ @Override
+ public synchronized Writable get(WritableComparable key, Writable val)
+ throws IOException {
+ checkKey(key);
+ return super.get(key, val);
+ }
+
+ /** {@inheritDoc} */
+ @SuppressWarnings("unchecked")
+ @Override
+ public synchronized WritableComparable getClosest(WritableComparable key,
+ Writable val)
+ throws IOException {
+ WritableComparable closest = null;
+ if (top) {
+ // If top, the lowest possible key is midkey. Do not have to check
+ // what comes back from super getClosest. Will return exact match or
+ // greater.
+ closest = (key.compareTo(this.midkey) < 0)?
+ this.midkey: super.getClosest(key, val);
+ } else {
+ // We're serving bottom of the file.
+ if (key.compareTo(this.midkey) < 0) {
+ // Check key is within range for bottom.
+ closest = super.getClosest(key, val);
+ // midkey was made against largest store file at time of split. Smaller
+ // store files could have anything in them. Check return value is
+ // not beyond the midkey (getClosest returns exact match or next
+ // after).
+ if (closest != null && closest.compareTo(this.midkey) >= 0) {
+ // Don't let this value out.
+ closest = null;
+ }
+ }
+ // Else, key is > midkey so let out closest = null.
+ }
+ return closest;
+ }
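The case analysis in getClosest is easier to see outside the MapFile machinery. A toy model with String keys (the helper and its name are invented for illustration): the top half clamps lookups below the midkey up to the midkey itself, while the bottom half rejects any answer at or past it.

import java.util.TreeSet;

public class HalfFileDemo {
  /** Mimics getClosest over a sorted set restricted to one half of the keys. */
  static String getClosest(TreeSet<String> keys, String midkey,
      boolean top, String key) {
    if (top) {
      // Lowest possible key served by the top half is the midkey itself.
      return (key.compareTo(midkey) < 0) ? midkey : keys.ceiling(key);
    }
    if (key.compareTo(midkey) >= 0) {
      return null; // key is beyond the bottom half
    }
    String closest = keys.ceiling(key); // exact match or next after
    // Smaller store files may hold keys past the split point; filter them.
    return (closest != null && closest.compareTo(midkey) >= 0) ? null : closest;
  }

  public static void main(String[] args) {
    TreeSet<String> keys = new TreeSet<String>();
    keys.add("row_0100"); keys.add("row_0300"); keys.add("row_0500");
    String mid = "row_0300";
    System.out.println(getClosest(keys, mid, true, "row_0050"));  // row_0300
    System.out.println(getClosest(keys, mid, false, "row_0200")); // null (filtered)
    System.out.println(getClosest(keys, mid, false, "row_0050")); // row_0100
  }
}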
+
+ /** {@inheritDoc} */
+ @SuppressWarnings("unused")
+ @Override
+ public synchronized WritableComparable midKey() throws IOException {
+ // Returns null to indicate file is not splitable.
+ return null;
+ }
+
+ /** {@inheritDoc} */
+ @SuppressWarnings("unchecked")
+ @Override
+ public synchronized boolean next(WritableComparable key, Writable val)
+ throws IOException {
+ if (firstNextCall) {
+ firstNextCall = false;
+ if (this.top) {
+ // Seek to midkey. Midkey may not exist in this file. That should be
+ // fine. Then we'll either be positioned at end or start of file.
+ WritableComparable nearest = getClosest(midkey, val);
+ // Now copy the mid key into the passed key.
+ if (nearest != null) {
+ Writables.copyWritable(nearest, key);
+ return true;
+ }
+ return false;
+ }
+ }
+ boolean result = super.next(key, val);
+ if (!top && key.compareTo(midkey) >= 0) {
+ result = false;
+ }
+ return result;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public synchronized void reset() throws IOException {
+ if (top) {
+ firstNextCall = true;
+ seek(midkey);
+ return;
+ }
+ super.reset();
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public synchronized boolean seek(WritableComparable key)
+ throws IOException {
+ checkKey(key);
+ return super.seek(key);
+ }
+ }
+}
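To make the naming scheme from the class comment concrete, here is a small sketch of the two path shapes built by createHStoreFilename and getMapDir above. The sample id and referenced-region name are the ones from the comment; the encoded region directory name ("1028785192") and the class name are made up:

public class StoreFilePathDemo {
  static final String HSTORE_DATFILE_DIR = "mapfiles";

  /** Mirrors createHStoreFilename: the id, plus ".encodedRegionName" for references. */
  static String filename(long fid, String encodedRegionName) {
    return Long.toString(fid) +
        ((encodedRegionName != null) ? "." + encodedRegionName : "");
  }

  public static void main(String[] args) {
    String region = "hbaserepository,qAReLZD-OyQORZWq_vqR1k==,959247014679548184";
    // Plain store file: regiondir/family/mapfiles/id
    System.out.println("1028785192/anchor/" + HSTORE_DATFILE_DIR + "/" +
        filename(1278437856009925445L, null));
    // Reference: the id is suffixed with the referenced region's name.
    System.out.println("1028785192/anchor/" + HSTORE_DATFILE_DIR + "/" +
        filename(1278437856009925445L, region));
  }
}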
Index: src/java/org/apache/hadoop/hbase/HMerge.java
===================================================================
--- src/java/org/apache/hadoop/hbase/HMerge.java (revision 638708)
+++ src/java/org/apache/hadoop/hbase/HMerge.java (working copy)
@@ -34,22 +34,16 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
/**
* A non-instantiable class that has a static method capable of compacting
* a table by merging adjacent regions that have grown too small.
*/
-class HMerge implements HConstants, Tool {
+class HMerge implements HConstants {
static final Log LOG = LogFactory.getLog(HMerge.class);
static final Random rand = new Random();
- private Configuration conf;
/*
* Not instantiable
@@ -138,11 +132,6 @@
}
protected boolean merge(final HRegionInfo[] info) throws IOException {
- return merge(info, false);
- }
-
- protected boolean merge(final HRegionInfo[] info, final boolean force)
- throws IOException {
if(info.length < 2) {
LOG.info("only one region - nothing to merge");
return false;
@@ -164,13 +153,13 @@
nextSize = nextRegion.largestHStore(midKey).getAggregate();
- if (force || (currentSize + nextSize) <= (maxFilesize / 2)) {
+ if ((currentSize + nextSize) <= (maxFilesize / 2)) {
// We merge two adjacent regions if their total size is less than
// one half of the desired maximum size
LOG.info("merging regions " + currentRegion.getRegionName()
+ " and " + nextRegion.getRegionName());
HRegion mergedRegion =
- HRegion.closeAndMerge(currentRegion, nextRegion);
+ HRegion.mergeAdjacent(currentRegion, nextRegion);
updateMeta(currentRegion.getRegionName(), nextRegion.getRegionName(),
mergedRegion);
break;
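The condition in the hunk above is the whole merge policy: two adjacent regions are merged only when their combined size fits within half the desired maximum region size. A hedged, free-standing restatement (the helper and class names are mine, not HMerge's):

public class MergePolicyDemo {
  /** True when two adjacent regions are small enough to merge. */
  static boolean shouldMerge(long currentSize, long nextSize, long maxFilesize) {
    return (currentSize + nextSize) <= (maxFilesize / 2);
  }

  public static void main(String[] args) {
    long max = 256L << 20; // e.g. a 256MB maximum region size
    System.out.println(shouldMerge(40L << 20, 60L << 20, max)); // true: 100MB <= 128MB
    System.out.println(shouldMerge(90L << 20, 50L << 20, max)); // false: 140MB > 128MB
  }
}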
@@ -427,94 +416,4 @@
}
}
}
-
- public int run(String[] args) throws Exception {
- if (args.length == 0 || args.length > 4) {
- printUsage();
- return 1;
- }
- final String masterPrefix = "--master=";
- String tableName = null;
- String loRegion = null;
- String hiRegion = null;
- for (int i = 0; i < args.length; i++) {
- String arg = args[i];
- if (arg.startsWith(masterPrefix)) {
- this.conf.set("hbase.master", arg.substring(masterPrefix.length()));
- } else if (tableName == null) {
- tableName = arg;
- continue;
- } else if (loRegion == null) {
- loRegion = arg;
- continue;
- } else if (hiRegion == null) {
- hiRegion = arg;
- continue;
- } else {
- throw new IllegalArgumentException("Unsupported argument: " + arg);
- }
- }
- // Make finals of the region names so can be refererred to by anonymous
- // class.
- final Text lo = new Text(loRegion);
- final Text hi = new Text(hiRegion);
- // Make a version of OnlineMerger that does two regions only.
- Merger m = new OnlineMerger((HBaseConfiguration)this.conf,
- FileSystem.get(this.conf), new Text(tableName)) {
- @Override
- void process() throws IOException {
- try {
- for (HRegionInfo[] regionsToMerge = next(); regionsToMerge != null;
- regionsToMerge = next()) {
- if (regionsToMerge[0].getRegionName().equals(lo) &&
- regionsToMerge[1].getRegionName().equals(hi)) {
- merge(regionsToMerge, true);
- // Done
- break;
- }
- }
- } finally {
- try {
- this.hlog.closeAndDelete();
- } catch(IOException e) {
- LOG.error(e);
- }
- }
- }
-
- @Override
- protected void checkOfflined(@SuppressWarnings("unused") HRegionInfo hri)
- throws TableNotDisabledException {
- // Disabling does not work reliably. Just go ahead and merge.
- return;
- }
- };
- m.process();
- return 0;
- }
-
- public Configuration getConf() {
- return this.conf;
- }
-
- public void setConf(final Configuration c) {
- this.conf = c;
- }
-
- static int printUsage() {
- System.out.println("merge [--master=MASTER:PORT] hsf directory.
- * @param fs
- * @param hsf
- * @return True if succeeded.
- * @throws IOException
- */
- public boolean rename(final FileSystem fs, final HStoreFile hsf)
- throws IOException {
- Path src = getMapFilePath();
- if (!fs.exists(src)) {
- throw new FileNotFoundException(src.toString());
- }
- boolean success = fs.rename(src, hsf.getMapFilePath());
- if (!success) {
- LOG.warn("Failed rename of " + src + " to " + hsf.getMapFilePath());
- } else {
- src = getInfoFilePath();
- if (!fs.exists(src)) {
- throw new FileNotFoundException(src.toString());
- }
- success = fs.rename(src, hsf.getInfoFilePath());
- if (!success) {
- LOG.warn("Failed rename of " + src + " to " + hsf.getInfoFilePath());
- }
- }
- return success;
- }
-
- /**
- * Get reader for the store file map file.
- * Client is responsible for closing file when done.
- * @param fs
- * @param bloomFilter If null, no filtering is done.
- * @return MapFile.Reader
- * @throws IOException
- */
- public synchronized MapFile.Reader getReader(final FileSystem fs,
- final Filter bloomFilter)
- throws IOException {
-
- if (isReference()) {
- return new HStoreFile.HalfMapFileReader(fs,
- getMapFilePath(reference).toString(), conf,
- reference.getFileRegion(), reference.getMidkey(), bloomFilter);
- }
- return new BloomFilterMapFile.Reader(fs, getMapFilePath().toString(),
- conf, bloomFilter);
- }
-
- /**
- * Get a store file writer.
- * Client is responsible for closing file when done.
- * @param fs
- * @param compression Pass SequenceFile.CompressionType.NONE
- * for none.
- * @param bloomFilter If null, no filtering is done.
- * @return MapFile.Writer
- * @throws IOException
- */
- public MapFile.Writer getWriter(final FileSystem fs,
- final SequenceFile.CompressionType compression,
- final Filter bloomFilter)
- throws IOException {
- if (isReference()) {
- throw new IOException("Illegal Access: Cannot get a writer on a" +
- "HStoreFile reference");
- }
- return new BloomFilterMapFile.Writer(conf, fs,
- getMapFilePath().toString(), HStoreKey.class,
- ImmutableBytesWritable.class, compression, bloomFilter);
- }
-
- /**
- * @return Length of the store map file. If a reference, size is
- * approximation.
- * @throws IOException
- */
- public long length() throws IOException {
- Path p = new Path(getMapFilePath(reference), MapFile.DATA_FILE_NAME);
- long l = p.getFileSystem(conf).getFileStatus(p).getLen();
- return (isReference())? l / 2: l;
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString() {
- return encodedRegionName + "/" + colFamily + "/" + fileId +
- (isReference()? "/" + reference.toString(): "");
- }
-
- /**
- * Custom bloom filter key maker.
- * @param key
- * @return Key made of bytes of row and column only.
- * @throws IOException
- */
- static Key getBloomFilterKey(WritableComparable key)
- throws IOException {
- HStoreKey hsk = (HStoreKey)key;
- byte [] bytes = null;
- try {
- bytes = (hsk.getRow().toString() + hsk.getColumn().toString()).
- getBytes(UTF8_ENCODING);
- } catch (UnsupportedEncodingException e) {
- throw new IOException(e.toString());
- }
- return new Key(bytes);
- }
-
- static boolean isTopFileRegion(final Range r) {
- return r.equals(Range.top);
- }
-
- private static String createHStoreFilename(final long fid,
- final String encodedRegionName) {
- return Long.toString(fid) +
- ((encodedRegionName != null) ? "." + encodedRegionName : "");
- }
-
- static Path getMapDir(Path dir, String encodedRegionName, Text colFamily) {
- return new Path(dir, new Path(encodedRegionName,
- new Path(colFamily.toString(), HSTORE_DATFILE_DIR)));
- }
-
- /** @return the info directory path */
- static Path getInfoDir(Path dir, String encodedRegionName, Text colFamily) {
- return new Path(dir, new Path(encodedRegionName,
- new Path(colFamily.toString(), HSTORE_INFO_DIR)));
- }
-
- /** @return the bloom filter directory path */
- static Path getFilterDir(Path dir, String encodedRegionName, Text colFamily) {
- return new Path(dir, new Path(encodedRegionName,
- new Path(colFamily.toString(), HSTORE_FILTER_DIR)));
- }
-
- /*
- * Data structure to hold reference to a store file over in another region.
- */
- static class Reference implements Writable {
- private String encodedRegionName;
- private long fileid;
- private Range region;
- private HStoreKey midkey;
-
- Reference(final String ern, final long fid, final HStoreKey m,
- final Range fr) {
- this.encodedRegionName = ern;
- this.fileid = fid;
- this.region = fr;
- this.midkey = m;
- }
-
- Reference() {
- this(null, -1, null, Range.bottom);
- }
-
- long getFileId() {
- return fileid;
- }
-
- Range getFileRegion() {
- return region;
- }
-
- HStoreKey getMidkey() {
- return midkey;
- }
-
- String getEncodedRegionName() {
- return encodedRegionName;
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString() {
- return encodedRegionName + "/" + fileid + "/" + region;
- }
-
- // Make it serializable.
-
- /** {@inheritDoc} */
- public void write(DataOutput out) throws IOException {
- out.writeUTF(encodedRegionName);
- out.writeLong(fileid);
- // Write true if we're doing top of the file.
- out.writeBoolean(isTopFileRegion(region));
- midkey.write(out);
- }
-
- /** {@inheritDoc} */
- public void readFields(DataInput in) throws IOException {
- encodedRegionName = in.readUTF();
- fileid = in.readLong();
- boolean tmp = in.readBoolean();
- // If true, set region to top.
- region = tmp? Range.top: Range.bottom;
- midkey = new HStoreKey();
- midkey.readFields(in);
- }
- }
-
- /**
- * Hbase customizations of MapFile.
- */
- static class HbaseMapFile extends MapFile {
-
- static class HbaseReader extends MapFile.Reader {
-
- /**
- * @param fs
- * @param dirName
- * @param conf
- * @throws IOException
- */
- public HbaseReader(FileSystem fs, String dirName, Configuration conf)
- throws IOException {
- super(fs, dirName, conf);
- // Force reading of the mapfile index by calling midKey.
- // Reading the index will bring the index into memory over
- // here on the client and then close the index file freeing
- // up socket connection and resources in the datanode.
- // Usually, the first access on a MapFile.Reader will load the
- // index force the issue in HStoreFile MapFiles because an
- // access may not happen for some time; meantime we're
- // using up datanode resources. See HADOOP-2341.
- midKey();
- }
- }
-
- static class HbaseWriter extends MapFile.Writer {
- /**
- * @param conf
- * @param fs
- * @param dirName
- * @param keyClass
- * @param valClass
- * @param compression
- * @throws IOException
- */
- public HbaseWriter(Configuration conf, FileSystem fs, String dirName,
- Class1278437856009925445 A file by this name is made in both the
+ * mapfiles and info subdirectories of a
+ * HStore columnfamily directoy: E.g. If the column family is 'anchor:', then
+ * under the region directory there is a subdirectory named 'anchor' within
+ * which is a 'mapfiles' and 'info' subdirectory. In each will be found a
+ * file named something like 1278437856009925445, one to hold the
+ * data in 'mapfiles' and one under 'info' that holds the sequence id for this
+ * store file.
+ *
+ * 1278437856009925445.hbaserepository,qAReLZD-OyQORZWq_vqR1k==,959247014679548184:
+ * i.e. an id followed by the name of the referenced region. The data
+ * ('mapfiles') of HStoreFile references are empty. The accompanying
+ * info file contains the
+ * midkey, the id of the remote store we're referencing and whether we're
+ * to serve the top or bottom region of the remote store file. Note, a region
+ * is not splitable if it has instances of store file references (References
+ * are cleaned up by compactions).
+ *
+ * hsf directory.
+ * @param fs
+ * @param hsf
+ * @return True if succeeded.
+ * @throws IOException
+ */
+ public boolean rename(final FileSystem fs, final HStoreFile hsf)
+ throws IOException {
+ Path src = getMapFilePath();
+ if (!fs.exists(src)) {
+ throw new FileNotFoundException(src.toString());
+ }
+ boolean success = fs.rename(src, hsf.getMapFilePath());
+ if (!success) {
+ LOG.warn("Failed rename of " + src + " to " + hsf.getMapFilePath());
+ } else {
+ src = getInfoFilePath();
+ if (!fs.exists(src)) {
+ throw new FileNotFoundException(src.toString());
+ }
+ success = fs.rename(src, hsf.getInfoFilePath());
+ if (!success) {
+ LOG.warn("Failed rename of " + src + " to " + hsf.getInfoFilePath());
+ }
+ }
+ return success;
+ }
+
+ /**
+ * Get reader for the store file map file.
+ * Client is responsible for closing file when done.
+ * @param fs
+ * @param bloomFilter If null, no filtering is done.
+ * @return MapFile.Reader
+ * @throws IOException
+ */
+ public synchronized MapFile.Reader getReader(final FileSystem fs,
+ final Filter bloomFilter)
+ throws IOException {
+
+ if (isReference()) {
+ return new HStoreFile.HalfMapFileReader(fs,
+ getMapFilePath(reference).toString(), conf,
+ reference.getFileRegion(), reference.getMidkey(), bloomFilter);
+ }
+ return new BloomFilterMapFile.Reader(fs, getMapFilePath().toString(),
+ conf, bloomFilter);
+ }
+
+ /**
+ * Get a store file writer.
+ * Client is responsible for closing file when done.
+ * @param fs
+ * @param compression Pass SequenceFile.CompressionType.NONE
+ * for none.
+ * @param bloomFilter If null, no filtering is done.
+ * @return MapFile.Writer
+ * @throws IOException
+ */
+ public MapFile.Writer getWriter(final FileSystem fs,
+ final SequenceFile.CompressionType compression,
+ final Filter bloomFilter)
+ throws IOException {
+ if (isReference()) {
+ throw new IOException("Illegal Access: Cannot get a writer on a" +
+ "HStoreFile reference");
+ }
+ return new BloomFilterMapFile.Writer(conf, fs,
+ getMapFilePath().toString(), HStoreKey.class,
+ ImmutableBytesWritable.class, compression, bloomFilter);
+ }
+
+ /**
+ * @return Length of the store map file. If a reference, size is
+ * approximation.
+ * @throws IOException
+ */
+ public long length() throws IOException {
+ Path p = new Path(getMapFilePath(reference), MapFile.DATA_FILE_NAME);
+ long l = p.getFileSystem(conf).getFileStatus(p).getLen();
+ return (isReference())? l / 2: l;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public String toString() {
+ return encodedRegionName + "/" + colFamily + "/" + fileId +
+ (isReference()? "/" + reference.toString(): "");
+ }
+
+ /**
+ * Custom bloom filter key maker.
+ * @param key
+ * @return Key made of bytes of row and column only.
+ * @throws IOException
+ */
+ static Key getBloomFilterKey(WritableComparable key)
+ throws IOException {
+ HStoreKey hsk = (HStoreKey)key;
+ byte [] bytes = null;
+ try {
+ bytes = (hsk.getRow().toString() + hsk.getColumn().toString()).
+ getBytes(UTF8_ENCODING);
+ } catch (UnsupportedEncodingException e) {
+ throw new IOException(e.toString());
+ }
+ return new Key(bytes);
+ }
+
+ static boolean isTopFileRegion(final Range r) {
+ return r.equals(Range.top);
+ }
+
+ private static String createHStoreFilename(final long fid,
+ final String encodedRegionName) {
+ return Long.toString(fid) +
+ ((encodedRegionName != null) ? "." + encodedRegionName : "");
+ }
+
+ static Path getMapDir(Path dir, String encodedRegionName, Text colFamily) {
+ return new Path(dir, new Path(encodedRegionName,
+ new Path(colFamily.toString(), HSTORE_DATFILE_DIR)));
+ }
+
+ /** @return the info directory path */
+ static Path getInfoDir(Path dir, String encodedRegionName, Text colFamily) {
+ return new Path(dir, new Path(encodedRegionName,
+ new Path(colFamily.toString(), HSTORE_INFO_DIR)));
+ }
+
+ /** @return the bloom filter directory path */
+ static Path getFilterDir(Path dir, String encodedRegionName, Text colFamily) {
+ return new Path(dir, new Path(encodedRegionName,
+ new Path(colFamily.toString(), HSTORE_FILTER_DIR)));
+ }
+
+ /*
+ * Data structure to hold reference to a store file over in another region.
+ */
+ static class Reference implements Writable {
+ private String encodedRegionName;
+ private long fileid;
+ private Range region;
+ private HStoreKey midkey;
+
+ Reference(final String ern, final long fid, final HStoreKey m,
+ final Range fr) {
+ this.encodedRegionName = ern;
+ this.fileid = fid;
+ this.region = fr;
+ this.midkey = m;
+ }
+
+ Reference() {
+ this(null, -1, null, Range.bottom);
+ }
+
+ long getFileId() {
+ return fileid;
+ }
+
+ Range getFileRegion() {
+ return region;
+ }
+
+ HStoreKey getMidkey() {
+ return midkey;
+ }
+
+ String getEncodedRegionName() {
+ return encodedRegionName;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public String toString() {
+ return encodedRegionName + "/" + fileid + "/" + region;
+ }
+
+ // Make it serializable.
+
+ /** {@inheritDoc} */
+ public void write(DataOutput out) throws IOException {
+ out.writeUTF(encodedRegionName);
+ out.writeLong(fileid);
+ // Write true if we're doing top of the file.
+ out.writeBoolean(isTopFileRegion(region));
+ midkey.write(out);
+ }
+
+ /** {@inheritDoc} */
+ public void readFields(DataInput in) throws IOException {
+ encodedRegionName = in.readUTF();
+ fileid = in.readLong();
+ boolean tmp = in.readBoolean();
+ // If true, set region to top.
+ region = tmp? Range.top: Range.bottom;
+ midkey = new HStoreKey();
+ midkey.readFields(in);
+ }
+ }
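As a sanity check on the wire format, a round-trip sketch using Hadoop's buffer classes; all values are invented. Note that readFields() can only recover top or bottom, since write() serializes the Range as a single boolean:

  // Hypothetical round trip.
  Reference ref = new Reference("1028785192", 1243848559L,
      new HStoreKey(new Text("row_0500")), Range.top);
  DataOutputBuffer out = new DataOutputBuffer();
  ref.write(out);
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  Reference copy = new Reference();
  copy.readFields(in);    // copy now matches ref field for field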
+
+ /**
+ * Hbase customizations of MapFile.
+ */
+ static class HbaseMapFile extends MapFile {
+
+ static class HbaseReader extends MapFile.Reader {
+
+ /**
+ * @param fs
+ * @param dirName
+ * @param conf
+ * @throws IOException
+ */
+ public HbaseReader(FileSystem fs, String dirName, Configuration conf)
+ throws IOException {
+ super(fs, dirName, conf);
+ // Force reading of the mapfile index by calling midKey.
+ // Reading the index will bring the index into memory over
+ // here on the client and then close the index file freeing
+ // up socket connection and resources in the datanode.
+ // Usually the first access on a MapFile.Reader will load the
+ // index; we force the issue for HStoreFile MapFiles because an
+ // access may not happen for some time, and meantime we would be
+ // tying up datanode resources. See HADOOP-2341.
+ midKey();
+ }
+ }
+
+ static class HbaseWriter extends MapFile.Writer {
+ /**
+ * @param conf
+ * @param fs
+ * @param dirName
+ * @param keyClass
+ * @param valClass
+ * @param compression
+ * @throws IOException
+ */
+ public HbaseWriter(Configuration conf, FileSystem fs, String dirName,
+ public HbaseWriter(Configuration conf, FileSystem fs, String dirName,
+ Class keyClass, Class valClass,
+ SequenceFile.CompressionType compression)
+ throws IOException {
+ super(conf, fs, dirName, keyClass, valClass, compression);
+ }
+ }
Index: src/java/org/apache/hadoop/hbase/HRegion.java
===================================================================
--- src/java/org/apache/hadoop/hbase/HRegion.java (revision 638708)
+++ src/java/org/apache/hadoop/hbase/HRegion.java (working copy)
@@ -1827,9 +1949,8 @@
 * meta region. Used by the HMaster bootstrap code adding
 * new table to ROOT table.
 *
 * @param meta META HRegion to be updated
* @param r HRegion to add to meta
*
* @throws IOException
- * @see {@link #removeRegionFromMETA(HRegion, HRegion)}
*/
- static void addRegionToMETA(HRegion meta, HRegion r) throws IOException {
+ public static void addRegionToMETA(HRegion meta, HRegion r) throws IOException {
meta.checkResources();
// The row key is the region name
Text row = r.getRegionName();
@@ -1855,7 +1976,6 @@
 * @param regionName name of the region to remove from meta
*
* @throws IOException
- * @see {@link #addRegionToMETA(HRegion, HRegion)}
*/
static void removeRegionFromMETA(final HRegionInterface server,
final Text metaRegionName, final Text regionName)
@@ -1870,7 +1990,6 @@
 * @param info HRegionInfo of the region to mark offline in meta
*
* @throws IOException
- * @see {@link #addRegionToMETA(HRegion, HRegion)}
*/
static void offlineRegionInMETA(final HRegionInterface srvr,
final Text metaRegionName, final HRegionInfo info)
@@ -1893,22 +2012,25 @@
* @param rootdir qualified path of HBase root directory
* @param info HRegionInfo for region to be deleted
* @throws IOException
- * @return True if deleted.
*/
- static boolean deleteRegion(FileSystem fs, Path rootdir, HRegionInfo info)
+ static void deleteRegion(FileSystem fs, Path rootdir, HRegionInfo info)
throws IOException {
- Path p = HRegion.getRegionDir(rootdir, info);
+ deleteRegion(fs, HRegion.getRegionDir(rootdir, info));
+ }
+
+ private static void deleteRegion(FileSystem fs, Path regiondir)
+ throws IOException {
if (LOG.isDebugEnabled()) {
- LOG.debug("DELETING region " + p.toString());
+ LOG.debug("DELETING region " + regiondir.toString());
}
- return fs.delete(p);
+ FileUtil.fullyDelete(fs, regiondir);
}
/**
* Computes the Path of the HRegion
*
* @param tabledir qualified path for table
- * @param name region file name ENCODED!
+ * @param name ENCODED region name
* @return Path of HRegion directory
* @see HRegionInfo#encodeRegionName(Text)
*/
@@ -1929,4 +2051,19 @@
info.getEncodedName()
);
}
+
+ /**
+ * Determines if the specified row is within the row range specified by the
+ * specified HRegionInfo
+ *
+ * @param info HRegionInfo that specifies the row range
+ * @param row row to be checked
+ * @return true if the row is within the range specified by the HRegionInfo
+ */
+ public static boolean rowIsInRange(HRegionInfo info, Text row) {
+ return ((info.getStartKey().getLength() == 0) ||
+ (info.getStartKey().compareTo(row) <= 0)) &&
+ ((info.getEndKey().getLength() == 0) ||
+ (info.getEndKey().compareTo(row) > 0));
+ }
}
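A quick illustration of the half-open range semantics rowIsInRange implements; an empty start or end key acts as minus or plus infinity. The table descriptor and boundaries below are invented:

  HTableDescriptor desc = new HTableDescriptor("SomeTable");  // invented
  HRegionInfo info = new HRegionInfo(desc, new Text("row_0200"),
      new Text("row_0300"));
  HRegion.rowIsInRange(info, new Text("row_0199"));  // false: below start key
  HRegion.rowIsInRange(info, new Text("row_0200"));  // true: start is inclusive
  HRegion.rowIsInRange(info, new Text("row_0300"));  // false: end is exclusive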
Index: src/java/org/apache/hadoop/hbase/util/Merge.java
===================================================================
--- src/java/org/apache/hadoop/hbase/util/Merge.java (revision 0)
+++ src/java/org/apache/hadoop/hbase/util/Merge.java (revision 0)
@@ -0,0 +1,352 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparator;
+import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HLog;
+import org.apache.hadoop.hbase.HRegion;
+import org.apache.hadoop.hbase.HRegionInfo;
+
+/**
+ * Utility that can merge any two regions in the same table: adjacent,
+ * overlapping or disjoint.
+ */
+public class Merge extends Configured implements Tool {
+ static final Log LOG = LogFactory.getLog(Merge.class);
+ private final HBaseConfiguration conf;
+ private Path rootdir;
+ private volatile MetaUtils utils;
+ private Text tableName; // Name of table
+ private volatile Text region1; // Name of region 1
+ private volatile Text region2; // Name of region 2
+ private volatile boolean isMetaTable;
+ private volatile HRegionInfo mergeInfo;
+
+ /** default constructor */
+ public Merge() {
+ this(new HBaseConfiguration());
+ }
+
+ /**
+ * @param conf
+ */
+ public Merge(HBaseConfiguration conf) {
+ super(conf);
+ this.conf = conf;
+ conf.setInt("hbase.client.retries.number", 1);
+ this.mergeInfo = null;
+ }
+
+ /** {@inheritDoc} */
+ public int run(String[] args) throws Exception {
+ if (parseArgs(args) != 0) {
+ return -1;
+ }
+
+ this.utils = new MetaUtils(conf);
+
+ // Verify file system is up, HBase is down and get the root of the
+ // HBase installation
+
+ this.rootdir = utils.initialize();
+ try {
+ if (isMetaTable) {
+ mergeTwoMetaRegions();
+ } else {
+ mergeTwoRegions();
+ }
+ return 0;
+
+ } catch (Exception e) {
+ LOG.fatal("Merge failed", e);
+ utils.scanMetaRegion(HRegionInfo.firstMetaRegionInfo,
+ new MetaUtils.ScannerListener() {
+ public boolean processRow(HRegionInfo info) {
+ System.err.println(info.toString());
+ return true;
+ }
+ }
+ );
+
+ return -1;
+
+ } finally {
+ if (this.utils != null && this.utils.isInitialized()) {
+ this.utils.shutdown();
+ }
+ }
+ }
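Since Merge implements Tool, it can be driven programmatically as well as from the command line; a minimal sketch, with the table and region names invented:

  // Hypothetical invocation; ToolRunner handles the generic Hadoop options.
  int exitCode = ToolRunner.run(new Merge(new HBaseConfiguration()),
      new String[] { "TestMergeTool",
                     "TestMergeTool,row_0100,1", "TestMergeTool,row_0250,2" });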
+
+ /** @return HRegionInfo for merge result */
+ HRegionInfo getMergedHRegionInfo() {
+ return this.mergeInfo;
+ }
+
+ /*
+ * Merge two meta regions. This is unlikely to be needed soon as we have only
+ * seen the meta table split once, and that was with 64MB regions. With 256MB
+ * regions, it will be some time before someone has enough data in HBase to
+ * split the meta region and even less likely that a merge of two meta
+ * regions will be needed, but it is included for completeness.
+ */
+ private void mergeTwoMetaRegions() throws IOException {
+ HRegion rootRegion = utils.getRootRegion();
+ HRegionInfo info1 = Writables.getHRegionInfoOrNull(
+ rootRegion.get(region1, HConstants.COL_REGIONINFO));
+ HRegionInfo info2 = Writables.getHRegionInfoOrNull(
+ rootRegion.get(region2, HConstants.COL_REGIONINFO));
+ HRegion merged = merge(info1, rootRegion, info2, rootRegion);
+ LOG.info("Adding " + merged.getRegionInfo() + " to " +
+ rootRegion.getRegionInfo());
+ HRegion.addRegionToMETA(rootRegion, merged);
+ merged.close();
+ }
+
+ private static class MetaScannerListener
+ implements MetaUtils.ScannerListener {
+ private final Text region1;
+ private final Text region2;
+ private HRegionInfo meta1 = null;
+ private HRegionInfo meta2 = null;
+
+ MetaScannerListener(Text region1, Text region2) {
+ this.region1 = region1;
+ this.region2 = region2;
+ }
+
+ /** {@inheritDoc} */
+ public boolean processRow(HRegionInfo info) {
+ if (meta1 == null && HRegion.rowIsInRange(info, region1)) {
+ meta1 = info;
+ }
+ if (region2 != null && meta2 == null &&
+ HRegion.rowIsInRange(info, region2)) {
+ meta2 = info;
+ }
+ return meta1 == null || (region2 != null && meta2 == null);
+ }
+
+ HRegionInfo getMeta1() {
+ return meta1;
+ }
+
+ HRegionInfo getMeta2() {
+ return meta2;
+ }
+ }
+
+ /*
+ * Merges two regions from a user table.
+ */
+ private void mergeTwoRegions() throws IOException {
+ // Scan the root region for all the meta regions that contain the regions
+ // we're merging.
+ MetaScannerListener listener = new MetaScannerListener(region1, region2);
+ utils.scanRootRegion(listener);
+ HRegionInfo meta1 = listener.getMeta1();
+ if (meta1 == null) {
+ throw new IOException("Could not find meta region for " + region1);
+ }
+ HRegionInfo meta2 = listener.getMeta2();
+ if (meta2 == null) {
+ throw new IOException("Could not find meta region for " + region2);
+ }
+
+ HRegion metaRegion1 = utils.getMetaRegion(meta1);
+ HRegionInfo info1 = Writables.getHRegionInfoOrNull(
+ metaRegion1.get(region1, HConstants.COL_REGIONINFO));
+
+ HRegion metaRegion2 = null;
+ if (meta1.getRegionName().equals(meta2.getRegionName())) {
+ metaRegion2 = metaRegion1;
+ } else {
+ metaRegion2 = utils.getMetaRegion(meta2);
+ }
+ HRegionInfo info2 = Writables.getHRegionInfoOrNull(
+ metaRegion2.get(region2, HConstants.COL_REGIONINFO));
+
+ HRegion merged = merge(info1, metaRegion1, info2, metaRegion2);
+
+ // Now find the meta region which will contain the newly merged region
+
+ listener = new MetaScannerListener(merged.getRegionName(), null);
+ utils.scanRootRegion(listener);
+ HRegionInfo mergedInfo = listener.getMeta1();
+ if (mergedInfo == null) {
+ throw new IOException("Could not find meta region for " +
+ merged.getRegionName());
+ }
+ HRegion mergeMeta = null;
+ if (mergedInfo.getRegionName().equals(meta1.getRegionName())) {
+ mergeMeta = metaRegion1;
+ } else if (mergedInfo.getRegionName().equals(meta2.getRegionName())) {
+ mergeMeta = metaRegion2;
+ } else {
+ mergeMeta = utils.getMetaRegion(mergedInfo);
+ }
+ LOG.info("\n\n\nAdding " + merged.getRegionInfo() + " to " +
+ mergeMeta.getRegionInfo());
+
+ HRegion.addRegionToMETA(mergeMeta, merged);
+ merged.close();
+ }
+
+ /*
+ * Actually merge two regions and update their info in the meta region(s).
+ * If the meta table is split, meta1 may be different from meta2 (and we may
+ * have to scan the meta table again if the resulting merged region does not
+ * fit in either).
+ * Returns the HRegion object for the newly merged region.
+ */
+ private HRegion merge(HRegionInfo info1, HRegion meta1, HRegionInfo info2,
+ HRegion meta2) throws IOException {
+ if (info1 == null) {
+ throw new IOException("Could not find " + region1 + " in " +
+ meta1.getRegionName());
+ }
+ if (info2 == null) {
+ throw new IOException("Cound not find " + region2 + " in " +
+ meta2.getRegionName());
+ }
+ HRegion merged = null;
+ HLog log = utils.getLog();
+ HRegion region1 =
+ HRegion.openHRegion(info1, this.rootdir, log, this.conf);
+ try {
+ HRegion region2 =
+ HRegion.openHRegion(info2, this.rootdir, log, this.conf);
+ try {
+ merged = HRegion.merge(region1, region2);
+ } finally {
+ if (!region2.isClosed()) {
+ region2.close();
+ }
+ }
+ } finally {
+ if (!region1.isClosed()) {
+ region1.close();
+ }
+ }
+
+ // Remove the old regions from meta.
+ // HRegion.merge has already deleted their files
+
+ removeRegionFromMeta(meta1, info1);
+ removeRegionFromMeta(meta2, info2);
+
+ this.mergeInfo = merged.getRegionInfo();
+ return merged;
+ }
+
+ /*
+ * Removes a region's meta information from the passed meta
+ * region.
+ *
+ * @param meta META HRegion to be updated
+ * @param regioninfo HRegionInfo of region to remove from meta
+ *
+ * @throws IOException
+ */
+ private void removeRegionFromMeta(HRegion meta, HRegionInfo regioninfo)
+ throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("\n\n\nRemoving region: " + regioninfo + " from " + meta);
+ }
+ meta.deleteAll(regioninfo.getRegionName(), System.currentTimeMillis());
+ }
+
+ /*
+ * Parse the command line. GenericOptionsParser strips out any generic
+ * Hadoop arguments; the three that remain must be the table name and the
+ * names of the two regions to merge.
+ *
+ * @param args command line arguments
+ *
+ * @return 0 on success, -1 on failure
+ */
+ private int parseArgs(String[] args) {
+ GenericOptionsParser parser =
+ new GenericOptionsParser(this.getConf(), args);
+
+ String[] remainingArgs = parser.getRemainingArgs();
+ if (remainingArgs.length != 3) {
+ usage();
+ return -1;
+ }
+ tableName = new Text(remainingArgs[0]);
+ isMetaTable = tableName.compareTo(HConstants.META_TABLE_NAME) == 0;
+
+ region1 = new Text(remainingArgs[1]);
+ region2 = new Text(remainingArgs[2]);
+ int status = 0;
+ if (WritableComparator.compareBytes(
+ tableName.getBytes(), 0, tableName.getLength(),
+ region1.getBytes(), 0, tableName.getLength()) != 0) {
+ LOG.error("Region " + region1 + " does not belong to table " + tableName);
+ status = -1;
+ }
+ if (WritableComparator.compareBytes(
+ tableName.getBytes(), 0, tableName.getLength(),
+ region2.getBytes(), 0, tableName.getLength()) != 0) {
+ LOG.error("Region " + region2 + " does not belong to table " + tableName);
+ status = -1;
+ }
+ if (region1.equals(region2)) {
+ LOG.error("Can't merge a region with itself");
+ status = -1;
+ }
+ return status;
+ }
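The two compareBytes calls above are prefix tests: region names have the form <tableName>,<startKey>,<regionId>, so a region belongs to the table exactly when the table name is a leading prefix of the region name. A sketch with invented names:

  Text table = new Text("TestMergeTool");
  Text region = new Text("TestMergeTool,row_0100,1");
  boolean belongs = WritableComparator.compareBytes(
      table.getBytes(), 0, table.getLength(),
      region.getBytes(), 0, table.getLength()) == 0;   // true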
+
+ private void usage() {
+ System.err.println(
+ "Usage: bin/hbase merge