diff --git CHANGES.txt CHANGES.txt
index 3e2fb78..16a88e9 100644
--- CHANGES.txt
+++ CHANGES.txt
@@ -10,6 +10,7 @@ Release 0.92.0 - Unreleased
               instead of wrapping in RuntimeException (Ted Yu via garyh)
    HBASE-3629  Update our thrift to 0.6 (Moaz Reyad)
    HBASE-1502  Remove need for heartbeats in HBase
+<<<<<<< HEAD
    HBASE-451   Remove HTableDescriptor from HRegionInfo (Subbu M Iyer)
    HBASE-451   Remove HTableDescriptor from HRegionInfo
               addendum that fixes TestTableMapReduce
@@ -23,6 +24,8 @@ Release 0.92.0 - Unreleased
    HBASE-4247  Add isAborted method to the Abortable interface (Akash Ashok)
    HBASE-4503  Purge deprecated HBaseClusterTestCase
+=======
+>>>>>>> parent of 80b6849... HBASE-451 Remove HTableDescriptor from HRegionInfo
 
   BUG FIXES
    HBASE-3280  YouAreDeadException being swallowed in HRS getMaster
diff --git src/main/java/org/apache/hadoop/hbase/HConstants.java src/main/java/org/apache/hadoop/hbase/HConstants.java
index 5e21d18..e87b8a0 100644
--- src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -148,7 +148,7 @@ public final class HConstants {
 
   /** Default value for thread wake frequency */
   public static final int DEFAULT_THREAD_WAKE_FREQUENCY = 10 * 1000;
-  
+
   /** Parameter name for how often a region should should perform a major compaction */
   public static final String MAJOR_COMPACTION_PERIOD = "hbase.hregion.majorcompaction";
 
@@ -182,9 +182,6 @@
   /** Used to construct the name of the compaction directory during compaction */
   public static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir";
 
-  /** The file name used to store HTD in HDFS */
-  public static final String TABLEINFO_NAME = ".tableinfo";
-
   /** Default maximum file size */
   public static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
 
@@ -463,7 +460,7 @@
    * timeout for each RPC
    */
   public static String HBASE_RPC_TIMEOUT_KEY = "hbase.rpc.timeout";
-  
+
   /**
    * Default value of {@link #HBASE_RPC_TIMEOUT_KEY}
    */
@@ -492,11 +489,6 @@
    */
   public static final float HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD = 0.2f;
 
-  public static final List<String> HBASE_NON_USER_TABLE_DIRS = new ArrayList<String>(
-      Arrays.asList(new String[]{ HREGION_LOGDIR_NAME, HREGION_OLDLOGDIR_NAME,
-          CORRUPT_DIR_NAME, Bytes.toString(META_TABLE_NAME),
-          Bytes.toString(ROOT_TABLE_NAME), SPLIT_LOGDIR_NAME }));
-
   public static final Pattern CP_HTD_ATTR_KEY_PATTERN = Pattern.compile
     ("coprocessor\\$([0-9]+)", Pattern.CASE_INSENSITIVE);
   public static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
diff --git src/main/java/org/apache/hadoop/hbase/HRegionInfo.java src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index e950df3..6b5a6f3 100644
--- src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -26,13 +26,8 @@ import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
-import org.apache.hadoop.hbase.migration.HRegionInfo090x;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.JenkinsHash;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.apache.hadoop.io.VersionedWritable;
@@ -135,11 +130,11 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
 
   /** HRegionInfo for root region */
   public static final HRegionInfo ROOT_REGIONINFO =
-    new HRegionInfo(0L, Bytes.toBytes("-ROOT-"));
+    new HRegionInfo(0L, HTableDescriptor.ROOT_TABLEDESC);
 
   /** HRegionInfo for first meta region */
   public static final HRegionInfo FIRST_META_REGIONINFO =
-    new HRegionInfo(1L, Bytes.toBytes(".META."));
+    new HRegionInfo(1L, HTableDescriptor.META_TABLEDESC);
 
   private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY;
   // This flag is in the parent of a split while the parent is still referenced
@@ -151,37 +146,34 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
   private String regionNameStr = "";
   private boolean split = false;
   private byte [] startKey = HConstants.EMPTY_BYTE_ARRAY;
+  protected HTableDescriptor tableDesc = null;
   private int hashCode = -1;
   //TODO: Move NO_HASH to HStoreFile which is really the only place it is used.
   public static final String NO_HASH = null;
   private volatile String encodedName = NO_HASH;
   private byte [] encodedNameAsBytes = null;
 
-  // Current TableName
-  private byte[] tableName = null;
-  private String tableNameAsString = null;
-
   private void setHashCode() {
     int result = Arrays.hashCode(this.regionName);
     result ^= this.regionId;
     result ^= Arrays.hashCode(this.startKey);
     result ^= Arrays.hashCode(this.endKey);
     result ^= Boolean.valueOf(this.offLine).hashCode();
-    result ^= Arrays.hashCode(this.tableName);
+    result ^= this.tableDesc.hashCode();
     this.hashCode = result;
   }
 
-
   /**
    * Private constructor used constructing HRegionInfo for the catalog root and
    * first meta regions
    */
-  private HRegionInfo(long regionId, byte[] tableName) {
+  private HRegionInfo(long regionId, HTableDescriptor tableDesc) {
     super();
     this.regionId = regionId;
-    this.tableName = tableName.clone();
-    // Note: Root & First Meta regions names are still in old format
-    this.regionName = createRegionName(tableName, null,
+    this.tableDesc = tableDesc;
+
+    // Note: Root & First Meta regions names are still in old format
+    this.regionName = createRegionName(tableDesc.getName(), null,
                                        regionId, false);
     this.regionNameStr = Bytes.toStringBinary(this.regionName);
     setHashCode();
@@ -190,66 +182,43 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
   /** Default constructor - creates empty object */
   public HRegionInfo() {
     super();
-  }
-
-  /**
-   * Used only for migration
-   * @param other HRegionInfoForMigration
-   */
-  public HRegionInfo(HRegionInfo090x other) {
-    super();
-    this.endKey = other.getEndKey();
-    this.offLine = other.isOffline();
-    this.regionId = other.getRegionId();
-    this.regionName = other.getRegionName();
-    this.regionNameStr = Bytes.toStringBinary(this.regionName);
-    this.split = other.isSplit();
-    this.startKey = other.getStartKey();
-    this.hashCode = other.hashCode();
-    this.encodedName = other.getEncodedName();
-    this.tableName = other.getTableDesc().getName();
-  }
-
-  public HRegionInfo(final byte[] tableName) {
-    this(tableName, null, null);
+    this.tableDesc = new HTableDescriptor();
   }
 
   /**
    * Construct HRegionInfo with explicit parameters
    *
-   * @param tableName the table name
+   * @param tableDesc the table descriptor
    * @param startKey first key in region
    * @param endKey end of key range
    * @throws IllegalArgumentException
    */
-  public HRegionInfo(final byte[] tableName, final byte[] startKey,
-      final byte[] endKey)
+  public HRegionInfo(final HTableDescriptor tableDesc, final byte [] startKey,
+      final byte [] endKey)
   throws IllegalArgumentException {
-    this(tableName, startKey, endKey, false);
+    this(tableDesc, startKey, endKey, false);
   }
-
   /**
    * Construct HRegionInfo with explicit parameters
    *
-   * @param tableName the table descriptor
+   * @param tableDesc the table descriptor
    * @param startKey first key in region
    * @param endKey end of key range
    * @param split true if this region has split and we have daughter regions
    * regions that may or may not hold references to this region.
    * @throws IllegalArgumentException
    */
-  public HRegionInfo(final byte[] tableName, final byte[] startKey,
-      final byte[] endKey, final boolean split)
+  public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey,
+      final byte [] endKey, final boolean split)
   throws IllegalArgumentException {
-    this(tableName, startKey, endKey, split, System.currentTimeMillis());
+    this(tableDesc, startKey, endKey, split, System.currentTimeMillis());
   }
-
   /**
    * Construct HRegionInfo with explicit parameters
    *
-   * @param tableName the table descriptor
+   * @param tableDesc the table descriptor
    * @param startKey first key in region
    * @param endKey end of key range
    * @param split true if this region has split and we have daughter regions
@@ -257,26 +226,22 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
    * @param regionid Region id to use.
    * @throws IllegalArgumentException
    */
-  public HRegionInfo(final byte[] tableName, final byte[] startKey,
-      final byte[] endKey, final boolean split, final long regionid)
+  public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey,
+      final byte [] endKey, final boolean split, final long regionid)
   throws IllegalArgumentException {
-    super();
-    if (tableName == null) {
-      throw new IllegalArgumentException("tableName cannot be null");
+    if (tableDesc == null) {
+      throw new IllegalArgumentException("tableDesc cannot be null");
     }
-    this.tableName = tableName.clone();
     this.offLine = false;
     this.regionId = regionid;
-
-    this.regionName = createRegionName(this.tableName, startKey, regionId, true);
-
+    this.regionName = createRegionName(tableDesc.getName(), startKey, regionId, true);
     this.regionNameStr = Bytes.toStringBinary(this.regionName);
     this.split = split;
     this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone();
    this.startKey = startKey == null?
      HConstants.EMPTY_START_ROW: startKey.clone();
-    this.tableName = tableName.clone();
+    this.tableDesc = tableDesc;
     setHashCode();
   }
@@ -294,12 +259,11 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
     this.regionNameStr = Bytes.toStringBinary(this.regionName);
     this.split = other.isSplit();
     this.startKey = other.getStartKey();
+    this.tableDesc = other.getTableDesc();
     this.hashCode = other.hashCode();
     this.encodedName = other.getEncodedName();
-    this.tableName = other.tableName;
   }
 
-
   /**
    * Make a region name of passed parameters.
    * @param tableName
@@ -494,22 +458,6 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
   }
 
   /**
-   * Get current table name of the region
-   * @return byte array of table name
-   */
-  public byte[] getTableName() {
-    return tableName;
-  }
-
-  /**
-   * Get current table name as string
-   * @return string representation of current table
-   */
-  public String getTableNameAsString() {
-    return Bytes.toString(tableName);
-  }
-
-  /**
    * Returns true if the given inclusive range of rows is fully contained
    * by this region. For example, if the region is foo,a,g and this is
    * passed ["b","c"] or ["a","c"] it will return true, but if this is passed
@@ -539,67 +487,33 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
       Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY));
   }
 
-  /**
-   * @return the tableDesc
-   * @deprecated Do not use; expensive call
-   * use HRegionInfo.getTableNameAsString() in place of
-   * HRegionInfo.getTableDesc().getNameAsString()
-   */
-  @Deprecated
-  public HTableDescriptor getTableDesc() {
-    Configuration c = HBaseConfiguration.create();
-    FileSystem fs;
-    try {
-      fs = FileSystem.get(c);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-    FSTableDescriptors fstd =
-      new FSTableDescriptors(fs, new Path(c.get(HConstants.HBASE_DIR)));
-    try {
-      return fstd.get(this.tableName);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
+  /** @return the tableDesc */
+  public HTableDescriptor getTableDesc(){
+    return tableDesc;
   }
 
   /**
    * @param newDesc new table descriptor to use
-   * @deprecated Do not use; expensive call
    */
-  @Deprecated
   public void setTableDesc(HTableDescriptor newDesc) {
-    Configuration c = HBaseConfiguration.create();
-    FileSystem fs;
-    try {
-      fs = FileSystem.get(c);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-    FSTableDescriptors fstd =
-      new FSTableDescriptors(fs, new Path(c.get(HConstants.HBASE_DIR)));
-    try {
-      fstd.add(newDesc);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
+    this.tableDesc = newDesc;
   }
 
   /** @return true if this is the root region */
   public boolean isRootRegion() {
-    return Bytes.equals(tableName, HRegionInfo.ROOT_REGIONINFO.getTableName());
+    return this.tableDesc.isRootRegion();
   }
 
   /** @return true if this region is from a table that is a meta table,
    * either .META. or -ROOT- */
   public boolean isMetaTable() {
-    return Bytes.equals(tableName, HRegionInfo.FIRST_META_REGIONINFO.getTableName());
+    return this.tableDesc.isMetaTable();
   }
 
   /** @return true if this region is a meta region */
   public boolean isMetaRegion() {
-    return isMetaTable();
+    return this.tableDesc.isMetaRegion();
   }
 
   /**
@@ -650,14 +564,14 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
   @Override
   public String toString() {
     return "REGION => {" + HConstants.NAME + " => '" +
-      this.regionNameStr
-      + "', TableName => '" + Bytes.toStringBinary(this.tableName)
-      + "', STARTKEY => '" +
+      this.regionNameStr +
+      "', STARTKEY => '" +
      Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" +
      Bytes.toStringBinary(this.endKey) +
      "', ENCODED => " + getEncodedName() + "," +
      (isOffline()? " OFFLINE => true,": "") +
-      (isSplit()? " SPLIT => true,": "") + "}";
+      (isSplit()? " SPLIT => true,": "") +
+      " TABLE => {" + this.tableDesc.toString() + "}";
   }
 
   /**
@@ -704,7 +618,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
     Bytes.writeByteArray(out, regionName);
     out.writeBoolean(split);
     Bytes.writeByteArray(out, startKey);
-    Bytes.writeByteArray(out, tableName);
+    tableDesc.write(out);
     out.writeInt(hashCode);
   }
 
@@ -718,7 +632,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
     this.regionNameStr = Bytes.toStringBinary(this.regionName);
     this.split = in.readBoolean();
     this.startKey = Bytes.readByteArray(in);
-    this.tableName = Bytes.readByteArray(in);
+    this.tableDesc.readFields(in);
     this.hashCode = in.readInt();
   }
 
@@ -732,7 +646,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
     }
 
     // Are regions of same table?
-    int result = Bytes.compareTo(this.tableName, o.tableName);
+    int result = Bytes.compareTo(this.tableDesc.getName(), o.tableDesc.getName());
     if (result != 0) {
       return result;
     }
@@ -762,4 +676,12 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
     return isRootRegion()? KeyValue.ROOT_COMPARATOR: isMetaRegion()?
       KeyValue.META_COMPARATOR: KeyValue.COMPARATOR;
   }
+
+  public String getTableNameAsString() {
+    return getTableDesc().getNameAsString();
+  }
+
+  public byte[] getTableName() {
+    return getTableDesc().getName();
+  }
 }
diff --git src/main/java/org/apache/hadoop/hbase/KeyValue.java src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 24bd57d..3f8c141 100644
--- src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -970,7 +970,6 @@ public class KeyValue implements Writable, HeapSize {
    * @return True if this KV is a {@link KeyValue.Type#Delete} type.
    */
   public boolean isDeleteType() {
-    // TODO: Fix this method name vis-a-vis isDelete!
     return getType() == Type.Delete.getCode();
   }
diff --git src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index d25b20c..54b4939 100644
--- src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-import org.apache.hadoop.hbase.util.Threads;
 import java.util.concurrent.CopyOnWriteArrayList;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -239,7 +238,6 @@ public class LocalHBaseCluster {
     List<JVMClusterUtil.RegionServerThread> list = getRegionServers();
     for (JVMClusterUtil.RegionServerThread rst: list) {
       if (rst.isAlive()) liveServers.add(rst);
-      else LOG.info("Not alive " + rst.getName());
     }
     return liveServers;
   }
@@ -388,12 +386,12 @@ public class LocalHBaseCluster {
    */
   public void join() {
     if (this.regionThreads != null) {
-      for(Thread t: this.regionThreads) {
-        if (t.isAlive()) {
-          try {
-            Threads.threadDumpingIsAlive(t);
+      for(Thread t: this.regionThreads) {
+        if (t.isAlive()) {
+          try {
+            t.join();
           } catch (InterruptedException e) {
-            LOG.debug("Interrupted", e);
+            // continue
           }
         }
       }
@@ -402,9 +400,9 @@
       for (Thread t : this.masterThreads) {
         if (t.isAlive()) {
           try {
-            Threads.threadDumpingIsAlive(t);
+            t.join();
           } catch (InterruptedException e) {
-            LOG.debug("Interrupted", e);
+            // continue
           }
         }
       }
diff --git src/main/java/org/apache/hadoop/hbase/TableDescriptors.java src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
deleted file mode 100644
index 245967b..0000000
--- src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.Map;
-
-/**
- * Get, remove and modify table descriptors.
- * Used by servers to host descriptors.
- */
-public interface TableDescriptors {
-  /**
-   * @param tablename
-   * @return HTableDescriptor for tablename
-   * @throws TableExistsException
-   * @throws FileNotFoundException
-   * @throws IOException
-   */
-  public HTableDescriptor get(final String tablename)
-  throws TableExistsException, FileNotFoundException, IOException;
-
-  /**
-   * @param tablename
-   * @return HTableDescriptor for tablename
-   * @throws TableExistsException
-   * @throws FileNotFoundException
-   * @throws IOException
-   */
-  public HTableDescriptor get(final byte[] tablename)
-  throws TableExistsException, FileNotFoundException, IOException;
-
-  /**
-   * Get Map of all HTableDescriptors. Populates the descriptor cache as a
-   * side effect.
-   * @param fs
-   * @param rootdir
-   * @return Map of all descriptors.
-   * @throws IOException
-   */
-  public Map<String, HTableDescriptor> getAll()
-  throws IOException;
-
-  /**
-   * Add or update descriptor
-   * @param htd Descriptor to set into TableDescriptors
-   * @throws IOException
-   */
-  public void add(final HTableDescriptor htd)
-  throws IOException;
-
-  /**
-   * @param tablename
-   * @return Instance of table descriptor or null if none found.
-   * @throws IOException
-   */
-  public HTableDescriptor remove(final String tablename)
-  throws IOException;
-}
\ No newline at end of file
diff --git src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java
index f45bbbd..8329a40 100644
--- src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java
+++ src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.avro.generated.AClusterStatus;
 import org.apache.hadoop.hbase.avro.generated.ADelete;
 import org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor;
@@ -191,8 +190,6 @@ public class AvroServer {
   public ATableDescriptor describeTable(ByteBuffer table) throws AIOError {
     try {
       return AvroUtil.htdToATD(admin.getTableDescriptor(Bytes.toBytes(table)));
-    } catch (TableNotFoundException e) {
-      return null;
     } catch (IOException e) {
       AIOError ioe = new AIOError();
       ioe.message = new Utf8(e.getMessage());
diff --git src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
index 119105e..3570e6a 100644
--- src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
+++ src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
@@ -1,4 +1,6 @@
 /**
+ * Copyright 2010 The Apache Software Foundation
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -31,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
+import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -57,76 +60,30 @@ import org.apache.hadoop.ipc.RemoteException;
  * interrupt waits and close up shop.
  */
 public class CatalogTracker {
-  // TODO: This class needs a rethink.  The original intent was that it would be
-  // the one-stop-shop for root and meta locations and that it would get this
-  // info from reading and watching zk state.  The class was to be used by
-  // servers when they needed to know of root and meta movement but also by
-  // client-side (inside in HTable) so rather than figure root and meta
-  // locations on fault, the client would instead get notifications out of zk.
-  //
-  // But this original intent is frustrated by the fact that this class has to
-  // read an hbase table, the -ROOT- table, to figure out the .META. region
-  // location which means we depend on an HConnection.  HConnection will do
-  // retrying but also, it has its own mechanism for finding root and meta
-  // locations (and for 'verifying'; it tries the location and if it fails, does
-  // new lookup, etc.).  So, at least for now, HConnection (or HTable) can't
-  // have a CT since CT needs a HConnection (Even then, do want HT to have a CT?
-  // For HT keep up a session with ZK?  Rather, shouldn't we do like asynchbase
-  // where we'd open a connection to zk, read what we need then let the
-  // connection go?).  The 'fix' is make it so both root and meta addresses
-  // are wholey up in zk -- not in zk (root) -- and in an hbase table (meta).
-  //
-  // But even then, this class does 'verification' of the location and it does
-  // this by making a call over an HConnection (which will do its own root
-  // and meta lookups).  Isn't this verification 'useless' since when we
-  // return, whatever is dependent on the result of this call then needs to
-  // use HConnection; what we have verified may change in meantime (HConnection
-  // uses the CT primitives, the root and meta trackers finding root locations).
-  //
-  // When meta is moved to zk, this class may make more sense.  In the
-  // meantime, it does not cohere.  It should just watch meta and root and not
-  // NOT do verification -- let that be out in HConnection since its going to
-  // be done there ultimately anyways.
-  //
-  // This class has spread throughout the codebase.  It needs to be reigned in.
-  // This class should be used server-side only, even if we move meta location
-  // up into zk.  Currently its used over in the client package.  Its used in
-  // MetaReader and MetaEditor classes usually just to get the Configuration
-  // its using (It does this indirectly by asking its HConnection for its
-  // Configuration and even then this is just used to get an HConnection out on
-  // the other end).  I made https://issues.apache.org/jira/browse/HBASE-4495 for
-  // doing CT fixup.  St.Ack 09/30/2011.
-  //
   private static final Log LOG = LogFactory.getLog(CatalogTracker.class);
+  private final Configuration conf;
   private final HConnection connection;
   private final ZooKeeperWatcher zookeeper;
   private final RootRegionTracker rootRegionTracker;
   private final MetaNodeTracker metaNodeTracker;
   private final AtomicBoolean metaAvailable = new AtomicBoolean(false);
-  private boolean instantiatedzkw = false;
-
-  /*
+  /**
   * Do not clear this address once set.  Its needed when we do
   * server shutdown processing -- we need to know who had .META. last.  If you
   * want to know if the address is good, rely on {@link #metaAvailable} value.
   */
  private ServerName metaLocation;
-
-  /*
-   * Timeout waiting on root or meta to be set.
-   */
   private final int defaultTimeout;
-
   private boolean stopped = false;
 
-  static final byte [] ROOT_REGION_NAME =
+  public static final byte [] ROOT_REGION =
     HRegionInfo.ROOT_REGIONINFO.getRegionName();
-  static final byte [] META_REGION_NAME =
+  public static final byte [] META_REGION =
     HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
 
   /**
-   * Constructs a catalog tracker.  Find current state of catalog tables.
-   * Begin active tracking by executing {@link #start()} post construction. Does
+   * Constructs a catalog tracker. Find current state of catalog tables and
+   * begin active tracking by executing {@link #start()} post construction. Does
    * not timeout.
    *
    * @param conf
@@ -140,36 +97,29 @@ public class CatalogTracker {
   }
 
   /**
-   * Constructs the catalog tracker.  Find current state of catalog tables.
-   * Begin active tracking by executing {@link #start()} post construction.
+   * Constructs the catalog tracker. Find current state of catalog tables and
+   * begin active tracking by executing {@link #start()} post construction.
    * Does not timeout.
-   * @param zk If zk is null, we'll create an instance (and shut it down
-   * when {@link #stop()} is called) else we'll use what is passed.
+   * @param zk
    * @param connection server connection
-   * @param abortable If fatal exception we'll call abort on this.  May be null.
-   * If it is we'll use the Connection associated with the passed
-   * {@link Configuration} as our Abortable.
+   * @param abortable if fatal exception
    * @throws IOException
    */
   public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
       final Abortable abortable)
   throws IOException {
-    this(zk, conf, abortable,
-      conf.getInt("hbase.catalogtracker.default.timeout", 1000));
+    this(zk, conf, abortable, 0);
   }
 
   /**
-   * Constructs the catalog tracker.  Find current state of catalog tables.
-   * Begin active tracking by executing {@link #start()} post construction.
-   * @param zk If zk is null, we'll create an instance (and shut it down
-   * when {@link #stop()} is called) else we'll use what is passed.
-   * @param conf
-   * @param abortable If fatal exception we'll call abort on this.  May be null.
-   * If it is we'll use the Connection associated with the passed
-   * {@link Configuration} as our Abortable.
+   * Constructs the catalog tracker. Find current state of catalog tables and
+   * begin active tracking by executing {@link #start()} post construction.
+   * @param zk
+   * @param connection server connection
+   * @param abortable if fatal exception
    * @param defaultTimeout Timeout to use.  Pass zero for no timeout
    * ({@link Object#wait(long)} when passed a 0 waits for ever).
-   * @throws IOException
+   * @throws IOException 
    */
   public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
       Abortable abortable, final int defaultTimeout)
   throws IOException {
@@ -180,29 +130,14 @@
   CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
       HConnection connection, Abortable abortable, final int defaultTimeout)
   throws IOException {
+    this.conf = conf;
     this.connection = connection;
+    this.zookeeper = (zk == null) ? this.connection.getZooKeeperWatcher() : zk;
     if (abortable == null) {
-      // A connection is abortable.
       abortable = this.connection;
     }
-    if (zk == null) {
-      // Create our own.  Set flag so we tear it down on stop.
-      this.zookeeper =
-        new ZooKeeperWatcher(conf, "catalogtracker-on-" + connection.toString(),
-          abortable);
-      instantiatedzkw = true;
-    } else {
-      this.zookeeper = zk;
-    }
     this.rootRegionTracker = new RootRegionTracker(zookeeper, abortable);
-    final CatalogTracker ct = this;
-    // Override nodeDeleted so we get notified when meta node deleted
-    this.metaNodeTracker = new MetaNodeTracker(zookeeper, abortable) {
-      public void nodeDeleted(String path) {
-        if (!path.equals(node)) return;
-        ct.resetMetaLocation();
-      }
-    };
+    this.metaNodeTracker = new MetaNodeTracker(zookeeper, this, abortable);
     this.defaultTimeout = defaultTimeout;
   }
@@ -214,9 +149,9 @@
    * @throws InterruptedException
    */
   public void start() throws IOException, InterruptedException {
-    LOG.debug("Starting catalog tracker " + this);
     this.rootRegionTracker.start();
     this.metaNodeTracker.start();
+    LOG.debug("Starting catalog tracker " + this);
   }
 
  /**
@@ -238,9 +173,6 @@
       // IOException}, in reality, the implementation would never do that.
       LOG.error("Attempt to close catalog tracker's connection failed.", e);
     }
-    if (this.instantiatedzkw) {
-      this.zookeeper.close();
-    }
     // Call this and it will interrupt any ongoing waits on meta.
     synchronized (this.metaAvailable) {
       this.metaAvailable.notifyAll();
     }
   }
@@ -251,8 +183,7 @@
   /**
    * Gets the current location for -ROOT- or null if location is
    * not currently available.
-   * @return {@link ServerName} for server hosting -ROOT- or null
-   * if none available
+   * @return server name
    * @throws InterruptedException
    */
   public ServerName getRootLocation() throws InterruptedException {
@@ -260,8 +191,8 @@
   }
 
   /**
-   * @return {@link ServerName} for server hosting .META. or null
-   * if none available
+   * @return Location of server hosting meta region formatted as per
+   * {@link ServerName}, or null if none available
    */
   public ServerName getMetaLocation() {
     return this.metaLocation;
   }
@@ -282,8 +213,7 @@
    * for up to the specified timeout if not immediately available.  Returns null
    * if the timeout elapses before root is available.
    * @param timeout maximum time to wait for root availability, in milliseconds
-   * @return {@link ServerName} for server hosting -ROOT- or null
-   * if none available
+   * @return Location of server hosting root region or null if none available
    * @throws InterruptedException if interrupted while waiting
    * @throws NotAllMetaRegionsOnlineException if root not available before
    * timeout
@@ -300,32 +230,14 @@
   /**
    * Gets a connection to the server hosting root, as reported by ZooKeeper,
    * waiting up to the specified timeout for availability.
-   * @param timeout How long to wait on root location
    * @see #waitForRoot(long) for additional information
    * @return connection to server hosting root
    * @throws InterruptedException
    * @throws NotAllMetaRegionsOnlineException if timed out waiting
    * @throws IOException
-   * @deprecated Use {@link #getRootServerConnection(long)}
    */
   public HRegionInterface waitForRootServerConnection(long timeout)
   throws InterruptedException, NotAllMetaRegionsOnlineException, IOException {
-    return getRootServerConnection(timeout);
-  }
-
-  /**
-   * Gets a connection to the server hosting root, as reported by ZooKeeper,
-   * waiting up to the specified timeout for availability.
-   * <p>WARNING: Does not retry.  Use an {@link HTable} instead.
-   * @param timeout How long to wait on root location
-   * @see #waitForRoot(long) for additional information
-   * @return connection to server hosting root
-   * @throws InterruptedException
-   * @throws NotAllMetaRegionsOnlineException if timed out waiting
-   * @throws IOException
-   */
-  HRegionInterface getRootServerConnection(long timeout)
-  throws InterruptedException, NotAllMetaRegionsOnlineException, IOException {
     return getCachedConnection(waitForRoot(timeout));
   }
 
@@ -336,18 +248,30 @@
    * @return connection to server hosting root
    * @throws NotAllMetaRegionsOnlineException if timed out waiting
    * @throws IOException
-   * @deprecated Use {@link #getRootServerConnection(long)}
    */
   public HRegionInterface waitForRootServerConnectionDefault()
   throws NotAllMetaRegionsOnlineException, IOException {
     try {
-      return getRootServerConnection(this.defaultTimeout);
+      return getCachedConnection(waitForRoot(defaultTimeout));
     } catch (InterruptedException e) {
       throw new NotAllMetaRegionsOnlineException("Interrupted");
     }
   }
 
   /**
+   * Gets a connection to the server hosting root, as reported by ZooKeeper,
+   * if available.  Returns null if no location is immediately available.
+   * @return connection to server hosting root, null if not available
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  private HRegionInterface getRootServerConnection()
+  throws IOException, InterruptedException {
+    ServerName sn = this.rootRegionTracker.getRootRegionLocation();
+    return getCachedConnection(sn);
+  }
+
+  /**
    * Gets a connection to the server currently hosting .META. or
    * null if location is not currently available.
    * <p>
@@ -368,28 +292,24 @@
     synchronized (metaAvailable) {
       if (metaAvailable.get()) {
         HRegionInterface current = getCachedConnection(this.metaLocation);
-        // If we are to refresh, verify we have a good connection by making
-        // an invocation on it.
-        if (verifyRegionLocation(current, this.metaLocation, META_REGION_NAME)) {
+        if (verifyRegionLocation(current, this.metaLocation, META_REGION)) {
           return current;
         }
         resetMetaLocation();
       }
-      // We got here because there is no meta available or because whats
-      // available is bad.
-
-      // Now read the current .META. content from -ROOT-.  Note: This goes via
-      // an HConnection.  It has its own way of figuring root and meta locations
-      // which we have to wait on.
-      ServerName newLocation =
-        MetaReader.readRegionLocation(this, META_REGION_NAME);
+      HRegionInterface rootConnection = getRootServerConnection();
+      if (rootConnection == null) {
+        LOG.debug("-ROOT- server unavailable.");
+        return null;
+      }
+      ServerName newLocation = MetaReader.readMetaLocation(rootConnection);
       if (newLocation == null) {
         LOG.debug(".META. server unavailable.");
         return null;
       }
       HRegionInterface newConnection = getCachedConnection(newLocation);
-      if (verifyRegionLocation(newConnection, newLocation, META_REGION_NAME)) {
+      if (verifyRegionLocation(newConnection, newLocation, META_REGION)) {
         setMetaLocation(newLocation);
         return newConnection;
       } else {
@@ -402,19 +322,13 @@
 
   /**
    * Waits indefinitely for availability of .META..  Used during
-   * cluster startup.  Does not verify meta, just that something has been
-   * set up in zk.
-   * @see #waitForMeta(long)
+   * cluster startup.
    * @throws InterruptedException if interrupted while waiting
    */
   public void waitForMeta() throws InterruptedException {
-    while (!this.stopped) {
-      try {
-        if (waitForMeta(100) != null) break;
-      } catch (NotAllMetaRegionsOnlineException e) {
-        LOG.info("Retrying", e);
-      } catch (IOException e) {
-        LOG.info("Retrying", e);
+    synchronized (metaAvailable) {
+      while (!stopped && !metaAvailable.get()) {
+        metaAvailable.wait();
       }
     }
   }
@@ -426,8 +340,7 @@
    * in that it will go ahead and verify the location gotten from ZooKeeper and
    * -ROOT- region by trying to use returned connection.
    * @param timeout maximum time to wait for meta availability, in milliseconds
-   * @return {@link ServerName} for server hosting .META. or null
-   * if none available
+   * @return location of meta
    * @throws InterruptedException if interrupted while waiting
    * @throws IOException unexpected exception connecting to meta server
    * @throws NotAllMetaRegionsOnlineException if meta not available before
@@ -446,7 +359,8 @@
       metaAvailable.wait(waitTime);
     }
     if (getMetaServerConnection() == null) {
-      throw new NotAllMetaRegionsOnlineException("Timed out (" + timeout + "ms)");
+      throw new NotAllMetaRegionsOnlineException(
+        "Timed out (" + timeout + "ms)");
     }
     return metaLocation;
   }
@@ -460,7 +374,6 @@
    * @throws InterruptedException
    * @throws NotAllMetaRegionsOnlineException if timed out waiting
    * @throws IOException
-   * @deprecated Does not retry; use an HTable instance instead.
    */
   public HRegionInterface waitForMetaServerConnection(long timeout)
   throws InterruptedException, NotAllMetaRegionsOnlineException, IOException {
@@ -470,12 +383,10 @@
   /**
    * Gets a connection to the server hosting meta, as reported by ZooKeeper,
    * waiting up to the specified timeout for availability.
-   * Used in tests.
    * @see #waitForMeta(long) for additional information
    * @return connection to server hosting meta
    * @throws NotAllMetaRegionsOnlineException if timed out or interrupted
    * @throws IOException
-   * @deprecated Does not retry; use an HTable instance instead.
    */
   public HRegionInterface waitForMetaServerConnectionDefault()
   throws NotAllMetaRegionsOnlineException, IOException {
     try {
       return getCachedConnection(waitForMeta(defaultTimeout));
     } catch (InterruptedException e) {
       throw new NotAllMetaRegionsOnlineException("Interrupted");
     }
   }
 
-  /**
-   * Called when we figure current meta is off (called from zk callback).
-   */
-  public void resetMetaLocation() {
-    LOG.debug("Current cached META location, " + metaLocation +
-      ", is not valid, resetting");
-    synchronized(this.metaAvailable) {
-      this.metaAvailable.set(false);
-      this.metaAvailable.notifyAll();
-    }
+  private void resetMetaLocation() {
+    LOG.debug("Current cached META location: " + metaLocation +
+      " is not valid, resetting");
+    this.metaAvailable.set(false);
   }
 
-  /**
-   * Caller must be synchronized on this.metaAvailable
-   * @param metaLocation
-   */
   private void setMetaLocation(final ServerName metaLocation) {
     LOG.debug("set new cached META location: " + metaLocation);
     metaAvailable.set(true);
@@ -510,13 +411,6 @@
     this.metaAvailable.notifyAll();
   }
 
-  /**
-   * @param sn ServerName to get a connection against.
-   * @return The HRegionInterface we got when we connected to sn
-   * May have come from cache, may not be good, may have been setup by this
-   * invocation, or may be null.
-   * @throws IOException
-   */
   private HRegionInterface getCachedConnection(ServerName sn)
   throws IOException {
     if (sn == null) {
@@ -552,32 +446,17 @@
     return protocol;
   }
 
-  /**
-   * Verify we can connect to hostingServer and that its carrying
-   * regionName.
-   * @param hostingServer Interface to the server hosting regionName
-   * @param serverName The servername that goes with the metaServer
-   * Interface.  Used logging.
-   * @param regionName The regionname we are interested in.
-   * @return True if we were able to verify the region located at other side of
-   * the Interface.
-   * @throws IOException
-   */
-  // TODO: We should be able to get the ServerName from the HRegionInterface
-  // rather than have to pass it in.  Its made awkward by the fact that the
-  // HRI is likely a proxy against remote server so the getServerName needs
-  // to be fixed to go to a local method or to a cache before we can do this.
-  private boolean verifyRegionLocation(HRegionInterface hostingServer,
-      final ServerName address, final byte [] regionName)
+  private boolean verifyRegionLocation(HRegionInterface metaServer,
+      final ServerName address,
+      byte [] regionName)
   throws IOException {
-    if (hostingServer == null) {
-      LOG.info("Passed hostingServer is null");
+    if (metaServer == null) {
+      LOG.info("Passed metaserver is null");
       return false;
     }
     Throwable t = null;
     try {
-      // Try and get regioninfo from the hosting server.
-      return hostingServer.getRegionInfo(regionName) != null;
+      return metaServer.getRegionInfo(regionName) != null;
     } catch (ConnectException e) {
       t = e;
     } catch (RemoteException e) {
@@ -619,7 +498,8 @@
     }
     return (connection == null)? false:
       verifyRegionLocation(connection,
-        this.rootRegionTracker.getRootRegionLocation(), ROOT_REGION_NAME);
+        this.rootRegionTracker.getRootRegionLocation(),
+        HRegionInfo.ROOT_REGIONINFO.getRegionName());
   }
 
   /**
@@ -643,7 +523,6 @@
     return connection != null;
   }
 
-  // Used by tests.
   MetaNodeTracker getMetaNodeTracker() {
     return this.metaNodeTracker;
   }
diff --git src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
index 6387df9..c7fd85c 100644
--- src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
+++ src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
@@ -1,4 +1,6 @@
 /**
+ * Copyright 2010 The Apache Software Foundation
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -29,109 +31,26 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
 
 /**
  * Writes region and assignment information to .META..
+ * <p>
+ * Uses the {@link CatalogTracker} to obtain locations and connections to
+ * catalogs.
  */
 public class MetaEditor {
-  // TODO: Strip CatalogTracker from this class.  Its all over and in the end
-  // its only used to get its Configuration so we can get associated
-  // Connection.
   private static final Log LOG = LogFactory.getLog(MetaEditor.class);
 
-  private static Put makePutFromRegionInfo(HRegionInfo regionInfo)
-  throws IOException {
+  private static Put makePutFromRegionInfo(HRegionInfo regionInfo) throws IOException {
     Put put = new Put(regionInfo.getRegionName());
     put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
         Writables.getBytes(regionInfo));
     return put;
   }
-
-  /**
-   * Put the passed p to the .META. table.
-   * @param ct CatalogTracker on whose back we will ride the edit.
-   * @param p Put to add to .META.
-   * @throws IOException
-   */
-  static void putToMetaTable(final CatalogTracker ct, final Put p)
-  throws IOException {
-    put(MetaReader.getMetaHTable(ct), p);
-  }
-
-  /**
-   * Put the passed p to the .META. table.
-   * @param ct CatalogTracker on whose back we will ride the edit.
-   * @param p Put to add to .META.
-   * @throws IOException
-   */
-  static void putToRootTable(final CatalogTracker ct, final Put p)
-  throws IOException {
-    put(MetaReader.getRootHTable(ct), p);
-  }
-
-  /**
-   * Put the passed p to a catalog table.
-   * @param ct CatalogTracker on whose back we will ride the edit.
-   * @param regionName Name of the catalog table to put too.
-   * @param p Put to add
-   * @throws IOException
-   */
-  static void putToCatalogTable(final CatalogTracker ct,
-      final byte [] regionName, final Put p)
-  throws IOException {
-    HTable t = MetaReader.getCatalogHTable(ct, regionName);
-    put(t, p);
-  }
-
-  /**
-   * @param t Table to use (will be closed when done).
-   * @param p
-   * @throws IOException
-   */
-  private static void put(final HTable t, final Put p) throws IOException {
-    try {
-      t.put(p);
-    } finally {
-      t.close();
-    }
-  }
-
-  /**
-   * Put the passed ps to the .META. table.
-   * @param ct CatalogTracker on whose back we will ride the edit.
-   * @param ps Put to add to .META.
-   * @throws IOException
-   */
-  static void putsToMetaTable(final CatalogTracker ct, final List<Put> ps)
-  throws IOException {
-    HTable t = MetaReader.getMetaHTable(ct);
-    try {
-      t.put(ps);
-    } finally {
-      t.close();
-    }
-  }
-
-  /**
-   * Delete the passed d from the .META. table.
-   * @param ct CatalogTracker on whose back we will ride the edit.
-   * @param d Delete to add to .META.
-   * @throws IOException
-   */
-  static void deleteMetaTable(final CatalogTracker ct, final Delete d)
-  throws IOException {
-    HTable t = MetaReader.getMetaHTable(ct);
-    try {
-      t.delete(d);
-    } finally {
-      t.close();
-    }
-  }
 
   /**
    * Adds a META row for the specified new region.
@@ -141,7 +60,8 @@ public class MetaEditor {
   public static void addRegionToMeta(CatalogTracker catalogTracker,
       HRegionInfo regionInfo)
   throws IOException {
-    putToMetaTable(catalogTracker, makePutFromRegionInfo(regionInfo));
+    catalogTracker.waitForMetaServerConnectionDefault().put(
+      CatalogTracker.META_REGION, makePutFromRegionInfo(regionInfo));
     LOG.info("Added region " + regionInfo.getRegionNameAsString() + " to META");
   }
@@ -157,9 +77,11 @@
     List<Put> puts = new ArrayList<Put>();
     for (HRegionInfo regionInfo : regionInfos) {
       puts.add(makePutFromRegionInfo(regionInfo));
+      LOG.debug("Added region " + regionInfo.getRegionNameAsString() + " to META");
     }
-    putsToMetaTable(catalogTracker, puts);
-    LOG.info("Added " + puts.size() + " regions in META");
+    catalogTracker.waitForMetaServerConnectionDefault().put(
+      CatalogTracker.META_REGION, puts);
+    LOG.info("Added " + puts.size() + " regions to META");
   }
 
  /**
@@ -184,7 +106,7 @@
       Writables.getBytes(a));
     put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
       Writables.getBytes(b));
-    putToMetaTable(catalogTracker, put);
+    catalogTracker.waitForMetaServerConnectionDefault().put(CatalogTracker.META_REGION, put);
     LOG.info("Offlined parent region " + parent.getRegionNameAsString() +
       " in META");
   }
@@ -192,11 +114,14 @@
   public static void addDaughter(final CatalogTracker catalogTracker,
       final HRegionInfo regionInfo, final ServerName sn)
   throws NotAllMetaRegionsOnlineException, IOException {
+    HRegionInterface server = catalogTracker.waitForMetaServerConnectionDefault();
+    byte [] catalogRegionName = CatalogTracker.META_REGION;
     Put put = new Put(regionInfo.getRegionName());
     addRegionInfo(put, regionInfo);
     if (sn != null) addLocation(put, sn);
-    putToMetaTable(catalogTracker, put);
+    server.put(catalogRegionName, put);
     LOG.info("Added daughter " + regionInfo.getRegionNameAsString() +
+      " in region " + Bytes.toString(catalogRegionName) +
       (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
   }
 
@@ -218,7 +143,9 @@
   public static void updateMetaLocation(CatalogTracker catalogTracker,
       HRegionInfo regionInfo, ServerName sn)
   throws IOException, ConnectException {
-    updateLocation(catalogTracker, regionInfo, sn);
+    HRegionInterface server = catalogTracker.waitForRootServerConnectionDefault();
+    if (server == null) throw new IOException("No server for -ROOT-");
+    updateLocation(server, CatalogTracker.ROOT_REGION, regionInfo, sn);
   }
 
   /**
@@ -236,7 +163,8 @@
   public static void updateRegionLocation(CatalogTracker catalogTracker,
       HRegionInfo regionInfo, ServerName sn)
   throws IOException {
-    updateLocation(catalogTracker, regionInfo, sn);
+    updateLocation(catalogTracker.waitForMetaServerConnectionDefault(),
+      CatalogTracker.META_REGION, regionInfo, sn);
   }
 
   /**
@@ -245,21 +173,22 @@
    * Connects to the specified server which should be hosting the specified
    * catalog region name to perform the edit.
    *
-   * @param catalogTracker
+   * @param server connection to server hosting catalog region
+   * @param catalogRegionName name of catalog region being updated
    * @param regionInfo region to update location of
    * @param sn Server name
    * @throws IOException In particular could throw {@link java.net.ConnectException}
    * if the server is down on other end.
*/ - private static void updateLocation(final CatalogTracker catalogTracker, - HRegionInfo regionInfo, ServerName sn) + private static void updateLocation(HRegionInterface server, + byte [] catalogRegionName, HRegionInfo regionInfo, ServerName sn) throws IOException { - final byte [] regionName = regionInfo.getRegionName(); Put put = new Put(regionInfo.getRegionName()); addLocation(put, sn); - putToCatalogTable(catalogTracker, regionName, put); + server.put(catalogRegionName, put); LOG.info("Updated row " + regionInfo.getRegionNameAsString() + - " with server=" + sn); + " in region " + Bytes.toStringBinary(catalogRegionName) + " with " + + "serverName=" + sn.toString()); } /** @@ -272,7 +201,8 @@ public class MetaEditor { HRegionInfo regionInfo) throws IOException { Delete delete = new Delete(regionInfo.getRegionName()); - deleteMetaTable(catalogTracker, delete); + catalogTracker.waitForMetaServerConnectionDefault(). + delete(CatalogTracker.META_REGION, delete); LOG.info("Deleted region " + regionInfo.getRegionNameAsString() + " from META"); } @@ -291,20 +221,27 @@ public class MetaEditor { throws NotAllMetaRegionsOnlineException, IOException { Delete delete = new Delete(parent.getRegionName()); delete.deleteColumns(HConstants.CATALOG_FAMILY, qualifier); - deleteMetaTable(catalogTracker, delete); + catalogTracker.waitForMetaServerConnectionDefault(). + delete(CatalogTracker.META_REGION, delete); LOG.info("Deleted daughter reference " + daughter.getRegionNameAsString() + ", qualifier=" + Bytes.toStringBinary(qualifier) + ", from parent " + parent.getRegionNameAsString()); } - public static HRegionInfo getHRegionInfo( - Result data) throws IOException { - byte [] bytes = - data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - if (bytes == null) return null; - HRegionInfo info = Writables.getHRegionInfo(bytes); - LOG.info("Current INFO from scan results = " + info); - return info; + /** + * Updates the region information for the specified region in META. + * @param catalogTracker + * @param regionInfo region to be updated in META + * @throws IOException + */ + public static void updateRegionInfo(CatalogTracker catalogTracker, + HRegionInfo regionInfo) + throws IOException { + Put put = new Put(regionInfo.getRegionName()); + addRegionInfo(put, regionInfo); + catalogTracker.waitForMetaServerConnectionDefault().put( + CatalogTracker.META_REGION, put); + LOG.info("Updated region " + regionInfo.getRegionNameAsString() + " in META"); } private static Put addRegionInfo(final Put p, final HRegionInfo hri) @@ -321,4 +258,4 @@ public class MetaEditor { Bytes.toBytes(sn.getStartcode())); return p; } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java deleted file mode 100644 index 0c1d221..0000000 --- src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java +++ /dev/null @@ -1,242 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.catalog;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.catalog.MetaReader.Visitor;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.migration.HRegionInfo090x;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
-
-/**
- * Tools to help with migration of meta tables so they no longer host
- * instances of HTableDescriptor.
- * @deprecated Used migration from 0.90 to 0.92 so will be going away in next
- * release
- */
-public class MetaMigrationRemovingHTD {
-  private static final Log LOG = LogFactory.getLog(MetaMigrationRemovingHTD.class);
-
-  /** The metaupdated column qualifier */
-  public static final byte [] META_MIGRATION_QUALIFIER =
-    Bytes.toBytes("metamigrated");
-
-  /**
-   * Update legacy META rows, removing HTD from HRI.
-   * @param masterServices
-   * @return
-   * @throws IOException
-   */
-  public static List<HTableDescriptor> updateMetaWithNewRegionInfo(
-      final MasterServices masterServices)
-  throws IOException {
-    final List<HTableDescriptor> htds = new ArrayList<HTableDescriptor>();
-    Visitor v = new Visitor() {
-      @Override
-      public boolean visit(Result r) throws IOException {
-        if (r == null || r.isEmpty()) return true;
-        HRegionInfo090x hrfm = MetaMigrationRemovingHTD.getHRegionInfoForMigration(r);
-        if (hrfm == null) return true;
-        htds.add(hrfm.getTableDesc());
-        masterServices.getMasterFileSystem()
-          .createTableDescriptor(hrfm.getTableDesc());
-        updateHRI(masterServices.getCatalogTracker(), false, hrfm);
-        return true;
-      }
-    };
-    MetaReader.fullScan(masterServices.getCatalogTracker(), v);
-    MetaMigrationRemovingHTD.updateRootWithMetaMigrationStatus(masterServices.getCatalogTracker(), true);
-    return htds;
-  }
-
-  /**
-   * Update the ROOT with new HRI. (HRI with no HTD)
-   * @param masterServices
-   * @return
-   * @throws IOException
-   */
-  public static List<HTableDescriptor> updateRootWithNewRegionInfo(
-      final MasterServices masterServices)
-  throws IOException {
-    final List<HTableDescriptor> htds = new ArrayList<HTableDescriptor>();
-    Visitor v = new Visitor() {
-      @Override
-      public boolean visit(Result r) throws IOException {
-        if (r == null || r.isEmpty()) return true;
-        HRegionInfo090x hrfm = MetaMigrationRemovingHTD.getHRegionInfoForMigration(r);
-        if (hrfm == null) return true;
-        htds.add(hrfm.getTableDesc());
-        masterServices.getMasterFileSystem().createTableDescriptor(
-          hrfm.getTableDesc());
-        updateHRI(masterServices.getCatalogTracker(), true, hrfm);
-        return true;
-      }
-    };
-    MetaReader.fullScan(masterServices.getCatalogTracker(), v, null, true);
-    return htds;
-  }
-
-  /**
-   * Migrate root and meta to newer version. This updates the META and ROOT
-   * and removes the HTD from HRI.
-   * @param masterServices
-   * @throws IOException
-   */
-  public static void migrateRootAndMeta(final MasterServices masterServices)
-  throws IOException {
-    updateRootWithNewRegionInfo(masterServices);
-    updateMetaWithNewRegionInfo(masterServices);
-  }
-
-  /**
-   * Update the metamigrated flag in -ROOT-.
-   * @param catalogTracker
-   * @param metaUpdated
-   * @throws IOException
-   */
-  public static void updateRootWithMetaMigrationStatus(
-      CatalogTracker catalogTracker, boolean metaUpdated)
-  throws IOException {
-    Put p = new Put(HRegionInfo.ROOT_REGIONINFO.getRegionName());
-    MetaMigrationRemovingHTD.addMetaUpdateStatus(p, metaUpdated);
-    MetaEditor.putToRootTable(catalogTracker, p);
-    LOG.info("Updated -ROOT- row with metaMigrated status = " + metaUpdated);
-  }
-
-  static void updateHRI(final CatalogTracker ct, final boolean rootTable,
-      final HRegionInfo090x hRegionInfo090x)
-  throws IOException {
-    HRegionInfo regionInfo = new HRegionInfo(hRegionInfo090x);
-    Put p = new Put(regionInfo.getRegionName());
-    p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-      Writables.getBytes(regionInfo));
-    if (rootTable) {
-      MetaEditor.putToRootTable(ct, p);
-    } else {
-      MetaEditor.putToMetaTable(ct, p);
-    }
-    LOG.info("Updated region " + regionInfo + " to " +
-      (rootTable? "-ROOT-": ".META."));
-  }
-
-  /**
-   * @deprecated Going away in 0.94; used for migrating to 0.92 only.
-   */
-  public static HRegionInfo090x getHRegionInfoForMigration(
-      Result data) throws IOException {
-    HRegionInfo090x info = null;
-    byte [] bytes =
-      data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-    if (bytes == null) return null;
-    try {
-      info = Writables.getHRegionInfoForMigration(bytes);
-    } catch(IOException ioe) {
-      if (ioe.getMessage().equalsIgnoreCase("HTD not found in input buffer")) {
-        return null;
-      } else {
-        throw ioe;
-      }
-    }
-    LOG.info("Current INFO from scan results = " + info);
-    return info;
-  }
-
-  public static List<HRegionInfo090x> fullScanMetaAndPrintHRIM(
-      CatalogTracker catalogTracker)
-  throws IOException {
-    final List<HRegionInfo090x> regions =
-      new ArrayList<HRegionInfo090x>();
-    Visitor v = new Visitor() {
-      @Override
-      public boolean visit(Result r) throws IOException {
-        if (r == null || r.isEmpty()) return true;
-        LOG.info("fullScanMetaAndPrint1.Current Meta Result: " + r);
-        HRegionInfo090x hrim = getHRegionInfoForMigration(r);
-        LOG.info("fullScanMetaAndPrint.HRIM Print= " + hrim);
-        regions.add(hrim);
-        return true;
-      }
-    };
-    MetaReader.fullScan(catalogTracker, v);
-    return regions;
-  }
-
-  static Put addMetaUpdateStatus(final Put p, final boolean metaUpdated) {
-    p.add(HConstants.CATALOG_FAMILY,
-      MetaMigrationRemovingHTD.META_MIGRATION_QUALIFIER,
-      Bytes.toBytes(metaUpdated));
-    return p;
-  }
-
-  /**
-   * @return True if the meta table has been migrated.
-   * @throws IOException
-   */
-  // Public because used in tests
-  public static boolean isMetaHRIUpdated(final MasterServices services)
-  throws IOException {
-    boolean metaUpdated = false;
-    List<Result> results =
-      MetaReader.fullScanOfRoot(services.getCatalogTracker());
-    if (results == null || results.isEmpty()) {
-      LOG.info("metaUpdated = NULL.");
-      return metaUpdated;
-    }
-    // Presume only the one result.
-    Result r = results.get(0);
-    byte [] metaMigrated = r.getValue(HConstants.CATALOG_FAMILY,
-      MetaMigrationRemovingHTD.META_MIGRATION_QUALIFIER);
-    if (metaMigrated != null && metaMigrated.length > 0) {
-      metaUpdated = Bytes.toBoolean(metaMigrated);
-    }
-    LOG.info("Meta updated status = " + metaUpdated);
-    return metaUpdated;
-  }
-
-  /**
-   * @return True if migrated.
-   * @throws IOException
-   */
-  public static boolean updateMetaWithNewHRI(final MasterServices services)
-  throws IOException {
-    if (isMetaHRIUpdated(services)) {
-      LOG.info("ROOT/Meta already up-to date with new HRI.");
-      return true;
-    }
-    LOG.info("Meta has HRI with HTDs. Updating meta now.");
-    try {
-      migrateRootAndMeta(services);
-      LOG.info("ROOT and Meta updated with new HRI.");
-      return true;
-    } catch (IOException e) {
-      throw new RuntimeException("Update ROOT/Meta with new HRI failed." +
-        "Master startup aborted.");
-    }
-  }
-}
\ No newline at end of file
diff --git src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
index f7f4d60..950f20d 100644
--- src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
+++ src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
@@ -1,4 +1,6 @@
 /**
+ * Copyright 2010 The Apache Software Foundation
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -24,18 +26,16 @@ import java.util.Map;
 import java.util.NavigableMap;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.TreeSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -45,14 +45,12 @@ import org.apache.hadoop.ipc.RemoteException;
 
 /**
  * Reads region and assignment information from <code>.META.</code>.
+ * <p>
+ * Uses the {@link CatalogTracker} to obtain locations and connections to + * catalogs. */ public class MetaReader { - // TODO: Strip CatalogTracker from this class. Its all over and in the end - // its only used to get its Configuration so we can get associated - // Connection. - private static final Log LOG = LogFactory.getLog(MetaReader.class); - - static final byte [] META_REGION_PREFIX; + public static final byte [] META_REGION_PREFIX; static { // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX. // FIRST_META_REGIONINFO == '.META.,,1'. META_REGION_PREFIX == '.META.,' @@ -63,6 +61,48 @@ public class MetaReader { } /** + * @param ct + * @param tableName A user tablename or a .META. table name. + * @return Interface on to server hosting the -ROOT- or + * .META. regions. + * @throws NotAllMetaRegionsOnlineException + * @throws IOException + */ + private static HRegionInterface getCatalogRegionInterface(final CatalogTracker ct, + final byte [] tableName) + throws NotAllMetaRegionsOnlineException, IOException { + return Bytes.equals(HConstants.META_TABLE_NAME, tableName)? + ct.waitForRootServerConnectionDefault(): + ct.waitForMetaServerConnectionDefault(); + } + + /** + * @param tableName + * @return Returns region name to look in for regions for tableName; + * e.g. if we are looking for .META. regions, we need to look + * in the -ROOT- region, else if a user table, we need to look + * in the .META. region. + */ + private static byte [] getCatalogRegionNameForTable(final byte [] tableName) { + return Bytes.equals(HConstants.META_TABLE_NAME, tableName)? + HRegionInfo.ROOT_REGIONINFO.getRegionName(): + HRegionInfo.FIRST_META_REGIONINFO.getRegionName(); + } + + /** + * @param regionName + * @return Returns region name to look in for regionName; + * e.g. if we are looking for .META.,,1 region, we need to look + * in -ROOT- region, else if a user region, we need to look + * in the .META.,,1 region. + */ + private static byte [] getCatalogRegionNameForRegion(final byte [] regionName) { + return isMetaRegion(regionName)? + HRegionInfo.ROOT_REGIONINFO.getRegionName(): + HRegionInfo.FIRST_META_REGIONINFO.getRegionName(); + } + + /** * @param regionName * @return True if regionName is from .META. table. */ @@ -78,13 +118,33 @@ public class MetaReader { } /** + * Performs a full scan of .META.. + *
+   * Returns a map of every region to its currently assigned server, according
+   * to META. If the region does not have an assignment it will have a null
+   * value in the map.
+   *
+   * @return map of regions to their currently assigned server where server is
+   * a String of <host> ':' <port>
+   * @throws IOException
+   */
+  public static Map<HRegionInfo, ServerName> fullScan(
+      CatalogTracker catalogTracker)
+  throws IOException {
+    return fullScan(catalogTracker, new TreeSet<String>());
+  }
+
   /**
    * Performs a full scan of <code>.META.</code>, skipping regions from any
    * tables in the specified set of disabled tables.
+   * <p>
+   * Returns a map of every region to its currently assigned server, according
+   * to META. If the region does not have an assignment it will have a null
+   * value in the map.
+   *
+   * @param catalogTracker
+   * @param disabledTables set of disabled tables that will not be returned
-   * @return Returns a map of every region to it's currently assigned server,
-   * according to META. If the region does not have an assignment it will have
-   * a null value in the map.
+   * @return map of regions to their currently assigned server
    * @throws IOException
    */
   public static Map<HRegionInfo, ServerName> fullScan(
@@ -96,13 +156,16 @@
   /**
    * Performs a full scan of <code>.META.</code>, skipping regions from any
    * tables in the specified set of disabled tables.
+   * <p>
+   * Returns a map of every region to its currently assigned server, according
+   * to META. If the region does not have an assignment it will have a null
+   * value in the map.
+   *
+   * @param catalogTracker
+   * @param disabledTables set of disabled tables that will not be returned
    * @param excludeOfflinedSplitParents If true, do not include offlined split
    * parents in the return.
-   * @return Returns a map of every region to it's currently assigned server,
-   * according to META. If the region does not have an assignment it will have
-   * a null value in the map.
+   * @return map of regions to their currently assigned server
    * @throws IOException
    */
   public static Map<HRegionInfo, ServerName> fullScan(
@@ -115,11 +178,11 @@
       @Override
       public boolean visit(Result r) throws IOException {
         if (r == null || r.isEmpty()) return true;
-        Pair<HRegionInfo, ServerName> region = parseCatalogResult(r);
+        Pair<HRegionInfo, ServerName> region = metaRowToRegionPair(r);
         if (region == null) return true;
         HRegionInfo hri = region.getFirst();
         if (disabledTables.contains(
-            hri.getTableNameAsString())) return true;
+            hri.getTableDesc().getNameAsString())) return true;
         // Are we to include split parents in the list?
         if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
         regions.put(hri, region.getSecond());
@@ -132,32 +195,38 @@
 
   /**
    * Performs a full scan of <code>.META.</code>.
-   * @return List of {@link Result}
+   * <p>
+   * Returns the content of every row found in <code>.META.</code>.
+   *
+   * @return List of {@link Result} holding every row in <code>.META.</code>
+   * @throws IOException
    */
-  public static List<Result> fullScan(CatalogTracker catalogTracker)
+  public static List<Result> fullScanOfResults(
+      CatalogTracker catalogTracker)
   throws IOException {
-    CollectAllVisitor v = new CollectAllVisitor();
-    fullScan(catalogTracker, v, null);
-    return v.getResults();
-  }
-
-  /**
-   * Performs a full scan of a <code>-ROOT-</code> table.
-   * @return List of {@link Result}
-   * @throws IOException
-   */
-  public static List<Result> fullScanOfRoot(CatalogTracker catalogTracker)
-  throws IOException {
-    CollectAllVisitor v = new CollectAllVisitor();
-    fullScan(catalogTracker, v, null, true);
-    return v.getResults();
+    final List<Result> regions = new ArrayList<Result>();
+    Visitor v = new Visitor() {
+      @Override
+      public boolean visit(Result r) throws IOException {
+        if (r == null || r.isEmpty()) return true;
+        regions.add(r);
+        return true;
+      }
+    };
+    fullScan(catalogTracker, v);
+    return regions;
   }
 
   /**
    * Performs a full scan of <code>.META.</code>.
+   * <p>
+   * Visits every row in <code>.META.</code> with the passed {@link Visitor}.
    * @param catalogTracker
-   * @param visitor Visitor invoked against each row.
+   * @param visitor
+   * @throws IOException
    */
   public static void fullScan(CatalogTracker catalogTracker,
@@ -168,82 +237,35 @@
 
   /**
    * Performs a full scan of <code>.META.</code>.
+   * <p>
+   * Visits every row in <code>.META.</code> with the passed {@link Visitor}.
    * @param catalogTracker
-   * @param visitor Visitor invoked against each row.
+   * @param visitor
    * @param startrow Where to start the scan. Pass null if want to begin scan
-   * at first row (The visitor will stop the Scan when its done so no need to
-   * pass a stoprow).
+   * at first row.
    * @throws IOException
    */
   public static void fullScan(CatalogTracker catalogTracker,
     final Visitor visitor, final byte [] startrow)
   throws IOException {
-    fullScan(catalogTracker, visitor, startrow, false);
-  }
-
-  /**
-   * Callers should call close on the returned {@link HTable} instance.
-   * @param catalogTracker We'll use this catalogtracker's connection
-   * @param tableName Table to get an {@link HTable} against.
-   * @return An {@link HTable} for <code>tableName</code>
-   * @throws IOException
-   */
-  private static HTable getHTable(final CatalogTracker catalogTracker,
-      final byte [] tableName)
-  throws IOException {
-    // Passing the CatalogTracker's connection configuration ensures this
-    // HTable instance uses the CatalogTracker's connection.
-    return new HTable(catalogTracker.getConnection().getConfiguration(), tableName);
-  }
-
-  /**
-   * Callers should call close on the returned {@link HTable} instance.
-   * @param catalogTracker
-   * @param regionName
-   * @return
-   * @throws IOException
-   */
-  static HTable getCatalogHTable(final CatalogTracker catalogTracker,
-      final byte [] regionName)
-  throws IOException {
-    return isMetaRegion(regionName)?
-      getRootHTable(catalogTracker):
-      getMetaHTable(catalogTracker);
-  }
-
-  /**
-   * Callers should call close on the returned {@link HTable} instance.
-   * @param ct
-   * @return An {@link HTable} for <code>.META.</code>
-   * @throws IOException
-   */
-  static HTable getMetaHTable(final CatalogTracker ct)
-  throws IOException {
-    return getHTable(ct, HConstants.META_TABLE_NAME);
-  }
-
-  /**
-   * Callers should call close on the returned {@link HTable} instance.
-   * @param ct
-   * @return An {@link HTable} for <code>-ROOT-</code>
-   * @throws IOException
-   */
-  static HTable getRootHTable(final CatalogTracker ct)
-  throws IOException {
-    return getHTable(ct, HConstants.ROOT_TABLE_NAME);
-  }
-
-  /**
-   * @param t Table to use (will be closed when done).
-   * @param g Get to run
-   * @throws IOException
-   */
-  private static Result get(final HTable t, final Get g) throws IOException {
+    HRegionInterface metaServer =
+      catalogTracker.waitForMetaServerConnectionDefault();
+    Scan scan = new Scan();
+    if (startrow != null) scan.setStartRow(startrow);
+    scan.addFamily(HConstants.CATALOG_FAMILY);
+    long scannerid = metaServer.openScanner(
+      HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), scan);
     try {
-      return t.get(g);
+      Result data;
+      while((data = metaServer.next(scannerid)) != null) {
+        if (!data.isEmpty()) visitor.visit(data);
+      }
     } finally {
-      t.close();
+      metaServer.close(scannerid);
     }
+    return;
   }
 
   /**
@@ -251,29 +273,28 @@
    * @param metaServer connection to server hosting ROOT
    * @return location of META in ROOT, or null if not available
    * @throws IOException
-   * @deprecated Does not retry; use {@link #readRegionLocation(CatalogTracker, byte[])}
    */
   public static ServerName readMetaLocation(HRegionInterface metaServer)
   throws IOException {
-    return readLocation(metaServer, CatalogTracker.ROOT_REGION_NAME,
-      CatalogTracker.META_REGION_NAME);
+    return readLocation(metaServer, CatalogTracker.ROOT_REGION,
+      CatalogTracker.META_REGION);
   }
 
   /**
-   * Reads the location of the specified region
+   * Reads the location of the specified region from META.
    * @param catalogTracker
-   * @param regionName region whose location we are after
-   * @return location of region as a {@link ServerName} or null if not found
+   * @param regionName region to read location of
+   * @return location of the region as a {@link ServerName}, or null if not available
    * @throws IOException
    */
   public static ServerName readRegionLocation(CatalogTracker catalogTracker,
       byte [] regionName)
   throws IOException {
-    Pair<HRegionInfo, ServerName> pair = getRegion(catalogTracker, regionName);
-    return (pair == null || pair.getSecond() == null)? null: pair.getSecond();
+    if (isMetaRegion(regionName)) throw new IllegalArgumentException("See readMetaLocation");
+    return readLocation(catalogTracker.waitForMetaServerConnectionDefault(),
+      CatalogTracker.META_REGION, regionName);
   }
 
-  // TODO: Remove when deprecated dependencies are removed.
   private static ServerName readLocation(HRegionInterface metaServer,
       byte [] catalogRegionName, byte [] regionName)
   throws IOException {
@@ -314,14 +335,15 @@
     if (r == null || r.isEmpty()) {
       return null;
     }
-    return getServerNameFromCatalogResult(r);
+    return getServerNameFromResult(r);
   }
 
   /**
-   * Gets the region info and assignment for the specified region.
+   * Gets the region info and assignment for the specified region from META.
    * @param catalogTracker
-   * @param regionName Region to lookup.
-   * @return Location and HRegionInfo for regionName
+   * @param regionName
+   * @return the {@link HRegionInfo} and {@link ServerName} for
+   * <code>regionName</code>, or null if not available
    * @throws IOException
    */
   public static Pair<HRegionInfo, ServerName> getRegion(
@@ -329,60 +351,41 @@
   throws IOException {
     Get get = new Get(regionName);
     get.addFamily(HConstants.CATALOG_FAMILY);
-    Result r = get(getCatalogHTable(catalogTracker, regionName), get);
-    return (r == null || r.isEmpty())? null: parseCatalogResult(r);
-  }
-
-  /**
-   * Extract a {@link ServerName}
-   * For use on catalog table {@link Result}.
-   * @param r Result to pull from
-   * @return A ServerName instance or null if necessary fields not found or empty.
-   */
-  public static ServerName getServerNameFromCatalogResult(final Result r) {
-    byte[] value = r.getValue(HConstants.CATALOG_FAMILY,
-      HConstants.SERVER_QUALIFIER);
-    if (value == null || value.length == 0) return null;
-    String hostAndPort = Bytes.toString(value);
-    value = r.getValue(HConstants.CATALOG_FAMILY,
-      HConstants.STARTCODE_QUALIFIER);
-    if (value == null || value.length == 0) return null;
-    return new ServerName(hostAndPort, Bytes.toLong(value));
+    byte [] meta = getCatalogRegionNameForRegion(regionName);
+    Result r = catalogTracker.waitForMetaServerConnectionDefault().get(meta, get);
+    return (r == null || r.isEmpty())? null: metaRowToRegionPair(r);
   }
 
   /**
-   * Extract a HRegionInfo and ServerName.
-   * For use on catalog table {@link Result}.
-   * @param r Result to pull from
-   * @return A pair of the {@link HRegionInfo} and the {@link ServerName}
+   * @param data A <code>.META.</code> table row.
+   * @return A pair of the regioninfo and the ServerName
    * (or null for server address if no address set in <code>.META.</code>).
    * @throws IOException
    */
-  public static Pair<HRegionInfo, ServerName> parseCatalogResult(final Result r)
+  public static Pair<HRegionInfo, ServerName> metaRowToRegionPair(Result data)
   throws IOException {
-    HRegionInfo info =
-      parseHRegionInfoFromCatalogResult(r, HConstants.REGIONINFO_QUALIFIER);
-    ServerName sn = getServerNameFromCatalogResult(r);
+    byte [] bytes = data.getValue(HConstants.CATALOG_FAMILY,
+      HConstants.REGIONINFO_QUALIFIER);
+    if (bytes == null) return null;
+    HRegionInfo info = Writables.getHRegionInfo(bytes);
+    ServerName sn = getServerNameFromResult(data);
+    // sn can be null in case where there is no server info.
     return new Pair<HRegionInfo, ServerName>(info, sn);
   }
 
   /**
-   * Parse the content of the cell at {@link HConstants#CATALOG_FAMILY} and
-   * qualifier as an HRegionInfo and return it, or null.
-   * For use on catalog table {@link Result}.
-   * @param r Result instance to pull from.
-   * @param qualifier Column family qualifier -- either
-   * {@link HConstants#SPLITA_QUALIFIER}, {@link HConstants#SPLITB_QUALIFIER} or
-   * {@link HConstants#REGIONINFO_QUALIFIER}.
-   * @return An HRegionInfo instance or null.
-   * @throws IOException
+   * @param data Result to interrogate.
+   * @return A ServerName instance or null if necessary fields not found or empty.
    */
-  public static HRegionInfo parseHRegionInfoFromCatalogResult(final Result r,
-      byte [] qualifier)
-  throws IOException {
-    byte [] bytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
-    if (bytes == null || bytes.length <= 0) return null;
-    return Writables.getHRegionInfoOrNull(bytes);
+  private static ServerName getServerNameFromResult(final Result data) {
+    byte[] value = data.getValue(HConstants.CATALOG_FAMILY,
+      HConstants.SERVER_QUALIFIER);
+    if (value == null || value.length == 0) return null;
+    String hostAndPort = Bytes.toString(value);
+    value = data.getValue(HConstants.CATALOG_FAMILY,
+      HConstants.STARTCODE_QUALIFIER);
+    if (value == null || value.length == 0) return null;
+    return new ServerName(hostAndPort, Bytes.toLong(value));
   }
 
   /**
@@ -401,36 +404,21 @@
       // Catalog tables always exist.
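
NOTE (editorial aside, illustration only, not part of the patch): metaRowToRegionPair() and getServerNameFromResult() above encode the catalog row layout the rest of this patch leans on: info:regioninfo holds the serialized HRegionInfo, while info:server and info:serverstartcode together make up the ServerName. A caller typically consumes a row like this (a minimal sketch; the row, LOG and surrounding imports are assumed):

    // Sketch: decode one .META. row into region info plus assignment.
    Pair<HRegionInfo, ServerName> p = MetaReader.metaRowToRegionPair(row);
    if (p != null) {
      HRegionInfo hri = p.getFirst();
      ServerName sn = p.getSecond();  // null when the row has no assignment
      LOG.info(hri.getRegionNameAsString() + " -> " +
        (sn == null ? "unassigned" : sn.toString()));
    }
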
return true; } - final byte [] tableNameBytes = Bytes.toBytes(tableName); - // Make a version of ResultCollectingVisitor that only collects the first - CollectingVisitor visitor = new CollectingVisitor() { - private HRegionInfo current = null; - - @Override - public boolean visit(Result r) throws IOException { - this.current = - parseHRegionInfoFromCatalogResult(r, HConstants.REGIONINFO_QUALIFIER); - if (this.current == null) { - LOG.warn("No serialized HRegionInfo in " + r); + HRegionInterface metaServer = + catalogTracker.waitForMetaServerConnectionDefault(); + Scan scan = getScanForTableName(Bytes.toBytes(tableName)); + scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + long scannerid = metaServer.openScanner( + HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), scan); + try { + Result data = metaServer.next(scannerid); + if (data != null && data.size() > 0) { return true; - } - if (!isInsideTable(this.current, tableNameBytes)) return false; - if (this.current.isSplitParent()) return true; - // Else call super and add this Result to the collection. - super.visit(r); - // Stop collecting regions from table after we get one. - return false; } - - @Override - void add(Result r) { - // Add the current HRI. - this.results.add(this.current); - } - }; - fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableNameBytes)); - // If visitor has results >= 1 then table exists. - return visitor.getResults().size() >= 1; + return false; + } finally { + metaServer.close(scannerid); + } } /** @@ -458,46 +446,42 @@ public class MetaReader { public static List getTableRegions(CatalogTracker catalogTracker, byte [] tableName, final boolean excludeOfflinedSplitParents) throws IOException { - List> result = null; - try { - result = getTableRegionsAndLocations(catalogTracker, tableName, - excludeOfflinedSplitParents); - } catch (InterruptedException e) { - throw new RuntimeException(e); + if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { + // If root, do a bit of special handling. + List list = new ArrayList(); + list.add(HRegionInfo.ROOT_REGIONINFO); + return list; + } else if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { + // Same for .META. table + List list = new ArrayList(); + list.add(HRegionInfo.FIRST_META_REGIONINFO); + return list; } - return getListOfHRegionInfos(result); - } - static List getListOfHRegionInfos(final List> pairs) { - if (pairs == null || pairs.isEmpty()) return null; - List result = new ArrayList(pairs.size()); - for (Pair pair: pairs) { - result.add(pair.getFirst()); - } - return result; - } + // Its a user table. + HRegionInterface metaServer = + getCatalogRegionInterface(catalogTracker, tableName); + List regions = new ArrayList(); - /** - * @param current - * @param tableName - * @return True if current tablename is equal to - * tableName - */ - static boolean isInsideTable(final HRegionInfo current, final byte [] tableName) { - return Bytes.equals(tableName, current.getTableName()); - } - - /** - * @param tableName - * @return Place to start Scan in .META. 
when passed a - * tableName; returns <tableName&rt; <,&rt; <,&rt; - */ - static byte [] getTableStartRowForMeta(final byte [] tableName) { - byte [] startRow = new byte[tableName.length + 2]; - System.arraycopy(tableName, 0, startRow, 0, tableName.length); - startRow[startRow.length - 2] = HRegionInfo.DELIMITER; - startRow[startRow.length - 1] = HRegionInfo.DELIMITER; - return startRow; + Scan scan = getScanForTableName(tableName); + scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + long scannerid = + metaServer.openScanner(getCatalogRegionNameForTable(tableName), scan); + try { + Result data; + while((data = metaServer.next(scannerid)) != null) { + if (data != null && data.size() > 0) { + HRegionInfo info = Writables.getHRegionInfo( + data.getValue(HConstants.CATALOG_FAMILY, + HConstants.REGIONINFO_QUALIFIER)); + if (excludeOfflinedSplitParents && info.isSplitParent()) continue; + regions.add(info); + } + } + return regions; + } finally { + metaServer.close(scannerid); + } } /** @@ -531,22 +515,8 @@ public class MetaReader { public static List> getTableRegionsAndLocations(CatalogTracker catalogTracker, String tableName) throws IOException, InterruptedException { - return getTableRegionsAndLocations(catalogTracker, Bytes.toBytes(tableName), - true); - } - - /** - * @param catalogTracker - * @param tableName - * @return Return list of regioninfos and server addresses. - * @throws IOException - * @throws InterruptedException - */ - public static List> - getTableRegionsAndLocations(final CatalogTracker catalogTracker, - final byte [] tableName, final boolean excludeOfflinedSplitParents) - throws IOException, InterruptedException { - if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { + byte [] tableNameBytes = Bytes.toBytes(tableName); + if (Bytes.equals(tableNameBytes, HConstants.ROOT_TABLE_NAME)) { // If root, do a bit of special handling. ServerName serverName = catalogTracker.getRootLocation(); List> list = @@ -555,36 +525,27 @@ public class MetaReader { serverName)); return list; } - // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress - CollectingVisitor> visitor = - new CollectingVisitor>() { - private Pair current = null; - - @Override - public boolean visit(Result r) throws IOException { - HRegionInfo hri = - parseHRegionInfoFromCatalogResult(r, HConstants.REGIONINFO_QUALIFIER); - if (hri == null) { - LOG.warn("No serialized HRegionInfo in " + r); - return true; + HRegionInterface metaServer = + getCatalogRegionInterface(catalogTracker, tableNameBytes); + List> regions = + new ArrayList>(); + Scan scan = getScanForTableName(tableNameBytes); + scan.addFamily(HConstants.CATALOG_FAMILY); + long scannerid = + metaServer.openScanner(getCatalogRegionNameForTable(tableNameBytes), scan); + try { + Result data; + while((data = metaServer.next(scannerid)) != null) { + if (data != null && data.size() > 0) { + Pair region = metaRowToRegionPair(data); + if (region == null) continue; + regions.add(region); } - if (!isInsideTable(hri, tableName)) return false; - if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; - ServerName sn = getServerNameFromCatalogResult(r); - // Populate this.current so available when we call #add - this.current = new Pair(hri, sn); - // Else call super and add this Result to the collection. 
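
NOTE (editorial aside, illustration only, not part of the patch): the implementations this revert restores here (tableExists, getTableRegions, and getTableRegionsAndLocations below) all repeat one raw scanner protocol against HRegionInterface: open a scanner id on a catalog region, drain it with next(), and release it in a finally block. Reduced to its skeleton, assuming an already-connected metaServer and a catalog region name:

    // Sketch: the open/next/close idiom used throughout this file.
    // The scanner id is a server-side resource, so close() must sit
    // in a finally block.
    Scan scan = new Scan();
    scan.addFamily(HConstants.CATALOG_FAMILY);
    long scannerid = metaServer.openScanner(catalogRegionName, scan);
    try {
      Result data;
      while ((data = metaServer.next(scannerid)) != null) {
        if (!data.isEmpty()) visitor.visit(data);  // or collect into a list
      }
    } finally {
      metaServer.close(scannerid);
    }
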
- return super.visit(r); } - - @Override - void add(Result r) { - this.results.add(this.current); - } - }; - fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableName), - Bytes.equals(tableName, HConstants.META_TABLE_NAME)); - return visitor.getResults(); + return regions; + } finally { + metaServer.close(scannerid); + } } /** @@ -597,104 +558,29 @@ public class MetaReader { public static NavigableMap getServerUserRegions(CatalogTracker catalogTracker, final ServerName serverName) throws IOException { - final NavigableMap hris = new TreeMap(); - // Fill the above hris map with entries from .META. that have the passed - // servername. - CollectingVisitor v = new CollectingVisitor() { - @Override - void add(Result r) { - if (r == null || r.isEmpty()) return; - ServerName sn = getServerNameFromCatalogResult(r); - if (sn != null && sn.equals(serverName)) this.results.add(r); - } - }; - fullScan(catalogTracker, v); - List results = v.getResults(); - if (results != null && !results.isEmpty()) { - // Convert results to Map keyed by HRI - for (Result r: results) { - Pair p = parseCatalogResult(r); - if (p != null && p.getFirst() != null) hris.put(p.getFirst(), r); - } - } - return hris; - } - - public static void fullScanMetaAndPrint(final CatalogTracker catalogTracker) - throws IOException { - Visitor v = new Visitor() { - @Override - public boolean visit(Result r) throws IOException { - if (r == null || r.isEmpty()) return true; - LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r); - HRegionInfo hrim = MetaEditor.getHRegionInfo(r); - LOG.info("fullScanMetaAndPrint.HRI Print= " + hrim); - return true; - } - }; - fullScan(catalogTracker, v); - } - - /** - * Fully scan a given region, on a given server starting with given row. - * @param hRegionInterface region server - * @param visitor visitor - * @param regionName name of region - * @param startrow start row - * @throws IOException - * @deprecated Does not retry; use fullScan xxx instead. - x - */ - public static void fullScan(HRegionInterface hRegionInterface, - Visitor visitor, final byte[] regionName, - byte[] startrow) throws IOException { - if (hRegionInterface == null) return; - Scan scan = new Scan(); - if (startrow != null) scan.setStartRow(startrow); - scan.addFamily(HConstants.CATALOG_FAMILY); - long scannerid = hRegionInterface.openScanner(regionName, scan); - try { - Result data; - while((data = hRegionInterface.next(scannerid)) != null) { - if (!data.isEmpty()) visitor.visit(data); - } - } finally { - hRegionInterface.close(scannerid); - } - return; - } - - /** - * Performs a full scan of a catalog table. - * @param catalogTracker - * @param visitor Visitor invoked against each row. - * @param startrow Where to start the scan. Pass null if want to begin scan - * at first row. - * @param scanRoot True if we are to scan -ROOT- rather than - * .META., the default (pass false to scan .META.) - * @throws IOException - */ - static void fullScan(CatalogTracker catalogTracker, - final Visitor visitor, final byte [] startrow, final boolean scanRoot) - throws IOException { + HRegionInterface metaServer = + catalogTracker.waitForMetaServerConnectionDefault(); + NavigableMap hris = new TreeMap(); Scan scan = new Scan(); - if (startrow != null) scan.setStartRow(startrow); scan.addFamily(HConstants.CATALOG_FAMILY); - HTable metaTable = scanRoot? 
- getRootHTable(catalogTracker): getMetaHTable(catalogTracker); - ResultScanner scanner = metaTable.getScanner(scan); + long scannerid = metaServer.openScanner( + HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), scan); try { - Result data; - while((data = scanner.next()) != null) { - if (data.isEmpty()) continue; - // Break if visit returns false. - if (!visitor.visit(data)) break; + Result result; + while((result = metaServer.next(scannerid)) != null) { + if (result != null && result.size() > 0) { + Pair pair = metaRowToRegionPair(result); + if (pair == null) continue; + if (pair.getSecond() == null || !serverName.equals(pair.getSecond())) { + continue; + } + hris.put(pair.getFirst(), result); + } } + return hris; } finally { - scanner.close(); - metaTable.close(); + metaServer.close(scannerid); } - return; } /** @@ -709,37 +595,4 @@ public class MetaReader { */ public boolean visit(final Result r) throws IOException; } - - /** - * A {@link Visitor} that collects content out of passed {@link Result}. - */ - static abstract class CollectingVisitor implements Visitor { - final List results = new ArrayList(); - @Override - public boolean visit(Result r) throws IOException { - if (r == null || r.isEmpty()) return true; - add(r); - return true; - } - - abstract void add(Result r); - - /** - * @return Collected results; wait till visits complete to collect all - * possible results - */ - List getResults() { - return this.results; - } - } - - /** - * Collects all returned. - */ - static class CollectAllVisitor extends CollectingVisitor { - @Override - void add(Result r) { - this.results.add(r); - } - } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 92c959c..207d926 100644 --- src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -48,12 +48,12 @@ import org.apache.hadoop.hbase.RegionException; import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; -import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.ipc.HMasterInterface; import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; @@ -263,11 +263,10 @@ public class HBaseAdmin implements Abortable, Closeable { * Method for getting the tableDescriptor * @param tableName as a byte [] * @return the tableDescriptor - * @throws TableNotFoundException * @throws IOException if a remote or network exception occurs */ public HTableDescriptor getTableDescriptor(final byte [] tableName) - throws TableNotFoundException, IOException { + throws IOException { return this.connection.getHTableDescriptor(tableName); } @@ -376,7 +375,7 @@ public class HBaseAdmin implements Abortable, Closeable { if (null == info) { return true; } - if (!(Bytes.equals(info.getTableName(), desc.getName()))) { + if (!(Bytes.equals(info.getTableDesc().getName(), desc.getName()))) { return false; } String hostAndPort = null; @@ -493,22 +492,8 @@ 
public class HBaseAdmin implements Abortable, Closeable { // Get a batch at a time. Result values = server.next(scannerId); - // let us wait until .META. table is updated and - // HMaster removes the table from its HTableDescriptors if (values == null) { - boolean tableExists = false; - HTableDescriptor[] htds = getMaster().getHTableDescriptors(); - if (htds != null && htds.length > 0) { - for (HTableDescriptor htd: htds) { - if (Bytes.equals(tableName, htd.getName())) { - tableExists = true; - break; - } - } - } - if (!tableExists) { - break; - } + break; } } catch (IOException ex) { if(tries == numRetries - 1) { // no more tries left @@ -1595,19 +1580,19 @@ public class HBaseAdmin implements Abortable, Closeable { * get the regions of a given table. * * @param tableName the name of the table - * @return Ordered list of {@link HRegionInfo}. + * @return Ordered list of {@link HRegionInfo}. * * @throws IOException */ public List getTableRegions(final byte[] tableName) throws IOException { CatalogTracker ct = getCatalogTracker(); - List Regions = null; + List Regions; try { Regions = MetaReader.getTableRegions(ct, tableName, true); } finally { cleanupCatalogTracker(ct); } - return Regions; + return Regions; } public void close() throws IOException { @@ -1616,17 +1601,6 @@ public class HBaseAdmin implements Abortable, Closeable { } } - /** - * Get tableDescriptors - * @param tableNames List of table names - * @return HTD[] the tableDescriptor - * @throws IOException if a remote or network exception occurs - */ - public HTableDescriptor[] getTableDescriptors(List tableNames) - throws IOException { - return this.connection.getHTableDescriptors(tableNames); - } - /** * Roll the log writer. That is, start writing log messages to a new file. * diff --git src/main/java/org/apache/hadoop/hbase/client/HConnection.java src/main/java/org/apache/hadoop/hbase/client/HConnection.java index ed6027c..5d75205 100644 --- src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -375,13 +375,4 @@ public interface HConnection extends Abortable, Closeable { * @deprecated This method will be changed from public to package protected. */ public int getCurrentNrHRS() throws IOException; - - /** - * @param tableNames List of table names - * @return HTD[] table metadata - * @throws IOException if a remote or network exception occurs - */ - public HTableDescriptor[] getHTableDescriptors(List tableNames) - throws IOException; - } \ No newline at end of file diff --git src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index f7fac44..751fc14 100644 --- src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -35,6 +35,7 @@ import java.util.NoSuchElementException; import java.util.Map.Entry; import java.util.Set; import java.util.TreeMap; +import java.util.TreeSet; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArraySet; @@ -130,10 +131,8 @@ import org.apache.zookeeper.KeeperException; */ @SuppressWarnings("serial") public class HConnectionManager { - // An LRU Map of HConnectionKey -> HConnection (TableServer). All - // access must be synchronized. This map is not private because tests - // need to be able to tinker with it. 
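
NOTE (editorial aside, illustration only, not part of the patch): the HBASE_INSTANCES field touched just below is the LRU map of HConnectionKey to HConnection described in the comment above. The stock Java idiom for such a bounded, access-ordered cache is a LinkedHashMap with removeEldestEntry overridden, roughly as follows (a sketch only; MAX_CACHED_HBASE_INSTANCES caps the size as in the surrounding code):

    // Sketch: a bounded, access-ordered LRU map in plain Java.
    Map<HConnectionKey, HConnection> instances =
      new LinkedHashMap<HConnectionKey, HConnection>(16, 0.75f, true) {
        @Override
        protected boolean removeEldestEntry(
            Map.Entry<HConnectionKey, HConnection> eldest) {
          // Evict the least-recently-used connection once over capacity.
          return size() > MAX_CACHED_HBASE_INSTANCES;
        }
      };
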
- static final Map HBASE_INSTANCES; + // A LRU Map of HConnectionKey -> HConnection (TableServer). + private static final Map HBASE_INSTANCES; public static final int MAX_CACHED_HBASE_INSTANCES; @@ -661,6 +660,33 @@ public class HConnectionManager { return reload? relocateRegion(name, row): locateRegion(name, row); } + public HTableDescriptor[] listTables() throws IOException { + final TreeSet uniqueTables = + new TreeSet(); + MetaScannerVisitor visitor = new MetaScannerVisitor() { + public boolean processRow(Result result) throws IOException { + try { + byte[] value = result.getValue(HConstants.CATALOG_FAMILY, + HConstants.REGIONINFO_QUALIFIER); + HRegionInfo info = null; + if (value != null) { + info = Writables.getHRegionInfo(value); + } + // Only examine the rows where the startKey is zero length + if (info != null && info.getStartKey().length == 0) { + uniqueTables.add(info.getTableDesc()); + } + return true; + } catch (RuntimeException e) { + LOG.error("Result=" + result); + throw e; + } + } + }; + MetaScanner.metaScan(conf, visitor); + return uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]); + } + public boolean isTableEnabled(byte[] tableName) throws IOException { return testTableOnlineState(tableName, true); } @@ -679,7 +705,7 @@ public class HConnectionManager { HConstants.REGIONINFO_QUALIFIER); HRegionInfo info = Writables.getHRegionInfoOrNull(value); if (info != null) { - if (Bytes.equals(tableName, info.getTableName())) { + if (Bytes.equals(tableName, info.getTableDesc().getName())) { value = row.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); if (value == null) { @@ -716,6 +742,47 @@ public class HConnectionManager { } } + private static class HTableDescriptorFinder + implements MetaScanner.MetaScannerVisitor { + byte[] tableName; + HTableDescriptor result; + protected HTableDescriptorFinder(byte[] tableName) { + this.tableName = tableName; + } + public boolean processRow(Result rowResult) throws IOException { + HRegionInfo info = Writables.getHRegionInfoOrNull( + rowResult.getValue(HConstants.CATALOG_FAMILY, + HConstants.REGIONINFO_QUALIFIER)); + if (info == null) return true; + HTableDescriptor desc = info.getTableDesc(); + if (Bytes.equals(desc.getName(), tableName)) { + result = desc; + return false; + } + return true; + } + HTableDescriptor getResult() { + return result; + } + } + + public HTableDescriptor getHTableDescriptor(final byte[] tableName) + throws IOException { + if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { + return new UnmodifyableHTableDescriptor(HTableDescriptor.ROOT_TABLEDESC); + } + if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { + return HTableDescriptor.META_TABLEDESC; + } + HTableDescriptorFinder finder = new HTableDescriptorFinder(tableName); + MetaScanner.metaScan(conf, finder, tableName); + HTableDescriptor result = finder.getResult(); + if (result == null) { + throw new TableNotFoundException(Bytes.toString(tableName)); + } + return result; + } + @Override public HRegionLocation locateRegion(final byte [] regionName) throws IOException { @@ -795,7 +862,7 @@ public class HConnectionManager { regionInfo = Writables.getHRegionInfo(value); // possible we got a region of a different table... 
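
NOTE (editorial aside, illustration only, not part of the patch): the guard that follows exists because catalog rows sort lexicographically by region name, so a getClosestRowBefore() probe for a table with no catalog entry can land on the last region of the preceding table. Comparing the table name recovered from info:regioninfo with the requested one turns that case into "stop scanning" or "table not found" instead of a bogus location. In isolation (sketch; variable names mirror the surrounding hunk):

    // Sketch: validate a closest-row-before probe of the catalog.
    HRegionInfo info = Writables.getHRegionInfo(
      regionInfoRow.getValue(HConstants.CATALOG_FAMILY,
        HConstants.REGIONINFO_QUALIFIER));
    if (!Bytes.equals(info.getTableDesc().getName(), tableName)) {
      // The closest row belongs to some other table.
      throw new TableNotFoundException(Bytes.toString(tableName));
    }
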
- if (!Bytes.equals(regionInfo.getTableName(), + if (!Bytes.equals(regionInfo.getTableDesc().getName(), tableName)) { return false; // stop scanning } @@ -897,7 +964,7 @@ public class HConnectionManager { deleteCachedLocation(tableName, row); } - // Query the root or meta region for the location of the meta region + // Query the root or meta region for the location of the meta region regionInfoRow = server.getClosestRowBefore( metaLocation.getRegionInfo().getRegionName(), metaKey, HConstants.CATALOG_FAMILY); @@ -915,10 +982,9 @@ public class HConnectionManager { HRegionInfo regionInfo = (HRegionInfo) Writables.getWritable( value, new HRegionInfo()); // possible we got a region of a different table... - if (!Bytes.equals(regionInfo.getTableName(), tableName)) { + if (!Bytes.equals(regionInfo.getTableDesc().getName(), tableName)) { throw new TableNotFoundException( - "Table '" + Bytes.toString(tableName) + "' was not found, got: " + - Bytes.toString(regionInfo.getTableName()) + "."); + "Table '" + Bytes.toString(tableName) + "' was not found."); } if (regionInfo.isSplit()) { throw new RegionOfflineException("the only available region for" + @@ -964,8 +1030,8 @@ public class HConnectionManager { if (LOG.isDebugEnabled()) { LOG.debug("locateRegionInMeta parentTable=" + Bytes.toString(parentTable) + ", metaLocation=" + - ((metaLocation == null)? "null": "{" + metaLocation + "}") + - ", attempt=" + tries + " of " + + ((metaLocation == null)? "null": metaLocation) + ", attempt=" + + tries + " of " + this.numRetries + " failed; retrying after sleep of " + getPauseTime(tries) + " because: " + e.getMessage()); } @@ -1203,7 +1269,7 @@ public class HConnectionManager { } catch (RemoteException e) { LOG.warn("RemoteException connecting to RS", e); // Throw what the RemoteException was carrying. 
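
NOTE (editorial aside, illustration only, not part of the patch): the retry hunks below are driven through ServerCallable. A caller wraps a single region server RPC in an anonymous subclass and hands it to getRegionServerWithRetries(), which locates the region via instantiateServer() and re-runs call() until it succeeds or retries are exhausted. A typical use, modelled on how HTable issues a Get in this era of the code (sketch):

    // Sketch: one retried region server call.
    Result r = connection.getRegionServerWithRetries(
      new ServerCallable<Result>(connection, tableName, row) {
        public Result call() throws IOException {
          // 'server' and 'location' are populated by instantiateServer().
          return server.get(location.getRegionInfo().getRegionName(),
            new Get(row));
        }
      });
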
- throw e.unwrapRemoteException(); + throw RemoteExceptionHandler.decodeRemoteException(e); } } } @@ -1235,22 +1301,19 @@ public class HConnectionManager { public T getRegionServerWithRetries(ServerCallable callable) throws IOException, RuntimeException { - List exceptions = - new ArrayList(); + List exceptions = new ArrayList(); for(int tries = 0; tries < numRetries; tries++) { try { + callable.instantiateServer(tries != 0); callable.beforeCall(); - callable.connect(tries != 0); return callable.call(); } catch (Throwable t) { callable.shouldRetry(t); t = translateException(t); - RetriesExhaustedException.ThrowableWithExtraContext qt = - new RetriesExhaustedException.ThrowableWithExtraContext(t, - System.currentTimeMillis(), callable.toString()); - exceptions.add(qt); + exceptions.add(t); if (tries == numRetries - 1) { - throw new RetriesExhaustedException(tries, exceptions); + throw new RetriesExhaustedException(callable.getServerName(), + callable.getRegionName(), callable.getRow(), tries, exceptions); } } finally { callable.afterCall(); @@ -1259,7 +1322,7 @@ public class HConnectionManager { Thread.sleep(getPauseTime(tries)); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - throw new IOException("Giving up after tries=" + tries, e); + throw new IOException("Giving up trying to get region server: thread is interrupted."); } } return null; @@ -1268,8 +1331,8 @@ public class HConnectionManager { public T getRegionServerWithoutRetries(ServerCallable callable) throws IOException, RuntimeException { try { + callable.instantiateServer(false); callable.beforeCall(); - callable.connect(false); return callable.call(); } catch (Throwable t) { Throwable t2 = translateException(t); @@ -1294,7 +1357,7 @@ public class HConnectionManager { return server.multi(multi); } @Override - public void connect(boolean reload) throws IOException { + public void instantiateServer(boolean reload) throws IOException { server = connection.getHRegionConnection(loc.getHostname(), loc.getPort()); } @@ -1762,69 +1825,5 @@ public class HConnectionManager { LOG.debug("The connection to " + this.zooKeeper + " was closed by the finalize method."); } - - public HTableDescriptor[] listTables() throws IOException { - if (this.master == null) { - this.master = getMaster(); - } - HTableDescriptor[] htd = master.getHTableDescriptors(); - return htd; - } - - public HTableDescriptor[] getHTableDescriptors(List tableNames) throws IOException { - if (tableNames == null || tableNames.size() == 0) return null; - if (this.master == null) { - this.master = getMaster(); - } - return master.getHTableDescriptors(tableNames); - } - - public HTableDescriptor getHTableDescriptor(final byte[] tableName) - throws IOException { - if (tableName == null || tableName.length == 0) return null; - if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { - return new UnmodifyableHTableDescriptor(HTableDescriptor.ROOT_TABLEDESC); - } - if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { - return HTableDescriptor.META_TABLEDESC; - } - if (this.master == null) { - this.master = getMaster(); - } - HTableDescriptor hTableDescriptor = null; - HTableDescriptor[] htds = master.getHTableDescriptors(); - if (htds != null && htds.length > 0) { - for (HTableDescriptor htd: htds) { - if (Bytes.equals(tableName, htd.getName())) { - hTableDescriptor = htd; - } - } - } - //HTableDescriptor htd = master.getHTableDescriptor(tableName); - if (hTableDescriptor == null) { - throw new TableNotFoundException(Bytes.toString(tableName)); - } - 
return hTableDescriptor; - } - } - - /** - * Set the number of retries to use serverside when trying to communicate - * with another server over {@link HConnection}. Used updating catalog - * tables, etc. Call this method before we create any Connections. - * @param c The Configuration instance to set the retries into. - * @param log Used to log what we set in here. - */ - public static void setServerSideHConnectionRetries(final Configuration c, - final Log log) { - int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); - // Go big. Multiply by 10. If we can't get to meta after this many retries - // then something seriously wrong. - int serversideMultiplier = - c.getInt("hbase.client.serverside.retries.multiplier", 10); - int retries = hcRetries * serversideMultiplier; - c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries); - log.debug("Set serverside HConnection retries=" + retries); } } diff --git src/main/java/org/apache/hadoop/hbase/client/HTable.java src/main/java/org/apache/hadoop/hbase/client/HTable.java index 0d2f3d0..7783be6 100644 --- src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.UnknownScannerException; +import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.coprocessor.Batch; @@ -191,8 +192,7 @@ public class HTable implements HTableInterface, Closeable { } this.connection = HConnectionManager.getConnection(conf); this.scannerTimeout = - (int) conf.getLong(HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY, - HConstants.DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD); + (int) conf.getLong(HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY, HConstants.DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD); this.operationTimeout = HTableDescriptor.isMetaTable(tableName) ? 
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT : conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); @@ -406,7 +406,7 @@ public class HTable implements HTableInterface, Closeable { return true; } HRegionInfo info = Writables.getHRegionInfo(bytes); - if (Bytes.equals(info.getTableName(), getTableName())) { + if (Bytes.equals(info.getTableDesc().getName(), getTableName())) { if (!(info.isOffline() || info.isSplit())) { startKeyList.add(info.getStartKey()); endKeyList.add(info.getEndKey()); @@ -416,9 +416,8 @@ public class HTable implements HTableInterface, Closeable { } }; MetaScanner.metaScan(configuration, visitor, this.tableName); - return new Pair( - startKeyList.toArray(new byte[startKeyList.size()][]), - endKeyList.toArray(new byte[endKeyList.size()][])); + return new Pair(startKeyList.toArray(new byte[startKeyList.size()][]), + endKeyList.toArray(new byte[endKeyList.size()][])); } /** @@ -437,7 +436,7 @@ public class HTable implements HTableInterface, Closeable { rowResult.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER)); - if (!(Bytes.equals(info.getTableName(), getTableName()))) { + if (!(Bytes.equals(info.getTableDesc().getName(), getTableName()))) { return false; } @@ -852,7 +851,7 @@ public class HTable implements HTableInterface, Closeable { @Override public void flushCommits() throws IOException { try { - this.connection.processBatchOfPuts(writeBuffer, tableName, pool); + connection.processBatchOfPuts(writeBuffer, tableName, pool); } finally { if (clearBufferOnFail) { writeBuffer.clear(); diff --git src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java index f56ca17..2734f30 100644 --- src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -187,7 +187,7 @@ public class MetaScanner { if (LOG.isDebugEnabled()) { LOG.debug("Scanning " + Bytes.toString(metaTableName) + " starting at row=" + Bytes.toStringBinary(startRow) + " for max=" + - rowUpperLimit + " rows using " + connection.toString()); + rowUpperLimit + " rows"); } callable = new ScannerCallable(connection, metaTableName, scan); // Open scanner @@ -288,7 +288,7 @@ public class MetaScanner { HRegionInfo info = Writables.getHRegionInfo( rowResult.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER)); - if (!(Bytes.equals(info.getTableName(), tablename))) { + if (!(Bytes.equals(info.getTableDesc().getName(), tablename))) { return false; } byte [] value = rowResult.getValue(HConstants.CATALOG_FAMILY, @@ -330,4 +330,4 @@ public class MetaScanner { */ public boolean processRow(Result rowResult) throws IOException; } -} +} \ No newline at end of file diff --git src/main/java/org/apache/hadoop/hbase/client/Result.java src/main/java/org/apache/hadoop/hbase/client/Result.java index bfb8ad7..8a0c1a9 100644 --- src/main/java/org/apache/hadoop/hbase/client/Result.java +++ src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -20,16 +20,12 @@ package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; +import com.google.common.collect.Ordering; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.SplitKeyValue; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.WritableWithSize; import 
org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.io.Writable; import java.io.DataInput; diff --git src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java index b9042f6..89d2abe 100644 --- src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java +++ src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java @@ -15,8 +15,9 @@ */ package org.apache.hadoop.hbase.client; +import org.apache.hadoop.hbase.util.Bytes; + import java.io.IOException; -import java.util.Date; import java.util.List; /** @@ -35,53 +36,28 @@ public class RetriesExhaustedException extends IOException { } /** - * Datastructure that allows adding more info around Throwable incident. - */ - public static class ThrowableWithExtraContext { - private final Throwable t; - private final long when; - private final String extras; - - public ThrowableWithExtraContext(final Throwable t, final long when, - final String extras) { - this.t = t; - this.when = when; - this.extras = extras; - } - - @Override - public String toString() { - return new Date(this.when).toString() + ", " + extras + ", " + t.toString(); - } - } - - /** * Create a new RetriesExhaustedException from the list of prior failures. - * @param callableVitals Details from the {@link ServerCallable} we were using - * when we got this exception. + * @param serverName name of HRegionServer + * @param regionName name of region + * @param row The row we were pursuing when we ran out of retries * @param numTries The number of tries we made * @param exceptions List of exceptions that failed before giving up */ - public RetriesExhaustedException(final String callableVitals, int numTries, - List exceptions) { - super(getMessage(callableVitals, numTries, exceptions)); + public RetriesExhaustedException(String serverName, final byte [] regionName, + final byte [] row, int numTries, List exceptions) { + super(getMessage(serverName, regionName, row, numTries, exceptions)); } - /** - * Create a new RetriesExhaustedException from the list of prior failures. - * @param numTries - * @param exceptions List of exceptions that failed before giving up - */ - public RetriesExhaustedException(final int numTries, - final List exceptions) { - super(getMessage(numTries, exceptions)); - } - - private static String getMessage(String callableVitals, int numTries, - List exceptions) { - StringBuilder buffer = new StringBuilder("Failed contacting "); - buffer.append(callableVitals); - buffer.append(" after "); + private static String getMessage(String serverName, final byte [] regionName, + final byte [] row, + int numTries, List exceptions) { + StringBuilder buffer = new StringBuilder("Trying to contact region server "); + buffer.append(serverName); + buffer.append(" for region "); + buffer.append(regionName == null? "": Bytes.toStringBinary(regionName)); + buffer.append(", row '"); + buffer.append(row == null? 
"": Bytes.toStringBinary(row)); + buffer.append("', but failed after "); buffer.append(numTries + 1); buffer.append(" attempts.\nExceptions:\n"); for (Throwable t : exceptions) { @@ -90,16 +66,4 @@ public class RetriesExhaustedException extends IOException { } return buffer.toString(); } - - private static String getMessage(final int numTries, - final List exceptions) { - StringBuilder buffer = new StringBuilder("Failed after attempts="); - buffer.append(numTries + 1); - buffer.append(", exceptions:\n"); - for (ThrowableWithExtraContext t : exceptions) { - buffer.append(t.toString()); - buffer.append("\n"); - } - return buffer.toString(); - } } \ No newline at end of file diff --git src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index cd8411a..20be384 100644 --- src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -59,9 +59,9 @@ public class ScannerCallable extends ServerCallable { * @throws IOException */ @Override - public void connect(boolean reload) throws IOException { + public void instantiateServer(boolean reload) throws IOException { if (!instantiated || reload) { - super.connect(reload); + super.instantiateServer(reload); instantiated = true; } } diff --git src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java index 9b568e3..816f8b7 100644 --- src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java +++ src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java @@ -31,14 +31,7 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.util.Bytes; /** - * Abstract class that implements {@link Callable}. Implementation stipulates - * return type and method we actually invoke on remote Server. Usually - * used inside a try/catch that fields usual connection failures all wrapped - * up in a retry loop. - *
Call {@link #connect(boolean)} to connect to server hosting region - * that contains the passed row in the passed table before invoking - * {@link #call()}. - * @see HConnection#getRegionServerWithoutRetries(ServerCallable) + * Abstract class that implements Callable, used by retryable actions. * @param the class that the ServerCallable handles */ public abstract class ServerCallable implements Callable { @@ -51,9 +44,9 @@ public abstract class ServerCallable implements Callable { protected long startTime, endTime; /** - * @param connection Connection to use. - * @param tableName Table name to which row belongs. - * @param row The row we want in tableName. + * @param connection connection callable is on + * @param tableName table name callable is on + * @param row row we are querying */ public ServerCallable(HConnection connection, byte [] tableName, byte [] row) { this(connection, tableName, row, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); @@ -65,37 +58,34 @@ public abstract class ServerCallable implements Callable { this.row = row; this.callTimeout = callTimeout; } - /** - * Connect to the server hosting region with row from tablename. - * @param reload Set this to true if connection should re-find the region + * + * @param reload set this to true if connection should re-find the region * @throws IOException e */ - public void connect(final boolean reload) throws IOException { + public void instantiateServer(boolean reload) throws IOException { this.location = connection.getRegionLocation(tableName, row, reload); this.server = connection.getHRegionConnection(location.getHostname(), location.getPort()); } - /** @return the server name - * @deprecated Just use {@link #toString()} instead. - */ + /** @return the server name */ public String getServerName() { - if (location == null) return null; + if (location == null) { + return null; + } return location.getHostnamePort(); } - /** @return the region name - * @deprecated Just use {@link #toString()} instead. - */ + /** @return the region name */ public byte[] getRegionName() { - if (location == null) return null; + if (location == null) { + return null; + } return location.getRegionInfo().getRegionName(); } - /** @return the row - * @deprecated Just use {@link #toString()} instead. 
- */ + /** @return the row */ public byte [] getRow() { return row; } diff --git src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java index 412f770..23e7a6b 100644 --- src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java +++ src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java @@ -30,6 +30,7 @@ class UnmodifyableHRegionInfo extends HRegionInfo { */ UnmodifyableHRegionInfo(HRegionInfo info) { super(info); + this.tableDesc = new UnmodifyableHTableDescriptor(info.getTableDesc()); } /** diff --git src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java index ac28b96..e5202b0 100644 --- src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java +++ src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java @@ -165,7 +165,6 @@ public class HbaseObjectWritable implements Writable, WritableWithSize, Configur addToMap(HServerAddress.class, code++); addToMap(HServerInfo.class, code++); addToMap(HTableDescriptor.class, code++); - addToMap(HTableDescriptor[].class, code++); addToMap(MapWritable.class, code++); // diff --git src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java index 18d7b33..5d9510c 100644 --- src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java +++ src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; -import java.util.List; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -87,7 +86,7 @@ public interface HMasterInterface extends VersionedProtocol { * @throws IOException */ public Pair getAlterStatus(byte[] tableName) - throws IOException; + throws IOException; /** * Adds a column to the specified table @@ -229,25 +228,4 @@ public interface HMasterInterface extends VersionedProtocol { * @return Previous balancer value */ public boolean balanceSwitch(final boolean b); - - /** - * Get array of all HTDs. - * @return array of HTableDescriptor - */ - public HTableDescriptor[] getHTableDescriptors(); - - /** - * Get current HTD for a given tablename - * @param tableName - * @return HTableDescriptor for the table - */ - //public HTableDescriptor getHTableDescriptor(final byte[] tableName); - - /** - * Get array of HTDs for requested tables. 
-   * @param tableNames
-   * @return array of HTableDescriptor
-   */
-  public HTableDescriptor[] getHTableDescriptors(List<String> tableNames);
-
 }
diff --git src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index def25db..876d065 100644
--- src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -23,8 +23,10 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.lang.Thread.UncaughtExceptionHandler;
+import java.util.Arrays;
 import java.util.ArrayList;
 import java.util.Date;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -48,6 +50,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
@@ -163,7 +166,6 @@ public class AssignmentManager extends ZooKeeperListener {
   //Thread pool executor service for timeout monitor
   private java.util.concurrent.ExecutorService threadPoolExecutorService;
-
   /**
    * Constructs a new assignment manager.
    *
    * @param
    * @param catalogTracker
    * @param service
    * @throws KeeperException
-   * @throws IOException
    */
   public AssignmentManager(Server master, ServerManager serverManager,
       CatalogTracker catalogTracker, final ExecutorService service)
-  throws KeeperException, IOException {
+  throws KeeperException {
     super(master.getZooKeeper());
     this.master = master;
     this.serverManager = serverManager;
@@ -267,12 +268,11 @@ public class AssignmentManager extends ZooKeeperListener {
    * @param tableName
    * @return Pair indicating the status of the alter command
    * @throws IOException
-   * @throws InterruptedException
    */
   public Pair<Integer, Integer> getReopenStatus(byte[] tableName)
-  throws IOException, InterruptedException {
-    List<HRegionInfo> hris =
-      MetaReader.getTableRegions(this.master.getCatalogTracker(), tableName);
+  throws IOException {
+    List<HRegionInfo> hris = MetaReader.getTableRegions(
+        this.master.getCatalogTracker(), tableName);
     Integer pending = 0;
     for(HRegionInfo hri : hris) {
       if(regionsToReopen.get(hri.getEncodedName()) != null) {
@@ -355,9 +355,7 @@ public class AssignmentManager extends ZooKeeperListener {
     // its a clean cluster startup, else its a failover.
     boolean regionsToProcess = false;
     for (Map.Entry<HRegionInfo, ServerName> e: this.regions.entrySet()) {
-      if (!e.getKey().isMetaRegion()
-          && !e.getKey().isRootRegion()
-          && e.getValue() != null) {
+      if (!e.getKey().isMetaRegion() && e.getValue() != null) {
         LOG.debug("Found " + e + " out on cluster");
         regionsToProcess = true;
         break;
@@ -379,7 +377,6 @@ public class AssignmentManager extends ZooKeeperListener {
     }
     else {
       // Fresh cluster startup.
-      LOG.info("Clean cluster startup. Assigning userregions");
       cleanoutUnassigned();
       assignAllUserRegions();
     }
@@ -729,7 +726,7 @@ public class AssignmentManager extends ZooKeeperListener {
     case RS_ZK_REGION_OPENING:
       // Should see OPENING after we have asked it to OPEN or additional
       // times after already being in state of OPENING
-      if (regionState == null ||
+      if(regionState == null ||
           (!regionState.isPendingOpen() && !regionState.isOpening())) {
         LOG.warn("Received OPENING for region " +
             prettyPrintedRegionName +
@@ -1763,6 +1760,7 @@ public class AssignmentManager extends ZooKeeperListener {
       // Presume that master has stale data. Presume remote side just split.
       // Presume that the split message when it comes in will fix up the master's
       // in memory cluster state.
+      return;
     } catch (Throwable t) {
       if (t instanceof RemoteException) {
         t = ((RemoteException)t).unwrapRemoteException();
@@ -2080,13 +2078,13 @@ public class AssignmentManager extends ZooKeeperListener {
   Map<ServerName, List<Pair<HRegionInfo, Result>>> rebuildUserRegions()
   throws IOException, KeeperException {
     // Region assignment from META
-    List<Result> results = MetaReader.fullScan(this.catalogTracker);
+    List<Result> results = MetaReader.fullScanOfResults(this.catalogTracker);
     // Map of offline servers and their regions to be returned
     Map<ServerName, List<Pair<HRegionInfo, Result>>> offlineServers =
       new TreeMap<ServerName, List<Pair<HRegionInfo, Result>>>();
     // Iterate regions in META
     for (Result result : results) {
-      Pair<HRegionInfo, ServerName> region = MetaReader.parseCatalogResult(result);
+      Pair<HRegionInfo, ServerName> region = MetaReader.metaRowToRegionPair(result);
       if (region == null) continue;
       HRegionInfo regionInfo = region.getFirst();
       ServerName regionLocation = region.getSecond();
@@ -2389,10 +2387,10 @@ public class AssignmentManager extends ZooKeeperListener {
   public List<HRegionInfo> getRegionsOfTable(byte[] tableName) {
     List<HRegionInfo> tableRegions = new ArrayList<HRegionInfo>();
     HRegionInfo boundary =
-      new HRegionInfo(tableName, null, null);
+      new HRegionInfo(new HTableDescriptor(tableName), null, null);
     synchronized (this.regions) {
       for (HRegionInfo regionInfo: this.regions.tailMap(boundary).keySet()) {
-        if(Bytes.equals(regionInfo.getTableName(), tableName)) {
+        if(Bytes.equals(regionInfo.getTableDesc().getName(), tableName)) {
           tableRegions.add(regionInfo);
         } else {
           break;
@@ -2666,7 +2664,7 @@ public class AssignmentManager extends ZooKeeperListener {
     // that case. This is not racing with the region server itself since RS
     // report is done after the split transaction completed.
     if (this.zkTable.isDisablingOrDisabledTable(
-        parent.getTableNameAsString())) {
+        parent.getTableDesc().getNameAsString())) {
       unassign(a);
       unassign(b);
     }
@@ -2738,6 +2736,30 @@ public class AssignmentManager extends ZooKeeperListener {
   }

   /**
+   * Assigns list of user regions in round-robin fashion, if any.
+   * @param regions Regions to assign.
+   * @param servers Servers to assign the regions across.
+   * @param sync True if we are to wait on all assigns.
+   * @throws IOException
+   */
+  void bulkAssignUserRegions(final HRegionInfo [] regions,
+      final List<ServerName> servers, final boolean sync)
+  throws IOException {
+    Map<ServerName, List<HRegionInfo>> bulkPlan =
+      balancer.roundRobinAssignment(Arrays.asList(regions), servers);
+    LOG.info("Bulk assigning " + regions.length + " region(s) " +
+      "round-robin across " + servers.size() + " server(s)");
+    // Use fixed count thread pool assigning.
+    BulkAssigner ba = new GeneralBulkAssigner(this.master, bulkPlan, this);
+    try {
+      ba.bulkAssign(sync);
+    } catch (InterruptedException e) {
+      throw new IOException("InterruptedException bulk assigning", e);
+    }
+    LOG.info("Bulk assigning done");
+  }
+
+  /**
    * State of a Region while undergoing transitions.
   */
  public static class RegionState implements org.apache.hadoop.io.Writable {
diff --git src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index da9332c..6ee5141 100644
--- src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -19,7 +19,6 @@
  */
 package org.apache.hadoop.hbase.master;

-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Comparator;
 import java.util.Map;
@@ -36,9 +35,7 @@ import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Result;
@@ -49,7 +46,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Writables;

-
 /**
  * A janitor for the catalog tables.  Scans the .META. catalog
  * table on a period looking for unused regions to garbage collect.
@@ -148,8 +144,8 @@ class CatalogJanitor extends Chore {
       if (left == null) return -1;
       if (right == null) return 1;
       // Same table name.
-      int result = Bytes.compareTo(left.getTableName(),
-          right.getTableName());
+      int result = Bytes.compareTo(left.getTableDesc().getName(),
+          right.getTableDesc().getName());
       if (result != 0) return result;
       // Compare start keys.
       result = Bytes.compareTo(left.getStartKey(), right.getStartKey());
@@ -310,16 +306,14 @@ class CatalogJanitor extends Chore {
     }
     FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
     Path rootdir = this.services.getMasterFileSystem().getRootDir();
-    Path tabledir = new Path(rootdir, split.getTableNameAsString());
+    Path tabledir = new Path(rootdir, split.getTableDesc().getNameAsString());
     Path regiondir = new Path(tabledir, split.getEncodedName());
     exists = fs.exists(regiondir);
     if (!exists) {
       LOG.warn("Daughter regiondir does not exist: " + regiondir.toString());
       return new Pair<Boolean, Boolean>(exists, Boolean.FALSE);
     }
-    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());
-
-    for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
+    for (HColumnDescriptor family: split.getTableDesc().getFamilies()) {
       Path p = Store.getStoreHomedir(tabledir, split.getEncodedName(),
         family.getName());
       if (!fs.exists(p)) continue;
@@ -340,9 +334,4 @@ class CatalogJanitor extends Chore {
     return new Pair<Boolean, Boolean>(Boolean.valueOf(exists), Boolean.valueOf(references));
   }
-
-  private HTableDescriptor getTableDescriptor(byte[] tableName)
-      throws TableExistsException, FileNotFoundException, IOException {
-    return this.services.getTableDescriptors().get(Bytes.toString(tableName));
-  }
 }
diff --git src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
index 028e37c..8b72690 100644
--- src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
+++ src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
@@ -663,8 +663,7 @@ public class DefaultLoadBalancer implements LoadBalancer {
       HRegionInfo region) {
     List<String> topServerNames = null;
     try {
-      HTableDescriptor tableDescriptor = getTableDescriptor(
-          region.getTableName());
+      HTableDescriptor tableDescriptor = region.getTableDesc();
       if (tableDescriptor != null) {
         HDFSBlocksDistribution blocksDistribution =
           HRegion.computeHDFSBlocksDistribution(config, tableDescriptor,
@@ -681,32 +680,6 @@ public class DefaultLoadBalancer implements LoadBalancer {
   }

   /**
-   * return HTableDescriptor for a given tableName
-   * @param tableName the table name
-   * @return HTableDescriptor
-   * @throws IOException
-   */
-  private HTableDescriptor getTableDescriptor(byte[] tableName)
-      throws IOException {
-    HTableDescriptor tableDescriptor = null;
-    try {
-      if ( this.services != null)
-      {
-        tableDescriptor = this.services.getTableDescriptors().
-          get(Bytes.toString(tableName));
-      }
-    } catch (TableExistsException tee) {
-      LOG.debug("TableExistsException during getTableDescriptors." +
-        " Current table name = " + tableName , tee);
-    } catch (FileNotFoundException fnfe) {
-      LOG.debug("FileNotFoundException during getTableDescriptors." +
-        " Current table name = " + tableName , fnfe);
-    }
-
-    return tableDescriptor;
-  }
-
-  /**
    * Map hostname to ServerName, The output ServerName list will have the same
    * order as input hosts.
    * @param hosts the list of hosts
diff --git src/main/java/org/apache/hadoop/hbase/master/HMaster.java src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 1616659..fe9142c 100644
--- src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -25,6 +25,7 @@ import java.lang.reflect.InvocationTargetException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicReference;
@@ -40,18 +41,19 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerLoad;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.MetaScanner;
-import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
@@ -60,7 +62,7 @@ import org.apache.hadoop.hbase.ipc.HBaseServer;
 import org.apache.hadoop.hbase.ipc.HMasterInterface;
 import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
 import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
+import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.handler.DeleteTableHandler;
 import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
 import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
@@ -68,14 +70,17 @@ import org.apache.hadoop.hbase.master.handler.ModifyTableHandler;
 import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
 import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
 import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
+import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
 import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
 import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.InfoServer;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Sleeper;
@@ -180,8 +185,6 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
   private MasterCoprocessorHost cpHost;
   private final ServerName serverName;

-  private TableDescriptors tableDescriptors;
-
   /**
    * Initializes the HMaster. The steps are as follows:
   *

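The HMaster hunks below toggle between the two HRegionInfo constructor shapes at the heart of this revert: post-HBASE-451, a region is keyed by table name only and the descriptor lives in .tableinfo files on HDFS; pre-HBASE-451 (restored here), the descriptor is embedded in every HRegionInfo. A minimal sketch of the difference, assuming the 0.90-era constructors this patch restores (the class and variable names are illustrative only, not part of the patch):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionInfoShapes {
  public static void main(String[] args) {
    HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes("t1"));
    htd.addFamily(new HColumnDescriptor("f1"));
    // Shape removed by this patch: region identified by table name only;
    // the descriptor would be loaded separately from the filesystem.
    //   new HRegionInfo(htd.getName(), null, null);
    // Shape restored by this patch: the descriptor travels with the region
    // through .META. and RPC.
    HRegionInfo hri = new HRegionInfo(htd, null, null);
    System.out.println(hri.getTableDesc().getNameAsString());
  }
}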
@@ -197,8 +200,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
   public HMaster(final Configuration conf)
   throws IOException, KeeperException, InterruptedException {
     this.conf = conf;
-    // Set how many times to retry talking to another server over HConnection.
-    HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG);
+
     // Server to handle client requests.
     String hostname = DNS.getDefaultHost(
       conf.get("hbase.master.dns.interface", "default"),
@@ -417,15 +419,12 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
     status.setStatus("Initializing Master file system");
     // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
-    this.fileSystemManager = new MasterFileSystem(this, this, metrics);
-
-    this.tableDescriptors =
-      new FSTableDescriptors(this.fileSystemManager.getFileSystem(),
-      this.fileSystemManager.getRootDir());
+    this.fileSystemManager = new MasterFileSystem(this, metrics);

     // publish cluster ID
     status.setStatus("Publishing Cluster ID in ZooKeeper");
-    ClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
+    ClusterId.setClusterId(this.zooKeeper,
+      fileSystemManager.getClusterId());

     this.executorService = new ExecutorService(getServerName().toString());

@@ -460,13 +459,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
     // Make sure root and meta assigned before proceeding.
     assignRootAndMeta(status);

-    // Update meta with new HRI if required. i.e migrate all HRI with HTD to
-    // HRI with out HTD in meta and update the status in ROOT. This must happen
-    // before we assign all user regions or else the assignment will fail.
-    // TODO: Remove this when we do 0.94.
-    org.apache.hadoop.hbase.catalog.MetaMigrationRemovingHTD.
-      updateMetaWithNewHRI(this);
-
+    // Fixup assignment manager status
     status.setStatus("Starting assignment manager");
     this.assignmentManager.joinCluster();
@@ -559,11 +552,6 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
     return -1;
   }

-  @Override
-  public TableDescriptors getTableDescriptors() {
-    return this.tableDescriptors;
-  }
-
   /** @return InfoServer object. Maybe null.*/
   public InfoServer getInfoServer() {
     return this.infoServer;
@@ -928,7 +916,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
     HRegionInfo[] hRegionInfos = null;
     if (splitKeys == null || splitKeys.length == 0) {
       hRegionInfos = new HRegionInfo[]{
-          new HRegionInfo(hTableDescriptor.getName(), null, null)};
+          new HRegionInfo(hTableDescriptor, null, null)};
     } else {
       int numRegions = splitKeys.length + 1;
       hRegionInfos = new HRegionInfo[numRegions];
@@ -937,19 +925,19 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
       for (int i = 0; i < numRegions; i++) {
         endKey = (i == splitKeys.length) ? null : splitKeys[i];
         hRegionInfos[i] =
-            new HRegionInfo(hTableDescriptor.getName(), startKey, endKey);
+            new HRegionInfo(hTableDescriptor, startKey, endKey);
         startKey = endKey;
       }
     }
     return hRegionInfos;
   }

+
   private static boolean isCatalogTable(final byte [] tableName) {
     return Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME) ||
       Bytes.equals(tableName, HConstants.META_TABLE_NAME);
   }

-
   @Override
   public void deleteTable(final byte [] tableName) throws IOException {
     if (cpHost != null) {
       cpHost.preDeleteTable(tableName);
@@ -961,13 +949,16 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
     }
   }

+  /**
+   * Get the number of regions of the table that have been updated by the alter.
+   *
+   * @return Pair of counts: Pair.getFirst() is the number of regions yet to be
+   *         updated; Pair.getSecond() is the total number of regions of the
+   *         table.
+   */
   public Pair<Integer, Integer> getAlterStatus(byte[] tableName)
       throws IOException {
-    try {
-      return this.assignmentManager.getReopenStatus(tableName);
-    } catch (InterruptedException e) {
-      throw new IOException("Interrupted", e);
-    }
+    return this.assignmentManager.getReopenStatus(tableName);
   }

   public void addColumn(byte [] tableName, HColumnDescriptor column)
@@ -1052,11 +1043,11 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
         if (data == null || data.size() <= 0) {
           return true;
         }
-        Pair<HRegionInfo, ServerName> pair = MetaReader.parseCatalogResult(data);
+        Pair<HRegionInfo, ServerName> pair = MetaReader.metaRowToRegionPair(data);
         if (pair == null) {
           return false;
         }
-        if (!Bytes.equals(pair.getFirst().getTableName(), tableName)) {
+        if (!Bytes.equals(pair.getFirst().getTableDesc().getName(), tableName)) {
           return false;
         }
         result.set(pair);
@@ -1346,7 +1337,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
       throws IOException {
     Pair<HRegionInfo, ServerName> pair =
       MetaReader.getRegion(this.catalogTracker, regionName);
-    if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
+    if (pair == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName));
     HRegionInfo hri = pair.getFirst();
     if (cpHost != null) {
       if (cpHost.preUnassign(hri, force)) {
@@ -1365,42 +1356,6 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
   }

   /**
-   * Get HTD array for given tables
-   * @param tableNames
-   * @return HTableDescriptor[]
-   */
-  public HTableDescriptor[] getHTableDescriptors(List<String> tableNames) {
-    List<HTableDescriptor> list =
-      new ArrayList<HTableDescriptor>(tableNames.size());
-    for (String s: tableNames) {
-      HTableDescriptor htd = null;
-      try {
-        htd = this.tableDescriptors.get(s);
-      } catch (IOException e) {
-        LOG.warn("Failed getting descriptor for " + s, e);
-      }
-      if (htd == null) continue;
-      list.add(htd);
-    }
-    return list.toArray(new HTableDescriptor [] {});
-  }
-
-  /**
-   * Get all table descriptors
-   * @return All descriptors or null if none.
-   */
-  public HTableDescriptor [] getHTableDescriptors() {
-    Map<String, HTableDescriptor> descriptors = null;
-    try {
-      descriptors = this.tableDescriptors.getAll();
-    } catch (IOException e) {
-      LOG.warn("Failed getting all descriptors", e);
-    }
-    return descriptors == null?
-      null: descriptors.values().toArray(new HTableDescriptor [] {});
-  }
-
-  /**
    * Compute the average load across all region servers.
    * Currently, this uses a very naive computation - just uses the number of
    * regions being served, ignoring stats about number of requests.
diff --git src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 91abb00..740ccd3 100644
--- src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -36,13 +36,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
 import org.apache.hadoop.hbase.regionserver.wal.OrphanHLogAfterSplitException;
@@ -77,14 +76,11 @@ public class MasterFileSystem {
   final Lock splitLogLock = new ReentrantLock();
   final boolean distributedLogSplitting;
   final SplitLogManager splitLogManager;
-  private final MasterServices services;

-  public MasterFileSystem(Server master, MasterServices services,
-      MasterMetrics metrics)
+  public MasterFileSystem(Server master, MasterMetrics metrics)
   throws IOException {
     this.conf = master.getConfiguration();
     this.master = master;
-    this.services = services;
     this.metrics = metrics;
     // Set filesystem to be that of this.rootdir else we get complaints about
     // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
@@ -327,18 +323,9 @@ public class MasterFileSystem {
     if (!FSUtils.rootRegionExists(fs, rd)) {
       bootstrap(rd, c);
     }
-    createRootTableInfo(rd);
     return rd;
   }

-  private void createRootTableInfo(Path rd) throws IOException {
-    // Create ROOT tableInfo if required.
-    if (!FSUtils.tableInfoExists(fs, rd,
-        Bytes.toString(HRegionInfo.ROOT_REGIONINFO.getTableName()))) {
-      FSUtils.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC, this.conf);
-    }
-  }
-
   private static void bootstrap(final Path rd, final Configuration c)
   throws IOException {
     LOG.info("BOOTSTRAP: creating ROOT and first META regions");
@@ -348,15 +335,13 @@ public class MasterFileSystem {
       // not make it in first place.  Turn off block caching for bootstrap.
       // Enable after.
       HRegionInfo rootHRI = new HRegionInfo(HRegionInfo.ROOT_REGIONINFO);
-      setInfoFamilyCachingForRoot(false);
+      setInfoFamilyCaching(rootHRI, false);
       HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-      setInfoFamilyCachingForMeta(false);
-      HRegion root = HRegion.createHRegion(rootHRI, rd, c,
-        HTableDescriptor.ROOT_TABLEDESC);
-      HRegion meta = HRegion.createHRegion(metaHRI, rd, c,
-        HTableDescriptor.META_TABLEDESC);
-      setInfoFamilyCachingForRoot(true);
-      setInfoFamilyCachingForMeta(true);
+      setInfoFamilyCaching(metaHRI, false);
+      HRegion root = HRegion.createHRegion(rootHRI, rd, c);
+      HRegion meta = HRegion.createHRegion(metaHRI, rd, c);
+      setInfoFamilyCaching(rootHRI, true);
+      setInfoFamilyCaching(metaHRI, true);
       // Add first region from the META table to the ROOT region.
       HRegion.addRegionToMETA(root, meta);
       root.close();
@@ -370,19 +355,12 @@ public class MasterFileSystem {
     }
   }

-  private static void setInfoFamilyCachingForRoot(final boolean b) {
-    for (HColumnDescriptor hcd:
-        HTableDescriptor.ROOT_TABLEDESC.getColumnFamilies()) {
-      if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
-        hcd.setBlockCacheEnabled(b);
-        hcd.setInMemory(b);
-      }
-    }
-  }
-
-  private static void setInfoFamilyCachingForMeta(final boolean b) {
-    for (HColumnDescriptor hcd:
-        HTableDescriptor.META_TABLEDESC.getColumnFamilies()) {
+  /**
+   * @param hri Set all family block caching to b
+   * @param b
+   */
+  private static void setInfoFamilyCaching(final HRegionInfo hri, final boolean b) {
+    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
       if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
         hcd.setBlockCacheEnabled(b);
         hcd.setInMemory(b);
@@ -390,7 +368,6 @@ public class MasterFileSystem {
     }
   }

-
   public void deleteRegion(HRegionInfo region) throws IOException {
     fs.delete(HRegion.getRegionDir(rootdir, region), true);
   }
@@ -405,91 +382,16 @@ public class MasterFileSystem {
     // @see HRegion.checkRegioninfoOnFilesystem()
   }

+  public void deleteFamily(HRegionInfo region, byte[] familyName)
+  throws IOException {
+    fs.delete(Store.getStoreHomedir(
+      new Path(rootdir, region.getTableDesc().getNameAsString()),
+      region.getEncodedName(), familyName), true);
+  }
+
   public void stop() {
     if (splitLogManager != null) {
       this.splitLogManager.stop();
     }
   }
-
-  /**
-   * Get table info path for a table.
-   * @param tableName
-   * @return Table info path
-   */
-  private Path getTableInfoPath(byte[] tableName) {
-    Path tablePath = new Path(this.rootdir, Bytes.toString(tableName));
-    Path tableInfoPath = new Path(tablePath, HConstants.TABLEINFO_NAME);
-    return tableInfoPath;
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS.
-   *
-   * @param htableDescriptor
-   */
-  public void createTableDescriptor(HTableDescriptor htableDescriptor)
-      throws IOException {
-    FSUtils.createTableDescriptor(htableDescriptor, conf);
-  }
-
-  /**
-   * Delete column of a table
-   * @param tableName
-   * @param familyName
-   * @return Modified HTableDescriptor with requested column deleted.
-   * @throws IOException
-   */
-  public HTableDescriptor deleteColumn(byte[] tableName, byte[] familyName)
-      throws IOException {
-    LOG.info("DeleteColumn. Table = " + Bytes.toString(tableName)
-        + " family = " + Bytes.toString(familyName));
-    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
-    htd.removeFamily(familyName);
-    this.services.getTableDescriptors().add(htd);
-    return htd;
-  }
-
-  /**
-   * Modify Column of a table
-   * @param tableName
-   * @param hcd HColumnDesciptor
-   * @return Modified HTableDescriptor with the column modified.
-   * @throws IOException
-   */
-  public HTableDescriptor modifyColumn(byte[] tableName, HColumnDescriptor hcd)
-      throws IOException {
-    LOG.info("AddModifyColumn. Table = " + Bytes.toString(tableName)
-        + " HCD = " + hcd.toString());
-
-    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
-    byte [] familyName = hcd.getName();
-    if(!htd.hasFamily(familyName)) {
-      throw new InvalidFamilyOperationException("Family '" +
-        Bytes.toString(familyName) + "' doesn't exists so cannot be modified");
-    }
-    htd.addFamily(hcd);
-    this.services.getTableDescriptors().add(htd);
-    return htd;
-  }
-
-  /**
-   * Add column to a table
-   * @param tableName
-   * @param hcd
-   * @return Modified HTableDescriptor with new column added.
-   * @throws IOException
-   */
-  public HTableDescriptor addColumn(byte[] tableName, HColumnDescriptor hcd)
-      throws IOException {
-    LOG.info("AddColumn. Table = " + Bytes.toString(tableName) + " HCD = " +
-      hcd.toString());
-    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
-    if (htd == null) {
-      throw new InvalidFamilyOperationException("Family '" +
-        hcd.getNameAsString() + "' cannot be modified as HTD is null");
-    }
-    htd.addFamily(hcd);
-    this.services.getTableDescriptors().add(htd);
-    return htd;
-  }
 }
diff --git src/main/java/org/apache/hadoop/hbase/master/MasterServices.java src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 3d5124c..db330ae 100644
--- src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -23,10 +23,11 @@ import java.io.IOException;

 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.executor.ExecutorService;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

 /**
  * Services Master supplies
@@ -68,9 +69,4 @@ public interface MasterServices extends Server {
    */
   public void createTable(HTableDescriptor desc, byte [][] splitKeys)
       throws IOException;
-
-  /**
-   * @return Return table descriptors implementation.
-   */
-  public TableDescriptors getTableDescriptors();
 }
diff --git src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 4a0c6d8..8bf51dd 100644
--- src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -240,7 +240,7 @@ public class SplitLogManager extends ZooKeeperListener {
     for (FileStatus lf : logfiles) {
       // TODO If the log file is still being written to - which is most likely
       // the case for the last log file - then its length will show up here
-      // as zero. The size of such a file can only be retrieved after
+      // as zero. The size of such a file can only be retrieved after after
       // recover-lease is done. totalSize will be under in most cases and the
       // metrics that it drives will also be under-reported.
       totalSize += lf.getLen();
@@ -1052,4 +1052,4 @@ public class SplitLogManager extends ZooKeeperListener {
     SUCCESS(),
     FAILURE();
   }
-}
\ No newline at end of file
+}
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
index 2dfc3e7..5cec1c0 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
@@ -92,7 +92,7 @@ public class ClosedRegionHandler extends EventHandler implements TotesHRegionInf
     LOG.debug("Handling CLOSED event for " + regionInfo.getEncodedName());
     // Check if this table is being disabled or not
     if (this.assignmentManager.getZKTable().
-        isDisablingOrDisabledTable(this.regionInfo.getTableNameAsString())) {
+        isDisablingOrDisabledTable(this.regionInfo.getTableDesc().getNameAsString())) {
       assignmentManager.offlineDisabledRegion(regionInfo);
       return;
     }
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
index b9d4b90..1950bca 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
@@ -133,12 +133,6 @@ public class CreateTableHandler extends EventHandler {

   private void handleCreateTable() throws IOException, KeeperException {
-    // TODO: Currently we make the table descriptor and as side-effect the
-    // tableDir is created. Should we change below method to be createTable
-    // where we create table in tmp dir with its table descriptor file and then
-    // do rename to move it into place?
-    FSUtils.createTableDescriptor(this.hTableDescriptor, this.conf);
-
     List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
     final int batchSize =
       this.conf.getInt("hbase.master.createtable.batchsize", 100);
@@ -148,7 +142,7 @@ public class CreateTableHandler extends EventHandler {
       // 1. Create HRegion
       HRegion region = HRegion.createHRegion(newRegion,
         this.fileSystemManager.getRootDir(), this.conf,
-        this.hTableDescriptor, hlog);
+        hlog);
       if (hlog == null) {
         hlog = region.getLog();
       }
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
index caa1fe0..1db7db6 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
@@ -70,8 +70,6 @@ public class DeleteTableHandler extends TableEventHandler {
     }
     // Delete table from FS
     this.masterServices.getMasterFileSystem().deleteTable(tableName);
-    // Update table descriptor cache
-    this.masterServices.getTableDescriptors().remove(Bytes.toString(tableName));

     // If entry for this table in zk, and up in AssignmentManager, remove it.
     // Call to undisableTable does this. TODO: Make a more formal purge table.
@@ -86,4 +84,4 @@ public class DeleteTableHandler extends TableEventHandler {
     }
     return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + tableNameStr;
   }
-}
+}
\ No newline at end of file
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
index 78bb5bf..1e5d83c 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
@@ -111,8 +111,8 @@ public class EnableTableHandler extends EventHandler {
     boolean done = false;
     // Get the regions of this table. We're done when all listed
     // tables are onlined.
-    List<HRegionInfo> regionsInMeta;
-    regionsInMeta = MetaReader.getTableRegions(this.ct, tableName, true);
+    List<HRegionInfo> regionsInMeta =
+      MetaReader.getTableRegions(this.ct, tableName, true);
     int countOfRegionsInTable = regionsInMeta.size();
     List<HRegionInfo> regions = regionsToAssign(regionsInMeta);
     int regionsCount = regions.size();
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
index 3d72463..96fe6e1 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
@@ -25,6 +25,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.util.Bytes;

@@ -45,10 +46,14 @@ public class ModifyTableHandler extends TableEventHandler {
   @Override
   protected void handleTableOperation(List<HRegionInfo> hris)
   throws IOException {
-    // Update descriptor
-    this.masterServices.getTableDescriptors().add(this.htd);
+    for (HRegionInfo hri : hris) {
+      // Update region info in META
+      hri.setTableDesc(this.htd);
+      MetaEditor.updateRegionInfo(this.server.getCatalogTracker(), hri);
+      // Update region info in FS
+      this.masterServices.getMasterFileSystem().updateRegionInfo(hri);
+    }
   }
-
   @Override
   public String toString() {
     String name = "UnknownServerName";
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java
index 7987997..48aedf6 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java
@@ -115,7 +115,7 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInf
           " because regions is NOT in RIT -- presuming this is because it SPLIT");
     }
     if (this.assignmentManager.getZKTable().isDisablingOrDisabledTable(
-        regionInfo.getTableNameAsString())) {
+        regionInfo.getTableDesc().getNameAsString())) {
       debugLog(regionInfo, "Opened region " +
         regionInfo.getRegionNameAsString() + " but " +
         "this table is disabled, triggering close of region");
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
index 0718479..cc315e9 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.master.DeadServer;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
 import org.apache.zookeeper.KeeperException;

 /**
@@ -264,16 +265,14 @@ public class ServerShutdownHandler extends EventHandler {
           LOG.debug("Removed " + rit.getRegion().getRegionNameAsString() +
             " from list of regions to assign because in RIT" +
             " region state: " + rit.getState());
-          if (hris != null) hris.remove(rit.getRegion());
+          hris.remove(rit.getRegion());
         }
       }

-      assert regionsInTransition != null;
-      LOG.info("Reassigning " + ((hris == null)? 0: hris.size()) +
-        " region(s) that " + (serverName == null? "null": serverName) +
-        " was carrying (skipping " +
-        regionsInTransition.size() +
-        " regions(s) that are already in transition)");
+      LOG.info("Reassigning " + (hris == null? 0: hris.size()) +
+        " region(s) that " + serverName +
+        " was carrying (skipping " + regionsInTransition.size() +
+        " regions(s) that are already in transition)");

       // Iterate regions that were on this server and assign them
       if (hris != null) {
@@ -306,7 +305,7 @@ public class ServerShutdownHandler extends EventHandler {
       throws IOException {
     // If table is not disabled but the region is offlined,
     boolean disabled = assignmentManager.getZKTable().isDisabledTable(
-        hri.getTableNameAsString());
+        hri.getTableDesc().getNameAsString());
     if (disabled) return false;
     if (hri.isOffline() && hri.isSplit()) {
       LOG.debug("Offlined and split region " + hri.getRegionNameAsString() +
@@ -343,8 +342,7 @@ public class ServerShutdownHandler extends EventHandler {
       final AssignmentManager assignmentManager,
       final CatalogTracker catalogTracker)
   throws IOException {
-    HRegionInfo daughter =
-      MetaReader.parseHRegionInfoFromCatalogResult(result, qualifier);
+    HRegionInfo daughter = getHRegionInfo(result, qualifier);
     if (daughter == null) return;
     if (isDaughterMissing(catalogTracker, daughter)) {
       LOG.info("Fixup; missing daughter " + daughter.getRegionNameAsString());
@@ -362,6 +360,21 @@ public class ServerShutdownHandler extends EventHandler {
   }

   /**
+   * Interpret the content of the cell at {@link HConstants#CATALOG_FAMILY} and
+   * qualifier as an HRegionInfo and return it, or null.
+   * @param r Result instance to pull from.
+   * @param qualifier Column family qualifier
+   * @return An HRegionInfo instance or null.
+   * @throws IOException
+   */
+  private static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier)
+  throws IOException {
+    byte [] bytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
+    if (bytes == null || bytes.length <= 0) return null;
+    return Writables.getHRegionInfoOrNull(bytes);
+  }
+
+  /**
    * Look for presence of the daughter OR of a split of the daughter in .META.
    * Daughter could have been split over on regionserver before a run of the
    * catalogJanitor had chance to clear reference from parent.
@@ -403,8 +416,7 @@ public class ServerShutdownHandler extends EventHandler {
         @Override
         public boolean visit(Result r) throws IOException {
-          HRegionInfo hri =
-            MetaReader.parseHRegionInfoFromCatalogResult(r, HConstants.REGIONINFO_QUALIFIER);
+          HRegionInfo hri = getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
           if (hri == null) {
             LOG.warn("No serialized HRegionInfo in " + r);
             return true;
@@ -415,8 +427,8 @@ public class ServerShutdownHandler extends EventHandler {
           if (value == null) return false;

           // Now see if we have gone beyond the daughter's startrow.
-          if (!Bytes.equals(daughter.getTableName(),
-              hri.getTableName())) {
+          if (!Bytes.equals(daughter.getTableDesc().getName(),
+              hri.getTableDesc().getName())) {
             // We fell into another table.  Stop scanning.
             return false;
           }
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
index 18cff0b..4775ccb 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.util.Bytes;

@@ -46,24 +47,21 @@ public class TableAddFamilyHandler extends TableEventHandler {
   @Override
   protected void handleTableOperation(List<HRegionInfo> hris)
   throws IOException {
-    HTableDescriptor htd = this.masterServices.getTableDescriptors().
-      get(Bytes.toString(tableName));
+    HTableDescriptor htd = hris.get(0).getTableDesc();
     byte [] familyName = familyDesc.getName();
-    if (htd == null) {
-      throw new IOException("Add Family operation could not be completed as " +
-        "HTableDescritor is missing for table = "
-        + Bytes.toString(tableName));
-    }
     if(htd.hasFamily(familyName)) {
       throw new InvalidFamilyOperationException(
         "Family '" + Bytes.toString(familyName) + "' already exists so " +
         "cannot be added");
     }
-    // Update table descriptor in HDFS
-    htd = this.masterServices.getMasterFileSystem()
-      .addColumn(tableName, familyDesc);
-    // Update in-memory descriptor cache
-    this.masterServices.getTableDescriptors().add(htd);
+    for(HRegionInfo hri : hris) {
+      // Update the HTD
+      hri.getTableDesc().addFamily(familyDesc);
+      // Update region in META
+      MetaEditor.updateRegionInfo(this.server.getCatalogTracker(), hri);
+      // Update region info in FS
+      this.masterServices.getMasterFileSystem().updateRegionInfo(hri);
+    }
   }

   @Override
   public String toString() {
@@ -78,5 +76,4 @@ public class TableAddFamilyHandler extends TableEventHandler {
     return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + tableNameStr + "-" + family;
   }
-
 }
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
index 2f65ff9..56c21e9 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
@@ -26,7 +26,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.util.Bytes;

@@ -45,25 +46,26 @@ public class TableDeleteFamilyHandler extends TableEventHandler {
   @Override
   protected void handleTableOperation(List<HRegionInfo> hris) throws IOException {
-    AssignmentManager am = this.masterServices.getAssignmentManager();
-    HTableDescriptor htd = this.masterServices.getTableDescriptors().get(Bytes.toString(tableName));
-    if (htd == null) {
-      throw new IOException("Add Family operation could not be completed as " +
-        "HTableDescritor is missing for table = "
-        + Bytes.toString(tableName));
-    }
+    HTableDescriptor htd = hris.get(0).getTableDesc();
     if(!htd.hasFamily(familyName)) {
       throw new InvalidFamilyOperationException(
         "Family '" + Bytes.toString(familyName) + "' does not exist so " +
         "cannot be deleted");
     }
-    // Update table descriptor in HDFS
-    htd = this.masterServices.getMasterFileSystem()
-      .deleteColumn(tableName, familyName);
-    // Update in-memory descriptor cache
-    this.masterServices.getTableDescriptors().add(htd);
+    for (HRegionInfo hri : hris) {
+      // Update the HTD
+      hri.getTableDesc().removeFamily(familyName);
+      // Update region in META
+      MetaEditor.updateRegionInfo(this.server.getCatalogTracker(), hri);
+      MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
+      // Update region info in FS
+      mfs.updateRegionInfo(hri);
+      // Delete directory in FS
+      mfs.deleteFamily(hri, familyName);
+    }
   }
-
   @Override
   public String toString() {
     String name = "UnknownServerName";
@@ -76,4 +78,4 @@ public class TableDeleteFamilyHandler extends TableEventHandler {
     }
     return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + tableNameStr + "-" + family;
   }
-}
+}
\ No newline at end of file
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java
index 708ee73..3f8c5c4 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.util.Bytes;

@@ -47,22 +47,20 @@ public class TableModifyFamilyHandler extends TableEventHandler {
   @Override
   protected void handleTableOperation(List<HRegionInfo> regions) throws IOException {
-    AssignmentManager am = this.masterServices.getAssignmentManager();
-    HTableDescriptor htd = this.masterServices.getTableDescriptors().get(Bytes.toString(tableName));
+    HTableDescriptor htd = regions.get(0).getTableDesc();
     byte [] familyName = familyDesc.getName();
-    if (htd == null) {
-      throw new IOException("Modify Family operation could not be completed as " +
-        "HTableDescritor is missing for table = "
-        + Bytes.toString(tableName));
-    }
     if(!htd.hasFamily(familyName)) {
       throw new InvalidFamilyOperationException("Family '" +
         Bytes.toString(familyName) + "' doesn't exists so cannot be modified");
     }
-    // Update table descriptor in HDFS
-    htd = this.masterServices.getMasterFileSystem().modifyColumn(tableName, familyDesc);
-    // Update in-memory descriptor cache
-    this.masterServices.getTableDescriptors().add(htd);
+    for(HRegionInfo hri : regions) {
+      // Update the HTD
+      hri.getTableDesc().addFamily(familyDesc);
+      // Update region in META
+      MetaEditor.updateRegionInfo(this.server.getCatalogTracker(), hri);
+      // Update region info in FS
+      this.masterServices.getMasterFileSystem().updateRegionInfo(hri);
+    }
   }

   @Override
   public String toString() {
@@ -76,5 +74,4 @@ public class TableModifyFamilyHandler extends TableEventHandler {
     }
    return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + tableNameStr + "-" + family;
   }
-
-} +} \ No newline at end of file diff --git src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x.java src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x.java deleted file mode 100644 index eeb18e8..0000000 --- src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x.java +++ /dev/null @@ -1,680 +0,0 @@ -/** - * Copyright 2007 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.migration; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.EOFException; -import java.io.IOException; -import java.util.Arrays; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.JenkinsHash; -import org.apache.hadoop.hbase.util.MD5Hash; -import org.apache.hadoop.io.VersionedWritable; -import org.apache.hadoop.io.WritableComparable; - -/** - * HRegion information. - * Contains HRegion id, start and end keys, a reference to this - * HRegions' table descriptor, etc. - */ -public class HRegionInfo090x extends VersionedWritable implements - WritableComparable{ - private static final byte VERSION = 0; - private static final Log LOG = LogFactory.getLog(HRegionInfo090x.class); - - /** - * The new format for a region name contains its encodedName at the end. - * The encoded name also serves as the directory name for the region - * in the filesystem. - * - * New region name format: - * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. - * where, - * <encodedName> is a hex version of the MD5 hash of - * <tablename>,<startkey>,<regionIdTimestamp> - * - * The old region name format: - * <tablename>,<startkey>,<regionIdTimestamp> - * For region names in the old format, the encoded name is a 32-bit - * JenkinsHash integer value (in its decimal notation, string form). - *

- * **NOTE** - * - * ROOT, the first META region, and regions created by an older - * version of HBase (0.20 or prior) will continue to use the - * old region name format. - */ - - /** Separator used to demarcate the encodedName in a region name - * in the new format. See description on new format above. - */ - private static final int ENC_SEPARATOR = '.'; - public static final int MD5_HEX_LENGTH = 32; - - /** - * Does region name contain its encoded name? - * @param regionName region name - * @return boolean indicating if this a new format region - * name which contains its encoded name. - */ - private static boolean hasEncodedName(final byte[] regionName) { - // check if region name ends in ENC_SEPARATOR - if ((regionName.length >= 1) - && (regionName[regionName.length - 1] == ENC_SEPARATOR)) { - // region name is new format. it contains the encoded name. - return true; - } - return false; - } - - /** - * @param regionName - * @return the encodedName - */ - public static String encodeRegionName(final byte [] regionName) { - String encodedName; - if (hasEncodedName(regionName)) { - // region is in new format: - // ,,/encodedName/ - encodedName = Bytes.toString(regionName, - regionName.length - MD5_HEX_LENGTH - 1, - MD5_HEX_LENGTH); - } else { - // old format region name. ROOT and first META region also - // use this format.EncodedName is the JenkinsHash value. - int hashVal = Math.abs(JenkinsHash.getInstance().hash(regionName, - regionName.length, 0)); - encodedName = String.valueOf(hashVal); - } - return encodedName; - } - - /** - * Use logging. - * @param encodedRegionName The encoded regionname. - * @return -ROOT- if passed 70236052 or - * .META. if passed 1028785192 else returns - * encodedRegionName - */ - public static String prettyPrint(final String encodedRegionName) { - if (encodedRegionName.equals("70236052")) { - return encodedRegionName + "/-ROOT-"; - } else if (encodedRegionName.equals("1028785192")) { - return encodedRegionName + "/.META."; - } - return encodedRegionName; - } - - /** delimiter used between portions of a region name */ - public static final int DELIMITER = ','; - - /** HRegionInfo for root region */ - public static final HRegionInfo090x ROOT_REGIONINFO = - new HRegionInfo090x(0L, HTableDescriptor.ROOT_TABLEDESC); - - /** HRegionInfo for first meta region */ - public static final HRegionInfo090x FIRST_META_REGIONINFO = - new HRegionInfo090x(1L, HTableDescriptor.META_TABLEDESC); - - private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY; - // This flag is in the parent of a split while the parent is still referenced - // by daughter regions. We USED to set this flag when we disabled a table - // but now table state is kept up in zookeeper as of 0.90.0 HBase. - private boolean offLine = false; - private long regionId = -1; - private transient byte [] regionName = HConstants.EMPTY_BYTE_ARRAY; - private String regionNameStr = ""; - private boolean split = false; - private byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; - protected HTableDescriptor tableDesc = null; - private int hashCode = -1; - //TODO: Move NO_HASH to HStoreFile which is really the only place it is used. 
- public static final String NO_HASH = null; - private volatile String encodedName = NO_HASH; - private byte [] encodedNameAsBytes = null; - - private void setHashCode() { - int result = Arrays.hashCode(this.regionName); - result ^= this.regionId; - result ^= Arrays.hashCode(this.startKey); - result ^= Arrays.hashCode(this.endKey); - result ^= Boolean.valueOf(this.offLine).hashCode(); - result ^= this.tableDesc.hashCode(); - this.hashCode = result; - } - - /** - * Private constructor used constructing HRegionInfo for the catalog root and - * first meta regions - */ - private HRegionInfo090x(long regionId, HTableDescriptor tableDesc) { - super(); - this.regionId = regionId; - this.tableDesc = tableDesc; - - // Note: Root & First Meta regions names are still in old format - this.regionName = createRegionName(tableDesc.getName(), null, - regionId, false); - this.regionNameStr = Bytes.toStringBinary(this.regionName); - setHashCode(); - } - - /** Default constructor - creates empty object */ - public HRegionInfo090x() { - super(); - this.tableDesc = new HTableDescriptor(); - } - - /** - * Construct HRegionInfo with explicit parameters - * - * @param tableDesc the table descriptor - * @param startKey first key in region - * @param endKey end of key range - * @throws IllegalArgumentException - */ - public HRegionInfo090x(final HTableDescriptor tableDesc, final byte[] startKey, - final byte[] endKey) - throws IllegalArgumentException { - this(tableDesc, startKey, endKey, false); - } - - /** - * Construct HRegionInfo with explicit parameters - * - * @param tableDesc the table descriptor - * @param startKey first key in region - * @param endKey end of key range - * @param split true if this region has split and we have daughter regions - * regions that may or may not hold references to this region. - * @throws IllegalArgumentException - */ - public HRegionInfo090x(HTableDescriptor tableDesc, final byte[] startKey, - final byte[] endKey, final boolean split) - throws IllegalArgumentException { - this(tableDesc, startKey, endKey, split, System.currentTimeMillis()); - } - - /** - * Construct HRegionInfo with explicit parameters - * - * @param tableDesc the table descriptor - * @param startKey first key in region - * @param endKey end of key range - * @param split true if this region has split and we have daughter regions - * regions that may or may not hold references to this region. - * @param regionid Region id to use. - * @throws IllegalArgumentException - */ - public HRegionInfo090x(HTableDescriptor tableDesc, final byte[] startKey, - final byte[] endKey, final boolean split, final long regionid) - throws IllegalArgumentException { - super(); - if (tableDesc == null) { - throw new IllegalArgumentException("tableDesc cannot be null"); - } - this.offLine = false; - this.regionId = regionid; - this.regionName = createRegionName(tableDesc.getName(), startKey, regionId, true); - this.regionNameStr = Bytes.toStringBinary(this.regionName); - this.split = split; - this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone(); - this.startKey = startKey == null? 
-      HConstants.EMPTY_START_ROW: startKey.clone();
-    this.tableDesc = tableDesc;
-    setHashCode();
-  }
-
-  /**
-   * Construct a copy of another HRegionInfo
-   *
-   * @param other
-   */
-  public HRegionInfo090x(HRegionInfo090x other) {
-    super();
-    this.endKey = other.getEndKey();
-    this.offLine = other.isOffline();
-    this.regionId = other.getRegionId();
-    this.regionName = other.getRegionName();
-    this.regionNameStr = Bytes.toStringBinary(this.regionName);
-    this.split = other.isSplit();
-    this.startKey = other.getStartKey();
-    this.tableDesc = other.getTableDesc();
-    this.hashCode = other.hashCode();
-    this.encodedName = other.getEncodedName();
-  }
-
-  /**
-   * Make a region name of passed parameters.
-   * @param tableName
-   * @param startKey Can be null
-   * @param regionid Region id (Usually timestamp from when region was created).
-   * @param newFormat should we create the region name in the new format
-   * (such that it contains its encoded name?).
-   * @return Region name made of passed tableName, startKey and id
-   */
-  public static byte [] createRegionName(final byte [] tableName,
-      final byte [] startKey, final long regionid, boolean newFormat) {
-    return createRegionName(tableName, startKey, Long.toString(regionid), newFormat);
-  }
-
-  /**
-   * Make a region name of passed parameters.
-   * @param tableName
-   * @param startKey Can be null
-   * @param id Region id (Usually timestamp from when region was created).
-   * @param newFormat should we create the region name in the new format
-   * (such that it contains its encoded name?).
-   * @return Region name made of passed tableName, startKey and id
-   */
-  public static byte [] createRegionName(final byte [] tableName,
-      final byte [] startKey, final String id, boolean newFormat) {
-    return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat);
-  }
-
-  /**
-   * Make a region name of passed parameters.
-   * @param tableName
-   * @param startKey Can be null
-   * @param id Region id (Usually timestamp from when region was created).
-   * @param newFormat should we create the region name in the new format
-   * (such that it contains its encoded name?).
-   * @return Region name made of passed tableName, startKey and id
-   */
-  public static byte [] createRegionName(final byte [] tableName,
-      final byte [] startKey, final byte [] id, boolean newFormat) {
-    byte [] b = new byte [tableName.length + 2 + id.length +
-        (startKey == null? 0: startKey.length) +
-        (newFormat ? (MD5_HEX_LENGTH + 2) : 0)];
-
-    int offset = tableName.length;
-    System.arraycopy(tableName, 0, b, 0, offset);
-    b[offset++] = DELIMITER;
-    if (startKey != null && startKey.length > 0) {
-      System.arraycopy(startKey, 0, b, offset, startKey.length);
-      offset += startKey.length;
-    }
-    b[offset++] = DELIMITER;
-    System.arraycopy(id, 0, b, offset, id.length);
-    offset += id.length;
-
-    if (newFormat) {
-      //
-      // Encoded name should be built into the region name.
-      //
-      // Use the region name thus far (namely, <tablename>,<startkey>,<id>)
-      // to compute a MD5 hash to be used as the encoded name, and append
-      // it to the byte buffer.
-      //
-      String md5Hash = MD5Hash.getMD5AsHex(b, 0, offset);
-      byte [] md5HashBytes = Bytes.toBytes(md5Hash);
-
-      if (md5HashBytes.length != MD5_HEX_LENGTH) {
-        LOG.error("MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH +
-            "; Got=" + md5HashBytes.length);
-      }
-
-      // now append the bytes '.<encodedName>.'
to the end - b[offset++] = ENC_SEPARATOR; - System.arraycopy(md5HashBytes, 0, b, offset, MD5_HEX_LENGTH); - offset += MD5_HEX_LENGTH; - b[offset++] = ENC_SEPARATOR; - } - - return b; - } - - /** - * Gets the table name from the specified region name. - * @param regionName - * @return Table name. - */ - public static byte [] getTableName(byte [] regionName) { - int offset = -1; - for (int i = 0; i < regionName.length; i++) { - if (regionName[i] == DELIMITER) { - offset = i; - break; - } - } - byte [] tableName = new byte[offset]; - System.arraycopy(regionName, 0, tableName, 0, offset); - return tableName; - } - - /** - * Separate elements of a regionName. - * @param regionName - * @return Array of byte[] containing tableName, startKey and id - * @throws IOException - */ - public static byte [][] parseRegionName(final byte [] regionName) - throws IOException { - int offset = -1; - for (int i = 0; i < regionName.length; i++) { - if (regionName[i] == DELIMITER) { - offset = i; - break; - } - } - if(offset == -1) throw new IOException("Invalid regionName format"); - byte [] tableName = new byte[offset]; - System.arraycopy(regionName, 0, tableName, 0, offset); - offset = -1; - for (int i = regionName.length - 1; i > 0; i--) { - if(regionName[i] == DELIMITER) { - offset = i; - break; - } - } - if(offset == -1) throw new IOException("Invalid regionName format"); - byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; - if(offset != tableName.length + 1) { - startKey = new byte[offset - tableName.length - 1]; - System.arraycopy(regionName, tableName.length + 1, startKey, 0, - offset - tableName.length - 1); - } - byte [] id = new byte[regionName.length - offset - 1]; - System.arraycopy(regionName, offset + 1, id, 0, - regionName.length - offset - 1); - byte [][] elements = new byte[3][]; - elements[0] = tableName; - elements[1] = startKey; - elements[2] = id; - return elements; - } - - /** @return the regionId */ - public long getRegionId(){ - return regionId; - } - - /** - * @return the regionName as an array of bytes. - * @see #getRegionNameAsString() - */ - public byte [] getRegionName(){ - return regionName; - } - - /** - * @return Region name as a String for use in logging, etc. - */ - public String getRegionNameAsString() { - if (hasEncodedName(this.regionName)) { - // new format region names already have their encoded name. - return this.regionNameStr; - } - - // old format. regionNameStr doesn't have the region name. - // - // - return this.regionNameStr + "." + this.getEncodedName(); - } - - /** @return the encoded region name */ - public synchronized String getEncodedName() { - if (this.encodedName == NO_HASH) { - this.encodedName = encodeRegionName(this.regionName); - } - return this.encodedName; - } - - public synchronized byte [] getEncodedNameAsBytes() { - if (this.encodedNameAsBytes == null) { - this.encodedNameAsBytes = Bytes.toBytes(getEncodedName()); - } - return this.encodedNameAsBytes; - } - - /** @return the startKey */ - public byte [] getStartKey(){ - return startKey; - } - - /** @return the endKey */ - public byte [] getEndKey(){ - return endKey; - } - - /** - * Returns true if the given inclusive range of rows is fully contained - * by this region. For example, if the region is foo,a,g and this is - * passed ["b","c"] or ["a","c"] it will return true, but if this is passed - * ["b","z"] it will return false. 
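parseRegionName() above depends only on the name layout: the table name runs up to the first comma, the region id follows the last comma, and everything in between is the start key, which may itself contain commas. A standalone sketch of the same two-ended scan (plain JDK, illustrative names):

    import java.util.Arrays;

    public class RegionNameParser {
      // Split "table,startKey,id" on the first and last delimiter only,
      // so commas inside the start key survive intact.
      static byte[][] parse(byte[] name) {
        int first = -1;
        for (int i = 0; i < name.length; i++) {
          if (name[i] == ',') { first = i; break; }
        }
        int last = -1;
        for (int i = name.length - 1; i > 0; i--) {
          if (name[i] == ',') { last = i; break; }
        }
        if (first == -1 || last <= first) {
          throw new IllegalArgumentException("Invalid region name format");
        }
        return new byte[][] {
          Arrays.copyOfRange(name, 0, first),             // table name
          Arrays.copyOfRange(name, first + 1, last),      // start key
          Arrays.copyOfRange(name, last + 1, name.length) // region id
        };
      }

      public static void main(String[] args) {
        for (byte[] part : parse("t1,row,42,1312389123123".getBytes())) {
          System.out.println(new String(part));
        }
      }
    }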
- * @throws IllegalArgumentException if the range passed is invalid (ie end < start) - */ - public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { - if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) { - throw new IllegalArgumentException( - "Invalid range: " + Bytes.toStringBinary(rangeStartKey) + - " > " + Bytes.toStringBinary(rangeEndKey)); - } - - boolean firstKeyInRange = Bytes.compareTo(rangeStartKey, startKey) >= 0; - boolean lastKeyInRange = - Bytes.compareTo(rangeEndKey, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); - return firstKeyInRange && lastKeyInRange; - } - - /** - * Return true if the given row falls in this region. - */ - public boolean containsRow(byte[] row) { - return Bytes.compareTo(row, startKey) >= 0 && - (Bytes.compareTo(row, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); - } - - /** @return the tableDesc */ - public HTableDescriptor getTableDesc(){ - return tableDesc; - } - - /** - * @param newDesc new table descriptor to use - */ - public void setTableDesc(HTableDescriptor newDesc) { - this.tableDesc = newDesc; - } - - /** @return true if this is the root region */ - public boolean isRootRegion() { - return this.tableDesc.isRootRegion(); - } - - /** @return true if this region is from a table that is a meta table, - * either .META. or -ROOT- - */ - public boolean isMetaTable() { - return this.tableDesc.isMetaTable(); - } - - /** @return true if this region is a meta region */ - public boolean isMetaRegion() { - return this.tableDesc.isMetaRegion(); - } - - /** - * @return True if has been split and has daughters. - */ - public boolean isSplit() { - return this.split; - } - - /** - * @param split set split status - */ - public void setSplit(boolean split) { - this.split = split; - } - - /** - * @return True if this region is offline. - */ - public boolean isOffline() { - return this.offLine; - } - - /** - * The parent of a region split is offline while split daughters hold - * references to the parent. Offlined regions are closed. - * @param offLine Set online/offline status. - */ - public void setOffline(boolean offLine) { - this.offLine = offLine; - } - - - /** - * @return True if this is a split parent region. - */ - public boolean isSplitParent() { - if (!isSplit()) return false; - if (!isOffline()) { - LOG.warn("Region is split but NOT offline: " + getRegionNameAsString()); - } - return true; - } - - /** - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - return "REGION => {" + HConstants.NAME + " => '" + - this.regionNameStr + - "', STARTKEY => '" + - Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + - Bytes.toStringBinary(this.endKey) + - "', ENCODED => " + getEncodedName() + "," + - (isOffline()? " OFFLINE => true,": "") + - (isSplit()? 
" SPLIT => true,": "") + - " TABLE => {" + this.tableDesc.toString() + "}"; - } - - /** - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null) { - return false; - } - if (!(o instanceof HRegionInfo090x)) { - return false; - } - return this.compareTo((HRegionInfo090x)o) == 0; - } - - /** - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - return this.hashCode; - } - - /** @return the object version number */ - @Override - public byte getVersion() { - return VERSION; - } - - // - // Writable - // - - @Override - public void write(DataOutput out) throws IOException { - super.write(out); - Bytes.writeByteArray(out, endKey); - out.writeBoolean(offLine); - out.writeLong(regionId); - Bytes.writeByteArray(out, regionName); - out.writeBoolean(split); - Bytes.writeByteArray(out, startKey); - tableDesc.write(out); - out.writeInt(hashCode); - } - - @Override - public void readFields(DataInput in) throws IOException { - super.readFields(in); - this.endKey = Bytes.readByteArray(in); - this.offLine = in.readBoolean(); - this.regionId = in.readLong(); - this.regionName = Bytes.readByteArray(in); - this.regionNameStr = Bytes.toStringBinary(this.regionName); - this.split = in.readBoolean(); - this.startKey = Bytes.readByteArray(in); - try { - this.tableDesc.readFields(in); - } catch(EOFException eofe) { - throw new IOException("HTD not found in input buffer"); - } - this.hashCode = in.readInt(); - } - - // - // Comparable - // - - public int compareTo(HRegionInfo090x o) { - if (o == null) { - return 1; - } - - // Are regions of same table? - int result = Bytes.compareTo(this.tableDesc.getName(), o.tableDesc.getName()); - if (result != 0) { - return result; - } - - // Compare start keys. - result = Bytes.compareTo(this.startKey, o.startKey); - if (result != 0) { - return result; - } - - // Compare end keys. - return Bytes.compareTo(this.endKey, o.endKey); - } - - /** - * @return Comparator to use comparing {@link org.apache.hadoop.hbase.KeyValue}s. - */ - public KVComparator getComparator() { - return isRootRegion()? KeyValue.ROOT_COMPARATOR: isMetaRegion()? - KeyValue.META_COMPARATOR: KeyValue.COMPARATOR; - } -} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index e12b99f..6751ab1 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -244,11 +244,11 @@ public class HRegion implements HeapSize { // , Writable{ final WriteState writestate = new WriteState(); - long memstoreFlushSize; + final long memstoreFlushSize; private volatile long lastFlushTime; final RegionServerServices rsServices; private List> recentFlushes = new ArrayList>(); - private long blockingMemStoreSize; + private final long blockingMemStoreSize; final long threadWakeFrequency; // Used to guard closes final ReentrantReadWriteLock lock = @@ -270,7 +270,6 @@ public class HRegion implements HeapSize { // , Writable{ * Name of the region info file that resides just under the region directory. 
*/ public final static String REGIONINFO_FILE = ".regioninfo"; - private HTableDescriptor htableDescriptor = null; private RegionSplitPolicy splitPolicy; /** @@ -287,7 +286,6 @@ public class HRegion implements HeapSize { // , Writable{ this.log = null; this.regiondir = null; this.regionInfo = null; - this.htableDescriptor = null; this.threadWakeFrequency = 0L; this.coprocessorHost = null; } @@ -316,8 +314,7 @@ public class HRegion implements HeapSize { // , Writable{ * @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester) */ public HRegion(Path tableDir, HLog log, FileSystem fs, Configuration conf, - HRegionInfo regionInfo, final HTableDescriptor htd, - RegionServerServices rsServices) { + HRegionInfo regionInfo, RegionServerServices rsServices) { this.tableDir = tableDir; this.comparator = regionInfo.getComparator(); this.log = log; @@ -326,14 +323,19 @@ public class HRegion implements HeapSize { // , Writable{ this.rowLockWaitDuration = conf.getInt("hbase.rowlock.wait.duration", DEFAULT_ROWLOCK_WAIT_DURATION); this.regionInfo = regionInfo; - this.htableDescriptor = htd; this.rsServices = rsServices; this.threadWakeFrequency = conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000); String encodedNameStr = this.regionInfo.getEncodedName(); - setHTableSpecificConf(); this.regiondir = getRegionDir(this.tableDir, encodedNameStr); - + long flushSize = regionInfo.getTableDesc().getMemStoreFlushSize(); + if (flushSize == HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE) { + flushSize = conf.getLong("hbase.hregion.memstore.flush.size", + HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE); + } + this.memstoreFlushSize = flushSize; + this.blockingMemStoreSize = this.memstoreFlushSize * + conf.getLong("hbase.hregion.memstore.block.multiplier", 2); // don't initialize coprocessors if not running within a regionserver // TODO: revisit if coprocessors should load in other cases if (rsServices != null) { @@ -345,19 +347,6 @@ public class HRegion implements HeapSize { // , Writable{ } } - void setHTableSpecificConf() { - if (this.htableDescriptor == null) return; - LOG.info("Setting up tabledescriptor config now ..."); - long flushSize = this.htableDescriptor.getMemStoreFlushSize(); - if (flushSize == HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE) { - flushSize = conf.getLong("hbase.hregion.memstore.flush.size", - HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE); - } - this.memstoreFlushSize = flushSize; - this.blockingMemStoreSize = this.memstoreFlushSize * - conf.getLong("hbase.hregion.memstore.block.multiplier", 2); - } - /** * Initialize this region. * @return What the next sequence (edit) id should be. @@ -398,7 +387,7 @@ public class HRegion implements HeapSize { // , Writable{ // Load in all the HStores. Get maximum seqid. 
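The inlined block above is the whole of what setHTableSpecificConf() used to compute: the per-table memstore flush size wins unless it still equals the class default, in which case hbase.hregion.memstore.flush.size from the site configuration applies, and the blocking threshold is that result times hbase.hregion.memstore.block.multiplier. A sketch of the same resolution; the 64 MB default is assumed for illustration, since only the multiplier default of 2 is visible in this diff:

    public class FlushSizeSketch {
      // Assumed stand-in for HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE;
      // it doubles as the "table did not override" sentinel.
      static final long DEFAULT_FLUSH_SIZE = 64L * 1024 * 1024;

      static long resolveFlushSize(long tableValue, long configuredValue) {
        // A table-level override wins; the sentinel falls back to the conf.
        return tableValue == DEFAULT_FLUSH_SIZE ? configuredValue : tableValue;
      }

      public static void main(String[] args) {
        long confValue = 128L * 1024 * 1024; // hbase.hregion.memstore.flush.size
        long multiplier = 2;                 // hbase.hregion.memstore.block.multiplier
        long flushSize = resolveFlushSize(DEFAULT_FLUSH_SIZE, confValue);
        long blockingSize = flushSize * multiplier; // updates block above this
        System.out.println(flushSize + " bytes, blocking at " + blockingSize);
      }
    }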
long maxSeqId = -1; - for (HColumnDescriptor c : this.htableDescriptor.getFamilies()) { + for (HColumnDescriptor c : this.regionInfo.getTableDesc().getFamilies()) { status.setStatus("Instantiating store for column family " + c); Store store = instantiateHStore(this.tableDir, c); this.stores.put(c.getName(), store); @@ -418,7 +407,7 @@ public class HRegion implements HeapSize { // , Writable{ SplitTransaction.cleanupAnySplitDetritus(this); FSUtils.deleteDirectory(this.fs, new Path(regiondir, MERGEDIR)); - this.writestate.setReadOnly(this.htableDescriptor.isReadOnly()); + this.writestate.setReadOnly(this.regionInfo.getTableDesc().isReadOnly()); this.writestate.compacting = 0; @@ -502,8 +491,9 @@ public class HRegion implements HeapSize { // , Writable{ String regionEncodedName) throws IOException { HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); - Path tablePath = FSUtils.getTablePath(FSUtils.getRootDir(conf), - tableDescriptor.getName()); + Path tablePath = HTableDescriptor.getTableDir( + FSUtils.getRootDir(conf), + tableDescriptor.getName()); FileSystem fs = tablePath.getFileSystem(conf); for (HColumnDescriptor family: tableDescriptor.getFamilies()) { @@ -782,7 +772,7 @@ public class HRegion implements HeapSize { // , Writable{ /** @return HTableDescriptor for this region */ public HTableDescriptor getTableDesc() { - return this.htableDescriptor; + return this.regionInfo.getTableDesc(); } /** @return HLog in use for this region */ @@ -1219,7 +1209,7 @@ public class HRegion implements HeapSize { // , Writable{ // log-sequence-ids can be safely ignored. if (wal != null) { wal.completeCacheFlush(this.regionInfo.getEncodedNameAsBytes(), - regionInfo.getTableName(), completeSequenceId, + regionInfo.getTableDesc().getName(), completeSequenceId, this.getRegionInfo().isMetaRegion()); } @@ -1330,7 +1320,7 @@ public class HRegion implements HeapSize { // , Writable{ void prepareScanner(Scan scan) throws IOException { if(!scan.hasFamilies()) { // Adding all families to scanner - for(byte[] family: this.htableDescriptor.getFamiliesKeys()){ + for(byte[] family: regionInfo.getTableDesc().getFamiliesKeys()){ scan.addFamily(family); } } @@ -1365,7 +1355,7 @@ public class HRegion implements HeapSize { // , Writable{ private void prepareDelete(Delete delete) throws IOException { // Check to see if this is a deleteRow insert if(delete.getFamilyMap().isEmpty()){ - for(byte [] family : this.htableDescriptor.getFamiliesKeys()){ + for(byte [] family : regionInfo.getTableDesc().getFamiliesKeys()){ // Don't eat the timestamp delete.deleteFamily(family, delete.getTimeStamp()); } @@ -1502,8 +1492,8 @@ public class HRegion implements HeapSize { // , Writable{ // bunch up all edits across all column families into a // single WALEdit. addFamilyMapToWALEdit(familyMap, walEdit); - this.log.append(regionInfo, this.htableDescriptor.getName(), - walEdit, clusterId, now, this.htableDescriptor); + this.log.append(regionInfo, regionInfo.getTableDesc().getName(), + walEdit, clusterId, now); } // Now make changes to the memstore. @@ -1774,8 +1764,8 @@ public class HRegion implements HeapSize { // , Writable{ // Append the edit to WAL Put first = batchOp.operations[firstIndex].getFirst(); - this.log.append(regionInfo, this.htableDescriptor.getName(), - walEdit, first.getClusterId(), now, this.htableDescriptor); + this.log.append(regionInfo, regionInfo.getTableDesc().getName(), + walEdit, first.getClusterId(), now); // ------------------------------------ // STEP 4. 
Write back to memstore @@ -2047,8 +2037,8 @@ public class HRegion implements HeapSize { // , Writable{ // will contain uncommitted transactions. if (writeToWAL) { addFamilyMapToWALEdit(familyMap, walEdit); - this.log.append(regionInfo, this.htableDescriptor.getName(), - walEdit, clusterId, now, this.htableDescriptor); + this.log.append(regionInfo, regionInfo.getTableDesc().getName(), + walEdit, clusterId, now); } long addedSize = applyFamilyMapToMemstore(familyMap); @@ -2840,12 +2830,11 @@ public class HRegion implements HeapSize { // , Writable{ * @param conf is global configuration settings. * @param regionInfo - HRegionInfo that describes the region * is new), then read them from the supplied path. - * @param htd * @param rsServices * @return the new instance */ public static HRegion newHRegion(Path tableDir, HLog log, FileSystem fs, - Configuration conf, HRegionInfo regionInfo, final HTableDescriptor htd, + Configuration conf, HRegionInfo regionInfo, RegionServerServices rsServices) { try { @SuppressWarnings("unchecked") @@ -2854,16 +2843,19 @@ public class HRegion implements HeapSize { // , Writable{ Constructor c = regionClass.getConstructor(Path.class, HLog.class, FileSystem.class, - Configuration.class, HRegionInfo.class, HTableDescriptor.class, - RegionServerServices.class); + Configuration.class, HRegionInfo.class, RegionServerServices.class); - return c.newInstance(tableDir, log, fs, conf, regionInfo, htd, rsServices); + return c.newInstance(tableDir, log, fs, conf, regionInfo, rsServices); } catch (Throwable e) { // todo: what should I throw here? throw new IllegalStateException("Could not instantiate a region instance.", e); } } + public static HRegion createHRegion(final HRegionInfo info, final Path rootDir, + final Configuration conf) throws IOException { + return createHRegion(info, rootDir, conf, null); + } /** * Convenience method creating new HRegions. Used by createTable and by the * bootstrap code in the HMaster constructor. @@ -2873,42 +2865,15 @@ public class HRegion implements HeapSize { // , Writable{ * @param info Info for region to create. * @param rootDir Root directory for HBase instance * @param conf - * @param hTableDescriptor * @return new HRegion * * @throws IOException */ public static HRegion createHRegion(final HRegionInfo info, final Path rootDir, - final Configuration conf, final HTableDescriptor hTableDescriptor) + final Configuration conf, HLog hlog) throws IOException { - return createHRegion(info, rootDir, conf, hTableDescriptor, null); - } - - /** - * Convenience method creating new HRegions. Used by createTable. - * The {@link HLog} for the created region needs to be closed explicitly. - * Use {@link HRegion#getLog()} to get access. - * - * @param info Info for region to create. 
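The new three-argument createHRegion() above is the delegate-with-null half of a common pattern that the effectiveHLog logic below completes: an optional collaborator defaults to null, and the full variant lazily builds a private one when the caller supplied nothing. A generic sketch of the shape (illustrative types, not the HBase signatures):

    public class LazyCollaborator {
      interface Wal { void append(String edit); }

      static final class StdoutWal implements Wal {
        public void append(String edit) { System.out.println("WAL: " + edit); }
      }

      // Convenience overload: callers without a shared WAL pass nothing.
      static Wal openRegion(String name) { return openRegion(name, null); }

      // Full variant: use the caller's WAL, or create a private one on demand.
      static Wal openRegion(String name, Wal shared) {
        Wal effective = (shared != null) ? shared : new StdoutWal();
        effective.append("opened " + name);
        return effective;
      }

      public static void main(String[] args) {
        openRegion("region-a");                  // private WAL created lazily
        openRegion("region-b", new StdoutWal()); // shared WAL reused
      }
    }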
- * @param rootDir Root directory for HBase instance - * @param conf - * @param hTableDescriptor - * @param hlog shared HLog - * @return new HRegion - * - * @throws IOException - */ - public static HRegion createHRegion(final HRegionInfo info, final Path rootDir, - final Configuration conf, - final HTableDescriptor hTableDescriptor, - final HLog hlog) - throws IOException { - LOG.info("creating HRegion " + info.getTableNameAsString() - + " HTD == " + hTableDescriptor + " RootDir = " + rootDir + - " Table name == " + info.getTableNameAsString()); - Path tableDir = - HTableDescriptor.getTableDir(rootDir, info.getTableName()); + HTableDescriptor.getTableDir(rootDir, info.getTableDesc().getName()); Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName()); FileSystem fs = FileSystem.get(conf); fs.mkdirs(regionDir); @@ -2918,7 +2883,7 @@ public class HRegion implements HeapSize { // , Writable{ new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf); } HRegion region = HRegion.newHRegion(tableDir, - effectiveHLog, fs, conf, info, hTableDescriptor, null); + effectiveHLog, fs, conf, info, null); region.initialize(); return region; } @@ -2935,11 +2900,10 @@ public class HRegion implements HeapSize { // , Writable{ * * @throws IOException */ - public static HRegion openHRegion(final HRegionInfo info, - final HTableDescriptor htd, final HLog wal, + public static HRegion openHRegion(final HRegionInfo info, final HLog wal, final Configuration conf) throws IOException { - return openHRegion(info, htd, wal, conf, null, null); + return openHRegion(info, wal, conf, null, null); } /** @@ -2956,9 +2920,8 @@ public class HRegion implements HeapSize { // , Writable{ * * @throws IOException */ - public static HRegion openHRegion(final HRegionInfo info, - final HTableDescriptor htd, final HLog wal, final Configuration conf, - final RegionServerServices rsServices, + public static HRegion openHRegion(final HRegionInfo info, final HLog wal, + final Configuration conf, final RegionServerServices rsServices, final CancelableProgressable reporter) throws IOException { if (LOG.isDebugEnabled()) { @@ -2968,50 +2931,12 @@ public class HRegion implements HeapSize { // , Writable{ throw new NullPointerException("Passed region info is null"); } Path dir = HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), - info.getTableName()); + info.getTableDesc().getName()); HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info, - htd, rsServices); + rsServices); return r.openHRegion(reporter); } - public static HRegion openHRegion(Path tableDir, final HRegionInfo info, - final HTableDescriptor htd, final HLog wal, final Configuration conf) - throws IOException { - return openHRegion(tableDir, info, htd, wal, conf, null, null); - } - - /** - * Open a Region. - * @param tableDir Table directory - * @param info Info for region to be opened. - * @param wal HLog for region to use. This method will call - * HLog#setSequenceNumber(long) passing the result of the call to - * HRegion#getMinSequenceId() to ensure the log id is properly kept - * up. HRegionStore does this every time it opens a new region. - * @param conf - * @param reporter An interface we can report progress against. 
- * @return new HRegion - * - * @throws IOException - */ - public static HRegion openHRegion(final Path tableDir, final HRegionInfo info, - final HTableDescriptor htd, final HLog wal, final Configuration conf, - final RegionServerServices rsServices, - final CancelableProgressable reporter) - throws IOException { - if (info == null) throw new NullPointerException("Passed region info is null"); - LOG.info("HRegion.openHRegion Region name ==" + info.getRegionNameAsString()); - if (LOG.isDebugEnabled()) { - LOG.debug("Opening region: " + info); - } - Path dir = HTableDescriptor.getTableDir(tableDir, - info.getTableName()); - HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info, - htd, rsServices); - return r.openHRegion(reporter); - } - - /** * Open HRegion. * Calls initialize and sets sequenceid. @@ -3031,7 +2956,7 @@ public class HRegion implements HeapSize { // , Writable{ } private void checkCompressionCodecs() throws IOException { - for (HColumnDescriptor fam: this.htableDescriptor.getColumnFamilies()) { + for (HColumnDescriptor fam: regionInfo.getTableDesc().getColumnFamilies()) { CompressionTest.testCompression(fam.getCompression()); CompressionTest.testCompression(fam.getCompactionCompression()); } @@ -3059,11 +2984,6 @@ public class HRegion implements HeapSize { // , Writable{ HConstants.REGIONINFO_QUALIFIER, EnvironmentEdgeManager.currentTimeMillis(), Writables.getBytes(r.getRegionInfo()))); - edits.add(new KeyValue(row, HConstants.CATALOG_FAMILY, - org.apache.hadoop.hbase.catalog.MetaMigrationRemovingHTD.META_MIGRATION_QUALIFIER, - EnvironmentEdgeManager.currentTimeMillis(), - Bytes.toBytes(true))); - meta.put(HConstants.CATALOG_FAMILY, edits); } finally { meta.releaseRowLock(lid); @@ -3102,7 +3022,7 @@ public class HRegion implements HeapSize { // , Writable{ */ public static Path getRegionDir(final Path rootdir, final HRegionInfo info) { return new Path( - HTableDescriptor.getTableDir(rootdir, info.getTableName()), + HTableDescriptor.getTableDir(rootdir, info.getTableDesc().getName()), info.getEncodedName()); } @@ -3179,10 +3099,9 @@ public class HRegion implements HeapSize { // , Writable{ * @return new merged region * @throws IOException */ - public static HRegion merge(HRegion a, HRegion b) - throws IOException { - if (!a.getRegionInfo().getTableNameAsString().equals( - b.getRegionInfo().getTableNameAsString())) { + public static HRegion merge(HRegion a, HRegion b) throws IOException { + if (!a.getRegionInfo().getTableDesc().getNameAsString().equals( + b.getRegionInfo().getTableDesc().getNameAsString())) { throw new IOException("Regions do not belong to the same table"); } @@ -3235,8 +3154,7 @@ public class HRegion implements HeapSize { // , Writable{ ? 
b.getEndKey() : a.getEndKey());
-    HRegionInfo newRegionInfo =
-      new HRegionInfo(tabledesc.getName(), startKey, endKey);
+    HRegionInfo newRegionInfo = new HRegionInfo(tabledesc, startKey, endKey);
     LOG.info("Creating new region " + newRegionInfo.toString());
     String encodedName = newRegionInfo.getEncodedName();
     Path newRegionDir = HRegion.getRegionDir(a.getTableDir(), encodedName);
@@ -3282,8 +3200,7 @@ public class HRegion implements HeapSize { // , Writable{
       LOG.debug("Files for new region");
       listPaths(fs, newRegionDir);
     }
-    HRegion dstRegion = HRegion.newHRegion(tableDir, log, fs, conf,
-        newRegionInfo, a.getTableDesc(), null);
+    HRegion dstRegion = HRegion.newHRegion(tableDir, log, fs, conf, newRegionInfo, null);
     dstRegion.readRequestsCount.set(a.readRequestsCount.get() + b.readRequestsCount.get());
     dstRegion.writeRequestsCount.set(a.writeRequestsCount.get() + b.writeRequestsCount.get());
     dstRegion.initialize();
@@ -3377,7 +3294,7 @@ public class HRegion implements HeapSize { // , Writable{
         checkFamily(family);
       }
     } else { // Adding all families to scanner
-      for (byte[] family: this.htableDescriptor.getFamiliesKeys()) {
+      for (byte[] family: regionInfo.getTableDesc().getFamiliesKeys()) {
         get.addFamily(family);
       }
     }
@@ -3586,9 +3503,8 @@ public class HRegion implements HeapSize { // , Writable{
           // Using default cluster id, as this can only happen in the originating
           // cluster. A slave cluster receives the final value (not the delta)
           // as a Put.
-          this.log.append(regionInfo, this.htableDescriptor.getName(),
-              walEdits, HConstants.DEFAULT_CLUSTER_ID, now,
-              this.htableDescriptor);
+          this.log.append(regionInfo, regionInfo.getTableDesc().getName(),
+              walEdits, HConstants.DEFAULT_CLUSTER_ID, now);
         }
         size = this.addAndGetGlobalMemstoreSize(size);
@@ -3667,9 +3583,8 @@ public class HRegion implements HeapSize { // , Writable{
             // Using default cluster id, as this can only happen in the
             // originating cluster. A slave cluster receives the final value (not
             // the delta) as a Put.
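The DEFAULT_CLUSTER_ID comments above are about loop prevention in replication: increments are logged under the default (originating) cluster id, so a downstream cluster can tell fresh local edits from ones that already crossed a cluster boundary and should not be shipped back. A toy sketch of that filter, heavily simplified from how replication actually tags edits:

    import java.util.UUID;

    public class ClusterIdFilter {
      static final UUID DEFAULT_CLUSTER_ID = new UUID(0L, 0L); // "originated here"

      // A replication source ships an edit only if it was written locally;
      // edits tagged with a peer's id already traveled once and must not loop.
      static boolean shouldReplicate(UUID editClusterId, UUID localClusterId) {
        return editClusterId.equals(DEFAULT_CLUSTER_ID)
            || editClusterId.equals(localClusterId);
      }

      public static void main(String[] args) {
        UUID local = UUID.randomUUID();
        UUID peer = UUID.randomUUID();
        System.out.println(shouldReplicate(DEFAULT_CLUSTER_ID, local)); // true
        System.out.println(shouldReplicate(peer, local));               // false
      }
    }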
- this.log.append(regionInfo, this.htableDescriptor.getName(), - walEdit, HConstants.DEFAULT_CLUSTER_ID, now, - this.htableDescriptor); + this.log.append(regionInfo, regionInfo.getTableDesc().getName(), + walEdit, HConstants.DEFAULT_CLUSTER_ID, now); } // Now request the ICV to the store, this will set the timestamp @@ -3705,19 +3620,19 @@ public class HRegion implements HeapSize { // , Writable{ private void checkFamily(final byte [] family) throws NoSuchColumnFamilyException { - if (!this.htableDescriptor.hasFamily(family)) { + if(!regionInfo.getTableDesc().hasFamily(family)) { throw new NoSuchColumnFamilyException("Column family " + - Bytes.toString(family) + " does not exist in region " + this - + " in table " + this.htableDescriptor); - } + Bytes.toStringBinary(family) + " does not exist in region " + this + + " in table " + regionInfo.getTableDesc()); + } } public static final long FIXED_OVERHEAD = ClassSize.align( - ClassSize.OBJECT + - ClassSize.ARRAY + - 28 * ClassSize.REFERENCE + Bytes.SIZEOF_INT + - (4 * Bytes.SIZEOF_LONG) + - Bytes.SIZEOF_BOOLEAN); + ClassSize.OBJECT + // this + (4 * Bytes.SIZEOF_LONG) + // memstoreFlushSize, lastFlushTime, blockingMemStoreSize, threadWakeFrequency + Bytes.SIZEOF_BOOLEAN + // splitRequest + ClassSize.ARRAY + // splitPoint + (26 * ClassSize.REFERENCE)); public static final long DEEP_OVERHEAD = FIXED_OVERHEAD + ClassSize.OBJECT + // closeLock @@ -3869,11 +3784,10 @@ public class HRegion implements HeapSize { // , Writable{ String metaStr = Bytes.toString(HConstants.META_TABLE_NAME); // Currently expects tables have one region only. if (p.getName().startsWith(rootStr)) { - region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.ROOT_REGIONINFO, - HTableDescriptor.ROOT_TABLEDESC, null); + region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.ROOT_REGIONINFO, null); } else if (p.getName().startsWith(metaStr)) { - region = HRegion.newHRegion(p, log, fs, c, - HRegionInfo.FIRST_META_REGIONINFO, HTableDescriptor.META_TABLEDESC, null); + region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.FIRST_META_REGIONINFO, + null); } else { throw new IOException("Not a known catalog table: " + p.toString()); } diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 30bc71f..41f8639 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -63,14 +63,12 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HServerLoad; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MasterAddressTracker; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; -import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.UnknownRowLockException; import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.YouAreDeadException; @@ -81,7 +79,6 @@ import org.apache.hadoop.hbase.catalog.RootLocationEditor; import org.apache.hadoop.hbase.client.Action; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HConnectionManager; import 
org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.MultiAction; import org.apache.hadoop.hbase.client.MultiPut; @@ -129,7 +126,6 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CompressionTest; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.InfoServer; import org.apache.hadoop.hbase.util.Pair; @@ -310,11 +306,6 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, */ private final long startcode; - /** - * Go here to get table descriptors. - */ - private TableDescriptors tableDescriptors; - /* * Strings to be used in forming the exception message for * RegionsAlreadyInTransitionException. @@ -333,8 +324,6 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, throws IOException, InterruptedException { this.fsOk = true; this.conf = conf; - // Set how many times to retry talking to another server over HConnection. - HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG); this.isOnline = false; checkCodecs(this.conf); @@ -908,7 +897,6 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, // Get fs instance used by this RS this.fs = FileSystem.get(this.conf); this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR)); - this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true); this.hlog = setupWALAndReplication(); // Init in here rather than in constructor after thread name has been set this.metrics = new RegionServerMetrics(); @@ -1466,7 +1454,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, // Add to online regions if all above was successful. addToOnlineRegions(r); - LOG.info("addToOnlineRegions is done" + r.getRegionInfo()); + // Update ZK, ROOT or META if (r.getRegionInfo().isRootRegion()) { RootLocationEditor.setRootLocation(getZooKeeper(), @@ -2394,16 +2382,15 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, region.getRegionNameAsString()); this.regionsInTransitionInRS.putIfAbsent(region.getEncodedNameAsBytes(), true); - HTableDescriptor htd = this.tableDescriptors.get(region.getTableName()); // Need to pass the expected version in the constructor. if (region.isRootRegion()) { - this.service.submit(new OpenRootHandler(this, this, region, htd, + this.service.submit(new OpenRootHandler(this, this, region, versionOfOfflineNode)); } else if (region.isMetaRegion()) { - this.service.submit(new OpenMetaHandler(this, this, region, htd, + this.service.submit(new OpenMetaHandler(this, this, region, versionOfOfflineNode)); } else { - this.service.submit(new OpenRegionHandler(this, this, region, htd, + this.service.submit(new OpenRegionHandler(this, this, region, versionOfOfflineNode)); } return RegionOpeningState.OPENED; @@ -2759,8 +2746,8 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, */ protected void checkOpen() throws IOException { if (this.stopped || this.abortRequested) { - throw new RegionServerStoppedException("Server " + getServerName() + - " not running" + (this.abortRequested ? ", aborting" : "")); + throw new RegionServerStoppedException("Server not running" + + (this.abortRequested ? 
", aborting" : "")); } if (!fsOk) { throw new RegionServerStoppedException("File system not available"); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java index 65a5c36..4ff8e2d 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java @@ -22,7 +22,10 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.RemoteExceptionHandler; +import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; @@ -165,12 +168,6 @@ class LogRoller extends Thread implements WALActionsListener { } @Override - public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, - WALEdit logEdit) { - //Not interested - } - - @Override public void logCloseRequested() { // not interested } diff --git src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index 7c04a72..74748ec 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -78,4 +78,4 @@ public interface RegionServerServices extends OnlineRegions { */ public Map getRegionsInTransitionInRS(); -} +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java index 5a38758..470e0c8 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java @@ -178,9 +178,9 @@ public class SplitTransaction { return false; } long rid = getDaughterRegionIdTimestamp(hri); - this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow, + this.hri_a = new HRegionInfo(hri.getTableDesc(), startKey, this.splitrow, false, rid); - this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey, + this.hri_b = new HRegionInfo(hri.getTableDesc(), this.splitrow, endKey, false, rid); return true; } @@ -664,7 +664,7 @@ public class SplitTransaction { this.splitdir, hri); HRegion r = HRegion.newHRegion(this.parent.getTableDir(), this.parent.getLog(), fs, this.parent.getConf(), - hri, this.parent.getTableDesc(), rsServices); + hri, rsServices); r.readRequestsCount.set(this.parent.getReadRequestsCount() / 2); r.writeRequestsCount.set(this.parent.getWriteRequestsCount() / 2); HRegion.moveInitialFilesIntoPlace(fs, regionDir, r.getRegionDir()); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/Store.java src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 7761c42..8d329b7 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -139,7 +139,7 @@ public class Store implements HeapSize { // Comparing KeyValues final KeyValue.KVComparator comparator; - + /** * Constructor * @param basedir qualified path under which the region directory lives; @@ -195,6 
+195,7 @@ public class Store implements HeapSize { // Setting up cache configuration for this family this.cacheConf = new CacheConfig(conf, family); + this.blockingStoreFileCount = conf.getInt("hbase.hstore.blockingStoreFiles", 7); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java index 66e5706..afb63aa 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver.handler; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.regionserver.RegionServerServices; @@ -31,14 +30,13 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; */ public class OpenMetaHandler extends OpenRegionHandler { public OpenMetaHandler(final Server server, - final RegionServerServices rsServices, HRegionInfo regionInfo, - final HTableDescriptor htd) { - this(server, rsServices, regionInfo, htd, -1); + final RegionServerServices rsServices, HRegionInfo regionInfo) { + this(server, rsServices, regionInfo, -1); } public OpenMetaHandler(final Server server, final RegionServerServices rsServices, HRegionInfo regionInfo, - final HTableDescriptor htd, int versionOfOfflineNode) { - super(server, rsServices, regionInfo, htd, EventType.M_RS_OPEN_META, + int versionOfOfflineNode) { + super(server, rsServices, regionInfo, EventType.M_RS_OPEN_META, versionOfOfflineNode); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java index 3ef364e..bb49bac 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java @@ -24,9 +24,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -46,7 +44,6 @@ public class OpenRegionHandler extends EventHandler { private final RegionServerServices rsServices; private final HRegionInfo regionInfo; - private final HTableDescriptor htd; // We get version of our znode at start of open process and monitor it across // the total open. 
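The versionOfOfflineNode bookkeeping described above is optimistic concurrency control: remember the znode version when the open starts and make every later transition supply that expected version, so a hijacked znode fails the update instead of being silently overwritten. A generic compare-and-set sketch of the idea (an in-memory stand-in, not the ZooKeeper API):

    import java.util.concurrent.atomic.AtomicInteger;

    public class VersionedNode {
      private final AtomicInteger version = new AtomicInteger(0);

      int currentVersion() { return version.get(); }

      // Succeeds only if nobody else bumped the version since we read it,
      // i.e. the znode still belongs to the handler that started the open.
      boolean transition(int expectedVersion) {
        return version.compareAndSet(expectedVersion, expectedVersion + 1);
      }

      public static void main(String[] args) {
        VersionedNode node = new VersionedNode();
        int seen = node.currentVersion();          // captured at start of open
        System.out.println(node.transition(seen)); // true: still ours
        System.out.println(node.transition(seen)); // false: version moved on
      }
    }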
We'll fail the open if someone hijacks our znode; we can @@ -56,25 +53,23 @@ public class OpenRegionHandler extends EventHandler { private volatile int versionOfOfflineNode = -1; public OpenRegionHandler(final Server server, - final RegionServerServices rsServices, HRegionInfo regionInfo, - HTableDescriptor htd) { - this(server, rsServices, regionInfo, htd, EventType.M_RS_OPEN_REGION, -1); + final RegionServerServices rsServices, HRegionInfo regionInfo) { + this(server, rsServices, regionInfo, EventType.M_RS_OPEN_REGION, -1); } public OpenRegionHandler(final Server server, final RegionServerServices rsServices, HRegionInfo regionInfo, - HTableDescriptor htd, int versionOfOfflineNode) { - this(server, rsServices, regionInfo, htd, EventType.M_RS_OPEN_REGION, + int versionOfOfflineNode) { + this(server, rsServices, regionInfo, EventType.M_RS_OPEN_REGION, versionOfOfflineNode); } protected OpenRegionHandler(final Server server, final RegionServerServices rsServices, final HRegionInfo regionInfo, - final HTableDescriptor htd, EventType eventType, + EventType eventType, final int versionOfOfflineNode) { super(server, eventType); this.rsServices = rsServices; this.regionInfo = regionInfo; - this.htd = htd; this.versionOfOfflineNode = versionOfOfflineNode; } @@ -86,7 +81,9 @@ public class OpenRegionHandler extends EventHandler { public void process() throws IOException { try { final String name = regionInfo.getRegionNameAsString(); + LOG.debug("Processing open of " + name); if (this.server.isStopped() || this.rsServices.isStopping()) { + LOG.info("Server stopping or stopped, skipping open of " + name); return; } final String encodedName = regionInfo.getEncodedName(); @@ -204,10 +201,10 @@ public class OpenRegionHandler extends EventHandler { Thread.currentThread().interrupt(); } } - // Was there an exception opening the region? This should trigger on // InterruptedException too. If so, we failed. Even if tickle opening fails // then it is a failure. + // TODO:is the following line correct? got modified by HBASE-451 but not following why... return ((!Thread.interrupted() && t.getException() == null) && tickleOpening); } @@ -325,9 +322,8 @@ public class OpenRegionHandler extends EventHandler { try { // Instantiate the region. This also periodically tickles our zk OPENING // state so master doesn't timeout this region in transition. - region = HRegion.openHRegion(this.regionInfo, this.htd, - this.rsServices.getWAL(), this.server.getConfiguration(), - this.rsServices, + region = HRegion.openHRegion(this.regionInfo, this.rsServices.getWAL(), + this.server.getConfiguration(), this.rsServices, new CancelableProgressable() { public boolean progress() { // We may lose the znode ownership during the open. 
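The CancelableProgressable handed to openHRegion() above is a heartbeat with a veto: the long-running open calls progress() between steps, and a false return asks it to abort because the caller no longer wants the result (here, because znode ownership was lost). A small sketch of that contract (illustrative names):

    public class ProgressableSketch {
      interface CancelableProgressable {
        /** @return false to ask the long-running task to abort. */
        boolean progress();
      }

      // A slow task tickles the callback between steps and bails out early
      // when the owner signals that the work is no longer wanted.
      static boolean runSteps(int steps, CancelableProgressable reporter) {
        for (int i = 0; i < steps; i++) {
          // ... one unit of real work would happen here ...
          if (!reporter.progress()) {
            return false; // canceled mid-flight
          }
        }
        return true;
      }

      public static void main(String[] args) {
        final int[] budget = {2}; // pretend ownership is lost after two ticks
        boolean done = runSteps(5, () -> budget[0]-- > 0);
        System.out.println(done ? "completed" : "aborted early");
      }
    }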
Currently its diff --git src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java index 9a4f01a..fb65158 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver.handler; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.regionserver.RegionServerServices; @@ -31,14 +30,13 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; */ public class OpenRootHandler extends OpenRegionHandler { public OpenRootHandler(final Server server, - final RegionServerServices rsServices, HRegionInfo regionInfo, - final HTableDescriptor htd) { - super(server, rsServices, regionInfo, htd, EventType.M_RS_OPEN_ROOT, -1); + final RegionServerServices rsServices, HRegionInfo regionInfo) { + super(server, rsServices, regionInfo, EventType.M_RS_OPEN_ROOT, -1); } public OpenRootHandler(final Server server, final RegionServerServices rsServices, HRegionInfo regionInfo, - final HTableDescriptor htd, int versionOfOfflineNode) { - super(server, rsServices, regionInfo, htd, EventType.M_RS_OPEN_ROOT, + int versionOfOfflineNode) { + super(server, rsServices, regionInfo, EventType.M_RS_OPEN_ROOT, versionOfOfflineNode); } } diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java index 74bd7d3..f9bd5c0 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java @@ -916,14 +916,13 @@ public class HLog implements Syncable { /** Append an entry to the log. - * + *TODO: why do we have two of these??? * @param regionInfo * @param logEdit * @param logKey * @throws IOException */ - public void append(HRegionInfo regionInfo, HLogKey logKey, WALEdit logEdit, - HTableDescriptor htd) + public void append(HRegionInfo regionInfo, HLogKey logKey, WALEdit logEdit) throws IOException { if (this.closed) { throw new IOException("Cannot append; log is closed"); @@ -938,9 +937,9 @@ public class HLog implements Syncable { // is greater than or equal to the value in lastSeqWritten. 
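The putIfAbsent described above is how the log tracks, per region, the oldest sequence id not yet flushed: only the first append after a flush records its id, so the entry always lower-bounds what is still in the memstore, and completing a flush clears it. A standalone sketch of that bookkeeping (String keys stand in for the encoded-name bytes):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.atomic.AtomicLong;

    public class OldestUnflushedTracker {
      private final AtomicLong logSeqNum = new AtomicLong(0);
      private final ConcurrentMap<String, Long> lastSeqWritten =
          new ConcurrentHashMap<String, Long>();

      long append(String region) {
        long seq = logSeqNum.incrementAndGet();
        // Only the first append since the last flush wins; later, larger
        // sequence ids must not overwrite the older lower bound.
        lastSeqWritten.putIfAbsent(region, seq);
        return seq;
      }

      void completeCacheFlush(String region) {
        lastSeqWritten.remove(region); // next append re-seeds the lower bound
      }

      public static void main(String[] args) {
        OldestUnflushedTracker wal = new OldestUnflushedTracker();
        wal.append("r1"); wal.append("r1"); wal.append("r1");
        System.out.println(wal.lastSeqWritten); // {r1=1}: oldest unflushed edit
      }
    }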
this.lastSeqWritten.putIfAbsent(regionInfo.getEncodedNameAsBytes(), Long.valueOf(seqNum)); - doWrite(regionInfo, logKey, logEdit, htd); + doWrite(regionInfo, logKey, logEdit); this.numEntries.incrementAndGet(); - if (htd.isDeferredLogFlush()) { + if (regionInfo.getTableDesc().isDeferredLogFlush()) { lastDeferredSeq = seqNum; } } @@ -948,7 +947,7 @@ public class HLog implements Syncable { // Sync if catalog region, and if not then check if that table supports // deferred log flushing if (regionInfo.isMetaRegion() || - !htd.isDeferredLogFlush()) { + !regionInfo.getTableDesc().isDeferredLogFlush()) { // sync txn to file system this.sync(); } @@ -965,9 +964,9 @@ public class HLog implements Syncable { * @throws IOException */ public void append(HRegionInfo info, byte [] tableName, WALEdit edits, - final long now, HTableDescriptor htd) + final long now) throws IOException { - append(info, tableName, edits, HConstants.DEFAULT_CLUSTER_ID, now, htd); + append(info, tableName, edits, HConstants.DEFAULT_CLUSTER_ID, now); } /** @@ -995,8 +994,9 @@ public class HLog implements Syncable { * @throws IOException */ public void append(HRegionInfo info, byte [] tableName, WALEdit edits, UUID clusterId, - final long now, HTableDescriptor htd) + final long now) throws IOException { + HTableDescriptor htd = info.getTableDesc(); if (edits.isEmpty()) return; if (this.closed) { throw new IOException("Cannot append; log is closed"); @@ -1013,7 +1013,7 @@ public class HLog implements Syncable { byte [] hriKey = info.getEncodedNameAsBytes(); this.lastSeqWritten.putIfAbsent(hriKey, seqNum); HLogKey logKey = makeKey(hriKey, tableName, seqNum, now, clusterId); - doWrite(info, logKey, edits, htd); + doWrite(info, logKey, edits); this.numEntries.incrementAndGet(); if (htd.isDeferredLogFlush()) { lastDeferredSeq = seqNum; @@ -1198,15 +1198,14 @@ public class HLog implements Syncable { } } - protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit, - HTableDescriptor htd) + protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException { if (!this.enabled) { return; } if (!this.listeners.isEmpty()) { for (WALActionsListener i: this.listeners) { - i.visitLogEntryBeforeWrite(htd, logKey, logEdit); + i.visitLogEntryBeforeWrite(info, logKey, logEdit); } } try { @@ -1222,12 +1221,12 @@ public class HLog implements Syncable { writeOps++; if (took > 1000) { long len = 0; - for(KeyValue kv : logEdit.getKeyValues()) { - len += kv.getLength(); + for(KeyValue kv : logEdit.getKeyValues()) { + len += kv.getLength(); } LOG.warn(String.format( "%s took %d ms appending an edit to hlog; editcount=%d, len~=%s", - Thread.currentThread().getName(), took, this.numEntries.get(), + Thread.currentThread().getName(), took, this.numEntries.get(), StringUtils.humanReadableInt(len))); } } catch (IOException e) { @@ -1237,7 +1236,6 @@ public class HLog implements Syncable { } } - /** @return How many items have been added to the log */ int getNumEntries() { return numEntries.get(); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java index 833e840..63139fb 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java @@ -173,10 +173,7 @@ public class SequenceFileLogWriter implements HLog.Writer { @Override public void close() throws IOException { - if (this.writer != 
null) {
-      this.writer.close();
-      this.writer = null;
-    }
+    this.writer.close();
   }
 
   @Override
@@ -208,4 +205,4 @@ public class SequenceFileLogWriter implements HLog.Writer {
   public FSDataOutputStream getWriterFSDataOutputStream() {
     return this.writer_out;
   }
-}
+}
\ No newline at end of file
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java
index 151c90b..c20de42 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.regionserver.wal;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 
 /**
  * Get notification of {@link HLog}/WAL log events. The invocations are inline
@@ -52,14 +51,4 @@ public interface WALActionsListener {
    */
   public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
     WALEdit logEdit);
-
-  /**
-   *
-   * @param htd
-   * @param logKey
-   * @param logEdit
-   */
-  public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
-    WALEdit logEdit);
-
 }
diff --git src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
index 8e1f6e0..805a635 100644
--- src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
+++ src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -133,18 +132,12 @@ public class Replication implements WALActionsListener {
   @Override
   public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
       WALEdit logEdit) {
-    // Not interested
-  }
-
-  @Override
-  public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
-      WALEdit logEdit) {
     NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
     byte[] family;
     for (KeyValue kv : logEdit.getKeyValues()) {
       family = kv.getFamily();
-      int scope = htd.getFamily(family).getScope();
+      int scope = info.getTableDesc().getFamily(family).getScope();
       if (scope != REPLICATION_SCOPE_LOCAL &&
           !scopes.containsKey(family)) {
         scopes.put(family, scope);
diff --git src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
index ef1279b..e72cfa2 100644
--- src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
+++ src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
@@ -272,19 +272,17 @@ public class ThriftServer {
 
     public List<TRegionInfo> getTableRegions(ByteBuffer tableName)
     throws IOError {
       try{
-        List<HRegionInfo> hris = this.admin.getTableRegions(tableName.array());
+        List<HRegionInfo> HRegions = this.admin.getTableRegions(tableName.array());
         List<TRegionInfo> regions = new ArrayList<TRegionInfo>();
-        if (hris != null) {
-          for (HRegionInfo regionInfo : hris){
-            TRegionInfo region = new TRegionInfo();
-            region.startKey = ByteBuffer.wrap(regionInfo.getStartKey());
-            region.endKey = ByteBuffer.wrap(regionInfo.getEndKey());
-            region.id = regionInfo.getRegionId();
-            region.name =
ByteBuffer.wrap(regionInfo.getRegionName()); - region.version = regionInfo.getVersion(); - regions.add(region); - } + for (HRegionInfo regionInfo : HRegions){ + TRegionInfo region = new TRegionInfo(); + region.startKey = ByteBuffer.wrap(regionInfo.getStartKey()); + region.endKey = ByteBuffer.wrap(regionInfo.getEndKey()); + region.id = regionInfo.getRegionId(); + region.name = ByteBuffer.wrap(regionInfo.getRegionName()); + region.version = regionInfo.getVersion(); + regions.add(region); } return regions; } catch (IOException e){ diff --git src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java deleted file mode 100644 index 24570c9..0000000 --- src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ /dev/null @@ -1,204 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.concurrent.ConcurrentHashMap; - -import org.apache.commons.lang.NotImplementedException; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.TableExistsException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - - -/** - * Implementation of {@link TableDescriptors} that reads descriptors from the - * passed filesystem. It expects descriptors to be in a file under the - * table's directory in FS. Can be read-only -- i.e. does not modify - * the filesystem or can be read and write. - */ -public class FSTableDescriptors implements TableDescriptors { - - private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class); - private final FileSystem fs; - private final Path rootdir; - private final boolean fsreadonly; - long cachehits = 0; - long invocations = 0; - - // This cache does not age out the old stuff. Thinking is that the amount - // of data we keep up in here is so small, no need to do occasional purge. - // TODO. - private final Map cache = - new ConcurrentHashMap(); - - /** - * Data structure to hold modification time and table descriptor. 
diff --git src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
deleted file mode 100644
index 24570c9..0000000
--- src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.apache.commons.lang.NotImplementedException;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-
-/**
- * Implementation of {@link TableDescriptors} that reads descriptors from the
- * passed filesystem.  It expects descriptors to be in a file under the
- * table's directory in FS.  Can be read-only -- i.e. does not modify
- * the filesystem or can be read and write.
- */
-public class FSTableDescriptors implements TableDescriptors {
-
-  private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class);
-  private final FileSystem fs;
-  private final Path rootdir;
-  private final boolean fsreadonly;
-  long cachehits = 0;
-  long invocations = 0;
-
-  // This cache does not age out the old stuff.  Thinking is that the amount
-  // of data we keep up in here is so small, no need to do occasional purge.
-  // TODO.
-  private final Map cache =
-    new ConcurrentHashMap();
-
-  /**
-   * Data structure to hold modification time and table descriptor.
-   */
-  static class TableDescriptorModtime {
-    private final HTableDescriptor descriptor;
-    private final long modtime;
-
-    TableDescriptorModtime(final long modtime, final HTableDescriptor htd) {
-      this.descriptor = htd;
-      this.modtime = modtime;
-    }
-
-    long getModtime() {
-      return this.modtime;
-    }
-
-    HTableDescriptor getTableDescriptor() {
-      return this.descriptor;
-    }
-  }
-
-  public FSTableDescriptors(final FileSystem fs, final Path rootdir) {
-    this(fs, rootdir, false);
-  }
-
-  /**
-   * @param fs
-   * @param rootdir
-   * @param fsreadOnly True if we are read-only when it comes to filesystem
-   * operations; i.e. on remove, we do not do delete in fs.
-   */
-  public FSTableDescriptors(final FileSystem fs, final Path rootdir,
-      final boolean fsreadOnly) {
-    super();
-    this.fs = fs;
-    this.rootdir = rootdir;
-    this.fsreadonly = fsreadOnly;
-  }
-
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.hbase.TableDescriptors#getHTableDescriptor(java.lang.String)
-   */
-  @Override
-  public HTableDescriptor get(final byte [] tablename)
-  throws TableExistsException, FileNotFoundException, IOException {
-    return get(Bytes.toString(tablename));
-  }
-
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptor(byte[])
-   */
-  @Override
-  public HTableDescriptor get(final String tablename)
-  throws TableExistsException, FileNotFoundException, IOException {
-    invocations++;
-    if (HTableDescriptor.ROOT_TABLEDESC.getNameAsString().equals(tablename)) {
-      cachehits++;
-      return HTableDescriptor.ROOT_TABLEDESC;
-    }
-    if (HTableDescriptor.META_TABLEDESC.getNameAsString().equals(tablename)) {
-      cachehits++;
-      return HTableDescriptor.META_TABLEDESC;
-    }
-    // .META. and -ROOT- is already handled. If some one tries to get the descriptor for
-    // .logs, .oldlogs or .corrupt throw an exception.
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename)) {
-      throw new IOException("No descriptor found for table = " + tablename);
-    }
-
-    // Look in cache of descriptors.
-    TableDescriptorModtime tdm = this.cache.get(tablename);
-
-    // Check mod time has not changed (this is trip to NN).
-    long modtime =
-      FSUtils.getTableInfoModtime(this.fs, this.rootdir, tablename);
-    if (tdm != null) {
-      if (modtime <= tdm.getModtime()) {
-        cachehits++;
-        return tdm.getTableDescriptor();
-      }
-    }
-    HTableDescriptor htd =
-      FSUtils.getTableDescriptor(this.fs, this.rootdir, tablename);
-    if (htd == null) {
-      // More likely is above will throw a FileNotFoundException
-      throw new TableExistsException("No descriptor for " + tablename);
-    }
-    this.cache.put(tablename, new TableDescriptorModtime(modtime, htd));
-    return htd;
-  }
-
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path)
-   */
-  @Override
-  public Map getAll()
-  throws IOException {
-    Map htds = new TreeMap();
-    List tableDirs = FSUtils.getTableDirs(fs, rootdir);
-    for (Path d: tableDirs) {
-      HTableDescriptor htd = null;
-      try {
-
-        htd = get(d.getName());
-      } catch (FileNotFoundException fnfe) {
-        // inability of retrieving one HTD shouldn't stop getting the remaining
-        LOG.warn("Trouble retrieving htd", fnfe);
-      }
-      if (htd == null) continue;
-      htds.put(d.getName(), htd);
-    }
-    return htds;
-  }
-
-  @Override
-  public void add(HTableDescriptor htd) throws IOException {
-    if (Bytes.equals(HConstants.ROOT_TABLE_NAME, htd.getName())) {
-      throw new NotImplementedException();
-    }
-    if (Bytes.equals(HConstants.META_TABLE_NAME, htd.getName())) {
-      throw new NotImplementedException();
-    }
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
-      throw new NotImplementedException();
-    }
-    if (!this.fsreadonly) FSUtils.updateHTableDescriptor(this.fs, this.rootdir, htd);
-    long modtime =
-      FSUtils.getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
-    this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd));
-  }
-
-  @Override
-  public HTableDescriptor remove(final String tablename)
-  throws IOException {
-    if (!this.fsreadonly) {
-      Path tabledir = FSUtils.getTablePath(this.rootdir, tablename);
-      if (this.fs.exists(tabledir)) {
-        if (!this.fs.delete(tabledir, true)) {
-          throw new IOException("Failed delete of " + tabledir.toString());
-        }
-      }
-    }
-    TableDescriptorModtime tdm = this.cache.remove(tablename);
-    return tdm == null? null: tdm.getTableDescriptor();
-  }
-}
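[Review note, not part of the patch: the FSTableDescriptors class deleted above caches each descriptor together with the .tableinfo modification time it was read at, and only re-reads when the observed modtime advances (one NameNode round trip per get). A self-contained sketch of that pattern under assumed, generic names -- not the removed class itself:

    import java.io.IOException;
    import java.util.AbstractMap.SimpleEntry;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class ModtimeCache<V> {
      interface Loader<T> { T load(String key) throws IOException; }

      // key -> (modtime observed at load, cached value); never aged out,
      // mirroring the "no occasional purge" comment in the deleted class.
      private final Map<String, SimpleEntry<Long, V>> cache =
          new ConcurrentHashMap<String, SimpleEntry<Long, V>>();

      V get(String key, long currentModtime, Loader<V> loader) throws IOException {
        SimpleEntry<Long, V> e = cache.get(key);
        if (e != null && currentModtime <= e.getKey()) {
          return e.getValue();              // cache hit: file unchanged
        }
        V v = loader.load(key);             // re-read from the filesystem
        cache.put(key, new SimpleEntry<Long, V>(currentModtime, v));
        return v;
      }
    }
]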
diff --git src/main/java/org/apache/hadoop/hbase/util/FSUtils.java src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index e1b521a..164486e 100644
--- src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -50,9 +49,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;

 /**
@@ -505,20 +502,6 @@ public abstract class FSUtils {
     return fs.exists(rootRegionDir);
   }

-  /**
-   * Checks if .tableinfo exists for given table
-   *
-   * @param fs file system
-   * @param rootdir root directory of HBase installation
-   * @param tableName name of table
-   * @return true if exists
-   * @throws IOException
-   */
-  public static boolean tableInfoExists(FileSystem fs, Path rootdir,
-      String tableName) throws IOException {
-    Path tablePath = getTableInfoPath(rootdir, tableName);
-    return fs.exists(tablePath);
-  }

   /**
    * Compute HDFS blocks distribution of a given file, or a portion of the file
@@ -774,17 +757,14 @@ public abstract class FSUtils {
     }

     public boolean accept(Path p) {
-      boolean isValid = false;
+      boolean isdir = false;
       try {
-        if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p)) {
-          isValid = false;
-        } else {
-          isValid = this.fs.getFileStatus(p).isDir();
-        }
+        // TODO: reapply HBASE-4061??
+        isdir = this.fs.getFileStatus(p).isDir();
       } catch (IOException e) {
         e.printStackTrace();
       }
-      return isValid;
+      return isdir;
     }
   }

@@ -842,297 +822,8 @@ public abstract class FSUtils {
    */
   public abstract void recoverFileLease(final FileSystem fs, final Path p,
       Configuration conf) throws IOException;
-
-  /**
-   * @param fs
-   * @param rootdir
-   * @return All the table directories under rootdir. Ignore non table hbase folders such as
-   * .logs, .oldlogs, .corrupt, .META., and -ROOT- folders.
-   * @throws IOException
-   */
-  public static List getTableDirs(final FileSystem fs, final Path rootdir)
-  throws IOException {
-    // presumes any directory under hbase.rootdir is a table
-    FileStatus [] dirs = fs.listStatus(rootdir, new DirFilter(fs));
-    List tabledirs = new ArrayList(dirs.length);
-    for (FileStatus dir: dirs) {
-      Path p = dir.getPath();
-      String tableName = p.getName();
-      if (!HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName)) {
-        tabledirs.add(p);
-      }
-    }
-    return tabledirs;
-  }
-
-  /**
-   * Get table info path for a table.
-   * @param rootdir
-   * @param tableName
-   * @return Table info path
-   */
-  private static Path getTableInfoPath(Path rootdir, String tablename) {
-    Path tablePath = getTablePath(rootdir, tablename);
-    return new Path(tablePath, HConstants.TABLEINFO_NAME);
-  }

   /**
-   * @param fs
-   * @param rootdir
-   * @param tablename
-   * @return Modification time for the table {@link HConstants#TABLEINFO_NAME} file.
-   * @throws IOException
-   */
-  public static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
-      final String tablename)
-  throws IOException {
-    Path p = getTableInfoPath(rootdir, tablename);
-    FileStatus [] status = fs.listStatus(p);
-    if (status == null || status.length < 1) {
-      throw new FileNotFoundException("No status for " + p.toString());
-    }
-    return status[0].getModificationTime();
-  }
-
-  public static Path getTablePath(Path rootdir, byte [] tableName) {
-    return getTablePath(rootdir, Bytes.toString(tableName));
-  }
-
-  public static Path getTablePath(Path rootdir, final String tableName) {
-    return new Path(rootdir, tableName);
-  }
-
-  private static FileSystem getCurrentFileSystem(Configuration conf)
-  throws IOException {
-    return getRootDir(conf).getFileSystem(conf);
-  }
-
-  /**
-   * Get HTableDescriptor
-   * @param config
-   * @param tableName
-   * @return HTableDescriptor for table
-   * @throws IOException
-   */
-  public static HTableDescriptor getHTableDescriptor(Configuration config,
-      String tableName)
-  throws IOException {
-    Path path = getRootDir(config);
-    FileSystem fs = path.getFileSystem(config);
-    return getTableDescriptor(fs, path, tableName);
-  }
-
-  /**
-   * Get HTD from HDFS.
-   * @param fs
-   * @param hbaseRootDir
-   * @param tableName
-   * @return Descriptor or null if none found.
-   * @throws IOException
-   */
-  public static HTableDescriptor getTableDescriptor(FileSystem fs,
-      Path hbaseRootDir, byte[] tableName)
-  throws IOException {
-    return getTableDescriptor(fs, hbaseRootDir, Bytes.toString(tableName));
-  }
-
-  public static HTableDescriptor getTableDescriptor(FileSystem fs,
-      Path hbaseRootDir, String tableName) {
-    HTableDescriptor htd = null;
-    try {
-      htd = getTableDescriptor(fs, getTablePath(hbaseRootDir, tableName));
-    } catch (NullPointerException e) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = " +
-        tableName , e);
-    } catch (IOException ioe) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = " +
-        tableName , ioe);
-    }
-    return htd;
-  }
-
-  public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
-  throws IOException, NullPointerException {
-    if (tableDir == null) throw new NullPointerException();
-    Path tableinfo = new Path(tableDir, HConstants.TABLEINFO_NAME);
-    FSDataInputStream fsDataInputStream = fs.open(tableinfo);
-    HTableDescriptor hTableDescriptor = null;
-    try {
-      hTableDescriptor = new HTableDescriptor();
-      hTableDescriptor.readFields(fsDataInputStream);
-    } finally {
-      fsDataInputStream.close();
-    }
-    return hTableDescriptor;
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
-   *
-   * @param htableDescriptor
-   * @param conf
-   */
-  public static boolean createTableDescriptor(
-      HTableDescriptor htableDescriptor, Configuration conf) throws IOException {
-    return createTableDescriptor(htableDescriptor, conf, false);
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
-   * forceCreation is true then even if previous table descriptor is present it
-   * will be overwritten
-   *
-   * @param htableDescriptor
-   * @param conf
-   * @param forceCreation
-   */
-  public static boolean createTableDescriptor(
-      HTableDescriptor htableDescriptor, Configuration conf,
-      boolean forceCreation) throws IOException {
-    FileSystem fs = getCurrentFileSystem(conf);
-    return createTableDescriptor(fs, getRootDir(conf), htableDescriptor,
-        forceCreation);
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
-   *
-   * @param fs
-   * @param htableDescriptor
-   * @param rootdir
-   */
-  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor htableDescriptor) throws IOException {
-    return createTableDescriptor(fs, rootdir, htableDescriptor, false);
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
-   * forceCreation is true then even if previous table descriptor is present it
-   * will be overwritten
-   *
-   * @param fs
-   * @param htableDescriptor
-   * @param rootdir
-   * @param forceCreation
-   */
-  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor htableDescriptor, boolean forceCreation)
-  throws IOException {
-    Path tableInfoPath = getTableInfoPath(rootdir, htableDescriptor
-        .getNameAsString());
-    LOG.info("Current tableInfoPath = " + tableInfoPath);
-    if (!forceCreation) {
-      if (fs.exists(tableInfoPath)
-          && fs.getFileStatus(tableInfoPath).getLen() > 0) {
-        LOG.info("TableInfo already exists.. Skipping creation");
-        return false;
-      }
-    }
-    writeTableDescriptor(fs, htableDescriptor, getTablePath(rootdir,
-        htableDescriptor.getNameAsString()), forceCreation);
-
-    return true;
-  }
-
-  /**
-   * Deletes a table's directory from the file system if exists. Used in unit
-   * tests.
-   */
-  public static void deleteTableDescriptorIfExists(String tableName,
-      Configuration conf) throws IOException {
-    FileSystem fs = getCurrentFileSystem(conf);
-    Path tableInfoPath = getTableInfoPath(getRootDir(conf), tableName);
-    if (fs.exists(tableInfoPath))
-      deleteDirectory(fs, tableInfoPath);
-  }
-
-  /**
-   * Called when we are creating a table to write out the tables' descriptor.
-   * @param fs
-   * @param hTableDescriptor
-   * @param tableDir
-   * @param forceCreation True if we are to force creation
-   * @throws IOException
-   */
-  private static void writeTableDescriptor(FileSystem fs,
-      HTableDescriptor hTableDescriptor, Path tableDir, boolean forceCreation)
-  throws IOException {
-    // Create in tmpdir and then move into place in case we crash after
-    // create but before close. If we don't successfully close the file,
-    // subsequent region reopens will fail the below because create is
-    // registered in NN.
-    Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
-    Path tmpPath = new Path(new Path(tableDir, ".tmp"),
-        HConstants.TABLEINFO_NAME + "." + System.currentTimeMillis());
-    LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath);
-    try {
-      writeHTD(fs, tmpPath, hTableDescriptor);
-    } catch (IOException e) {
-      LOG.error("Unable to write the tabledescriptor in the path" + tmpPath
-          + ".", e);
-      fs.delete(tmpPath, true);
-      throw e;
-    }
-    // TODO: The below is less than ideal and likely error prone. There is a
-    // better rename in hadoops after 0.20 that takes rename options (this has
-    // its own issues according to mighty Todd in that old readers may fail
-    // as we cross the renme transition) but until then, we have this
-    // forceCreation flag which does a delete and then we rename so there is a
-    // hole. Need to fix.
-    try {
-      if (forceCreation) {
-        if (fs.exists(tableInfoPath) && !fs.delete(tableInfoPath, false)) {
-          String errMsg = "Unable to delete " + tableInfoPath
-              + " while forcefully writing the table descriptor.";
-          LOG.error(errMsg);
-          throw new IOException(errMsg);
-        }
-      }
-      if (!fs.rename(tmpPath, tableInfoPath)) {
-        String errMsg = "Unable to rename " + tmpPath + " to " + tableInfoPath;
-        LOG.error(errMsg);
-        throw new IOException(errMsg);
-      } else {
-        LOG.info("TableDescriptor stored. TableInfoPath = " + tableInfoPath);
-      }
-    } finally {
-      fs.delete(tmpPath, true);
-    }
-  }
-
-  /**
-   * Update table descriptor
-   * @param fs
-   * @param conf
-   * @param hTableDescriptor
-   * @throws IOException
-   */
-  public static void updateHTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor hTableDescriptor)
-  throws IOException {
-    Path tableInfoPath =
-      getTableInfoPath(rootdir, hTableDescriptor.getNameAsString());
-    writeTableDescriptor(fs, hTableDescriptor, tableInfoPath.getParent(), true);
-    LOG.info("Updated tableinfo=" + tableInfoPath + " to " +
-      hTableDescriptor.toString());
-  }
-
-  private static void writeHTD(final FileSystem fs, final Path p,
-      final HTableDescriptor htd)
-  throws IOException {
-    FSDataOutputStream out = fs.create(p, true);
-    try {
-      htd.write(out);
-      out.write('\n');
-      out.write('\n');
-      out.write(Bytes.toBytes(htd.toString()));
-    } finally {
-      out.close();
-    }
-  }
-
-  /**
    * Runs through the HBase rootdir and creates a reverse lookup map for
    * table StoreFile names to the full Path.
    *
@@ -1185,7 +876,7 @@ public abstract class FSUtils {
         }
       }
     }
-    return map;
+    return map;
   }
 }
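[Review note, not part of the patch: the removed writeTableDescriptor() above writes the descriptor to a .tmp file and then renames it into place, so a crash between create and close never leaves a half-written .tableinfo visible. A hedged, self-contained sketch of that pattern (helper shape and names are mine, not the removed method's):

    // Assumes the standard Hadoop FS API: FileSystem, Path, FSDataOutputStream.
    static void atomicWriteDescriptor(FileSystem fs, Path tableDir,
        HTableDescriptor htd) throws IOException {
      Path tmp = new Path(new Path(tableDir, ".tmp"),
          ".tableinfo." + System.currentTimeMillis());
      FSDataOutputStream out = fs.create(tmp, true);
      try {
        htd.write(out);                       // Writable serialization
      } finally {
        out.close();
      }
      // rename() is the publish step; as the TODO in the hunk notes, the
      // forceCreation delete-then-rename variant leaves a small hole.
      if (!fs.rename(tmp, new Path(tableDir, ".tableinfo"))) {
        throw new IOException("Failed rename of " + tmp);
      }
    }
]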
diff --git src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 9e9e07b..58a3837 100644
--- src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -174,21 +174,18 @@ public class HBaseFsck {
     if (!checkMetaOnly) {
       AtomicInteger numSkipped = new AtomicInteger(0);
       HTableDescriptor[] allTables = getTables(numSkipped);
-      errors.print("Number of Tables: " +
-        (allTables == null ? 0 : allTables.length));
+      errors.print("Number of Tables: " + allTables.length);
       if (details) {
         if (numSkipped.get() > 0) {
           errors.detail("Number of Tables in flux: " + numSkipped.get());
         }
-        if (allTables != null && allTables.length > 0) {
-          for (HTableDescriptor td : allTables) {
+        for (HTableDescriptor td : allTables) {
           String tableName = td.getNameAsString();
           errors.detail("  Table: " + tableName + "\t" +
                (td.isReadOnly() ? "ro" : "rw") + "\t" +
                (td.isRootRegion() ? "ROOT" :
                 (td.isMetaRegion() ? "META" : "   ")) + "\t" +
                " families: " + td.getFamilies().size());
-          }
         }
       }
     }
@@ -277,7 +274,7 @@ public class HBaseFsck {
    * @throws KeeperException
    */
   private boolean isTableDisabled(HRegionInfo regionInfo) {
-    return disabledTables.contains(regionInfo.getTableName());
+    return disabledTables.contains(regionInfo.getTableDesc().getName());
   }

   /**
@@ -553,7 +550,7 @@ public class HBaseFsck {
       if (hbi.deployedOn.size() == 0) continue;

       // We should be safe here
-      String tableName = hbi.metaEntry.getTableNameAsString();
+      String tableName = hbi.metaEntry.getTableDesc().getNameAsString();
       TInfo modTInfo = tablesInfo.get(tableName);
       if (modTInfo == null) {
         modTInfo = new TInfo(tableName);
@@ -780,8 +777,8 @@ public class HBaseFsck {
    * @return tables that have not been modified recently
    * @throws IOException if an error is encountered
    */
-  HTableDescriptor[] getTables(AtomicInteger numSkipped) {
-    List tableNames = new ArrayList();
+  HTableDescriptor[] getTables(AtomicInteger numSkipped) {
+    TreeSet uniqueTables = new TreeSet();
     long now = System.currentTimeMillis();

     for (HbckInfo hbi : regionInfo.values()) {
@@ -791,27 +788,15 @@ public class HBaseFsck {
       // pick only those tables that were not modified in the last few milliseconds.
       if (info != null && info.getStartKey().length == 0 && !info.isMetaRegion()) {
         if (info.modTime + timelag < now) {
-          tableNames.add(info.getTableNameAsString());
+          uniqueTables.add(info.getTableDesc());
         } else {
           numSkipped.incrementAndGet(); // one more in-flux table
         }
       }
     }
-    return getHTableDescriptors(tableNames);
-  }
-
-  HTableDescriptor[] getHTableDescriptors(List tableNames) {
-    HTableDescriptor[] htd = null;
-    try {
-      LOG.info("getHTableDescriptors == tableNames => " + tableNames);
-      htd = new HBaseAdmin(conf).getTableDescriptors(tableNames);
-    } catch (IOException e) {
-      LOG.debug("Exception getting table descriptors", e);
-    }
-    return htd;
+    return uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]);
   }

-
   /**
    * Gets the entry in regionInfo corresponding to the the given encoded
    * region name. If the region has not been seen yet, a new entry is added
@@ -902,7 +887,8 @@ public class HBaseFsck {
       // record the latest modification of this META record
       long ts =  Collections.max(result.list(), comp).getTimestamp();

-      Pair pair = MetaReader.parseCatalogResult(result);
+      Pair pair =
+        MetaReader.metaRowToRegionPair(result);
       if (pair == null || pair.getFirst() == null) {
         emptyRegionInfoQualifiers.add(result);
         return true;
@@ -1004,7 +990,8 @@ public class HBaseFsck {
       }

       int tableCompare = RegionSplitCalculator.BYTES_COMPARATOR.compare(
-          l.metaEntry.getTableName(), r.metaEntry.getTableName());
+          l.metaEntry.getTableDesc().getName(),
+          r.metaEntry.getTableDesc().getName());
       if (tableCompare != 0) {
         return tableCompare;
       }
diff --git src/main/java/org/apache/hadoop/hbase/util/HMerge.java src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index a6f6b69..c34f796 100644
--- src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -55,7 +55,6 @@ import org.apache.hadoop.hbase.regionserver.wal.HLog;
  * a table by merging adjacent regions.
  */
 class HMerge {
-  // TODO: Where is this class used?  How does it relate to Merge in same package?
   static final Log LOG = LogFactory.getLog(HMerge.class);
   static final Random rand = new Random();

@@ -136,12 +135,12 @@ class HMerge {
     protected final Configuration conf;
     protected final FileSystem fs;
     protected final Path tabledir;
-    protected final HTableDescriptor htd;
     protected final HLog hlog;
     private final long maxFilesize;

-    protected Merger(Configuration conf, FileSystem fs, final byte [] tableName)
+    protected Merger(Configuration conf, FileSystem fs,
+      final byte [] tableName)
     throws IOException {
       this.conf = conf;
       this.fs = fs;
@@ -152,7 +151,6 @@ class HMerge {
         fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))),
         Bytes.toString(tableName)
       );
-      this.htd = FSUtils.getTableDescriptor(this.fs, this.tabledir);
       Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
           HConstants.HREGION_LOGDIR_NAME);
       Path oldLogDir = new Path(tabledir, HConstants.HREGION_OLDLOGDIR_NAME);
@@ -190,13 +188,13 @@ class HMerge {
       long nextSize = 0;
       for (int i = 0; i < info.length - 1; i++) {
         if (currentRegion == null) {
-          currentRegion = HRegion.newHRegion(tabledir, hlog, fs, conf, info[i],
-            this.htd, null);
+          currentRegion =
+            HRegion.newHRegion(tabledir, hlog, fs, conf, info[i], null);
           currentRegion.initialize();
           currentSize = currentRegion.getLargestHStoreSize();
         }
-        nextRegion = HRegion.newHRegion(tabledir, hlog, fs, conf, info[i + 1],
-          this.htd, null);
+        nextRegion =
+          HRegion.newHRegion(tabledir, hlog, fs, conf, info[i + 1], null);
         nextRegion.initialize();
         nextSize = nextRegion.getLargestHStoreSize();

@@ -263,7 +261,7 @@ class HMerge {
             Bytes.toString(HConstants.REGIONINFO_QUALIFIER));
       }
       HRegionInfo region = Writables.getHRegionInfo(regionInfoValue);
-      if (!Bytes.equals(region.getTableName(), this.tableName)) {
+      if (!Bytes.equals(region.getTableDesc().getName(), this.tableName)) {
         return null;
       }
       return region;
@@ -359,7 +357,7 @@ class HMerge {

       // Scan root region to find all the meta regions
       root = HRegion.newHRegion(rootTableDir, hlog, fs, conf,
-          HRegionInfo.ROOT_REGIONINFO, HTableDescriptor.ROOT_TABLEDESC, null);
+          HRegionInfo.ROOT_REGIONINFO, null);
       root.initialize();

       Scan scan = new Scan();
@@ -435,4 +433,4 @@ class HMerge {
       }
     }
   }
-}
\ No newline at end of file
+}
diff --git src/main/java/org/apache/hadoop/hbase/util/Merge.java src/main/java/org/apache/hadoop/hbase/util/Merge.java
index 3aa980f..de66861 100644
--- src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ src/main/java/org/apache/hadoop/hbase/util/Merge.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
@@ -154,7 +153,7 @@ public class Merge extends Configured implements Tool {
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
     List cells2 = rootRegion.get(get, null).list();
     HRegionInfo info2 = Writables.getHRegionInfo((cells2 == null)? null: cells2.get(0).getValue());
-    HRegion merged = merge(HTableDescriptor.META_TABLEDESC, info1, rootRegion, info2, rootRegion);
+    HRegion merged = merge(info1, rootRegion, info2, rootRegion);
     LOG.info("Adding " + merged.getRegionInfo() + " to " +
         rootRegion.getRegionInfo());
     HRegion.addRegionToMETA(rootRegion, merged);
@@ -217,9 +216,8 @@ public class Merge extends Configured implements Tool {
     Get get = new Get(region1);
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
     List cells1 = metaRegion1.get(get, null).list();
-    HRegionInfo info1 =
-      Writables.getHRegionInfo((cells1 == null)? null: cells1.get(0).getValue());
-    if (info1 == null) {
+    HRegionInfo info1 = Writables.getHRegionInfo((cells1 == null)? null: cells1.get(0).getValue());
+    if (info1== null) {
       throw new NullPointerException("info1 is null using key " +
           Bytes.toStringBinary(region1) + " in " + meta1);
     }
@@ -237,9 +235,7 @@ public class Merge extends Configured implements Tool {
     if (info2 == null) {
       throw new NullPointerException("info2 is null using key " + meta2);
     }
-    HTableDescriptor htd = FSUtils.getTableDescriptor(FileSystem.get(getConf()),
-      this.rootdir, this.tableName);
-    HRegion merged = merge(htd, info1, metaRegion1, info2, metaRegion2);
+    HRegion merged = merge(info1, metaRegion1, info2, metaRegion2);

     // Now find the meta region which will contain the newly merged region

@@ -271,8 +267,8 @@ public class Merge extends Configured implements Tool {
    * to scan the meta if the resulting merged region does not go in either)
    * Returns HRegion object for newly merged region
    */
-  private HRegion merge(final HTableDescriptor htd, HRegionInfo info1,
-      HRegion meta1, HRegionInfo info2, HRegion meta2)
+  private HRegion merge(HRegionInfo info1, HRegion meta1, HRegionInfo info2,
+      HRegion meta2)
   throws IOException {
     if (info1 == null) {
       throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " +
@@ -284,9 +280,9 @@ public class Merge extends Configured implements Tool {
     }
     HRegion merged = null;
     HLog log = utils.getLog();
-    HRegion r1 = HRegion.openHRegion(info1, htd, log, getConf());
+    HRegion r1 = HRegion.openHRegion(info1, log, getConf());
     try {
-      HRegion r2 = HRegion.openHRegion(info2, htd, log, getConf());
+      HRegion r2 = HRegion.openHRegion(info2, log, getConf());
       try {
         merged = HRegion.merge(r1, r2);
       } finally {
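[Review note, not part of the patch: the Merge/HMerge hunks above show the core signature change this revert makes. Post-HBASE-451, callers passed the table descriptor explicitly; after the revert it rides inside HRegionInfo again. A before/after sketch of the call shape, per the hunks:

    // post-HBASE-451 (removed by this revert):
    //   HRegion r = HRegion.openHRegion(info, htd, log, conf);
    // pre-HBASE-451 (restored by this revert):
    HRegion r = HRegion.openHRegion(info, log, conf);  // descriptor via info.getTableDesc()
]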
diff --git src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
index af8d734..540d7df 100644
--- src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
+++ src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
@@ -20,23 +20,14 @@
 package org.apache.hadoop.hbase.util;

-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
@@ -45,7 +36,17 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;

 /**
  * Contains utility methods for manipulating HBase meta tables.
@@ -58,6 +59,7 @@ public class MetaUtils {
   private static final Log LOG = LogFactory.getLog(MetaUtils.class);
   private final Configuration conf;
   private FileSystem fs;
+  private Path rootdir;
   private HLog log;
   private HRegion rootRegion;
   private Map metaRegions = Collections.synchronizedSortedMap(
@@ -87,6 +89,8 @@ public class MetaUtils {
    */
   private void initialize() throws IOException {
     this.fs = FileSystem.get(this.conf);
+    // Get root directory of HBase installation
+    this.rootdir = FSUtils.getRootDir(this.conf);
   }

   /**
@@ -262,16 +266,14 @@ public class MetaUtils {
     if (this.rootRegion != null) {
       return this.rootRegion;
     }
-    this.rootRegion = HRegion.openHRegion(HRegionInfo.ROOT_REGIONINFO,
-      HTableDescriptor.ROOT_TABLEDESC, getLog(),
+    this.rootRegion = HRegion.openHRegion(HRegionInfo.ROOT_REGIONINFO, getLog(),
       this.conf);
     this.rootRegion.compactStores();
     return this.rootRegion;
   }

   private HRegion openMetaRegion(HRegionInfo metaInfo) throws IOException {
-    HRegion meta = HRegion.openHRegion(metaInfo, HTableDescriptor.META_TABLEDESC,
-      getLog(), this.conf);
+    HRegion meta = HRegion.openHRegion(metaInfo, getLog(), this.conf);
     meta.compactStores();
     return meta;
   }
@@ -320,6 +322,78 @@ public class MetaUtils {
   }

   /**
+   * Offline version of the online TableOperation,
+   * org.apache.hadoop.hbase.master.AddColumn.
+   * @param tableName table name
+   * @param hcd Add this column to tableName
+   * @throws IOException e
+   */
+  public void addColumn(final byte [] tableName,
+      final HColumnDescriptor hcd)
+  throws IOException {
+    List metas = getMETARows(tableName);
+    for (HRegionInfo hri: metas) {
+      final HRegion m = getMetaRegion(hri);
+      scanMetaRegion(m, new ScannerListener() {
+        private boolean inTable = true;
+
+        @SuppressWarnings("synthetic-access")
+        public boolean processRow(HRegionInfo info) throws IOException {
+          LOG.debug("Testing " + Bytes.toString(tableName) + " against " +
+            Bytes.toString(info.getTableDesc().getName()));
+          if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
+            this.inTable = false;
+            info.getTableDesc().addFamily(hcd);
+            updateMETARegionInfo(m, info);
+            return true;
+          }
+          // If we got here and we have not yet encountered the table yet,
+          // inTable will be false.  Otherwise, we've passed out the table.
+          // Stop the scanner.
+          return this.inTable;
+        }});
+    }
+  }
+
+  /**
+   * Offline version of the online TableOperation,
+   * org.apache.hadoop.hbase.master.DeleteColumn.
+   * @param tableName table name
+   * @param columnFamily Name of column name to remove.
+   * @throws IOException e
+   */
+  public void deleteColumn(final byte [] tableName,
+      final byte [] columnFamily) throws IOException {
+    List metas = getMETARows(tableName);
+    for (HRegionInfo hri: metas) {
+      final HRegion m = getMetaRegion(hri);
+      scanMetaRegion(m, new ScannerListener() {
+        private boolean inTable = true;
+
+        @SuppressWarnings("synthetic-access")
+        public boolean processRow(HRegionInfo info) throws IOException {
+          if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
+            this.inTable = false;
+            info.getTableDesc().removeFamily(columnFamily);
+            updateMETARegionInfo(m, info);
+            Path tabledir = new Path(rootdir,
+              info.getTableDesc().getNameAsString());
+            Path p = Store.getStoreHomedir(tabledir, info.getEncodedName(),
+              columnFamily);
+            if (!fs.delete(p, true)) {
+              LOG.warn("Failed delete of " + p);
+            }
+            return false;
+          }
+          // If we got here and we have not yet encountered the table yet,
+          // inTable will be false.  Otherwise, we've passed out the table.
+          // Stop the scanner.
+          return this.inTable;
+        }});
+    }
+  }
+
+  /**
    * Update COL_REGIONINFO in meta region r with HRegionInfo hri
    *
    * @param r region
@@ -392,7 +466,7 @@ public class MetaUtils {

       public boolean processRow(HRegionInfo info) throws IOException {
         SL_LOG.debug("Testing " + info);
-        if (Bytes.equals(info.getTableName(),
+        if (Bytes.equals(info.getTableDesc().getName(),
             HConstants.META_TABLE_NAME)) {
           result.add(info);
           return false;
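[Review note, not part of the patch: the hunks above restore MetaUtils.addColumn()/deleteColumn(), offline schema edits that rewrite the descriptor embedded in each HRegionInfo row of META. A hedged usage sketch -- the cluster must be down, and the constructor/cleanup shapes are assumed from this era's MetaUtils, not stated in the patch:

    Configuration conf = HBaseConfiguration.create();
    MetaUtils utils = new MetaUtils(conf);   // assumed MetaUtils(Configuration) ctor
    try {
      utils.addColumn(Bytes.toBytes("mytable"), new HColumnDescriptor("newfam"));
      utils.deleteColumn(Bytes.toBytes("mytable"), Bytes.toBytes("oldfam"));
    } finally {
      utils.shutdown();                      // assumed cleanup method
    }
]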
diff --git src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index e4cf9bc..ea4b54c 100644
--- src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -582,9 +582,8 @@ public class RegionSplitter {
           if (sk.length == 0)
             sk = splitAlgo.firstRow();
           String startKey = splitAlgo.rowToStr(sk);
-          HTableDescriptor htd = table.getTableDescriptor();
           // check every Column Family for that region
-          for (HColumnDescriptor c : htd.getFamilies()) {
+          for (HColumnDescriptor c : hri.getTableDesc().getFamilies()) {
             Path cfDir = Store.getStoreHomedir(tableDir, hri.getEncodedName(),
                 c.getName());
             if (fs.exists(cfDir)) {
diff --git src/main/java/org/apache/hadoop/hbase/util/Writables.java src/main/java/org/apache/hadoop/hbase/util/Writables.java
index f595af5..3e60f97 100644
--- src/main/java/org/apache/hadoop/hbase/util/Writables.java
+++ src/main/java/org/apache/hadoop/hbase/util/Writables.java
@@ -20,7 +20,6 @@
 package org.apache.hadoop.hbase.util;

 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.migration.HRegionInfo090x;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Writable;

@@ -63,7 +62,7 @@ public class Writables {

   /**
    * Put a bunch of Writables as bytes all into the one byte array.
-   * @param ws writable
+   * @param w writable
    * @return The bytes of w gotten by running its
    * {@link Writable#write(java.io.DataOutput)} method.
    * @throws IOException e
@@ -216,16 +215,4 @@ public class Writables {
     }
     return tgt;
   }
-
-  /**
-   * Get HREgionInfoForMigration serialized from bytes.
-   * @param bytes serialized bytes
-   * @return HRegionInfoForMigration
-   * @throws IOException
-   */
-  public static HRegionInfo090x getHRegionInfoForMigration(final byte [] bytes)
-  throws IOException {
-    return (HRegionInfo090x)getWritable(bytes, new HRegionInfo090x());
-  }
-
 }
\ No newline at end of file
diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java
index 4da6f96..55257b3 100644
--- src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java
+++ src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java
@@ -19,29 +19,52 @@
  */
 package org.apache.hadoop.hbase.zookeeper;

+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.catalog.CatalogTracker;

 /**
  * Tracks the unassigned zookeeper node used by the META table.
+ *
+ * A callback is made into the passed {@link CatalogTracker} when
+ * .META. completes a new assignment.
+ *
  * If META is already assigned when instantiating this class, you will not
 * receive any notification for that assignment.  You will receive a
 * notification after META has been successfully assigned to a new location.
 */
 public class MetaNodeTracker extends ZooKeeperNodeTracker {
+  private static final Log LOG = LogFactory.getLog(MetaNodeTracker.class);
+
+  /** Catalog tracker to notify when META has a new assignment completed. */
+  private final CatalogTracker catalogTracker;
+
   /**
    * Creates a meta node tracker.
    * @param watcher
    * @param abortable
    */
-  public MetaNodeTracker(final ZooKeeperWatcher watcher, final Abortable abortable) {
+  public MetaNodeTracker(final ZooKeeperWatcher watcher,
+      final CatalogTracker catalogTracker, final Abortable abortable) {
     super(watcher, ZKUtil.joinZNode(watcher.assignmentZNode,
       HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()), abortable);
+    this.catalogTracker = catalogTracker;
   }

   @Override
   public void nodeDeleted(String path) {
     super.nodeDeleted(path);
+    if (!path.equals(node)) return;
+    LOG.info("Detected completed assignment of META, notifying catalog tracker");
+    try {
+      this.catalogTracker.waitForMetaServerConnectionDefault();
+    } catch (IOException e) {
+      LOG.warn("Tried to reset META server location after seeing the " +
+        "completion of a new META assignment but got an IOE", e);
+    }
   }
 }
\ No newline at end of file
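[Review note, not part of the patch: the MetaNodeTracker hunk above restores the callback wiring -- when the META unassigned znode is deleted (assignment complete), the tracker pokes the CatalogTracker to refresh the META server connection. A hedged wiring sketch, with the collaborators assumed to already exist:

    ZooKeeperWatcher zkw = /* existing watcher */ null;
    CatalogTracker ct = /* tracker to notify on META assignment */ null;
    Abortable abortable = /* typically the hosting server */ null;
    MetaNodeTracker tracker = new MetaNodeTracker(zkw, ct, abortable);
    tracker.start();   // ZooKeeperNodeTracker registers the watch on start()
]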
      disable(table_name)
diff --git src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
index 3f2f41e..a742af3 100644
--- src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
+++ src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
@@ -57,15 +57,9 @@ public abstract class HBaseTestCase extends TestCase {
   /** configuration parameter name for test directory */
   public static final String TEST_DIRECTORY_KEY = "test.build.data";

-/*
   protected final static byte [] fam1 = Bytes.toBytes("colfamily1");
   protected final static byte [] fam2 = Bytes.toBytes("colfamily2");
   protected final static byte [] fam3 = Bytes.toBytes("colfamily3");
-*/
-  protected final static byte [] fam1 = Bytes.toBytes("colfamily11");
-  protected final static byte [] fam2 = Bytes.toBytes("colfamily21");
-  protected final static byte [] fam3 = Bytes.toBytes("colfamily31");
-
   protected static final byte [][] COLUMNS = {fam1, fam2, fam3};

   private boolean localfs = false;
@@ -165,15 +159,16 @@ public abstract class HBaseTestCase extends TestCase {
       Path rootdir = filesystem.makeQualified(
           new Path(conf.get(HConstants.HBASE_DIR)));
       filesystem.mkdirs(rootdir);
-      HRegionInfo hri = new HRegionInfo(desc.getName(), startKey, endKey);
-      return HRegion.createHRegion(hri, rootdir, conf, desc);
+
+      return HRegion.createHRegion(new HRegionInfo(desc, startKey, endKey),
+          rootdir, conf);
     }

   protected HRegion openClosedRegion(final HRegion closedRegion)
   throws IOException {
     HRegion r = new HRegion(closedRegion.getTableDir(), closedRegion.getLog(),
         closedRegion.getFilesystem(), closedRegion.getConf(),
-        closedRegion.getRegionInfo(), closedRegion.getTableDesc(), null);
+        closedRegion.getRegionInfo(), null);
     r.initialize();
     return r;
   }
@@ -671,10 +666,9 @@ public abstract class HBaseTestCase extends TestCase {
   }

   protected void createRootAndMetaRegions() throws IOException {
-    root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir,
-        conf, HTableDescriptor.ROOT_TABLEDESC);
+    root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir, conf);
     meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
-        conf, HTableDescriptor.META_TABLEDESC);
+        conf);
     HRegion.addRegionToMETA(root, meta);
   }
diff --git src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 9aa757e..97dddab 100644
--- src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.migration.HRegionInfo090x;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -950,7 +949,7 @@ public class HBaseTestingUtility {
     int count = 0;
     for (int i = 0; i < startKeys.length; i++) {
       int j = (i + 1) % startKeys.length;
-      HRegionInfo hri = new HRegionInfo(table.getTableName(),
+      HRegionInfo hri = new HRegionInfo(table.getTableDescriptor(),
           startKeys[i], startKeys[j]);
       Put put = new Put(hri.getRegionName());
       put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
@@ -978,97 +977,6 @@ public class HBaseTestingUtility {
     return count;
   }

-  public int createMultiRegionsWithLegacyHRI(final Configuration c,
-      final HTableDescriptor htd,
-      final byte [] family, int numRegions)
-  throws IOException {
-    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
-    byte [] startKey = Bytes.toBytes("aaaaa");
-    byte [] endKey = Bytes.toBytes("zzzzz");
-    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
-    byte [][] regionStartKeys = new byte[splitKeys.length+1][];
-    for (int i=0;i<splitKeys.length;i++) {
-      regionStartKeys[i+1] = splitKeys[i];
-    }
-    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
-    byte [][] startKeys = regionStartKeys;
-    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
-    List newRegions
-      = new ArrayList(startKeys.length);
-    int count = 0;
-    for (int i = 0; i < startKeys.length; i++) {
-      int j = (i + 1) % startKeys.length;
-      HRegionInfo090x hri = new HRegionInfo090x(htd,
-        startKeys[i], startKeys[j]);
-      Put put = new Put(hri.getRegionName());
-      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(hri));
-      meta.put(put);
-      LOG.info("createMultiRegions: PUT inserted " + hri.toString());
-
-      newRegions.add(hri);
-      count++;
-    }
-    return count;
-
-  }
-
-  public int createMultiRegionsWithNewHRI(final Configuration c,
-      final HTableDescriptor htd,
-      final byte [] family, int numRegions)
-  throws IOException {
-    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
-    byte [] startKey = Bytes.toBytes("aaaaa");
-    byte [] endKey = Bytes.toBytes("zzzzz");
-    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
-    byte [][] regionStartKeys = new byte[splitKeys.length+1][];
-    for (int i=0;i<splitKeys.length;i++) {
-      regionStartKeys[i+1] = splitKeys[i];
-    }
-    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
-    byte [][] startKeys = regionStartKeys;
-    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
-    List newRegions
-      = new ArrayList(startKeys.length);
-    int count = 0;
-    for (int i = 0; i < startKeys.length; i++) {
-      int j = (i + 1) % startKeys.length;
-      HRegionInfo hri = new HRegionInfo(htd.getName(),
-        startKeys[i], startKeys[j]);
-      Put put = new Put(hri.getRegionName());
-      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(hri));
-      meta.put(put);
-      LOG.info("createMultiRegions: PUT inserted " + hri.toString());
-
-      newRegions.add(hri);
-      count++;
-    }
-    return count;
-
-  }
-
   /**
    * Create rows in META for regions of the specified table with the specified
    * start keys.  The first startKey should be a 0 length byte array if you
@@ -1089,8 +997,7 @@ public class HBaseTestingUtility {
     int count = 0;
     for (int i = 0; i < startKeys.length; i++) {
       int j = (i + 1) % startKeys.length;
-      HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
-          startKeys[j]);
+      HRegionInfo hri = new HRegionInfo(htd, startKeys[i], startKeys[j]);
       Put put = new Put(hri.getRegionName());
       put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
           Writables.getBytes(hri));
@@ -1134,7 +1041,8 @@ public class HBaseTestingUtility {
     for (Result result : s) {
       HRegionInfo info = Writables.getHRegionInfo(
           result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
-      if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
+      HTableDescriptor desc = info.getTableDesc();
+      if (Bytes.compareTo(desc.getName(), tableName) == 0) {
         LOG.info("getMetaTableRows: row -> " +
             Bytes.toStringBinary(result.getRow()));
         rows.add(result.getRow());
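[Review note, not part of the patch: the deleted helpers above derive their region boundaries from a fixed key range. A short sketch of that derivation, using the values from the hunks (Bytes.split semantics assumed from its javadoc, and numRegions is illustrative):

    int numRegions = 10;
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    // numRegions - 3 interior splits, plus the two endpoints, plus the
    // EMPTY_BYTE_ARRAY first-region start, account for all numRegions regions.
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    // the (i + 1) % startKeys.length index above makes the last region wrap to
    // an empty end key
]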
diff --git src/test/java/org/apache/hadoop/hbase/TestCompare.java src/test/java/org/apache/hadoop/hbase/TestCompare.java
index 2c0d45d..bbac815 100644
--- src/test/java/org/apache/hadoop/hbase/TestCompare.java
+++ src/test/java/org/apache/hadoop/hbase/TestCompare.java
@@ -31,25 +31,25 @@ public class TestCompare extends TestCase {
    * Sort of HRegionInfo.
    */
   public void testHRegionInfo() {
-    HRegionInfo a = new HRegionInfo(Bytes.toBytes("a"), null, null);
-    HRegionInfo b = new HRegionInfo(Bytes.toBytes("b"), null, null);
+    HRegionInfo a = new HRegionInfo(new HTableDescriptor("a"), null, null);
+    HRegionInfo b = new HRegionInfo(new HTableDescriptor("b"), null, null);
     assertTrue(a.compareTo(b) != 0);
     HTableDescriptor t = new HTableDescriptor("t");
     byte [] midway = Bytes.toBytes("midway");
-    a = new HRegionInfo(t.getName(), null, midway);
-    b = new HRegionInfo(t.getName(), midway, null);
+    a = new HRegionInfo(t, null, midway);
+    b = new HRegionInfo(t, midway, null);
     assertTrue(a.compareTo(b) < 0);
     assertTrue(b.compareTo(a) > 0);
     assertEquals(a, a);
     assertTrue(a.compareTo(a) == 0);
-    a = new HRegionInfo(t.getName(), Bytes.toBytes("a"), Bytes.toBytes("d"));
-    b = new HRegionInfo(t.getName(), Bytes.toBytes("e"), Bytes.toBytes("g"));
+    a = new HRegionInfo(t, Bytes.toBytes("a"), Bytes.toBytes("d"));
+    b = new HRegionInfo(t, Bytes.toBytes("e"), Bytes.toBytes("g"));
     assertTrue(a.compareTo(b) < 0);
-    a = new HRegionInfo(t.getName(), Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
-    b = new HRegionInfo(t.getName(), Bytes.toBytes("e"), Bytes.toBytes("g"));
+    a = new HRegionInfo(t, Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
+    b = new HRegionInfo(t, Bytes.toBytes("e"), Bytes.toBytes("g"));
     assertTrue(a.compareTo(b) < 0);
-    a = new HRegionInfo(t.getName(), Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
-    b = new HRegionInfo(t.getName(), Bytes.toBytes("aaaa"), Bytes.toBytes("eeee"));
+    a = new HRegionInfo(t, Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
+    b = new HRegionInfo(t, Bytes.toBytes("aaaa"), Bytes.toBytes("eeee"));
     assertTrue(a.compareTo(b) < 0);
   }
 }
\ No newline at end of file
diff --git src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
deleted file mode 100644
index 8a69a39..0000000
--- src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Copyright 2011 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestFSTableDescriptorForceCreation {
-  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  @BeforeClass
-  public static void setUpCluster() throws Exception {
-    UTIL.startMiniDFSCluster(1);
-  }
-
-  @AfterClass
-  public static void shutDownCluster() throws Exception {
-    UTIL.shutdownMiniDFSCluster();
-  }
-
-  @Test
-  public void testShouldCreateNewTableDescriptorIfForcefulCreationIsFalse()
-      throws IOException {
-    final String name = "newTable2";
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    Path rootdir = new Path(fs.getWorkingDirectory(), name);
-    HTableDescriptor htd = new HTableDescriptor(name);
-    assertTrue("Should create new table descriptor", FSUtils
-        .createTableDescriptor(fs, rootdir, htd, false));
-  }
-
-  @Test
-  public void testShouldNotCreateTheSameTableDescriptorIfForcefulCreationIsFalse()
-      throws IOException {
-    final String name = "testAlreadyExists";
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    // Cleanup old tests if any detrius laying around.
-    Path rootdir = new Path(fs.getWorkingDirectory(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
-    HTableDescriptor htd = new HTableDescriptor(name);
-    htds.add(htd);
-    assertFalse("Should not create new table descriptor", FSUtils
-        .createTableDescriptor(fs, rootdir, htd, false));
-  }
-
-  @Test
-  public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor()
-      throws Exception {
-    final String name = "createNewTableNew2";
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    Path rootdir = new Path(fs.getWorkingDirectory(), name);
-    HTableDescriptor htd = new HTableDescriptor(name);
-    FSUtils.createTableDescriptor(fs, rootdir, htd, false);
-    assertTrue("Should create new table descriptor", FSUtils
-        .createTableDescriptor(fs, rootdir, htd, true));
-  }
-}
diff --git src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
index cc3d555..4158bad 100644
--- src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
+++ src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
@@ -25,6 +25,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -80,11 +81,15 @@ public class TestRegionRebalancing {
     this.table = new HTable(UTIL.getConfiguration(), this.desc.getName());
     CatalogTracker ct = new CatalogTracker(UTIL.getConfiguration());
     ct.start();
+    Map regions = null;
     try {
-      MetaReader.fullScanMetaAndPrint(ct);
+      regions = MetaReader.fullScan(ct);
     } finally {
       ct.stop();
     }
+    for (Map.Entry e: regions.entrySet()) {
+      LOG.info(e);
+    }
     assertEquals("Test table should have right number of regions",
       HBaseTestingUtility.KEYS.length + 1/*One extra to account for start/end keys*/,
       this.table.getStartKeys().length);
@@ -214,4 +219,4 @@ public class TestRegionRebalancing {
       } catch (InterruptedException e) {}
     }
   }
-}
\ No newline at end of file
+}
src/test/java/org/apache/hadoop/hbase/TestSerialization.java src/test/java/org/apache/hadoop/hbase/TestSerialization.java
index b6a4c7a..05f0efc 100644
--- src/test/java/org/apache/hadoop/hbase/TestSerialization.java
+++ src/test/java/org/apache/hadoop/hbase/TestSerialization.java
@@ -103,8 +103,8 @@ public class TestSerialization {
     HRegionInfo deserializedHri =
       (HRegionInfo)Writables.getWritable(hrib, new HRegionInfo());
     assertEquals(hri.getEncodedName(), deserializedHri.getEncodedName());
-    //assertEquals(hri.getTableDesc().getFamilies().size(),
-    //  deserializedHri.getTableDesc().getFamilies().size());
+    assertEquals(hri.getTableDesc().getFamilies().size(),
+      deserializedHri.getTableDesc().getFamilies().size());
   }

   @Test public void testRegionInfos() throws Exception {
@@ -126,7 +126,7 @@ public class TestSerialization {
     for (int i = 0; i < families.length; i++) {
       htd.addFamily(new HColumnDescriptor(families[i]));
     }
-    return new HRegionInfo(htd.getName(), HConstants.EMPTY_START_ROW,
+    return new HRegionInfo(htd, HConstants.EMPTY_START_ROW,
         HConstants.EMPTY_END_ROW);
   }
diff --git src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
index 8e16b0a..538e809 100644
--- src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
+++ src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
@@ -35,20 +35,16 @@ import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ServerCallable;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.util.Progressable;
@@ -164,44 +160,27 @@ public class TestCatalogTracker {
     t.join();
   }

-  @Test
-  public void testGetMetaServerConnectionFails()
+  @Test public void testGetMetaServerConnectionFails()
   throws IOException, InterruptedException, KeeperException {
-    HConnection connection =
-      HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
+    HConnection connection = Mockito.mock(HConnection.class);
+    ConnectException connectException =
+      new ConnectException("Connection refused");
+    final HRegionInterface implementation =
+      Mockito.mock(HRegionInterface.class);
+    Mockito.when(implementation.get((byte [])Mockito.any(), (Get)Mockito.any())).
+      thenThrow(connectException);
+    Mockito.when(connection.getHRegionConnection((HServerAddress)Matchers.anyObject(), Matchers.anyBoolean())).
+      thenReturn(implementation);
+    Assert.assertNotNull(connection.getHRegionConnection(new HServerAddress(), false));
+    final CatalogTracker ct = constructAndStartCatalogTracker(connection);
     try {
-      // Mock an HRegionInterface.
-      final HRegionInterface implementation = Mockito.mock(HRegionInterface.class);
-      // If a 'get' is called on mocked interface, throw connection refused.
-      Mockito.when(implementation.get((byte[]) Mockito.any(), (Get) Mockito.any())).
-        thenThrow(new ConnectException("Connection refused"));
-      // Make it so our implementation is returned when we do a connection.
-      // Need to fake out the location lookup stuff first.
-      ServerName sn = new ServerName("example.com", 1234, System.currentTimeMillis());
-      final HRegionLocation anyLocation =
-        new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn.getHostname(),
-          sn.getPort());
-      Mockito.when(connection.getRegionLocation((byte[]) Mockito.any(),
-        (byte[]) Mockito.any(), Mockito.anyBoolean())).
-        thenReturn(anyLocation);
-      Mockito.when(connection.getHRegionConnection(Mockito.anyString(),
-        Mockito.anyInt(), Matchers.anyBoolean())).
-        thenReturn(implementation);
-      // Now start up the catalogtracker with our doctored Connection.
-      final CatalogTracker ct = constructAndStartCatalogTracker(connection);
-      try {
-        RootLocationEditor.setRootLocation(this.watcher, sn);
-        long timeout = UTIL.getConfiguration().
-          getLong("hbase.catalog.verification.timeout", 1000);
-        Assert.assertFalse(ct.verifyMetaRegionLocation(timeout));
-      } finally {
-        // Clean out root location or later tests will be confused... they
-        // presume start fresh in zk.
-        RootLocationEditor.deleteRootLocation(this.watcher);
-      }
+      RootLocationEditor.setRootLocation(this.watcher,
+        new ServerName("example.com", 1234, System.currentTimeMillis()));
+      Assert.assertFalse(ct.verifyMetaRegionLocation(100));
     } finally {
-      // Clear out our doctored connection or could mess up subsequent tests.
-      HConnectionManager.deleteConnection(UTIL.getConfiguration(), true);
+      // Clean out root location or later tests will be confused... they presume
+      // start fresh in zk.
+      RootLocationEditor.deleteRootLocation(this.watcher);
     }
   }

@@ -221,9 +200,9 @@ public class TestCatalogTracker {
       Mockito.mock(HRegionInterface.class);
     Mockito.when(implementation.getRegionInfo((byte [])Mockito.any())).
       thenThrow(connectException);
-    Mockito.when(connection.getHRegionConnection(Mockito.anyString(),
-      Mockito.anyInt(), Mockito.anyBoolean())).
+    Mockito.when(connection.getHRegionConnection((HServerAddress)Matchers.anyObject(), Matchers.anyBoolean())).
       thenReturn(implementation);
+    Assert.assertNotNull(connection.getHRegionConnection(new HServerAddress(), false));
     final CatalogTracker ct = constructAndStartCatalogTracker(connection);
     try {
       RootLocationEditor.setRootLocation(this.watcher,
@@ -246,14 +225,8 @@ public class TestCatalogTracker {
   @Test (expected = NotAllMetaRegionsOnlineException.class)
   public void testTimeoutWaitForMeta()
   throws IOException, InterruptedException {
-    HConnection connection =
-      HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
-    try {
-      final CatalogTracker ct = constructAndStartCatalogTracker(connection);
-      ct.waitForMeta(100);
-    } finally {
-      HConnectionManager.deleteConnection(UTIL.getConfiguration(), true);
-    }
+    final CatalogTracker ct = constructAndStartCatalogTracker();
+    ct.waitForMeta(100);
   }

   /**
@@ -286,84 +259,62 @@ public class TestCatalogTracker {

   /**
    * Test waiting on meta w/ no timeout specified.
-   * @throws Exception
+   * @throws IOException
+   * @throws InterruptedException
+   * @throws KeeperException
    */
   @Test public void testNoTimeoutWaitForMeta()
-  throws Exception {
+  throws IOException, InterruptedException, KeeperException {
     // Mock an HConnection and a HRegionInterface implementation.  Have the
     // HConnection return the HRI.  Have the HRI return a few mocked up responses
     // to make our test work.
-    HConnection connection =
-      HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
-    try {
-      // Mock an HRegionInterface.
-
-      final HRegionInterface implementation = Mockito.mock(HRegionInterface.class);
-      // Make it so our implementation is returned when we do a connection.
-      // Need to fake out the location lookup stuff first.
-      ServerName sn = new ServerName("example.com", 1234, System.currentTimeMillis());
-      final HRegionLocation anyLocation =
-        new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn.getHostname(),
-          sn.getPort());
-      Mockito.when(connection.getRegionLocation((byte[]) Mockito.any(),
-        (byte[]) Mockito.any(), Mockito.anyBoolean())).
-        thenReturn(anyLocation);
-      // Have implementation returned which ever way getHRegionConnection is called.
-      Mockito.when(connection.getHRegionConnection(Mockito.anyString(),
-        Mockito.anyInt(), Matchers.anyBoolean())).
-        thenReturn(implementation);
-      Mockito.when(connection.getHRegionConnection(Mockito.anyString(),
-        Mockito.anyInt())).
-        thenReturn(implementation);
-
-      final CatalogTracker ct = constructAndStartCatalogTracker(connection);
-      ServerName hsa = ct.getMetaLocation();
-      Assert.assertNull(hsa);
-
-      // Now test waiting on meta location getting set.
-      Thread t = new WaitOnMetaThread(ct) {
-        @Override
-        void doWaiting() throws InterruptedException {
-          this.ct.waitForMeta();
-        }
-      };
-      startWaitAliveThenWaitItLives(t, 1000);
-
-      // Now the ct is up... set into the mocks some answers that make it look
-      // like things have been getting assigned. Make it so we'll return a
-      // location (no matter what the Get is). Same for getHRegionInfo -- always
-      // just return the meta region.
-      List kvs = new ArrayList();
-      kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
-        HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(HRegionInfo.FIRST_META_REGIONINFO)));
-      kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
-        HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
-        Bytes.toBytes(SN.getHostAndPort())));
-      kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
-        HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
-        Bytes.toBytes(SN.getStartcode())));
-      final Result result = new Result(kvs);
-      Mockito.when(connection.getRegionServerWithRetries((ServerCallable)Mockito.any())).
-        thenReturn(result);
-      Mockito.when(implementation.getRegionInfo((byte[]) Mockito.any())).
-        thenReturn(HRegionInfo.FIRST_META_REGIONINFO);
-      // This should trigger wake up of meta wait (Its the removal of the meta
-      // region unassigned node that triggers catalogtrackers that a meta has
-      // been assigned).
-      String node = ct.getMetaNodeTracker().getNode();
-      ZKUtil.createAndFailSilent(this.watcher, node);
-      MetaEditor.updateMetaLocation(ct, HRegionInfo.FIRST_META_REGIONINFO, SN);
-      ZKUtil.deleteNode(this.watcher, node);
-      // Go get the new meta location. waitForMeta gets and verifies meta.
-      Assert.assertTrue(ct.waitForMeta(10000).equals(SN));
-      // Join the thread... should exit shortly.
-      t.join();
-      // Now meta is available.
-      Assert.assertTrue(ct.waitForMeta(10000).equals(SN));
-    } finally {
-      HConnectionManager.deleteConnection(UTIL.getConfiguration(), true);
-    }
+    HConnection connection = Mockito.mock(HConnection.class);
+    HRegionInterface mockHRI = Mockito.mock(HRegionInterface.class);
+    // Make the HRI return an answer no matter how Get is called.  Same for
+    // getHRegionInfo.  Thats enough for this test.
+    Mockito.when(connection.getHRegionConnection((String)Mockito.any(),
+      Matchers.anyInt())).thenReturn(mockHRI);
+
+    final CatalogTracker ct = constructAndStartCatalogTracker(connection);
+    ServerName hsa = ct.getMetaLocation();
+    Assert.assertNull(hsa);
+
+    // Now test waiting on meta location getting set.
+    Thread t = new WaitOnMetaThread(ct) {
+      @Override
+      void doWaiting() throws InterruptedException {
+        this.ct.waitForMeta();
+      }
+    };
+    startWaitAliveThenWaitItLives(t, 1000);
+
+    // Now the ct is up... set into the mocks some answers that make it look
+    // like things have been getting assigned.  Make it so we'll return a
+    // location (no matter what the Get is).  Same for getHRegionInfo -- always
+    // just return the meta region.
+    List kvs = new ArrayList();
+    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
+      HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
+      Bytes.toBytes(SN.getHostAndPort())));
+    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
+      HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
+      Bytes.toBytes(SN.getStartcode())));
+    final Result result = new Result(kvs);
+    Mockito.when(mockHRI.get((byte [])Mockito.any(), (Get)Mockito.any())).
+      thenReturn(result);
+    Mockito.when(mockHRI.getRegionInfo((byte [])Mockito.any())).
+      thenReturn(HRegionInfo.FIRST_META_REGIONINFO);
+    // This should trigger wake up of meta wait (Its the removal of the meta
+    // region unassigned node that triggers catalogtrackers that a meta has
+    // been assigned.
+    String node = ct.getMetaNodeTracker().getNode();
+    ZKUtil.createAndFailSilent(this.watcher, node);
+    MetaEditor.updateMetaLocation(ct, HRegionInfo.FIRST_META_REGIONINFO, SN);
+    ZKUtil.deleteNode(this.watcher, node);
+    // Join the thread... should exit shortly.
+    t.join();
+    // Now meta is available.
+    Assert.assertTrue(ct.getMetaLocation().equals(SN));
   }

   private void startWaitAliveThenWaitItLives(final Thread t, final int ms) {
@@ -410,4 +361,4 @@
       this.ct.waitForRoot();
     }
   }
-}
\ No newline at end of file
+}
diff --git src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
index 5d7bf0c..84130e2 100644
--- src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
+++ src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
@@ -19,10 +19,7 @@
  */
 package org.apache.hadoop.hbase.catalog;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;

 import java.io.IOException;
 import java.util.List;
@@ -37,6 +34,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -71,15 +70,11 @@ public class TestMetaReaderEditor {
   };

   @BeforeClass public static void beforeClass() throws Exception {
-    UTIL.startMiniCluster(3);
+    UTIL.startMiniCluster();
   }

   @Before public void setup() throws IOException, InterruptedException {
     Configuration c = new Configuration(UTIL.getConfiguration());
-    // Tests to 4 retries every 5 seconds. Make it try every 1 second so more
-    // responsive.  1 second is default as is ten retries.
-    c.setLong("hbase.client.pause", 1000);
-    c.setInt("hbase.client.retries.number", 10);
     zkw = new ZooKeeperWatcher(c, "TestMetaReaderEditor", ABORTABLE);
     ct = new CatalogTracker(zkw, c, ABORTABLE);
     ct.start();
@@ -89,111 +84,6 @@ public class TestMetaReaderEditor {
     UTIL.shutdownMiniCluster();
   }

-  /**
-   * Does {@link MetaReader#getRegion(CatalogTracker, byte[])} and a write
-   * against .META. while its hosted server is restarted to prove our retrying
-   * works.
-   * @throws IOException
-   * @throws InterruptedException
-   */
-  @Test (timeout = 180000) public void testRetrying()
-  throws IOException, InterruptedException {
-    final String name = "testRetrying";
-    LOG.info("Started " + name);
-    final byte [] nameBytes = Bytes.toBytes(name);
-    HTable t = UTIL.createTable(nameBytes, HConstants.CATALOG_FAMILY);
-    int regionCount = UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY);
-    // Test it works getting a region from just made user table.
-    final List regions =
-      testGettingTableRegions(this.ct, nameBytes, regionCount);
-    MetaTask reader = new MetaTask(this.ct, "reader") {
-      @Override
-      void metaTask() throws Throwable {
-        testGetRegion(this.ct, regions.get(0));
-        LOG.info("Read " + regions.get(0).getEncodedName());
-      }
-    };
-    MetaTask writer = new MetaTask(this.ct, "writer") {
-      @Override
-      void metaTask() throws Throwable {
-        MetaEditor.addRegionToMeta(this.ct, regions.get(0));
-        LOG.info("Wrote " + regions.get(0).getEncodedName());
-      }
-    };
-    reader.start();
-    writer.start();
-    // Make sure reader and writer are working.
-    assertTrue(reader.isProgressing());
-    assertTrue(writer.isProgressing());
-    // Kill server hosting meta -- twice  . See if our reader/writer ride over the
-    // meta moves.  They'll need to retry.
- for (int i = 0; i < 2; i++) { - LOG.info("Restart=" + i); - UTIL.ensureSomeRegionServersAvailable(2); - int index = -1; - do { - index = UTIL.getMiniHBaseCluster().getServerWithMeta(); - } while (index == -1); - UTIL.getMiniHBaseCluster().abortRegionServer(index); - UTIL.getMiniHBaseCluster().waitOnRegionServer(index); - } - assertTrue(reader.toString(), reader.isProgressing()); - assertTrue(writer.toString(), writer.isProgressing()); - reader.stop = true; - writer.stop = true; - reader.join(); - writer.join(); - } - - /** - * Thread that runs a MetaReader/MetaEditor task until asked stop. - */ - abstract static class MetaTask extends Thread { - boolean stop = false; - int count = 0; - Throwable t = null; - final CatalogTracker ct; - - MetaTask(final CatalogTracker ct, final String name) { - super(name); - this.ct = ct; - } - - @Override - public void run() { - try { - while(!this.stop) { - LOG.info("Before " + this.getName()+ ", count=" + this.count); - metaTask(); - this.count += 1; - LOG.info("After " + this.getName() + ", count=" + this.count); - Thread.sleep(100); - } - } catch (Throwable t) { - LOG.info(this.getName() + " failed", t); - this.t = t; - } - } - - boolean isProgressing() throws InterruptedException { - int currentCount = this.count; - while(currentCount == this.count) { - if (!isAlive()) return false; - if (this.t != null) return false; - Thread.sleep(10); - } - return true; - } - - @Override - public String toString() { - return "count=" + this.count + ", t=" + - (this.t == null? "null": this.t.toString()); - } - - abstract void metaTask() throws Throwable; - } - @Test public void testGetRegionsCatalogTables() throws IOException, InterruptedException { List regions = @@ -224,9 +114,19 @@ public class TestMetaReaderEditor { @Test public void testGetRegion() throws IOException, InterruptedException { final String name = "testGetRegion"; LOG.info("Started " + name); - // Test get on non-existent region. + final byte [] nameBytes = Bytes.toBytes(name); + HTable t = UTIL.createTable(nameBytes, HConstants.CATALOG_FAMILY); + int regionCount = UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY); + + // Test it works getting a region from user table. + List regions = MetaReader.getTableRegions(ct, nameBytes); + assertEquals(regionCount, regions.size()); Pair pair = - MetaReader.getRegion(ct, Bytes.toBytes("nonexistent-region")); + MetaReader.getRegion(ct, regions.get(0).getRegionName()); + assertEquals(regions.get(0).getEncodedName(), + pair.getFirst().getEncodedName()); + // Test get on non-existent region. + pair = MetaReader.getRegion(ct, Bytes.toBytes("nonexistent-region")); assertNull(pair); // Test it works getting a region from meta/root. 
pair = @@ -237,8 +137,7 @@ public class TestMetaReaderEditor { } // Test for the optimization made in HBASE-3650 - @Test public void testScanMetaForTable() - throws IOException, InterruptedException { + @Test public void testScanMetaForTable() throws IOException { final String name = "testScanMetaForTable"; LOG.info("Started " + name); @@ -266,25 +165,4 @@ public class TestMetaReaderEditor { } assertEquals(1, MetaReader.getTableRegions(ct, greaterName).size()); } - - private static List testGettingTableRegions(final CatalogTracker ct, - final byte [] nameBytes, final int regionCount) - throws IOException, InterruptedException { - List regions = MetaReader.getTableRegions(ct, nameBytes); - assertEquals(regionCount, regions.size()); - Pair pair = - MetaReader.getRegion(ct, regions.get(0).getRegionName()); - assertEquals(regions.get(0).getEncodedName(), - pair.getFirst().getEncodedName()); - return regions; - } - - private static void testGetRegion(final CatalogTracker ct, - final HRegionInfo region) - throws IOException, InterruptedException { - Pair pair = - MetaReader.getRegion(ct, region.getRegionName()); - assertEquals(region.getEncodedName(), - pair.getFirst().getEncodedName()); - } -} +} \ No newline at end of file diff --git src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java deleted file mode 100644 index c97fb3d..0000000 --- src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java +++ /dev/null @@ -1,177 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.catalog; - -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.NavigableMap; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; -import org.apache.hadoop.hbase.client.HConnectionTestingUtility; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.ipc.HRegionInterface; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Writables; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -/** - * Test MetaReader/Editor but without spinning up a cluster. - * We mock regionserver back and forth (we do spin up a zk cluster). - */ -public class TestMetaReaderEditorNoCluster { - private static final Log LOG = LogFactory.getLog(TestMetaReaderEditorNoCluster.class); - private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); - private static final Abortable ABORTABLE = new Abortable() { - boolean aborted = false; - @Override - public void abort(String why, Throwable e) { - LOG.info(why, e); - this.aborted = true; - throw new RuntimeException(e); - } - @Override - public boolean isAborted() { - return this.aborted; - } - }; - - @Before - public void before() throws Exception { - UTIL.startMiniZKCluster(); - } - - @After - public void after() throws IOException { - UTIL.shutdownMiniZKCluster(); - } - - /** - * Test that MetaReader will ride over server throwing - * "Server not running" IOEs. - * @see https://issues.apache.org/jira/browse/HBASE-3446 - * @throws IOException - * @throws InterruptedException - */ - @Test - public void testRideOverServerNotRunning() throws IOException, InterruptedException { - // Need a zk watcher. - ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(), - this.getClass().getSimpleName(), ABORTABLE, true); - // This is a servername we use in a few places below. - ServerName sn = new ServerName("example.com", 1234, System.currentTimeMillis()); - - HConnection connection = null; - CatalogTracker ct = null; - try { - // Mock an HRegionInterface. Our mock implementation will fail a few - // times when we go to open a scanner. - final HRegionInterface implementation = Mockito.mock(HRegionInterface.class); - // When openScanner called throw IOE 'Server not running' a few times - // before we return a scanner id. Whats WEIRD is that these - // exceptions do not show in the log because they are caught and only - // printed if we FAIL. We eventually succeed after retry so these don't - // show. We will know if they happened or not because we will ask - // mockito at the end of this test to verify that openscanner was indeed - // called the wanted number of times. - final long scannerid = 123L; - Mockito.when(implementation.openScanner((byte [])Mockito.any(), - (Scan)Mockito.any())). - thenThrow(new IOException("Server not running (1 of 3)")). 
- thenThrow(new IOException("Server not running (2 of 3)")). - thenThrow(new IOException("Server not running (3 of 3)")). - thenReturn(scannerid); - // Make it so a verifiable answer comes back when next is called. Return - // the verifiable answer and then a null so we stop scanning. Our - // verifiable answer is something that looks like a row in META with - // a server and startcode that is that of the above defined servername. - List kvs = new ArrayList(); - final byte [] rowToVerify = Bytes.toBytes("rowToVerify"); - kvs.add(new KeyValue(rowToVerify, - HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, - Writables.getBytes(HRegionInfo.FIRST_META_REGIONINFO))); - kvs.add(new KeyValue(rowToVerify, - HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(sn.getHostAndPort()))); - kvs.add(new KeyValue(rowToVerify, - HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, - Bytes.toBytes(sn.getStartcode()))); - final Result [] result = new Result [] {new Result(kvs)}; - Mockito.when(implementation.next(Mockito.anyLong(), Mockito.anyInt())). - thenReturn(result). - thenReturn(null); - - // Associate a spied-upon HConnection with UTIL.getConfiguration. Need - // to shove this in here first so it gets picked up all over; e.g. by - // HTable. - connection = HConnectionTestingUtility.getSpiedConnection(UTIL.getConfiguration()); - // Fix the location lookup so it 'works' though no network. First - // make an 'any location' object. - final HRegionLocation anyLocation = - new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn.getHostname(), - sn.getPort()); - // Return the any location object when locateRegion is called in HTable - // constructor and when its called by ServerCallable (it uses getRegionLocation). - // The ugly format below comes of 'Important gotcha on spying real objects!' from - // http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html - Mockito.doReturn(anyLocation). - when(connection).locateRegion((byte[]) Mockito.any(), (byte[]) Mockito.any()); - Mockito.doReturn(anyLocation). - when(connection).getRegionLocation((byte[]) Mockito.any(), - (byte[]) Mockito.any(), Mockito.anyBoolean()); - - // Now shove our HRI implementation into the spied-upon connection. - Mockito.doReturn(implementation). - when(connection).getHRegionConnection(Mockito.anyString(), Mockito.anyInt()); - - // Now start up the catalogtracker with our doctored Connection. - ct = new CatalogTracker(zkw, null, connection, ABORTABLE, 0); - ct.start(); - // Scan meta for user tables and verify we got back expected answer. - NavigableMap hris = MetaReader.getServerUserRegions(ct, sn); - assertTrue(hris.size() == 1); - assertTrue(hris.firstEntry().getKey().equals(HRegionInfo.FIRST_META_REGIONINFO)); - assertTrue(Bytes.equals(rowToVerify, hris.firstEntry().getValue().getRow())); - // Finally verify that openscanner was called four times -- three times - // with exception and then on 4th attempt we succeed. - Mockito.verify(implementation, Mockito.times(4)). 
- openScanner((byte [])Mockito.any(), (Scan)Mockito.any()); - } finally { - if (ct != null) ct.stop(); - HConnectionManager.deleteConnection(UTIL.getConfiguration(), true); - zkw.close(); - } - } -} diff --git src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java deleted file mode 100644 index 20a70f1..0000000 --- src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; -import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionKey; -import org.mockito.Mockito; - -/** - * {@link HConnection} testing utility. - */ -public class HConnectionTestingUtility { - /* - * Not part of {@link HBaseTestingUtility} because this class is not - * in same package as {@link HConnection}. Would have to reveal ugly - * {@link HConnectionManager} innards to HBaseTestingUtility to give it access. - */ - /** - * Get a Mocked {@link HConnection} that goes with the passed conf - * configuration instance. Minimally the mock will return - * conf when {@link HConnection#getConfiguration()} is invoked. - * Be sure to shutdown the connection when done by calling - * {@link HConnectionManager#deleteConnection(Configuration, boolean)} else it - * will stick around; this is probably not what you want. - * @param conf configuration - * @return HConnection object for conf - * @throws ZooKeeperConnectionException - */ - public static HConnection getMockedConnection(final Configuration conf) - throws ZooKeeperConnectionException { - HConnectionKey connectionKey = new HConnectionKey(conf); - synchronized (HConnectionManager.HBASE_INSTANCES) { - HConnectionImplementation connection = - HConnectionManager.HBASE_INSTANCES.get(connectionKey); - if (connection == null) { - connection = Mockito.mock(HConnectionImplementation.class); - Mockito.when(connection.getConfiguration()).thenReturn(conf); - HConnectionManager.HBASE_INSTANCES.put(connectionKey, connection); - } - return connection; - } - } - - /** - * Get a Mockito spied-upon {@link HConnection} that goes with the passed - * conf configuration instance. - * Be sure to shutdown the connection when done by calling - * {@link HConnectionManager#deleteConnection(Configuration, boolean)} else it - * will stick around; this is probably not what you want. 
- * @param conf configuration - * @return HConnection object for conf - * @throws ZooKeeperConnectionException - * @see http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T) - */ - public static HConnection getSpiedConnection(final Configuration conf) - throws ZooKeeperConnectionException { - HConnectionKey connectionKey = new HConnectionKey(conf); - synchronized (HConnectionManager.HBASE_INSTANCES) { - HConnectionImplementation connection = - HConnectionManager.HBASE_INSTANCES.get(connectionKey); - if (connection == null) { - connection = Mockito.spy(new HConnectionImplementation(conf)); - HConnectionManager.HBASE_INSTANCES.put(connectionKey, connection); - } - return connection; - } - } -} diff --git src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index e1330cc..95e4cca 100644 --- src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -186,7 +186,7 @@ public class TestFromClientSide { for (Map.Entry e: loadedRegions.entrySet()) { HRegionInfo hri = e.getKey(); assertTrue(HConnectionManager.isRegionCached(conf, - hri.getTableName(), hri.getStartKey())); + hri.getTableDesc().getName(), hri.getStartKey())); } // delete the temp file diff --git src/test/java/org/apache/hadoop/hbase/client/TestHCM.java src/test/java/org/apache/hadoop/hbase/client/TestHCM.java index f98baef..f09944e 100644 --- src/test/java/org/apache/hadoop/hbase/client/TestHCM.java +++ src/test/java/org/apache/hadoop/hbase/client/TestHCM.java @@ -19,10 +19,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - import java.lang.reflect.Field; import java.util.ArrayList; import java.util.HashSet; @@ -31,8 +27,6 @@ import java.util.Map; import java.util.Random; import java.util.Set; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -44,6 +38,12 @@ import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; /** * This class is for testing HCM features @@ -225,4 +225,4 @@ public class TestHCM { Thread.sleep(50); } } -} \ No newline at end of file +} diff --git src/test/java/org/apache/hadoop/hbase/client/TestMetaMigration.java src/test/java/org/apache/hadoop/hbase/client/TestMetaMigration.java deleted file mode 100644 index 6cbf88c..0000000 --- src/test/java/org/apache/hadoop/hbase/client/TestMetaMigration.java +++ /dev/null @@ -1,178 +0,0 @@ -/** - * Copyright 2010 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import junit.framework.AssertionFailedError; -import org.apache.hadoop.hbase.catalog.CatalogTracker; -import org.apache.hadoop.hbase.migration.HRegionInfo090x; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.*; - -import org.apache.hadoop.hbase.catalog.MetaMigrationRemovingHTD; -import org.apache.hadoop.hbase.catalog.MetaReader; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.apache.hadoop.hbase.util.Writables; - -import java.util.List; - -public class TestMetaMigration { - final Log LOG = LogFactory.getLog(getClass()); - private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static MiniHBaseCluster miniHBaseCluster = null; - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - miniHBaseCluster = TEST_UTIL.startMiniCluster(1); - } - - /** - * @throws java.lang.Exception - */ - @AfterClass - public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Test - public void testHRegionInfoForMigration() throws Exception { - LOG.info("Starting testHRegionInfoForMigration"); - HTableDescriptor htd = new HTableDescriptor("testMetaMigration"); - htd.addFamily(new HColumnDescriptor("family")); - HRegionInfo090x hrim = new HRegionInfo090x(htd, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW); - LOG.info("INFO 1 = " + hrim); - byte[] bytes = Writables.getBytes(hrim); - LOG.info(" BYtes.toString = " + Bytes.toString(bytes)); - LOG.info(" HTD bytes = " + Bytes.toString(Writables.getBytes(hrim.getTableDesc()))); - HRegionInfo090x info = Writables.getHRegionInfoForMigration(bytes); - LOG.info("info = " + info); - LOG.info("END testHRegionInfoForMigration"); - - } - - @Test - public void testMetaUpdatedFlagInROOT() throws Exception { - LOG.info("Starting testMetaUpdatedFlagInROOT"); - boolean metaUpdated = - MetaMigrationRemovingHTD.isMetaHRIUpdated(miniHBaseCluster.getMaster()); - assertEquals(true, metaUpdated); - LOG.info("END testMetaUpdatedFlagInROOT"); - } - - @Test - public void testMetaMigration() throws Exception { - LOG.info("Starting testMetaWithLegacyHRI"); - final byte[] FAMILY = Bytes.toBytes("family"); - HTableDescriptor htd = new HTableDescriptor("testMetaMigration"); - HColumnDescriptor hcd = new HColumnDescriptor(FAMILY); - htd.addFamily(hcd); - Configuration conf = TEST_UTIL.getConfiguration(); - TEST_UTIL.createMultiRegionsWithLegacyHRI(conf, htd, FAMILY, - new byte[][]{ - HConstants.EMPTY_START_ROW, - Bytes.toBytes("region_a"), - Bytes.toBytes("region_b")}); - CatalogTracker ct = miniHBaseCluster.getMaster().getCatalogTracker(); - // just for this test set it to false. 
- MetaMigrationRemovingHTD.updateRootWithMetaMigrationStatus(ct, false); - MetaReader.fullScanMetaAndPrint(ct); - LOG.info("Meta Print completed.testUpdatesOnMetaWithLegacyHRI"); - - List htds = MetaMigrationRemovingHTD.updateMetaWithNewRegionInfo( - TEST_UTIL.getHBaseCluster().getMaster()); - MetaReader.fullScanMetaAndPrint(ct); - assertEquals(3, htds.size()); - // Assert that the flag in ROOT is updated to reflect the correct status - boolean metaUpdated = - MetaMigrationRemovingHTD.isMetaHRIUpdated(miniHBaseCluster.getMaster()); - assertEquals(true, metaUpdated); - LOG.info("END testMetaWithLegacyHRI"); - - } - - /** - * This test assumes a master crash/failure during the meta migration process - * and attempts to continue the meta migration process when a new master takes over. - * When a master dies during the meta migration we will have some rows of - * META.CatalogFamily updated with new HRI, (i.e HRI with out HTD) and some - * still hanging with legacy HRI. (i.e HRI with HTD). When the backup master/ or - * fresh start of master attempts the migration it will encouter some rows of META - * already updated with new HRI and some still legacy. This test will simulate this - * scenario and validates that the migration process can safely skip the updated - * rows and migrate any pending rows at startup. - * @throws Exception - */ - @Test - public void testMasterCrashDuringMetaMigration() throws Exception { - LOG.info("Starting testMasterCrashDuringMetaMigration"); - final byte[] FAMILY = Bytes.toBytes("family"); - HTableDescriptor htd = new HTableDescriptor("testMasterCrashDuringMetaMigration"); - HColumnDescriptor hcd = new HColumnDescriptor(FAMILY); - htd.addFamily(hcd); - Configuration conf = TEST_UTIL.getConfiguration(); - // Create 10 New regions. - TEST_UTIL.createMultiRegionsWithNewHRI(conf, htd, FAMILY, 10); - // Create 10 Legacy regions. - TEST_UTIL.createMultiRegionsWithLegacyHRI(conf, htd, FAMILY, 10); - CatalogTracker ct = miniHBaseCluster.getMaster().getCatalogTracker(); - // just for this test set it to false. 
- MetaMigrationRemovingHTD.updateRootWithMetaMigrationStatus(ct, false); - //MetaReader.fullScanMetaAndPrint(ct); - LOG.info("MEta Print completed.testUpdatesOnMetaWithLegacyHRI"); - - List htds = MetaMigrationRemovingHTD.updateMetaWithNewRegionInfo( - TEST_UTIL.getHBaseCluster().getMaster()); - assertEquals(10, htds.size()); - // Assert that the flag in ROOT is updated to reflect the correct status - boolean metaUpdated = - MetaMigrationRemovingHTD.isMetaHRIUpdated(miniHBaseCluster.getMaster()); - assertEquals(true, metaUpdated); - LOG.info("END testMetaWithLegacyHRI"); - - } - - public static void assertEquals(int expected, - int actual) { - if (expected != actual) { - throw new AssertionFailedError("expected:<" + - expected + "> but was:<" + - actual + ">"); - } - } - - public static void assertEquals(boolean expected, - boolean actual) { - if (expected != actual) { - throw new AssertionFailedError("expected:<" + - expected + "> but was:<" + - actual + ">"); - } - } - - - -} diff --git src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java index 1c33e90..37c7359 100644 --- src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java +++ src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java @@ -118,7 +118,6 @@ public class TestMultipleTimestamps { @Test public void testReseeksWithMultipleColumnOneTimestamp() throws IOException { - LOG.info("testReseeksWithMultipleColumnOneTimestamp"); byte [] TABLE = Bytes.toBytes("testReseeksWithMultiple" + "ColumnOneTimestamps"); byte [] FAMILY = Bytes.toBytes("event_log"); @@ -156,8 +155,6 @@ public class TestMultipleTimestamps { @Test public void testReseeksWithMultipleColumnMultipleTimestamp() throws IOException { - LOG.info("testReseeksWithMultipleColumnMultipleTimestamp"); - byte [] TABLE = Bytes.toBytes("testReseeksWithMultiple" + "ColumnMiltipleTimestamps"); byte [] FAMILY = Bytes.toBytes("event_log"); @@ -200,7 +197,6 @@ public class TestMultipleTimestamps { @Test public void testReseeksWithMultipleFiles() throws IOException { - LOG.info("testReseeksWithMultipleFiles"); byte [] TABLE = Bytes.toBytes("testReseeksWithMultipleFiles"); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; @@ -266,12 +262,8 @@ public class TestMultipleTimestamps { } public void testWithVersionDeletes(boolean flushTables) throws IOException { - LOG.info("testWithVersionDeletes_"+ - (flushTables ? "flush" : "noflush")); - byte [] TABLE = Bytes.toBytes("testWithVersionDeletes_" + (flushTables ? 
"flush" : "noflush")); - byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; @@ -300,8 +292,6 @@ public class TestMultipleTimestamps { @Test public void testWithMultipleVersionDeletes() throws IOException { - LOG.info("testWithMultipleVersionDeletes"); - byte [] TABLE = Bytes.toBytes("testWithMultipleVersionDeletes"); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; diff --git src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java index c33c2ef..fbeb854 100644 --- src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java +++ src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java @@ -22,6 +22,8 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; +import java.io.IOException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -90,7 +92,6 @@ public class TestScannerTimeout { */ @Test(timeout=300000) public void test2481() throws Exception { - LOG.info("START ************ test2481"); Scan scan = new Scan(); HTable table = new HTable(new Configuration(TEST_UTIL.getConfiguration()), TABLE_NAME); @@ -111,7 +112,6 @@ public class TestScannerTimeout { return; } fail("We should be timing out"); - LOG.info("END ************ test2481"); } /** @@ -121,7 +121,6 @@ public class TestScannerTimeout { */ @Test(timeout=300000) public void test2772() throws Exception { - LOG.info("START************ test2772"); HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME); Scan scan = new Scan(); // Set a very high timeout, we want to test what happens when a RS @@ -138,8 +137,6 @@ public class TestScannerTimeout { Result[] results = r.next(NB_ROWS); assertEquals(NB_ROWS, results.length); r.close(); - LOG.info("END ************ test2772"); - } /** @@ -149,24 +146,25 @@ public class TestScannerTimeout { */ @Test(timeout=300000) public void test3686a() throws Exception { - LOG.info("START ************ TEST3686A---1"); HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME); - LOG.info("START ************ TEST3686A---1111"); - Scan scan = new Scan(); scan.setCaching(SCANNER_CACHING); LOG.info("************ TEST3686A"); - MetaReader.fullScanMetaAndPrint(TEST_UTIL.getHBaseCluster().getMaster().getCatalogTracker()); + MetaReader.fullScan(TEST_UTIL.getHBaseCluster().getMaster().getCatalogTracker(), + new MetaReader.Visitor() { + + @Override + public boolean visit(Result r) throws IOException { + LOG.info("result: " + r); + return true; + } + }); HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE_NAME); LOG.info("START ************ TEST3686A---22"); ResultScanner r = table.getScanner(scan); - LOG.info("START ************ TEST3686A---33"); - int count = 1; r.next(); - LOG.info("START ************ TEST3686A---44"); - // Kill after one call to next(), which got 5 rows. 
rs.abort("die!"); while(r.next() != null) { @@ -174,7 +172,6 @@ public class TestScannerTimeout { } assertEquals(NB_ROWS, count); r.close(); - LOG.info("************ END TEST3686A"); } /** @@ -185,7 +182,6 @@ public class TestScannerTimeout { */ @Test(timeout=300000) public void test3686b() throws Exception { - LOG.info("START ************ test3686b"); HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME); Scan scan = new Scan(); scan.setCaching(SCANNER_CACHING); @@ -207,7 +203,5 @@ public class TestScannerTimeout { } assertEquals(NB_ROWS, count); r.close(); - LOG.info("END ************ END test3686b"); - } } diff --git src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java index fbfdd83..392fe27 100644 --- src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java +++ src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java @@ -245,10 +245,9 @@ public class TestCoprocessorInterface extends HBaseTestCase { HRegion reopenRegion(final HRegion closedRegion, Class implClass) throws IOException { - //HRegionInfo info = new HRegionInfo(tableName, null, null, false); - HRegion r = new HRegion(closedRegion.getTableDir(), closedRegion.getLog(), + HRegion r = new HRegion(closedRegion.getRegionDir(), closedRegion.getLog(), closedRegion.getFilesystem(), closedRegion.getConf(), - closedRegion.getRegionInfo(), closedRegion.getTableDesc(), null); + closedRegion.getRegionInfo(), null); r.initialize(); // this following piece is a hack. currently a coprocessorHost @@ -277,9 +276,9 @@ public class TestCoprocessorInterface extends HBaseTestCase { for(byte [] family : families) { htd.addFamily(new HColumnDescriptor(family)); } - HRegionInfo info = new HRegionInfo(tableName, null, null, false); + HRegionInfo info = new HRegionInfo(htd, null, null, false); Path path = new Path(DIR + callingMethod); - HRegion r = HRegion.createHRegion(info, path, conf, htd); + HRegion r = HRegion.createHRegion(info, path, conf); // this following piece is a hack. RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf); diff --git src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index 1f4e02d..0c53f39 100644 --- src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -385,7 +385,7 @@ public class TestRegionObserverInterface { try { for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) { for (HRegionInfo r : t.getRegionServer().getOnlineRegions()) { - if (!Arrays.equals(r.getTableName(), tableName)) { + if (!Arrays.equals(r.getTableDesc().getName(), tableName)) { continue; } RegionCoprocessorHost cph = t.getRegionServer().getOnlineRegion(r.getRegionName()). 
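
Aside from the patch itself: the coprocessor and filter hunks on either side of this point all toggle between the same two API shapes, an HRegionInfo built from an HTableDescriptor (the pre-HBASE-451 shape this revert restores) versus one built from a bare table name with the HTD passed alongside. Below is a minimal reference sketch of both sides, using only constructor and factory calls that appear verbatim in the hunks; the class name, table name, and paths are illustrative, and only one branch compiles against a given tree.

// Illustrative sketch, not part of the patch. Only one branch compiles
// against a given tree; "sketch"/"f" and RegionCreationSketch are made up.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;

public class RegionCreationSketch {
  static HRegion createTestRegion(Configuration conf, Path dir) throws Exception {
    HTableDescriptor htd = new HTableDescriptor("sketch");
    htd.addFamily(new HColumnDescriptor("f"));

    // Pre-HBASE-451 shape (what this revert restores): the HRI carries the HTD.
    HRegionInfo info = new HRegionInfo(htd, null, null, false);
    return HRegion.createHRegion(info, dir, conf);

    // Post-HBASE-451 shape (what is being reverted): the HRI holds only the
    // table name, and the HTD is threaded through separately.
    // HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
    // return HRegion.createHRegion(info, dir, conf, htd);
  }
}
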
diff --git src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java index 96429c7..f500cfe 100644 --- src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java +++ src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java @@ -95,9 +95,9 @@ public class TestRegionObserverStacking extends TestCase { for(byte [] family : families) { htd.addFamily(new HColumnDescriptor(family)); } - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd, null, null, false); Path path = new Path(DIR + callingMethod); - HRegion r = HRegion.createHRegion(info, path, conf, htd); + HRegion r = HRegion.createHRegion(info, path, conf); // this following piece is a hack. currently a coprocessorHost // is secretly loaded at OpenRegionHandler. we don't really // start a region server here, so just manually create cphost diff --git src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index 8d94755..36f4b40 100644 --- src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -138,12 +138,7 @@ public class TestWALObserver { */ @Test public void testWALObserverWriteToWAL() throws Exception { - HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(TEST_TABLE)); - final HTableDescriptor htd = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE)); - HRegion region2 = HRegion.createHRegion(hri, - hbaseRootDir, this.conf, htd); - Path basedir = new Path(this.hbaseRootDir, Bytes.toString(TEST_TABLE)); deleteDir(basedir); fs.mkdirs(new Path(basedir, hri.getEncodedName())); @@ -195,7 +190,7 @@ public class TestWALObserver { // it's where WAL write cp should occur. long now = EnvironmentEdgeManager.currentTimeMillis(); - log.append(hri, hri.getTableName(), edit, now, htd); + log.append(hri, hri.getTableDesc().getName(), edit, now); // the edit shall have been change now by the coprocessor. foundFamily0 = false; @@ -226,25 +221,16 @@ public class TestWALObserver { * Test WAL replay behavior with WALObserver. 
*/ @Test - public void testWALCoprocessorReplay() throws Exception { + public void testWALObserverReplay() throws Exception { // WAL replay is handled at HRegion::replayRecoveredEdits(), which is // ultimately called by HRegion::initialize() byte[] tableName = Bytes.toBytes("testWALCoprocessorReplay"); - final HTableDescriptor htd = getBasic3FamilyHTableDescriptor(Bytes.toString(tableName)); - //final HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(tableName)); - //final HRegionInfo hri1 = createBasic3FamilyHRegionInfo(Bytes.toString(tableName)); - final HRegionInfo hri = new HRegionInfo(tableName, null, null); + final HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(tableName)); final Path basedir = new Path(this.hbaseRootDir, Bytes.toString(tableName)); deleteDir(basedir); fs.mkdirs(new Path(basedir, hri.getEncodedName())); - final Configuration newConf = HBaseConfiguration.create(this.conf); - - HRegion region2 = HRegion.createHRegion(hri, - hbaseRootDir, newConf,htd); - - //HLog wal = new HLog(this.fs, this.dir, this.oldLogDir, this.conf); HLog wal = createWAL(this.conf); //Put p = creatPutWith2Families(TEST_ROW); @@ -252,46 +238,40 @@ public class TestWALObserver { long now = EnvironmentEdgeManager.currentTimeMillis(); //addFamilyMapToWALEdit(p.getFamilyMap(), edit); final int countPerFamily = 1000; - //for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) { - for (HColumnDescriptor hcd: htd.getFamilies()) { - //addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily, - //EnvironmentEdgeManager.getDelegate(), wal); + for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) { addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily, - EnvironmentEdgeManager.getDelegate(), wal, htd); + EnvironmentEdgeManager.getDelegate(), wal); } - wal.append(hri, tableName, edit, now, htd); + wal.append(hri, tableName, edit, now); // sync to fs. wal.sync(); + final Configuration newConf = HBaseConfiguration.create(this.conf); User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime"); user.runAs(new PrivilegedExceptionAction() { public Object run() throws Exception { - Path p = runWALSplit(newConf); - LOG.info("WALSplit path == " + p); + runWALSplit(newConf); FileSystem newFS = FileSystem.get(newConf); // Make a new wal for new region open. HLog wal2 = createWAL(newConf); - Path tableDir = - HTableDescriptor.getTableDir(hbaseRootDir, hri.getTableName()); - HRegion region = new HRegion(tableDir, wal2, FileSystem.get(newConf), - newConf, hri, htd, TEST_UTIL.getHBaseCluster().getRegionServer(0)); + HRegion region2 = new HRegion(basedir, wal2, FileSystem.get(newConf), + newConf, hri, TEST_UTIL.getHBaseCluster().getRegionServer(0)); + long seqid2 = region2.initialize(); - long seqid2 = region.initialize(); SampleRegionWALObserver cp2 = - (SampleRegionWALObserver)region.getCoprocessorHost().findCoprocessor( + (SampleRegionWALObserver)region2.getCoprocessorHost().findCoprocessor( SampleRegionWALObserver.class.getName()); // TODO: asserting here is problematic. assertNotNull(cp2); assertTrue(cp2.isPreWALRestoreCalled()); assertTrue(cp2.isPostWALRestoreCalled()); - region.close(); + region2.close(); wal2.closeAndDelete(); return null; } }); } - /** * Test to see CP loaded successfully or not. 
There is a duplication * at TestHLog, but the purpose of that one is to see whether the loaded @@ -321,7 +301,7 @@ public class TestWALObserver { HColumnDescriptor a = new HColumnDescriptor(TEST_FAMILY[i]); htd.addFamily(a); } - return new HRegionInfo(htd.getName(), null, null, false); + return new HRegionInfo(htd, null, null, false); } /* @@ -376,7 +356,7 @@ public class TestWALObserver { } private void addWALEdits (final byte [] tableName, final HRegionInfo hri, final byte [] rowName, final byte [] family, - final int count, EnvironmentEdge ee, final HLog wal, final HTableDescriptor htd) + final int count, EnvironmentEdge ee, final HLog wal) throws IOException { String familyStr = Bytes.toString(family); for (int j = 0; j < count; j++) { @@ -385,30 +365,8 @@ public class TestWALObserver { WALEdit edit = new WALEdit(); edit.add(new KeyValue(rowName, family, qualifierBytes, ee.currentTimeMillis(), columnBytes)); - wal.append(hri, tableName, edit, ee.currentTimeMillis(), htd); + wal.append(hri, tableName, edit, ee.currentTimeMillis()); } } - private HTableDescriptor getBasic3FamilyHTableDescriptor( - final String tableName) { - HTableDescriptor htd = new HTableDescriptor(tableName); - - for (int i = 0; i < TEST_FAMILY.length; i++ ) { - HColumnDescriptor a = new HColumnDescriptor(TEST_FAMILY[i]); - htd.addFamily(a); - } - return htd; - } - - private HTableDescriptor createBasic3FamilyHTD(final String tableName) { - HTableDescriptor htd = new HTableDescriptor(tableName); - HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a")); - htd.addFamily(a); - HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b")); - htd.addFamily(b); - HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c")); - htd.addFamily(c); - return htd; - } - } diff --git src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java index 6d28910..e1eb02a 100644 --- src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java +++ src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java @@ -33,9 +33,9 @@ public class TestColumnPrefixFilter { String family = "Family"; HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter"); htd.addFamily(new HColumnDescriptor(family)); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd, null, null, false); HRegion region = HRegion.createHRegion(info, HBaseTestingUtility. - getTestDir(), TEST_UTIL.getConfiguration(), htd); + getTestDir(), TEST_UTIL.getConfiguration()); List rows = generateRandomWords(100, "row"); List columns = generateRandomWords(10000, "column"); @@ -89,9 +89,9 @@ public class TestColumnPrefixFilter { String family = "Family"; HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter"); htd.addFamily(new HColumnDescriptor(family)); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd, null, null, false); HRegion region = HRegion.createHRegion(info, HBaseTestingUtility. 
- getTestDir(), TEST_UTIL.getConfiguration(), htd); + getTestDir(), TEST_UTIL.getConfiguration()); List rows = generateRandomWords(100, "row"); List columns = generateRandomWords(10000, "column"); diff --git src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java index 7e3e731..04705c3 100644 --- src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java +++ src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java @@ -75,9 +75,8 @@ public class TestDependentColumnFilter extends TestCase { HTableDescriptor htd = new HTableDescriptor(getName()); htd.addFamily(new HColumnDescriptor(FAMILIES[0])); htd.addFamily(new HColumnDescriptor(FAMILIES[1])); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); - this.region = HRegion.createHRegion(info, testUtil.getTestDir(), - testUtil.getConfiguration(), htd); + HRegionInfo info = new HRegionInfo(htd, null, null, false); + this.region = HRegion.createHRegion(info, testUtil.getTestDir(), testUtil.getConfiguration()); addData(); } diff --git src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java index 09a785d..bfa3c72 100644 --- src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java +++ src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java @@ -92,8 +92,8 @@ public class TestFilter extends HBaseTestCase { HTableDescriptor htd = new HTableDescriptor(getName()); htd.addFamily(new HColumnDescriptor(FAMILIES[0])); htd.addFamily(new HColumnDescriptor(FAMILIES[1])); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); - this.region = HRegion.createHRegion(info, this.testDir, this.conf, htd); + HRegionInfo info = new HRegionInfo(htd, null, null, false); + this.region = HRegion.createHRegion(info, this.testDir, this.conf); // Insert first half for(byte [] ROW : ROWS_ONE) { diff --git src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java index 527d141..6213039 100644 --- src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java +++ src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java @@ -51,9 +51,9 @@ public class TestMultipleColumnPrefixFilter { HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter"); htd.addFamily(new HColumnDescriptor(family)); // HRegionInfo info = new HRegionInfo(htd, null, null, false); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd, null, null, false); HRegion region = HRegion.createHRegion(info, HBaseTestingUtility. - getTestDir(), TEST_UTIL.getConfiguration(), htd); + getTestDir(), TEST_UTIL.getConfiguration()); List rows = generateRandomWords(100, "row"); List columns = generateRandomWords(10000, "column"); @@ -110,9 +110,9 @@ public class TestMultipleColumnPrefixFilter { HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter"); htd.addFamily(new HColumnDescriptor(family1)); htd.addFamily(new HColumnDescriptor(family2)); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd, null, null, false); HRegion region = HRegion.createHRegion(info, HBaseTestingUtility. 
- getTestDir(), TEST_UTIL.getConfiguration(), htd); + getTestDir(), TEST_UTIL.getConfiguration()); List rows = generateRandomWords(100, "row"); List columns = generateRandomWords(10000, "column"); @@ -173,9 +173,9 @@ public class TestMultipleColumnPrefixFilter { String family = "Family"; HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter"); htd.addFamily(new HColumnDescriptor(family)); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd, null, null, false); HRegion region = HRegion.createHRegion(info, HBaseTestingUtility. - getTestDir(), TEST_UTIL.getConfiguration(),htd); + getTestDir(), TEST_UTIL.getConfiguration()); List rows = generateRandomWords(100, "row"); List columns = generateRandomWords(10000, "column"); diff --git src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 2f20596..f9084eb 100644 --- src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -24,7 +24,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; -import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -44,12 +43,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.catalog.CatalogTracker; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; -import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.io.Reference; @@ -64,27 +58,20 @@ import org.mockito.Mockito; public class TestCatalogJanitor { /** * Pseudo server for below tests. - * Be sure to call stop on the way out else could leave some mess around. */ class MockServer implements Server { - private final HConnection connection; private final Configuration c; private final CatalogTracker ct; MockServer(final HBaseTestingUtility htu) - throws NotAllMetaRegionsOnlineException, IOException, InterruptedException { + throws NotAllMetaRegionsOnlineException, IOException { this.c = htu.getConfiguration(); - // Mock an HConnection and a HRegionInterface implementation. Have the - // HConnection return the HRI. Have the HRI return a few mocked up responses - // to make our test work. - this.connection = HConnectionTestingUtility.getMockedConnection(this.c); // Set hbase.rootdir into test dir. 
FileSystem fs = FileSystem.get(this.c); Path rootdir = fs.makeQualified(new Path(this.c.get(HConstants.HBASE_DIR))); this.c.set(HConstants.HBASE_DIR, rootdir.toString()); this.ct = Mockito.mock(CatalogTracker.class); HRegionInterface hri = Mockito.mock(HRegionInterface.class); - Mockito.when(this.ct.getConnection()).thenReturn(this.connection); Mockito.when(ct.waitForMetaServerConnectionDefault()).thenReturn(hri); } @@ -125,13 +112,9 @@ public class TestCatalogJanitor { @Override public void stop(String why) { - if (this.ct != null) { - this.ct.stop(); - } - if (this.connection != null) { - HConnectionManager.deleteConnection(this.connection.getConfiguration(), true); - } + //no-op } + } /** @@ -139,11 +122,9 @@ public class TestCatalogJanitor { */ class MockMasterServices implements MasterServices { private final MasterFileSystem mfs; - private final AssignmentManager asm; MockMasterServices(final Server server) throws IOException { - this.mfs = new MasterFileSystem(server, this, null); - this.asm = Mockito.mock(AssignmentManager.class); + this.mfs = new MasterFileSystem(server, null); } @Override @@ -159,7 +140,7 @@ public class TestCatalogJanitor { @Override public AssignmentManager getAssignmentManager() { - return this.asm; + return null; } @Override @@ -216,41 +197,6 @@ public class TestCatalogJanitor { public boolean isStopped() { return false; } - - @Override - public TableDescriptors getTableDescriptors() { - return new TableDescriptors() { - @Override - public HTableDescriptor remove(String tablename) throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override - public Map getAll() throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override - public HTableDescriptor get(byte[] tablename) - throws TableExistsException, FileNotFoundException, IOException { - return get(Bytes.toString(tablename)); - } - - @Override - public HTableDescriptor get(String tablename) - throws TableExistsException, FileNotFoundException, IOException { - return createHTableDescriptor(); - } - - @Override - public void add(HTableDescriptor htd) throws IOException { - // TODO Auto-generated method stub - - } - }; - } } @Test @@ -281,53 +227,39 @@ public class TestCatalogJanitor { } @Test - public void testCleanParent() throws IOException, InterruptedException { + public void testCleanParent() throws IOException { HBaseTestingUtility htu = new HBaseTestingUtility(); setRootDirAndCleanIt(htu, "testCleanParent"); Server server = new MockServer(htu); - try { - MasterServices services = new MockMasterServices(server); - CatalogJanitor janitor = new CatalogJanitor(server, services); - // Create regions. - HTableDescriptor htd = new HTableDescriptor("table"); - htd.addFamily(new HColumnDescriptor("f")); - HRegionInfo parent = - new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), - Bytes.toBytes("eee")); - HRegionInfo splita = - new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), - Bytes.toBytes("ccc")); - HRegionInfo splitb = - new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), - Bytes.toBytes("eee")); - // Test that when both daughter regions are in place, that we do not - // remove the parent. 
- List kvs = new ArrayList(); - kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY, - HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita))); - kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY, - HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb))); - Result r = new Result(kvs); - // Add a reference under splitA directory so we don't clear out the parent. - Path rootdir = services.getMasterFileSystem().getRootDir(); - Path tabledir = - HTableDescriptor.getTableDir(rootdir, htd.getName()); - Path storedir = Store.getStoreHomedir(tabledir, splita.getEncodedName(), - htd.getColumnFamilies()[0].getName()); - Reference ref = new Reference(Bytes.toBytes("ccc"), Reference.Range.top); - long now = System.currentTimeMillis(); - // Reference name has this format: StoreFile#REF_NAME_PARSER - Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName()); - FileSystem fs = services.getMasterFileSystem().getFileSystem(); - Path path = ref.write(fs, p); - assertTrue(fs.exists(path)); - assertFalse(janitor.cleanParent(parent, r)); - // Remove the reference file and try again. - assertTrue(fs.delete(p, true)); - assertTrue(janitor.cleanParent(parent, r)); - } finally { - server.stop("shutdown"); - } + MasterServices services = new MockMasterServices(server); + CatalogJanitor janitor = new CatalogJanitor(server, services); + // Create regions. + HTableDescriptor htd = createHTableDescriptor(); + HRegionInfo parent = + new HRegionInfo(htd, Bytes.toBytes("aaa"), Bytes.toBytes("eee")); + HRegionInfo splita = + new HRegionInfo(htd, Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); + HRegionInfo splitb = + new HRegionInfo(htd, Bytes.toBytes("ccc"), Bytes.toBytes("eee")); + // Test that when both daughter regions are in place, that we do not + // remove the parent. + Result r = createResult(parent, splita, splitb); + // Add a reference under splitA directory so we don't clear out the parent. + Path rootdir = services.getMasterFileSystem().getRootDir(); + Path tabledir = + HTableDescriptor.getTableDir(rootdir, htd.getName()); + Path storedir = Store.getStoreHomedir(tabledir, splita.getEncodedName(), + htd.getColumnFamilies()[0].getName()); + Reference ref = new Reference(Bytes.toBytes("ccc"), Reference.Range.top); + long now = System.currentTimeMillis(); + // Reference name has this format: StoreFile#REF_NAME_PARSER + Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName()); + FileSystem fs = services.getMasterFileSystem().getFileSystem(); + ref.write(fs, p); + assertFalse(janitor.cleanParent(parent, r)); + // Remove the reference file and try again. + assertTrue(fs.delete(p, true)); + assertTrue(janitor.cleanParent(parent, r)); } /** @@ -348,30 +280,30 @@ public class TestCatalogJanitor { // Create regions: aaa->eee, aaa->ccc, aaa->bbb, bbb->ccc, etc. // Parent - HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), + HRegionInfo parent = new HRegionInfo(htd, Bytes.toBytes("aaa"), Bytes.toBytes("eee")); // Sleep a second else the encoded name on these regions comes out // same for all with same start key and made in same second. Thread.sleep(1001); // Daughter a - HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), + HRegionInfo splita = new HRegionInfo(htd, Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); Thread.sleep(1001); // Make daughters of daughter a; splitaa and splitab. 
-  HRegionInfo splitaa = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
+  HRegionInfo splitaa = new HRegionInfo(htd, Bytes.toBytes("aaa"),
      Bytes.toBytes("bbb"));
-  HRegionInfo splitab = new HRegionInfo(htd.getName(), Bytes.toBytes("bbb"),
+  HRegionInfo splitab = new HRegionInfo(htd, Bytes.toBytes("bbb"),
      Bytes.toBytes("ccc"));
   // Daughter b
-  HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
+  HRegionInfo splitb = new HRegionInfo(htd, Bytes.toBytes("ccc"),
      Bytes.toBytes("eee"));
   Thread.sleep(1001);
   // Make Daughters of daughterb; splitba and splitbb.
-  HRegionInfo splitba = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
+  HRegionInfo splitba = new HRegionInfo(htd, Bytes.toBytes("ccc"),
      Bytes.toBytes("ddd"));
-  HRegionInfo splitbb = new HRegionInfo(htd.getName(), Bytes.toBytes("ddd"),
+  HRegionInfo splitbb = new HRegionInfo(htd, Bytes.toBytes("ddd"),
      Bytes.toBytes("eee"));

   // First test that our Comparator works right up in CatalogJanitor.
diff --git src/test/java/org/apache/hadoop/hbase/master/TestDefaultLoadBalancer.java src/test/java/org/apache/hadoop/hbase/master/TestDefaultLoadBalancer.java
index 2b2c7e1..7a7168e 100644
--- src/test/java/org/apache/hadoop/hbase/master/TestDefaultLoadBalancer.java
+++ src/test/java/org/apache/hadoop/hbase/master/TestDefaultLoadBalancer.java
@@ -39,6 +39,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.BeforeClass;
@@ -464,7 +465,7 @@ public class TestDefaultLoadBalancer {
       Bytes.putInt(start, 0, numRegions << 1);
       Bytes.putInt(end, 0, (numRegions << 1) + 1);
       HRegionInfo hri = new HRegionInfo(
-          Bytes.toBytes("table" + i), start, end,
+          new HTableDescriptor(Bytes.toBytes("table" + i)), start, end,
           false, regionId++);
       regions.add(hri);
     }
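
The hunks above all swap the name-based HRegionInfo constructor for one that takes the HTableDescriptor itself, which is the pre-HBASE-451 shape this commit restores. A minimal sketch of that usage, assuming the 0.90-style API shown in these hunks (the table name, keys, and class name are hypothetical):

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionInfoSketch {
      public static void main(String[] args) {
        HTableDescriptor htd = new HTableDescriptor("table0");
        // Three-argument form: the descriptor rides along with the region,
        // so the schema is later reachable via hri.getTableDesc().
        HRegionInfo hri =
            new HRegionInfo(htd, Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
        // Five-argument form used by the balancer test: split flag and region id.
        HRegionInfo hri2 =
            new HRegionInfo(htd, Bytes.toBytes("aaa"), Bytes.toBytes("eee"), false, 1L);
        System.out.println(hri.getTableDesc().getNameAsString());
        System.out.println(hri2.getEncodedName());
      }
    }
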
diff --git src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index ac7ead2..318f1fd 100644
--- src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
@@ -99,9 +98,6 @@ public class TestDistributedLogSplitting {
     LOG.info("Waiting for active/ready master");
     cluster.waitForActiveAndReadyMaster();
     master = cluster.getMaster();
-    while (cluster.getLiveRegionServerThreads().size() < num_rs) {
-      Threads.sleep(1);
-    }
   }

   @After
@@ -115,7 +111,7 @@ public class TestDistributedLogSplitting {
     final int NUM_REGIONS_TO_CREATE = 40;
     final int NUM_ROWS_PER_REGION = 100;

-    startCluster(NUM_RS); // NUM_RS=6.
+    startCluster(NUM_RS);

     ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf,
         "distributed log splitting test", null);
@@ -305,7 +301,7 @@ public class TestDistributedLogSplitting {
     HTable ht = TEST_UTIL.createTable(table, family);
     int numRegions = TEST_UTIL.createMultiRegions(conf, ht, family, nrs);
     assertEquals(nrs, numRegions);
-    LOG.info("Waiting for no more RIT\n");
+    LOG.info("Waiting for no more RIT\n");
     blockUntilNoRIT(zkw, master);
     // disable-enable cycle to get rid of table's dead regions left behind
     // by createMultiRegions
@@ -357,7 +353,6 @@ public class TestDistributedLogSplitting {
       int num_edits, int edit_size) throws IOException {

     byte[] table = Bytes.toBytes(tname);
-    HTableDescriptor htd = new HTableDescriptor(tname);
     byte[] value = new byte[edit_size];
     for (int i = 0; i < edit_size; i++) {
       value[i] = (byte)('a' + (i % 26));
@@ -374,7 +369,7 @@ public class TestDistributedLogSplitting {
           System.currentTimeMillis(), value));
       // LOG.info("Region " + i + ": " + e);
       j++;
-      log.append(hris.get(j % n), table, e, System.currentTimeMillis(), htd);
+      log.append(hris.get(j % n), table, e, System.currentTimeMillis());
       counts[j % n] += 1;
       // if ((i % 8096) == 0) {
       //   log.sync();
diff --git src/test/java/org/apache/hadoop/hbase/master/TestMaster.java src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
index eaa47b9..f473c80 100644
--- src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
+++ src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
@@ -86,20 +86,21 @@ public class TestMaster {
         tableRegions.get(0).getFirst().getEndKey());

     // Now trigger a split and stop when the split is in progress
-    CountDownLatch split = new CountDownLatch(1);
+
+    CountDownLatch aboutToOpen = new CountDownLatch(1);
     CountDownLatch proceed = new CountDownLatch(1);
-    RegionSplitListener list = new RegionSplitListener(split, proceed);
+    RegionOpenListener list = new RegionOpenListener(aboutToOpen, proceed);
     cluster.getMaster().executorService.
-        registerListener(EventType.RS_ZK_REGION_SPLIT, list);
+        registerListener(EventType.RS_ZK_REGION_OPENED, list);

     LOG.info("Splitting table");
     admin.split(TABLENAME);
     LOG.info("Waiting for split result to be about to open");
-    split.await(60, TimeUnit.SECONDS);
+    aboutToOpen.await(60, TimeUnit.SECONDS);
     try {
       LOG.info("Making sure we can call getTableRegions while opening");
-      tableRegions = MetaReader.getTableRegionsAndLocations(m.getCatalogTracker(),
-          TABLENAME, false);
+      tableRegions = MetaReader.getTableRegionsAndLocations(
+          m.getCatalogTracker(), Bytes.toString(TABLENAME));

       LOG.info("Regions: " + Joiner.on(',').join(tableRegions));
       // We have three regions because one is split-in-progress
@@ -117,21 +118,22 @@ public class TestMaster {
     }
   }

-  static class RegionSplitListener implements EventHandlerListener {
-    CountDownLatch split, proceed;
+  static class RegionOpenListener implements EventHandlerListener {
+    CountDownLatch aboutToOpen, proceed;

-    public RegionSplitListener(CountDownLatch split, CountDownLatch proceed) {
-      this.split = split;
+    public RegionOpenListener(CountDownLatch aboutToOpen, CountDownLatch proceed)
+    {
+      this.aboutToOpen = aboutToOpen;
       this.proceed = proceed;
     }

     @Override
     public void afterProcess(EventHandler event) {
-      if (event.getEventType() != EventType.RS_ZK_REGION_SPLIT) {
+      if (event.getEventType() != EventType.RS_ZK_REGION_OPENED) {
         return;
       }
       try {
-        split.countDown();
+        aboutToOpen.countDown();
         proceed.await(60, TimeUnit.SECONDS);
       } catch (InterruptedException ie) {
         throw new RuntimeException(ie);
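
The TestMaster hunk above renames RegionSplitListener to RegionOpenListener and retargets it at RS_ZK_REGION_OPENED, but the underlying pattern is unchanged: an EventHandlerListener that signals one latch when the watched event fires and parks on a second latch until the test releases it. A generalized sketch of that pattern (the class name PauseOnEventListener is hypothetical):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.executor.EventHandler;
    import org.apache.hadoop.hbase.executor.EventHandler.EventHandlerListener;
    import org.apache.hadoop.hbase.executor.EventHandler.EventType;

    class PauseOnEventListener implements EventHandlerListener {
      final CountDownLatch seen;
      final CountDownLatch proceed;
      final EventType target;

      PauseOnEventListener(EventType target, CountDownLatch seen, CountDownLatch proceed) {
        this.target = target;
        this.seen = seen;
        this.proceed = proceed;
      }

      @Override
      public void beforeProcess(EventHandler event) {
        // No-op; we only gate after the event has been processed.
      }

      @Override
      public void afterProcess(EventHandler event) {
        if (event.getEventType() != target) return;
        seen.countDown();  // tell the test the event fired
        try {
          proceed.await(60, TimeUnit.SECONDS);  // hold until the test releases us
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    }
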
diff --git src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
index 49bcf02..92bd6ca 100644
--- src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
+++ src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;

+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
@@ -31,30 +32,29 @@ import java.util.TreeSet;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.executor.EventHandler.EventType;
 import org.apache.hadoop.hbase.executor.RegionTransitionData;
+import org.apache.hadoop.hbase.executor.EventHandler.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
+import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZKTable;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.junit.Test;

 public class TestMasterFailover {
@@ -135,7 +135,7 @@ public class TestMasterFailover {
     // Stop the cluster
     TEST_UTIL.shutdownMiniCluster();
   }
-
+
   @Test
   public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
       throws Exception {
@@ -352,26 +352,12 @@ public class TestMasterFailover {
     byte [] enabledTable = Bytes.toBytes("enabledTable");
     HTableDescriptor htdEnabled = new HTableDescriptor(enabledTable);
     htdEnabled.addFamily(new HColumnDescriptor(FAMILY));
-
-    FileSystem filesystem = FileSystem.get(conf);
-    Path rootdir = filesystem.makeQualified(
-        new Path(conf.get(HConstants.HBASE_DIR)));
-    // Write the .tableinfo
-    FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled);
-
-    HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null);
-    HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
-
     List enabledRegions = TEST_UTIL.createMultiRegionsInMeta(
         TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS);

     byte [] disabledTable = Bytes.toBytes("disabledTable");
     HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
     htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
-    // Write the .tableinfo
-    FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled);
-    HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
-    HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
     List disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
         TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);
@@ -677,26 +663,12 @@ public class TestMasterFailover {
     byte [] enabledTable = Bytes.toBytes("enabledTable");
     HTableDescriptor htdEnabled = new HTableDescriptor(enabledTable);
     htdEnabled.addFamily(new HColumnDescriptor(FAMILY));
-    FileSystem filesystem = FileSystem.get(conf);
-    Path rootdir = filesystem.makeQualified(
-        new Path(conf.get(HConstants.HBASE_DIR)));
-    // Write the .tableinfo
-    FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled);
-    HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(),
-        null, null);
-    HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
-
     List enabledRegions = TEST_UTIL.createMultiRegionsInMeta(
         TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS);

     byte [] disabledTable = Bytes.toBytes("disabledTable");
     HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
     htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
-    // Write the .tableinfo
-    FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled);
-    HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
-    HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
-
     List disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
         TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);
@@ -920,7 +892,7 @@ public class TestMasterFailover {
     log("Starting up a new master");
     master = cluster.startMaster().getMaster();
     log("Waiting for master to be ready");
-    assertTrue(cluster.waitForActiveAndReadyMaster());
+    cluster.waitForActiveAndReadyMaster();
     log("Master is ready");

     // Let's add some weird states to master in-memory state
@@ -980,12 +952,8 @@ public class TestMasterFailover {
     // Grab all the regions that are online across RSs
     Set onlineRegions = new TreeSet();
     for (JVMClusterUtil.RegionServerThread rst :
-        cluster.getRegionServerThreads()) {
-      try {
-        onlineRegions.addAll(rst.getRegionServer().getOnlineRegions());
-      } catch (org.apache.hadoop.hbase.regionserver.RegionServerStoppedException e) {
-        LOG.info("Got RegionServerStoppedException", e);
-      }
+        cluster.getRegionServerThreads()) {
+      onlineRegions.addAll(rst.getRegionServer().getOnlineRegions());
     }

     // Now, everything that should be online should be online
@@ -1010,4 +978,4 @@ public class TestMasterFailover {
   private void log(String string) {
     LOG.info("\n\n" + string + " \n\n");
   }
-}
\ No newline at end of file
+}
diff --git src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
index 5ceaac8..8866794 100644
--- src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
+++ src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
@@ -31,12 +31,14 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;

 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hbase.tmpl.master.AssignmentManagerStatusTmpl;
@@ -61,9 +63,9 @@ public class TestMasterStatusServlet {
       new ServerName("fakehost", 12345, 1234567890);
   static final HTableDescriptor FAKE_TABLE =
       new HTableDescriptor("mytable");
-  static final HRegionInfo FAKE_HRI =
-      new HRegionInfo(FAKE_TABLE.getName(), Bytes.toBytes("a"), Bytes.toBytes("b"));
-
+  static final HRegionInfo FAKE_REGION =
+      new HRegionInfo(FAKE_TABLE, Bytes.toBytes("a"), Bytes.toBytes("b"));
+
   @Before
   public void setupBasicMocks() {
     conf = HBaseConfiguration.create();
@@ -82,7 +84,7 @@ public class TestMasterStatusServlet {
     NavigableMap regionsInTransition = Maps.newTreeMap();
     regionsInTransition.put("r1",
-        new RegionState(FAKE_HRI, RegionState.State.CLOSING, 12345L, FAKE_HOST));
+        new RegionState(FAKE_REGION, RegionState.State.CLOSING, 12345L, FAKE_HOST));
     Mockito.doReturn(regionsInTransition).when(am).getRegionsInTransition();
     Mockito.doReturn(am).when(master).getAssignmentManager();
@@ -159,7 +161,7 @@ public class TestMasterStatusServlet {
     NavigableMap regionsInTransition = Maps.newTreeMap();
     for (byte i = 0; i < 100; i++) {
-      HRegionInfo hri = new HRegionInfo(FAKE_TABLE.getName(),
+      HRegionInfo hri = new HRegionInfo(FAKE_TABLE,
           new byte[]{i}, new byte[]{(byte) (i+1)});
       regionsInTransition.put(hri.getEncodedName(),
           new RegionState(hri, RegionState.State.CLOSING, 12345L, FAKE_HOST));
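
With FAKE_REGION now built directly from FAKE_TABLE, the RegionState entries in the servlet test carry their schema implicitly. A small sketch of assembling such a regions-in-transition map under the same assumptions (the class and variable names are hypothetical; the RegionState constructor shape is taken from the hunks above):

    import java.util.NavigableMap;
    import com.google.common.collect.Maps;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;

    public class RitMapSketch {
      static NavigableMap<String, RegionState> buildRit() {
        NavigableMap<String, RegionState> rit = Maps.newTreeMap();
        HTableDescriptor table = new HTableDescriptor("mytable");
        ServerName host = new ServerName("fakehost", 12345, 1234567890);
        for (byte i = 0; i < 10; i++) {
          // Each HRegionInfo carries the descriptor, as in the test above.
          HRegionInfo hri = new HRegionInfo(table,
              new byte[] { i }, new byte[] { (byte) (i + 1) });
          rit.put(hri.getEncodedName(),
              new RegionState(hri, RegionState.State.CLOSING, 12345L, host));
        }
        return rit;
      }
    }
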
"testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches"); - HRegionInfo hri = new HRegionInfo(htd.getName(), + HRegionInfo hri = new HRegionInfo(htd, Bytes.toBytes(testIndex), Bytes.toBytes(testIndex + 1)); HRegion region = HRegion.createHRegion(hri, HBaseTestingUtility - .getTestDir(), TEST_UTIL.getConfiguration(), htd); + .getTestDir(), TEST_UTIL.getConfiguration()); assertNotNull(region); AssignmentManager am = Mockito.mock(AssignmentManager.class); when(am.isRegionInTransition(hri)).thenReturn( diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java index c50c614..944f229 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java @@ -96,9 +96,9 @@ public class TestBlocksRead extends HBaseTestCase { HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); htd.addFamily(familyDesc); } - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd, null, null, false); Path path = new Path(DIR + callingMethod); - region = HRegion.createHRegion(info, path, conf, htd); + region = HRegion.createHRegion(info, path, conf); blockCache = new CacheConfig(conf).getBlockCache(); } diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java index aef6a21..e2f4507 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java @@ -68,10 +68,10 @@ public class TestColumnSeeking { HColumnDescriptor.DEFAULT_BLOOMFILTER); HTableDescriptor htd = new HTableDescriptor(table); htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false); + HRegionInfo info = new HRegionInfo(htd, null, null, false); HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.getTestDir(), TEST_UTIL - .getConfiguration(), htd); + .getConfiguration()); List rows = generateRandomWords(10, "row"); List allColumns = generateRandomWords(10, "column"); @@ -172,11 +172,10 @@ public class TestColumnSeeking { HTableDescriptor htd = new HTableDescriptor(table); htd.addFamily(new HColumnDescriptor(family)); - - HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false); + HRegionInfo info = new HRegionInfo(htd, null, null, false); HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.getTestDir(), TEST_UTIL - .getConfiguration(), htd); + .getConfiguration()); List rows = generateRandomWords(10, "row"); List allColumns = generateRandomWords(100, "column"); diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java index ab777f7..e06a876 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java @@ -81,12 +81,9 @@ public class TestCompactSelection extends TestCase { HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes("table")); htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); - + HRegionInfo info = new HRegionInfo(htd, null, null, false); HLog hlog = new HLog(fs, logdir, oldLogDir, conf); - HRegion.createHRegion(info, basedir, conf, htd); - Path tableDir = new Path(basedir, 
-        Bytes.toString(htd.getName()));
-    HRegion region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
+    HRegion region = new HRegion(basedir, hlog, fs, conf, info, null);

     store = new Store(basedir, region, hcd, fs, conf);
     TEST_FILE = StoreFile.getRandomFilename(fs, store.getHomedir());
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index 9bed59e..3b7c7e8 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -72,17 +72,17 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
     Path rootdir = filesystem.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
     filesystem.mkdirs(rootdir);
     // Up flush size else we bind up when we use default catalog flush of 16k.
-    HTableDescriptor.META_TABLEDESC.setMemStoreFlushSize(64 * 1024 * 1024);
-
+    HRegionInfo.FIRST_META_REGIONINFO.getTableDesc().
+        setMemStoreFlushSize(64 * 1024 * 1024);
     HRegion mr = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
-        rootdir, this.conf, HTableDescriptor.META_TABLEDESC);
+        rootdir, this.conf);
     // Write rows for three tables 'A', 'B', and 'C'.
     for (char c = 'A'; c < 'D'; c++) {
       HTableDescriptor htd = new HTableDescriptor("" + c);
       final int last = 128;
       final int interval = 2;
       for (int i = 0; i <= last; i += interval) {
-        HRegionInfo hri = new HRegionInfo(htd.getName(),
+        HRegionInfo hri = new HRegionInfo(htd,
            i == 0? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i),
            i == last? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i + interval));
         Put put = new Put(hri.getRegionName());
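
Throughout these region server tests the four-argument HRegion.createHRegion(info, path, conf, htd) collapses to three arguments, since the descriptor now travels inside the HRegionInfo. A minimal sketch of that call shape under the same assumption (the path and table layout here are hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.HRegion;

    public class CreateRegionSketch {
      static HRegion createTestRegion() throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HTableDescriptor htd = new HTableDescriptor("t");
        htd.addFamily(new HColumnDescriptor("f"));
        // Schema is read from info.getTableDesc(); no separate htd argument.
        HRegionInfo info = new HRegionInfo(htd, null, null, false);
        return HRegion.createHRegion(info, new Path("/tmp/hbase-test"), conf);
      }
    }
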
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index ec3bf55..bcde614 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -2869,9 +2869,9 @@ public class TestHRegion extends HBaseTestCase {

     HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(hcd);
-    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
+    HRegionInfo info = new HRegionInfo(htd, null, null, false);
     Path path = new Path(DIR + "testBloomFilterSize");
-    region = HRegion.createHRegion(info, path, conf, htd);
+    region = HRegion.createHRegion(info, path, conf);

     int num_unique_rows = 10;
     int duplicate_multiplier =2;
@@ -2927,9 +2927,9 @@ public class TestHRegion extends HBaseTestCase {
         HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
     HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(hcd);
-    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
+    HRegionInfo info = new HRegionInfo(htd, null, null, false);
     Path path = new Path(DIR + "testAllColumnsWithBloomFilter");
-    region = HRegion.createHRegion(info, path, conf, htd);
+    region = HRegion.createHRegion(info, path, conf);

     // For row:0, col:0: insert versions 1 through 5.
     byte row[] = Bytes.toBytes("row:" + 0);
@@ -2972,9 +2972,9 @@ public class TestHRegion extends HBaseTestCase {

     HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(hcd);
-    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
+    HRegionInfo info = new HRegionInfo(htd, null, null, false);
     Path path = new Path(DIR + "TestDeleteRowWithBloomFilter");
-    region = HRegion.createHRegion(info, path, conf, htd);
+    region = HRegion.createHRegion(info, path, conf);

     // Insert some data
     byte row[] = Bytes.toBytes("row1");
@@ -3172,14 +3172,14 @@ public class TestHRegion extends HBaseTestCase {
     for(byte [] family : families) {
       htd.addFamily(new HColumnDescriptor(family));
     }
-    HRegionInfo info = new HRegionInfo(htd.getName(), startKey, stopKey, false);
+    HRegionInfo info = new HRegionInfo(htd, startKey, stopKey, false);
     Path path = new Path(DIR + callingMethod);
     if (fs.exists(path)) {
       if (!fs.delete(path, true)) {
         throw new IOException("Failed delete of " + path);
       }
     }
-    region = HRegion.createHRegion(info, path, conf, htd);
+    region = HRegion.createHRegion(info, path, conf);
   }

   /**
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
index 6d83b00..516139b 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
@@ -19,22 +19,13 @@
  */
 package org.apache.hadoop.hbase.regionserver;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.MD5Hash;
+
 import org.junit.Test;
+import static org.junit.Assert.*;

 public class TestHRegionInfo {
   @Test
@@ -62,35 +53,10 @@ public class TestHRegionInfo {
   }

   @Test
-  public void testGetSetOfHTD() throws IOException {
-    HBaseTestingUtility HTU = new HBaseTestingUtility();
-    final String tablename = "testGetSetOfHTD";
-
-    // Delete the temporary table directory that might still be there from the
-    // previous test run.
-    FSUtils.deleteTableDescriptorIfExists(tablename,
-        HTU.getConfiguration());
-
-    HTableDescriptor htd = new HTableDescriptor(tablename);
-    FSUtils.createTableDescriptor(htd, HTU.getConfiguration());
-    HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testGetSetOfHTD"),
-        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
-    HTableDescriptor htd2 = hri.getTableDesc();
-    assertTrue(htd.equals(htd2));
-    final String key = "SOME_KEY";
-    assertNull(htd.getValue(key));
-    final String value = "VALUE";
-    htd.setValue(key, value);
-    hri.setTableDesc(htd);
-    HTableDescriptor htd3 = hri.getTableDesc();
-    assertTrue(htd.equals(htd3));
-  }
-
-  @Test
   public void testContainsRange() {
     HTableDescriptor tableDesc = new HTableDescriptor("testtable");
     HRegionInfo hri = new HRegionInfo(
-        tableDesc.getName(), Bytes.toBytes("a"), Bytes.toBytes("g"));
+        tableDesc, Bytes.toBytes("a"), Bytes.toBytes("g"));
     // Single row range at start of region
     assertTrue(hri.containsRange(Bytes.toBytes("a"), Bytes.toBytes("a")));
     // Fully contained range
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
index a0d8abf..43eaab0 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
@@ -280,10 +280,9 @@ public class TestMultiColumnScanner {
     HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(hcd);
     HRegionInfo info =
-        new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
+        new HRegionInfo(htd, null, null, false);
     HRegion region = HRegion.createHRegion(
-        info, HBaseTestingUtility.getTestDir(), TEST_UTIL.getConfiguration(),
-        htd);
+        info, HBaseTestingUtility.getTestDir(), TEST_UTIL.getConfiguration());
     return region;
   }
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java
index 66d6ee0..40d352e 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java
@@ -82,8 +82,8 @@ public class TestRSStatusServlet {
   public void testWithRegions() throws IOException {
     HTableDescriptor htd = new HTableDescriptor("mytable");
     List regions = Lists.newArrayList(
-        new HRegionInfo(htd.getName(), Bytes.toBytes("a"), Bytes.toBytes("d")),
-        new HRegionInfo(htd.getName(), Bytes.toBytes("d"), Bytes.toBytes("z"))
+        new HRegionInfo(htd, Bytes.toBytes("a"), Bytes.toBytes("d")),
+        new HRegionInfo(htd, Bytes.toBytes("d"), Bytes.toBytes("z"))
         );
     Mockito.doReturn(regions).when(rs).getOnlineRegions();
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
index a85f55b..be1eec1 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
@@ -42,9 +42,9 @@ public class TestRegionSplitPolicy {
   public void setupMocks() {
     conf = HBaseConfiguration.create();

-    HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testtable"));
-    htd = new HTableDescriptor();
+    HRegionInfo hri = new HRegionInfo(htd, null, null);
+
     mockRegion = Mockito.mock(HRegion.class);
     Mockito.doReturn(htd).when(mockRegion).getTableDesc();
     Mockito.doReturn(hri).when(mockRegion).getRegionInfo();
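
The testContainsRange hunk above only changes how the HRegionInfo is built; the range semantics it asserts stay the same. A quick illustration of that contract for a region spanning ["a","g"), where the end key is exclusive (the class name and printed assertions are illustrative only):

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ContainsRangeSketch {
      public static void main(String[] args) {
        HTableDescriptor td = new HTableDescriptor("testtable");
        HRegionInfo hri = new HRegionInfo(td, Bytes.toBytes("a"), Bytes.toBytes("g"));
        // Single row at the region's start key is contained.
        System.out.println(hri.containsRange(Bytes.toBytes("a"), Bytes.toBytes("a")));
        // A range fully inside the region is contained.
        System.out.println(hri.containsRange(Bytes.toBytes("b"), Bytes.toBytes("f")));
        // The end key itself is exclusive, so this is not contained.
        System.out.println(hri.containsRange(Bytes.toBytes("g"), Bytes.toBytes("g")));
      }
    }
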
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
index ef931bd..f092371 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
@@ -61,7 +61,7 @@ public class TestResettingCounters {
     HTableDescriptor htd = new HTableDescriptor(table);
     for (byte [] family : families) htd.addFamily(new HColumnDescriptor(family));
-    HRegionInfo hri = new HRegionInfo(htd.getName(), null, null, false);
+    HRegionInfo hri = new HRegionInfo(htd, null, null, false);
     String testDir = HBaseTestingUtility.getTestDir() + "/TestResettingCounters/";
     Path path = new Path(testDir);
     if (fs.exists(path)) {
@@ -69,7 +69,7 @@ public class TestResettingCounters {
         throw new IOException("Failed delete of " + path);
       }
     }
-    HRegion region = HRegion.createHRegion(hri, path, conf, htd);
+    HRegion region = HRegion.createHRegion(hri, path, conf);

     Increment odd = new Increment(rows[0]);
     Increment even = new Increment(rows[0]);
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
index 2c26cb4..ef8a4b2 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
@@ -72,7 +72,7 @@ public class TestScanner extends HBaseTestCase {
   }
   /** HRegionInfo for root region */
   public static final HRegionInfo REGION_INFO =
-      new HRegionInfo(TESTTABLEDESC.getName(), HConstants.EMPTY_BYTE_ARRAY,
+      new HRegionInfo(TESTTABLEDESC, HConstants.EMPTY_BYTE_ARRAY,
          HConstants.EMPTY_BYTE_ARRAY);

   private static final byte [] ROW_KEY = REGION_INFO.getRegionName();
@@ -101,7 +101,7 @@ public class TestScanner extends HBaseTestCase {
     byte [] startrow = Bytes.toBytes("bbb");
     byte [] stoprow = Bytes.toBytes("ccc");
     try {
-      this.r = createNewHRegion(TESTTABLEDESC, null, null);
+      this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
       addContent(this.r, HConstants.CATALOG_FAMILY);
       List results = new ArrayList();
       // Do simple test of getting one row only first.
@@ -175,7 +175,7 @@ public class TestScanner extends HBaseTestCase {
   public void testFilters() throws IOException {
     try {
-      this.r = createNewHRegion(TESTTABLEDESC, null, null);
+      this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
       addContent(this.r, HConstants.CATALOG_FAMILY);
       byte [] prefix = Bytes.toBytes("ab");
       Filter newFilter = new PrefixFilter(prefix);
@@ -203,7 +203,7 @@ public class TestScanner extends HBaseTestCase {
    */
   public void testRaceBetweenClientAndTimeout() throws Exception {
     try {
-      this.r = createNewHRegion(TESTTABLEDESC, null, null);
+      this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
       addContent(this.r, HConstants.CATALOG_FAMILY);
       Scan scan = new Scan();
       InternalScanner s = r.getScanner(scan);
@@ -352,7 +352,7 @@ public class TestScanner extends HBaseTestCase {
     assertEquals(0, info.getStartKey().length);
     assertEquals(0, info.getEndKey().length);
     assertEquals(0, Bytes.compareTo(info.getRegionName(), REGION_INFO.getRegionName()));
-    //assertEquals(0, info.getTableDesc().compareTo(REGION_INFO.getTableDesc()));
+    assertEquals(0, info.getTableDesc().compareTo(REGION_INFO.getTableDesc()));
   }

   /** Use a scanner to get the region info and then validate the results */
@@ -448,7 +448,7 @@ public class TestScanner extends HBaseTestCase {
    * @throws Exception
    */
   public void testScanAndSyncFlush() throws Exception {
-    this.r = createNewHRegion(TESTTABLEDESC, null, null);
+    this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
     HRegionIncommon hri = new HRegionIncommon(r);
     try {
       LOG.info("Added: " + addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
@@ -472,7 +472,7 @@ public class TestScanner extends HBaseTestCase {
    * @throws Exception
    */
   public void testScanAndRealConcurrentFlush() throws Exception {
-    this.r = createNewHRegion(TESTTABLEDESC, null, null);
+    this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
     HRegionIncommon hri = new HRegionIncommon(r);
     try {
       LOG.info("Added: " + addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
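
The TestScanner hunks replace direct references to the static TESTTABLEDESC with REGION_INFO.getTableDesc(), making the region info the single source of truth for the schema. A short sketch of that lookup under the same assumption (the descriptor name and class are hypothetical):

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;

    public class TableDescLookupSketch {
      public static void main(String[] args) {
        HTableDescriptor td = new HTableDescriptor("testscanner");
        HRegionInfo regionInfo = new HRegionInfo(td,
            HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
        // The descriptor recovered from the region info is the one it was built with.
        HTableDescriptor recovered = regionInfo.getTableDesc();
        System.out.println(recovered.getNameAsString());
      }
    }
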
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
index 4dccaf4..f0c9dc2 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
@@ -196,8 +196,8 @@ public class TestSplitTransaction {
     int daughtersRowCount = 0;
     for (HRegion r: daughters) {
       // Open so can count its content.
-      HRegion openRegion = HRegion.openHRegion(this.testdir, r.getRegionInfo(),
-        r.getTableDesc(), r.getLog(), r.getConf());
+      HRegion openRegion = HRegion.openHRegion(r.getRegionInfo(),
+        r.getLog(), r.getConf());
       try {
         int count = countRows(openRegion);
         assertTrue(count > 0 && count != rowcount);
@@ -251,8 +251,8 @@ public class TestSplitTransaction {
     int daughtersRowCount = 0;
     for (HRegion r: daughters) {
       // Open so can count its content.
-      HRegion openRegion = HRegion.openHRegion(this.testdir, r.getRegionInfo(),
-        r.getTableDesc(), r.getLog(), r.getConf());
+      HRegion openRegion = HRegion.openHRegion(r.getRegionInfo(),
+        r.getLog(), r.getConf());
       try {
         int count = countRows(openRegion);
         assertTrue(count > 0 && count != rowcount);
@@ -296,9 +296,7 @@ public class TestSplitTransaction {
     HTableDescriptor htd = new HTableDescriptor("table");
     HColumnDescriptor hcd = new HColumnDescriptor(CF);
     htd.addFamily(hcd);
-    HRegionInfo hri = new HRegionInfo(htd.getName(), STARTROW, ENDROW);
-    HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
-    return HRegion.openHRegion(testdir, hri, htd, wal,
-      TEST_UTIL.getConfiguration());
+    HRegionInfo hri = new HRegionInfo(htd, STARTROW, ENDROW);
+    return HRegion.openHRegion(hri, wal, TEST_UTIL.getConfiguration());
   }
 }
\ No newline at end of file
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
index 8d97556..ab9c493 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
+import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.apache.hadoop.util.Progressable;
 import org.mockito.Mockito;
@@ -132,9 +133,9 @@ public class TestStore extends TestCase {

     HTableDescriptor htd = new HTableDescriptor(table);
     htd.addFamily(hcd);
-    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
+    HRegionInfo info = new HRegionInfo(htd, null, null, false);
     HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
-    HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
+    HRegion region = new HRegion(basedir, hlog, fs, conf, info, null);

     store = new Store(basedir, region, hcd, fs, conf);
   }
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
index b2cd21c..46ec4eb 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
@@ -69,6 +69,10 @@ public class TestWideScanner extends HBaseTestCase {
   }

   /** HRegionInfo for root region */
+  public static final HRegionInfo REGION_INFO =
+      new HRegionInfo(TESTTABLEDESC, HConstants.EMPTY_BYTE_ARRAY,
+          HConstants.EMPTY_BYTE_ARRAY);
+
   MiniDFSCluster cluster = null;
   HRegion r;

@@ -103,7 +107,7 @@ public class TestWideScanner extends HBaseTestCase {
   public void testWideScanBatching() throws IOException {
     final int batch = 256;
     try {
-      this.r = createNewHRegion(TESTTABLEDESC, null, null);
+      this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
       int inserted = addWideContent(this.r);
       List results = new ArrayList();
       Scan scan = new Scan();
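
TestSplitTransaction moves to the shorter openHRegion overload: no root directory and no descriptor argument, because the HRegionInfo already knows its table. A sketch of that call shape, assuming a WAL and configuration are already in hand (the helper class and its signature are hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.wal.HLog;
    import org.apache.hadoop.hbase.util.Bytes;

    public class OpenRegionSketch {
      static HRegion open(HLog wal, Configuration conf) throws IOException {
        HTableDescriptor htd = new HTableDescriptor("table");
        htd.addFamily(new HColumnDescriptor("cf"));
        HRegionInfo hri =
            new HRegionInfo(htd, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"));
        // The region opens against the schema embedded in hri.
        return HRegion.openHRegion(hri, wal, conf);
      }
    }
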
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java
index 1c374cb..8faa6b2 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java
@@ -59,11 +59,11 @@ public class TestCloseRegionHandler {
     final RegionServerServices rss = new MockRegionServerServices();
     HTableDescriptor htd = new HTableDescriptor("testFailedFlushAborts");
     final HRegionInfo hri =
-        new HRegionInfo(htd.getName(), HConstants.EMPTY_END_ROW,
+        new HRegionInfo(htd, HConstants.EMPTY_END_ROW,
            HConstants.EMPTY_END_ROW);
     HRegion region =
         HRegion.createHRegion(hri, HBaseTestingUtility.getTestDir(),
-            HTU.getConfiguration(), htd);
+            HTU.getConfiguration());
     assertNotNull(region);
     // Spy on the region so can throw exception when close is called.
     HRegion spy = Mockito.spy(region);
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java
index bc50521..8aa4d1b 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java
@@ -80,7 +80,7 @@ public class TestOpenRegionHandler {
    */
   @Before
   public void setupHRI() {
-    TEST_HRI = new HRegionInfo(TEST_HTD.getName(),
+    TEST_HRI = new HRegionInfo(TEST_HTD,
         Bytes.toBytes(testIndex),
         Bytes.toBytes(testIndex + 1));
     testIndex++;
@@ -103,9 +103,9 @@ public class TestOpenRegionHandler {
     final HRegionInfo hri = TEST_HRI;
     HRegion region =
         HRegion.createHRegion(hri, HBaseTestingUtility.getTestDir(), HTU
-            .getConfiguration(), htd);
+            .getConfiguration());
     assertNotNull(region);
-    OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri, htd) {
+    OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri) {
       HRegion openRegion() {
         // Open region first, then remove znode as though it'd been hijacked.
         HRegion region = super.openRegion();
@@ -141,7 +141,7 @@ public class TestOpenRegionHandler {
     // Create the handler
     OpenRegionHandler handler =
-      new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD) {
+      new OpenRegionHandler(server, rsServices, TEST_HRI) {
         @Override
         HRegion openRegion() {
           // Fake failure of opening a region due to an IOE, which is caught
@@ -166,7 +166,7 @@ public class TestOpenRegionHandler {
     // Create the handler
     OpenRegionHandler handler =
-      new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD) {
+      new OpenRegionHandler(server, rsServices, TEST_HRI) {
         @Override
         boolean updateMeta(final HRegion r) {
           // Fake failure of updating META
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
index 7d48264..9c6248f 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
@@ -38,7 +38,11 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -143,14 +147,11 @@ public class TestHLog {
     Path tabledir = new Path(hbaseDir, getName());
     fs.mkdirs(tabledir);
     for(int i = 0; i < howmany; i++) {
-      infos[i] = new HRegionInfo(tableName,
+      infos[i] = new HRegionInfo(new HTableDescriptor(tableName),
         Bytes.toBytes("" + i), Bytes.toBytes("" + (i+1)), false);
       fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
       LOG.info("allo " + new Path(tabledir, infos[i].getEncodedName()).toString());
     }
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    htd.addFamily(new HColumnDescriptor("column"));
-
     // Add edits for three regions.
     try {
       for (int ii = 0; ii < howmany; ii++) {
@@ -165,7 +166,7 @@ public class TestHLog {
             System.currentTimeMillis(), column));
           LOG.info("Region " + i + ": " + edit);
           log.append(infos[i], tableName, edit,
-            System.currentTimeMillis(), htd);
+            System.currentTimeMillis());
         }
       }
       log.rollWriter();
@@ -207,15 +208,13 @@ public class TestHLog {
     HLog wal = new HLog(fs, subdir, oldLogDir, conf);
     final int total = 20;
-    HRegionInfo info = new HRegionInfo(bytes,
+    HRegionInfo info = new HRegionInfo(new HTableDescriptor(bytes),
        null,null, false);
-    HTableDescriptor htd = new HTableDescriptor();
-    htd.addFamily(new HColumnDescriptor(bytes));

     for (int i = 0; i < total; i++) {
       WALEdit kvs = new WALEdit();
       kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
-      wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
+      wal.append(info, bytes, kvs, System.currentTimeMillis());
     }
     // Now call sync and try reading. Opening a Reader before you sync just
     // gives you EOFE.
@@ -233,7 +232,7 @@ public class TestHLog {
     for (int i = 0; i < total; i++) {
       WALEdit kvs = new WALEdit();
       kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
-      wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
+      wal.append(info, bytes, kvs, System.currentTimeMillis());
     }
     reader = HLog.getReader(fs, walPath, conf);
     count = 0;
@@ -252,7 +251,7 @@ public class TestHLog {
     for (int i = 0; i < total; i++) {
       WALEdit kvs = new WALEdit();
       kvs.add(new KeyValue(Bytes.toBytes(i), bytes, value));
-      wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
+      wal.append(info, bytes, kvs, System.currentTimeMillis());
     }
     // Now I should have written out lots of blocks. Sync then read.
     wal.sync();
@@ -337,19 +336,17 @@ public class TestHLog {
   @Test
   public void testAppendClose() throws Exception {
     byte [] tableName = Bytes.toBytes(getName());
-    HRegionInfo regioninfo = new HRegionInfo(tableName,
-        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
+    HRegionInfo regioninfo = new HRegionInfo(new HTableDescriptor(tableName),
+        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
     Path subdir = new Path(dir, "hlogdir");
     Path archdir = new Path(dir, "hlogdir_archive");
     HLog wal = new HLog(fs, subdir, archdir, conf);
     final int total = 20;
-    HTableDescriptor htd = new HTableDescriptor();
-    htd.addFamily(new HColumnDescriptor(tableName));

     for (int i = 0; i < total; i++) {
       WALEdit kvs = new WALEdit();
       kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
-      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
+      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis());
     }
     // Now call sync to send the data to HDFS datanodes
     wal.sync();
@@ -467,12 +464,9 @@ public class TestHLog {
         Bytes.toBytes(Integer.toString(i)),
         timestamp, new byte[] { (byte)(i + '0') }));
     }
-    HRegionInfo info = new HRegionInfo(tableName,
+    HRegionInfo info = new HRegionInfo(new HTableDescriptor(tableName),
       row,Bytes.toBytes(Bytes.toString(row) + "1"), false);
-    HTableDescriptor htd = new HTableDescriptor();
-    htd.addFamily(new HColumnDescriptor("column"));
-
-    log.append(info, tableName, cols, System.currentTimeMillis(), htd);
+    log.append(info, tableName, cols, System.currentTimeMillis());
     long logSeqId = log.startCacheFlush(info.getEncodedNameAsBytes());
     log.completeCacheFlush(info.getEncodedNameAsBytes(), tableName, logSeqId,
         info.isMetaRegion());
@@ -539,11 +533,9 @@ public class TestHLog {
         Bytes.toBytes(Integer.toString(i)),
         timestamp, new byte[] { (byte)(i + '0') }));
     }
-    HRegionInfo hri = new HRegionInfo(tableName,
+    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
-    HTableDescriptor htd = new HTableDescriptor();
-    htd.addFamily(new HColumnDescriptor("column"));
-    log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
+    log.append(hri, tableName, cols, System.currentTimeMillis());
     long logSeqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
     log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, logSeqId, false);
     log.close();
@@ -600,17 +592,14 @@ public class TestHLog {
     DumbWALActionsListener visitor = new DumbWALActionsListener();
     log.registerWALActionsListener(visitor);
     long timestamp = System.currentTimeMillis();
-    HTableDescriptor htd = new HTableDescriptor();
-    htd.addFamily(new HColumnDescriptor("column"));
-
-    HRegionInfo hri = new HRegionInfo(tableName,
+    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
     for (int i = 0; i < COL_COUNT; i++) {
       WALEdit cols = new WALEdit();
       cols.add(new KeyValue(row, Bytes.toBytes("column"),
         Bytes.toBytes(Integer.toString(i)),
         timestamp, new byte[]{(byte) (i + '0')}));
-      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
+      log.append(hri, tableName, cols, System.currentTimeMillis());
     }
     assertEquals(COL_COUNT, visitor.increments);
     log.unregisterWALActionsListener(visitor);
@@ -618,7 +607,7 @@ public class TestHLog {
     cols.add(new KeyValue(row, Bytes.toBytes("column"),
       Bytes.toBytes(Integer.toString(11)),
       timestamp, new byte[]{(byte) (11 + '0')}));
-    log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
+    log.append(hri, tableName, cols, System.currentTimeMillis());
     assertEquals(COL_COUNT, visitor.increments);
   }
@@ -629,9 +618,9 @@ public class TestHLog {
     final byte [] tableName2 = Bytes.toBytes("testLogCleaning2");

     HLog log = new HLog(fs, dir, oldLogDir, conf);
-    HRegionInfo hri = new HRegionInfo(tableName,
+    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
-    HRegionInfo hri2 = new HRegionInfo(tableName2,
+    HRegionInfo hri2 = new HRegionInfo(new HTableDescriptor(tableName2),
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

     // Add a single edit and make sure that rolling won't remove the file
@@ -683,15 +672,12 @@ public class TestHLog {
   private void addEdits(HLog log, HRegionInfo hri, byte [] tableName,
                         int times) throws IOException {
-    HTableDescriptor htd = new HTableDescriptor();
-    htd.addFamily(new HColumnDescriptor("row"));
-
     final byte [] row = Bytes.toBytes("row");
     for (int i = 0; i < times; i++) {
       long timestamp = System.currentTimeMillis();
       WALEdit cols = new WALEdit();
       cols.add(new KeyValue(row, row, row, timestamp, row));
-      log.append(hri, tableName, cols, timestamp, htd);
+      log.append(hri, tableName, cols, timestamp);
     }
   }
@@ -705,12 +691,6 @@ public class TestHLog {
     }

     @Override
-    public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
-      //To change body of implemented methods use File | Settings | File Templates.
-      increments++;
-    }
-
-    @Override
     public void logRolled(Path newFile) {
       // TODO Auto-generated method stub
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
index 79ac218..9bf7df1 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
@@ -429,9 +429,6 @@ public class TestLogRolling {
       @Override
       public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
           WALEdit logEdit) {}
-      @Override
-      public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
-          WALEdit logEdit) {}
     });

     assertTrue("Need HDFS-826 for this test", log.canGetCurReplicas());
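
In the WAL tests the append signature loses its trailing HTableDescriptor: the log can pull whatever table metadata it needs from the HRegionInfo. A minimal sketch of the four-argument append these hunks revert to (the table name, row, and class name are hypothetical, and 'wal' is assumed to be an open HLog):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.regionserver.wal.HLog;
    import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WalAppendSketch {
      static void appendOneEdit(HLog wal) throws IOException {
        byte[] table = Bytes.toBytes("t");
        HRegionInfo info = new HRegionInfo(new HTableDescriptor(table), null, null, false);
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(Bytes.toBytes("row"), table, table));
        // No HTableDescriptor parameter; the region info carries the schema.
        wal.append(info, table, edit, System.currentTimeMillis());
      }
    }
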
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java
index 7691236..9bf6115 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java
@@ -27,7 +27,11 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
 import org.junit.Before;
@@ -84,19 +88,16 @@ public class TestWALActionsListener {
     list.add(observer);
     DummyWALActionsListener laterobserver = new DummyWALActionsListener();
     HLog hlog = new HLog(fs, logDir, oldLogDir, conf, list, null);
-    HRegionInfo hri = new HRegionInfo(SOME_BYTES,
-        SOME_BYTES, SOME_BYTES, false);
+    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(SOME_BYTES),
+        SOME_BYTES, SOME_BYTES, false);

     for (int i = 0; i < 20; i++) {
       byte[] b = Bytes.toBytes(i+"");
       KeyValue kv = new KeyValue(b,b,b);
       WALEdit edit = new WALEdit();
       edit.add(kv);
-      HTableDescriptor htd = new HTableDescriptor();
-      htd.addFamily(new HColumnDescriptor(b));
-
       HLogKey key = new HLogKey(b,b, 0, 0, HConstants.DEFAULT_CLUSTER_ID);
-      hlog.append(hri, key, edit, htd);
+      hlog.append(hri, key, edit);
       if (i == 10) {
         hlog.registerWALActionsListener(laterobserver);
       }
@@ -113,7 +114,6 @@ public class TestWALActionsListener {
     assertEquals(2, observer.closedCount);
   }

-
   /**
    * Just counts when methods are called
    */
@@ -142,10 +142,5 @@ public class TestWALActionsListener {
     public void logCloseRequested() {
       closedCount++;
     }
-
-    public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
-      //To change body of implemented methods use File | Settings | File Templates.
-    }
-
   }
 }
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
index 2327d0a..523981b 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
@@ -57,7 +56,6 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mockito.Mockito;

 /**
  * Test replay of edits out of a WAL split.
@@ -133,19 +131,14 @@ public class TestWALReplay {
     deleteDir(basedir);
     fs.mkdirs(new Path(basedir, hri.getEncodedName()));

-    HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
-    HRegion region2 = HRegion.createHRegion(hri,
-        hbaseRootDir, this.conf, htd);
-
     final byte [] tableName = Bytes.toBytes(tableNameStr);
     final byte [] rowName = tableName;

     HLog wal1 = createWAL(this.conf);
     // Add 1k to each family.
     final int countPerFamily = 1000;
-    for (HColumnDescriptor hcd: htd.getFamilies()) {
-      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee,
-          wal1, htd);
+    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
+      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal1);
     }
     wal1.close();
     runWALSplit(this.conf);
@@ -154,9 +147,8 @@ public class TestWALReplay {
     // Up the sequenceid so that these edits are after the ones added above.
     wal2.setSequenceNumber(wal1.getSequenceNumber());
     // Add 1k to each family.
-    for (HColumnDescriptor hcd: htd.getFamilies()) {
-      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily,
-          ee, wal2, htd);
+    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
+      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal2);
     }
     wal2.close();
     runWALSplit(this.conf);
@@ -165,7 +157,7 @@ public class TestWALReplay {
     wal3.setSequenceNumber(wal2.getSequenceNumber());
     try {
       final HRegion region = new HRegion(basedir, wal3, this.fs, this.conf, hri,
-          htd, null);
+          null);
       long seqid = region.initialize();
       assertTrue(seqid > wal3.getSequenceNumber());
@@ -193,15 +185,12 @@ public class TestWALReplay {
     final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr);
     final Path basedir = new Path(this.hbaseRootDir, tableNameStr);
     deleteDir(basedir);
-    final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
-    HRegion region2 = HRegion.createHRegion(hri,
-        hbaseRootDir, this.conf, htd);
     HLog wal = createWAL(this.conf);
-    HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);
+    HRegion region = HRegion.openHRegion(hri, wal, this.conf);
     Path f = new Path(basedir, "hfile");
     HFile.Writer writer = HFile.getWriterFactory(conf).createWriter(this.fs, f);
-    byte [] family = htd.getFamilies().iterator().next().getName();
+    byte [] family = hri.getTableDesc().getFamilies().iterator().next().getName();
     byte [] row = Bytes.toBytes(tableNameStr);
     writer.append(new KeyValue(row, family, family, row));
     writer.close();
@@ -219,7 +208,7 @@ public class TestWALReplay {
     runWALSplit(newConf);
     HLog wal2 = createWAL(newConf);
     HRegion region2 = new HRegion(basedir, wal2, FileSystem.get(newConf),
-        newConf, hri, htd, null);
+        newConf, hri, null);
     long seqid2 = region2.initialize();
     assertTrue(seqid2 > -1);
@@ -250,20 +239,17 @@ public class TestWALReplay {
     deleteDir(basedir);
     final byte[] rowName = Bytes.toBytes(tableNameStr);
     final int countPerFamily = 10;
-    final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
-    HRegion region3 = HRegion.createHRegion(hri,
-        hbaseRootDir, this.conf, htd);

     // Write countPerFamily edits into the three families. Do a flush on one
     // of the families during the load of edits so its seqid is not same as
     // others to test we do right thing when different seqids.
     HLog wal = createWAL(this.conf);
-    HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, htd, null);
+    HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, null);
     long seqid = region.initialize();
     // HRegionServer usually does this. It knows the largest seqid across all regions.
     wal.setSequenceNumber(seqid);
     boolean first = true;
-    for (HColumnDescriptor hcd: htd.getFamilies()) {
+    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
       addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
       if (first ) {
         // If first, so we have at least one family w/ different seqid to rest.
@@ -274,7 +260,7 @@ public class TestWALReplay {
     // Now assert edits made it in.
     final Get g = new Get(rowName);
     Result result = region.get(g, null);
-    assertEquals(countPerFamily * htd.getFamilies().size(),
+    assertEquals(countPerFamily * hri.getTableDesc().getFamilies().size(),
         result.size());
     // Now close the region, split the log, reopen the region and assert that
     // replay of log has no effect, that our seqids are calculated correctly so
@@ -283,7 +269,7 @@ public class TestWALReplay {
     wal.close();
     runWALSplit(this.conf);
     HLog wal2 = createWAL(this.conf);
-    HRegion region2 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, null) {
+    HRegion region2 = new HRegion(basedir, wal2, this.fs, this.conf, hri, null) {
       @Override
       protected boolean restoreEdit(Store s, KeyValue kv) {
         super.restoreEdit(s, kv);
@@ -298,7 +284,7 @@ public class TestWALReplay {
     // Next test. Add more edits, then 'crash' this region by stealing its wal
     // out from under it and assert that replay of the log adds the edits back
     // correctly when region is opened again.
-    for (HColumnDescriptor hcd: htd.getFamilies()) {
+    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
       addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
     }
     // Get count of edits.
@@ -318,7 +304,7 @@ public class TestWALReplay {
     // Make a new wal for new region open.
     HLog wal3 = createWAL(newConf);
     final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
-    HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
+    HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, null) {
       @Override
       protected boolean restoreEdit(Store s, KeyValue kv) {
         boolean b = super.restoreEdit(s, kv);
@@ -332,7 +318,7 @@ public class TestWALReplay {
     Result result3 = region3.get(g, null);
     // Assert that count of cells is same as before crash.
     assertEquals(result2.size(), result3.size());
-    assertEquals(htd.getFamilies().size() * countPerFamily,
+    assertEquals(hri.getTableDesc().getFamilies().size() * countPerFamily,
         countOfRestoredEdits.get());

     // I can't close wal1. Its been appropriated when we split.
@@ -355,10 +341,6 @@ public class TestWALReplay {
     final Path basedir = new Path(hbaseRootDir, tableNameStr);
     deleteDir(basedir);
     fs.mkdirs(new Path(basedir, hri.getEncodedName()));
-    final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
-    HRegion region2 = HRegion.createHRegion(hri,
-        hbaseRootDir, this.conf, htd);
-
     final HLog wal = createWAL(this.conf);
     final byte[] tableName = Bytes.toBytes(tableNameStr);
     final byte[] rowName = tableName;
@@ -366,9 +348,8 @@ public class TestWALReplay {
     // Add 1k to each family.
     final int countPerFamily = 1000;
-    for (HColumnDescriptor hcd: htd.getFamilies()) {
-      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily,
-          ee, wal, htd);
+    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
+      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal);
     }

     // Add a cache flush, shouldn't have any effect
@@ -380,14 +361,14 @@ public class TestWALReplay {
     long now = ee.currentTimeMillis();
     edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName,
         now, rowName));
-    wal.append(hri, tableName, edit, now, htd);
+    wal.append(hri, tableName, edit, now);

     // Delete the c family to verify deletes make it over.
     edit = new WALEdit();
     now = ee.currentTimeMillis();
     edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now,
         KeyValue.Type.DeleteFamily));
-    wal.append(hri, tableName, edit, now, htd);
+    wal.append(hri, tableName, edit, now);

     // Sync.
     wal.sync();
@@ -399,7 +380,7 @@ public class TestWALReplay {
     final Configuration newConf = HBaseConfiguration.create(this.conf);
     User user = HBaseTestingUtility.getDifferentUser(newConf,
       ".replay.wal.secondtime");
-    user.runAs(new PrivilegedExceptionAction() {
+    user.runAs(new PrivilegedExceptionAction(){
       public Object run() throws Exception {
         runWALSplit(newConf);
         FileSystem newFS = FileSystem.get(newConf);
@@ -409,27 +390,26 @@ public class TestWALReplay {
         HLog newWal = createWAL(newConf);
         final AtomicInteger flushcount = new AtomicInteger(0);
         try {
-          final HRegion region =
-            new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {
-            protected boolean internalFlushcache(
-              final HLog wal, final long myseqid, MonitoredTask status)
+          final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri,
+              null) {
+            @Override
+            protected boolean internalFlushcache(HLog wal, long myseqid,
+                MonitoredTask status)
             throws IOException {
-              LOG.info("InternalFlushCache Invoked");
-              boolean b = super.internalFlushcache(wal, myseqid,
-                  Mockito.mock(MonitoredTask.class));
+              boolean b = super.internalFlushcache(wal, myseqid, status);
               flushcount.incrementAndGet();
               return b;
             };
          };
          long seqid = region.initialize();
          // We flushed during init.
-         assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
+         assertTrue(flushcount.get() > 0);
          assertTrue(seqid > wal.getSequenceNumber());
          Get get = new Get(rowName);
          Result result = region.get(get, -1);
          // Make sure we only see the good edits
-         assertEquals(countPerFamily * (htd.getFamilies().size() - 1),
+         assertEquals(countPerFamily * (hri.getTableDesc().getFamilies().size() - 1),
            result.size());
          region.close();
        } finally {
@@ -459,7 +439,7 @@ public class TestWALReplay {
 
   private void addWALEdits (final byte [] tableName, final HRegionInfo hri,
       final byte [] rowName, final byte [] family,
-      final int count, EnvironmentEdge ee, final HLog wal, final HTableDescriptor htd)
+      final int count, EnvironmentEdge ee, final HLog wal)
   throws IOException {
     String familyStr = Bytes.toString(family);
     for (int j = 0; j < count; j++) {
@@ -468,7 +448,7 @@ public class TestWALReplay {
       WALEdit edit = new WALEdit();
       edit.add(new KeyValue(rowName, family, qualifierBytes,
         ee.currentTimeMillis(), columnBytes));
-      wal.append(hri, tableName, edit, ee.currentTimeMillis(), htd);
+      wal.append(hri, tableName, edit, ee.currentTimeMillis());
     }
   }
 
@@ -489,9 +469,17 @@ public class TestWALReplay {
    * column families named 'a','b', and 'c'.
    * @param tableName Name of table to use when we create HTableDescriptor.
    */
-  private HRegionInfo createBasic3FamilyHRegionInfo(final String tableName) {
-    return new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
-  }
+  private HRegionInfo createBasic3FamilyHRegionInfo(final String tableName) {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
+    htd.addFamily(a);
+    HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
+    htd.addFamily(b);
+    HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
+    htd.addFamily(c);
+    return new HRegionInfo(htd, null, null, false);
+  }
+
   /*
    * Run the split.  Verify only single split file made.
@@ -505,7 +493,7 @@ public class TestWALReplay {
       this.hbaseRootDir, this.logDir, this.oldLogDir, fs);
     List splits = logSplitter.splitLog();
     // Split should generate only 1 file since there's only 1 region
-    assertEquals("splits=" + splits, 1, splits.size());
+    assertEquals(1, splits.size());
     // Make sure the file exists
     assertTrue(fs.exists(splits.get(0)));
     LOG.info("Split file=" + splits.get(0));
@@ -524,15 +512,4 @@ public class TestWALReplay {
     HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
     return wal;
   }
-
-  private HTableDescriptor createBasic3FamilyHTD(final String tableName) {
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
-    htd.addFamily(a);
-    HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
-    htd.addFamily(b);
-    HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
-    htd.addFamily(c);
-    return htd;
-  }
-}
+}
\ No newline at end of file
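Every TestWALReplay hunk above follows one pattern: the free-standing HTableDescriptor, and the htd argument it fed into HRegion constructors, HLog.append, and addWALEdits, disappears because after this revert the descriptor travels inside HRegionInfo again. A minimal sketch of the post-revert call shape, assembled only from signatures visible in this patch; the table and family names are illustrative, and basedir, wal, fs, and conf stand for fixtures the test already sets up:

    // Sketch: the descriptor is supplied once, at HRegionInfo construction,
    // and everything downstream reads it back via hri.getTableDesc().
    HTableDescriptor htd = new HTableDescriptor("t1");           // illustrative table name
    htd.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));    // illustrative family
    HRegionInfo hri = new HRegionInfo(htd, null, null, false);
    HRegion region = new HRegion(basedir, wal, fs, conf, hri, null);
    for (HColumnDescriptor hcd : hri.getTableDesc().getFamilies()) {
      // per-family work, e.g. addRegionEdits(...), as in the tests above
    }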
diff --git src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 5ff8a71..6360058 100644
--- src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -127,7 +127,7 @@ public class TestReplicationSourceManager {
     col.setScope(HConstants.REPLICATION_SCOPE_LOCAL);
     htd.addFamily(col);
 
-    hri = new HRegionInfo(htd.getName(), r1, r2);
+    hri = new HRegionInfo(htd, r1, r2);
 
   }
 
@@ -164,8 +164,7 @@ public class TestReplicationSourceManager {
       URLEncoder.encode("regionserver:60020", "UTF8"));
 
     manager.init();
-    HTableDescriptor htd = new HTableDescriptor();
-    htd.addFamily(new HColumnDescriptor(f1));
+
     // Testing normal log rolling every 20
     for(long i = 1; i < 101; i++) {
       if(i > 1 && i % 20 == 0) {
@@ -174,7 +173,7 @@ public class TestReplicationSourceManager {
       LOG.info(i);
       HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
           System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
-      hlog.append(hri, key, edit, htd);
+      hlog.append(hri, key, edit);
     }
 
     // Simulate a rapid insert that's followed
@@ -187,7 +186,7 @@ public class TestReplicationSourceManager {
     for (int i = 0; i < 3; i++) {
       HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
           System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
-      hlog.append(hri, key, edit, htd);
+      hlog.append(hri, key, edit);
     }
 
     assertEquals(6, manager.getHLogs().get(slaveId).size());
@@ -199,7 +198,7 @@ public class TestReplicationSourceManager {
 
     HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
         System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
-    hlog.append(hri, key, edit, htd);
+    hlog.append(hri, key, edit);
 
     assertEquals(1, manager.getHLogs().size());
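The replication test shows the same contraction on the write path: hlog.append(hri, key, edit, htd) becomes hlog.append(hri, key, edit). A hedged reading of why the shorter call suffices, with hri, key, and edit exactly as the test builds them above:

    // The WAL entry is fully described by the HLogKey plus the HRegionInfo;
    // table metadata a consumer needs (e.g. the per-family replication scope
    // set via setScope above) is reachable through hri.getTableDesc(),
    // so no separately threaded HTableDescriptor is required.
    hlog.append(hri, key, edit);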
diff --git src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
index fdfdc01..c02dfda 100644
--- src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
+++ src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
@@ -88,7 +88,7 @@ public class TestTableRegionModel extends TestCase {
   public void testGetName() {
     TableRegionModel model = buildTestModel();
     String modelName = model.getName();
-    HRegionInfo hri = new HRegionInfo(Bytes.toBytes(TABLE),
+    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(TABLE),
       START_KEY, END_KEY, false, ID);
     assertEquals(modelName, hri.getRegionNameAsString());
   }
@@ -103,4 +103,4 @@ public class TestTableRegionModel extends TestCase {
   public void testFromXML() throws Exception {
     checkModel(fromXML(AS_XML));
   }
-}
+}
\ No newline at end of file
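The REST model test restates the constructor change in miniature: an HRegionInfo is now built from a descriptor rather than a raw table-name byte array. A small sketch, reusing TABLE, START_KEY, END_KEY, and ID from the test above:

    // The region name, and hence getRegionNameAsString(), is derived from the
    // descriptor's table name, so the model's name still matches after the change.
    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(TABLE),
      START_KEY, END_KEY, false, ID);
    HTableDescriptor htd = hri.getTableDesc();  // the descriptor rides along with the info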
diff --git src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
deleted file mode 100644
index 0c1042a..0000000
--- src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import static org.junit.Assert.*;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.junit.Test;
-
-
-/**
- * Tests for {@link FSTableDescriptors}.
- */
-public class TestFSTableDescriptors {
-  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static final Log LOG = LogFactory.getLog(TestFSTableDescriptors.class);
-
-  @Test
-  public void testRemoves() throws IOException {
-    final String name = "testRemoves";
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    // Cleanup old tests if any detrius laying around.
-    Path rootdir = new Path(HBaseTestingUtility.getTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
-    HTableDescriptor htd = new HTableDescriptor(name);
-    htds.add(htd);
-    assertNotNull(htds.remove(htd.getNameAsString()));
-    assertNull(htds.remove(htd.getNameAsString()));
-  }
-
-  @Test public void testReadingHTDFromFS() throws IOException {
-    final String name = "testReadingHTDFromFS";
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    HTableDescriptor htd = new HTableDescriptor(name);
-    Path rootdir = HBaseTestingUtility.getTestDir(name);
-    createHTDInFS(fs, rootdir, htd);
-    HTableDescriptor htd2 =
-      FSUtils.getTableDescriptor(fs, rootdir, htd.getNameAsString());
-    assertTrue(htd.equals(htd2));
-  }
-
-  private void createHTDInFS(final FileSystem fs, Path rootdir,
-      final HTableDescriptor htd)
-  throws IOException {
-    FSUtils.createTableDescriptor(fs, rootdir, htd);
-  }
-
-  @Test public void testHTableDescriptors()
-  throws IOException, InterruptedException {
-    final String name = "testHTableDescriptors";
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    // Cleanup old tests if any debris laying around.
-    Path rootdir = new Path(HBaseTestingUtility.getTestDir(), name);
-    final int count = 10;
-    // Write out table infos.
-    for (int i = 0; i < count; i++) {
-      HTableDescriptor htd = new HTableDescriptor(name + i);
-      createHTDInFS(fs, rootdir, htd);
-    }
-    FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) {
-      @Override
-      public HTableDescriptor get(byte[] tablename)
-      throws TableExistsException, FileNotFoundException, IOException {
-        LOG.info(Bytes.toString(tablename) + ", cachehits=" + this.cachehits);
-        return super.get(tablename);
-      }
-    };
-    for (int i = 0; i < count; i++) {
-      assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
-    }
-    for (int i = 0; i < count; i++) {
-      assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
-    }
-    // Update the table infos
-    for (int i = 0; i < count; i++) {
-      HTableDescriptor htd = new HTableDescriptor(name + i);
-      htd.addFamily(new HColumnDescriptor("" + i));
-      FSUtils.updateHTableDescriptor(fs, rootdir, htd);
-    }
-    // Wait a while so mod time we write is for sure different.
-    Thread.sleep(100);
-    for (int i = 0; i < count; i++) {
-      assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
-    }
-    for (int i = 0; i < count; i++) {
-      assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
-    }
-    assertEquals(count * 4, htds.invocations);
-    assertTrue("expected=" + (count * 2) + ", actual=" + htds.cachehits,
-      htds.cachehits >= (count * 2));
-    assertTrue(htds.get(HConstants.ROOT_TABLE_NAME) != null);
-    assertEquals(htds.invocations, count * 4 + 1);
-    assertTrue("expected=" + ((count * 2) + 1) + ", actual=" + htds.cachehits,
-      htds.cachehits >= ((count * 2) + 1));
-  }
-
-  @Test (expected=java.io.FileNotFoundException.class)
-  public void testNoSuchTable() throws IOException {
-    final String name = "testNoSuchTable";
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    // Cleanup old tests if any detrius laying around.
-    Path rootdir = new Path(HBaseTestingUtility.getTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
-    htds.get("NoSuchTable");
-  }
-
-  @Test
-  public void testUpdates() throws IOException {
-    final String name = "testUpdates";
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    // Cleanup old tests if any detrius laying around.
-    Path rootdir = new Path(HBaseTestingUtility.getTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
-    HTableDescriptor htd = new HTableDescriptor(name);
-    htds.add(htd);
-    htds.add(htd);
-    htds.add(htd);
-  }
-}
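Deleting TestFSTableDescriptors wholesale is consistent with the rest of the revert: once the descriptor is serialized inside HRegionInfo, there is no filesystem-backed descriptor store, and no .tableinfo file, left to exercise. A sketch of the before-and-after lookup, with htds as in the deleted test and hri as in the hunks above:

    // Before the revert, a descriptor was fetched from the filesystem store:
    //   HTableDescriptor htd = htds.get(Bytes.toBytes(tablename));  // FSTableDescriptors
    // After it, the descriptor is read straight off the region info:
    HTableDescriptor htd = hri.getTableDesc();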
diff --git src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index ca6dd4b..b2e34b6 100644
--- src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -148,7 +148,7 @@ public class TestHBaseFsck {
       htd, byte[] startKey, byte[] endKey)
       throws IOException {
     HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
-    HRegionInfo hri = new HRegionInfo(htd.getName(), startKey, endKey);
+    HRegionInfo hri = new HRegionInfo(htd, startKey, endKey);
     Put put = new Put(hri.getRegionName());
     put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
       Writables.getBytes(hri));
diff --git src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java
index 9166968..afecc0f 100644
--- src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java
+++ src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo;
 import org.apache.hadoop.hbase.util.HBaseFsck.MetaEntry;
 import org.junit.Test;
@@ -32,16 +33,17 @@ import org.junit.Test;
  */
 public class TestHBaseFsckComparator {
 
-  byte[] table = Bytes.toBytes("table1");
-  byte[] table2 = Bytes.toBytes("table2");
+  HTableDescriptor table = new HTableDescriptor("table1");
+  HTableDescriptor table2 = new HTableDescriptor("table2");
   byte[] keyStart = Bytes.toBytes("");
   byte[] keyA = Bytes.toBytes("A");
   byte[] keyB = Bytes.toBytes("B");
   byte[] keyC = Bytes.toBytes("C");
   byte[] keyEnd = Bytes.toBytes("");
 
-  static HbckInfo genHbckInfo(byte[] table, byte[] start, byte[] end, int time) {
-    return new HbckInfo(new MetaEntry(new HRegionInfo(table, start, end), null,
+  static HbckInfo genHbckInfo(HTableDescriptor htd,
+      byte[] start, byte[] end, int time) {
+    return new HbckInfo(new MetaEntry(new HRegionInfo(htd, start, end), null,
         time));
   }
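The hbck changes follow suit: fixture regions are built descriptor-first, so genHbckInfo takes an HTableDescriptor instead of a byte[] table name. A short usage sketch with the fields and helper defined in the hunks above:

    // Sketch: build an HbckInfo for the [keyStart, keyA) slice of table1.
    HbckInfo hbck = genHbckInfo(table, keyStart, keyA, 0);
    // In TestHBaseFsck the same construction feeds the catalog row:
    // Writables.getBytes(hri) now serializes the descriptor into .META.
    // along with the rest of the region info.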
diff --git src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
index ffc8d9d..3039df2 100644
--- src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
+++ src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
@@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.junit.Test;
@@ -94,9 +96,7 @@ public class TestMergeTable {
     byte [] row_70001 = Bytes.toBytes("row_70001");
     byte [] row_80001 = Bytes.toBytes("row_80001");
 
-    // Create regions and populate them at same time.  Create the tabledir
-    // for them first.
-    FSUtils.createTableDescriptor(fs, rootdir, desc);
+    // Create regions and populate them at same time.
     HRegion [] regions = {
       createRegion(desc, null, row_70001, 1, 70000, rootdir),
       createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
@@ -128,18 +128,16 @@ public class TestMergeTable {
       assertTrue("originalTableRegions=" + originalTableRegions.size() +
         ", postMergeTableRegions=" + postMergeTableRegions.size(),
         postMergeTableRegions.size() < originalTableRegions.size());
-      LOG.info("Done with merge");
     } finally {
       UTIL.shutdownMiniCluster();
-      LOG.info("After cluster shutdown");
     }
   }
 
   private HRegion createRegion(final HTableDescriptor desc,
       byte [] startKey, byte [] endKey, int firstRow, int nrows, Path rootdir)
   throws IOException {
-    HRegionInfo hri = new HRegionInfo(desc.getName(), startKey, endKey);
-    HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration(), desc);
+    HRegionInfo hri = new HRegionInfo(desc, startKey, endKey);
+    HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration());
     LOG.info("Created region " + region.getRegionNameAsString());
     for(int i = firstRow; i < firstRow + nrows; i++) {
       Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i)));
@@ -158,11 +156,10 @@ public class TestMergeTable {
   protected void setupROOTAndMeta(Path rootdir, final HRegion [] regions)
   throws IOException {
     HRegion root =
-      HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, rootdir,
-        UTIL.getConfiguration(), HTableDescriptor.ROOT_TABLEDESC);
+      HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, rootdir, UTIL.getConfiguration());
     HRegion meta =
       HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, rootdir,
-        UTIL.getConfiguration(), HTableDescriptor.META_TABLEDESC);
+        UTIL.getConfiguration());
     HRegion.addRegionToMETA(root, meta);
     for (HRegion r: regions) {
       HRegion.addRegionToMETA(meta, r);
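TestMergeTable shows the createHRegion side of the change: no .tableinfo is written up front, and the trailing HTableDescriptor argument disappears. A sketch with desc, rootdir, and UTIL as the test defines them, and startKey/endKey standing for any pair of split points:

    // The descriptor is folded into the HRegionInfo, so region creation needs
    // only the info, the root directory, and a configuration.
    HRegionInfo hri = new HRegionInfo(desc, startKey, endKey);
    HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration());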
diff --git src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
index d7a3c11..ccc7a73 100644
--- src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
+++ src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
@@ -76,36 +76,31 @@ public class TestMergeTool extends HBaseTestCase {
      * Create the HRegionInfos for the regions.
      */
     // Region 0 will contain the key range [row_0200,row_0300)
-    sourceRegions[0] = new HRegionInfo(this.desc.getName(),
-        Bytes.toBytes("row_0200"),
+    sourceRegions[0] = new HRegionInfo(this.desc, Bytes.toBytes("row_0200"),
         Bytes.toBytes("row_0300"));
 
     // Region 1 will contain the key range [row_0250,row_0400) and overlaps
     // with Region 0
     sourceRegions[1] =
-      new HRegionInfo(this.desc.getName(),
-          Bytes.toBytes("row_0250"),
+      new HRegionInfo(this.desc, Bytes.toBytes("row_0250"),
           Bytes.toBytes("row_0400"));
 
     // Region 2 will contain the key range [row_0100,row_0200) and is adjacent
     // to Region 0 or the region resulting from the merge of Regions 0 and 1
     sourceRegions[2] =
-      new HRegionInfo(this.desc.getName(),
-          Bytes.toBytes("row_0100"),
+      new HRegionInfo(this.desc, Bytes.toBytes("row_0100"),
           Bytes.toBytes("row_0200"));
 
     // Region 3 will contain the key range [row_0500,row_0600) and is not
     // adjacent to any of Regions 0, 1, 2 or the merged result of any or all
     // of those regions
     sourceRegions[3] =
-      new HRegionInfo(this.desc.getName(),
-          Bytes.toBytes("row_0500"),
+      new HRegionInfo(this.desc, Bytes.toBytes("row_0500"),
           Bytes.toBytes("row_0600"));
 
     // Region 4 will have empty start and end keys and overlaps all regions.
     sourceRegions[4] =
-      new HRegionInfo(this.desc.getName(),
-          HConstants.EMPTY_BYTE_ARRAY,
+      new HRegionInfo(this.desc, HConstants.EMPTY_BYTE_ARRAY,
           HConstants.EMPTY_BYTE_ARRAY);
 
     /*
@@ -139,14 +134,12 @@ public class TestMergeTool extends HBaseTestCase {
     try {
       // Create root and meta regions
       createRootAndMetaRegions();
-      FSUtils.createTableDescriptor(this.fs, this.testDir, this.desc);
       /*
        * Create the regions we will merge
        */
       for (int i = 0; i < sourceRegions.length; i++) {
         regions[i] =
-          HRegion.createHRegion(this.sourceRegions[i], this.testDir, this.conf,
-              this.desc);
+          HRegion.createHRegion(this.sourceRegions[i], this.testDir, this.conf);
         /*
          * Insert data
          */
@@ -196,7 +189,7 @@ public class TestMergeTool extends HBaseTestCase {
 
     // Now verify that we can read all the rows from regions 0, 1
     // in the new merged region.
-    HRegion merged = HRegion.openHRegion(mergedInfo, this.desc, log, this.conf);
+    HRegion merged = HRegion.openHRegion(mergedInfo, log, this.conf);
     verifyMerge(merged, upperbound);
     merged.close();
     LOG.info("Verified " + msg);
diff --git src/test/ruby/hbase/admin_test.rb src/test/ruby/hbase/admin_test.rb
index df88100..9a096e4 100644
--- src/test/ruby/hbase/admin_test.rb
+++ src/test/ruby/hbase/admin_test.rb
@@ -160,14 +160,14 @@
     #-------------------------------------------------------------------------------
 
-    define_test "close should work without region server name" do
-      if admin.exists?(@create_test_name)
-        admin.disable(@create_test_name)
-        admin.drop(@create_test_name)
-      end
-      admin.create(@create_test_name, 'foo')
-      admin.close_region(@create_test_name + ',,0', nil)
-    end
+#    define_test "close should work without region server name" do
+#      if admin.exists?(@create_test_name)
+#        admin.disable(@create_test_name)
+#        admin.drop(@create_test_name)
+#      end
+#      admin.create(@create_test_name, 'foo')
+#      admin.close_region(@create_test_name + ',,0')
+#    end
 
     #-------------------------------------------------------------------------------
 
@@ -187,14 +186,13 @@ module Hbase
       table(@test_name).put(1, "x:a", 1)
       table(@test_name).put(2, "x:a", 2)
      assert_equal(2, table(@test_name).count)
-      # This is hacky.  Need to get the configuration into admin instance
-      admin.truncate(@test_name, $TEST_CLUSTER.getConfiguration)
+      admin.truncate(@test_name)
      assert_equal(0, table(@test_name).count)
     end
 
     define_test "truncate should yield log records" do
       logs = []
-      admin.truncate(@test_name, $TEST_CLUSTER.getConfiguration) do |log|
+      admin.truncate(@test_name) do |log|
         assert_kind_of(String, log)
         logs << log
       end
diff --git src/test/ruby/shell/shell_test.rb src/test/ruby/shell/shell_test.rb
index bc3000c..4289588 100644
--- src/test/ruby/shell/shell_test.rb
+++ src/test/ruby/shell/shell_test.rb
@@ -25,7 +25,7 @@ require 'shell/formatter'
 class ShellTest < Test::Unit::TestCase
   def setup
     @formatter = ::Shell::Formatter::Console.new()
-    @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration)
+    @hbase = ::Hbase::Hbase.new
     @shell = Shell::Shell.new(@hbase, @formatter)
   end