diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index cc91aedbbbca63f2b1f6a7f85ac90192d2039ab3..51352bb7aa413ee0cddb41e3a5080eb633e5e770 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -1332,6 +1332,17 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> { .setBloomFilterType(BloomType.NONE) // Enable cache of data blocks in L1 if more than one caching tier deployed: // e.g. if using CombinedBlockCache (BucketCache). + .setCacheDataInL1(true), + new HColumnDescriptor(HConstants.TABLE_FAMILY) + // Ten is arbitrary number. Keep versions to help debugging. + .setMaxVersions(10) + .setInMemory(true) + .setBlocksize(8 * 1024) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore. + .setBloomFilterType(BloomType.NONE) + // Enable cache of data blocks in L1 if more than one caching tier deployed: + // e.g. if using CombinedBlockCache (BucketCache). .setCacheDataInL1(true) });
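The hunk above gives hbase:meta a second column family, 'table', alongside the existing 'info' catalog family; it will hold per-table metadata such as state. A quick way to confirm the new layout (illustrative sketch only, not part of the patch; assumes an open Admin handle named admin):

    HTableDescriptor meta = admin.getTableDescriptor(TableName.META_TABLE_NAME);
    for (HColumnDescriptor hcd : meta.getColumnFamilies()) {
      System.out.println(hcd.getNameAsString()); // expect "info" and "table"
    }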
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 108662ce57cbc7cbcddaeb811926bb5afc946e07..2df66f9a6c6e596f97c8a5e29a892ed93ea797ab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -17,9 +17,23 @@ */ package org.apache.hadoop.hbase; +import javax.annotation.Nullable; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -38,6 +52,8 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; @@ -48,18 +64,6 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Threads; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Set; -import java.util.SortedMap; -import java.util.TreeMap; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - /** * Read/write operations on region and assignment information store in * hbase:meta. @@ -78,6 +82,11 @@ public class MetaTableAccessor { * HRI defined which is called default replica. * * Meta layout (as of 0.98 + HBASE-10070) is like: + * + * For each table there is a single row in column family 'table' formatted: + * <tableName> including namespace and columns are: + * table: state => contains table state + * * For each table range, there is a single row, formatted like: * <tableName>,<startKey>,<regionId>,<encodedRegionName>. This row corresponds to the regionName * of the default region replica. @@ -120,6 +129,24 @@ public class MetaTableAccessor { META_REGION_PREFIX, 0, len); } + + @InterfaceAudience.Private + public enum QueryType { + ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY), + REGION(HConstants.CATALOG_FAMILY), + TABLE(HConstants.TABLE_FAMILY); + + private final byte[][] families; + + QueryType(byte[]... families) { + this.families = families; + } + + byte[][] getFamilies() { + return this.families; + } + } + /** The delimiter for meta columns for replicaIds > 0 */ protected static final char META_REPLICA_ID_DELIMITER = '_'; @@ -131,16 +158,39 @@ public class MetaTableAccessor { // Reading operations // //////////////////////// - /** - * Performs a full scan of a hbase:meta table. - * @return List of {@link org.apache.hadoop.hbase.client.Result} + /** + * Performs a full scan of hbase:meta for regions. + * @param connection connection we're using + * @param visitor Visitor invoked against each row in regions family. * @throws IOException */ - public static List<Result> fullScanOfMeta(Connection connection) - throws IOException { - CollectAllVisitor v = new CollectAllVisitor(); - fullScan(connection, v, null); - return v.getResults(); + public static void fullScanRegions(Connection connection, + final Visitor visitor) + throws IOException { + fullScan(connection, visitor, null, QueryType.REGION); + } + + /** + * Performs a full scan of hbase:meta for regions. + * @param connection connection we're using + * @return List of {@link Result} + * @throws IOException + */ + public static List<Result> fullScanRegions(Connection connection) + throws IOException { + return fullScan(connection, QueryType.REGION); + } + + /** + * Performs a full scan of hbase:meta for tables. + * @param connection connection we're using + * @param visitor Visitor invoked against each row in tables family. + * @throws IOException + */ + public static void fullScanTables(Connection connection, + final Visitor visitor) + throws IOException { + fullScan(connection, visitor, null, QueryType.TABLE); } /** @@ -150,9 +200,9 @@ public class MetaTableAccessor { * @throws IOException */ public static void fullScan(Connection connection, - final Visitor visitor) + final Visitor visitor, QueryType type) throws IOException { - fullScan(connection, visitor, null); + fullScan(connection, visitor, null, type); } /** @@ -161,10 +211,10 @@ public class MetaTableAccessor { * @return List of {@link Result} * @throws IOException */ - public static List<Result> fullScan(Connection connection) + public static List<Result> fullScan(Connection connection, QueryType type) throws IOException { CollectAllVisitor v = new CollectAllVisitor(); - fullScan(connection, v, null); + fullScan(connection, v, null, type); return v.getResults(); } @@ -360,7 +410,7 @@ public class MetaTableAccessor { this.results.add(this.current); } }; - fullScan(connection, visitor, getTableStartRowForMeta(tableName)); + fullScan(connection, visitor, getTableStartRowForMeta(tableName), QueryType.REGION); // If visitor has results >= 1 then table exists.
return visitor.getResults().size() >= 1; } @@ -511,7 +561,7 @@ public class MetaTableAccessor { } } }; - fullScan(connection, visitor, getTableStartRowForMeta(tableName)); + fullScan(connection, visitor, getTableStartRowForMeta(tableName), QueryType.REGION); return visitor.getResults(); } @@ -543,7 +593,7 @@ public class MetaTableAccessor { } } }; - fullScan(connection, v); + fullScan(connection, v, QueryType.REGION); return hris; } @@ -554,17 +604,22 @@ public class MetaTableAccessor { public boolean visit(Result r) throws IOException { if (r == null || r.isEmpty()) return true; LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r); - RegionLocations locations = getRegionLocations(r); - if (locations == null) return true; - for (HRegionLocation loc : locations.getRegionLocations()) { - if (loc != null) { - LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegionInfo()); + TableState state = getTableState(r); + if (state != null) { + LOG.info("Table State: " + state); + } else { + RegionLocations locations = getRegionLocations(r); + if (locations == null) return true; + for (HRegionLocation loc : locations.getRegionLocations()) { + if (loc != null) { + LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegionInfo()); + } } } return true; } }; - fullScan(connection, v); + fullScan(connection, v, QueryType.ALL); } /** @@ -577,7 +632,7 @@ public class MetaTableAccessor { * @throws IOException */ public static void fullScan(Connection connection, - final Visitor visitor, final byte [] startrow) + final Visitor visitor, final byte[] startrow, QueryType type) throws IOException { Scan scan = new Scan(); if (startrow != null) scan.setStartRow(startrow); @@ -586,7 +641,9 @@ public class MetaTableAccessor { .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100); scan.setCaching(caching); } - scan.addFamily(HConstants.CATALOG_FAMILY); + for (byte[] family : type.getFamilies()) { + scan.addFamily(family); + } Table metaTable = getMetaHTable(connection); ResultScanner scanner = null; try { @@ -607,11 +664,19 @@ public class MetaTableAccessor { * Returns the column family used for meta columns. * @return HConstants.CATALOG_FAMILY. */ - protected static byte[] getFamily() { + protected static byte[] getCatalogFamily() { return HConstants.CATALOG_FAMILY; } /** + * Returns the column family used for table columns. + * @return HConstants.TABLE_FAMILY. 
+ */ + protected static byte[] getTableFamily() { + return HConstants.TABLE_FAMILY; + } + + /** * Returns the column qualifier for serialized region info * @return HConstants.REGIONINFO_QUALIFIER */ @@ -620,6 +685,15 @@ } /** + * Returns the column qualifier for serialized table state + * + * @return HConstants.TABLE_STATE_QUALIFIER + */ + protected static byte[] getStateColumn() { + return HConstants.TABLE_STATE_QUALIFIER; + } + + /** * Returns the column qualifier for server column for replicaId * @param replicaId the replicaId of the region * @return a byte[] for server column qualifier */ @@ -687,12 +761,12 @@ */ private static ServerName getServerName(final Result r, final int replicaId) { byte[] serverColumn = getServerColumn(replicaId); - Cell cell = r.getColumnLatestCell(getFamily(), serverColumn); + Cell cell = r.getColumnLatestCell(getCatalogFamily(), serverColumn); if (cell == null || cell.getValueLength() == 0) return null; String hostAndPort = Bytes.toString( cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); byte[] startcodeColumn = getStartCodeColumn(replicaId); - cell = r.getColumnLatestCell(getFamily(), startcodeColumn); + cell = r.getColumnLatestCell(getCatalogFamily(), startcodeColumn); if (cell == null || cell.getValueLength() == 0) return null; return ServerName.valueOf(hostAndPort, Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); @@ -705,7 +779,7 @@ * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written. */ private static long getSeqNumDuringOpen(final Result r, final int replicaId) { - Cell cell = r.getColumnLatestCell(getFamily(), getSeqNumColumn(replicaId)); + Cell cell = r.getColumnLatestCell(getCatalogFamily(), getSeqNumColumn(replicaId)); if (cell == null || cell.getValueLength() == 0) return HConstants.NO_SEQNUM; return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); } @@ -715,6 +789,7 @@ * @return an HRegionLocationList containing all locations for the region range or null if * we can't deserialize the result. */ + @Nullable public static RegionLocations getRegionLocations(final Result r) { if (r == null) return null; HRegionInfo regionInfo = getHRegionInfo(r, getRegionInfoColumn()); @@ -725,7 +800,7 @@ locations.add(getRegionLocation(r, regionInfo, 0)); - NavigableMap<byte[], byte[]> infoMap = familyMap.get(getFamily()); + NavigableMap<byte[], byte[]> infoMap = familyMap.get(getCatalogFamily()); if (infoMap == null) return new RegionLocations(locations); // iterate until all serverName columns are seen @@ -782,7 +857,7 @@ * @return An HRegionInfo instance or null.
*/ private static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier) { - Cell cell = r.getColumnLatestCell(getFamily(), qualifier); + Cell cell = r.getColumnLatestCell(getCatalogFamily(), qualifier); if (cell == null) return null; return HRegionInfo.parseFromOrNull(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); @@ -817,6 +892,76 @@ } /** + * Fetch table state for given table from META table + * @param conn connection to use + * @param tableName table to fetch state for + * @return state + * @throws IOException + */ + @Nullable + public static TableState getTableState(Connection conn, TableName tableName) + throws IOException { + Table metaHTable = getMetaHTable(conn); + Result result = + metaHTable.get(new Get(tableName.getName()).addColumn(getTableFamily(), getStateColumn())); + return getTableState(result); + } + + /** + * Fetch table states from META table + * @param conn connection to use + * @return map {tableName -> state} + * @throws IOException + */ + @Nullable + public static Map<TableName, TableState> getTableStates(Connection conn) + throws IOException { + final Map<TableName, TableState> states = new LinkedHashMap<>(); + Visitor collector = new Visitor() { + @Override + public boolean visit(Result r) throws IOException { + TableState state = getTableState(r); + if (state != null) + states.put(state.getTableName(), state); + return true; + } + }; + fullScanTables(conn, collector); + return states; + } + + /** + * Updates state in META + * @param conn connection to use + * @throws IOException + */ + public static void updateTableState(Connection conn, TableName tableName, + TableState.State actual) throws IOException { + updateTableState(conn, new TableState(tableName, actual)); + } + + /** + * Decode table state from META Result. + * Should contain cell from HConstants.TABLE_FAMILY + * @param r result + * @return null if not found + * @throws IOException + */ + @Nullable + public static TableState getTableState(Result r) + throws IOException { + Cell cell = r.getColumnLatestCell(getTableFamily(), getStateColumn()); + if (cell == null) return null; + try { + return TableState.parseFrom(Arrays.copyOfRange(cell.getValueArray(), + cell.getValueOffset(), cell.getValueOffset() + cell.getValueLength())); + } catch (DeserializationException e) { + throw new IOException(e); + } + + } + + /** * Implementations 'visit' a catalog table row. */ public interface Visitor { @@ -1245,6 +1390,25 @@ } } + public static void updateTableState(Connection connection, TableState state) + throws IOException { + Put put = makePutFromTableState(state); + putToMetaTable(connection, put); + } + + public static Put makePutFromTableState(TableState state) { + Put put = new Put(state.getTableName().getName()); + put.add(getTableFamily(), getStateColumn(), state.convert().toByteArray()); + return put; + } + + public static void deleteTableDescriptor(Connection connection, TableName table) + throws IOException { + Delete delete = new Delete(table.getName()); + delete.addFamily(getTableFamily()); + deleteFromMetaTable(connection, delete); + } + /** * Performs an atomic multi-Mutate operation against the given table. */
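Taken together, the helpers above make hbase:meta the system of record for table state. An end-to-end sketch of the new read/write path (illustrative only, not part of the patch; assumes an open Connection named conn and an existing TableName tn):

    // Write a state into the 'table' family of hbase:meta, then read it back.
    MetaTableAccessor.updateTableState(conn, tn, TableState.State.DISABLED);
    TableState readBack = MetaTableAccessor.getTableState(conn, tn);
    assert readBack != null && readBack.inStates(TableState.State.DISABLED);

    // Or enumerate every table's state via the family-scoped scan:
    MetaTableAccessor.fullScanTables(conn, new MetaTableAccessor.Visitor() {
      @Override
      public boolean visit(Result r) throws IOException {
        TableState state = MetaTableAccessor.getTableState(r);
        if (state != null) {
          System.out.println(state.getTableName() + " => " + state.getState());
        }
        return true; // keep scanning
      }
    });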
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionCallable.java new file mode 100644 index 0000000000000000000000000000000000000000..3f44927463741a929d22c4a3e60aee557c10dab2 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionCallable.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import java.io.Closeable; +import java.io.IOException; + +/** + * A RetryingCallable for generic connection operations. + * @param <V> return type + */ +abstract class ConnectionCallable<V> implements RetryingCallable<V>, Closeable { + protected Connection connection; + + public ConnectionCallable(final Connection connection) { + this.connection = connection; + } + + @Override + public void prepare(boolean reload) throws IOException { + } + + @Override + public void close() throws IOException { + } + + @Override + public void throwable(Throwable t, boolean retrying) { + } + + @Override + public String getExceptionMessageAdditionalDetail() { + return ""; + } + + @Override + public long sleep(long pause, int tries) { + return ConnectionUtils.getPauseTime(pause, tries); + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index 358ef3e127a30d686a1b7387838e30b27fe2d8d1..301fdc5de76798d5f160ef075fcfafa7545d4b1e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -2537,7 +2537,7 @@ final class ConnectionManager { GetTableDescriptorsResponse htds; try { GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(tableName); + RequestConverter.buildGetTableDescriptorsRequest(tableName); htds = master.getTableDescriptors(null, req); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); @@ -2562,16 +2562,11 @@ final class ConnectionManager { @Override public TableState getTableState(TableName tableName) throws IOException { - MasterKeepAliveConnection master = getKeepAliveMasterService(); - try { - GetTableStateResponse resp = master.getTableState(null, - RequestConverter.buildGetTableStateRequest(tableName)); - return TableState.convert(resp.getTableState()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } + ClusterConnection conn = getConnectionInternal(getConfiguration()); + TableState tableState = MetaTableAccessor.getTableState(conn, tableName); + if
(tableState == null) + throw new TableNotFoundException(tableName); + return tableState; } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 5ca691548ac89f05d431bbeb376b0b5a73c0719b..acbef401bda2c94bd75ba689161e68502090fa52 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.client; +import java.io.Closeable; import java.io.IOException; import java.io.InterruptedIOException; import java.net.SocketTimeoutException; @@ -283,7 +284,12 @@ public class HBaseAdmin implements Admin { */ @Override public boolean tableExists(final TableName tableName) throws IOException { - return MetaTableAccessor.tableExists(connection, tableName); + return executeCallable(new ConnectionCallable<Boolean>(getConnection()) { + @Override + public Boolean call(int callTimeout) throws ServiceException, IOException { + return MetaTableAccessor.tableExists(connection, tableName); + } + }); } public boolean tableExists(final byte[] tableName) @@ -1109,9 +1115,17 @@ * @throws IOException if a remote or network exception occurs */ @Override - public boolean isTableEnabled(TableName tableName) throws IOException { + public boolean isTableEnabled(final TableName tableName) throws IOException { checkTableExistence(tableName); - return connection.isTableEnabled(tableName); + return executeCallable(new ConnectionCallable<Boolean>(getConnection()) { + @Override + public Boolean call(int callTimeout) throws ServiceException, IOException { + TableState tableState = MetaTableAccessor.getTableState(connection, tableName); + if (tableState == null) + throw new TableNotFoundException(tableName); + return tableState.inStates(TableState.State.ENABLED); + } + }); } public boolean isTableEnabled(byte[] tableName) throws IOException { @@ -2296,10 +2310,15 @@ */ private TableName checkTableExists(final TableName tableName) throws IOException { - if (!MetaTableAccessor.tableExists(connection, tableName)) { - throw new TableNotFoundException(tableName); - } - return tableName; + return executeCallable(new ConnectionCallable<TableName>(getConnection()) { + @Override + public TableName call(int callTimeout) throws ServiceException, IOException { + if (!MetaTableAccessor.tableExists(connection, tableName)) { + throw new TableNotFoundException(tableName); + } + return tableName; + } + }); } /** @@ -3667,7 +3686,8 @@ return QuotaRetriever.open(conf, filter); } - private <V> V executeCallable(MasterCallable<V> callable) throws IOException { + private <C extends RetryingCallable<V> & Closeable, V> V executeCallable(C callable) + throws IOException { RpcRetryingCaller<V> caller = rpcCallerFactory.<V> newCaller(); try { return caller.callWithRetries(callable, operationTimeout); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java index be9b80ce1695b2877986c6efbd28271a0ae7e134..222f47c56ecdce9cb080a9f5fc4c63347e6465ad 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.hbase.client; +import
com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; @@ -187,6 +189,14 @@ public class TableState { state, tableState.getTimestamp()); } + public static TableState parseFrom(byte[] bytes) throws DeserializationException { + try { + return convert(HBaseProtos.TableState.parseFrom(bytes)); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + } + /** * Static version of state checker * @param state desired @@ -200,4 +210,36 @@ public class TableState { } return false; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + TableState that = (TableState) o; + + if (timestamp != that.timestamp) return false; + if (state != that.state) return false; + if (tableName != null ? !tableName.equals(that.tableName) : that.tableName != null) + return false; + + return true; + } + + @Override + public int hashCode() { + int result = (int) (timestamp ^ (timestamp >>> 32)); + result = 31 * result + (tableName != null ? tableName.hashCode() : 0); + result = 31 * result + (state != null ? state.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "TableState{" + + "timestamp=" + timestamp + + ", tableName=" + tableName + + ", state=" + state + + '}'; + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index c2c6401a9b731c5b9479b0f99fa26b023451c6a9..2bb6ec33439ef11baa161c72896013ddd4888c12 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -451,6 +451,16 @@ public final class HConstants { /** The upper-half merge region column qualifier */ public static final byte[] MERGEB_QUALIFIER = Bytes.toBytes("mergeB"); + /** The table family as a string */ + public static final String TABLE_FAMILY_STR = "table"; + + /** The table family */ + public static final byte [] TABLE_FAMILY = Bytes.toBytes(TABLE_FAMILY_STR); + + /** The serialized table state qualifier */ + public static final byte[] TABLE_STATE_QUALIFIER = Bytes.toBytes("state"); + + /** * The meta table version column qualifier. * We keep current version of the meta table in this column in -ROOT-
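With these constants, the state cell is directly addressable; the lookup inside MetaTableAccessor.getTableState reduces to roughly the following (sketch under assumptions: a Table handle on hbase:meta named meta and a TableName tn, neither part of this hunk):

    // State for table 'tn' lives at row tn.getName(), column table:state.
    Get get = new Get(tn.getName())
        .addColumn(HConstants.TABLE_FAMILY, HConstants.TABLE_STATE_QUALIFIER);
    Result r = meta.get(get);
    Cell cell = r.getColumnLatestCell(HConstants.TABLE_FAMILY, HConstants.TABLE_STATE_QUALIFIER);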
* We keep current version of the meta table in this column in -ROOT- diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java index 2947f40a602cb6d7cfe4a724e53554d21fddb23d..a96ef17ab759a692bd2609d66b27356244a89669 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java @@ -3450,15 +3450,15 @@ public final class HBaseProtos { */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder(); - // optional .TableState.State state = 2 [default = ENABLED]; + // optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; /** - * optional .TableState.State state = 2 [default = ENABLED]; + * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; */ - boolean hasState(); + @java.lang.Deprecated boolean hasState(); /** - * optional .TableState.State state = 2 [default = ENABLED]; + * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState(); + @java.lang.Deprecated org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState(); } /** * Protobuf type {@code TableDescriptor} @@ -3601,19 +3601,19 @@ public final class HBaseProtos { return schema_; } - // optional .TableState.State state = 2 [default = ENABLED]; + // optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; public static final int STATE_FIELD_NUMBER = 2; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_; /** - * optional .TableState.State state = 2 [default = ENABLED]; + * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; */ - public boolean hasState() { + @java.lang.Deprecated public boolean hasState() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .TableState.State state = 2 [default = ENABLED]; + * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + @java.lang.Deprecated public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { return state_; } @@ -4054,24 +4054,24 @@ public final class HBaseProtos { return schemaBuilder_; } - // optional .TableState.State state = 2 [default = ENABLED]; + // optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; /** - * optional .TableState.State state = 2 [default = ENABLED]; + * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; */ - public boolean hasState() { + @java.lang.Deprecated public boolean hasState() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .TableState.State state = 2 [default = ENABLED]; + * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + @java.lang.Deprecated public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { return state_; } /** - * optional 
.TableState.State state = 2 [default = ENABLED]; + * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; */ - public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) { + @java.lang.Deprecated public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) { if (value == null) { throw new NullPointerException(); } @@ -4081,9 +4081,9 @@ public final class HBaseProtos { return this; } /** - * optional .TableState.State state = 2 [default = ENABLED]; + * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; */ - public Builder clearState() { + @java.lang.Deprecated public Builder clearState() { bitField0_ = (bitField0_ & ~0x00000002); state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; onChanged(); @@ -18197,52 +18197,52 @@ public final class HBaseProtos { "TableState.State\022\031\n\005table\030\002 \002(\0132\n.TableN" + "ame\022\021\n\ttimestamp\030\003 \001(\004\"?\n\005State\022\013\n\007ENABL" + "ED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENA", - "BLING\020\003\"Z\n\017TableDescriptor\022\034\n\006schema\030\001 \002" + - "(\0132\014.TableSchema\022)\n\005state\030\002 \001(\0162\021.TableS" + - "tate.State:\007ENABLED\"o\n\022ColumnFamilySchem" + - "a\022\014\n\004name\030\001 \002(\014\022#\n\nattributes\030\002 \003(\0132\017.By" + - "tesBytesPair\022&\n\rconfiguration\030\003 \003(\0132\017.Na" + - "meStringPair\"\232\001\n\nRegionInfo\022\021\n\tregion_id" + - "\030\001 \002(\004\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\021" + - "\n\tstart_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007of" + - "fline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022\025\n\nreplica_id" + - "\030\007 \001(\005:\0010\"1\n\014FavoredNodes\022!\n\014favored_nod", - "e\030\001 \003(\0132\013.ServerName\"\225\001\n\017RegionSpecifier" + - "\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.RegionS" + - "pecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpe" + - "cifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED_R" + - "EGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022" + - "\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\thost_name\030\001" + - " \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033" + - "\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStrin" + - "gPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNa" + - "meBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014", - "\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006sec" + - "ond\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t" + - "\022\r\n\005value\030\002 \001(\003\"\314\001\n\023SnapshotDescription\022" + - "\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation" + - "_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031.SnapshotD" + - "escription.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022" + - "\r\n\005owner\030\006 \001(\t\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005" + - "FLUSH\020\001\022\r\n\tSKIPFLUSH\020\002\"}\n\024ProcedureDescr" + - "iption\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 " + - "\001(\t\022\030\n\rcreation_time\030\003 \001(\003:\0010\022&\n\rconfigu", - 
"ration\030\004 \003(\0132\017.NameStringPair\"\n\n\010EmptyMs" + - "g\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDouble" + - "Msg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg" + - "\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016leas" + - "t_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"" + - "K\n\023NamespaceDescriptor\022\014\n\004name\030\001 \002(\014\022&\n\r" + - "configuration\030\002 \003(\0132\017.NameStringPair\"$\n\020" + - "RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005*r\n\013Co" + - "mpareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t" + - "\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_E", - "QUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUn" + - "it\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n" + - "\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTES\020" + - "\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apache.had" + - "oop.hbase.protobuf.generatedB\013HBaseProto" + - "sH\001\240\001\001" + "BLING\020\003\"^\n\017TableDescriptor\022\034\n\006schema\030\001 \002" + + "(\0132\014.TableSchema\022-\n\005state\030\002 \001(\0162\021.TableS" + + "tate.State:\007ENABLEDB\002\030\001\"o\n\022ColumnFamilyS" + + "chema\022\014\n\004name\030\001 \002(\014\022#\n\nattributes\030\002 \003(\0132" + + "\017.BytesBytesPair\022&\n\rconfiguration\030\003 \003(\0132" + + "\017.NameStringPair\"\232\001\n\nRegionInfo\022\021\n\tregio" + + "n_id\030\001 \002(\004\022\036\n\ntable_name\030\002 \002(\0132\n.TableNa" + + "me\022\021\n\tstart_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017" + + "\n\007offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022\025\n\nreplic" + + "a_id\030\007 \001(\005:\0010\"1\n\014FavoredNodes\022!\n\014favored", + "_node\030\001 \003(\0132\013.ServerName\"\225\001\n\017RegionSpeci" + + "fier\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.Reg" + + "ionSpecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023Regio" + + "nSpecifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCOD" + + "ED_REGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 " + + "\001(\004\022\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\thost_na" + + "me\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001" + + "(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameS" + + "tringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"," + + "\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002", + " \001(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n" + + "\006second\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030\001" + + " \001(\t\022\r\n\005value\030\002 \001(\003\"\314\001\n\023SnapshotDescript" + + "ion\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcrea" + + "tion_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031.Snaps" + + "hotDescription.Type:\005FLUSH\022\017\n\007version\030\005 " + + "\001(\005\022\r\n\005owner\030\006 \001(\t\".\n\004Type\022\014\n\010DISABLED\020\000" + + "\022\t\n\005FLUSH\020\001\022\r\n\tSKIPFLUSH\020\002\"}\n\024ProcedureD" + + "escription\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instanc" + + "e\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003:\0010\022&\n\rcon", + 
"figuration\030\004 \003(\0132\017.NameStringPair\"\n\n\010Emp" + + "tyMsg\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDo" + + "ubleMsg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecima" + + "lMsg\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016" + + "least_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 " + + "\002(\004\"K\n\023NamespaceDescriptor\022\014\n\004name\030\001 \002(\014" + + "\022&\n\rconfiguration\030\002 \003(\0132\017.NameStringPair" + + "\"$\n\020RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005*r" + + "\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL" + + "\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_", + "OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010Ti" + + "meUnit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECONDS\020" + + "\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINU" + + "TES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apache" + + ".hadoop.hbase.protobuf.generatedB\013HBaseP" + + "rotosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto index c3c8c6a743f5f1655bc378d2fe2188ac72e38f9d..5d0a90a1fe3f6d5f7f9472597cce263bb28f1e8e 100644 --- a/hbase-protocol/src/main/protobuf/HBase.proto +++ b/hbase-protocol/src/main/protobuf/HBase.proto @@ -62,7 +62,7 @@ message TableState { /** On HDFS representation of table state. */ message TableDescriptor { required TableSchema schema = 1; - optional TableState.State state = 2 [ default = ENABLED ]; + optional TableState.State state = 2 [ default = ENABLED, deprecated = true ]; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java index d27bfb7235e78771979c44b4d0b3646c9777b580..325dabdbef19349fe02d2bdcc4ed5560aac661ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java @@ -35,6 +35,10 @@ import org.apache.hadoop.hbase.regionserver.BloomType; @InterfaceAudience.Private public class TableDescriptor { private HTableDescriptor hTableDescriptor; + /** + * Don't use, state was moved to meta, use MetaTableAccessor instead + */ + @Deprecated private TableState.State tableState; /** @@ -42,6 +46,7 @@ public class TableDescriptor { * @param hTableDescriptor HTableDescriptor to use * @param tableState table state */ + @Deprecated public TableDescriptor(HTableDescriptor hTableDescriptor, TableState.State tableState) { this.hTableDescriptor = hTableDescriptor; @@ -69,10 +74,12 @@ public class TableDescriptor { this.hTableDescriptor = hTableDescriptor; } + @Deprecated public TableState.State getTableState() { return tableState; } + @Deprecated public void setTableState(TableState.State tableState) { this.tableState = tableState; } @@ -170,6 +177,17 @@ public class TableDescriptor { .setBloomFilterType(BloomType.NONE) // Enable cache of data blocks in L1 if more than one caching tier deployed: // e.g. if using CombinedBlockCache (BucketCache). + .setCacheDataInL1(true), + new HColumnDescriptor(HConstants.TABLE_FAMILY) + // Ten is arbitrary number. 
Keep versions to help debugging. + .setMaxVersions(10) + .setInMemory(true) + .setBlocksize(8 * 1024) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore. + .setBloomFilterType(BloomType.NONE) + // Enable cache of data blocks in L1 if more than one caching tier deployed: + // e.g. if using CombinedBlockCache (BucketCache). .setCacheDataInL1(true) }) { }; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 6abb56d56876bf116df77b48f16de05a469ea3fc..1a04d2f7bcb8f69a447f78230bdcb814431294ad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -1569,7 +1569,7 @@ public class AssignmentManager { TableState.State.ENABLING); // Region assignment from META - List<Result> results = MetaTableAccessor.fullScanOfMeta(server.getConnection()); + List<Result> results = MetaTableAccessor.fullScanRegions(server.getConnection()); // Get any new but slow to checkin region server that joined the cluster Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet(); // Set of offline servers to be returned diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index de18ec610114acb93f59fe3eb3271ddce17da5c0..98043ca2e0227ef20d7f18c9f454a4c20f55e139 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -339,7 +339,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server { conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024)); LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) + - ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false)); + ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, + false)); Replication.decorateMasterConfiguration(this.conf); @@ -422,6 +423,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server { return connector.getLocalPort(); } + @Override + protected TableDescriptors getFsTableDescriptors() throws IOException { + return super.getFsTableDescriptors(); + } + /** * For compatibility, if failed with regionserver credentials, try the master one */ @@ -619,9 +625,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { // Invalidate all write locks held previously this.tableLockManager.reapWriteLocks(); - this.tableStateManager = new TableStateManager(this); - this.tableStateManager.start(); status.setStatus("Initializing ZK system trackers"); initializeZKBasedSystemTrackers(); @@ -814,6 +818,8 @@ } enableMeta(TableName.META_TABLE_NAME); + // TODO: should we prevent from using state manager before meta was initialized?
+ // tableStateManager.start(); if ((RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode()) && (!previouslyFailedMetaRSs.isEmpty())) { @@ -1121,7 +1127,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server { if (rpCount < plans.size() && // if performing next balance exceeds cutoff time, exit the loop (System.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) { - //TODO: After balance, there should not be a cutoff time (keeping it as a security net for now) + //TODO: After balance, there should not be a cutoff time (keeping it as + // a security net for now) LOG.debug("No more balancing till next balance run; maximumBalanceTime=" + maximumBalanceTime); break; @@ -1411,7 +1418,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server { LOG.fatal("Failed to become active master", t); // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility if (t instanceof NoClassDefFoundError && - t.getMessage().contains("org/apache/hadoop/hdfs/protocol/FSConstants$SafeModeAction")) { + t.getMessage() + .contains("org/apache/hadoop/hdfs/protocol/FSConstants$SafeModeAction")) { // improved error message for this special case abort("HBase is having a problem with its Hadoop jars. You may need to " + "recompile HBase against Hadoop version " diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 4d72312e09e204537db5134a0f46c66e4cd6081a..78e4c119c5ef823d98aee679d8dcb55e60b0be48 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -471,7 +471,7 @@ public class MasterFileSystem { // we should get them from registry. 
FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd); fsd.createTableDescriptor( - new TableDescriptor(fsd.get(TableName.META_TABLE_NAME), TableState.State.ENABLING)); + new TableDescriptor(fsd.get(TableName.META_TABLE_NAME))); return rd; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java index b03611c5301f1e5a33046647c3c36ef389383f48..39beba8f264baabd5ee7fbcfc7a7ccd8e89420fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java @@ -141,7 +141,7 @@ public class SnapshotOfRegionAssignmentFromMeta { } }; // Scan hbase:meta to pick up user regions - MetaTableAccessor.fullScan(connection, v); + MetaTableAccessor.fullScanRegions(connection, v); //regionToRegionServerMap = regions; LOG.info("Finished to scan the hbase:meta for the current region assignment" + "snapshot"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java index 04cc17c771b1616ffe7fc15c576d5d1e56d293ef..e82cb788cb4e958799c9407aef961a609c08fe71 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java @@ -18,18 +18,18 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; -import java.util.Map; import java.util.Set; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; -import com.google.common.collect.Maps; import com.google.common.collect.Sets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.TableDescriptor; -import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableState; /** @@ -39,24 +39,14 @@ import org.apache.hadoop.hbase.client.TableState; @InterfaceAudience.Private public class TableStateManager { private static final Log LOG = LogFactory.getLog(TableStateManager.class); - private final TableDescriptors descriptors; - private final Map<TableName, TableState.State> tableStates = Maps.newConcurrentMap(); + private final ReadWriteLock lock = new ReentrantReadWriteLock(); + private final MasterServices master; + private TableState metaState = + new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLING); public TableStateManager(MasterServices master) { - this.descriptors = master.getTableDescriptors(); - } - - public void start() throws IOException { - Map<String, TableDescriptor> all = descriptors.getAllDescriptors(); - for (TableDescriptor table : all.values()) { - TableName tableName = table.getHTableDescriptor().getTableName(); - if (LOG.isDebugEnabled()) { - LOG.debug("Adding table state: " + tableName + ": " + table.getTableState()); - } - tableStates.put(tableName, table.getTableState()); - } + this.master = master; } /** @@ -67,16 +57,13 @@ * @throws IOException */
public void setTableState(TableName tableName, TableState.State newState) throws IOException { - synchronized (tableStates) { - TableDescriptor descriptor = readDescriptor(tableName); - if (descriptor == null) { - throw new TableNotFoundException(tableName); - } - if (descriptor.getTableState() != newState) { - writeDescriptor( - new TableDescriptor(descriptor.getHTableDescriptor(), newState)); - } + lock.writeLock().lock(); + try { + updateMetaState(tableName, newState); + } finally { + lock.writeLock().unlock(); } + } /** @@ -91,21 +78,23 @@ TableState.State newState, TableState.State... states) throws IOException { - synchronized (tableStates) { - TableDescriptor descriptor = readDescriptor(tableName); - if (descriptor == null) { + lock.writeLock().lock(); + try { + TableState currentState = readMetaState(tableName); + if (currentState == null) { throw new TableNotFoundException(tableName); } - if (TableState.isInStates(descriptor.getTableState(), states)) { - writeDescriptor( - new TableDescriptor(descriptor.getHTableDescriptor(), newState)); + if (currentState.inStates(states)) { + updateMetaState(tableName, newState); return true; } else { return false; } + } finally { + lock.writeLock().unlock(); } - } + } /** * Set table state to provided but only if table not in specified states @@ -119,18 +108,15 @@ TableState.State newState, TableState.State... states) throws IOException { - synchronized (tableStates) { - TableDescriptor descriptor = readDescriptor(tableName); - if (descriptor == null) { - throw new TableNotFoundException(tableName); - } - if (!TableState.isInStates(descriptor.getTableState(), states)) { - writeDescriptor( - new TableDescriptor(descriptor.getHTableDescriptor(), newState)); - return true; - } else { - return false; - } + TableState currentState = readMetaState(tableName); + if (currentState == null) { + throw new TableNotFoundException(tableName); + } + if (!currentState.inStates(states)) { + updateMetaState(tableName, newState); + return true; + } else { + return false; } } @@ -146,11 +132,9 @@ } public void setDeletedTable(TableName tableName) throws IOException { - TableState.State remove = tableStates.remove(tableName); - if (remove == null) { - LOG.warn("Moving table " + tableName + " state to deleted but was " + - "already deleted"); - } + if (tableName.equals(TableName.META_TABLE_NAME)) + return; + MetaTableAccessor.deleteTableDescriptor(master.getConnection(), tableName); } public boolean isTablePresent(TableName tableName) throws IOException { @@ -164,53 +148,44 @@ * @return tables in given states * @throws IOException */ - public Set<TableName> getTablesInStates(TableState.State... states) throws IOException { - Set<TableName> rv = Sets.newHashSet(); - for (Map.Entry<TableName, TableState.State> entry : tableStates.entrySet()) { - if (TableState.isInStates(entry.getValue(), states)) - rv.add(entry.getKey()); - } + public Set<TableName> getTablesInStates(final TableState.State...
states) throws IOException { + final Set<TableName> rv = Sets.newHashSet(); + MetaTableAccessor.fullScanTables(master.getConnection(), new MetaTableAccessor.Visitor() { + @Override + public boolean visit(Result r) throws IOException { + TableState tableState = MetaTableAccessor.getTableState(r); + if (tableState != null && tableState.inStates(states)) + rv.add(tableState.getTableName()); + return true; + } + }); + if (metaState.inStates(states)) + rv.add(metaState.getTableName()); return rv; } public TableState.State getTableState(TableName tableName) throws IOException { - TableState.State tableState = tableStates.get(tableName); - if (tableState == null) { - TableDescriptor descriptor = readDescriptor(tableName); - if (descriptor != null) - tableState = descriptor.getTableState(); + TableState currentState = readMetaState(tableName); + if (currentState == null) { + throw new TableNotFoundException(tableName); } - return tableState; + return currentState.getState(); } - /** - * Write descriptor in place, update cache of states. - * Write lock should be hold by caller. - * - * @param descriptor what to write - */ - private void writeDescriptor(TableDescriptor descriptor) throws IOException { - TableName tableName = descriptor.getHTableDescriptor().getTableName(); - TableState.State state = descriptor.getTableState(); - descriptors.add(descriptor); - LOG.debug("Table " + tableName + " written descriptor for state " + state); - tableStates.put(tableName, state); - LOG.debug("Table " + tableName + " updated state to " + state); + protected void updateMetaState(TableName tableName, TableState.State newState) + throws IOException { + if (tableName.equals(TableName.META_TABLE_NAME)) { + metaState = new TableState(TableName.META_TABLE_NAME, newState); + } else { + MetaTableAccessor.updateTableState(master.getConnection(), tableName, newState); + } } - /** - * Read current descriptor for table, update cache of states. - * - * @param table descriptor to read - * @return descriptor - * @throws IOException - */ - private TableDescriptor readDescriptor(TableName tableName) throws IOException { - TableDescriptor descriptor = descriptors.getDescriptor(tableName); - if (descriptor == null) - tableStates.remove(tableName); + + protected TableState readMetaState(TableName tableName) throws IOException { + if (tableName.equals(TableName.META_TABLE_NAME)) + return metaState; else - tableStates.put(tableName, descriptor.getTableState()); - return descriptor; + return MetaTableAccessor.getTableState(master.getConnection(), tableName); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java index d1f01512b1cb11f867e978681d71fd3544146d6a..964a2496eb7680afdcac7af74c95a1acffe9315b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java @@ -209,7 +209,7 @@ public class CreateTableHandler extends EventHandler { // 1.
Create Table Descriptor // using a copy of descriptor, table will be created enabling first TableDescriptor underConstruction = new TableDescriptor( - this.hTableDescriptor, TableState.State.ENABLING); + this.hTableDescriptor); Path tempTableDir = FSUtils.getTableDir(tempdir, tableName); new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory( tempTableDir, underConstruction, false); @@ -223,6 +223,8 @@ public class CreateTableHandler extends EventHandler { " to hbase root=" + tableDir); } + MetaTableAccessor.updateTableState(this.server.getConnection(), hTableDescriptor.getTableName(), + TableState.State.ENABLING); if (regionInfos != null && regionInfos.size() > 0) { // 4. Add regions to META addRegionsToMeta(regionInfos); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java index a124bf6ab577cffd0acf5e91dd7670402408f6a5..3ce36e8438e57ce3822455827953265db0ee8244 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java @@ -95,8 +95,7 @@ public class TruncateTableHandler extends DeleteTableHandler { AssignmentManager assignmentManager = this.masterServices.getAssignmentManager(); // 1. Create Table Descriptor - TableDescriptor underConstruction = new TableDescriptor( - this.hTableDescriptor, TableState.State.ENABLING); + TableDescriptor underConstruction = new TableDescriptor(this.hTableDescriptor); Path tempTableDir = FSUtils.getTableDir(tempdir, this.tableName); new FSTableDescriptors(server.getConfiguration()) .createTableDescriptorForTableDirectory(tempTableDir, underConstruction, false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 5263a99f4fe23881fec9cea787787e8fbf87d5a0..9da784b341fa70256a522bf02f8650f5529a5fcc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -520,8 +520,7 @@ public class HRegionServer extends HasThread implements boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true); this.fs = new HFileSystem(this.conf, useHBaseChecksum); this.rootDir = FSUtils.getRootDir(this.conf); - this.tableDescriptors = new FSTableDescriptors(this.conf, - this.fs, this.rootDir, !canUpdateTableDescriptor(), false); + this.tableDescriptors = getFsTableDescriptors(); service = new ExecutorService(getServerName().toShortString()); spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration()); @@ -552,6 +551,11 @@ public class HRegionServer extends HasThread implements this.walRoller = new LogRoller(this, this); } + protected TableDescriptors getFsTableDescriptors() throws IOException { + return new FSTableDescriptors(this.conf, + this.fs, this.rootDir, !canUpdateTableDescriptor(), false); + } + protected void login(UserProvider user, String host) throws IOException { user.login("hbase.regionserver.keytab.file", "hbase.regionserver.kerberos.principal", host); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index a3cfa0421b8a4f265c76f8eb609b3eb0eebbb745..d0ffcab0607a7528fdd076f1b68824aa3478528d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.snapshot; -import java.io.IOException; import java.io.FileNotFoundException; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -30,7 +30,6 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -39,20 +38,20 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableDescriptor; -import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest; import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; /** @@ -357,7 +356,7 @@ public class SnapshotManifest { // write a copy of descriptor to the snapshot directory new FSTableDescriptors(conf, fs, rootDir) .createTableDescriptorForTableDirectory(workingDir, new TableDescriptor( - htd, TableState.State.ENABLED), false); + htd), false); } else { LOG.debug("Convert to Single Snapshot Manifest"); convertToV2SingleManifest(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 7a6811c36c2451c28e263b50938c76148f0c032f..cce37d73593e556dbfc51e2b8c031720816573e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -33,7 +33,6 @@ import com.google.common.primitives.Ints; import org.apache.commons.lang.NotImplementedException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -47,7 +46,7 @@ import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableInfoMissingException; import 
org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -154,7 +153,7 @@ public class FSTableDescriptors implements TableDescriptors { invocations++; if (TableName.META_TABLE_NAME.equals(tablename)) { cachehits++; - return new TableDescriptor(metaTableDescritor, TableState.State.ENABLED); + return new TableDescriptor(metaTableDescritor); } // hbase:meta is already handled. If some one tries to get the descriptor for // .logs, .oldlogs or .corrupt throw an exception. @@ -218,7 +217,7 @@ public class FSTableDescriptors implements TableDescriptors { } // add hbase:meta to the response tds.put(this.metaTableDescritor.getNameAsString(), - new TableDescriptor(metaTableDescritor, TableState.State.ENABLED)); + new TableDescriptor(metaTableDescritor)); } else { LOG.debug("Fetching table descriptors from the filesystem."); boolean allvisited = true; @@ -592,7 +591,7 @@ public class FSTableDescriptors implements TableDescriptors { HTableDescriptor htd = HTableDescriptor.parseFrom(content); LOG.warn("Found old table descriptor, converting to new format for table " + htd.getTableName() + "; NOTE table will be in ENABLED state!"); - td = new TableDescriptor(htd, TableState.State.ENABLED); + td = new TableDescriptor(htd); if (rewritePb) rewriteTableDescriptor(fs, status, td); } catch (DeserializationException e1) { throw new IOException("content=" + Bytes.toShort(content), e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 7e6ee7b0ace0224e7b016ca9607e9b3efcd67f54..88e61687eca819a909a7cb4504f491c227bd3e4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -53,12 +53,16 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.collect.Multimap; +import com.google.common.collect.TreeMultimap; +import com.google.protobuf.ServiceException; import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FSDataOutputStream; @@ -85,6 +89,8 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -101,6 +107,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import 
org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -111,6 +118,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; @@ -136,13 +144,6 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.zookeeper.KeeperException; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Multimap; -import com.google.common.collect.TreeMultimap; -import com.google.protobuf.ServiceException; - /** * HBaseFsck (hbck) is a tool for checking and repairing region consistency and * table integrity problems in a corrupted HBase. @@ -1137,7 +1138,7 @@ public class HBaseFsck extends Configured implements Closeable { for (String columnfamimly : columns) { htd.addFamily(new HColumnDescriptor(columnfamimly)); } - fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true); + fstd.createTableDescriptor(new TableDescriptor(htd), true); return true; } @@ -1185,7 +1186,7 @@ public class HBaseFsck extends Configured implements Closeable { if (tableName.equals(htds[j].getTableName())) { HTableDescriptor htd = htds[j]; LOG.info("fixing orphan table: " + tableName + " from cache"); - fstd.createTableDescriptor(new TableDescriptor(htd), true); + fstd.createTableDescriptor(new TableDescriptor(htd), true); j++; iter.remove(); } @@ -1262,6 +1263,8 @@ public class HBaseFsck extends Configured implements Closeable { } TableInfo ti = e.getValue(); + puts.add(MetaTableAccessor + .makePutFromTableState(new TableState(ti.tableName, TableState.State.ENABLED))); for (Entry<byte[], Collection<HbckInfo>> spl : ti.sc.getStarts().asMap() .entrySet()) { Collection<HbckInfo> his = spl.getValue();
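
With table state materialized as a row in hbase:meta, the hbck repair path above must write one state Put per recovered table alongside the region rows. A minimal sketch of that call, assuming an open Table on hbase:meta; the class and method names are illustrative only.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableState;

class MetaStateRepairSketch {
  // Queues the 'table:state' => ENABLED row for a rebuilt table; in hbck the
  // region Puts are added to the same list before a single batched put.
  static void addStateRow(Table meta, TableName tableName) throws IOException {
    List<Put> puts = new ArrayList<Put>();
    puts.add(MetaTableAccessor.makePutFromTableState(
        new TableState(tableName, TableState.State.ENABLED)));
    meta.put(puts);
  }
}
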
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index 9b55acd2d8fe72114f5e74e48169b01df6839fac..bf7d0139fd1cc8f9b936cd3aaca84afcd4d122ce 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -18,11 +18,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; @@ -32,6 +27,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; +import com.google.protobuf.ServiceException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -67,7 +63,11 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; -import com.google.protobuf.ServiceException; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * Class to test HBaseAdmin. @@ -248,7 +248,8 @@ public class TestAdmin1 { this.admin.disableTable(ht.getName()); assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), TableState.State.DISABLED)); + ht.getName(), TableState.State.DISABLED)); + assertEquals(TableState.State.DISABLED, getStateFromMeta(table)); // Test that table is disabled get = new Get(row); @@ -275,7 +276,8 @@ public class TestAdmin1 { this.admin.enableTable(table); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), TableState.State.ENABLED)); + ht.getName(), TableState.State.ENABLED)); + assertEquals(TableState.State.ENABLED, getStateFromMeta(table)); // Test that table is enabled try { @@ -287,6 +289,13 @@ public class TestAdmin1 { ht.close(); } + private TableState.State getStateFromMeta(TableName table) throws IOException { + TableState state = + MetaTableAccessor.getTableState(TEST_UTIL.getConnection(), table); + assertNotNull(state); + return state.getState(); + } + @Test (timeout=300000) public void testDisableAndEnableTables() throws IOException { final byte [] row = Bytes.toBytes("row"); @@ -318,6 +327,10 @@ public class TestAdmin1 { ok = true; } + assertEquals(TableState.State.DISABLED, getStateFromMeta(table1)); + assertEquals(TableState.State.DISABLED, getStateFromMeta(table2)); + + assertTrue(ok); this.admin.enableTables("testDisableAndEnableTable.*"); @@ -336,18 +349,23 @@ public class TestAdmin1 { ht1.close(); ht2.close(); + + assertEquals(TableState.State.ENABLED, getStateFromMeta(table1)); + assertEquals(TableState.State.ENABLED, getStateFromMeta(table2)); } @Test (timeout=300000) public void testCreateTable() throws IOException { HTableDescriptor [] tables = admin.listTables(); int numTables = tables.length; - TEST_UTIL.createTable(TableName.valueOf("testCreateTable"), HConstants.CATALOG_FAMILY).close(); + TableName tableName = TableName.valueOf("testCreateTable"); + TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); tables = this.admin.listTables(); assertEquals(numTables + 1, tables.length); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - TableName.valueOf("testCreateTable"), TableState.State.ENABLED)); + tableName, TableState.State.ENABLED)); + assertEquals(TableState.State.ENABLED, getStateFromMeta(tableName)); } @Test (timeout=300000) @@ -405,6 +423,7 @@ public class TestAdmin1 { Table table = TEST_UTIL.getConnection().getTable(htd.getTableName()); HTableDescriptor confirmedHtd = table.getTableDescriptor(); assertEquals(htd.compareTo(confirmedHtd), 0); + MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection()); table.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index 5a0dffaed67c2fe3c205aea2978249990689ea74..7c8b08aa23bba7cbde70973e14d98e281aa826d3 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -1164,7 +1164,7 @@ public class TestAssignmentManagerOnCluster { tableNameList.add(TableName.valueOf(name + "_" + i)); } } - List<Result> metaRows = MetaTableAccessor.fullScanOfMeta(admin.getConnection()); + List<Result> metaRows = MetaTableAccessor.fullScanRegions(admin.getConnection()); int count = 0; // Check all 100 rows are in meta for (Result result : metaRows) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index cc501ededd30afa41be4af940eea72a020c6dd5c..4975bbf7c5887a1981cb37d1619554e84ab5d3ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -1021,8 +1021,7 @@ public class TestCatalogJanitor { } private TableDescriptor createTableDescriptor() { - TableDescriptor htd = new TableDescriptor(createHTableDescriptor(), TableState.State.ENABLED); - return htd; + return new TableDescriptor(createHTableDescriptor()); } private MultiResponse buildMultiResponse(MultiRequest req) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java index 6307c4c0e5e42e601d1569f2d5ca110be2cbb681..ca9bc9c1a6b1a0281e4019987b1966e0421ede49 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java @@ -47,8 +47,6 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; @@ -134,7 +132,7 @@ public class TestMasterOperationsForRegionReplicas { } } - List<Result> metaRows = MetaTableAccessor.fullScanOfMeta(ADMIN.getConnection()); + List<Result> metaRows = MetaTableAccessor.fullScanRegions(ADMIN.getConnection()); int numRows = 0; for (Result result : metaRows) { RegionLocations locations = MetaTableAccessor.getRegionLocations(result); @@ -297,7 +295,7 @@ public class TestMasterOperationsForRegionReplicas { return true; } }; - MetaTableAccessor.fullScan(connection, visitor); + MetaTableAccessor.fullScanRegions(connection, visitor); assert(count.get() == numRegions); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index a99daf2d9e8c47c3e882378c5d01bfd4b284c8ca..c09982e66c558e6893e2925c6a1405e0d3d1c9e6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -76,7 +76,7 @@ public class TestFSTableDescriptors { public void testCreateAndUpdate() throws IOException { Path testdir = UTIL.getDataTestDir("testCreateAndUpdate"); 
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate")); - TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); + TableDescriptor td = new TableDescriptor(htd); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir); assertTrue(fstd.createTableDescriptor(td)); @@ -113,7 +113,7 @@ public class TestFSTableDescriptors { assertTrue(!fs.exists(p1)); int i2 = FSTableDescriptors.getTableInfoSequenceId(p2); assertTrue(i2 == i1 + 1); - td = new TableDescriptor(htd, TableState.State.DISABLED); + td = new TableDescriptor(htd); Path p3 = fstd.updateTableDescriptor(td); // Assert we cleaned up the old file. assertTrue(!fs.exists(p2)); @@ -172,7 +172,7 @@ public class TestFSTableDescriptors { final String name = "testReadingHTDFromFS"; FileSystem fs = FileSystem.get(UTIL.getConfiguration()); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); - TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); + TableDescriptor td = new TableDescriptor(htd); Path rootdir = UTIL.getDataTestDir(name); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); fstd.createTableDescriptor(td); @@ -187,7 +187,7 @@ public class TestFSTableDescriptors { Path rootdir = UTIL.getDataTestDir(name); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); - TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); + TableDescriptor td = new TableDescriptor(htd); Path descriptorFile = fstd.updateTableDescriptor(td); try (FSDataOutputStream out = fs.create(descriptorFile, true)) { out.write(htd.toByteArray()); @@ -222,8 +222,8 @@ public class TestFSTableDescriptors { final int count = 10; // Write out table infos. 
for (int i = 0; i < count; i++) { - TableDescriptor htd = new TableDescriptor(new HTableDescriptor(name + i), - TableState.State.ENABLED); + TableDescriptor htd = new TableDescriptor( + new HTableDescriptor(TableName.valueOf(name + i))); htds.createTableDescriptor(htd); } @@ -420,7 +420,7 @@ public class TestFSTableDescriptors { Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready"); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf( "testCreateTableDescriptorUpdatesIfThereExistsAlready")); - TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); + TableDescriptor td = new TableDescriptor(htd); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir); assertTrue(fstd.createTableDescriptor(td)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java index 349bf567e6017a39a2d1f57de8c52ee049f492cd..57c7a14f8e19e4899ec4dfaf649e7852874d942a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java @@ -105,7 +105,7 @@ public class OfflineMetaRebuildTestCore { tableIdx++; htbl = setupTable(table); populateTable(htbl); - assertEquals(5, scanMeta()); + assertEquals(5 + 2, scanMeta()); LOG.info("Table " + table + " has " + tableRowCount(conf, table) + " entries."); assertEquals(16, tableRowCount(conf, table)); @@ -294,6 +294,7 @@ public class OfflineMetaRebuildTestCore { count++; } meta.close(); + MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection()); return count; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java index a3d323c1e032975be92b9338ee39b4c688fb4fb3..a2a775711a6a7793de146e3b130f8615493f534c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java @@ -47,7 +47,7 @@ public class TestOfflineMetaRebuildBase extends OfflineMetaRebuildTestCore { wipeOutMeta(); // is meta really messed up? - assertEquals(1, scanMeta()); + assertEquals(1 + 2, scanMeta()); assertErrors(doFsck(conf, false), new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, @@ -70,13 +70,14 @@ public class TestOfflineMetaRebuildBase extends OfflineMetaRebuildTestCore { TEST_UTIL.restartHBaseCluster(3); try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { Admin admin = connection.getAdmin(); - admin.enableTable(table); + if (admin.isTableDisabled(table)) + admin.enableTable(table); LOG.info("Waiting for no more RIT"); TEST_UTIL.waitUntilNoRegionsInTransition(60000); LOG.info("No more RIT in ZK, now doing final test verification"); // everything is good again. 
- assertEquals(5, scanMeta()); + assertEquals(5 + 2, scanMeta()); // including table state rows HTableDescriptor[] htbls = admin.listTables(); LOG.info("Tables present after restart: " + Arrays.toString(htbls)); assertEquals(1, htbls.length); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java index 6320b93c4711dc309dc35339e5ee9ff70677ed0e..4c6b8a2c8cdd4b77515176062da9c0453d4d065c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java @@ -49,7 +49,7 @@ public class TestOfflineMetaRebuildHole extends OfflineMetaRebuildTestCore { wipeOutMeta(); // is meta really messed up? - assertEquals(1, scanMeta()); + assertEquals(1 + 2, scanMeta()); assertErrors(doFsck(conf, false), new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, @@ -83,7 +83,7 @@ public class TestOfflineMetaRebuildHole extends OfflineMetaRebuildTestCore { } // Meta still messed up. - assertEquals(1, scanMeta()); + assertEquals(1 + 2, scanMeta()); HTableDescriptor[] htbls = getTables(TEST_UTIL.getConfiguration()); LOG.info("Tables present after restart: " + Arrays.toString(htbls)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java index e49b1546eb6758054bb1f43584e4992d408a494d..4c0b6f16bea2fe93e2f166e464e92952d1738188 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java @@ -52,7 +52,7 @@ public class TestOfflineMetaRebuildOverlap extends OfflineMetaRebuildTestCore { wipeOutMeta(); // is meta really messed up? - assertEquals(1, scanMeta()); + assertEquals(1 + 2, scanMeta()); assertErrors(doFsck(conf, false), new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, @@ -91,7 +91,7 @@ public class TestOfflineMetaRebuildOverlap extends OfflineMetaRebuildTestCore { } // Meta still messed up. - assertEquals(1, scanMeta()); + assertEquals(1 + 2, scanMeta()); HTableDescriptor[] htbls = getTables(TEST_UTIL.getConfiguration()); LOG.info("Tables present after restart: " + Arrays.toString(htbls));
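
The scanMeta() expectations in these rebuild tests grow by two because a full scan of hbase:meta now also returns the per-table 'table:state' rows (presumably the test table plus the system namespace table here; hbase:meta's own state stays in master memory). A sketch of counting just the state rows with the visitor API used above; the class name is illustrative.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;

class StateRowCounter {
  static int countStateRows(Connection connection) throws IOException {
    final AtomicInteger rows = new AtomicInteger(0);
    MetaTableAccessor.fullScanTables(connection, new MetaTableAccessor.Visitor() {
      @Override
      public boolean visit(Result r) throws IOException {
        if (MetaTableAccessor.getTableState(r) != null) {
          rows.incrementAndGet(); // one state row per table
        }
        return true; // keep scanning
      }
    });
    return rows.get();
  }
}
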