diff --git a/bin/hbase-daemon.sh b/bin/hbase-daemon.sh
index e411dad..94ab2d1 100755
--- a/bin/hbase-daemon.sh
+++ b/bin/hbase-daemon.sh
@@ -205,15 +205,15 @@ case $startStop in
     # Add to the command log file vital stats on our environment.
     echo "`date` Starting $command on `hostname`" >> ${HBASE_LOGLOG}
     `ulimit -a` >> "$HBASE_LOGLOG" 2>&1
-    # in case the parent shell gets the kill make sure to trap signals.
-    # Only one will get called. Either the trap or the flow will go through.
-    trap cleanAfterRun SIGHUP SIGINT SIGTERM EXIT
     nice -n $HBASE_NICENESS "$HBASE_HOME"/bin/hbase \
         --config "${HBASE_CONF_DIR}" \
         $command "$@" start >> ${HBASE_LOGOUT} 2>&1 &
-    hbase_pid=$!
-    echo $hbase_pid > ${HBASE_PID}
-    wait $hbase_pid
+    echo $! > ${HBASE_PID}
+    # in case the parent shell gets the kill make sure to trap signals.
+    # Only one will get called. Either the trap or the flow will go through.
+    trap cleanAfterRun SIGHUP SIGINT SIGTERM EXIT
+    wait
+    cleanAfterRun
     ;;
 
 (internal_autorestart)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index 51352bb..cc91aed 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -1332,17 +1332,6 @@ public class HTableDescriptor implements Comparable {
           .setBloomFilterType(BloomType.NONE)
           // Enable cache of data blocks in L1 if more than one caching tier deployed:
           // e.g. if using CombinedBlockCache (BucketCache).
-          .setCacheDataInL1(true),
-      new HColumnDescriptor(HConstants.TABLE_FAMILY)
-          // Ten is arbitrary number. Keep versions to help debugging.
-          .setMaxVersions(10)
-          .setInMemory(true)
-          .setBlocksize(8 * 1024)
-          .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-          // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
-          .setBloomFilterType(BloomType.NONE)
-          // Enable cache of data blocks in L1 if more than one caching tier deployed:
-          // e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true) }); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 2e6723a..8f3a20e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -17,23 +17,9 @@ */ package org.apache.hadoop.hbase; -import javax.annotation.Nullable; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Set; -import java.util.SortedMap; -import java.util.TreeMap; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -52,8 +38,6 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; @@ -64,6 +48,18 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Threads; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + /** * Read/write operations on region and assignment information store in * hbase:meta. @@ -82,11 +78,6 @@ public class MetaTableAccessor { * HRI defined which is called default replica. * * Meta layout (as of 0.98 + HBASE-10070) is like: - * - * For each table there is single row in column family 'table' formatted: - * including namespace and columns are: - * table: state => contains table state - * * For each table range, there is a single row, formatted like: * ,,,. This row corresponds to the regionName * of the default region replica. @@ -129,24 +120,6 @@ public class MetaTableAccessor { META_REGION_PREFIX, 0, len); } - - @InterfaceAudience.Private - public enum QueryType { - ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY), - REGION(HConstants.CATALOG_FAMILY), - TABLE(HConstants.TABLE_FAMILY); - - private final byte[][] families; - - QueryType(byte[]... families) { - this.families = families; - } - - byte[][] getFamilies() { - return this.families; - } - } - /** The delimiter for meta columns for replicaIds > 0 */ protected static final char META_REPLICA_ID_DELIMITER = '_'; @@ -158,64 +131,40 @@ public class MetaTableAccessor { // Reading operations // //////////////////////// - /** - * Performs a full scan of hbase:meta for regions. - * @param connection connection we're using - * @param visitor Visitor invoked against each row in regions family. + /** + * Performs a full scan of a hbase:meta table. 
+ * @return List of {@link org.apache.hadoop.hbase.client.Result} * @throws IOException */ - public static void fullScanRegions(Connection connection, - final Visitor visitor) - throws IOException { - fullScan(connection, visitor, null, QueryType.REGION); - } - - /** - * Performs a full scan of hbase:meta for regions. - * @param connection connection we're using - * @throws IOException - */ - public static List fullScanRegions(Connection connection) - throws IOException { - return fullScan(connection, QueryType.REGION); - } - - /** - * Performs a full scan of hbase:meta for tables. - * @param connection connection we're using - * @param visitor Visitor invoked against each row in tables family. - * @throws IOException - */ - public static void fullScanTables(Connection connection, - final Visitor visitor) - throws IOException { - fullScan(connection, visitor, null, QueryType.TABLE); + public static List fullScanOfMeta(Connection connection) + throws IOException { + CollectAllVisitor v = new CollectAllVisitor(); + fullScan(connection, v, null); + return v.getResults(); } /** * Performs a full scan of hbase:meta. * @param connection connection we're using * @param visitor Visitor invoked against each row. - * @param type scanned part of meta * @throws IOException */ public static void fullScan(Connection connection, - final Visitor visitor, QueryType type) + final Visitor visitor) throws IOException { - fullScan(connection, visitor, null, type); + fullScan(connection, visitor, null); } /** * Performs a full scan of hbase:meta. * @param connection connection we're using - * @param type scanned part of meta * @return List of {@link Result} * @throws IOException */ - public static List fullScan(Connection connection, QueryType type) + public static List fullScan(Connection connection) throws IOException { CollectAllVisitor v = new CollectAllVisitor(); - fullScan(connection, v, null, type); + fullScan(connection, v, null); return v.getResults(); } @@ -357,7 +306,6 @@ public class MetaTableAccessor { * @return null if it doesn't contain merge qualifier, else two merge regions * @throws IOException */ - @Nullable public static Pair getRegionsFromMergeQualifier( Connection connection, byte[] regionName) throws IOException { Result result = getRegionResult(connection, regionName); @@ -380,9 +328,42 @@ public class MetaTableAccessor { public static boolean tableExists(Connection connection, final TableName tableName) throws IOException { - // Catalog tables always exist. - return tableName.equals(TableName.META_TABLE_NAME) - || getTableState(connection, tableName) != null; + if (tableName.equals(TableName.META_TABLE_NAME)) { + // Catalog tables always exist. + return true; + } + // Make a version of ResultCollectingVisitor that only collects the first + CollectingVisitor visitor = new CollectingVisitor() { + private HRegionInfo current = null; + + @Override + public boolean visit(Result r) throws IOException { + RegionLocations locations = getRegionLocations(r); + if (locations == null || locations.getRegionLocation().getRegionInfo() == null) { + LOG.warn("No serialized HRegionInfo in " + r); + return true; + } + this.current = locations.getRegionLocation().getRegionInfo(); + if (this.current == null) { + LOG.warn("No serialized HRegionInfo in " + r); + return true; + } + if (!isInsideTable(this.current, tableName)) return false; + // Else call super and add this Result to the collection. + super.visit(r); + // Stop collecting regions from table after we get one. 
+ return false; + } + + @Override + void add(Result r) { + // Add the current HRI. + this.results.add(this.current); + } + }; + fullScan(connection, visitor, getTableStartRowForMeta(tableName)); + // If visitor has results >= 1 then table exists. + return visitor.getResults().size() >= 1; } /** @@ -419,7 +400,6 @@ public class MetaTableAccessor { return getListOfHRegionInfos(result); } - @Nullable static List getListOfHRegionInfos(final List> pairs) { if (pairs == null || pairs.isEmpty()) return null; List result = new ArrayList(pairs.size()); @@ -490,7 +470,6 @@ public class MetaTableAccessor { * Do not use this method to get meta table regions, use methods in MetaTableLocator instead. * @param connection connection we're using * @param tableName table to work with - * @param excludeOfflinedSplitParents don't return split parents * @return Return list of regioninfos and server addresses. * @throws IOException */ @@ -533,7 +512,7 @@ public class MetaTableAccessor { } } }; - fullScan(connection, visitor, getTableStartRowForMeta(tableName), QueryType.REGION); + fullScan(connection, visitor, getTableStartRowForMeta(tableName)); return visitor.getResults(); } @@ -565,7 +544,7 @@ public class MetaTableAccessor { } } }; - fullScan(connection, v, QueryType.REGION); + fullScan(connection, v); return hris; } @@ -576,22 +555,17 @@ public class MetaTableAccessor { public boolean visit(Result r) throws IOException { if (r == null || r.isEmpty()) return true; LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r); - TableState state = getTableState(r); - if (state != null) { - LOG.info("Table State: " + state); - } else { - RegionLocations locations = getRegionLocations(r); - if (locations == null) return true; - for (HRegionLocation loc : locations.getRegionLocations()) { - if (loc != null) { - LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegionInfo()); - } + RegionLocations locations = getRegionLocations(r); + if (locations == null) return true; + for (HRegionLocation loc : locations.getRegionLocations()) { + if (loc != null) { + LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegionInfo()); } } return true; } }; - fullScan(connection, v, QueryType.ALL); + fullScan(connection, v); } /** @@ -600,40 +574,20 @@ public class MetaTableAccessor { * @param visitor Visitor invoked against each row. * @param startrow Where to start the scan. Pass null if want to begin scan * at first row. - * @param type scanned part of meta * hbase:meta, the default (pass false to scan hbase:meta) * @throws IOException */ public static void fullScan(Connection connection, - final Visitor visitor, @Nullable final byte[] startrow, QueryType type) throws IOException { - fullScan(connection, visitor, startrow, type, false); - } - - /** - * Performs a full scan of a catalog table. - * @param connection connection we're using - * @param visitor Visitor invoked against each row. - * @param startrow Where to start the scan. Pass null if want to begin scan - * at first row. 
- * @param type scanned part of meta - * @param raw read raw data including Delete tumbstones - * hbase:meta, the default (pass false to scan hbase:meta) - * @throws IOException - */ - public static void fullScan(Connection connection, - final Visitor visitor, @Nullable final byte[] startrow, QueryType type, boolean raw) + final Visitor visitor, final byte [] startrow) throws IOException { Scan scan = new Scan(); - scan.setRaw(raw); if (startrow != null) scan.setStartRow(startrow); if (startrow == null) { int caching = connection.getConfiguration() .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100); scan.setCaching(caching); } - for (byte[] family : type.getFamilies()) { - scan.addFamily(family); - } + scan.addFamily(HConstants.CATALOG_FAMILY); Table metaTable = getMetaHTable(connection); ResultScanner scanner = null; try { @@ -654,19 +608,11 @@ public class MetaTableAccessor { * Returns the column family used for meta columns. * @return HConstants.CATALOG_FAMILY. */ - protected static byte[] getCatalogFamily() { + protected static byte[] getFamily() { return HConstants.CATALOG_FAMILY; } /** - * Returns the column family used for table columns. - * @return HConstants.TABLE_FAMILY. - */ - protected static byte[] getTableFamily() { - return HConstants.TABLE_FAMILY; - } - - /** * Returns the column qualifier for serialized region info * @return HConstants.REGIONINFO_QUALIFIER */ @@ -675,15 +621,6 @@ public class MetaTableAccessor { } /** - * Returns the column qualifier for serialized table state - * - * @return HConstants.TABLE_STATE_QUALIFIER - */ - protected static byte[] getStateColumn() { - return HConstants.TABLE_STATE_QUALIFIER; - } - - /** * Returns the column qualifier for server column for replicaId * @param replicaId the replicaId of the region * @return a byte[] for server column qualifier @@ -749,15 +686,14 @@ public class MetaTableAccessor { * @param r Result to pull from * @return A ServerName instance or null if necessary fields not found or empty. */ - @Nullable private static ServerName getServerName(final Result r, final int replicaId) { byte[] serverColumn = getServerColumn(replicaId); - Cell cell = r.getColumnLatestCell(getCatalogFamily(), serverColumn); + Cell cell = r.getColumnLatestCell(getFamily(), serverColumn); if (cell == null || cell.getValueLength() == 0) return null; String hostAndPort = Bytes.toString( cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); byte[] startcodeColumn = getStartCodeColumn(replicaId); - cell = r.getColumnLatestCell(getCatalogFamily(), startcodeColumn); + cell = r.getColumnLatestCell(getFamily(), startcodeColumn); if (cell == null || cell.getValueLength() == 0) return null; return ServerName.valueOf(hostAndPort, Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); @@ -770,7 +706,7 @@ public class MetaTableAccessor { * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written. */ private static long getSeqNumDuringOpen(final Result r, final int replicaId) { - Cell cell = r.getColumnLatestCell(getCatalogFamily(), getSeqNumColumn(replicaId)); + Cell cell = r.getColumnLatestCell(getFamily(), getSeqNumColumn(replicaId)); if (cell == null || cell.getValueLength() == 0) return HConstants.NO_SEQNUM; return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); } @@ -780,7 +716,6 @@ public class MetaTableAccessor { * @return an HRegionLocationList containing all locations for the region range or null if * we can't deserialize the result. 
*/ - @Nullable public static RegionLocations getRegionLocations(final Result r) { if (r == null) return null; HRegionInfo regionInfo = getHRegionInfo(r, getRegionInfoColumn()); @@ -791,7 +726,7 @@ public class MetaTableAccessor { locations.add(getRegionLocation(r, regionInfo, 0)); - NavigableMap infoMap = familyMap.get(getCatalogFamily()); + NavigableMap infoMap = familyMap.get(getFamily()); if (infoMap == null) return new RegionLocations(locations); // iterate until all serverName columns are seen @@ -853,9 +788,8 @@ public class MetaTableAccessor { * @param qualifier Column family qualifier * @return An HRegionInfo instance or null. */ - @Nullable private static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier) { - Cell cell = r.getColumnLatestCell(getCatalogFamily(), qualifier); + Cell cell = r.getColumnLatestCell(getFamily(), qualifier); if (cell == null) return null; return HRegionInfo.parseFromOrNull(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); @@ -890,80 +824,6 @@ public class MetaTableAccessor { } /** - * Fetch table state for given table from META table - * @param conn connection to use - * @param tableName table to fetch state for - * @return state - * @throws IOException - */ - @Nullable - public static TableState getTableState(Connection conn, TableName tableName) - throws IOException { - Table metaHTable = getMetaHTable(conn); - Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getStateColumn()); - long time = EnvironmentEdgeManager.currentTime(); - get.setTimeRange(0, time); - Result result = - metaHTable.get(get); - return getTableState(result); - } - - /** - * Fetch table states from META table - * @param conn connection to use - * @return map {tableName -> state} - * @throws IOException - */ - public static Map getTableStates(Connection conn) - throws IOException { - final Map states = new LinkedHashMap<>(); - Visitor collector = new Visitor() { - @Override - public boolean visit(Result r) throws IOException { - TableState state = getTableState(r); - if (state != null) - states.put(state.getTableName(), state); - return true; - } - }; - fullScanTables(conn, collector); - return states; - } - - /** - * Updates state in META - * @param conn connection to use - * @param tableName table to look for - * @throws IOException - */ - public static void updateTableState(Connection conn, TableName tableName, - TableState.State actual) throws IOException { - updateTableState(conn, new TableState(tableName, actual)); - } - - /** - * Decode table state from META Result. - * Should contain cell from HConstants.TABLE_FAMILY - * @param r result - * @return null if not found - * @throws IOException - */ - @Nullable - public static TableState getTableState(Result r) - throws IOException { - Cell cell = r.getColumnLatestCell(getTableFamily(), getStateColumn()); - if (cell == null) return null; - try { - return TableState.parseFrom(TableName.valueOf(r.getRow()), - Arrays.copyOfRange(cell.getValueArray(), - cell.getValueOffset(), cell.getValueOffset() + cell.getValueLength())); - } catch (DeserializationException e) { - throw new IOException(e); - } - - } - - /** * Implementations 'visit' a catalog table row. 
*/ public interface Visitor { @@ -1060,8 +920,7 @@ public class MetaTableAccessor { */ public static Put makePutFromRegionInfo(HRegionInfo regionInfo) throws IOException { - long now = EnvironmentEdgeManager.currentTime(); - Put put = new Put(regionInfo.getRegionName(), now); + Put put = new Put(regionInfo.getRegionName()); addRegionInfo(put, regionInfo); return put; } @@ -1074,9 +933,7 @@ public class MetaTableAccessor { if (regionInfo == null) { throw new IllegalArgumentException("Can't make a delete for null region"); } - long now = EnvironmentEdgeManager.currentTime(); Delete delete = new Delete(regionInfo.getRegionName()); - delete.addFamily(getCatalogFamily(), now); return delete; } @@ -1177,15 +1034,14 @@ public class MetaTableAccessor { throws IOException { int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove; for (byte[] row : metaRows) { - long now = EnvironmentEdgeManager.currentTime(); Delete deleteReplicaLocations = new Delete(row); for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) { - deleteReplicaLocations.addColumns(getCatalogFamily(), - getServerColumn(i), now); - deleteReplicaLocations.addColumns(getCatalogFamily(), - getSeqNumColumn(i), now); - deleteReplicaLocations.addColumns(getCatalogFamily(), - getStartCodeColumn(i), now); + deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY, + getServerColumn(i)); + deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY, + getSeqNumColumn(i)); + deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY, + getStartCodeColumn(i)); } deleteFromMetaTable(connection, deleteReplicaLocations); } @@ -1315,8 +1171,7 @@ public class MetaTableAccessor { public static void addDaughter(final Connection connection, final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum) throws NotAllMetaRegionsOnlineException, IOException { - long now = EnvironmentEdgeManager.currentTime(); - Put put = new Put(regionInfo.getRegionName(), now); + Put put = new Put(regionInfo.getRegionName()); addRegionInfo(put, regionInfo); if (sn != null) { addLocation(put, sn, openSeqNum, regionInfo.getReplicaId()); @@ -1418,45 +1273,6 @@ public class MetaTableAccessor { } /** - * Update state of the table in meta. - * @param connection what we use for update - * @param state new state - * @throws IOException - */ - public static void updateTableState(Connection connection, TableState state) - throws IOException { - Put put = makePutFromTableState(state); - putToMetaTable(connection, put); - LOG.info( - "Updated table " + state.getTableName() + " state to " + state.getState() + " in META"); - } - - /** - * Construct PUT for given state - * @param state new state - */ - public static Put makePutFromTableState(TableState state) { - long time = EnvironmentEdgeManager.currentTime(); - Put put = new Put(state.getTableName().getName(), time); - put.add(getTableFamily(), getStateColumn(), state.convert().toByteArray()); - return put; - } - - /** - * Remove state for table from meta - * @param connection to use for deletion - * @param table to delete state for - */ - public static void deleteTableState(Connection connection, TableName table) - throws IOException { - long time = EnvironmentEdgeManager.currentTime(); - Delete delete = new Delete(table.getName()); - delete.addColumns(getTableFamily(), getStateColumn(), time); - deleteFromMetaTable(connection, delete); - LOG.info("Deleted table " + table + " state from META"); - } - - /** * Performs an atomic multi-Mutate operation against the given table. 
*/ private static void multiMutate(Table table, byte[] row, Mutation... mutations) @@ -1521,8 +1337,7 @@ public class MetaTableAccessor { HRegionInfo regionInfo, ServerName sn, long openSeqNum) throws IOException { // region replicas are kept in the primary region's row - long time = EnvironmentEdgeManager.currentTime(); - Put put = new Put(getMetaKeyForRegion(regionInfo), time); + Put put = new Put(getMetaKeyForRegion(regionInfo)); addLocation(put, sn, openSeqNum, regionInfo.getReplicaId()); putToMetaTable(connection, put); LOG.info("Updated row " + regionInfo.getRegionNameAsString() + @@ -1538,9 +1353,7 @@ public class MetaTableAccessor { public static void deleteRegion(Connection connection, HRegionInfo regionInfo) throws IOException { - long time = EnvironmentEdgeManager.currentTime(); Delete delete = new Delete(regionInfo.getRegionName()); - delete.addFamily(getCatalogFamily(), time); deleteFromMetaTable(connection, delete); LOG.info("Deleted " + regionInfo.getRegionNameAsString()); } @@ -1554,11 +1367,8 @@ public class MetaTableAccessor { public static void deleteRegions(Connection connection, List regionsInfo) throws IOException { List deletes = new ArrayList(regionsInfo.size()); - long time = EnvironmentEdgeManager.currentTime(); for (HRegionInfo hri: regionsInfo) { - Delete e = new Delete(hri.getRegionName()); - e.addFamily(getCatalogFamily(), time); - deletes.add(e); + deletes.add(new Delete(hri.getRegionName())); } deleteFromMetaTable(connection, deletes); LOG.info("Deleted " + regionsInfo); @@ -1578,7 +1388,7 @@ public class MetaTableAccessor { List mutation = new ArrayList(); if (regionsToRemove != null) { for (HRegionInfo hri: regionsToRemove) { - mutation.add(makeDeleteFromRegionInfo(hri)); + mutation.add(new Delete(hri.getRegionName())); } } if (regionsToAdd != null) { @@ -1621,10 +1431,9 @@ public class MetaTableAccessor { */ public static void deleteMergeQualifiers(Connection connection, final HRegionInfo mergedRegion) throws IOException { - long time = EnvironmentEdgeManager.currentTime(); Delete delete = new Delete(mergedRegion.getRegionName()); - delete.addColumns(getCatalogFamily(), HConstants.MERGEA_QUALIFIER, time); - delete.addColumns(getCatalogFamily(), HConstants.MERGEB_QUALIFIER, time); + delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER); + delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER); deleteFromMetaTable(connection, delete); LOG.info("Deleted references in merged region " + mergedRegion.getRegionNameAsString() + ", qualifier=" @@ -1634,7 +1443,7 @@ public class MetaTableAccessor { private static Put addRegionInfo(final Put p, final HRegionInfo hri) throws IOException { - p.addImmutable(getCatalogFamily(), HConstants.REGIONINFO_QUALIFIER, + p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, hri.toByteArray()); return p; } @@ -1643,20 +1452,20 @@ public class MetaTableAccessor { // using regionserver's local time as the timestamp of Put. 
// See: HBASE-11536 long now = EnvironmentEdgeManager.currentTime(); - p.addImmutable(getCatalogFamily(), getServerColumn(replicaId), now, + p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), now, Bytes.toBytes(sn.getHostAndPort())); - p.addImmutable(getCatalogFamily(), getStartCodeColumn(replicaId), now, + p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), now, Bytes.toBytes(sn.getStartcode())); - p.addImmutable(getCatalogFamily(), getSeqNumColumn(replicaId), now, + p.addImmutable(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId), now, Bytes.toBytes(openSeqNum)); return p; } public static Put addEmptyLocation(final Put p, int replicaId) { long now = EnvironmentEdgeManager.currentTime(); - p.addImmutable(getCatalogFamily(), getServerColumn(replicaId), now, null); - p.addImmutable(getCatalogFamily(), getStartCodeColumn(replicaId), now, null); - p.addImmutable(getCatalogFamily(), getSeqNumColumn(replicaId), now, null); + p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), now, null); + p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), now, null); + p.addImmutable(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId), now, null); return p; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionCallable.java deleted file mode 100644 index 3f44927..0000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionCallable.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import java.io.Closeable; -import java.io.IOException; - -/** - * A RetryingCallable for generic connection operations. 
- * @param return type - */ -abstract class ConnectionCallable implements RetryingCallable, Closeable { - protected Connection connection; - - public ConnectionCallable(final Connection connection) { - this.connection = connection; - } - - @Override - public void prepare(boolean reload) throws IOException { - } - - @Override - public void close() throws IOException { - } - - @Override - public void throwable(Throwable t, boolean retrying) { - } - - @Override - public String getExceptionMessageAdditionalDetail() { - return ""; - } - - @Override - public long sleep(long pause, int tries) { - return ConnectionUtils.getPauseTime(pause, tries); - } -} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index e986156..dbd555c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -18,7 +18,6 @@ */ package org.apache.hadoop.hbase.client; -import javax.annotation.Nullable; import java.io.Closeable; import java.io.IOException; import java.io.InterruptedIOException; @@ -38,12 +37,9 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.BlockingRpcChannel; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -64,6 +60,8 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.AsyncProcess.AsyncRequestFuture; +import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; +import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy; import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory; import org.apache.hadoop.hbase.client.coprocessor.Batch; @@ -174,7 +172,6 @@ import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.ExceptionUtil; -import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.ZKUtil; @@ -182,6 +179,11 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.ipc.RemoteException; import org.apache.zookeeper.KeeperException; +import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.BlockingRpcChannel; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; + /** * An internal, non-instantiable class that manages creation of {@link HConnection}s. 
*/ @@ -927,7 +929,30 @@ ˙ */ @Override public boolean isTableAvailable(final TableName tableName) throws IOException { - return isTableAvailable(tableName, null); + final AtomicBoolean available = new AtomicBoolean(true); + final AtomicInteger regionCount = new AtomicInteger(0); + MetaScannerVisitor visitor = new MetaScannerVisitorBase() { + @Override + public boolean processRow(Result row) throws IOException { + HRegionInfo info = MetaScanner.getHRegionInfo(row); + if (info != null && !info.isSplitParent()) { + if (tableName.equals(info.getTable())) { + ServerName server = HRegionInfo.getServerName(row); + if (server == null) { + available.set(false); + return false; + } + regionCount.incrementAndGet(); + } else if (tableName.compareTo(info.getTable()) < 0) { + // Return if we are done with the current table + return false; + } + } + return true; + } + }; + MetaScanner.metaScan(this, visitor, tableName); + return available.get() && (regionCount.get() > 0); } @Override @@ -936,61 +961,44 @@ ˙ */ } @Override - public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys) + public boolean isTableAvailable(final TableName tableName, final byte[][] splitKeys) throws IOException { - try { - if (!isTableEnabled(tableName)) { - LOG.debug("Table " + tableName + " not enabled"); - return false; - } - ClusterConnection connection = getConnectionInternal(getConfiguration()); - List> locations = MetaTableAccessor - .getTableRegionsAndLocations(connection, tableName, true); - int notDeployed = 0; - int regionCount = 0; - for (Pair pair : locations) { - HRegionInfo info = pair.getFirst(); - if (pair.getSecond() == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Table " + tableName + " has not deployed region " + pair.getFirst() - .getEncodedName()); - } - notDeployed++; - } else if (splitKeys != null - && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) { - for (byte[] splitKey : splitKeys) { - // Just check if the splitkey is available - if (Bytes.equals(info.getStartKey(), splitKey)) { - regionCount++; - break; + final AtomicBoolean available = new AtomicBoolean(true); + final AtomicInteger regionCount = new AtomicInteger(0); + MetaScannerVisitor visitor = new MetaScannerVisitorBase() { + @Override + public boolean processRow(Result row) throws IOException { + HRegionInfo info = MetaScanner.getHRegionInfo(row); + if (info != null && !info.isSplitParent()) { + if (tableName.equals(info.getTable())) { + ServerName server = HRegionInfo.getServerName(row); + if (server == null) { + available.set(false); + return false; + } + if (!Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) { + for (byte[] splitKey : splitKeys) { + // Just check if the splitkey is available + if (Bytes.equals(info.getStartKey(), splitKey)) { + regionCount.incrementAndGet(); + break; + } + } + } else { + // Always empty start row should be counted + regionCount.incrementAndGet(); } + } else if (tableName.compareTo(info.getTable()) < 0) { + // Return if we are done with the current table + return false; } - } else { - // Always empty start row should be counted - regionCount++; - } - } - if (notDeployed > 0) { - if (LOG.isDebugEnabled()) { - LOG.debug("Table " + tableName + " has " + notDeployed + " regions"); - } - return false; - } else if (splitKeys != null && regionCount != splitKeys.length + 1) { - if (LOG.isDebugEnabled()) { - LOG.debug("Table " + tableName + " expected to have " + (splitKeys.length + 1) - + " regions, but only " + regionCount + " available"); - } - 
return false; - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Table " + tableName + " should be available"); } return true; } - } catch (TableNotFoundException tnfe) { - LOG.warn("Table " + tableName + " not enabled, it is not exists"); - return false; - } + }; + MetaScanner.metaScan(this, visitor, tableName); + // +1 needs to be added so that the empty start row is also taken into account + return available.get() && (regionCount.get() == splitKeys.length + 1); } @Override @@ -2477,7 +2485,7 @@ ˙ */ GetTableDescriptorsResponse htds; try { GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(tableName); + RequestConverter.buildGetTableDescriptorsRequest(tableName); htds = master.getTableDescriptors(null, req); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); @@ -2502,11 +2510,16 @@ ˙ */ @Override public TableState getTableState(TableName tableName) throws IOException { - ClusterConnection conn = getConnectionInternal(getConfiguration()); - TableState tableState = MetaTableAccessor.getTableState(conn, tableName); - if (tableState == null) - throw new TableNotFoundException(tableName); - return tableState; + MasterKeepAliveConnection master = getKeepAliveMasterService(); + try { + GetTableStateResponse resp = master.getTableState(null, + RequestConverter.buildGetTableStateRequest(tableName)); + return TableState.convert(resp.getTableState()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + master.close(); + } } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 3acaaf9..d14e369 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -18,7 +18,6 @@ */ package org.apache.hadoop.hbase.client; -import java.io.Closeable; import java.io.IOException; import java.io.InterruptedIOException; import java.net.SocketTimeoutException; @@ -287,12 +286,7 @@ public class HBaseAdmin implements Admin { */ @Override public boolean tableExists(final TableName tableName) throws IOException { - return executeCallable(new ConnectionCallable(getConnection()) { - @Override - public Boolean call(int callTimeout) throws ServiceException, IOException { - return MetaTableAccessor.tableExists(connection, tableName); - } - }); + return MetaTableAccessor.tableExists(connection, tableName); } public boolean tableExists(final byte[] tableName) @@ -553,11 +547,11 @@ public class HBaseAdmin implements Admin { } int numRegs = (splitKeys == null ? 
1 : splitKeys.length + 1) * desc.getRegionReplication(); int prevRegCount = 0; - boolean tableWasEnabled = false; + boolean doneWithMetaScan = false; for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier; ++tries) { - if (tableWasEnabled) { - // Wait all table regions comes online + if (!doneWithMetaScan) { + // Wait for new table to come on-line final AtomicInteger actualRegCount = new AtomicInteger(0); MetaScannerVisitor visitor = new MetaScannerVisitorBase() { @Override @@ -605,26 +599,17 @@ public class HBaseAdmin implements Admin { tries = -1; } } else { - return; + doneWithMetaScan = true; + tries = -1; } + } else if (isTableEnabled(desc.getTableName())) { + return; } else { - try { - tableWasEnabled = isTableAvailable(desc.getTableName()); - } catch (TableNotFoundException tnfe) { - LOG.debug( - "Table " + desc.getTableName() + " was not enabled, sleeping, still " + numRetries - + " retries left"); - } - if (tableWasEnabled) { - // no we will scan meta to ensure all regions are online - tries = -1; - } else { - try { // Sleep - Thread.sleep(getPauseTime(tries)); - } catch (InterruptedException e) { - throw new InterruptedIOException("Interrupted when waiting" + - " for table to be enabled; meta scan was done"); - } + try { // Sleep + Thread.sleep(getPauseTime(tries)); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted when waiting" + + " for table to be enabled; meta scan was done"); } } } @@ -713,11 +698,24 @@ public class HBaseAdmin implements Admin { }); int failures = 0; + // Wait until all regions deleted for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) { try { - tableExists = tableExists(tableName); - if (!tableExists) - break; + // Find whether all regions are deleted. + List regionLations = + MetaScanner.listTableRegionLocations(conf, connection, tableName); + + // let us wait until hbase:meta table is updated and + // HMaster removes the table from its HTableDescriptors + if (regionLations == null || regionLations.size() == 0) { + HTableDescriptor htd = getTableDescriptorByTableName(tableName); + + if (htd == null) { + // table could not be found in master - we are done. 
+ tableExists = false; + break; + } + } } catch (IOException ex) { failures++; if(failures >= numRetries - 1) { // no more tries left @@ -1111,17 +1109,9 @@ public class HBaseAdmin implements Admin { * @throws IOException if a remote or network exception occurs */ @Override - public boolean isTableEnabled(final TableName tableName) throws IOException { + public boolean isTableEnabled(TableName tableName) throws IOException { checkTableExistence(tableName); - return executeCallable(new ConnectionCallable(getConnection()) { - @Override - public Boolean call(int callTimeout) throws ServiceException, IOException { - TableState tableState = MetaTableAccessor.getTableState(connection, tableName); - if (tableState == null) - throw new TableNotFoundException(tableName); - return tableState.inStates(TableState.State.ENABLED); - } - }); + return connection.isTableEnabled(tableName); } public boolean isTableEnabled(byte[] tableName) throws IOException { @@ -2306,15 +2296,10 @@ public class HBaseAdmin implements Admin { */ private TableName checkTableExists(final TableName tableName) throws IOException { - return executeCallable(new ConnectionCallable(getConnection()) { - @Override - public TableName call(int callTimeout) throws ServiceException, IOException { - if (!MetaTableAccessor.tableExists(connection, tableName)) { - throw new TableNotFoundException(tableName); - } - return tableName; - } - }); + if (!MetaTableAccessor.tableExists(connection, tableName)) { + throw new TableNotFoundException(tableName); + } + return tableName; } /** @@ -3682,8 +3667,7 @@ public class HBaseAdmin implements Admin { return QuotaRetriever.open(conf, filter); } - private & Closeable, V> V executeCallable(C callable) - throws IOException { + private V executeCallable(MasterCallable callable) throws IOException { RpcRetryingCaller caller = rpcCallerFactory.newCaller(); try { return caller.callWithRetries(callable, operationTimeout); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java index 77c90f5..be9b80c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java @@ -17,11 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; @@ -173,30 +171,20 @@ public class TableState { public HBaseProtos.TableState convert() { return HBaseProtos.TableState.newBuilder() .setState(this.state.convert()) - .setTable(ProtobufUtil.toProtoTableName(this.tableName)) // set for backward compatibility + .setTable(ProtobufUtil.toProtoTableName(this.tableName)) .setTimestamp(this.timestamp) .build(); } /** * Covert from PB version of TableState - * - * @param tableName table this state of * @param tableState convert from * @return POJO */ - public static TableState convert(TableName tableName, HBaseProtos.TableState tableState) { + public static TableState convert(HBaseProtos.TableState tableState) { TableState.State state = State.convert(tableState.getState()); - return new TableState(tableName, state, 
tableState.getTimestamp()); - } - - public static TableState parseFrom(TableName tableName, byte[] bytes) - throws DeserializationException { - try { - return convert(tableName, HBaseProtos.TableState.parseFrom(bytes)); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } + return new TableState(ProtobufUtil.toTableName(tableState.getTable()), + state, tableState.getTimestamp()); } /** @@ -212,36 +200,4 @@ public class TableState { } return false; } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - TableState that = (TableState) o; - - if (timestamp != that.timestamp) return false; - if (state != that.state) return false; - if (tableName != null ? !tableName.equals(that.tableName) : that.tableName != null) - return false; - - return true; - } - - @Override - public int hashCode() { - int result = (int) (timestamp ^ (timestamp >>> 32)); - result = 31 * result + (tableName != null ? tableName.hashCode() : 0); - result = 31 * result + (state != null ? state.hashCode() : 0); - return result; - } - - @Override - public String toString() { - return "TableState{" + - "timestamp=" + timestamp + - ", tableName=" + tableName + - ", state=" + state + - '}'; - } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java index cbb7ff3..d760aa2 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java @@ -178,19 +178,11 @@ public class CellComparator implements Comparator, Serializable { return compareWithoutRow(left, right); } - /** - * Do not use comparing rows from hbase:meta. Meta table Cells have schema (table,startrow,hash) - * so can't be treated as plain byte arrays as this method does. - */ public static int compareRows(final Cell left, final Cell right) { return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right.getRowArray(), right.getRowOffset(), right.getRowLength()); } - /** - * Do not use comparing rows from hbase:meta. Meta table Cells have schema (table,startrow,hash) - * so can't be treated as plain byte arrays as this method does. - */ public static int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, int rlength) { return Bytes.compareTo(left, loffset, llength, right, roffset, rlength); @@ -383,16 +375,14 @@ public class CellComparator implements Comparator, Serializable { /** * Try to return a Cell that falls between left and right but that is - * shorter; i.e. takes up less space. This trick is used building HFile block index. + * shorter; i.e. takes up less space. This is trick is used building HFile block index. * Its an optimization. It does not always work. In this case we'll just return the * right cell. - * @param comparator Comparator to use. * @param left * @param right * @return A cell that sorts between left and right. */ - public static Cell getMidpoint(final KeyValue.KVComparator comparator, final Cell left, - final Cell right) { + public static Cell getMidpoint(final Cell left, final Cell right) { // TODO: Redo so only a single pass over the arrays rather than one to compare and then a // second composing midpoint. 
if (right == null) { @@ -401,12 +391,6 @@ public class CellComparator implements Comparator, Serializable { if (left == null) { return right; } - // If Cells from meta table, don't mess around. meta table Cells have schema - // (table,startrow,hash) so can't be treated as plain byte arrays. Just skip out without - // trying to do this optimization. - if (comparator != null && comparator instanceof KeyValue.MetaComparator) { - return right; - } int diff = compareRows(left, right); if (diff > 0) { throw new IllegalArgumentException("Left row sorts after right row; left=" + diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 8a07397..2ee55f7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase; import static org.apache.hadoop.hbase.io.hfile.BlockType.MAGIC_LENGTH; +import java.nio.ByteBuffer; import java.nio.charset.Charset; import java.util.Arrays; import java.util.Collections; @@ -450,16 +451,6 @@ public final class HConstants { /** The upper-half merge region column qualifier */ public static final byte[] MERGEB_QUALIFIER = Bytes.toBytes("mergeB"); - /** The catalog family as a string*/ - public static final String TABLE_FAMILY_STR = "table"; - - /** The catalog family */ - public static final byte [] TABLE_FAMILY = Bytes.toBytes(TABLE_FAMILY_STR); - - /** The serialized table state qualifier */ - public static final byte[] TABLE_STATE_QUALIFIER = Bytes.toBytes("state"); - - /** * The meta table version column qualifier. * We keep current version of the meta table in this column in -ROOT- @@ -747,8 +738,7 @@ public final class HConstants { /** * The client scanner timeout period in milliseconds. */ - public static final String HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD = - "hbase.client.scanner.timeout.period"; + public static final String HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD = "hbase.client.scanner.timeout.period"; /** * Use {@link #HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD} instead. 
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java index 007f826..d6a2f72 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -80,65 +79,56 @@ public class TestCellComparator { @Test public void testGetShortMidpoint() { - KeyValue.KVComparator comparator = new KeyValue.KVComparator(); - Cell left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); Cell right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); - Cell mid = CellComparator.getMidpoint(comparator, left, right); + Cell mid = CellComparator.getMidpoint(left, right); assertTrue(CellComparator.compare(left, mid, true) <= 0); assertTrue(CellComparator.compare(mid, right, true) <= 0); left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("a")); - mid = CellComparator.getMidpoint(comparator, left, right); + mid = CellComparator.getMidpoint(left, right); assertTrue(CellComparator.compare(left, mid, true) < 0); assertTrue(CellComparator.compare(mid, right, true) <= 0); left = CellUtil.createCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a")); - mid = CellComparator.getMidpoint(comparator, left, right); + mid = CellComparator.getMidpoint(left, right); assertTrue(CellComparator.compare(left, mid, true) < 0); assertTrue(CellComparator.compare(mid, right, true) <= 0); left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("bbbbbbb"), Bytes.toBytes("a"), Bytes.toBytes("a")); - mid = CellComparator.getMidpoint(comparator, left, right); + mid = CellComparator.getMidpoint(left, right); assertTrue(CellComparator.compare(left, mid, true) < 0); assertTrue(CellComparator.compare(mid, right, true) < 0); assertEquals(1, (int)mid.getRowLength()); left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("a")); - mid = CellComparator.getMidpoint(comparator, left, right); + mid = CellComparator.getMidpoint(left, right); assertTrue(CellComparator.compare(left, mid, true) < 0); assertTrue(CellComparator.compare(mid, right, true) <= 0); left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaa"), Bytes.toBytes("b")); - mid = CellComparator.getMidpoint(comparator, left, right); + mid = CellComparator.getMidpoint(left, right); assertTrue(CellComparator.compare(left, mid, true) < 0); assertTrue(CellComparator.compare(mid, right, true) < 0); assertEquals(2, (int)mid.getFamilyLength()); left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = 
CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaaa")); - mid = CellComparator.getMidpoint(comparator, left, right); + mid = CellComparator.getMidpoint(left, right); assertTrue(CellComparator.compare(left, mid, true) < 0); assertTrue(CellComparator.compare(mid, right, true) < 0); assertEquals(2, (int)mid.getQualifierLength()); left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("b")); - mid = CellComparator.getMidpoint(comparator, left, right); + mid = CellComparator.getMidpoint(left, right); assertTrue(CellComparator.compare(left, mid, true) < 0); assertTrue(CellComparator.compare(mid, right, true) <= 0); assertEquals(1, (int)mid.getQualifierLength()); - - // Assert that if meta comparator, it returns the right cell -- i.e. no optimization done. - left = CellUtil.createCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a")); - right = CellUtil.createCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a")); - mid = CellComparator.getMidpoint(new KeyValue.MetaComparator(), left, right); - assertTrue(CellComparator.compare(left, mid, true) < 0); - assertTrue(CellComparator.compare(mid, right, true) == 0); } } \ No newline at end of file diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java index cd31ce5..3453baf 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java @@ -21,13 +21,13 @@ package org.apache.hadoop.hbase; import java.text.MessageFormat; +import junit.framework.Assert; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import static org.junit.Assert.fail; - /** * A class that provides a standard waitFor pattern * See details at https://issues.apache.org/jira/browse/HBASE-7384 @@ -98,21 +98,6 @@ public final class Waiter { } /** - * A mixin interface, can be used with {@link Waiter} to explain failed state. - */ - @InterfaceAudience.Private - public interface ExplainingPredicate extends Predicate { - - /** - * Perform a predicate evaluation. - * - * @return explanation of failed state - */ - String explainFailure() throws E; - - } - - /** * Makes the current thread sleep for the duration equal to the specified time in milliseconds * multiplied by the {@link #getWaitForRatio(Configuration)}. * @param conf the configuration @@ -205,13 +190,9 @@ public final class Waiter { LOG.warn(MessageFormat.format("Waiting interrupted after [{0}] msec", System.currentTimeMillis() - started)); } else if (failIfTimeout) { - String msg = getExplanation(predicate); - fail(MessageFormat - .format("Waiting timed out after [{0}] msec", adjustedTimeout) + msg); + Assert.fail(MessageFormat.format("Waiting timed out after [{0}] msec", adjustedTimeout)); } else { - String msg = getExplanation(predicate); - LOG.warn( - MessageFormat.format("Waiting timed out after [{0}] msec", adjustedTimeout) + msg); + LOG.warn(MessageFormat.format("Waiting timed out after [{0}] msec", adjustedTimeout)); } } return (eval || interrupted) ? 
(System.currentTimeMillis() - started) : -1; @@ -220,17 +201,4 @@ public final class Waiter { } } - public static String getExplanation(Predicate explain) { - if (explain instanceof ExplainingPredicate) { - try { - return " " + ((ExplainingPredicate) explain).explainFailure(); - } catch (Exception e) { - LOG.error("Failed to get explanation, ", e); - return e.getMessage(); - } - } else { - return ""; - } - } - } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java index a96ef17..2947f40 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java @@ -3450,15 +3450,15 @@ public final class HBaseProtos { */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder(); - // optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; + // optional .TableState.State state = 2 [default = ENABLED]; /** - * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; + * optional .TableState.State state = 2 [default = ENABLED]; */ - @java.lang.Deprecated boolean hasState(); + boolean hasState(); /** - * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; + * optional .TableState.State state = 2 [default = ENABLED]; */ - @java.lang.Deprecated org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState(); } /** * Protobuf type {@code TableDescriptor} @@ -3601,19 +3601,19 @@ public final class HBaseProtos { return schema_; } - // optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; + // optional .TableState.State state = 2 [default = ENABLED]; public static final int STATE_FIELD_NUMBER = 2; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_; /** - * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; + * optional .TableState.State state = 2 [default = ENABLED]; */ - @java.lang.Deprecated public boolean hasState() { + public boolean hasState() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; + * optional .TableState.State state = 2 [default = ENABLED]; */ - @java.lang.Deprecated public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { return state_; } @@ -4054,24 +4054,24 @@ public final class HBaseProtos { return schemaBuilder_; } - // optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; + // optional .TableState.State state = 2 [default = ENABLED]; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; /** - * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; + * optional .TableState.State state = 2 [default = ENABLED]; */ - @java.lang.Deprecated public boolean hasState() { + public boolean hasState() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; + * optional .TableState.State 
state = 2 [default = ENABLED]; */ - @java.lang.Deprecated public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { return state_; } /** - * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; + * optional .TableState.State state = 2 [default = ENABLED]; */ - @java.lang.Deprecated public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) { + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) { if (value == null) { throw new NullPointerException(); } @@ -4081,9 +4081,9 @@ public final class HBaseProtos { return this; } /** - * optional .TableState.State state = 2 [default = ENABLED, deprecated = true]; + * optional .TableState.State state = 2 [default = ENABLED]; */ - @java.lang.Deprecated public Builder clearState() { + public Builder clearState() { bitField0_ = (bitField0_ & ~0x00000002); state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; onChanged(); @@ -18197,52 +18197,52 @@ public final class HBaseProtos { "TableState.State\022\031\n\005table\030\002 \002(\0132\n.TableN" + "ame\022\021\n\ttimestamp\030\003 \001(\004\"?\n\005State\022\013\n\007ENABL" + "ED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENA", - "BLING\020\003\"^\n\017TableDescriptor\022\034\n\006schema\030\001 \002" + - "(\0132\014.TableSchema\022-\n\005state\030\002 \001(\0162\021.TableS" + - "tate.State:\007ENABLEDB\002\030\001\"o\n\022ColumnFamilyS" + - "chema\022\014\n\004name\030\001 \002(\014\022#\n\nattributes\030\002 \003(\0132" + - "\017.BytesBytesPair\022&\n\rconfiguration\030\003 \003(\0132" + - "\017.NameStringPair\"\232\001\n\nRegionInfo\022\021\n\tregio" + - "n_id\030\001 \002(\004\022\036\n\ntable_name\030\002 \002(\0132\n.TableNa" + - "me\022\021\n\tstart_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017" + - "\n\007offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022\025\n\nreplic" + - "a_id\030\007 \001(\005:\0010\"1\n\014FavoredNodes\022!\n\014favored", - "_node\030\001 \003(\0132\013.ServerName\"\225\001\n\017RegionSpeci" + - "fier\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.Reg" + - "ionSpecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023Regio" + - "nSpecifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCOD" + - "ED_REGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 " + - "\001(\004\022\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\thost_na" + - "me\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001" + - "(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameS" + - "tringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"," + - "\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002", - " \001(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n" + - "\006second\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030\001" + - " \001(\t\022\r\n\005value\030\002 \001(\003\"\314\001\n\023SnapshotDescript" + - "ion\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcrea" + - "tion_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031.Snaps" + - "hotDescription.Type:\005FLUSH\022\017\n\007version\030\005 " + - "\001(\005\022\r\n\005owner\030\006 
\001(\t\".\n\004Type\022\014\n\010DISABLED\020\000" + - "\022\t\n\005FLUSH\020\001\022\r\n\tSKIPFLUSH\020\002\"}\n\024ProcedureD" + - "escription\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instanc" + - "e\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003:\0010\022&\n\rcon", - "figuration\030\004 \003(\0132\017.NameStringPair\"\n\n\010Emp" + - "tyMsg\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDo" + - "ubleMsg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecima" + - "lMsg\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016" + - "least_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 " + - "\002(\004\"K\n\023NamespaceDescriptor\022\014\n\004name\030\001 \002(\014" + - "\022&\n\rconfiguration\030\002 \003(\0132\017.NameStringPair" + - "\"$\n\020RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005*r" + - "\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL" + - "\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_", - "OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010Ti" + - "meUnit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECONDS\020" + - "\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINU" + - "TES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apache" + - ".hadoop.hbase.protobuf.generatedB\013HBaseP" + - "rotosH\001\240\001\001" + "BLING\020\003\"Z\n\017TableDescriptor\022\034\n\006schema\030\001 \002" + + "(\0132\014.TableSchema\022)\n\005state\030\002 \001(\0162\021.TableS" + + "tate.State:\007ENABLED\"o\n\022ColumnFamilySchem" + + "a\022\014\n\004name\030\001 \002(\014\022#\n\nattributes\030\002 \003(\0132\017.By" + + "tesBytesPair\022&\n\rconfiguration\030\003 \003(\0132\017.Na" + + "meStringPair\"\232\001\n\nRegionInfo\022\021\n\tregion_id" + + "\030\001 \002(\004\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\021" + + "\n\tstart_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007of" + + "fline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022\025\n\nreplica_id" + + "\030\007 \001(\005:\0010\"1\n\014FavoredNodes\022!\n\014favored_nod", + "e\030\001 \003(\0132\013.ServerName\"\225\001\n\017RegionSpecifier" + + "\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.RegionS" + + "pecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpe" + + "cifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED_R" + + "EGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022" + + "\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\thost_name\030\001" + + " \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033" + + "\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStrin" + + "gPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNa" + + "meBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014", + "\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006sec" + + "ond\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t" + + "\022\r\n\005value\030\002 \001(\003\"\314\001\n\023SnapshotDescription\022" + + "\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation" + + "_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031.SnapshotD" + + "escription.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022" + + "\r\n\005owner\030\006 
\001(\t\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005" + + "FLUSH\020\001\022\r\n\tSKIPFLUSH\020\002\"}\n\024ProcedureDescr" + + "iption\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 " + + "\001(\t\022\030\n\rcreation_time\030\003 \001(\003:\0010\022&\n\rconfigu", + "ration\030\004 \003(\0132\017.NameStringPair\"\n\n\010EmptyMs" + + "g\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDouble" + + "Msg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg" + + "\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016leas" + + "t_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"" + + "K\n\023NamespaceDescriptor\022\014\n\004name\030\001 \002(\014\022&\n\r" + + "configuration\030\002 \003(\0132\017.NameStringPair\"$\n\020" + + "RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005*r\n\013Co" + + "mpareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t" + + "\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_E", + "QUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUn" + + "it\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n" + + "\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTES\020" + + "\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apache.had" + + "oop.hbase.protobuf.generatedB\013HBaseProto" + + "sH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto index 1566846..c3c8c6a 100644 --- a/hbase-protocol/src/main/protobuf/HBase.proto +++ b/hbase-protocol/src/main/protobuf/HBase.proto @@ -55,14 +55,14 @@ message TableState { } // This is the table's state. required State state = 1; - required TableName table = 2 [deprecated = true]; + required TableName table = 2; optional uint64 timestamp = 3; } /** On HDFS representation of table state. 
*/ message TableDescriptor { required TableSchema schema = 1; - optional TableState.State state = 2 [ default = ENABLED, deprecated = true ]; + optional TableState.State state = 2 [ default = ENABLED ]; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java index d1935db..d27bfb7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hbase; -import javax.annotation.Nullable; import java.io.IOException; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -36,23 +35,15 @@ import org.apache.hadoop.hbase.regionserver.BloomType; @InterfaceAudience.Private public class TableDescriptor { private HTableDescriptor hTableDescriptor; - /** - * Don't use, state was moved to meta, use MetaTableAccessor instead - * @deprecated state was moved to meta - */ - @Deprecated - @Nullable private TableState.State tableState; /** * Creates TableDescriptor with all fields. * @param hTableDescriptor HTableDescriptor to use * @param tableState table state - * @deprecated state was moved to meta */ - @Deprecated public TableDescriptor(HTableDescriptor hTableDescriptor, - @Nullable TableState.State tableState) { + TableState.State tableState) { this.hTableDescriptor = hTableDescriptor; this.tableState = tableState; } @@ -78,35 +69,22 @@ public class TableDescriptor { this.hTableDescriptor = hTableDescriptor; } - /** - * @return table state - * @deprecated state was moved to meta - */ - @Deprecated - @Nullable public TableState.State getTableState() { return tableState; } - /** - * @param tableState state to set for table - * @deprecated state was moved to meta - */ - @Deprecated - public void setTableState(@Nullable TableState.State tableState) { + public void setTableState(TableState.State tableState) { this.tableState = tableState; } /** * Convert to PB. */ - @SuppressWarnings("deprecation") public HBaseProtos.TableDescriptor convert() { - HBaseProtos.TableDescriptor.Builder builder = HBaseProtos.TableDescriptor.newBuilder() - .setSchema(hTableDescriptor.convert()); - if (tableState!= null) - builder.setState(tableState.convert()); - return builder.build(); + return HBaseProtos.TableDescriptor.newBuilder() + .setSchema(hTableDescriptor.convert()) + .setState(tableState.convert()) + .build(); } /** @@ -114,9 +92,7 @@ public class TableDescriptor { */ public static TableDescriptor convert(HBaseProtos.TableDescriptor proto) { HTableDescriptor hTableDescriptor = HTableDescriptor.convert(proto.getSchema()); - TableState.State state = proto.hasState()? - TableState.State.convert(proto.getState()) - :null; + TableState.State state = TableState.State.convert(proto.getState()); return new TableDescriptor(hTableDescriptor, state); } @@ -194,17 +170,6 @@ public class TableDescriptor { .setBloomFilterType(BloomType.NONE) // Enable cache of data blocks in L1 if more than one caching tier deployed: // e.g. if using CombinedBlockCache (BucketCache). 
- .setCacheDataInL1(true), - new HColumnDescriptor(HConstants.TABLE_FAMILY) - // Ten is arbitrary number. Keep versions to help debugging. - .setMaxVersions(10) - .setInMemory(true) - .setBlocksize(8 * 1024) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore. - .setBloomFilterType(BloomType.NONE) - // Enable cache of data blocks in L1 if more than one caching tier deployed: - // e.g. if using CombinedBlockCache (BucketCache). .setCacheDataInL1(true) }) { }; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java index 28c4655..1df8bc2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java @@ -162,8 +162,9 @@ public class HFileWriterV2 extends AbstractHFileWriter { fsBlockWriter.writeHeaderAndData(outputStream); int onDiskSize = fsBlockWriter.getOnDiskSizeWithHeader(); - Cell indexEntry = - CellComparator.getMidpoint(this.comparator, lastCellOfPreviousBlock, firstCellInBlock); + // Rather than CellComparator, we should be making use of an Interface here with the + // implementation class serialized out to the HFile metadata. TODO. + Cell indexEntry = CellComparator.getMidpoint(lastCellOfPreviousBlock, firstCellInBlock); dataBlockIndexWriter.addEntry(CellUtil.getCellKeySerializedAsKeyValueKey(indexEntry), lastDataBlockOffset, onDiskSize); totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); @@ -263,9 +264,8 @@ public class HFileWriterV2 extends AbstractHFileWriter { checkBlockBoundary(); } - if (!fsBlockWriter.isWriting()) { + if (!fsBlockWriter.isWriting()) newBlock(); - } fsBlockWriter.write(cell); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 064771c..fac1ac9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -198,7 +198,6 @@ public class RpcServer implements RpcServerInterface { protected final InetSocketAddress bindAddress; protected int port; // port we listen on - protected InetSocketAddress address; // inet address we listen on private int readThreads; // number of read threads protected int maxIdleTime; // the maximum idle time after // which a client may be @@ -529,7 +528,6 @@ public class RpcServer implements RpcServerInterface { // Bind the server socket to the binding addrees (can be different from the default interface) bind(acceptChannel.socket(), bindAddress, backlogLength); port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port - address = (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress(); // create a selector; selector= Selector.open(); @@ -756,7 +754,7 @@ public class RpcServer implements RpcServerInterface { } InetSocketAddress getAddress() { - return address; + return (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress(); } void doAccept(SelectionKey key) throws IOException, OutOfMemoryError { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index f861529..4d9ff13 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; @@ -98,7 +99,7 @@ import com.google.common.annotations.VisibleForTesting; public class AssignmentManager { private static final Log LOG = LogFactory.getLog(AssignmentManager.class); - protected final MasterServices server; + protected final Server server; private ServerManager serverManager; @@ -129,8 +130,8 @@ public class AssignmentManager { private final int maximumAttempts; /** - * The sleep time for which the assignment will wait before retrying in case of - * hbase:meta assignment failure due to lack of availability of region plan or bad region plan + * The sleep time for which the assignment will wait before retrying in case of hbase:meta assignment + * failure due to lack of availability of region plan or bad region plan */ private final long sleepTimeBeforeRetryingMetaAssignment; @@ -208,7 +209,7 @@ public class AssignmentManager { * @param tableLockManager TableLock manager * @throws IOException */ - public AssignmentManager(MasterServices server, ServerManager serverManager, + public AssignmentManager(Server server, ServerManager serverManager, final LoadBalancer balancer, final ExecutorService service, MetricsMaster metricsMaster, final TableLockManager tableLockManager, @@ -1569,7 +1570,7 @@ public class AssignmentManager { TableState.State.ENABLING); // Region assignment from META - List results = MetaTableAccessor.fullScanRegions(server.getConnection()); + List results = MetaTableAccessor.fullScanOfMeta(server.getConnection()); // Get any new but slow to checkin region server that joined the cluster Set onlineServers = serverManager.getOnlineServers().keySet(); // Set of offline servers to be returned diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 61a1c66..020d6fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -430,11 +430,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server { return connector.getLocalPort(); } - @Override - protected TableDescriptors getFsTableDescriptors() throws IOException { - return super.getFsTableDescriptors(); - } - /** * For compatibility, if failed with regionserver credentials, try the master one */ @@ -634,7 +629,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server { // Invalidate all write locks held previously this.tableLockManager.reapWriteLocks(); + this.tableStateManager = new TableStateManager(this); + this.tableStateManager.start(); status.setStatus("Initializing ZK system trackers"); initializeZKBasedSystemTrackers(); @@ -872,10 +869,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { assigned++; } - if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) - getTableStateManager().setTableState(TableName.META_TABLE_NAME, TableState.State.ENABLED); - // TODO: should we 
prevent from using state manager before meta was initialized? - // tableStateManager.start(); + if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) enableMeta(TableName.META_TABLE_NAME); if ((RecoveryMode.LOG_REPLAY == this.getMasterFileSystem().getLogRecoveryMode()) && (!previouslyFailedMetaRSs.isEmpty())) { @@ -884,9 +878,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server { this.fileSystemManager.splitMetaLog(previouslyFailedMetaRSs); } - this.assignmentManager.setEnabledTable(TableName.META_TABLE_NAME); - tableStateManager.start(); - // Make sure a hbase:meta location is set. We need to enable SSH here since // if the meta region server is died at this time, we need it to be re-assigned // by SSH so that system tables can be assigned. @@ -943,6 +934,13 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } } + private void enableMeta(TableName metaTableName) { + if (!this.tableStateManager.isTableState(metaTableName, + TableState.State.ENABLED)) { + this.assignmentManager.setEnabledTable(metaTableName); + } + } + /** * This function returns a set of region server names under hbase:meta recovering region ZK node * @return Set of meta server names which were recorded in ZK @@ -1175,8 +1173,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { if (rpCount < plans.size() && // if performing next balance exceeds cutoff time, exit the loop (System.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) { - //TODO: After balance, there should not be a cutoff time (keeping it as - // a security net for now) + //TODO: After balance, there should not be a cutoff time (keeping it as a security net for now) LOG.debug("No more balancing till next balance run; maximumBalanceTime=" + maximumBalanceTime); break; @@ -1466,8 +1463,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { LOG.fatal("Failed to become active master", t); // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility if (t instanceof NoClassDefFoundError && - t.getMessage() - .contains("org/apache/hadoop/hdfs/protocol/FSConstants$SafeModeAction")) { + t.getMessage().contains("org/apache/hadoop/hdfs/protocol/FSConstants$SafeModeAction")) { // improved error message for this special case abort("HBase is having a problem with its Hadoop jars. 
You may need to " + "recompile HBase against Hadoop version " @@ -2196,18 +2192,15 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } for (HTableDescriptor desc: htds) { - if (tableStateManager.isTablePresent(desc.getTableName()) - && (includeSysTables || !desc.getTableName().isSystemTable())) { + if (includeSysTables || !desc.getTableName().isSystemTable()) { descriptors.add(desc); } } } else { for (TableName s: tableNameList) { - if (tableStateManager.isTablePresent(s)) { - HTableDescriptor desc = tableDescriptors.get(s); - if (desc != null) { - descriptors.add(desc); - } + HTableDescriptor desc = tableDescriptors.get(s); + if (desc != null) { + descriptors.add(desc); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index c4eecfa..f979403 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -18,7 +18,6 @@ */ package org.apache.hadoop.hbase.master; -import javax.annotation.Nullable; import java.util.List; import java.util.Map; @@ -89,7 +88,6 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse * @param servers * @return List of plans */ - @Nullable Map> retainAssignment( Map regions, List servers diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 78e4c11..4d72312 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -471,7 +471,7 @@ public class MasterFileSystem { // we should get them from registry. 
FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd); fsd.createTableDescriptor( - new TableDescriptor(fsd.get(TableName.META_TABLE_NAME))); + new TableDescriptor(fsd.get(TableName.META_TABLE_NAME), TableState.State.ENABLING)); return rd; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 4af53a4..0e81461 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -850,6 +850,8 @@ public class MasterRpcServices extends RSRpcServices TableName tableName = ProtobufUtil.toTableName(request.getTableName()); TableState.State state = master.getTableStateManager() .getTableState(tableName); + if (state == null) + throw new TableNotFoundException(tableName); MasterProtos.GetTableStateResponse.Builder builder = MasterProtos.GetTableStateResponse.newBuilder(); builder.setTableState(new TableState(tableName, state).convert()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java index df61b45..9dd412c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java @@ -230,8 +230,7 @@ public class RegionStateStore { } } // Called when meta is not on master - multiHConnection.processBatchCallback(Arrays.asList(put), - TableName.META_TABLE_NAME, null, null); + multiHConnection.processBatchCallback(Arrays.asList(put), TableName.META_TABLE_NAME, null, null); } catch (IOException ioe) { LOG.error("Failed to persist region state " + newState, ioe); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index e5214ca..221c7a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -146,13 +147,13 @@ public class RegionStates { private final TableStateManager tableStateManager; private final RegionStateStore regionStateStore; private final ServerManager serverManager; - private final MasterServices server; + private final Server server; // The maximum time to keep a log split info in region states map static final String LOG_SPLIT_TIME = "hbase.master.maximum.logsplit.keeptime"; static final long DEFAULT_LOG_SPLIT_TIME = 7200000L; // 2 hours - RegionStates(final MasterServices master, final TableStateManager tableStateManager, + RegionStates(final Server master, final TableStateManager tableStateManager, final ServerManager serverManager, final RegionStateStore regionStateStore) { this.tableStateManager = tableStateManager; this.regionStateStore = regionStateStore; @@ -871,7 +872,7 @@ public class RegionStates { private int getRegionReplication(HRegionInfo r) throws IOException { if (tableStateManager != null) { - 
HTableDescriptor htd = server.getTableDescriptors().get(r.getTable()); + HTableDescriptor htd = tableStateManager.getTableDescriptors().get(r.getTable()); if (htd != null) { return htd.getRegionReplication(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java index 39beba8..b03611c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java @@ -141,7 +141,7 @@ public class SnapshotOfRegionAssignmentFromMeta { } }; // Scan hbase:meta to pick up user regions - MetaTableAccessor.fullScanRegions(connection, v); + MetaTableAccessor.fullScan(connection, v); //regionToRegionServerMap = regions; LOG.info("Finished to scan the hbase:meta for the current region assignment" + "snapshot"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java index 5d1e638..d8199ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java @@ -17,27 +17,19 @@ */ package org.apache.hadoop.hbase.master; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; import java.io.IOException; -import java.util.HashMap; import java.util.Map; import java.util.Set; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; +import com.google.common.collect.Maps; import com.google.common.collect.Sets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableState; /** @@ -47,12 +39,24 @@ import org.apache.hadoop.hbase.client.TableState; @InterfaceAudience.Private public class TableStateManager { private static final Log LOG = LogFactory.getLog(TableStateManager.class); + private final TableDescriptors descriptors; - private final ReadWriteLock lock = new ReentrantReadWriteLock(); - private final MasterServices master; + private final Map tableStates = Maps.newConcurrentMap(); public TableStateManager(MasterServices master) { - this.master = master; + this.descriptors = master.getTableDescriptors(); + } + + public void start() throws IOException { + Map all = descriptors.getAllDescriptors(); + for (TableDescriptor table : all.values()) { + TableName tableName = table.getHTableDescriptor().getTableName(); + if (LOG.isDebugEnabled()) { + LOG.debug("Adding table state: " + tableName + + ": " + table.getTableState()); + } + tableStates.put(tableName, table.getTableState()); + } } /** @@ -63,13 +67,16 @@ public class TableStateManager { * @throws IOException */ public void setTableState(TableName tableName, TableState.State 
newState) throws IOException { - lock.writeLock().lock(); - try { - udpateMetaState(tableName, newState); - } finally { - lock.writeLock().unlock(); + synchronized (tableStates) { + TableDescriptor descriptor = readDescriptor(tableName); + if (descriptor == null) { + throw new TableNotFoundException(tableName); + } + if (descriptor.getTableState() != newState) { + writeDescriptor( + new TableDescriptor(descriptor.getHTableDescriptor(), newState)); + } } - } /** @@ -84,24 +91,22 @@ TableState.State newState, TableState.State... states) throws IOException { - lock.writeLock().lock(); - try { - TableState currentState = readMetaState(tableName); - if (currentState == null) { + synchronized (tableStates) { + TableDescriptor descriptor = readDescriptor(tableName); + if (descriptor == null) { throw new TableNotFoundException(tableName); } - if (currentState.inStates(states)) { - udpateMetaState(tableName, newState); + if (TableState.isInStates(descriptor.getTableState(), states)) { + writeDescriptor( + new TableDescriptor(descriptor.getHTableDescriptor(), newState)); return true; } else { return false; } - } finally { - lock.writeLock().unlock(); } - } + /** * Set table state to provided but only if table not in specified states * Caller should lock table on write. @@ -114,36 +119,42 @@ TableState.State newState, TableState.State... states) throws IOException { - TableState currentState = readMetaState(tableName); - if (currentState == null) { - throw new TableNotFoundException(tableName); - } - if (!currentState.inStates(states)) { - udpateMetaState(tableName, newState); - return true; - } else { - return false; + synchronized (tableStates) { + TableDescriptor descriptor = readDescriptor(tableName); + if (descriptor == null) { + throw new TableNotFoundException(tableName); + } + if (!TableState.isInStates(descriptor.getTableState(), states)) { + writeDescriptor( + new TableDescriptor(descriptor.getHTableDescriptor(), newState)); + return true; + } else { + return false; + } } } public boolean isTableState(TableName tableName, TableState.State... states) { + TableState.State tableState = null; try { - TableState.State tableState = getTableState(tableName); - return TableState.isInStates(tableState, states); + tableState = getTableState(tableName); } catch (IOException e) { - LOG.error("Unable to get table " + tableName + " state, probably table not exists"); + LOG.error("Unable to get table state, probably the table does not exist"); return false; } + return tableState != null && TableState.isInStates(tableState, states); } public void setDeletedTable(TableName tableName) throws IOException { - if (tableName.equals(TableName.META_TABLE_NAME)) - return; - MetaTableAccessor.deleteTableState(master.getConnection(), tableName); + TableState.State remove = tableStates.remove(tableName); + if (remove == null) { + LOG.warn("Moving table " + tableName + " state to deleted but was " + + "already deleted"); + } } public boolean isTablePresent(TableName tableName) throws IOException { - return readMetaState(tableName) != null; + return getTableState(tableName) != null; } /** @@ -153,82 +164,57 @@ * @return tables in given states * @throws IOException */ - public Set getTablesInStates(final TableState.State...
states) throws IOException { - final Set rv = Sets.newHashSet(); - MetaTableAccessor.fullScanTables(master.getConnection(), new MetaTableAccessor.Visitor() { - @Override - public boolean visit(Result r) throws IOException { - TableState tableState = MetaTableAccessor.getTableState(r); - if (tableState != null && tableState.inStates(states)) - rv.add(tableState.getTableName()); - return true; - } - }); + public Set getTablesInStates(TableState.State... states) throws IOException { + Set rv = Sets.newHashSet(); + for (Map.Entry entry : tableStates.entrySet()) { + if (TableState.isInStates(entry.getValue(), states)) + rv.add(entry.getKey()); + } return rv; } - @Nonnull public TableState.State getTableState(TableName tableName) throws IOException { - TableState currentState = readMetaState(tableName); - if (currentState == null) { - throw new TableNotFoundException(tableName); + TableState.State tableState = tableStates.get(tableName); + if (tableState == null) { + TableDescriptor descriptor = readDescriptor(tableName); + if (descriptor != null) + tableState = descriptor.getTableState(); } - return currentState.getState(); - } - - protected void udpateMetaState(TableName tableName, TableState.State newState) - throws IOException { - MetaTableAccessor.updateTableState(master.getConnection(), tableName, newState); + return tableState; } - @Nullable - protected TableState readMetaState(TableName tableName) throws IOException { - if (tableName.equals(TableName.META_TABLE_NAME)) - return new TableState(tableName, TableState.State.ENABLED); - return MetaTableAccessor.getTableState(master.getConnection(), tableName); + TableDescriptors getTableDescriptors() { + return descriptors; } - @SuppressWarnings("deprecation") - public void start() throws IOException { - TableDescriptors tableDescriptors = master.getTableDescriptors(); - Connection connection = master.getConnection(); - fixTableStates(tableDescriptors, connection); + /** + * Write descriptor in place, update cache of states. + * Write lock should be held by caller.
+ * + * @param descriptor what to write + */ + private void writeDescriptor(TableDescriptor descriptor) throws IOException { + TableName tableName = descriptor.getHTableDescriptor().getTableName(); + TableState.State state = descriptor.getTableState(); + descriptors.add(descriptor); + LOG.debug("Table " + tableName + " written descriptor for state " + state); + tableStates.put(tableName, state); + LOG.debug("Table " + tableName + " updated state to " + state); } - public static void fixTableStates(TableDescriptors tableDescriptors, Connection connection) - throws IOException { - final Map allDescriptors = - tableDescriptors.getAllDescriptors(); - final Map states = new HashMap<>(); - MetaTableAccessor.fullScanTables(connection, new MetaTableAccessor.Visitor() { - @Override - public boolean visit(Result r) throws IOException { - TableState state = MetaTableAccessor.getTableState(r); - if (state != null) - states.put(state.getTableName().getNameAsString(), state); - return true; - } - }); - for (Map.Entry entry : allDescriptors.entrySet()) { - String table = entry.getKey(); - if (table.equals(TableName.META_TABLE_NAME.getNameAsString())) - continue; - if (!states.containsKey(table)) { - LOG.warn("Found table without state " + table); - TableDescriptor td = entry.getValue(); - TableState.State tds = td.getTableState(); - if (tds != null) { - LOG.warn("Found table with state in descriptor, using that state"); - MetaTableAccessor.updateTableState(connection, TableName.valueOf(table), tds); - LOG.warn("Updating table descriptor"); - td.setTableState(null); - tableDescriptors.add(td); - } else { - LOG.warn("Found table with no state in descriptor, assuming ENABLED"); - MetaTableAccessor.updateTableState(connection, TableName.valueOf(table), - TableState.State.ENABLED); - } - } - } + /** + * Read current descriptor for table, update cache of states. + * + * @param table descriptor to read + * @return descriptor + * @throws IOException + */ + private TableDescriptor readDescriptor(TableName tableName) throws IOException { + TableDescriptor descriptor = descriptors.getDescriptor(tableName); + if (descriptor == null) + tableStates.remove(tableName); + else + tableStates.put(tableName, descriptor.getTableState()); + return descriptor; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java index b60733e..2007ed4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java @@ -70,7 +70,6 @@ public class CreateTableHandler extends EventHandler { private final AssignmentManager assignmentManager; private final TableLockManager tableLockManager; private final HRegionInfo [] newRegions; - private final MasterServices masterServices; private final TableLock tableLock; private User activeUser; @@ -83,7 +82,6 @@ public class CreateTableHandler extends EventHandler { this.hTableDescriptor = hTableDescriptor; this.conf = conf; this.newRegions = newRegions; - this.masterServices = masterServices; this.assignmentManager = masterServices.getAssignmentManager(); this.tableLockManager = masterServices.getTableLockManager(); @@ -211,11 +209,10 @@ public class CreateTableHandler extends EventHandler { // 1. 
Create Table Descriptor // using a copy of descriptor, table will be created enabling first TableDescriptor underConstruction = new TableDescriptor( - this.hTableDescriptor); + this.hTableDescriptor, TableState.State.ENABLING); Path tempTableDir = FSUtils.getTableDir(tempdir, tableName); - ((FSTableDescriptors)(masterServices.getTableDescriptors())) - .createTableDescriptorForTableDirectory( - tempTableDir, underConstruction, false); + new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory( + tempTableDir, underConstruction, false); Path tableDir = FSUtils.getTableDir(fileSystemManager.getRootDir(), tableName); // 2. Create Regions @@ -226,12 +223,6 @@ public class CreateTableHandler extends EventHandler { " to hbase root=" + tableDir); } - // populate descriptors cache to be visible in getAll - masterServices.getTableDescriptors().get(tableName); - - MetaTableAccessor.updateTableState(this.server.getConnection(), hTableDescriptor.getTableName(), - TableState.State.ENABLING); - if (regionInfos != null && regionInfos.size() > 0) { // 4. Add regions to META addRegionsToMeta(regionInfos, hTableDescriptor.getRegionReplication()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java index 15a5b8f..ee40153 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java @@ -95,10 +95,10 @@ public class TruncateTableHandler extends DeleteTableHandler { AssignmentManager assignmentManager = this.masterServices.getAssignmentManager(); // 1. Create Table Descriptor - TableDescriptor underConstruction = new TableDescriptor(this.hTableDescriptor); + TableDescriptor underConstruction = new TableDescriptor( + this.hTableDescriptor, TableState.State.ENABLING); Path tempTableDir = FSUtils.getTableDir(tempdir, this.tableName); - - ((FSTableDescriptors)(masterServices.getTableDescriptors())) + new FSTableDescriptors(server.getConfiguration()) .createTableDescriptorForTableDirectory(tempTableDir, underConstruction, false); Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), this.tableName); @@ -123,11 +123,6 @@ public class TruncateTableHandler extends DeleteTableHandler { " to hbase root=" + tableDir); } - // populate descriptors cache to be visible in getAll - masterServices.getTableDescriptors().get(tableName); - - assignmentManager.getTableStateManager().setTableState(tableName, - TableState.State.ENABLING); // 4. 
Add regions to META MetaTableAccessor.addRegionsToMeta(masterServices.getConnection(), regionInfos, hTableDescriptor.getRegionReplication()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index c170a65..90b29ef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -528,7 +528,8 @@ public class HRegionServer extends HasThread implements boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true); this.fs = new HFileSystem(this.conf, useHBaseChecksum); this.rootDir = FSUtils.getRootDir(this.conf); - this.tableDescriptors = getFsTableDescriptors(); + this.tableDescriptors = new FSTableDescriptors(this.conf, + this.fs, this.rootDir, !canUpdateTableDescriptor(), false); service = new ExecutorService(getServerName().toShortString()); spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration()); @@ -560,11 +561,6 @@ public class HRegionServer extends HasThread implements this.choreService = new ChoreService(getServerName().toString()); } - protected TableDescriptors getFsTableDescriptors() throws IOException { - return new FSTableDescriptors(this.conf, - this.fs, this.rootDir, !canUpdateTableDescriptor(), false); - } - protected void login(UserProvider user, String host) throws IOException { user.login("hbase.regionserver.keytab.file", "hbase.regionserver.kerberos.principal", host); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index 330ead4..a3cfa04 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest; @@ -356,7 +357,7 @@ public class SnapshotManifest { // write a copy of descriptor to the snapshot directory new FSTableDescriptors(conf, fs, rootDir) .createTableDescriptorForTableDirectory(workingDir, new TableDescriptor( - htd), false); + htd, TableState.State.ENABLED), false); } else { LOG.debug("Convert to Single Snapshot Manifest"); convertToV2SingleManifest(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index cce37d7..7a6811c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -33,6 +33,7 @@ import com.google.common.primitives.Ints; import org.apache.commons.lang.NotImplementedException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -46,7 +47,7 @@ import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableInfoMissingException; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -153,7 +154,7 @@ public class FSTableDescriptors implements TableDescriptors { invocations++; if (TableName.META_TABLE_NAME.equals(tablename)) { cachehits++; - return new TableDescriptor(metaTableDescritor); + return new TableDescriptor(metaTableDescritor, TableState.State.ENABLED); } // hbase:meta is already handled. If some one tries to get the descriptor for // .logs, .oldlogs or .corrupt throw an exception. @@ -217,7 +218,7 @@ public class FSTableDescriptors implements TableDescriptors { } // add hbase:meta to the response tds.put(this.metaTableDescritor.getNameAsString(), - new TableDescriptor(metaTableDescritor)); + new TableDescriptor(metaTableDescritor, TableState.State.ENABLED)); } else { LOG.debug("Fetching table descriptors from the filesystem."); boolean allvisited = true; @@ -591,7 +592,7 @@ public class FSTableDescriptors implements TableDescriptors { HTableDescriptor htd = HTableDescriptor.parseFrom(content); LOG.warn("Found old table descriptor, converting to new format for table " + htd.getTableName() + "; NOTE table will be in ENABLED state!"); - td = new TableDescriptor(htd); + td = new TableDescriptor(htd, TableState.State.ENABLED); if (rewritePb) rewriteTableDescriptor(fs, status, td); } catch (DeserializationException e1) { throw new IOException("content=" + Bytes.toShort(content), e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index a8b60cd..8e1d848 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -53,16 +53,12 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Multimap; -import com.google.common.collect.TreeMultimap; -import com.google.protobuf.ServiceException; import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FSDataOutputStream; @@ -89,15 +85,15 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ClusterConnection; import 
org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HConnectable; import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; @@ -141,6 +137,13 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.zookeeper.KeeperException; +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.collect.Multimap; +import com.google.common.collect.TreeMultimap; +import com.google.protobuf.ServiceException; + /** * HBaseFsck (hbck) is a tool for checking and repairing region consistency and * table integrity problems in a corrupted HBase. @@ -242,8 +245,7 @@ public class HBaseFsck extends Configured implements Closeable { // hbase:meta are always checked private Set tablesIncluded = new HashSet(); private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge - // maximum number of overlapping regions to sideline - private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE; + private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE; // maximum number of overlapping regions to sideline private boolean sidelineBigOverlaps = false; // sideline overlaps with >maxMerge regions private Path sidelineDir = null; @@ -265,6 +267,8 @@ public class HBaseFsck extends Configured implements Closeable { * to detect and correct consistency (hdfs/meta/deployment) problems. 
*/ private TreeMap regionInfoMap = new TreeMap(); + private TreeSet disabledTables = + new TreeSet(); // Empty regioninfo qualifiers in hbase:meta private Set emptyRegionInfoQualifiers = new HashSet(); @@ -288,8 +292,6 @@ public class HBaseFsck extends Configured implements Closeable { private Map> orphanTableDirs = new HashMap>(); - private Map tableStates = - new HashMap(); /** * Constructor @@ -491,7 +493,7 @@ public class HBaseFsck extends Configured implements Closeable { fixes = 0; regionInfoMap.clear(); emptyRegionInfoQualifiers.clear(); - tableStates.clear(); + disabledTables.clear(); errors.clear(); tablesInfo.clear(); orphanHdfsDirs.clear(); @@ -575,15 +577,15 @@ public class HBaseFsck extends Configured implements Closeable { reportTablesInFlux(); } - // Get disabled tables states - loadTableStates(); - // load regiondirs and regioninfos from HDFS if (shouldCheckHdfs()) { loadHdfsRegionDirs(); loadHdfsRegionInfos(); } + // Get disabled tables from ZooKeeper + loadDisabledTables(); + // fix the orphan tables fixOrphanTables(); @@ -1138,7 +1140,7 @@ public class HBaseFsck extends Configured implements Closeable { for (String columnfamimly : columns) { htd.addFamily(new HColumnDescriptor(columnfamimly)); } - fstd.createTableDescriptor(new TableDescriptor(htd), true); + fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true); return true; } @@ -1186,7 +1188,7 @@ public class HBaseFsck extends Configured implements Closeable { if (tableName.equals(htds[j].getTableName())) { HTableDescriptor htd = htds[j]; LOG.info("fixing orphan table: " + tableName + " from cache"); - fstd.createTableDescriptor(new TableDescriptor(htd), true); + fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true); j++; iter.remove(); } @@ -1263,8 +1265,6 @@ public class HBaseFsck extends Configured implements Closeable { } TableInfo ti = e.getValue(); - puts.add(MetaTableAccessor - .makePutFromTableState(new TableState(ti.tableName, TableState.State.ENABLED))); for (Entry> spl : ti.sc.getStarts().asMap() .entrySet()) { Collection his = spl.getValue(); @@ -1524,19 +1524,28 @@ public class HBaseFsck extends Configured implements Closeable { * @throws ZooKeeperConnectionException * @throws IOException */ - private void loadTableStates() + private void loadDisabledTables() throws IOException { - tableStates = MetaTableAccessor.getTableStates(connection); + HConnectionManager.execute(new HConnectable(getConf()) { + @Override + public Void connect(HConnection connection) throws IOException { + TableName[] tables = connection.listTableNames(); + for (TableName table : tables) { + if (connection.getTableState(table) + .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) { + disabledTables.add(table); + } + } + return null; + } + }); } /** * Check if the specified region's table is disabled. 
- * @param tableName table to check status of */ - private boolean isTableDisabled(TableName tableName) { - return tableStates.containsKey(tableName) - && tableStates.get(tableName) - .inStates(TableState.State.DISABLED, TableState.State.DISABLING); + private boolean isTableDisabled(HRegionInfo regionInfo) { + return disabledTables.contains(regionInfo.getTable()); } /** @@ -1606,24 +1615,15 @@ public class HBaseFsck extends Configured implements Closeable { HConstants.EMPTY_START_ROW, false, false); if (rl == null) { errors.reportError(ERROR_CODE.NULL_META_REGION, - "META region was not found in Zookeeper"); + "META region or some of its attributes are null."); return false; } for (HRegionLocation metaLocation : rl.getRegionLocations()) { // Check if Meta region is valid and existing - if (metaLocation == null ) { - errors.reportError(ERROR_CODE.NULL_META_REGION, - "META region location is null"); - return false; - } - if (metaLocation.getRegionInfo() == null) { - errors.reportError(ERROR_CODE.NULL_META_REGION, - "META location regionInfo is null"); - return false; - } - if (metaLocation.getHostname() == null) { + if (metaLocation == null || metaLocation.getRegionInfo() == null || + metaLocation.getHostname() == null) { errors.reportError(ERROR_CODE.NULL_META_REGION, - "META location hostName is null"); + "META region or some of its attributes are null."); return false; } ServerName sn = metaLocation.getServerName(); @@ -1718,55 +1718,6 @@ public class HBaseFsck extends Configured implements Closeable { } } setCheckHdfs(prevHdfsCheck); - - if (shouldCheckHdfs()) { - checkAndFixTableStates(); - } - } - - /** - * Check and fix table states, assumes full info available: - * - tableInfos - * - empty tables loaded - */ - private void checkAndFixTableStates() throws IOException { - // first check dangling states - for (Entry entry : tableStates.entrySet()) { - TableName tableName = entry.getKey(); - TableState tableState = entry.getValue(); - TableInfo tableInfo = tablesInfo.get(tableName); - if (isTableIncluded(tableName) - && !tableName.isSystemTable() - && tableInfo == null) { - if (fixMeta) { - MetaTableAccessor.deleteTableState(connection, tableName); - TableState state = MetaTableAccessor.getTableState(connection, tableName); - if (state != null) { - errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE, - tableName + " unable to delete dangling table state " + tableState); - } - } else { - errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE, - tableName + " has dangling table state " + tableState); - } - } - } - // check that all tables have states - for (TableName tableName : tablesInfo.keySet()) { - if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) { - if (fixMeta) { - MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED); - TableState newState = MetaTableAccessor.getTableState(connection, tableName); - if (newState == null) { - errors.reportError(ERROR_CODE.NO_TABLE_STATE, - "Unable to change state for table " + tableName + " in meta "); - } - } else { - errors.reportError(ERROR_CODE.NO_TABLE_STATE, - tableName + " has no state in meta "); - } - } - } } private void preCheckPermission() throws IOException, AccessDeniedException { @@ -2010,8 +1961,8 @@ public class HBaseFsck extends Configured implements Closeable { hasMetaAssignment && isDeployed && !isMultiplyDeployed && hbi.metaEntry.regionServer.equals(hbi.deployedOn.get(0)); boolean splitParent = - inMeta && hbi.metaEntry.isSplit() && hbi.metaEntry.isOffline(); - boolean shouldBeDeployed = 
inMeta && !isTableDisabled(hbi.metaEntry.getTable()); + (hbi.metaEntry == null)? false: hbi.metaEntry.isSplit() && hbi.metaEntry.isOffline(); + boolean shouldBeDeployed = inMeta && !isTableDisabled(hbi.metaEntry); boolean recentlyModified = inHdfs && hbi.getModTime() + timelag > System.currentTimeMillis(); @@ -2793,7 +2744,7 @@ public class HBaseFsck extends Configured implements Closeable { // When table is disabled no need to check for the region chain. Some of the regions // accidently if deployed, this below code might report some issues like missing start // or end regions or region hole in chain and may try to fix which is unwanted. - if (isTableDisabled(this.tableName)) { + if (disabledTables.contains(this.tableName)) { return true; } int originalErrorsCount = errors.getErrorList().size(); @@ -3583,14 +3534,12 @@ public class HBaseFsck extends Configured implements Closeable { public interface ErrorReporter { enum ERROR_CODE { UNKNOWN, NO_META_REGION, NULL_META_REGION, NO_VERSION_FILE, NOT_IN_META_HDFS, NOT_IN_META, - NOT_IN_META_OR_DEPLOYED, NOT_IN_HDFS_OR_DEPLOYED, NOT_IN_HDFS, SERVER_DOES_NOT_MATCH_META, - NOT_DEPLOYED, + NOT_IN_META_OR_DEPLOYED, NOT_IN_HDFS_OR_DEPLOYED, NOT_IN_HDFS, SERVER_DOES_NOT_MATCH_META, NOT_DEPLOYED, MULTI_DEPLOYED, SHOULD_NOT_BE_DEPLOYED, MULTI_META_REGION, RS_CONNECT_FAILURE, FIRST_REGION_STARTKEY_NOT_EMPTY, LAST_REGION_ENDKEY_NOT_EMPTY, DUPE_STARTKEYS, HOLE_IN_REGION_CHAIN, OVERLAP_IN_REGION_CHAIN, REGION_CYCLE, DEGENERATE_REGION, ORPHAN_HDFS_REGION, LINGERING_SPLIT_PARENT, NO_TABLEINFO_FILE, LINGERING_REFERENCE_HFILE, - WRONG_USAGE, EMPTY_META_CELL, EXPIRED_TABLE_LOCK, BOUNDARIES_ERROR, ORPHAN_TABLE_STATE, - NO_TABLE_STATE + WRONG_USAGE, EMPTY_META_CELL, EXPIRED_TABLE_LOCK, BOUNDARIES_ERROR } void clear(); void report(String message); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 1a377fc..8613276 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hbase; -import javax.annotation.Nullable; - import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -46,7 +44,6 @@ import java.util.Set; import java.util.TreeSet; import java.util.UUID; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.logging.Log; @@ -56,7 +53,6 @@ import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; import org.apache.hadoop.hbase.Waiter.Predicate; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -75,7 +71,6 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; @@ -2293,8 +2288,6 @@ public class 
HBaseTestingUtility extends HBaseCommonTestingUtility { Table meta = (HTable) getConnection().getTable(TableName.META_TABLE_NAME); Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR); List newRegions = new ArrayList(startKeys.length); - MetaTableAccessor - .updateTableState(getConnection(), htd.getTableName(), TableState.State.ENABLED); // add custom ones for (int i = 0; i < startKeys.length; i++) { int j = (i + 1) % startKeys.length; @@ -2906,12 +2899,12 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public void waitTableAvailable(TableName table) throws InterruptedException, IOException { - waitTableAvailable(table.getName(), 30000); + waitTableAvailable(getHBaseAdmin(), table.getName(), 30000); } - public void waitTableAvailable(TableName table, long timeoutMillis) + public void waitTableAvailable(Admin admin, byte[] table) throws InterruptedException, IOException { - waitFor(timeoutMillis, predicateTableAvailable(table)); + waitTableAvailable(admin, table, 30000); } /** @@ -2923,73 +2916,23 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public void waitTableAvailable(byte[] table, long timeoutMillis) throws InterruptedException, IOException { - waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table))); - } - - public String explainTableAvailability(TableName tableName) throws IOException { - String msg = explainTableState(tableName, TableState.State.ENABLED) + ", "; - if (getHBaseCluster().getMaster().isAlive()) { - Map assignments = - getHBaseCluster().getMaster().getAssignmentManager().getRegionStates() - .getRegionAssignments(); - final List> metaLocations = - MetaTableAccessor.getTableRegionsAndLocations(connection, tableName); - for (Pair metaLocation : metaLocations) { - HRegionInfo hri = metaLocation.getFirst(); - ServerName sn = metaLocation.getSecond(); - if (!assignments.containsKey(hri)) { - msg += ", region " + hri - + " not assigned, but found in meta, it expected to be on " + sn; - - } else if (sn == null) { - msg += ", region " + hri - + " assigned, but has no server in meta"; - } else if (!sn.equals(assignments.get(hri))) { - msg += ", region " + hri - + " assigned, but has different servers in meta and AM ( " + - sn + " <> " + assignments.get(hri); - } - } - } - return msg; + waitTableAvailable(getHBaseAdmin(), table, timeoutMillis); } - public String explainTableState(final TableName table, TableState.State state) - throws IOException { - TableState tableState = MetaTableAccessor.getTableState(connection, table); - if (tableState == null) { - return "TableState in META: No table state in META for table " + table - + " last state in meta (including deleted is " + findLastTableState(table) + ")"; - } else if (!tableState.inStates(state)) { - return "TableState in META: Not " + state + " state, but " + tableState; - } else { - return "TableState in META: OK"; + public void waitTableAvailable(Admin admin, byte[] table, long timeoutMillis) + throws InterruptedException, IOException { + long startWait = System.currentTimeMillis(); + while (!admin.isTableAvailable(TableName.valueOf(table))) { + assertTrue("Timed out waiting for table to become available " + + Bytes.toStringBinary(table), + System.currentTimeMillis() - startWait < timeoutMillis); + Thread.sleep(200); } } - @Nullable - public TableState findLastTableState(final TableName table) throws IOException { - final AtomicReference lastTableState = new AtomicReference<>(null); - MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { - 
@Override - public boolean visit(Result r) throws IOException { - if (!Arrays.equals(r.getRow(), table.getName())) - return false; - TableState state = MetaTableAccessor.getTableState(r); - if (state != null) - lastTableState.set(state); - return true; - } - }; - MetaTableAccessor - .fullScan(connection, visitor, table.getName(), MetaTableAccessor.QueryType.TABLE, true); - return lastTableState.get(); - } - /** * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the * regions have been all assigned. Will timeout after default period (30 seconds) - * Tolerates nonexistent table. * @param table Table to wait on. * @param table * @throws InterruptedException @@ -2997,7 +2940,12 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public void waitTableEnabled(TableName table) throws InterruptedException, IOException { - waitTableEnabled(table, 30000); + waitTableEnabled(getHBaseAdmin(), table.getName(), 30000); + } + + public void waitTableEnabled(Admin admin, byte[] table) + throws InterruptedException, IOException { + waitTableEnabled(admin, table, 30000); } /** @@ -3011,12 +2959,30 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public void waitTableEnabled(byte[] table, long timeoutMillis) throws InterruptedException, IOException { - waitTableEnabled(TableName.valueOf(table), timeoutMillis); + waitTableEnabled(getHBaseAdmin(), table, timeoutMillis); } - public void waitTableEnabled(TableName table, long timeoutMillis) - throws IOException { - waitFor(timeoutMillis, predicateTableEnabled(table)); + public void waitTableEnabled(Admin admin, byte[] table, long timeoutMillis) + throws InterruptedException, IOException { + TableName tableName = TableName.valueOf(table); + long startWait = System.currentTimeMillis(); + waitTableAvailable(admin, table, timeoutMillis); + while (!admin.isTableEnabled(tableName)) { + assertTrue("Timed out waiting for table to become available and enabled " + + Bytes.toStringBinary(table), + System.currentTimeMillis() - startWait < timeoutMillis); + Thread.sleep(200); + } + // Finally make sure all regions are fully open and online out on the cluster. Regions may be + // in the hbase:meta table and almost open on all regionservers but there setting the region + // online in the regionserver is the very last thing done and can take a little while to happen. + // Below we do a get. The get will retry if a NotServeringRegionException or a + // RegionOpeningException. It is crass but when done all will be online. 
+ try { + Canary.sniff(admin, tableName); + } catch (Exception e) { + throw new IOException(e); + } } /** @@ -3028,12 +2994,12 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public void waitTableDisabled(byte[] table) throws InterruptedException, IOException { - waitTableDisabled(table, 30000); + waitTableDisabled(getHBaseAdmin(), table, 30000); } - public void waitTableDisabled(TableName table, long millisTimeout) + public void waitTableDisabled(Admin admin, byte[] table) throws InterruptedException, IOException { - waitFor(millisTimeout, predicateTableDisabled(table)); + waitTableDisabled(admin, table, 30000); } /** @@ -3045,7 +3011,19 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public void waitTableDisabled(byte[] table, long timeoutMillis) throws InterruptedException, IOException { - waitTableDisabled(TableName.valueOf(table), timeoutMillis); + waitTableDisabled(getHBaseAdmin(), table, timeoutMillis); + } + + public void waitTableDisabled(Admin admin, byte[] table, long timeoutMillis) + throws InterruptedException, IOException { + TableName tableName = TableName.valueOf(table); + long startWait = System.currentTimeMillis(); + while (!admin.isTableDisabled(tableName)) { + assertTrue("Timed out waiting for table to become disabled " + + Bytes.toStringBinary(table), + System.currentTimeMillis() - startWait < timeoutMillis); + Thread.sleep(200); + } } /** @@ -3211,12 +3189,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { throws IOException { final Table meta = getConnection().getTable(TableName.META_TABLE_NAME); try { - long l = waitFor(timeout, 200, true, new ExplainingPredicate() { - @Override - public String explainFailure() throws IOException { - return explainTableAvailability(tableName); - } - + waitFor(timeout, 200, true, new Predicate() { @Override public boolean evaluate() throws IOException { boolean allRegionsAssigned = true; @@ -3226,7 +3199,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { try { Result r; while ((r = s.next()) != null) { - byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); HRegionInfo info = HRegionInfo.parseFromOrNull(b); if (info != null && info.getTable().equals(tableName)) { b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); @@ -3249,12 +3222,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { // returing -- sometimes this can lag. 
HMaster master = getHBaseCluster().getMaster(); final RegionStates states = master.getAssignmentManager().getRegionStates(); - waitFor(timeout, 200, new ExplainingPredicate() { - @Override - public String explainFailure() throws IOException { - return explainTableAvailability(tableName); - } - + waitFor(timeout, 200, new Predicate() { @Override public boolean evaluate() throws IOException { List hris = states.getRegionsOfTable(tableName); @@ -3728,17 +3696,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Returns a {@link Predicate} for checking that there are no regions in transition in master */ - public ExplainingPredicate predicateNoRegionsInTransition() { - return new ExplainingPredicate() { + public Waiter.Predicate predicateNoRegionsInTransition() { + return new Waiter.Predicate() { @Override - public String explainFailure() throws IOException { - final RegionStates regionStates = getMiniHBaseCluster().getMaster() - .getAssignmentManager().getRegionStates(); - return "found in transition: " + regionStates.getRegionsInTransition().toString(); - } - - @Override - public boolean evaluate() throws IOException { + public boolean evaluate() throws Exception { final RegionStates regionStates = getMiniHBaseCluster().getMaster() .getAssignmentManager().getRegionStates(); return !regionStates.isRegionsInTransition(); @@ -3749,58 +3710,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Returns a {@link Predicate} for checking that table is enabled */ - public Waiter.Predicate predicateTableEnabled(final TableName tableName) { - return new ExplainingPredicate() { - @Override - public String explainFailure() throws IOException { - return explainTableState(tableName, TableState.State.ENABLED); - } - - @Override - public boolean evaluate() throws IOException { - return getHBaseAdmin().tableExists(tableName) && getHBaseAdmin().isTableEnabled(tableName); - } - }; - } - - /** - * Returns a {@link Predicate} for checking that table is enabled - */ - public Waiter.Predicate predicateTableDisabled(final TableName tableName) { - return new ExplainingPredicate() { - @Override - public String explainFailure() throws IOException { - return explainTableState(tableName, TableState.State.DISABLED); - } - - @Override - public boolean evaluate() throws IOException { - return getHBaseAdmin().isTableDisabled(tableName); - } - }; - } - - /** - * Returns a {@link Predicate} for checking that table is enabled - */ - public Waiter.Predicate predicateTableAvailable(final TableName tableName) { - return new ExplainingPredicate() { - @Override - public String explainFailure() throws IOException { - return explainTableAvailability(tableName); - } - - @Override - public boolean evaluate() throws IOException { - boolean tableAvailable = getHBaseAdmin().isTableAvailable(tableName); - if (tableAvailable) { - try { - Canary.sniff(getHBaseAdmin(), tableName); - } catch (Exception e) { - throw new IOException("Canary sniff failed for table " + tableName, e); - } - } - return tableAvailable; + public Waiter.Predicate predicateTableEnabled(final TableName tableName) { + return new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getHBaseAdmin().isTableEnabled(tableName); } }; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index ea10f60..8336543 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; -import java.security.SecureRandom; import java.util.Random; import org.apache.commons.logging.Log; @@ -31,10 +30,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.io.crypto.Encryption; -import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting; -import org.apache.hadoop.hbase.io.crypto.aes.AES; -import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -50,16 +45,7 @@ public class HFilePerformanceEvaluation { private static final int ROW_LENGTH = 10; private static final int ROW_COUNT = 1000000; private static final int RFILE_BLOCKSIZE = 8 * 1024; - private static StringBuilder testSummary = new StringBuilder(); - - // Disable verbose INFO logging from org.apache.hadoop.io.compress.CodecPool - static { - System.setProperty("org.apache.commons.logging.Log", - "org.apache.commons.logging.impl.SimpleLog"); - System.setProperty("org.apache.commons.logging.simplelog.log.org.apache.hadoop.io.compress.CodecPool", - "WARN"); - } - + static final Log LOG = LogFactory.getLog(HFilePerformanceEvaluation.class.getName()); @@ -96,154 +82,70 @@ public class HFilePerformanceEvaluation { return CellUtil.createCell(keyRow, value); } - /** - * Add any supported codec or cipher to test the HFile read/write performance. - * Specify "none" to disable codec or cipher or both. 
- * @throws Exception - */ private void runBenchmarks() throws Exception { final Configuration conf = new Configuration(); final FileSystem fs = FileSystem.get(conf); final Path mf = fs.makeQualified(new Path("performanceevaluation.mapfile")); - - // codec=none cipher=none - runWriteBenchmark(conf, fs, mf, "none", "none"); - runReadBenchmark(conf, fs, mf, "none", "none"); - - // codec=gz cipher=none - runWriteBenchmark(conf, fs, mf, "gz", "none"); - runReadBenchmark(conf, fs, mf, "gz", "none"); - - // Add configuration for AES cipher - final Configuration aesconf = new Configuration(); - aesconf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); - aesconf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase"); - aesconf.setInt("hfile.format.version", 3); - final FileSystem aesfs = FileSystem.get(aesconf); - final Path aesmf = aesfs.makeQualified(new Path("performanceevaluation.aes.mapfile")); - - // codec=none cipher=aes - runWriteBenchmark(aesconf, aesfs, aesmf, "none", "aes"); - runReadBenchmark(aesconf, aesfs, aesmf, "none", "aes"); - - // codec=gz cipher=aes - runWriteBenchmark(aesconf, aesfs, aesmf, "gz", "aes"); - runReadBenchmark(aesconf, aesfs, aesmf, "gz", "aes"); - - // cleanup test files if (fs.exists(mf)) { fs.delete(mf, true); } - if (aesfs.exists(aesmf)) { - aesfs.delete(aesmf, true); - } - - // Print Result Summary - LOG.info("\n***************\n" + "Result Summary" + "\n***************\n"); - LOG.info(testSummary.toString()); - - } - - /** - * Write a test HFile with the given codec & cipher - * @param conf - * @param fs - * @param mf - * @param codec "none", "lzo", "gz", "snappy" - * @param cipher "none", "aes" - * @throws Exception - */ - private void runWriteBenchmark(Configuration conf, FileSystem fs, Path mf, String codec, - String cipher) throws Exception { - if (fs.exists(mf)) { - fs.delete(mf, true); - } - - runBenchmark(new SequentialWriteBenchmark(conf, fs, mf, ROW_COUNT, codec, cipher), - ROW_COUNT, codec, cipher); - - } - /** - * Run all the read benchmarks for the test HFile - * @param conf - * @param fs - * @param mf - * @param codec "none", "lzo", "gz", "snappy" - * @param cipher "none", "aes" - */ - private void runReadBenchmark(final Configuration conf, final FileSystem fs, final Path mf, - final String codec, final String cipher) { + runBenchmark(new SequentialWriteBenchmark(conf, fs, mf, ROW_COUNT), + ROW_COUNT); PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { runBenchmark(new UniformRandomSmallScan(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, cipher); + ROW_COUNT); } catch (Exception e) { - testSummary.append("UniformRandomSmallScan failed " + e.getMessage()); e.printStackTrace(); } } }); - PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { runBenchmark(new UniformRandomReadBenchmark(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, cipher); + ROW_COUNT); } catch (Exception e) { - testSummary.append("UniformRandomReadBenchmark failed " + e.getMessage()); e.printStackTrace(); } } }); - PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { runBenchmark(new GaussianRandomReadBenchmark(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, cipher); + ROW_COUNT); } catch (Exception e) { - testSummary.append("GaussianRandomReadBenchmark failed " + e.getMessage()); e.printStackTrace(); } } }); - PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { 
runBenchmark(new SequentialReadBenchmark(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, cipher); + ROW_COUNT); } catch (Exception e) { - testSummary.append("SequentialReadBenchmark failed " + e.getMessage()); e.printStackTrace(); } } - }); + }); } - - protected void runBenchmark(RowOrientedBenchmark benchmark, int rowCount, - String codec, String cipher) throws Exception { - LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + - codec + "] " + "cipher[" + cipher + "] for " + rowCount + " rows."); - + + protected void runBenchmark(RowOrientedBenchmark benchmark, int rowCount) + throws Exception { + LOG.info("Running " + benchmark.getClass().getSimpleName() + " for " + + rowCount + " rows."); long elapsedTime = benchmark.run(); - - LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + - codec + "] " + "cipher[" + cipher + "] for " + rowCount + " rows took " + - elapsedTime + "ms."); - - // Store results to print summary at the end - testSummary.append("Running ").append(benchmark.getClass().getSimpleName()) - .append(" with codec[").append(codec).append("] cipher[").append(cipher) - .append("] for ").append(rowCount).append(" rows took ").append(elapsedTime) - .append("ms.").append("\n"); + LOG.info("Running " + benchmark.getClass().getSimpleName() + " for " + + rowCount + " rows took " + elapsedTime + "ms."); } static abstract class RowOrientedBenchmark { @@ -252,18 +154,6 @@ public class HFilePerformanceEvaluation { protected final FileSystem fs; protected final Path mf; protected final int totalRows; - protected String codec = "none"; - protected String cipher = "none"; - - public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows, String codec, String cipher) { - this.conf = conf; - this.fs = fs; - this.mf = mf; - this.totalRows = totalRows; - this.codec = codec; - this.cipher = cipher; - } public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { @@ -318,36 +208,21 @@ public class HFilePerformanceEvaluation { private byte[] bytes = new byte[ROW_LENGTH]; public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows, String codec, String cipher) { - super(conf, fs, mf, totalRows, codec, cipher); + int totalRows) { + super(conf, fs, mf, totalRows); } @Override void setUp() throws Exception { - - HFileContextBuilder builder = new HFileContextBuilder() - .withCompression(AbstractHFileWriter.compressionByName(codec)) - .withBlockSize(RFILE_BLOCKSIZE); - - if (cipher == "aes") { - byte[] cipherKey = new byte[AES.KEY_LENGTH]; - new SecureRandom().nextBytes(cipherKey); - builder.withEncryptionContext(Encryption.newContext(conf) - .setCipher(Encryption.getCipher(conf, cipher)) - .setKey(cipherKey)); - } else if (!"none".equals(cipher)) { - throw new IOException("Cipher " + cipher + " not supported."); - } - - HFileContext hFileContext = builder.build(); - - writer = HFile.getWriterFactoryNoCache(conf) - .withPath(fs, mf) - .withFileContext(hFileContext) - .withComparator(new KeyValue.RawBytesComparator()) - .create(); + HFileContext hFileContext = new HFileContextBuilder().withBlockSize(RFILE_BLOCKSIZE).build(); + writer = + HFile.getWriterFactoryNoCache(conf) + .withPath(fs, mf) + .withFileContext(hFileContext) + .withComparator(new KeyValue.RawBytesComparator()) + .create(); } - + @Override void doRow(int i) throws Exception { writer.append(createCell(i, generateValue())); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java index eefb974..e637976 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java @@ -228,12 +228,7 @@ public class TestMetaTableAccessor { admin.deleteTable(name); assertFalse(MetaTableAccessor.tableExists(connection, name)); assertTrue(MetaTableAccessor.tableExists(connection, - TableName.META_TABLE_NAME)); - UTIL.createTable(name, HConstants.CATALOG_FAMILY); - assertTrue(MetaTableAccessor.tableExists(connection, name)); - admin.disableTable(name); - admin.deleteTable(name); - assertFalse(MetaTableAccessor.tableExists(connection, name)); + TableName.META_TABLE_NAME)); } @Test public void testGetRegion() throws IOException, InterruptedException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index fd1eff7..85fbbc6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -249,8 +248,7 @@ public class TestAdmin1 { this.admin.disableTable(ht.getName()); assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), TableState.State.DISABLED)); - assertEquals(TableState.State.DISABLED, getStateFromMeta(table)); + ht.getName(), TableState.State.DISABLED)); // Test that table is disabled get = new Get(row); @@ -277,8 +275,7 @@ public class TestAdmin1 { this.admin.enableTable(table); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), TableState.State.ENABLED)); - assertEquals(TableState.State.ENABLED, getStateFromMeta(table)); + ht.getName(), TableState.State.ENABLED)); // Test that table is enabled try { @@ -290,13 +287,6 @@ public class TestAdmin1 { ht.close(); } - private TableState.State getStateFromMeta(TableName table) throws IOException { - TableState state = - MetaTableAccessor.getTableState(TEST_UTIL.getConnection(), table); - assertNotNull(state); - return state.getState(); - } - @Test (timeout=300000) public void testDisableAndEnableTables() throws IOException { final byte [] row = Bytes.toBytes("row"); @@ -328,10 +318,6 @@ public class TestAdmin1 { ok = true; } - assertEquals(TableState.State.DISABLED, getStateFromMeta(table1)); - assertEquals(TableState.State.DISABLED, getStateFromMeta(table2)); - - assertTrue(ok); this.admin.enableTables("testDisableAndEnableTable.*"); @@ -350,23 +336,18 @@ public class TestAdmin1 { ht1.close(); ht2.close(); - - assertEquals(TableState.State.ENABLED, getStateFromMeta(table1)); - assertEquals(TableState.State.ENABLED, getStateFromMeta(table2)); } @Test (timeout=300000) public void testCreateTable() throws IOException { HTableDescriptor [] tables = admin.listTables(); int numTables = tables.length; - TableName tableName = TableName.valueOf("testCreateTable"); - TEST_UTIL.createTable(tableName, 
HConstants.CATALOG_FAMILY).close(); + TEST_UTIL.createTable(TableName.valueOf("testCreateTable"), HConstants.CATALOG_FAMILY).close(); tables = this.admin.listTables(); assertEquals(numTables + 1, tables.length); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - tableName, TableState.State.ENABLED)); - assertEquals(TableState.State.ENABLED, getStateFromMeta(tableName)); + TableName.valueOf("testCreateTable"), TableState.State.ENABLED)); } @Test (timeout=300000) @@ -424,7 +405,6 @@ public class TestAdmin1 { Table table = TEST_UTIL.getConnection().getTable(htd.getTableName()); HTableDescriptor confirmedHtd = table.getTableDescriptor(); assertEquals(htd.compareTo(confirmedHtd), 0); - MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection()); table.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java index 8e60353..83ff822 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java @@ -18,8 +18,10 @@ */ package org.apache.hadoop.hbase.client; -import javax.annotation.Nullable; -import java.io.IOException; +import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertErrors; +import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.doFsck; +import static org.junit.Assert.*; + import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -37,28 +39,21 @@ import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.HBaseFsck; -import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE; import org.apache.hadoop.hbase.util.HBaseFsckRepair; +import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE; import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil; import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.zookeeper.KeeperException; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; -import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertErrors; -import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.doFsck; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - /** * Tests the scenarios where replicas are enabled for the meta table */ @@ -229,8 +224,7 @@ public class TestMetaWithReplicas { stopMasterAndValidateReplicaCount(2, 3); } - private void stopMasterAndValidateReplicaCount(final int originalReplicaCount, - final int newReplicaCount) + private void stopMasterAndValidateReplicaCount(int originalReplicaCount, int newReplicaCount) throws Exception { ServerName sn = TEST_UTIL.getHBaseClusterInterface().getClusterStatus().getMaster(); TEST_UTIL.getHBaseClusterInterface().stopMaster(sn); @@ -241,7 +235,16 @@ public class TestMetaWithReplicas { 
newReplicaCount); TEST_UTIL.getHBaseClusterInterface().startMaster(sn.getHostname(), 0); TEST_UTIL.getHBaseClusterInterface().waitForActiveAndReadyMaster(); - TEST_UTIL.waitFor(10000, predicateMetaHasReplicas(newReplicaCount)); + int count = 0; + do { + metaZnodes = TEST_UTIL.getZooKeeperWatcher().getMetaReplicaNodes(); + Thread.sleep(10); + count++; + // wait for the count to be different from the originalReplicaCount. When the + // replica count is reduced, that will happen when the master unassigns excess + // replica, and deletes the excess znodes + } while (metaZnodes.size() == originalReplicaCount && count < 1000); + assert(metaZnodes.size() == newReplicaCount); // also check if hbck returns without errors TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, newReplicaCount); @@ -249,46 +252,6 @@ public class TestMetaWithReplicas { HbckTestingUtil.assertNoErrors(hbck); } - private Waiter.ExplainingPredicate predicateMetaHasReplicas( - final int newReplicaCount) { - return new Waiter.ExplainingPredicate() { - @Override - public String explainFailure() throws Exception { - return checkMetaLocationAndExplain(newReplicaCount); - } - - @Override - public boolean evaluate() throws Exception { - return checkMetaLocationAndExplain(newReplicaCount) == null; - } - }; - } - - @Nullable - private String checkMetaLocationAndExplain(int originalReplicaCount) - throws KeeperException, IOException { - List metaZnodes = TEST_UTIL.getZooKeeperWatcher().getMetaReplicaNodes(); - if (metaZnodes.size() == originalReplicaCount) { - RegionLocations rl = ((ClusterConnection) TEST_UTIL.getConnection()) - .locateRegion(TableName.META_TABLE_NAME, - HConstants.EMPTY_START_ROW, false, false); - for (HRegionLocation location : rl.getRegionLocations()) { - if (location == null) { - return "Null location found in " + rl.toString(); - } - if (location.getRegionInfo() == null) { - return "Null regionInfo for location " + location; - } - if (location.getHostname() == null) { - return "Null hostName for location " + location; - } - } - return null; // OK - } - return "Replica count is not as expected " + originalReplicaCount + " <> " + metaZnodes.size() - + "(" + metaZnodes.toString() + ")"; - } - @Test public void testHBaseFsckWithMetaReplicas() throws Exception { HBaseFsck hbck = HbckTestingUtil.doFsck(TEST_UTIL.getConfiguration(), false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java index b0bd6f6..6d98c52 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.junit.AfterClass; @@ -131,8 +130,7 @@ public class TestReplicaWithCluster { @AfterClass public static void afterClass() throws Exception { - if (HTU2 != null) - HTU2.shutdownMiniCluster(); + HTU2.shutdownMiniCluster(); HTU.shutdownMiniCluster(); } @@ -215,6 +213,7 @@ public class TestReplicaWithCluster { SlowMeCopro.sleepTime.set(0); } + 
HTU.getHBaseCluster().stopMaster(0); Admin admin = HTU.getHBaseAdmin(); nHdt =admin.getTableDescriptor(hdt.getTableName()); Assert.assertEquals("fams=" + Arrays.toString(nHdt.getColumnFamilies()), @@ -222,6 +221,7 @@ public class TestReplicaWithCluster { admin.disableTable(hdt.getTableName()); admin.deleteTable(hdt.getTableName()); + HTU.getHBaseCluster().startMaster(); admin.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java new file mode 100644 index 0000000..78de413 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java @@ -0,0 +1,455 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io.hfile; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.security.SecureRandom; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Random; + +import org.apache.commons.cli.CommandLine; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting; +import org.apache.hadoop.hbase.io.crypto.aes.AES; +import org.apache.hadoop.hbase.util.AbstractHBaseTool; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.GzipCodec; +import org.apache.hadoop.util.ToolRunner; + +/** + * Set of long-running tests to measure performance of HFile. + *

+ * Copied from + * hadoop-3315 tfile. + * Remove after tfile is committed and use the tfile version of this class + * instead.

+ */ +public class TestHFilePerformance extends AbstractHBaseTool { + private HBaseTestingUtility TEST_UTIL; + private static String ROOT_DIR; + private FileSystem fs; + private long startTimeEpoch; + private long finishTimeEpoch; + private DateFormat formatter; + + @Override + public void setConf(Configuration conf) { + super.setConf(conf); + try { + fs = FileSystem.get(conf); + } catch (IOException e) { + throw new RuntimeException(e); + } + conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); + conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase"); + formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + TEST_UTIL = new HBaseTestingUtility(conf); + ROOT_DIR = TEST_UTIL.getDataTestDir("TestHFilePerformance").toString(); + } + + public void startTime() { + startTimeEpoch = System.currentTimeMillis(); + System.out.println(formatTime() + " Started timing."); + } + + public void stopTime() { + finishTimeEpoch = System.currentTimeMillis(); + System.out.println(formatTime() + " Stopped timing."); + } + + public long getIntervalMillis() { + return finishTimeEpoch - startTimeEpoch; + } + + public void printlnWithTimestamp(String message) { + System.out.println(formatTime() + " " + message); + } + + /* + * Format millis into minutes and seconds. + */ + public String formatTime(long milis){ + return formatter.format(milis); + } + + public String formatTime(){ + return formatTime(System.currentTimeMillis()); + } + + private FSDataOutputStream createFSOutput(Path name) throws IOException { + if (fs.exists(name)) + fs.delete(name, true); + FSDataOutputStream fout = fs.create(name); + return fout; + } + + //TODO have multiple ways of generating key/value e.g. dictionary words + //TODO to have a sample compressable data, for now, made 1 out of 3 values random + // keys are all random. + + private static class KeyValueGenerator { + Random keyRandomizer; + Random valueRandomizer; + long randomValueRatio = 3; // 1 out of randomValueRatio generated values will be random. + long valueSequence = 0 ; + + + KeyValueGenerator() { + keyRandomizer = new Random(0L); //TODO with seed zero + valueRandomizer = new Random(1L); //TODO with seed one + } + + // Key is always random now. + void getKey(byte[] key) { + keyRandomizer.nextBytes(key); + } + + void getValue(byte[] value) { + if (valueSequence % randomValueRatio == 0) + valueRandomizer.nextBytes(value); + valueSequence++; + } + } + + /** + * + * @param fileType "HFile" or "SequenceFile" + * @param keyLength + * @param valueLength + * @param codecName "none", "lzo", "gz", "snappy" + * @param cipherName "none", "aes" + * @param rows number of rows to be written. + * @param writeMethod used for HFile only. + * @param minBlockSize used for HFile only. + * @throws IOException + */ + //TODO writeMethod: implement multiple ways of writing e.g. A) known length (no chunk) B) using a buffer and streaming (for many chunks). + public void timeWrite(String fileType, int keyLength, int valueLength, + String codecName, String cipherName, long rows, String writeMethod, int minBlockSize) + throws IOException { + System.out.println("File Type: " + fileType); + System.out.println("Writing " + fileType + " with codecName: " + codecName + + " cipherName: " + cipherName); + long totalBytesWritten = 0; + + + //Using separate randomizer for key/value with seeds matching Sequence File. 
+ byte[] key = new byte[keyLength]; + byte[] value = new byte[valueLength]; + KeyValueGenerator generator = new KeyValueGenerator(); + + startTime(); + + Path path = new Path(ROOT_DIR, fileType + ".Performance"); + System.out.println(ROOT_DIR + Path.SEPARATOR + path.getName()); + FSDataOutputStream fout = createFSOutput(path); + + if ("HFile".equals(fileType)){ + HFileContextBuilder builder = new HFileContextBuilder() + .withCompression(AbstractHFileWriter.compressionByName(codecName)) + .withBlockSize(minBlockSize); + if (cipherName != "none") { + byte[] cipherKey = new byte[AES.KEY_LENGTH]; + new SecureRandom().nextBytes(cipherKey); + builder.withEncryptionContext( + Encryption.newContext(conf) + .setCipher(Encryption.getCipher(conf, cipherName)) + .setKey(cipherKey)); + } + HFileContext context = builder.build(); + System.out.println("HFile write method: "); + HFile.Writer writer = HFile.getWriterFactoryNoCache(conf) + .withOutputStream(fout) + .withFileContext(context) + .withComparator(new KeyValue.RawBytesComparator()) + .create(); + + // Writing value in one shot. + for (long l=0; l metaRows = MetaTableAccessor.fullScanRegions(admin.getConnection()); + List metaRows = MetaTableAccessor.fullScanOfMeta(admin.getConnection()); int count = 0; // Check all 100 rows are in meta for (Result result : metaRows) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 8ed49ff..e09583a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -1043,7 +1043,8 @@ public class TestCatalogJanitor { } private TableDescriptor createTableDescriptor() { - return new TableDescriptor(createHTableDescriptor()); + TableDescriptor htd = new TableDescriptor(createHTableDescriptor(), TableState.State.ENABLED); + return htd; } private MultiResponse buildMultiResponse(MultiRequest req) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java index ca9bc9c..6307c4c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java @@ -47,6 +47,8 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; @@ -132,7 +134,7 @@ public class TestMasterOperationsForRegionReplicas { } } - List metaRows = MetaTableAccessor.fullScanRegions(ADMIN.getConnection()); + List metaRows = MetaTableAccessor.fullScanOfMeta(ADMIN.getConnection()); int numRows = 0; for (Result result : metaRows) { RegionLocations locations = MetaTableAccessor.getRegionLocations(result); @@ -295,7 +297,7 @@ public class TestMasterOperationsForRegionReplicas { return true; } }; - MetaTableAccessor.fullScanRegions(connection, visitor); + MetaTableAccessor.fullScan(connection, visitor); assert(count.get() == 
numRegions); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java index 99e1709..ce61e40 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java @@ -56,7 +56,7 @@ public class TestRegionStates { @Test (timeout=10000) public void testCanMakeProgressThoughMetaIsDown() throws IOException, InterruptedException, BrokenBarrierException { - MasterServices server = mock(MasterServices.class); + Server server = mock(Server.class); when(server.getServerName()).thenReturn(ServerName.valueOf("master,1,1")); Connection connection = mock(ClusterConnection.class); // Set up a table that gets 'stuck' when we try to fetch a row from the meta table. @@ -101,7 +101,7 @@ public class TestRegionStates { @Test public void testWeDontReturnDrainingServersForOurBalancePlans() throws Exception { - MasterServices server = mock(MasterServices.class); + Server server = mock(Server.class); when(server.getServerName()).thenReturn(ServerName.valueOf("master,1,1")); Configuration configuration = mock(Configuration.class); when(server.getConfiguration()).thenReturn(configuration); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 51436b4..3c8fea5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -927,9 +927,11 @@ public class TestAccessController extends SecureTestUtil { setPermission(loadPath, FsPermission.valueOf("-rwxrwxrwx")); try (HTable table = (HTable)TEST_UTIL.getConnection().getTable(tableName)) { - TEST_UTIL.waitTableEnabled(tableName); - LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf); - loader.doBulkLoad(loadPath, table); + try (Admin admin = TEST_UTIL.getHBaseAdmin()) { + TEST_UTIL.waitTableEnabled(admin, tableName.getName()); + LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf); + loader.doBulkLoad(loadPath, table); + } } } @@ -2520,7 +2522,6 @@ public class TestAccessController extends SecureTestUtil { assertTrue(existingPerms.size() > 1); TEST_UTIL.getHBaseAdmin().disableTable(TEST_TABLE.getTableName()); TEST_UTIL.truncateTable(TEST_TABLE.getTableName()); - TEST_UTIL.waitTableAvailable(TEST_TABLE.getTableName()); List perms = AccessControlClient.getUserPermissions(conf, TEST_TABLE.getTableName().getNameAsString()); assertTrue(perms != null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java index b663a2a..a8588cc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java @@ -635,7 +635,13 @@ public class SnapshotTestingUtils { region.waitForFlushesAndCompactions(); } // Wait up to 60 seconds for a table to be available. 
- util.waitFor(60000, util.predicateTableAvailable(tableName)); + final HBaseAdmin hBaseAdmin = util.getHBaseAdmin(); + util.waitFor(60000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws IOException { + return hBaseAdmin.isTableAvailable(tableName); + } + }); } public static void createTable(final HBaseTestingUtility util, final TableName tableName, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java index 7600388..9a7db90 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java @@ -212,7 +212,6 @@ public class TestCoprocessorScanPolicy { // should be gone now assertEquals(0, r.size()); t.close(); - EnvironmentEdgeManager.reset(); } public static class ScanObserver extends BaseRegionObserver { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index c09982e..a99daf2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -76,7 +76,7 @@ public class TestFSTableDescriptors { public void testCreateAndUpdate() throws IOException { Path testdir = UTIL.getDataTestDir("testCreateAndUpdate"); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate")); - TableDescriptor td = new TableDescriptor(htd); + TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir); assertTrue(fstd.createTableDescriptor(td)); @@ -113,7 +113,7 @@ public class TestFSTableDescriptors { assertTrue(!fs.exists(p1)); int i2 = FSTableDescriptors.getTableInfoSequenceId(p2); assertTrue(i2 == i1 + 1); - td = new TableDescriptor(htd); + td = new TableDescriptor(htd, TableState.State.DISABLED); Path p3 = fstd.updateTableDescriptor(td); // Assert we cleaned up the old file. assertTrue(!fs.exists(p2)); @@ -172,7 +172,7 @@ public class TestFSTableDescriptors { final String name = "testReadingHTDFromFS"; FileSystem fs = FileSystem.get(UTIL.getConfiguration()); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); - TableDescriptor td = new TableDescriptor(htd); + TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); Path rootdir = UTIL.getDataTestDir(name); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); fstd.createTableDescriptor(td); @@ -187,7 +187,7 @@ public class TestFSTableDescriptors { Path rootdir = UTIL.getDataTestDir(name); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); - TableDescriptor td = new TableDescriptor(htd); + TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); Path descriptorFile = fstd.updateTableDescriptor(td); try (FSDataOutputStream out = fs.create(descriptorFile, true)) { out.write(htd.toByteArray()); @@ -222,8 +222,8 @@ public class TestFSTableDescriptors { final int count = 10; // Write out table infos. 
for (int i = 0; i < count; i++) { - TableDescriptor htd = new TableDescriptor( - new HTableDescriptor(TableName.valueOf(name + i))); + TableDescriptor htd = new TableDescriptor(new HTableDescriptor(name + i), + TableState.State.ENABLED); htds.createTableDescriptor(htd); } @@ -420,7 +420,7 @@ public class TestFSTableDescriptors { Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready"); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf( "testCreateTableDescriptorUpdatesIfThereExistsAlready")); - TableDescriptor td = new TableDescriptor(htd); + TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir); assertTrue(fstd.createTableDescriptor(td)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index 0d3a94e..33bd337 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -157,7 +157,6 @@ public class TestHBaseFsck { conf.setInt("hbase.hconnection.threads.max", 2 * POOL_SIZE); conf.setInt("hbase.hconnection.threads.core", POOL_SIZE); conf.setInt("hbase.hbck.close.timeout", 2 * REGION_ONLINE_TIMEOUT); - conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 2 * REGION_ONLINE_TIMEOUT); TEST_UTIL.startMiniCluster(3); tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS, @@ -1403,7 +1402,7 @@ public class TestHBaseFsck { HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.NOT_IN_HDFS, - ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.ORPHAN_TABLE_STATE, }); + ERROR_CODE.NOT_IN_HDFS,}); // holes are separate from overlap groups assertEquals(0, hbck.getOverlapGroups(table).size()); @@ -1446,34 +1445,6 @@ public class TestHBaseFsck { } /** - * when the hbase.version file missing, It is fix the fault. - */ - @Test (timeout=180000) - public void testNoTableState() throws Exception { - // delete the hbase.version file - TableName table = - TableName.valueOf("testNoTableState"); - try { - setupTable(table); - // make sure data in regions, if in wal only there is no data loss - admin.flush(table); - - MetaTableAccessor.deleteTableState(TEST_UTIL.getConnection(), table); - - // test - HBaseFsck hbck = doFsck(conf, false); - assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NO_TABLE_STATE }); - // fix table state missing - doFsck(conf, true); - - assertNoErrors(doFsck(conf, false)); - assertTrue(TEST_UTIL.getHBaseAdmin().isTableEnabled(table)); - } finally { - cleanupTable(table); - } - } - - /** * The region is not deployed when the table is disabled. */ @Test (timeout=180000) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java index e940425..349bf56 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java @@ -285,9 +285,16 @@ public class OfflineMetaRebuildTestCore { * @return # of entries in meta. 
*/ protected int scanMeta() throws IOException { - LOG.info("Scanning META"); - MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection()); - return MetaTableAccessor.fullScanRegions(TEST_UTIL.getConnection()).size(); + int count = 0; + Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); + ResultScanner scanner = meta.getScanner(new Scan()); + LOG.info("Table: " + meta.getName()); + for (Result res : scanner) { + LOG.info(Bytes.toString(res.getRow())); + count++; + } + meta.close(); + return count; } protected HTableDescriptor[] getTables(final Configuration configuration) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java index fc22292..a3d323c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java @@ -20,13 +20,11 @@ package org.apache.hadoop.hbase.util.hbck; import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertErrors; import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.doFsck; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.util.Arrays; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -72,20 +70,13 @@ public class TestOfflineMetaRebuildBase extends OfflineMetaRebuildTestCore { TEST_UTIL.restartHBaseCluster(3); try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { Admin admin = connection.getAdmin(); - if (admin.isTableDisabled(table)) - admin.enableTable(table); + admin.enableTable(table); LOG.info("Waiting for no more RIT"); TEST_UTIL.waitUntilNoRegionsInTransition(60000); LOG.info("No more RIT in ZK, now doing final test verification"); // everything is good again. - assertEquals(5, scanMeta()); // including table state rows - TableName[] tableNames = TEST_UTIL.getHBaseAdmin().listTableNames(); - for (TableName tableName : tableNames) { - HTableDescriptor tableDescriptor = TEST_UTIL.getHBaseAdmin().getTableDescriptor(tableName); - assertNotNull(tableDescriptor); - assertTrue(TEST_UTIL.getHBaseAdmin().isTableEnabled(tableName)); - } + assertEquals(5, scanMeta()); HTableDescriptor[] htbls = admin.listTables(); LOG.info("Tables present after restart: " + Arrays.toString(htbls)); assertEquals(1, htbls.length); diff --git a/src/main/asciidoc/_chapters/performance.adoc b/src/main/asciidoc/_chapters/performance.adoc index 36b3c70..521c8f2 100644 --- a/src/main/asciidoc/_chapters/performance.adoc +++ b/src/main/asciidoc/_chapters/performance.adoc @@ -100,6 +100,17 @@ Using 10Gbe links between racks will greatly increase performance, and assuming Are all the network interfaces functioning correctly? Are you sure? See the Troubleshooting Case Study in <>. +[[perf.network.call_me_maybe]] +=== Network Consistency and Partition Tolerance +The link:http://en.wikipedia.org/wiki/CAP_theorem[CAP Theorem] states that a distributed system can maintain two out of the following three characteristics: +- *C*onsistency -- all nodes see the same data.
+- *A*vailability -- every request receives a response about whether it succeeded or failed. +- *P*artition tolerance -- the system continues to operate even if some of its components become unavailable to the others. + +Where a decision has to be made, HBase favors consistency and partition tolerance. Coda Hale explains why partition tolerance is so important in http://codahale.com/you-cant-sacrifice-partition-tolerance/. + +Robert Yokota used an automated testing framework called link:https://aphyr.com/tags/jepsen[Jepsen] to test HBase's partition tolerance in the face of network partitions, using techniques modeled after Aphyr's link:https://aphyr.com/posts/281-call-me-maybe-carly-rae-jepsen-and-the-perils-of-network-partitions[Call Me Maybe] series. The results, available as a link:http://eng.yammer.com/call-me-maybe-hbase/[blog post] and an link:http://eng.yammer.com/call-me-maybe-hbase-addendum/[addendum], show that HBase performs correctly. All acknowledged writes are accounted for, and no unacknowledged writes are written. In the event of a very long-running partition, HBase clients will time out, losing some writes. Thus, the clients and the server remain in agreement about what has been written and what has failed to be written. + [[jvm]] == Java