diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index ca7ab6a..b8cbb3a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -402,8 +402,9 @@ public class HRegionInfo implements Comparable { * Gets the table name from the specified region name. * @param regionName * @return Table name. + * @see #getTable(byte[]) */ - public static TableName getTableName(byte[] regionName) { + public static byte [] getTableName(byte[] regionName) { int offset = -1; for (int i = 0; i < regionName.length; i++) { if (regionName[i] == HConstants.DELIMITER) { @@ -413,7 +414,18 @@ public class HRegionInfo implements Comparable { } byte[] buff = new byte[offset]; System.arraycopy(regionName, 0, buff, 0, offset); - return TableName.valueOf(buff); + return buff; + } + + /** + * Gets the table name from the specified region name. + * Like {@link #getTableName(byte[])} only returns a {@link TableName} rather than a byte array. 
+ * @param regionName + * @return Table name + * @see #getTableName(byte[]) + */ + public static TableName getTable(final byte [] regionName) { + return TableName.valueOf(getTableName(regionName)); } /** @@ -523,12 +535,22 @@ public class HRegionInfo implements Comparable { /** * Get current table name of the region * @return byte array of table name + * @see #getTable() + */ + public byte [] getTableName() { + return getTable().toBytes(); + } + + /** + * Get current table name of the region + * @return TableName + * @see #getTableName() */ - public TableName getTableName() { + public TableName getTable() { if (tableName == null || tableName.getName().length == 0) { - tableName = getTableName(getRegionName()); + tableName = getTable(getRegionName()); } - return tableName; + return this.tableName; } /** @@ -570,7 +592,7 @@ public class HRegionInfo implements Comparable { /** @return true if this region is a meta region */ public boolean isMetaRegion() { - return tableName.equals(HRegionInfo.FIRST_META_REGIONINFO.getTableName()); + return tableName.equals(HRegionInfo.FIRST_META_REGIONINFO.getTable()); } /** @@ -813,7 +835,7 @@ public class HRegionInfo implements Comparable { public static RegionInfo convert(final HRegionInfo info) { if (info == null) return null; RegionInfo.Builder builder = RegionInfo.newBuilder(); - builder.setTableName(ProtobufUtil.toProtoTableName(info.getTableName())); + builder.setTableName(ProtobufUtil.toProtoTableName(info.getTable())); builder.setRegionId(info.getRegionId()); if (info.getStartKey() != null) { builder.setStartKey(ByteString.copyFrom(info.getStartKey())); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 0e0e189..3d7683e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -546,6 +546,16 @@ public 
class HTableDescriptor implements WritableComparable { } /** + * Remove metadata represented by the key from the {@link #values} map + * + * @param key Key whose key and value we're to remove from HTableDescriptor + * parameters. + */ + public void remove(final byte [] key) { + remove(new ImmutableBytesWritable(key)); + } + + /** * Check if the readOnly flag of the table is set. If the readOnly flag is * set then the contents of the table can only be read from but not modified. * diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java index 9bbe8a9..5ee2607 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -85,6 +85,15 @@ public class ClientScanner extends AbstractClientScanner { this(conf, scan, tableName, HConnectionManager.getConnection(conf)); } + /** + * @deprecated Use {@link #ClientScanner(Configuration, Scan, TableName)} + */ + @Deprecated + public ClientScanner(final Configuration conf, final Scan scan, + final byte [] tableName) throws IOException { + this(conf, scan, TableName.valueOf(tableName)); + } + /** * Create a new ClientScanner for the specified table @@ -102,6 +111,15 @@ public class ClientScanner extends AbstractClientScanner { } /** + * @deprecated Use {@link #ClientScanner(Configuration, Scan, TableName, HConnection)} + */ + @Deprecated + public ClientScanner(final Configuration conf, final Scan scan, final byte [] tableName, + HConnection connection) throws IOException { + this(conf, scan, TableName.valueOf(tableName), connection, new RpcRetryingCallerFactory(conf)); + } + + /** * Create a new ClientScanner for the specified table Note that the passed {@link Scan}'s start * row maybe changed changed. * @param conf The {@link Configuration} to use. 
@@ -158,7 +176,11 @@ public class ClientScanner extends AbstractClientScanner { return this.connection; } - protected TableName getTableName() { + protected byte [] getTableName() { + return this.tableName.getName(); + } + + protected TableName getTable() { return this.tableName; } @@ -253,7 +275,7 @@ public class ClientScanner extends AbstractClientScanner { int nbRows) { scan.setStartRow(localStartKey); ScannerCallable s = new ScannerCallable(getConnection(), - getTableName(), scan, this.scanMetrics); + getTable(), scan, this.scanMetrics); s.setCaching(nbRows); return s; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 74093f0..7604808 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -933,8 +933,7 @@ public class HConnectionManager { return false; } regionCount.incrementAndGet(); - } else if (tableName.compareTo( - info.getTableName()) < 0) { + } else if (tableName.compareTo(info.getTable()) < 0) { // Return if we are done with the current table return false; } @@ -979,7 +978,7 @@ public class HConnectionManager { // Always empty start row should be counted regionCount.incrementAndGet(); } - } else if (tableName.compareTo(info.getTableName()) < 0) { + } else if (tableName.compareTo(info.getTable()) < 0) { // Return if we are done with the current table return false; } @@ -1000,7 +999,7 @@ public class HConnectionManager { @Override public HRegionLocation locateRegion(final byte[] regionName) throws IOException { - return locateRegion(HRegionInfo.getTableName(regionName), + return locateRegion(HRegionInfo.getTable(regionName), HRegionInfo.getStartKey(regionName), false, true); } @@ -2306,7 +2305,7 @@ public class HConnectionManager { ServerName serverName, long seqNum) { HRegionLocation newHrl 
= new HRegionLocation(hri, serverName, seqNum); synchronized (this.cachedRegionLocations) { - cacheLocation(hri.getTableName(), source, newHrl); + cacheLocation(hri.getTable(), source, newHrl); } } @@ -2320,7 +2319,7 @@ public class HConnectionManager { HRegionLocation oldLocation; synchronized (this.cachedRegionLocations) { Map tableLocations = - getTableLocations(hri.getTableName()); + getTableLocations(hri.getTable()); oldLocation = tableLocations.get(hri.getStartKey()); if (oldLocation != null) { // Do not delete the cache entry if it's not for the same server that gave us the error. @@ -2338,7 +2337,7 @@ public class HConnectionManager { return; } synchronized (this.cachedRegionLocations) { - TableName tableName = location.getRegionInfo().getTableName(); + TableName tableName = location.getRegionInfo().getTable(); Map tableLocations = getTableLocations(tableName); if (!tableLocations.isEmpty()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index 3c6c20c..914e5a2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -1007,6 +1007,18 @@ public class HTable implements HTableInterface { } /** + * @deprecated Use {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)} + */ + @Deprecated + @Override + public long incrementColumnValue(final byte [] row, final byte [] family, + final byte [] qualifier, final long amount, final boolean writeToWAL) + throws IOException { + return incrementColumnValue(row, family, qualifier, amount, + writeToWAL? 
Durability.USE_DEFAULT: Durability.SKIP_WAL); + } + + /** * {@inheritDoc} */ @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java index eee13b0..2d96ce0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client; import com.google.protobuf.Service; import com.google.protobuf.ServiceException; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -385,6 +386,14 @@ public interface HTableInterface extends Closeable { long amount, Durability durability) throws IOException; /** + * @deprecated Use {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)} + */ + @Deprecated + long incrementColumnValue(final byte [] row, final byte [] family, + final byte [] qualifier, final long amount, final boolean writeToWAL) + throws IOException; + + /** * Tells whether or not 'auto-flush' is turned on. 
* * @return {@code true} if 'auto-flush' is enabled (default), meaning diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java index 946bd85..807bf37 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java @@ -19,19 +19,6 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; - import java.io.IOException; import java.util.AbstractMap.SimpleEntry; import java.util.ArrayList; @@ -46,6 +33,18 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + /** * HTableMultiplexer provides a thread-safe non 
blocking PUT API across all the tables. * Each put will be sharded into different buffer queues based on its destination region server. @@ -450,7 +449,7 @@ public class HTableMultiplexer { HRegionLocation oldLoc) throws IOException { Put failedPut = failedPutStatus.getPut(); // The currentPut is failed. So get the table name for the currentPut. - TableName tableName = failedPutStatus.getRegionInfo().getTableName(); + TableName tableName = failedPutStatus.getRegionInfo().getTable(); // Decrease the retry count int retryCount = failedPutStatus.getRetryCount() - 1; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java index 5503121..3726887 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java @@ -600,5 +600,11 @@ public class HTablePool implements Closeable { throw new IllegalStateException("Table=" + new String(table.getTableName()) + " already closed"); } } + + @Override + public long incrementColumnValue(byte[] row, byte[] family, + byte[] qualifier, long amount, boolean writeToWAL) throws IOException { + return table.incrementColumnValue(row, family, qualifier, amount, writeToWAL); + } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index 0eb3cfd..7194cf9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -102,6 +102,15 @@ public class ScannerCallable extends RegionServerCallable { } /** + * @deprecated Use {@link #ScannerCallable(HConnection, TableName, Scan, ScanMetrics)} + */ + @Deprecated + public ScannerCallable (HConnection connection, final byte [] tableName, Scan scan, + ScanMetrics 
scanMetrics) { + this(connection, TableName.valueOf(tableName), scan, scanMetrics); + } + + /** * @param reload force reload of server location * @throws IOException */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 6f2318e..9ab718c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -2195,7 +2195,7 @@ public final class ProtobufUtil { // input / output paths are relative to the store dir // store dir is relative to region dir CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder() - .setTableName(ByteString.copyFrom(info.getTableName().getName())) + .setTableName(ByteString.copyFrom(info.getTableName())) .setEncodedRegionName(ByteString.copyFrom(info.getEncodedNameAsBytes())) .setFamilyName(ByteString.copyFrom(family)) .setStoreHomeDir(storeDir.getName()); //make relative diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java index 49d736f..f9cc60f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java @@ -72,7 +72,7 @@ public class HFileArchiver { public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info) throws IOException { Path rootDir = FSUtils.getRootDir(conf); - archiveRegion(fs, rootDir, FSUtils.getTableDir(rootDir, info.getTableName()), + archiveRegion(fs, rootDir, FSUtils.getTableDir(rootDir, info.getTable()), HRegion.getRegionDir(rootDir, info)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java 
index 05b9770..9b0ec04 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -564,6 +564,12 @@ public abstract class CoprocessorHost { public void setWriteBufferSize(long writeBufferSize) throws IOException { table.setWriteBufferSize(writeBufferSize); } + + @Override + public long incrementColumnValue(byte[] row, byte[] family, + byte[] qualifier, long amount, boolean writeToWAL) throws IOException { + return table.incrementColumnValue(row, family, qualifier, amount, writeToWAL); + } } /** The coprocessor */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java index cb286d7..5b2b874 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java @@ -263,7 +263,7 @@ public class HFileLink extends FileLink { */ public static String createHFileLinkName(final HRegionInfo hfileRegionInfo, final String hfileName) { - return createHFileLinkName(hfileRegionInfo.getTableName(), + return createHFileLinkName(hfileRegionInfo.getTable(), hfileRegionInfo.getEncodedName(), hfileName); } @@ -300,7 +300,7 @@ public class HFileLink extends FileLink { public static boolean create(final Configuration conf, final FileSystem fs, final Path dstFamilyPath, final HRegionInfo hfileRegionInfo, final String hfileName) throws IOException { - TableName linkedTable = hfileRegionInfo.getTableName(); + TableName linkedTable = hfileRegionInfo.getTable(); String linkedRegion = hfileRegionInfo.getEncodedName(); return create(conf, fs, dstFamilyPath, linkedTable, linkedRegion, hfileName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java 
index 076218a..ec48858 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -518,6 +518,16 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } /** + * @deprecated Use {@link #tryAtomicRegionLoad(HConnection, TableName, byte[], Collection)} + */ + @Deprecated + protected List tryAtomicRegionLoad(final HConnection conn, + final byte [] tableName, final byte[] first, Collection lqis) + throws IOException { + return tryAtomicRegionLoad(conn, TableName.valueOf(tableName), first, lqis); + } + + /** * Attempts to do an atomic load of many hfiles into a region. If it fails, * it returns a list of hfiles that need to be retried. If it is successful * it will return an empty list. @@ -531,9 +541,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * failure */ protected List tryAtomicRegionLoad(final HConnection conn, - final TableName tableName, - final byte[] first, Collection lqis) throws IOException { - + final TableName tableName, final byte[] first, Collection lqis) + throws IOException { final List> famPaths = new ArrayList>(lqis.size()); for (LoadQueueItem lqi : lqis) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java index 5d11ed9..5bcada3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java @@ -85,11 +85,20 @@ implements Writable, Comparable { /** Default constructor. 
*/ public TableSplit() { - this(null, null, HConstants.EMPTY_BYTE_ARRAY, + this((TableName)null, null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, ""); } /** + * @deprecated Use {@link TableSplit#TableSplit(TableName, Scan, byte[], byte[], String)} + */ + @Deprecated + public TableSplit(final byte [] tableName, Scan scan, byte [] startRow, byte [] endRow, + final String location) { + this(TableName.valueOf(tableName), scan, startRow, endRow, location); + } + + /** * Creates a new instance while assigning all variables. * * @param tableName The name of the current table. @@ -111,7 +120,16 @@ implements Writable, Comparable { this.endRow = endRow; this.regionLocation = location; } - + + /** + * @deprecated Use {@link TableSplit#TableSplit(TableName, byte[], byte[], String)} + */ + @Deprecated + public TableSplit(final byte [] tableName, byte[] startRow, byte[] endRow, + final String location) { + this(TableName.valueOf(tableName), startRow, endRow, location); + } + /** * Creates a new instance without a scanner. * * @param tableName The name of the current table. @@ -136,11 +154,20 @@ implements Writable, Comparable { } /** + * Returns the table name converted to a byte array. + * @see #getTable() + * @return The table name. + */ + public byte [] getTableName() { + return tableName.getName(); + } + + /** * Returns the table name. * * @return The table name. */ - public TableName getTableName() { + public TableName getTable() { return tableName; } @@ -268,7 +295,7 @@ implements Writable, Comparable { // If The table name of the two splits is the same then compare start row // otherwise compare based on table names int tableNameComparison = - getTableName().compareTo(split.getTableName()); + getTable().compareTo(split.getTable()); return tableNameComparison != 0 ? 
tableNameComparison : Bytes.compareTo( getStartRow(), split.getStartRow()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index a078a52..4857f8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -460,7 +460,7 @@ public class AssignmentManager extends ZooKeeperListener { // its a clean cluster startup, else its a failover. Map regions = regionStates.getRegionAssignments(); for (Map.Entry e: regions.entrySet()) { - if (!e.getKey().getTableName().isSystemTable() + if (!e.getKey().getTable().isSystemTable() && e.getValue() != null) { LOG.debug("Found " + e + " out on cluster"); failover = true; @@ -1282,8 +1282,7 @@ public class AssignmentManager extends ZooKeeperListener { if (rs.isOpened()) { ServerName serverName = rs.getServerName(); regionOnline(regionInfo, serverName); - boolean disabled = getZKTable().isDisablingOrDisabledTable( - regionInfo.getTableName()); + boolean disabled = getZKTable().isDisablingOrDisabledTable(regionInfo.getTable()); if (!serverManager.isServerOnline(serverName) && !disabled) { LOG.info("Opened " + regionNameStr + "but the region server is offline, reassign the region"); @@ -1898,7 +1897,7 @@ public class AssignmentManager extends ZooKeeperListener { // When we have a case such as all the regions are added directly into hbase:meta and we call // assignRegion then we need to make the table ENABLED. Hence in such case the table // will not be in ENABLING or ENABLED state. 
- TableName tableName = region.getTableName(); + TableName tableName = region.getTable(); if (!zkTable.isEnablingTable(tableName) && !zkTable.isEnabledTable(tableName)) { LOG.debug("Setting table " + tableName + " to ENABLED state."); setEnabledTable(tableName); @@ -2085,7 +2084,7 @@ public class AssignmentManager extends ZooKeeperListener { } private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) { - TableName tableName = region.getTableName(); + TableName tableName = region.getTable(); boolean disabled = this.zkTable.isDisabledTable(tableName); if (disabled || this.zkTable.isDisablingTable(tableName)) { LOG.info("Table " + tableName + (disabled ? " disabled;" : " disabling;") + @@ -2584,7 +2583,7 @@ public class AssignmentManager extends ZooKeeperListener { //remove system tables because they would have been assigned earlier for(Iterator iter = allRegions.keySet().iterator(); iter.hasNext();) { - if (iter.next().getTableName().isSystemTable()) { + if (iter.next().getTable().isSystemTable()) { iter.remove(); } } @@ -2603,7 +2602,7 @@ public class AssignmentManager extends ZooKeeperListener { } for (HRegionInfo hri : allRegions.keySet()) { - TableName tableName = hri.getTableName(); + TableName tableName = hri.getTable(); if (!zkTable.isEnabledTable(tableName)) { setEnabledTable(tableName); } @@ -2664,7 +2663,7 @@ public class AssignmentManager extends ZooKeeperListener { HRegionInfo regionInfo = region.getFirst(); ServerName regionLocation = region.getSecond(); if (regionInfo == null) continue; - TableName tableName = regionInfo.getTableName(); + TableName tableName = regionInfo.getTable(); if (tableName.isSystemTable()) continue; regionStates.createRegionState(regionInfo); if (regionStates.isRegionInState(regionInfo, State.SPLIT)) { @@ -3161,7 +3160,7 @@ public class AssignmentManager extends ZooKeeperListener { } catch (KeeperException ke) { server.abort("Unexpected ZK exception deleting node " + hri, ke); } - if 
(zkTable.isDisablingOrDisabledTable(hri.getTableName())) { + if (zkTable.isDisablingOrDisabledTable(hri.getTable())) { it.remove(); regionStates.regionOffline(hri); continue; @@ -3193,8 +3192,7 @@ public class AssignmentManager extends ZooKeeperListener { // the master to disable, we need to make sure we close those regions in // that case. This is not racing with the region server itself since RS // report is done after the split transaction completed. - if (this.zkTable.isDisablingOrDisabledTable( - parent.getTableName())) { + if (this.zkTable.isDisablingOrDisabledTable(parent.getTable())) { unassign(a); unassign(b); } @@ -3217,7 +3215,7 @@ public class AssignmentManager extends ZooKeeperListener { // the master to disable, we need to make sure we close those regions in // that case. This is not racing with the region server itself since RS // report is done after the regions merge transaction completed. - if (this.zkTable.isDisablingOrDisabledTable(merged.getTableName())) { + if (this.zkTable.isDisablingOrDisabledTable(merged.getTable())) { unassign(merged); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java index 0f13580..9cccbcf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java @@ -149,7 +149,7 @@ public class CatalogJanitor extends Chore { HRegionInfo info = HRegionInfo.getHRegionInfo(r); if (info == null) return true; // Keep scanning if (isTableSpecified - && info.getTableName().compareTo(tableName) > 0) { + && info.getTable().compareTo(tableName) > 0) { // Another table, stop scanning return false; } @@ -184,9 +184,8 @@ public class CatalogJanitor extends Chore { final HRegionInfo regionA, final HRegionInfo regionB) throws IOException { FileSystem fs = this.services.getMasterFileSystem().getFileSystem(); Path 
rootdir = this.services.getMasterFileSystem().getRootDir(); - Path tabledir = FSUtils.getTableDir(rootdir, - mergedRegion.getTableName()); - HTableDescriptor htd = getTableDescriptor(mergedRegion.getTableName()); + Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable()); + HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable()); HRegionFileSystem regionFs = null; try { regionFs = HRegionFileSystem.openRegionFromFileSystem( @@ -290,8 +289,7 @@ public class CatalogJanitor extends Chore { if (left == null) return -1; if (right == null) return 1; // Same table name. - int result = left.getTableName().compareTo( - right.getTableName()); + int result = left.getTable().compareTo(right.getTable()); if (result != 0) return result; // Compare start keys. result = Bytes.compareTo(left.getStartKey(), right.getStartKey()); @@ -375,7 +373,7 @@ public class CatalogJanitor extends Chore { FileSystem fs = this.services.getMasterFileSystem().getFileSystem(); Path rootdir = this.services.getMasterFileSystem().getRootDir(); - Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTableName()); + Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable()); HRegionFileSystem regionFs = null; try { @@ -387,7 +385,7 @@ public class CatalogJanitor extends Chore { } boolean references = false; - HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName()); + HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTable()); for (HColumnDescriptor family: parentDescriptor.getFamilies()) { if ((references = regionFs.hasReferences(family.getNameAsString()))) { break; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 0b8f710..75ecde9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1068,7 +1068,7 @@ 
MasterServices, Server { MetaReader.fullScan(catalogTracker, disabledOrDisablingOrEnabling, true); for(Iterator iter = allRegions.keySet().iterator(); iter.hasNext();) { - if (!iter.next().getTableName().isSystemTable()) { + if (!iter.next().getTable().isSystemTable()) { iter.remove(); } } @@ -1108,8 +1108,8 @@ MasterServices, Server { this.assignmentManager.regionOnline(regionInfo, currServer); } - if (!this.assignmentManager.getZKTable().isEnabledTable(regionInfo.getTableName())) { - this.assignmentManager.setEnabledTable(regionInfo.getTableName()); + if (!this.assignmentManager.getZKTable().isEnabledTable(regionInfo.getTable())) { + this.assignmentManager.setEnabledTable(regionInfo.getTable()); } LOG.info("System region " + regionInfo.getRegionNameAsString() + " assigned, rit=" + rit + ", location=" + catalogTracker.getMetaLocation()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 6dca7b3..ebab4b8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -588,7 +588,7 @@ public class MasterFileSystem { public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName) throws IOException { // archive family store files - Path tableDir = FSUtils.getTableDir(rootdir, region.getTableName()); + Path tableDir = FSUtils.getTableDir(rootdir, region.getTable()); HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName); // delete the family folder diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index 006132f..3874665 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ 
-529,7 +529,7 @@ public class RegionStates { for (Map.Entry> e: serverHoldings.entrySet()) { for (HRegionInfo hri: e.getValue()) { if (hri.isMetaRegion()) continue; - TableName tablename = hri.getTableName(); + TableName tablename = hri.getTable(); Map> svrToRegions = result.get(tablename); if (svrToRegions == null) { svrToRegions = new HashMap>(serverHoldings.size()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java index 773e755..d7e51ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java @@ -144,7 +144,7 @@ public class SnapshotOfRegionAssignmentFromMeta { regionNameToRegionInfoMap.put(regionInfo.getRegionNameAsString(), regionInfo); // Process the table to region map - TableName tableName = regionInfo.getTableName(); + TableName tableName = regionInfo.getTable(); List regionList = tableToRegionMap.get(tableName); if (regionList == null) { regionList = new ArrayList(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index 54da5c4..39e73fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -148,7 +148,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { regionPerServerIndex = 0; for (HRegionInfo region : entry.getValue()) { - String tableName = region.getTableName().getNameAsString(); + String tableName = region.getTable().getNameAsString(); Integer idx = tablesToIndex.get(tableName); if (idx == null) { tables.add(tableName); diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java index d8cee20..147f196 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java @@ -99,7 +99,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer { currentServer.getPort(), ServerName.NON_STARTCODE); List list = entry.getValue(); for (HRegionInfo region : list) { - if(region.getTableName().getNamespaceAsString() + if(region.getTable().getNamespaceAsString() .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { continue; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java index af950a0..690d8c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java @@ -122,7 +122,7 @@ class RegionLocationFinder { protected List internalGetTopBlockLocation(HRegionInfo region) { List topServerNames = null; try { - HTableDescriptor tableDescriptor = getTableDescriptor(region.getTableName()); + HTableDescriptor tableDescriptor = getTableDescriptor(region.getTable()); if (tableDescriptor != null) { HDFSBlocksDistribution blocksDistribution = HRegion.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java index 19c7185..35e503c 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java @@ -92,7 +92,7 @@ public class ClosedRegionHandler extends EventHandler implements TotesHRegionInf LOG.debug("Handling CLOSED event for " + regionInfo.getEncodedName()); // Check if this table is being disabled or not if (this.assignmentManager.getZKTable(). - isDisablingOrDisabledTable(this.regionInfo.getTableName())) { + isDisablingOrDisabledTable(this.regionInfo.getTable())) { assignmentManager.offlineDisabledRegion(regionInfo); return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java index b3bce49..f711bd1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java @@ -69,7 +69,7 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInf this.expectedVersion = expectedVersion; if(regionInfo.isMetaRegion()) { priority = OpenedPriority.META; - } else if(regionInfo.getTableName() + } else if(regionInfo.getTable() .getNamespaceAsString().equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { priority = OpenedPriority.SYSTEM; } else { @@ -113,8 +113,7 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInf " because regions is NOT in RIT -- presuming this is because it SPLIT"); } if (!openedNodeDeleted) { - if (this.assignmentManager.getZKTable().isDisablingOrDisabledTable( - regionInfo.getTableName())) { + if (this.assignmentManager.getZKTable().isDisablingOrDisabledTable(regionInfo.getTable())) { debugLog(regionInfo, "Opened region " + regionInfo.getShortNameToLog() + " but " + "this table is disabled, triggering close of region"); diff 
--git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java index 26d5b35..12c6353 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java @@ -251,7 +251,7 @@ public class ServerShutdownHandler extends EventHandler { toAssignRegions.add(hri); } else if (rit != null) { if ((rit.isClosing() || rit.isPendingClose()) - && am.getZKTable().isDisablingOrDisabledTable(hri.getTableName())) { + && am.getZKTable().isDisablingOrDisabledTable(hri.getTable())) { // If the table was partially disabled and the RS went down, we should clear the RIT // and remove the node for the region. // The rit that we use may be stale in case the table was in DISABLING state @@ -324,16 +324,14 @@ public class ServerShutdownHandler extends EventHandler { public static boolean processDeadRegion(HRegionInfo hri, Result result, AssignmentManager assignmentManager, CatalogTracker catalogTracker) throws IOException { - boolean tablePresent = assignmentManager.getZKTable().isTablePresent( - hri.getTableName()); + boolean tablePresent = assignmentManager.getZKTable().isTablePresent(hri.getTable()); if (!tablePresent) { LOG.info("The table " + hri.getTableName() + " was deleted. Hence not proceeding."); return false; } // If table is not disabled but the region is offlined, - boolean disabled = assignmentManager.getZKTable().isDisabledTable( - hri.getTableName()); + boolean disabled = assignmentManager.getZKTable().isDisabledTable(hri.getTable()); if (disabled){ LOG.info("The table " + hri.getTableName() + " was disabled. Hence not proceeding."); @@ -345,8 +343,7 @@ public class ServerShutdownHandler extends EventHandler { //to the dead server. We don't have to do anything. 
return false; } - boolean disabling = assignmentManager.getZKTable().isDisablingTable( - hri.getTableName()); + boolean disabling = assignmentManager.getZKTable().isDisablingTable(hri.getTable()); if (disabling) { LOG.info("The table " + hri.getTableName() + " is disabled. Hence not assigning region" + hri.getEncodedName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 98e9894..c94c1b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -3924,11 +3924,11 @@ public class HRegion implements HeapSize { // , Writable{ final HLog hlog, final boolean initialize, final boolean ignoreHLog) throws IOException { - LOG.info("creating HRegion " + info.getTableName().getNameAsString() + LOG.info("creating HRegion " + info.getTable().getNameAsString() + " HTD == " + hTableDescriptor + " RootDir = " + rootDir + - " Table name == " + info.getTableName().getNameAsString()); + " Table name == " + info.getTable().getNameAsString()); - Path tableDir = FSUtils.getTableDir(rootDir, info.getTableName()); + Path tableDir = FSUtils.getTableDir(rootDir, info.getTable()); FileSystem fs = FileSystem.get(conf); HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, info); HLog effectiveHLog = hlog; @@ -4088,7 +4088,7 @@ public class HRegion implements HeapSize { // , Writable{ if (LOG.isDebugEnabled()) { LOG.debug("Opening region: " + info); } - Path dir = FSUtils.getTableDir(rootDir, info.getTableName()); + Path dir = FSUtils.getTableDir(rootDir, info.getTable()); HRegion r = HRegion.newHRegion(dir, wal, fs, conf, info, htd, rsServices); return r.openHRegion(reporter); } @@ -4217,8 +4217,7 @@ public class HRegion implements HeapSize { // , Writable{ @Deprecated public static Path getRegionDir(final Path rootdir, 
final HRegionInfo info) { return new Path( - FSUtils.getTableDir(rootdir, info.getTableName()), - info.getEncodedName()); + FSUtils.getTableDir(rootdir, info.getTable()), info.getEncodedName()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 4d24821..5d2a2c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -3523,8 +3523,8 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa LOG.info("Open " + region.getRegionNameAsString()); htd = htds.get(region.getTableName()); if (htd == null) { - htd = this.tableDescriptors.get(region.getTableName()); - htds.put(region.getTableName(), htd); + htd = this.tableDescriptors.get(region.getTable()); + htds.put(region.getTable(), htd); } final Boolean previous = this.regionsInTransitionInRS.putIfAbsent( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index c4ea2b1..a804260 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -281,7 +281,7 @@ public class HStore implements Store { @Override public TableName getTableName() { - return this.getRegionInfo().getTableName(); + return this.getRegionInfo().getTable(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index efe803c..c1e5726 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -146,7 +146,7 @@ public class RegionCoprocessorHost loadSystemCoprocessors(conf, REGION_COPROCESSOR_CONF_KEY); // load system default cp's for user tables from configuration. - if (!region.getRegionInfo().getTableName().isSystemTable()) { + if (!region.getRegionInfo().getTable().isSystemTable()) { loadSystemCoprocessors(conf, USER_REGION_COPROCESSOR_CONF_KEY); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java index e800928..457f850 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java @@ -433,7 +433,7 @@ public class RegionMergeTransaction { } // Merged region is sorted between two merging regions in META - HRegionInfo mergedRegionInfo = new HRegionInfo(a.getTableName(), startKey, + HRegionInfo mergedRegionInfo = new HRegionInfo(a.getTable(), startKey, endKey, false, rid); return mergedRegionInfo; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java index b9e8d02..569df9a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java @@ -168,8 +168,8 @@ public class SplitTransaction { return false; } long rid = getDaughterRegionIdTimestamp(hri); - this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow, false, rid); - this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey, false, rid); + this.hri_a = new HRegionInfo(hri.getTable(), startKey, this.splitrow, false, rid); + this.hri_b = 
new HRegionInfo(hri.getTable(), this.splitrow, endKey, false, rid); return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java index 7386b94..7faa4c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java @@ -1615,7 +1615,7 @@ public class HLogSplitter { final long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE); boolean reloadLocation = false; - TableName tableName = loc.getRegionInfo().getTableName(); + TableName tableName = loc.getRegionInfo().getTable(); int tries = 0; Throwable cause = null; while (endTime > EnvironmentEdgeManager.currentTimeMillis()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java index 1fbf56a..7586693 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java @@ -779,4 +779,10 @@ public class RemoteHTable implements HTableInterface { public void setWriteBufferSize(long writeBufferSize) throws IOException { throw new IOException("setWriteBufferSize not supported"); } + + @Override + public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, + long amount, boolean writeToWAL) throws IOException { + throw new IOException("incrementColumnValue not supported"); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index ff7565b..8426817 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -208,7 +208,7 @@ public class AccessController extends BaseRegionObserver RegionCoprocessorEnvironment e, Map> families) { HRegionInfo hri = e.getRegion().getRegionInfo(); - TableName tableName = hri.getTableName(); + TableName tableName = hri.getTable(); // 1. All users need read access to hbase:meta table. // this is a very common operation, so deal with it quickly. @@ -470,7 +470,7 @@ public class AccessController extends BaseRegionObserver Map> familyMap) throws IOException { HRegionInfo hri = env.getRegion().getRegionInfo(); - TableName tableName = hri.getTableName(); + TableName tableName = hri.getTable(); if (user == null) { return false; @@ -702,7 +702,7 @@ public class AccessController extends BaseRegionObserver @Override public void preMove(ObserverContext c, HRegionInfo region, ServerName srcServer, ServerName destServer) throws IOException { - requirePermission("move", region.getTableName(), null, null, Action.ADMIN); + requirePermission("move", region.getTable(), null, null, Action.ADMIN); } @Override @@ -713,7 +713,7 @@ public class AccessController extends BaseRegionObserver @Override public void preAssign(ObserverContext c, HRegionInfo regionInfo) throws IOException { - requirePermission("assign", regionInfo.getTableName(), null, null, Action.ADMIN); + requirePermission("assign", regionInfo.getTable(), null, null, Action.ADMIN); } @Override @@ -723,7 +723,7 @@ public class AccessController extends BaseRegionObserver @Override public void preUnassign(ObserverContext c, HRegionInfo regionInfo, boolean force) throws IOException { - requirePermission("unassign", regionInfo.getTableName(), null, null, Action.ADMIN); + requirePermission("unassign", regionInfo.getTable(), null, null, Action.ADMIN); } @Override @@ -733,7 +733,7 @@ public class AccessController extends 
BaseRegionObserver @Override public void preRegionOffline(ObserverContext c, HRegionInfo regionInfo) throws IOException { - requirePermission("regionOffline", regionInfo.getTableName(), null, null, Action.ADMIN); + requirePermission("regionOffline", regionInfo.getTable(), null, null, Action.ADMIN); } @Override @@ -1420,7 +1420,7 @@ public class AccessController extends BaseRegionObserver if (region != null) { HRegionInfo regionInfo = region.getRegionInfo(); if (regionInfo != null) { - tableName = regionInfo.getTableName(); + tableName = regionInfo.getTable(); } } return tableName; @@ -1452,7 +1452,7 @@ public class AccessController extends BaseRegionObserver } private boolean isSpecialTable(HRegionInfo regionInfo) { - TableName tableName = regionInfo.getTableName(); + TableName tableName = regionInfo.getTable(); return tableName.equals(AccessControlLists.ACL_TABLE_NAME) || tableName.equals(TableName.NAMESPACE_TABLE_NAME) || tableName.equals(TableName.META_TABLE_NAME); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index c706654..f8027e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -1820,7 +1820,7 @@ public class HBaseFsck extends Configured implements Tool { if (hbi.deployedOn.size() == 0) continue; // We should be safe here - TableName tableName = hbi.metaEntry.getTableName(); + TableName tableName = hbi.metaEntry.getTable(); TableInfo modTInfo = tablesInfo.get(tableName); if (modTInfo == null) { modTInfo = new TableInfo(tableName); @@ -2460,7 +2460,7 @@ public class HBaseFsck extends Configured implements Tool { // pick only those tables that were not modified in the last few milliseconds. 
if (info != null && info.getStartKey().length == 0 && !info.isMetaRegion()) { if (info.modTime + timelag < now) { - tableNames.add(info.getTableName()); + tableNames.add(info.getTable()); } else { numSkipped.incrementAndGet(); // one more in-flux table } @@ -2584,7 +2584,7 @@ public class HBaseFsck extends Configured implements Tool { sn = pair.getSecond(); } HRegionInfo hri = pair.getFirst(); - if (!(isTableIncluded(hri.getTableName()) + if (!(isTableIncluded(hri.getTable()) || hri.isMetaRegion())) { return true; } @@ -2752,7 +2752,7 @@ public class HBaseFsck extends Configured implements Tool { public TableName getTableName() { if (this.metaEntry != null) { - return this.metaEntry.getTableName(); + return this.metaEntry.getTable(); } else if (this.hdfsEntry != null) { // we are only guaranteed to have a path and not an HRI for hdfsEntry, // so we get the name from the Path @@ -3108,7 +3108,7 @@ public class HBaseFsck extends Configured implements Tool { List ret = Lists.newArrayList(); for (HRegionInfo hri : regions) { if (hri.isMetaTable() || (!hbck.checkMetaOnly - && hbck.isTableIncluded(hri.getTableName()))) { + && hbck.isTableIncluded(hri.getTable()))) { ret.add(hri); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index 87568b4..2191fb7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -1325,7 +1325,7 @@ public class TestAdmin { HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME); List onlineRegions = ProtobufUtil.getOnlineRegions(rs); for (HRegionInfo regionInfo : onlineRegions) { - if (!regionInfo.getTableName().isSystemTable()) { + if (!regionInfo.getTable().isSystemTable()) { info = regionInfo; admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(), rs .getServerName().getServerName()); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java index a3dc051..f108d75 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java @@ -87,7 +87,7 @@ implements WALObserver { HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException { boolean bypass = false; // check table name matches or not. - if (!Bytes.equals(info.getTableName().getName(), this.tableName)) { + if (!Bytes.equals(info.getTableName(), this.tableName)) { return bypass; } preWALWriteCalled = true; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java index aa58de1..affe00b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java @@ -104,7 +104,7 @@ public class TestRegionServerCoprocessorExceptionWithAbort { final Put put, final WALEdit edit, final Durability durability) { TableName tableName = - c.getEnvironment().getRegion().getRegionInfo().getTableName(); + c.getEnvironment().getRegion().getRegionInfo().getTable(); if (TABLE_NAME.equals(tableName) && Bytes.equals(put.getRow(), ROW)) { throw new NullPointerException("Buggy coprocessor: " + put); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithRemove.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithRemove.java index 6de987f..ca63038 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithRemove.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithRemove.java @@ -53,8 +53,7 @@ public class TestRegionServerCoprocessorExceptionWithRemove { final Put put, final WALEdit edit, final Durability durability) { String tableName = - c.getEnvironment().getRegion().getRegionInfo() - .getTableName().getNameAsString(); + c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString(); if (tableName.equals("observed_table")) { Integer i = null; i = i + 1; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index b4c79b8..dc45bda 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -186,7 +186,7 @@ public class TestWALObserver { // it's where WAL write cp should occur. long now = EnvironmentEdgeManager.currentTimeMillis(); - log.append(hri, hri.getTableName(), edit, now, htd); + log.append(hri, hri.getTable(), edit, now, htd); // the edit shall have been change now by the coprocessor. 
foundFamily0 = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java index 162ca04..22a103a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java @@ -158,7 +158,7 @@ public class TestLoadIncrementalHFilesSplitRecovery { .toBytes(table)); for (HRegionInfo hri : ProtobufUtil.getOnlineRegions(hrs)) { - if (Bytes.equals(hri.getTableName().getName(), Bytes.toBytes(table))) { + if (Bytes.equals(hri.getTable().getName(), Bytes.toBytes(table))) { // splitRegion doesn't work if startkey/endkey are null ProtobufUtil.split(hrs, hri, rowkey(ROWCOUNT / 2)); // hard code split } @@ -169,7 +169,7 @@ public class TestLoadIncrementalHFilesSplitRecovery { do { regions = 0; for (HRegionInfo hri : ProtobufUtil.getOnlineRegions(hrs)) { - if (Bytes.equals(hri.getTableName().getName(), Bytes.toBytes(table))) { + if (Bytes.equals(hri.getTable().getName(), Bytes.toBytes(table))) { regions++; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java index 360e7fc..98fd2f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java @@ -501,7 +501,7 @@ public class TestAssignmentManager { // adding region in pending close. 
am.getRegionStates().updateRegionState( REGIONINFO, State.SPLITTING, SERVERNAME_A); - am.getZKTable().setEnabledTable(REGIONINFO.getTableName()); + am.getZKTable().setEnabledTable(REGIONINFO.getTable()); RegionTransition data = RegionTransition.createRegionTransition(EventType.RS_ZK_REGION_SPLITTING, REGIONINFO.getRegionName(), SERVERNAME_A); String node = ZKAssign.getNodeName(this.watcher, REGIONINFO.getEncodedName()); @@ -550,9 +550,9 @@ public class TestAssignmentManager { // adding region in pending close. am.getRegionStates().updateRegionState(REGIONINFO, State.PENDING_CLOSE); if (state == Table.State.DISABLING) { - am.getZKTable().setDisablingTable(REGIONINFO.getTableName()); + am.getZKTable().setDisablingTable(REGIONINFO.getTable()); } else { - am.getZKTable().setDisabledTable(REGIONINFO.getTableName()); + am.getZKTable().setDisabledTable(REGIONINFO.getTable()); } RegionTransition data = RegionTransition.createRegionTransition(EventType.M_ZK_REGION_CLOSING, REGIONINFO.getRegionName(), SERVERNAME_A); @@ -576,7 +576,7 @@ public class TestAssignmentManager { am.getRegionStates().isRegionsInTransition()); } } finally { - am.setEnabledTable(REGIONINFO.getTableName()); + am.setEnabledTable(REGIONINFO.getTable()); executor.shutdown(); am.shutdown(); // Clean up all znodes @@ -888,7 +888,7 @@ public class TestAssignmentManager { } try{ // set table in disabling state. - am.getZKTable().setDisablingTable(REGIONINFO.getTableName()); + am.getZKTable().setDisablingTable(REGIONINFO.getTable()); am.joinCluster(); // should not call retainAssignment if we get empty regions in assignAllUserRegions. assertFalse( @@ -896,12 +896,12 @@ public class TestAssignmentManager { gate.get()); // need to change table state from disabling to disabled. 
assertTrue("Table should be disabled.", - am.getZKTable().isDisabledTable(REGIONINFO.getTableName())); + am.getZKTable().isDisabledTable(REGIONINFO.getTable())); } finally { this.server.getConfiguration().setClass( HConstants.HBASE_MASTER_LOADBALANCER_CLASS, DefaultLoadBalancer.class, LoadBalancer.class); - am.getZKTable().setEnabledTable(REGIONINFO.getTableName()); + am.getZKTable().setEnabledTable(REGIONINFO.getTable()); am.shutdown(); } } @@ -927,17 +927,17 @@ public class TestAssignmentManager { this.serverManager); try { // set table in enabling state. - am.getZKTable().setEnablingTable(REGIONINFO.getTableName()); - new EnableTableHandler(server, REGIONINFO.getTableName(), + am.getZKTable().setEnablingTable(REGIONINFO.getTable()); + new EnableTableHandler(server, REGIONINFO.getTable(), am.getCatalogTracker(), am, new NullTableLockManager(), true).prepare() .process(); assertEquals("Number of assignments should be 1.", 1, assignmentCount); assertTrue("Table should be enabled.", - am.getZKTable().isEnabledTable(REGIONINFO.getTableName())); + am.getZKTable().isEnabledTable(REGIONINFO.getTable())); } finally { enabling = false; assignmentCount = 0; - am.getZKTable().setEnabledTable(REGIONINFO.getTableName()); + am.getZKTable().setEnabledTable(REGIONINFO.getTable()); am.shutdown(); ZKAssign.deleteAllNodes(this.watcher); } @@ -964,7 +964,7 @@ public class TestAssignmentManager { // adding region plan am.regionPlans.put(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, SERVERNAME_B, SERVERNAME_A)); - am.getZKTable().setEnabledTable(REGIONINFO.getTableName()); + am.getZKTable().setEnabledTable(REGIONINFO.getTable()); try { am.assignInvoked = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index d2255ce..fbe6dc3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -833,7 +833,7 @@ public class TestCatalogJanitor { // archive directory. Otherwise, it just seems to pick the first root directory it can find (so // the single test passes, but when the full suite is run, things get borked). FSUtils.setRootDir(fs.getConf(), rootdir); - Path tabledir = FSUtils.getTableDir(rootdir, parent.getTableName()); + Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable()); Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName()); System.out.println("Old root:" + rootdir); System.out.println("Old table:" + tabledir); @@ -915,7 +915,7 @@ public class TestCatalogJanitor { final HRegionInfo daughter, final byte [] midkey, final boolean top) throws IOException { Path rootdir = services.getMasterFileSystem().getRootDir(); - Path tabledir = FSUtils.getTableDir(rootdir, parent.getTableName()); + Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable()); Path storedir = HStore.getStoreHomedir(tabledir, daughter, htd.getColumnFamilies()[0].getName()); Reference ref = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java index 839bb52..3f6fece 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java @@ -208,7 +208,7 @@ public class TestDistributedLogSplitting { hrs = rsts.get(i).getRegionServer(); regions = ProtobufUtil.getOnlineRegions(hrs); for (HRegionInfo region : regions) { - if (region.getTableName().getNameAsString().equalsIgnoreCase("table")) { + if (region.getTable().getNameAsString().equalsIgnoreCase("table")) { foundRs = true; break; } @@ -222,7 +222,7 @@ public class TestDistributedLogSplitting { Iterator it = 
regions.iterator(); while (it.hasNext()) { HRegionInfo region = it.next(); - if (region.getTableName().getNamespaceAsString() + if (region.getTable().getNamespaceAsString() .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { it.remove(); } @@ -664,12 +664,12 @@ public class TestDistributedLogSplitting { break; } if (tableName != null && - !tableName.equalsIgnoreCase(region.getTableName().getNameAsString())) { + !tableName.equalsIgnoreCase(region.getTable().getNameAsString())) { // make sure that we find a RS has online regions for both "table" and "disableTable" hasRegionsForBothTables = true; break; } else if (tableName == null) { - tableName = region.getTableName().getNameAsString(); + tableName = region.getTable().getNameAsString(); } } if (isCarryingMeta) { @@ -1123,7 +1123,7 @@ public class TestDistributedLogSplitting { HRegionServer hrs = rst.getRegionServer(); List hris = ProtobufUtil.getOnlineRegions(hrs); for (HRegionInfo hri : hris) { - if (hri.getTableName().isSystemTable()) { + if (hri.getTable().isSystemTable()) { continue; } LOG.debug("adding data to rs = " + rst.getName() + @@ -1148,7 +1148,7 @@ public class TestDistributedLogSplitting { for(Iterator iter = regions.iterator(); iter.hasNext(); ) { HRegionInfo regionInfo = iter.next(); - if(regionInfo.getTableName().isSystemTable()) { + if(regionInfo.getTable().isSystemTable()) { iter.remove(); } } @@ -1157,7 +1157,7 @@ public class TestDistributedLogSplitting { List hris = new ArrayList(); for (HRegionInfo region : regions) { - if (!region.getTableName().getNameAsString().equalsIgnoreCase(tname)) { + if (!region.getTable().getNameAsString().equalsIgnoreCase(tname)) { continue; } hris.add(region); @@ -1330,7 +1330,7 @@ public class TestDistributedLogSplitting { if (region.isMetaRegion()) { isCarryingMeta = true; } - if (tableName == null || region.getTableName().getNameAsString().equals(tableName)) { + if (tableName == null || region.getTable().getNameAsString().equals(tableName)) { 
foundTableRegion = true; } if (foundTableRegion && (isCarryingMeta || !hasMetaRegion)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java index 7878f91..c7d95ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java @@ -489,12 +489,12 @@ public class TestMasterTransitions { for (Result r = null; (r = s.next()) != null;) { HRegionInfo hri = HRegionInfo.getHRegionInfo(r); if (hri == null) break; - if (!hri.getTableName().getNameAsString().equals(TABLENAME)) { + if (!hri.getTable().getNameAsString().equals(TABLENAME)) { continue; } // If start key, add 'aaa'. - if(!hri.getTableName().getNameAsString().equals(TABLENAME)) { + if(!hri.getTable().getNameAsString().equals(TABLENAME)) { continue; } byte [] row = getStartKey(hri); @@ -512,24 +512,6 @@ public class TestMasterTransitions { } /* - * @return Count of rows in TABLENAME - * @throws IOException - */ - private static int count() throws IOException { - HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME); - int rows = 0; - Scan scan = new Scan(); - ResultScanner s = t.getScanner(scan); - for (Result r = null; (r = s.next()) != null;) { - rows++; - } - s.close(); - LOG.info("Counted=" + rows); - t.close(); - return rows; - } - - /* * @param hri * @return Start key for hri (If start key is '', then return 'aaa'. 
*/ @@ -545,6 +527,4 @@ public class TestMasterTransitions { private static byte [] getTestQualifier() { return getTestFamily(); } - -} - +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java index 4b479c1..b8ed519 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java @@ -64,8 +64,6 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.zookeeper.ZKAssign; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -307,7 +305,7 @@ public class TestRegionPlacement { TEST_UTIL.getHBaseCluster().getRegionServer(killIndex).getOnlineRegionsLocalContext(); isNamespaceServer = false; for (HRegion r : regs) { - if (r.getRegionInfo().getTableName().getNamespaceAsString() + if (r.getRegionInfo().getTable().getNamespaceAsString() .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { isNamespaceServer = true; break; @@ -606,7 +604,7 @@ public class TestRegionPlacement { public boolean processRow(Result result) throws IOException { try { HRegionInfo info = MetaScanner.getHRegionInfo(result); - if(info.getTableName().getNamespaceAsString() + if(info.getTable().getNamespaceAsString() .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { return true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java index 7870cae..90ee7aa 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java @@ -60,7 +60,7 @@ public class TestHRegionFileSystem { // Create a Region HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable")); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, - FSUtils.getTableDir(rootDir, hri.getTableName()), hri); + FSUtils.getTableDir(rootDir, hri.getTable()), hri); // Verify if the region is on disk Path regionDir = regionFs.getRegionDir(); @@ -72,12 +72,12 @@ public class TestHRegionFileSystem { // Open the region regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, - FSUtils.getTableDir(rootDir, hri.getTableName()), hri, false); + FSUtils.getTableDir(rootDir, hri.getTable()), hri, false); assertEquals(regionDir, regionFs.getRegionDir()); // Delete the region HRegionFileSystem.deleteRegionFromFileSystem(conf, fs, - FSUtils.getTableDir(rootDir, hri.getTableName()), hri); + FSUtils.getTableDir(rootDir, hri.getTable()), hri); assertFalse("The region folder should be removed", fs.exists(regionDir)); fs.delete(rootDir, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java index 340e229..1ee256b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -28,12 +27,12 @@ import java.io.IOException; import org.apache.hadoop.fs.FileStatus; import 
org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.MD5Hash; import org.junit.Test; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index 1f08913..f548898 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -185,7 +185,7 @@ public class TestRegionMergeTransactionOnCluster { FileSystem fs = master.getMasterFileSystem().getFileSystem(); Path rootDir = master.getMasterFileSystem().getRootDir(); - Path tabledir = FSUtils.getTableDir(rootDir, mergedRegionInfo.getTableName()); + Path tabledir = FSUtils.getTableDir(rootDir, mergedRegionInfo.getTable()); Path regionAdir = new Path(tabledir, regionA.getEncodedName()); Path regionBdir = new Path(tabledir, regionB.getEncodedName()); assertTrue(fs.exists(regionAdir)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java index 1189406..46ff904 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java @@ -307,7 +307,7 @@ public 
class TestRegionServerNoMaster { hri.getEncodedNameAsBytes())); // Let's start the open handler - HTableDescriptor htd = getRS().tableDescriptors.get(hri.getTableName()); + HTableDescriptor htd = getRS().tableDescriptors.get(hri.getTable()); getRS().service.submit(new OpenRegionHandler(getRS(), getRS(), hri, htd, 0)); // The open handler should have removed the region from RIT but kept the region closed @@ -361,7 +361,7 @@ public class TestRegionServerNoMaster { // 1) There is no ZK node // 2) The region in RIT was changed. // The order is more or less implementation dependant. - HTableDescriptor htd = getRS().tableDescriptors.get(hri.getTableName()); + HTableDescriptor htd = getRS().tableDescriptors.get(hri.getTable()); getRS().service.submit(new OpenRegionHandler(getRS(), getRS(), hri, htd, 0)); // The open handler should have removed the region from RIT but kept the region closed diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index c9378f7..214e7c2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -923,7 +923,7 @@ public class TestSplitTransactionOnCluster { @Override void transitionZKNode(Server server, RegionServerServices services, HRegion a, HRegion b) throws IOException { - if (this.currentRegion.getRegionInfo().getTableName().getNameAsString() + if (this.currentRegion.getRegionInfo().getTable().getNameAsString() .equals("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack")) { try { if (!secondSplit){ @@ -935,14 +935,14 @@ public class TestSplitTransactionOnCluster { } super.transitionZKNode(server, services, a, b); - if (this.currentRegion.getRegionInfo().getTableName().getNameAsString() + if 
(this.currentRegion.getRegionInfo().getTable().getNameAsString() .equals("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack")) { firstSplitCompleted = true; } } @Override public boolean rollback(Server server, RegionServerServices services) throws IOException { - if (this.currentRegion.getRegionInfo().getTableName().getNameAsString() + if (this.currentRegion.getRegionInfo().getTable().getNameAsString() .equals("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack")) { if(secondSplit){ super.rollback(server, services); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index f8613f9..f99a9fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -93,7 +93,7 @@ public class TestStoreFile extends HBaseTestCase { final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testBasicHalfMapFileTb")); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem( - conf, fs, new Path(this.testDir, hri.getTableName().getNameAsString()), hri); + conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri); StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs, 2 * 1024) .withFilePath(regionFs.createTempName()) @@ -142,7 +142,7 @@ public class TestStoreFile extends HBaseTestCase { public void testReference() throws IOException { final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testReferenceTb")); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem( - conf, fs, new Path(this.testDir, hri.getTableName().getNameAsString()), hri); + conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri); // Make a store file and write data to it. 
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs, 8 * 1024) @@ -162,7 +162,7 @@ public class TestStoreFile extends HBaseTestCase { kv = KeyValue.createKeyValueFromKey(reader.getLastKey()); byte [] finalRow = kv.getRow(); // Make a reference - HRegionInfo splitHri = new HRegionInfo(hri.getTableName(), null, midRow); + HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow); Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true); StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf, BloomType.NONE, NoOpDataBlockEncoder.INSTANCE); @@ -186,7 +186,7 @@ public class TestStoreFile extends HBaseTestCase { Configuration testConf = new Configuration(this.conf); FSUtils.setRootDir(testConf, this.testDir); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem( - testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTableName()), hri); + testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri); // Make a store file and write data to it. StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs, 8 * 1024) @@ -228,7 +228,7 @@ public class TestStoreFile extends HBaseTestCase { // adding legal table name chars to verify regex handles it. HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name")); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem( - testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTableName()), hri); + testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri); // Make a store file and write data to it. //// StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs, 8 * 1024) @@ -240,7 +240,7 @@ public class TestStoreFile extends HBaseTestCase { // create link to store file. 
/clone/region//-- HRegionInfo hriClone = new HRegionInfo(TableName.valueOf("clone")); HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem( - testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTableName()), + testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hriClone); Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY); HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName()); @@ -250,8 +250,8 @@ public class TestStoreFile extends HBaseTestCase { // create splits of the link. // /clone/splitA//, // /clone/splitB// - HRegionInfo splitHriA = new HRegionInfo(hri.getTableName(), null, SPLITKEY); - HRegionInfo splitHriB = new HRegionInfo(hri.getTableName(), SPLITKEY, null); + HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY); + HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null); StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE, NoOpDataBlockEncoder.INSTANCE); Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top @@ -300,11 +300,11 @@ public class TestStoreFile extends HBaseTestCase { KeyValue midKV = KeyValue.createKeyValueFromKey(midkey); byte [] midRow = midKV.getRow(); // Create top split. - HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(), + HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), null, midRow); Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true); // Create bottom split. - HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(), + HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), midRow, null); Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false); // Make readers on top and bottom. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java index e24995b..f76a5bc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java @@ -103,10 +103,10 @@ public final class HLogPerformanceEvaluation extends Configured implements Tool addFamilyMapToWALEdit(put.getFamilyCellMap(), walEdit); HRegionInfo hri = region.getRegionInfo(); if (this.noSync) { - hlog.appendNoSync(hri, hri.getTableName(), walEdit, + hlog.appendNoSync(hri, hri.getTable(), walEdit, new ArrayList(), now, htd); } else { - hlog.append(hri, hri.getTableName(), walEdit, now, htd); + hlog.append(hri, hri.getTable(), walEdit, now, htd); } } long totalTime = (System.currentTimeMillis() - startTime); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java index 2903403..65434a7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java @@ -37,17 +37,16 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MediumTests; -import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.util.Bytes; @@ -202,7 +201,7 @@ public class TestExportSnapshot { // First region, simple with one plain hfile. HRegionInfo hri = new HRegionInfo(tableWithRefsName); HRegionFileSystem r0fs = HRegionFileSystem.createRegionOnFileSystem(conf, - fs, FSUtils.getTableDir(archiveDir, hri.getTableName()), hri); + fs, FSUtils.getTableDir(archiveDir, hri.getTable()), hri); Path storeFile = new Path(rootDir, TEST_HFILE); FSDataOutputStream out = fs.create(storeFile); out.write(Bytes.toBytes("Test Data")); @@ -213,7 +212,7 @@ public class TestExportSnapshot { // This region contains a reference to the hfile in the first region. hri = new HRegionInfo(tableWithRefsName); HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(conf, - fs, new Path(archiveDir, hri.getTableName().getNameAsString()), hri); + fs, new Path(archiveDir, hri.getTable().getNameAsString()), hri); storeFile = new Path(rootDir, TEST_HFILE + '.' 
+ r0fs.getRegionInfo().getEncodedName()); out = fs.create(storeFile); out.write(Bytes.toBytes("Test Data")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java index 1c862a0..e2bf7b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.monitoring.MonitoredTask; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; @@ -160,7 +159,7 @@ public class TestRestoreSnapshotHelper { // First region, simple with one plain hfile. HRegionInfo hri = new HRegionInfo(htd.getTableName()); HRegionFileSystem r0fs = HRegionFileSystem.createRegionOnFileSystem(conf, - fs, FSUtils.getTableDir(archiveDir, hri.getTableName()), hri); + fs, FSUtils.getTableDir(archiveDir, hri.getTable()), hri); Path storeFile = new Path(rootDir, TEST_HFILE); fs.createNewFile(storeFile); r0fs.commitStoreFile(TEST_FAMILY, storeFile); @@ -169,7 +168,7 @@ public class TestRestoreSnapshotHelper { // This region contains a reference to the hfile in the first region. hri = new HRegionInfo(htd.getTableName()); HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(conf, - fs, FSUtils.getTableDir(archiveDir, hri.getTableName()), hri); + fs, FSUtils.getTableDir(archiveDir, hri.getTable()), hri); storeFile = new Path(rootDir, TEST_HFILE + '.' 
+ r0fs.getRegionInfo().getEncodedName()); fs.createNewFile(storeFile); r1fs.commitStoreFile(TEST_FAMILY, storeFile); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index 9adf5c1..1c8f08f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -1868,7 +1868,7 @@ public class TestHBaseFsck { @Override public boolean processRow(Result rowResult) throws IOException { - if(!MetaScanner.getHRegionInfo(rowResult).getTableName().isSystemTable()) { + if(!MetaScanner.getHRegionInfo(rowResult).getTable().isSystemTable()) { Delete delete = new Delete(rowResult.getRow()); delete.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); deletes.add(delete); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java index 08456b3..4a5f84d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java @@ -235,8 +235,7 @@ public class OfflineMetaRebuildTestCore { for (Result r : scanner) { HRegionInfo info = HRegionInfo.getHRegionInfo(r); - if(info != null && - !info.getTableName().getNamespaceAsString() + if(info != null && !info.getTable().getNamespaceAsString() .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { Delete d = new Delete(r.getRow()); dels.add(d);