From 98bdeb44f5446636cf0747fa6952cf9cc2564948 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Tue, 5 Mar 2019 16:04:38 +0800 Subject: [PATCH] HBASE-21994 Expose TableDescriptors to master coprocessor --- .../hbase/rsgroup/RSGroupInfoManagerImpl.java | 4 +- .../balancer/RSGroupableBalancerTestBase.java | 13 ++- .../hbase/tmpl/master/MasterStatusTmpl.jamon | 9 +- .../hadoop/hbase/MutableTableDescriptors.java | 54 +++++++++ .../apache/hadoop/hbase/TableDescriptors.java | 61 +++-------- .../MasterCoprocessorEnvironment.java | 6 + .../hadoop/hbase/master/CatalogJanitor.java | 2 +- .../master/ExpiredMobFileCleanerChore.java | 13 +-- .../apache/hadoop/hbase/master/HMaster.java | 19 ++-- .../hbase/master/MasterCoprocessorHost.java | 6 + .../hadoop/hbase/master/MasterFileSystem.java | 9 +- .../hbase/master/MasterRpcServices.java | 3 +- .../hadoop/hbase/master/MasterServices.java | 4 +- .../hbase/master/MobCompactionChore.java | 7 +- .../hbase/master/TableStateManager.java | 17 ++- .../MergeTableRegionsProcedure.java | 8 +- .../master/assignment/RegionStateStore.java | 2 +- .../assignment/SplitTableRegionProcedure.java | 13 ++- .../master/balancer/RegionLocationFinder.java | 4 +- .../normalizer/SimpleRegionNormalizer.java | 3 +- .../procedure/DisableTableProcedure.java | 2 +- .../procedure/EnableTableProcedure.java | 2 +- .../procedure/ModifyTableProcedure.java | 14 +-- .../procedure/TruncateTableProcedure.java | 2 +- .../replication/AbstractPeerProcedure.java | 2 +- .../replication/ModifyPeerProcedure.java | 2 +- .../UpdatePeerConfigProcedure.java | 2 +- .../master/snapshot/SnapshotManager.java | 2 +- .../master/snapshot/TakeSnapshotHandler.java | 6 +- .../hbase/regionserver/HRegionServer.java | 8 +- .../hbase/regionserver/RSRpcServices.java | 6 +- .../regionserver/RegionServerServices.java | 4 +- .../handler/AssignRegionHandler.java | 4 +- .../replication/ReplicationEndpoint.java | 8 +- .../RegionReplicaReplicationEndpoint.java | 18 +-- 
.../regionserver/ReplicationSource.java | 4 +- .../security/access/AccessController.java | 13 +-- .../hadoop/hbase/util/FSTableDescriptors.java | 103 ++++++++---------- .../apache/hadoop/hbase/util/HBaseFsck.java | 5 +- .../apache/hadoop/hbase/HBaseTestCase.java | 2 +- .../hbase/MockRegionServerServices.java | 2 +- .../hbase/master/MockNoopMasterServices.java | 4 +- .../hadoop/hbase/master/MockRegionServer.java | 4 +- .../master/assignment/MockMasterServices.java | 18 +-- .../TestSimpleRegionNormalizer.java | 47 +++++--- .../MasterProcedureTestingUtility.java | 8 +- .../hbase/regionserver/TestHRegionInfo.java | 4 +- .../regionserver/TestRegionInfoBuilder.java | 6 +- .../TestRegionMergeTransactionOnCluster.java | 2 +- .../TestRegionServerNoMaster.java | 2 +- .../hbase/util/TestFSTableDescriptors.java | 28 ++--- 51 files changed, 305 insertions(+), 286 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/MutableTableDescriptors.java diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index c89bba885c..461f9cdf09 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -426,8 +426,8 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { // refresh default group, prune NavigableSet orphanTables = new TreeSet<>(); - for (String entry : masterServices.getTableDescriptors().getAll().keySet()) { - orphanTables.add(TableName.valueOf(entry)); + for (TableDescriptor entry : masterServices.getTableDescriptors().getAll()) { + orphanTables.add(entry.getTableName()); } for (RSGroupInfo group : groupList) { if (!group.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java index 570bb3abb3..af2e522be5 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java @@ -28,12 +28,13 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; @@ -407,11 +408,11 @@ public class RSGroupableBalancerTestBase { } protected static MasterServices getMockedMaster() throws IOException { - TableDescriptors tds = Mockito.mock(TableDescriptors.class); - Mockito.when(tds.get(tables[0])).thenReturn(tableDescs.get(0)); - Mockito.when(tds.get(tables[1])).thenReturn(tableDescs.get(1)); - Mockito.when(tds.get(tables[2])).thenReturn(tableDescs.get(2)); - Mockito.when(tds.get(tables[3])).thenReturn(tableDescs.get(3)); + MutableTableDescriptors tds = Mockito.mock(MutableTableDescriptors.class); + Mockito.when(tds.get(tables[0])).thenReturn(Optional.of(tableDescs.get(0))); + Mockito.when(tds.get(tables[1])).thenReturn(Optional.of(tableDescs.get(1))); + Mockito.when(tds.get(tables[2])).thenReturn(Optional.of(tableDescs.get(2))); + Mockito.when(tds.get(tables[3])).thenReturn(Optional.of(tableDescs.get(3))); MasterServices services = Mockito.mock(HMaster.class); Mockito.when(services.getTableDescriptors()).thenReturn(tds); 
AssignmentManager am = Mockito.mock(AssignmentManager.class); diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index da44052dee..0d6da624d3 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -102,12 +102,9 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); public static String getUserTables(HMaster master, List tables){ if (master.isInitialized()){ try { - Map descriptorMap = master.getTableDescriptors().getAll(); - if (descriptorMap != null) { - for (TableDescriptor desc : descriptorMap.values()) { - if (!desc.getTableName().isSystemTable()) { - tables.add(desc); - } + for (TableDescriptor desc : master.getTableDescriptors().getAll()) { + if (!desc.getTableName().isSystemTable()) { + tables.add(desc); } } } catch (IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MutableTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MutableTableDescriptors.java new file mode 100644 index 0000000000..fff73810a2 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MutableTableDescriptors.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; +import java.util.Optional; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Get, remove and modify table descriptors. + *

+ * Used by servers to host descriptors. + */ +@InterfaceAudience.Private +public interface MutableTableDescriptors extends TableDescriptors { + + /** + * Add or update descriptor + * @param htd Descriptor to set into TableDescriptors + */ + void add(final TableDescriptor htd) throws IOException; + + /** + * @param tablename + * @return Instance of table descriptor or none if not found. + */ + Optional remove(final TableName tablename) throws IOException; + + /** + * Enables the tabledescriptor cache + */ + void setCacheOn() throws IOException; + + /** + * Disables the tabledescriptor cache + */ + void setCacheOff() throws IOException; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java index 5787f66203..ac22842a5d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java @@ -18,65 +18,36 @@ package org.apache.hadoop.hbase; import java.io.IOException; -import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; +import java.util.List; +import java.util.Optional; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** - * Get, remove and modify table descriptors. - * Used by servers to host descriptors. + * Get table descriptors. + *

+ * Can be used by coprocessors. */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving public interface TableDescriptors { + /** * @param tableName * @return TableDescriptor for tablename - * @throws IOException */ - TableDescriptor get(final TableName tableName) - throws IOException; + Optional get(TableName tableName) throws IOException; /** - * Get Map of all NamespaceDescriptors for a given namespace. + * Get all descriptors for a given namespace. * @return Map of all descriptors. - * @throws IOException - */ - Map getByNamespace(String name) - throws IOException; - - /** - * Get Map of all TableDescriptors. Populates the descriptor cache as a - * side effect. - * @return Map of all descriptors. - * @throws IOException - */ - Map getAll() - throws IOException; - - /** - * Add or update descriptor - * @param htd Descriptor to set into TableDescriptors - * @throws IOException - */ - void add(final TableDescriptor htd) - throws IOException; - - /** - * @param tablename - * @return Instance of table descriptor or null if none found. - * @throws IOException - */ - TableDescriptor remove(final TableName tablename) - throws IOException; - - /** - * Enables the tabledescriptor cache */ - void setCacheOn() throws IOException; + List getByNamespace(String name) throws IOException; /** - * Disables the tabledescriptor cache + * Get all TableDescriptors. Populates the descriptor cache as a side effect. + * @return List of all descriptors. 
*/ - void setCacheOff() throws IOException; + List getAll() throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java index cc72871b67..a303e72b98 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.yetus.audience.InterfaceAudience; @@ -96,4 +97,9 @@ public interface MasterCoprocessorEnvironment extends CoprocessorEnvironment map = htds.getAll(); - for (TableDescriptor htd : map.values()) { + for (TableDescriptor htd : master.getTableDescriptors().getAll()) { for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) { if (hcd.isMobEnabled() && hcd.getMinVersions() == 0) { // clean only for mob-enabled column. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 10bfade869..8af77a8b95 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1840,7 +1840,7 @@ public class HMaster extends HRegionServer implements MasterServices { return false; } - TableDescriptor tblDesc = getTableDescriptors().get(table); + TableDescriptor tblDesc = getTableDescriptors().get(table).orElse(null); if (table.isSystemTable() || (tblDesc != null && !tblDesc.isNormalizationEnabled())) { LOG.trace("Skipping normalization for {}, as it's either system" @@ -2498,7 +2498,7 @@ public class HMaster extends HRegionServer implements MasterServices { @Override public TableDescriptor get() throws IOException { - TableDescriptor old = getTableDescriptors().get(tableName); + TableDescriptor old = getTableDescriptors().get(tableName).get(); if (old.hasColumnFamily(column.getName())) { throw new InvalidFamilyOperationException("Column family '" + column.getNameAsString() + "' in table '" + tableName + "' already exists so cannot be added"); @@ -2525,7 +2525,7 @@ public class HMaster extends HRegionServer implements MasterServices { @Override public TableDescriptor get() throws IOException { - TableDescriptor old = getTableDescriptors().get(tableName); + TableDescriptor old = getTableDescriptors().get(tableName).get(); if (!old.hasColumnFamily(descriptor.getName())) { throw new InvalidFamilyOperationException("Family '" + descriptor.getNameAsString() + "' does not exist, so it cannot be modified"); @@ -2546,7 +2546,7 @@ public class HMaster extends HRegionServer implements MasterServices { @Override public TableDescriptor get() throws IOException { - TableDescriptor old = getTableDescriptors().get(tableName); + TableDescriptor old = getTableDescriptors().get(tableName).get(); if 
(!old.hasColumnFamily(columnName)) { throw new InvalidFamilyOperationException("Family '" + Bytes.toString(columnName) @@ -2657,7 +2657,7 @@ public class HMaster extends HRegionServer implements MasterServices { .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { @Override protected void run() throws IOException { - TableDescriptor oldDescriptor = getMaster().getTableDescriptors().get(tableName); + TableDescriptor oldDescriptor = getMaster().getTableDescriptors().get(tableName).get(); TableDescriptor newDescriptor = getMaster().getMasterCoprocessorHost() .preModifyTable(tableName, oldDescriptor, newDescriptorGetter.get()); sanityCheckTableDescriptor(newDescriptor); @@ -3504,9 +3504,9 @@ public class HMaster extends HRegionServer implements MasterServices { if (namespace != null && namespace.length() > 0) { // Do a check on the namespace existence. Will fail if does not exist. this.clusterSchemaService.getNamespace(namespace); - allHtds = tableDescriptors.getByNamespace(namespace).values(); + allHtds = tableDescriptors.getByNamespace(namespace); } else { - allHtds = tableDescriptors.getAll().values(); + allHtds = tableDescriptors.getAll(); } for (TableDescriptor desc: allHtds) { if (tableStateManager.isTablePresent(desc.getTableName()) @@ -3517,10 +3517,7 @@ public class HMaster extends HRegionServer implements MasterServices { } else { for (TableName s: tableNameList) { if (tableStateManager.isTablePresent(s)) { - TableDescriptor desc = tableDescriptors.get(s); - if (desc != null) { - htds.add(desc); - } + tableDescriptors.get(s).ifPresent(htds::add); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 8764143d1b..e84f09dbbc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.MetaMutationAnnotation; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SharedConnection; +import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.MasterSwitchType; @@ -116,6 +117,11 @@ public class MasterCoprocessorHost super.shutdown(); MetricsCoprocessor.removeRegistry(this.metricRegistry); } + + @Override + public TableDescriptors getTableDescriptors() { + return services.getTableDescriptors(); + } } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 9e9f278b30..62281c9f2b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -299,7 +299,7 @@ public class MasterFileSystem { // meta table is a system table, so descriptors are predefined, // we should get them from registry. FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd); - fsd.createTableDescriptor(fsd.get(TableName.META_TABLE_NAME)); + fsd.createTableDescriptor(fsd.get(TableName.META_TABLE_NAME).get()); return rd; } @@ -398,9 +398,10 @@ public class MasterFileSystem { // created here in bootstrap and it'll need to be cleaned up. Better to // not make it in first place. Turn off block caching for bootstrap. // Enable after. 
- TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME); - HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rd, - c, setInfoFamilyCachingForMeta(metaDescriptor, false), null); + TableDescriptor metaDescriptor = + new FSTableDescriptors(c).get(TableName.META_TABLE_NAME).get(); + HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rd, c, + setInfoFamilyCachingForMeta(metaDescriptor, false), null); meta.close(); } catch (IOException e) { e = e instanceof RemoteException ? diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index e5fc0b83c9..c0a6b41e08 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1716,7 +1716,8 @@ public class MasterRpcServices extends RSRpcServices } boolean allFiles = false; List compactedColumns = new ArrayList<>(); - ColumnFamilyDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies(); + ColumnFamilyDescriptor[] hcds = + master.getTableDescriptors().get(tableName).get().getColumnFamilies(); byte[] family = null; if (request.hasFamily()) { family = request.getFamily().toByteArray(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 12c78ac8f6..066ccb1162 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -24,7 +24,7 @@ import java.util.List; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableDescriptors; +import 
org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; @@ -320,7 +320,7 @@ public interface MasterServices extends Server { /** * @return Return table descriptors implementation. */ - TableDescriptors getTableDescriptors(); + MutableTableDescriptors getTableDescriptors(); /** * Registers a new protocol buffer {@link Service} subclass as a master coprocessor endpoint. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java index 6c5d677a86..cb6b1a77e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java @@ -23,7 +23,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.ScheduledChore; -import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,9 +54,8 @@ public class MobCompactionChore extends ScheduledChore { @Override protected void chore() { try { - TableDescriptors htds = master.getTableDescriptors(); - Map map = htds.getAll(); - for (TableDescriptor htd : map.values()) { + MutableTableDescriptors htds = master.getTableDescriptors(); + for (TableDescriptor htd : htds.getAll()) { if (!master.getTableStateManager().isTableState(htd.getTableName(), TableState.State.ENABLED)) { continue; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java index 580e726b87..e5996072ec 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java @@ -27,7 +27,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.ReadWriteLock; import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Connection; @@ -263,34 +263,33 @@ public class TableStateManager { } public void start() throws IOException { - TableDescriptors tableDescriptors = master.getTableDescriptors(); + MutableTableDescriptors tableDescriptors = master.getTableDescriptors(); migrateZooKeeper(); Connection connection = master.getConnection(); fixTableStates(tableDescriptors, connection); } - private void fixTableStates(TableDescriptors tableDescriptors, Connection connection) + private void fixTableStates(MutableTableDescriptors tableDescriptors, Connection connection) throws IOException { - Map allDescriptors = tableDescriptors.getAll(); - Map states = new HashMap<>(); + Map states = new HashMap<>(); // NOTE: Full hbase:meta table scan! MetaTableAccessor.fullScanTables(connection, new MetaTableAccessor.Visitor() { @Override public boolean visit(Result r) throws IOException { TableState state = MetaTableAccessor.getTableState(r); - states.put(state.getTableName().getNameAsString(), state); + states.put(state.getTableName(), state); return true; } }); - for (Map.Entry entry : allDescriptors.entrySet()) { - TableName tableName = TableName.valueOf(entry.getKey()); + for (TableDescriptor td : tableDescriptors.getAll()) { + TableName tableName = td.getTableName(); if (TableName.isMetaTableName(tableName)) { // This table is always enabled. No fixup needed. No entry in hbase:meta needed. 
// Call through to fixTableState though in case a super class wants to do something. fixTableState(new TableState(tableName, TableState.State.ENABLED)); continue; } - TableState tableState = states.get(entry.getKey()); + TableState tableState = states.get(td.getTableName()); if (tableState == null) { LOG.warn(tableName + " has no table state in hbase:meta, assuming ENABLED"); MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index 241e8f94ef..e634bb36b9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -523,7 +523,7 @@ public class MergeTableRegionsProcedure return false; } - if (!env.getMasterServices().getTableDescriptors().get(getTableName()).isMergeEnabled()) { + if (!env.getMasterServices().getTableDescriptors().get(getTableName()).get().isMergeEnabled()) { String regionsStr = Arrays.deepToString(regionsToMerge); LOG.warn("Merge is disabled for the table! 
Skipping merge of {}", regionsStr); super.setFailure(getClass().getSimpleName(), new IOException( @@ -644,7 +644,8 @@ public class MergeTableRegionsProcedure throws IOException { final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); final Configuration conf = env.getMasterConfiguration(); - final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); + final TableDescriptor htd = + env.getMasterServices().getTableDescriptors().get(getTableName()).get(); for (String family : regionFs.getFamilies()) { final ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(family)); @@ -697,7 +698,8 @@ public class MergeTableRegionsProcedure } private int getRegionReplication(final MasterProcedureEnv env) throws IOException { - final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); + final TableDescriptor htd = + env.getMasterServices().getTableDescriptors().get(getTableName()).get(); return htd.getRegionReplication(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index ce4bc38cd3..7de8dfade3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -281,7 +281,7 @@ public class RegionStateStore { } private TableDescriptor getTableDescriptor(TableName tableName) throws IOException { - return master.getTableDescriptors().get(tableName); + return master.getTableDescriptors().get(tableName).orElse(null); } // ========================================================================== diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index 8e0dcd3fde..5749267763 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -125,7 +125,7 @@ public class SplitTableRegionProcedure .setSplit(false) .setRegionId(rid) .build(); - TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); + TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()).get(); if(htd.getRegionSplitPolicyClassName() != null) { // Since we don't have region reference here, creating the split policy instance without it. // This can be used to invoke methods which don't require Region reference. This instantiation @@ -504,11 +504,11 @@ public class SplitTableRegionProcedure return false; } - if (!env.getMasterServices().getTableDescriptors().get(getTableName()).isSplitEnabled()) { + if (!env.getMasterServices().getTableDescriptors().get(getTableName()).get().isSplitEnabled()) { LOG.warn("pid={}, split is disabled for the table! Skipping split of {}", getProcId(), parentHRI); - setFailure(new IOException("Split region " + parentHRI.getRegionNameAsString() - + " failed as region split is disabled for the table")); + setFailure(new IOException("Split region " + parentHRI.getRegionNameAsString() + + " failed as region split is disabled for the table")); return false; } @@ -644,7 +644,7 @@ public class SplitTableRegionProcedure maxThreads, Threads.getNamedThreadFactory("StoreFileSplitter-%1$d")); final List>> futures = new ArrayList>>(nbFiles); - TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); + TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()).get(); // Split each store file. 
for (Map.Entry> e : files.entrySet()) { byte[] familyName = Bytes.toBytes(e.getKey()); @@ -839,7 +839,8 @@ public class SplitTableRegionProcedure } private int getRegionReplication(final MasterProcedureEnv env) throws IOException { - final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); + final TableDescriptor htd = + env.getMasterServices().getTableDescriptors().get(getTableName()).get(); return htd.getRegionReplication(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java index fb7731fa75..82710a4992 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java @@ -210,14 +210,12 @@ class RegionLocationFinder { * return TableDescriptor for a given tableName * * @param tableName the table name - * @return TableDescriptor - * @throws IOException */ protected TableDescriptor getTableDescriptor(TableName tableName) throws IOException { TableDescriptor tableDescriptor = null; try { if (this.services != null && this.services.getTableDescriptors() != null) { - tableDescriptor = this.services.getTableDescriptors().get(tableName); + tableDescriptor = this.services.getTableDescriptors().get(tableName).orElse(null); } } catch (FileNotFoundException fnfe) { LOG.debug("tableName={}", tableName, fnfe); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index a30a13be24..2c436d2abc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ 
-161,7 +161,8 @@ public class SimpleRegionNormalizer implements RegionNormalizer { int targetRegionCount = -1; long targetRegionSize = -1; try { - TableDescriptor tableDescriptor = masterServices.getTableDescriptors().get(table); + TableDescriptor tableDescriptor = + masterServices.getTableDescriptors().get(table).orElse(null); if(tableDescriptor != null) { targetRegionCount = tableDescriptor.getNormalizerTargetRegionCount(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java index 9cacc5d6b9..b22c957975 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java @@ -109,7 +109,7 @@ public class DisableTableProcedure setNextState(DisableTableState.DISABLE_TABLE_ADD_REPLICATION_BARRIER); break; case DISABLE_TABLE_ADD_REPLICATION_BARRIER: - if (env.getMasterServices().getTableDescriptors().get(tableName) + if (env.getMasterServices().getTableDescriptors().get(tableName).get() .hasGlobalReplicationScope()) { FileSystem walFS = env.getMasterServices().getMasterWalManager().getFileSystem(); try (BufferedMutator mutator = env.getMasterServices().getConnection() diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java index 3994304d42..1cc992c1ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java @@ -107,7 +107,7 @@ public class EnableTableProcedure // we will need to get the tableDescriptor here to see if there is a change in the replica // count TableDescriptor 
hTableDescriptor = - env.getMasterServices().getTableDescriptors().get(tableName); + env.getMasterServices().getTableDescriptors().get(tableName).get(); // Get the replica count int regionReplicaCount = hTableDescriptor.getRegionReplication(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index dd834db2fe..7eddde320a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -263,17 +263,17 @@ public class ModifyTableProcedure // for comparison in order to update the descriptor. if (shouldCheckDescriptor) { if (TableDescriptor.COMPARATOR.compare(unmodifiedTableDescriptor, - env.getMasterServices().getTableDescriptors().get(getTableName())) != 0) { - LOG.error("Error while modifying table '" + getTableName().toString() - + "' Skipping procedure : " + this); + env.getMasterServices().getTableDescriptors().get(getTableName()).get()) != 0) { + LOG.error("Error while modifying table '" + getTableName().toString() + + "' Skipping procedure : " + this); throw new ConcurrentTableModificationException( - "Skipping modify table operation on table '" + getTableName().toString() - + "' as it has already been modified by some other concurrent operation, " - + "Please retry."); + "Skipping modify table operation on table '" + getTableName().toString() + + "' as it has already been modified by some other concurrent operation, " + + "Please retry."); } } else { this.unmodifiedTableDescriptor = - env.getMasterServices().getTableDescriptors().get(getTableName()); + env.getMasterServices().getTableDescriptors().get(getTableName()).get(); } if (env.getMasterServices().getTableStateManager() diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java index 52da607ef8..b4244f2337 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java @@ -102,7 +102,7 @@ public class TruncateTableProcedure break; case TRUNCATE_TABLE_REMOVE_FROM_META: tableDescriptor = env.getMasterServices().getTableDescriptors() - .get(tableName); + .get(tableName).get(); DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions); DeleteTableProcedure.deleteAssignmentState(env, getTableName()); setNextState(TruncateTableState.TRUNCATE_TABLE_CLEAR_FS_LAYOUT); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java index 755e0a3dc5..3b8c3ddfa6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java @@ -113,7 +113,7 @@ public abstract class AbstractPeerProcedure extends AbstractPeerNoLockPr protected final void setLastPushedSequenceId(MasterProcedureEnv env, ReplicationPeerConfig peerConfig) throws IOException, ReplicationException { Map lastSeqIds = new HashMap(); - for (TableDescriptor td : env.getMasterServices().getTableDescriptors().getAll().values()) { + for (TableDescriptor td : env.getMasterServices().getTableDescriptors().getAll()) { if (!td.hasGlobalReplicationScope()) { continue; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java index d5d2779694..9701db9a9d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java @@ -140,7 +140,7 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure lastSeqIds = new HashMap(); List encodedRegionNames = new ArrayList<>(); - for (TableDescriptor td : env.getMasterServices().getTableDescriptors().getAll().values()) { + for (TableDescriptor td : env.getMasterServices().getTableDescriptors().getAll()) { if (!td.hasGlobalReplicationScope()) { continue; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 05db4abc82..c67480c540 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -611,7 +611,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable TableDescriptor desc = null; try { desc = master.getTableDescriptors().get( - TableName.valueOf(snapshot.getTable())); + TableName.valueOf(snapshot.getTable())).orElse(null); } catch (FileNotFoundException e) { String msg = "Table:" + snapshot.getTable() + " info doesn't exist!"; LOG.error(msg); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java index 3b7d65a495..2cd13360f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java @@ -138,10 +138,8 @@ 
public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh "Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable); } - private TableDescriptor loadTableDescriptor() - throws FileNotFoundException, IOException { - TableDescriptor htd = - this.master.getTableDescriptors().get(snapshotTable); + private TableDescriptor loadTableDescriptor() throws FileNotFoundException, IOException { + TableDescriptor htd = this.master.getTableDescriptors().get(snapshotTable).orElse(null); if (htd == null) { throw new IOException("TableDescriptor missing for " + snapshotTable); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index f983882b6f..469f69d472 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -78,7 +78,7 @@ import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; -import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.YouAreDeadException; import org.apache.hadoop.hbase.ZNodeClearer; @@ -270,7 +270,7 @@ public class HRegionServer extends HasThread implements /** * Go here to get table descriptors. */ - protected TableDescriptors tableDescriptors; + protected MutableTableDescriptors tableDescriptors; // Replication services. If no replication, this handler will be null. 
protected ReplicationSourceService replicationSourceHandler; @@ -717,7 +717,7 @@ public class HRegionServer extends HasThread implements this.tableDescriptors = getFsTableDescriptors(); } - protected TableDescriptors getFsTableDescriptors() throws IOException { + protected MutableTableDescriptors getFsTableDescriptors() throws IOException { return new FSTableDescriptors(this.conf, this.fs, this.rootDir, !canUpdateTableDescriptor(), false, getMetaTableObserver()); } @@ -3664,7 +3664,7 @@ public class HRegionServer extends HasThread implements * @return Return table descriptors implementation. */ @Override - public TableDescriptors getTableDescriptors() { + public MutableTableDescriptors getTableDescriptors() { return this.tableDescriptors; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 9b99ff82d6..a92afd7ae6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -2069,7 +2069,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (previous == null || !previous.booleanValue()) { htd = htds.get(region.getTable()); if (htd == null) { - htd = regionServer.tableDescriptors.get(region.getTable()); + htd = regionServer.tableDescriptors.get(region.getTable()).orElse(null); htds.put(region.getTable(), htd); } if (htd == null) { @@ -2146,7 +2146,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, LOG.debug("Warming up Region " + region.getRegionNameAsString()); } - htd = regionServer.tableDescriptors.get(region.getTable()); + htd = regionServer.tableDescriptors.get(region.getTable()).get(); if (regionServer.getRegionsInTransitionInRS().containsKey(encodedNameBytes)) { LOG.info("Region is in transition. 
Skipping warmup " + region); @@ -3700,7 +3700,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, TableDescriptor tableDesc = tdCache.get(regionInfo.getTable()); if (tableDesc == null) { try { - tableDesc = regionServer.getTableDescriptors().get(regionInfo.getTable()); + tableDesc = regionServer.getTableDescriptors().get(regionInfo.getTable()).orElse(null); } catch (IOException e) { // Here we do not fail the whole method since we also need deal with other // procedures, and we can not ignore this one, so we still schedule a diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index e0638acb16..b324339748 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -27,7 +27,7 @@ import java.util.Optional; import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.locking.EntityLock; @@ -268,7 +268,7 @@ public interface RegionServerServices extends Server, MutableOnlineRegions, Favo /** * @return Return table descriptors implementation. */ - TableDescriptors getTableDescriptors(); + MutableTableDescriptors getTableDescriptors(); /** * @return The block cache instance. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java index c6fee2e57a..1cd63b3e12 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java @@ -120,8 +120,8 @@ public class AssignRegionHandler extends EventHandler { } HRegion region; try { - TableDescriptor htd = - tableDesc != null ? tableDesc : rs.getTableDescriptors().get(regionInfo.getTable()); + TableDescriptor htd = tableDesc != null ? tableDesc + : rs.getTableDescriptors().get(regionInfo.getTable()).orElse(null); if (htd == null) { throw new IOException("Missing table descriptor for " + regionName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java index f4c37b1ea8..675d7622c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java @@ -29,7 +29,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; @@ -56,7 +56,7 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener { private final Configuration localConf; private final Configuration conf; private final FileSystem fs; - private final TableDescriptors tableDescriptors; + private final 
MutableTableDescriptors tableDescriptors; private final ReplicationPeer replicationPeer; private final String peerId; private final UUID clusterId; @@ -72,7 +72,7 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener { final UUID clusterId, final ReplicationPeer replicationPeer, final MetricsSource metrics, - final TableDescriptors tableDescriptors, + final MutableTableDescriptors tableDescriptors, final Abortable abortable) { this.localConf = localConf; this.conf = conf; @@ -108,7 +108,7 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener { public MetricsSource getMetrics() { return metrics; } - public TableDescriptors getTableDescriptors() { + public MutableTableDescriptors getTableDescriptors() { return tableDescriptors; } public Abortable getAbortable() { return abortable; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java index f7721e0934..5e726ed0b8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.ClusterConnection; @@ -92,7 +92,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { private Configuration conf; private ClusterConnection connection; - 
private TableDescriptors tableDescriptors; + private MutableTableDescriptors tableDescriptors; // Reuse WALSplitter constructs as a WAL pipe private PipelineController controller; @@ -269,10 +269,10 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { static class RegionReplicaOutputSink extends OutputSink { private final RegionReplicaSinkWriter sinkWriter; - private final TableDescriptors tableDescriptors; + private final MutableTableDescriptors tableDescriptors; private final Cache memstoreReplicationEnabled; - public RegionReplicaOutputSink(PipelineController controller, TableDescriptors tableDescriptors, + public RegionReplicaOutputSink(PipelineController controller, MutableTableDescriptors tableDescriptors, EntryBuffers entryBuffers, ClusterConnection connection, ExecutorService pool, int numWriters, int operationTimeout) { super(controller, entryBuffers, numWriters); @@ -358,8 +358,8 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { if (requiresReplication == null) { // check if the table requires memstore replication // some unit-test drop the table, so we should do a bypass check and always replicate. 
- TableDescriptor htd = tableDescriptors.get(tableName); - requiresReplication = htd == null || htd.hasRegionMemStoreReplication(); + requiresReplication = tableDescriptors.get(tableName) + .map(TableDescriptor::hasRegionMemStoreReplication).orElse(true); memstoreReplicationEnabled.put(tableName, requiresReplication); } @@ -391,10 +391,10 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { int operationTimeout; ExecutorService pool; Cache disabledAndDroppedTables; - TableDescriptors tableDescriptors; + MutableTableDescriptors tableDescriptors; public RegionReplicaSinkWriter(RegionReplicaOutputSink sink, ClusterConnection connection, - ExecutorService pool, int operationTimeout, TableDescriptors tableDescriptors) { + ExecutorService pool, int operationTimeout, MutableTableDescriptors tableDescriptors) { this.sink = sink; this.connection = connection; this.operationTimeout = operationTimeout; @@ -529,7 +529,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache for later.
canBeSkipped = true; } else if (tableDescriptors != null) { - TableDescriptor tableDescriptor = tableDescriptors.get(tableName); + TableDescriptor tableDescriptor = tableDescriptors.get(tableName).orElse(null); if (tableDescriptor != null //(replicaId + 1) as no task is added for primary replica for replication && tableDescriptor.getRegionReplication() <= (replicaId + 1)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 8e001e6b4d..3ea8c31cae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RSRpcServices; @@ -278,7 +278,7 @@ public class ReplicationSource implements ReplicationSourceInterface { private void initAndStartReplicationEndpoint(ReplicationEndpoint replicationEndpoint) throws IOException, TimeoutException { - TableDescriptors tableDescriptors = null; + MutableTableDescriptors tableDescriptors = null; if (server instanceof HRegionServer) { tableDescriptors = ((HRegionServer) server).getTableDescriptors(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 2898a719a0..ac1f626805 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -2368,15 +2368,14 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, if (regex == null && tableNamesList != null && !tableNamesList.isEmpty()) { // Otherwise, if the requestor has ADMIN or CREATE privs for all listed tables, the // request can be granted. - TableName [] sns = null; try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) { - sns = admin.listTableNames(); - if (sns == null) return; - for (TableName tableName: tableNamesList) { + for (TableName tableName : tableNamesList) { // Skip checks for a table that does not exist - if (!admin.tableExists(tableName)) continue; - requirePermission(ctx, "getTableDescriptors", tableName, null, null, - Action.ADMIN, Action.CREATE); + if (!admin.tableExists(tableName)) { + continue; + } + requirePermission(ctx, "getTableDescriptors", tableName, null, null, Action.ADMIN, + Action.CREATE); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 3dce0de4af..f6ef9c4537 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -19,16 +19,15 @@ package org.apache.hadoop.hbase.util; import java.io.FileNotFoundException; import java.io.IOException; +import java.util.ArrayList; import java.util.Comparator; import java.util.List; import java.util.Map; -import java.util.TreeMap; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern; - -import edu.umd.cs.findbugs.annotations.Nullable; import 
org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -37,26 +36,27 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder; -import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MutableTableDescriptors; +import org.apache.hadoop.hbase.TableInfoMissingException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.primitives.Ints; -import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.TableInfoMissingException; -import org.apache.hadoop.hbase.TableName; /** - * Implementation of {@link TableDescriptors} that reads descriptors from the + * Implementation of {@link MutableTableDescriptors} that reads descriptors from the * passed filesystem. 
It expects descriptors to be in a file in the * {@link #TABLEINFO_DIR} subdir of the table's directory in FS. Can be read-only * -- i.e. does not modify the filesystem or can be read and write. @@ -74,7 +74,7 @@ import org.apache.hadoop.hbase.TableName; * the below needs a bit of a reworking and perhaps some supporting api in hdfs. */ @InterfaceAudience.Private -public class FSTableDescriptors implements TableDescriptors { +public class FSTableDescriptors implements MutableTableDescriptors { private static final Logger LOG = LoggerFactory.getLogger(FSTableDescriptors.class); private final FileSystem fs; private final Path rootdir; @@ -218,24 +218,22 @@ public class FSTableDescriptors implements TableDescriptors { } /** - * Get the current table descriptor for the given table, or null if none exists. - * - * Uses a local cache of the descriptor but still checks the filesystem on each call - * to see if a newer file has been created since the cached one was read. + * Get the current table descriptor for the given table. + *

+ * Uses a local cache of the descriptor but still checks the filesystem on each call to see if a + * newer file has been created since the cached one was read. */ @Override - @Nullable - public TableDescriptor get(final TableName tablename) - throws IOException { + public Optional get(TableName tablename) throws IOException { invocations++; if (TableName.META_TABLE_NAME.equals(tablename)) { cachehits++; - return metaTableDescriptor; + return Optional.of(metaTableDescriptor); } // hbase:meta is already handled. If some one tries to get the descriptor for // .logs, .oldlogs or .corrupt throw an exception. if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) { - throw new IOException("No descriptor found for non table = " + tablename); + throw new IOException("No descriptor found for non table = " + tablename); } if (usecache) { @@ -243,59 +241,53 @@ public class FSTableDescriptors implements TableDescriptors { TableDescriptor cachedtdm = this.cache.get(tablename); if (cachedtdm != null) { cachehits++; - return cachedtdm; + return Optional.of(cachedtdm); } } TableDescriptor tdmt = null; try { tdmt = getTableDescriptorFromFs(fs, rootdir, tablename); } catch (NullPointerException e) { - LOG.debug("Exception during readTableDecriptor. Current table name = " - + tablename, e); + LOG.debug("Exception during readTableDecriptor. Current table name = " + tablename, e); } catch (TableInfoMissingException e) { // ignore. This is regular operation } catch (IOException ioe) { - LOG.debug("Exception during readTableDecriptor. Current table name = " - + tablename, ioe); + LOG.debug("Exception during readTableDecriptor. Current table name = " + tablename, ioe); } // last HTD written wins if (usecache && tdmt != null) { this.cache.put(tablename, tdmt); } - return tdmt; + return Optional.ofNullable(tdmt); } /** * Returns a map from table name to table descriptor for all tables. 
*/ @Override - public Map getAll() - throws IOException { - Map tds = new TreeMap<>(); - + public List getAll() throws IOException { + List tds = new ArrayList<>(); if (fsvisited && usecache) { - for (Map.Entry entry: this.cache.entrySet()) { - tds.put(entry.getKey().toString(), entry.getValue()); - } + tds.addAll(this.cache.values()); // add hbase:meta to the response - tds.put(this.metaTableDescriptor.getTableName().getNameAsString(), metaTableDescriptor); + tds.add(metaTableDescriptor); } else { LOG.trace("Fetching table descriptors from the filesystem."); boolean allvisited = true; for (Path d : FSUtils.getTableDirs(fs, rootdir)) { - TableDescriptor htd = null; + Optional htd = Optional.empty(); try { htd = get(FSUtils.getTableName(d)); } catch (FileNotFoundException fnfe) { // inability of retrieving one HTD shouldn't stop getting the remaining LOG.warn("Trouble retrieving htd", fnfe); } - if (htd == null) { + if (htd.isPresent()) { + tds.add(htd.get()); + } else { allvisited = false; continue; - } else { - tds.put(htd.getTableName().getNameAsString(), htd); } fsvisited = allvisited; } @@ -305,24 +297,20 @@ public class FSTableDescriptors implements TableDescriptors { /** * Find descriptors by namespace. 
- * @see #get(org.apache.hadoop.hbase.TableName) */ @Override - public Map getByNamespace(String name) - throws IOException { - Map htds = new TreeMap<>(); - List tableDirs = - FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name)); - for (Path d: tableDirs) { - TableDescriptor htd = null; + public List getByNamespace(String name) throws IOException { + List htds = new ArrayList<>(); + List tableDirs = FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name)); + for (Path d : tableDirs) { + Optional htd = Optional.empty(); try { htd = get(FSUtils.getTableName(d)); } catch (FileNotFoundException fnfe) { // inability of retrieving one HTD shouldn't stop getting the remaining LOG.warn("Trouble retrieving htd", fnfe); } - if (htd == null) continue; - htds.put(FSUtils.getTableName(d).getNameAsString(), htd); + htd.ifPresent(htds::add); } return htds; } @@ -349,13 +337,11 @@ public class FSTableDescriptors implements TableDescriptors { } /** - * Removes the table descriptor from the local cache and returns it. - * If not in read only mode, it also deletes the entire table directory(!) - * from the FileSystem. + * Removes the table descriptor from the local cache and returns it. If not in read only mode, it + * also deletes the entire table directory(!) from the FileSystem. 
*/ @Override - public TableDescriptor remove(final TableName tablename) - throws IOException { + public Optional remove(final TableName tablename) throws IOException { if (fsreadonly) { throw new NotImplementedException("Cannot remove a table descriptor - in read only mode"); } @@ -365,8 +351,7 @@ public class FSTableDescriptors implements TableDescriptors { throw new IOException("Failed delete of " + tabledir.toString()); } } - TableDescriptor descriptor = this.cache.remove(tablename); - return descriptor; + return Optional.ofNullable(this.cache.remove(tablename)); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 9e5f9e8c91..b2f9824240 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -1536,7 +1536,7 @@ public class HBaseFsck extends Configured implements Closeable { Path rootdir = FSUtils.getRootDir(getConf()); Configuration c = getConf(); RegionInfo metaHRI = RegionInfoBuilder.FIRST_META_REGIONINFO; - TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME); + TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME).get(); MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false); // The WAL subsystem will use the default rootDir rather than the passed in rootDir // unless I pass along via the conf. 
@@ -2723,8 +2723,7 @@ public class HBaseFsck extends Configured implements Closeable { * regions reported for the table, but table dir is there in hdfs */ private void loadTableInfosForTablesWithNoRegion() throws IOException { - Map allTables = new FSTableDescriptors(getConf()).getAll(); - for (TableDescriptor htd : allTables.values()) { + for (TableDescriptor htd : new FSTableDescriptors(getConf()).getAll()) { if (checkMetaOnly && !htd.isMetaTable()) { continue; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java index 706a75f287..a83e95ed3b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java @@ -424,7 +424,7 @@ public abstract class HBaseTestCase extends TestCase { protected void createMetaRegion() throws IOException { FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf); meta = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, testDir, - conf, fsTableDescriptors.get(TableName.META_TABLE_NAME)); + conf, fsTableDescriptors.get(TableName.META_TABLE_NAME).get()); } protected void closeRootAndMeta() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index 0e4f241d12..4540818e33 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -355,7 +355,7 @@ public class MockRegionServerServices implements RegionServerServices { } @Override - public TableDescriptors getTableDescriptors() { + public MutableTableDescriptors getTableDescriptors() { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 9c55f57212..2d1afdd644 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -27,7 +27,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -202,7 +202,7 @@ public class MockNoopMasterServices implements MasterServices { } @Override - public TableDescriptors getTableDescriptors() { + public MutableTableDescriptors getTableDescriptors() { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index a930d7fb57..bbb7366a66 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.ClusterConnection; @@ -708,7 +708,7 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface, } @Override - public TableDescriptors 
getTableDescriptors() { + public MutableTableDescriptors getTableDescriptors() { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java index 56467cc6d6..ca74502724 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java @@ -21,15 +21,15 @@ import static org.mockito.ArgumentMatchers.any; import java.io.IOException; import java.util.List; -import java.util.Map; import java.util.NavigableMap; +import java.util.Optional; import java.util.SortedSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.hadoop.hbase.ServerMetricsBuilder; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -309,29 +309,29 @@ public class MockMasterServices extends MockNoopMasterServices { } @Override - public TableDescriptors getTableDescriptors() { - return new TableDescriptors() { + public MutableTableDescriptors getTableDescriptors() { + return new MutableTableDescriptors() { @Override - public TableDescriptor remove(TableName tablename) throws IOException { + public Optional remove(TableName tablename) throws IOException { // noop return null; } @Override - public Map getAll() throws IOException { + public List getAll() throws IOException { // noop return null; } @Override - public TableDescriptor get(TableName tablename) throws IOException { + public Optional get(TableName tablename) throws IOException { 
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tablename); builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(DEFAULT_COLUMN_FAMILY_NAME)); - return builder.build(); + return Optional.of(builder.build()); } @Override - public Map getByNamespace(String name) throws IOException { + public List getByNamespace(String name) throws IOException { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index 743ec2be48..337979a8d6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -19,23 +19,28 @@ package org.apache.hadoop.hbase.master.normalizer; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.RETURNS_DEEP_STUBS; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.hadoop.hbase.RegionMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Size; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.MasterRpcServices; import 
org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -368,8 +373,17 @@ public class TestSimpleRegionNormalizer { assertEquals(hri4, ((SplitNormalizationPlan) plan).getRegionInfo()); } + + private void mockTargetRegionSize(long targetRegionSize) throws IOException { + TableDescriptor td = mock(TableDescriptor.class); + when(td.getNormalizerTargetRegionSize()).thenReturn(targetRegionSize); + MutableTableDescriptors tds = mock(MutableTableDescriptors.class); + when(tds.get(any())).thenReturn(Optional.of(td)); + when(masterServices.getTableDescriptors()).thenReturn(tds); + } + @Test - public void testSplitWithTargetRegionCount() throws Exception { + public void testSplitWithTargetRegionSize() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); List RegionInfo = new ArrayList<>(); Map regionSizes = new HashMap<>(); @@ -407,8 +421,7 @@ public class TestSimpleRegionNormalizer { setupMocksForNormalizer(regionSizes, RegionInfo); // test when target region size is 20 - when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionSize()) - .thenReturn(20L); + mockTargetRegionSize(20); List plans = normalizer.computePlanForTable(tableName); Assert.assertEquals(4, plans.size()); @@ -417,8 +430,7 @@ public class TestSimpleRegionNormalizer { } // test when target region size is 200 - when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionSize()) - .thenReturn(200L); + mockTargetRegionSize(200); plans = normalizer.computePlanForTable(tableName); Assert.assertEquals(2, plans.size()); NormalizationPlan plan = plans.get(0); @@ -427,8 +439,16 @@ public class TestSimpleRegionNormalizer { assertEquals(hri2, ((MergeNormalizationPlan) plan).getSecondRegion()); } + private void mockTargetRegionCount(int targetRegionCount) throws IOException { + TableDescriptor td = mock(TableDescriptor.class); + 
when(td.getNormalizerTargetRegionCount()).thenReturn(targetRegionCount); + MutableTableDescriptors tds = mock(MutableTableDescriptors.class); + when(tds.get(any())).thenReturn(Optional.of(td)); + when(masterServices.getTableDescriptors()).thenReturn(tds); + } + @Test - public void testSplitWithTargetRegionSize() throws Exception { + public void testSplitWithTargetRegionCount() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); List RegionInfo = new ArrayList<>(); Map regionSizes = new HashMap<>(); @@ -456,8 +476,7 @@ public class TestSimpleRegionNormalizer { setupMocksForNormalizer(regionSizes, RegionInfo); // test when target region count is 8 - when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionCount()) - .thenReturn(8); + mockTargetRegionCount(8); List plans = normalizer.computePlanForTable(tableName); Assert.assertEquals(2, plans.size()); @@ -466,8 +485,7 @@ public class TestSimpleRegionNormalizer { } // test when target region count is 3 - when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionCount()) - .thenReturn(3); + mockTargetRegionCount(3); plans = normalizer.computePlanForTable(tableName); Assert.assertEquals(1, plans.size()); NormalizationPlan plan = plans.get(0); @@ -476,11 +494,10 @@ public class TestSimpleRegionNormalizer { assertEquals(hri2, ((MergeNormalizationPlan) plan).getSecondRegion()); } - @SuppressWarnings("MockitoCast") - protected void setupMocksForNormalizer(Map regionSizes, + private void setupMocksForNormalizer(Map regionSizes, List RegionInfo) { - masterServices = Mockito.mock(MasterServices.class, RETURNS_DEEP_STUBS); - masterRpcServices = Mockito.mock(MasterRpcServices.class, RETURNS_DEEP_STUBS); + masterServices = mock(MasterServices.class, RETURNS_DEEP_STUBS); + masterRpcServices = mock(MasterRpcServices.class, RETURNS_DEEP_STUBS); // for simplicity all regions are assumed to be on one server; doesn't matter to us ServerName sn = 
ServerName.valueOf("localhost", 0, 1L); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index b4d16c6544..9ce509a833 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -213,7 +213,7 @@ public class MasterProcedureTestingUtility { assertEquals(regions.length, countMetaRegions(master, tableName)); // check htd - TableDescriptor htd = master.getTableDescriptors().get(tableName); + TableDescriptor htd = master.getTableDescriptors().get(tableName).orElse(null); assertTrue("table descriptor not found", htd != null); for (int i = 0; i < family.length; ++i) { assertTrue("family not found " + family[i], htd.getColumnFamily(Bytes.toBytes(family[i])) != null); @@ -286,7 +286,7 @@ public class MasterProcedureTestingUtility { public static void validateColumnFamilyAddition(final HMaster master, final TableName tableName, final String family) throws IOException { - TableDescriptor htd = master.getTableDescriptors().get(tableName); + TableDescriptor htd = master.getTableDescriptors().get(tableName).orElse(null); assertTrue(htd != null); assertTrue(htd.hasColumnFamily(Bytes.toBytes(family))); @@ -295,7 +295,7 @@ public class MasterProcedureTestingUtility { public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName, final String family) throws IOException { // verify htd - TableDescriptor htd = master.getTableDescriptors().get(tableName); + TableDescriptor htd = master.getTableDescriptors().get(tableName).orElse(null); assertTrue(htd != null); assertFalse(htd.hasColumnFamily(Bytes.toBytes(family))); @@ -311,7 +311,7 @@ public class MasterProcedureTestingUtility { public static void 
validateColumnFamilyModification(final HMaster master, final TableName tableName, final String family, ColumnFamilyDescriptor columnDescriptor) throws IOException { - TableDescriptor htd = master.getTableDescriptors().get(tableName); + TableDescriptor htd = master.getTableDescriptors().get(tableName).orElse(null); assertTrue(htd != null); ColumnFamilyDescriptor hcfd = htd.getColumnFamily(Bytes.toBytes(family)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java index 50b675dfb0..b14b7cb08f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java @@ -78,12 +78,12 @@ public class TestHRegionInfo { // Create a region. That'll write the .regioninfo file. FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration()); HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, basedir, htu.getConfiguration(), - fsTableDescriptors.get(TableName.META_TABLE_NAME)); + fsTableDescriptors.get(TableName.META_TABLE_NAME).get()); // Get modtime on the file. long modtime = getModTime(r); HBaseTestingUtility.closeRegionAndWAL(r); Thread.sleep(1001); - r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), + r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME).get(), null, htu.getConfiguration()); // Ensure the file is not written for a second time. 
long modtime2 = getModTime(r); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java index 3f778c238a..ddf65a9e65 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java @@ -104,13 +104,13 @@ public class TestRegionInfoBuilder { // Create a region. That'll write the .regioninfo file. FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration()); HRegion r = HBaseTestingUtility.createRegionAndWAL(convert(ri), basedir, htu.getConfiguration(), - fsTableDescriptors.get(TableName.META_TABLE_NAME)); + fsTableDescriptors.get(TableName.META_TABLE_NAME).get()); // Get modtime on the file. long modtime = getModTime(r); HBaseTestingUtility.closeRegionAndWAL(r); Thread.sleep(1001); - r = HRegion.openHRegion(basedir, convert(ri), fsTableDescriptors.get(TableName.META_TABLE_NAME), - null, htu.getConfiguration()); + r = HRegion.openHRegion(basedir, convert(ri), + fsTableDescriptors.get(TableName.META_TABLE_NAME).get(), null, htu.getConfiguration()); // Ensure the file is not written for a second time. 
long modtime2 = getModTime(r); assertEquals(modtime, modtime2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index 9d9226ef26..bc445da0c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -211,7 +211,7 @@ public class TestRegionMergeTransactionOnCluster { .getTableRegionsAndLocations(MASTER.getConnection(), tableName); RegionInfo mergedRegionInfo = tableRegions.get(0).getFirst(); TableDescriptor tableDescriptor = MASTER.getTableDescriptors().get( - tableName); + tableName).get(); Result mergedRegionResult = MetaTableAccessor.getRegionResult( MASTER.getConnection(), mergedRegionInfo.getRegionName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java index b9f89b72da..d6da137b7c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java @@ -264,7 +264,7 @@ public class TestRegionServerNoMaster { hri.getEncodedNameAsBytes())); // Let's start the open handler - TableDescriptor htd = getRS().tableDescriptors.get(hri.getTable()); + TableDescriptor htd = getRS().tableDescriptors.get(hri.getTable()).get(); getRS().executorService.submit(new OpenRegionHandler(getRS(), getRS(), hri, htd, -1)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index 11c7bcd005..537156f672 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -28,7 +28,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.util.Arrays; import java.util.Comparator; -import java.util.Map; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -38,7 +38,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.MutableTableDescriptors; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -125,7 +125,7 @@ public class TestFSTableDescriptors { assertTrue(!fs.exists(p2)); int i3 = FSTableDescriptors.getTableInfoSequenceId(p3); assertTrue(i3 == i2 + 1); - TableDescriptor descriptor = fstd.get(htd.getTableName()); + TableDescriptor descriptor = fstd.get(htd.getTableName()).get(); assertEquals(descriptor, htd); } @@ -167,7 +167,7 @@ public class TestFSTableDescriptors { FileSystem fs = FileSystem.get(UTIL.getConfiguration()); // Cleanup old tests if any detrius laying around. 
Path rootdir = new Path(UTIL.getDataTestDir(), name); - TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); + MutableTableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build(); htds.add(htd); assertNotNull(htds.remove(htd.getTableName())); @@ -197,7 +197,7 @@ public class TestFSTableDescriptors { out.write(TableDescriptorBuilder.toByteArray(htd)); } FSTableDescriptors fstd2 = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); - TableDescriptor td2 = fstd2.get(htd.getTableName()); + TableDescriptor td2 = fstd2.get(htd.getTableName()).get(); assertEquals(htd, td2); FileStatus descriptorFile2 = FSTableDescriptors.getTableInfoPath(fs, fstd2.getTableDir(htd.getTableName())); @@ -217,7 +217,7 @@ public class TestFSTableDescriptors { Path rootdir = new Path(UTIL.getDataTestDir(), name); FSTableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir) { @Override - public TableDescriptor get(TableName tablename) + public Optional get(TableName tablename) throws TableExistsException, FileNotFoundException, IOException { LOG.info(tablename + ", cachehits=" + this.cachehits); return super.get(tablename); @@ -281,7 +281,7 @@ public class TestFSTableDescriptors { for (int i = 0; i < count; i++) { assertNotNull("Expected HTD, got null instead", htds.get(TableName.valueOf(name + i))); assertTrue("Column Family " + i + " missing", - htds.get(TableName.valueOf(name + i)).hasColumnFamily(Bytes.toBytes("" + i))); + htds.get(TableName.valueOf(name + i)).get().hasColumnFamily(Bytes.toBytes("" + i))); } assertEquals(count * 4, htds.invocations); assertEquals("expected=0, actual=" + htds.cachehits, 0, htds.cachehits); @@ -339,14 +339,6 @@ public class TestFSTableDescriptors { // hbase:meta will only increase the cachehit by 1 assertTrue(nonchtds.getAll().size() == chtds.getAll().size()); - - for (Map.Entry 
entry: nonchtds.getAll().entrySet()) { - String t = (String) entry.getKey(); - TableDescriptor nchtd = entry.getValue(); - assertTrue("expected " + htd.toString() + - " got: " + chtds.get(TableName.valueOf(t)).toString(), - (nchtd.equals(chtds.get(TableName.valueOf(t))))); - } } @Test @@ -355,7 +347,7 @@ public class TestFSTableDescriptors { FileSystem fs = FileSystem.get(UTIL.getConfiguration()); // Cleanup old tests if any detrius laying around. Path rootdir = new Path(UTIL.getDataTestDir(), name); - TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); + MutableTableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); assertNull("There shouldn't be any HTD for this table", htds.get(TableName.valueOf("NoSuchTable"))); } @@ -366,7 +358,7 @@ public class TestFSTableDescriptors { FileSystem fs = FileSystem.get(UTIL.getConfiguration()); // Cleanup old tests if any detrius laying around. Path rootdir = new Path(UTIL.getDataTestDir(), name); - TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); + MutableTableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build(); htds.add(htd); htds.add(htd); @@ -446,7 +438,7 @@ public class TestFSTableDescriptors { } @Override - public TableDescriptor get(TableName tablename) + public Optional get(TableName tablename) throws TableExistsException, FileNotFoundException, IOException { LOG.info((super.isUsecache() ? "Cached" : "Non-Cached") + " TableDescriptor.get() on " + tablename + ", cachehits=" + this.cachehits); -- 2.17.1