diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java index 9864031..e5607a7 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java @@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.client.BufferedMutatorParams; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; @@ -474,9 +473,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { protected void createSchema() throws IOException { Configuration conf = getConf(); - Admin admin = new HBaseAdmin(conf); TableName tableName = getTableName(conf); - try { + try (Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { if (!admin.tableExists(tableName)) { HTableDescriptor htd = new HTableDescriptor(getTableName(getConf())); htd.addFamily(new HColumnDescriptor(FAMILY_NAME)); @@ -485,7 +484,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { throw new IllegalStateException("No live regionservers"); } int regionsPerServer = conf.getInt(HBaseTestingUtility.REGIONS_PER_SERVER_KEY, - HBaseTestingUtility.DEFAULT_REGIONS_PER_SERVER); + HBaseTestingUtility.DEFAULT_REGIONS_PER_SERVER); int totalNumberOfRegions = numberOfServers * regionsPerServer; LOG.info("Number of live regionservers: " + numberOfServers + ", " + "pre-splitting table into " + totalNumberOfRegions + " regions " + @@ -498,8 +497,6 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { } catch 
(MasterNotRunningException e) { LOG.error("Master not running", e); throw new IOException(e); - } finally { - admin.close(); } } @@ -1360,13 +1357,14 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { TableName tableName = getTableName(conf); FileSystem fs = HFileSystem.get(conf); - Admin admin = new HBaseAdmin(conf); + try (Connection conn = ConnectionFactory.createConnection(getConf()); + Admin admin = conn.getAdmin()) { - if (admin.tableExists(tableName)) { - admin.disableTable(tableName); - admin.deleteTable(tableName); + if (admin.tableExists(tableName)) { + admin.disableTable(tableName); + admin.deleteTable(tableName); + } } - if (fs.exists(p)) { fs.delete(p, true); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java index b82c750..a4bd9f7 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java @@ -40,9 +40,9 @@ import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.BufferedMutatorParams; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -133,14 +133,15 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB if(!acl) { LOG.info("No ACL available."); } - Admin admin = new HBaseAdmin(getConf()); - for (int i = 0; i < DEFAULT_TABLES_COUNT; i++) { - 
TableName tableName = IntegrationTestBigLinkedListWithVisibility.getTableName(i); - createTable(admin, tableName, false, acl); + try (Connection conn = ConnectionFactory.createConnection(getConf()); + Admin admin = conn.getAdmin()) { + for (int i = 0; i < DEFAULT_TABLES_COUNT; i++) { + TableName tableName = IntegrationTestBigLinkedListWithVisibility.getTableName(i); + createTable(admin, tableName, false, acl); + } + TableName tableName = TableName.valueOf(COMMON_TABLE_NAME); + createTable(admin, tableName, true, acl); } - TableName tableName = TableName.valueOf(COMMON_TABLE_NAME); - createTable(admin, tableName, true, acl); - admin.close(); } private void createTable(Admin admin, TableName tableName, boolean setVersion, diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java index 05e214b..3fc4e8e 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java @@ -33,7 +33,8 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.IntegrationTestingUtility; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; @@ -370,11 +371,9 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT HTableDescriptor htd = new HTableDescriptor(getTablename()); htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); - Admin admin = new HBaseAdmin(getConf()); - try { + try (Connection conn = 
ConnectionFactory.createConnection(getConf()); + Admin admin = conn.getAdmin()) { admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits); - } finally { - admin.close(); } doLoad(getConf(), htd); doVerify(getConf(), htd); @@ -382,6 +381,7 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT return 0; } + @SuppressWarnings("unchecked") @Override protected void processOptions(CommandLine cmd) { List args = cmd.getArgList(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index d96c9e4..a2d28ef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.RegionLocator; @@ -112,7 +111,7 @@ import java.util.concurrent.TimeUnit; @InterfaceStability.Stable public class LoadIncrementalHFiles extends Configured implements Tool { private static final Log LOG = LogFactory.getLog(LoadIncrementalHFiles.class); - private Admin hbAdmin; + private boolean initialized = false; public static final String NAME = "completebulkload"; public static final String MAX_FILES_PER_REGION_PER_FAMILY @@ -138,18 +137,19 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } private void initialize() throws Exception { - if (hbAdmin == null) { - // make a copy, just to be sure we're not overriding someone else's config - 
setConf(HBaseConfiguration.create(getConf())); - Configuration conf = getConf(); - // disable blockcache for tool invocation, see HBASE-10500 - conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0); - this.hbAdmin = new HBaseAdmin(conf); - this.userProvider = UserProvider.instantiate(conf); - this.fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); - assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true); - maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32); + if (initialized) { + return; } + // make a copy, just to be sure we're not overriding someone else's config + setConf(HBaseConfiguration.create(getConf())); + Configuration conf = getConf(); + // disable blockcache for tool invocation, see HBASE-10500 + conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0); + this.userProvider = UserProvider.instantiate(conf); + this.fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); + assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true); + maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32); + initialized = true; } private void usage() { @@ -851,10 +851,6 @@ public class LoadIncrementalHFiles extends Configured implements Tool { return !HFile.isReservedFileInfoKey(key); } - private boolean doesTableExist(TableName tableName) throws Exception { - return hbAdmin.tableExists(tableName); - } - /* * Infers region boundaries for a new table. * Parameter: @@ -889,7 +885,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * If the table is created for the first time, then "completebulkload" reads the files twice. * More modifications necessary if we want to avoid doing it. 
*/ - private void createTable(TableName tableName, String dirPath) throws Exception { + private void createTable(TableName tableName, String dirPath, Admin admin) throws Exception { final Path hfofDir = new Path(dirPath); final FileSystem fs = hfofDir.getFileSystem(getConf()); @@ -937,7 +933,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { }); byte[][] keys = LoadIncrementalHFiles.inferBoundaries(map); - this.hbAdmin.createTable(htd,keys); + admin.createTable(htd, keys); LOG.info("Table "+ tableName +" is available!!"); } @@ -950,26 +946,27 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } initialize(); - - String dirPath = args[0]; - TableName tableName = TableName.valueOf(args[1]); - - boolean tableExists = this.doesTableExist(tableName); - if (!tableExists) { - if ("yes".equalsIgnoreCase(getConf().get(CREATE_TABLE_CONF_KEY, "yes"))) { - this.createTable(tableName, dirPath); - } else { - String errorMsg = format("Table '%s' does not exist.", tableName); - LOG.error(errorMsg); - throw new TableNotFoundException(errorMsg); + try (Connection connection = ConnectionFactory.createConnection(getConf()); + Admin admin = connection.getAdmin()) { + String dirPath = args[0]; + TableName tableName = TableName.valueOf(args[1]); + + boolean tableExists = admin.tableExists(tableName); + if (!tableExists) { + if ("yes".equalsIgnoreCase(getConf().get(CREATE_TABLE_CONF_KEY, "yes"))) { + this.createTable(tableName, dirPath, admin); + } else { + String errorMsg = format("Table '%s' does not exist.", tableName); + LOG.error(errorMsg); + throw new TableNotFoundException(errorMsg); + } } - } - Path hfofDir = new Path(dirPath); + Path hfofDir = new Path(dirPath); - try (Connection connection = ConnectionFactory.createConnection(getConf()); - HTable table = (HTable) connection.getTable(tableName);) { - doBulkLoad(hfofDir, table); + try (HTable table = (HTable) connection.getTable(tableName);) { + doBulkLoad(hfofDir, table); + } } 
return 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java index 3aeee40..cb32827 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java @@ -28,7 +28,6 @@ import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.CoordinatedStateManagerFactory; @@ -37,8 +36,10 @@ import org.apache.hadoop.hbase.LocalHBaseCluster; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.ZNodeClearer; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.ServerCommandLine; @@ -249,12 +250,16 @@ public class HMasterCommandLine extends ServerCommandLine { @SuppressWarnings("resource") private int stopMaster() { - Admin adm = null; - try { - Configuration conf = getConf(); - // Don't try more than once - conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); - adm = new HBaseAdmin(getConf()); + Configuration conf = getConf(); + // Don't try more than once + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); + try (Connection connection = 
ConnectionFactory.createConnection(conf)) { + try (Admin admin = connection.getAdmin()) { + admin.shutdown(); + } catch (Throwable t) { + LOG.error("Failed to stop master", t); + return 1; + } } catch (MasterNotRunningException e) { LOG.error("Master not running"); return 1; @@ -265,12 +270,6 @@ LOG.error("Got IOException: " +e.getMessage(), e); return 1; } - try { - adm.shutdown(); - } catch (Throwable t) { - LOG.error("Failed to stop master", t); - return 1; - } return 0; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 1fb64a2..5eb3e63 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -60,7 +60,6 @@ import com.google.common.collect.Lists; import com.google.common.collect.Multimap; import com.google.common.collect.TreeMultimap; import com.google.protobuf.ServiceException; - import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; @@ -95,10 +94,10 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -3096,21 +3095,12 @@ public class HBaseFsck extends Configured implements Closeable { HTableDescriptor[] 
getHTableDescriptors(List tableNames) { HTableDescriptor[] htd = new HTableDescriptor[0]; - Admin admin = null; - try { LOG.info("getHTableDescriptors == tableNames => " + tableNames); - admin = new HBaseAdmin(getConf()); + try (Connection conn = ConnectionFactory.createConnection(getConf()); + Admin admin = conn.getAdmin()) { htd = admin.getTableDescriptorsByTableName(tableNames); } catch (IOException e) { LOG.debug("Exception getting table descriptors", e); - } finally { - if (admin != null) { - try { - admin.close(); - } catch (IOException e) { - LOG.debug("Exception closing HBaseAdmin", e); - } - } } return htd; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java index 96e0d48..592fd46 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java @@ -26,7 +26,6 @@ import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -36,11 +35,11 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -126,13 +125,11 @@ class HMerge { throw new 
IllegalStateException( "HBase instance must be running to merge a normal table"); } - Admin admin = new HBaseAdmin(conf); - try { + try (Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { if (!admin.isTableDisabled(tableName)) { throw new TableNotDisabledException(tableName); } - } finally { - admin.close(); } new OnlineMerger(conf, fs, tableName).process(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java index 9cd24f6..643ab0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java @@ -38,7 +38,8 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.RegionLocator; @@ -66,11 +67,9 @@ public class RegionSizeCalculator { */ @Deprecated public RegionSizeCalculator(HTable table) throws IOException { - HBaseAdmin admin = new HBaseAdmin(table.getConfiguration()); - try { + try (Connection conn = ConnectionFactory.createConnection(table.getConfiguration()); + Admin admin = conn.getAdmin()) { init(table.getRegionLocator(), admin); - } finally { - admin.close(); } } diff --git a/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp b/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp index 9bed70b..d47f574 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp +++ 
b/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp @@ -21,7 +21,9 @@ import="static org.apache.commons.lang.StringEscapeUtils.escapeXml" import="org.apache.hadoop.conf.Configuration" import="org.apache.hadoop.hbase.master.HMaster" - import="org.apache.hadoop.hbase.client.HBaseAdmin" + import="org.apache.hadoop.hbase.client.Admin" + import="org.apache.hadoop.hbase.client.Connection" + import="org.apache.hadoop.hbase.client.ConnectionFactory" import="org.apache.hadoop.hbase.HTableDescriptor" import="org.apache.hadoop.hbase.HBaseConfiguration" %> <% @@ -81,7 +83,11 @@ -<% HTableDescriptor[] tables = new HBaseAdmin(conf).listTables(); +<% HTableDescriptor[] tables; + try(Connection connection = ConnectionFactory.createConnection(conf); + Admin admin = connection.getAdmin()) { + tables = admin.listTables(); + } if(tables != null && tables.length > 0) { %> diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java index 982b977..993a8e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java @@ -39,9 +39,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.ScannerCallable; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.ipc.AbstractRpcClient; @@ -105,8 +102,7 @@ public class FilterTestingCluster { conf = HBaseConfiguration.create(conf); conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); try { - connection = 
ConnectionFactory.createConnection(conf); - admin = connection.getAdmin(); + admin = TEST_UTIL.getHBaseAdmin(); } catch (MasterNotRunningException e) { assertNull("Master is not running", e); } catch (ZooKeeperConnectionException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java index ecea98e..d3a7c9a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java @@ -383,11 +383,10 @@ public class TestHFileOutputFormat { util = new HBaseTestingUtility(); Configuration conf = util.getConfiguration(); byte[][] splitKeys = generateRandomSplitKeys(4); - HBaseAdmin admin = null; try { util.startMiniCluster(); Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad"); - admin = util.getHBaseAdmin(); + HBaseAdmin admin = util.getHBaseAdmin(); HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys); assertEquals("Should start with empty table", 0, util.countRows(table)); @@ -466,7 +465,6 @@ public class TestHFileOutputFormat { assertEquals("Data should remain after reopening of regions", tableDigestBefore, util.checksumRows(table)); } finally { - if (admin != null) admin.close(); util.shutdownMiniMapReduceCluster(); util.shutdownMiniCluster(); } @@ -909,7 +907,7 @@ public class TestHFileOutputFormat { try { util.startMiniCluster(); final FileSystem fs = util.getDFSCluster().getFileSystem(); - HBaseAdmin admin = new HBaseAdmin(conf); + HBaseAdmin admin = util.getHBaseAdmin(); HTable table = util.createTable(TABLE_NAME, FAMILIES); assertEquals("Should start with empty table", 0, util.countRows(table)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index 0ec410e..c7ffe25 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -36,12 +36,10 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; @@ -286,15 +284,8 @@ public class TestMasterReplication { } private void createTableOnClusters(HTableDescriptor table) throws Exception { - int numClusters = configurations.length; - for (int i = 0; i < numClusters; i++) { - Admin hbaseAdmin = null; - try { - hbaseAdmin = new HBaseAdmin(configurations[i]); - hbaseAdmin.createTable(table); - } finally { - close(hbaseAdmin); - } + for (HBaseTestingUtility utility : utilities) { + utility.getHBaseAdmin().createTable(table); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java index 0f077ed..b817401 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java @@ -43,9 +43,9 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; +import 
org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.crypto.Cipher; import org.apache.hadoop.hbase.io.crypto.Encryption; @@ -266,45 +266,48 @@ public class LoadTestTool extends AbstractHBaseTool { */ protected void applyColumnFamilyOptions(TableName tableName, byte[][] columnFamilies) throws IOException { - Admin admin = new HBaseAdmin(conf); - HTableDescriptor tableDesc = admin.getTableDescriptor(tableName); - LOG.info("Disabling table " + tableName); - admin.disableTable(tableName); - for (byte[] cf : columnFamilies) { - HColumnDescriptor columnDesc = tableDesc.getFamily(cf); - boolean isNewCf = columnDesc == null; - if (isNewCf) { - columnDesc = new HColumnDescriptor(cf); - } - if (bloomType != null) { - columnDesc.setBloomFilterType(bloomType); - } - if (compressAlgo != null) { - columnDesc.setCompressionType(compressAlgo); - } - if (dataBlockEncodingAlgo != null) { - columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo); - } - if (inMemoryCF) { - columnDesc.setInMemory(inMemoryCF); - } - if (cipher != null) { - byte[] keyBytes = new byte[cipher.getKeyLength()]; - new SecureRandom().nextBytes(keyBytes); - columnDesc.setEncryptionType(cipher.getName()); - columnDesc.setEncryptionKey(EncryptionUtil.wrapKey(conf, - User.getCurrent().getShortName(), - new SecretKeySpec(keyBytes, cipher.getName()))); - } - if (isNewCf) { - admin.addColumn(tableName, columnDesc); - } else { - admin.modifyColumn(tableName, columnDesc); + try (Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { + HTableDescriptor tableDesc = admin.getTableDescriptor(tableName); + LOG.info("Disabling table " + tableName); + admin.disableTable(tableName); + for (byte[] cf : columnFamilies) { + HColumnDescriptor columnDesc = 
tableDesc.getFamily(cf); + boolean isNewCf = columnDesc == null; + if (isNewCf) { + columnDesc = new HColumnDescriptor(cf); + } + if (bloomType != null) { + columnDesc.setBloomFilterType(bloomType); + } + if (compressAlgo != null) { + columnDesc.setCompressionType(compressAlgo); + } + if (dataBlockEncodingAlgo != null) { + columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo); + } + if (inMemoryCF) { + columnDesc.setInMemory(inMemoryCF); + } + if (cipher != null) { + byte[] keyBytes = new byte[cipher.getKeyLength()]; + new SecureRandom().nextBytes(keyBytes); + columnDesc.setEncryptionType(cipher.getName()); + columnDesc.setEncryptionKey( + EncryptionUtil.wrapKey(conf, + User.getCurrent().getShortName(), + new SecretKeySpec(keyBytes, + cipher.getName()))); + } + if (isNewCf) { + admin.addColumn(tableName, columnDesc); + } else { + admin.modifyColumn(tableName, columnDesc); + } } + LOG.info("Enabling table " + tableName); + admin.enableTable(tableName); } - LOG.info("Enabling table " + tableName); - admin.enableTable(tableName); - admin.close(); } @Override