diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 9864031..76af7e8 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.test;
 
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.IOException;
 import java.io.FileNotFoundException;
+import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -34,6 +34,7 @@ import java.util.TreeSet;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import com.google.common.collect.Sets;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.HelpFormatter;
@@ -64,7 +65,6 @@ import org.apache.hadoop.hbase.client.BufferedMutatorParams;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -100,23 +100,21 @@ import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.Reducer;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
-import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
 import org.apache.hadoop.mapreduce.lib.output.SequenceFileAsBinaryOutputFormat;
 import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
 import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import com.google.common.collect.Sets;
-
 /**
  * This is an integration test borrowed from goraci, written by Keith Turner,
 * which is in turn inspired by the Accumulo test called continous ingest (ci).
@@ -474,32 +472,31 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
 
   protected void createSchema() throws IOException {
     Configuration conf = getConf();
-    Admin admin = new HBaseAdmin(conf);
     TableName tableName = getTableName(conf);
-    try {
-      if (!admin.tableExists(tableName)) {
-        HTableDescriptor htd = new HTableDescriptor(getTableName(getConf()));
-        htd.addFamily(new HColumnDescriptor(FAMILY_NAME));
-        int numberOfServers = admin.getClusterStatus().getServers().size();
-        if (numberOfServers == 0) {
-          throw new IllegalStateException("No live regionservers");
-        }
-        int regionsPerServer = conf.getInt(HBaseTestingUtility.REGIONS_PER_SERVER_KEY,
-          HBaseTestingUtility.DEFAULT_REGIONS_PER_SERVER);
-        int totalNumberOfRegions = numberOfServers * regionsPerServer;
-        LOG.info("Number of live regionservers: " + numberOfServers + ", " +
-          "pre-splitting table into " + totalNumberOfRegions + " regions " +
-          "(default regions per server: " + regionsPerServer + ")");
+    try (Connection conn = ConnectionFactory.createConnection(conf)) {
+      try (Admin admin = conn.getAdmin()) {
+        if (!admin.tableExists(tableName)) {
+          HTableDescriptor htd = new HTableDescriptor(getTableName(getConf()));
+          htd.addFamily(new HColumnDescriptor(FAMILY_NAME));
+          int numberOfServers = admin.getClusterStatus().getServers().size();
+          if (numberOfServers == 0) {
+            throw new IllegalStateException("No live regionservers");
+          }
+          int regionsPerServer = conf.getInt(HBaseTestingUtility.REGIONS_PER_SERVER_KEY,
+            HBaseTestingUtility.DEFAULT_REGIONS_PER_SERVER);
+          int totalNumberOfRegions = numberOfServers * regionsPerServer;
+          LOG.info("Number of live regionservers: " + numberOfServers + ", " +
+            "pre-splitting table into " + totalNumberOfRegions + " regions " +
+            "(default regions per server: " + regionsPerServer + ")");
 
-      byte[][] splits = new RegionSplitter.UniformSplit().split(totalNumberOfRegions);
+          byte[][] splits = new RegionSplitter.UniformSplit().split(totalNumberOfRegions);
 
-      admin.createTable(htd, splits);
+          admin.createTable(htd, splits);
+        }
       }
     } catch (MasterNotRunningException e) {
       LOG.error("Master not running", e);
       throw new IOException(e);
-    } finally {
-      admin.close();
     }
   }
 
@@ -1360,13 +1357,15 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
       TableName tableName = getTableName(conf);
       FileSystem fs = HFileSystem.get(conf);
-      Admin admin = new HBaseAdmin(conf);
+      try (Connection conn = ConnectionFactory.createConnection(getConf())) {
+        try (Admin admin = conn.getAdmin()) {
 
-      if (admin.tableExists(tableName)) {
-        admin.disableTable(tableName);
-        admin.deleteTable(tableName);
+          if (admin.tableExists(tableName)) {
+            admin.disableTable(tableName);
+            admin.deleteTable(tableName);
+          }
+        }
       }
-
       if (fs.exists(p)) {
         fs.delete(p, true);
       }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
index b82c750..6bc46c7 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
@@ -40,9 +40,9 @@ import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.BufferedMutatorParams;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -133,14 +133,16 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB
     if(!acl) {
       LOG.info("No ACL available.");
     }
-    Admin admin = new HBaseAdmin(getConf());
-    for (int i = 0; i < DEFAULT_TABLES_COUNT; i++) {
-      TableName tableName = IntegrationTestBigLinkedListWithVisibility.getTableName(i);
-      createTable(admin, tableName, false, acl);
+    try (Connection conn = ConnectionFactory.createConnection(getConf())) {
+      try (Admin admin = conn.getAdmin()) {
+        for (int i = 0; i < DEFAULT_TABLES_COUNT; i++) {
+          TableName tableName = IntegrationTestBigLinkedListWithVisibility.getTableName(i);
+          createTable(admin, tableName, false, acl);
+        }
+        TableName tableName = TableName.valueOf(COMMON_TABLE_NAME);
+        createTable(admin, tableName, true, acl);
+      }
     }
-    TableName tableName = TableName.valueOf(COMMON_TABLE_NAME);
-    createTable(admin, tableName, true, acl);
-    admin.close();
   }
 
   private void createTable(Admin admin, TableName tableName, boolean setVersion,
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
index 05e214b..f92176e 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hbase.test;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.List;
@@ -33,7 +30,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
@@ -59,6 +57,9 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.experimental.categories.Category;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 /**
 * A large test which loads a lot of data with cell visibility, and verifies the data. Test adds 2
 * users with different sets of visibility labels authenticated for them. Every row (so cells in
@@ -370,11 +371,10 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
     HTableDescriptor htd = new HTableDescriptor(getTablename());
     htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
-    Admin admin = new HBaseAdmin(getConf());
-    try {
-      admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
-    } finally {
-      admin.close();
+    try (Connection conn = ConnectionFactory.createConnection(getConf())) {
+      try (Admin admin = conn.getAdmin()) {
+        admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
+      }
     }
     doLoad(getConf(), htd);
     doVerify(getConf(), htd);
@@ -382,6 +382,7 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
     return 0;
   }
 
+  @SuppressWarnings("unchecked")
   @Override
   protected void processOptions(CommandLine cmd) {
     List args = cmd.getArgList();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index d96c9e4..e0681dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -24,7 +24,6 @@ import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Multimap;
 import com.google.common.collect.Multimaps;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
 import org.apache.commons.lang.mutable.MutableInt;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -47,7 +46,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -112,7 +110,7 @@ import java.util.concurrent.TimeUnit;
 @InterfaceStability.Stable
 public class LoadIncrementalHFiles extends Configured implements Tool {
   private static final Log LOG = LogFactory.getLog(LoadIncrementalHFiles.class);
-  private Admin hbAdmin;
+  private boolean initalized = false;
 
   public static final String NAME = "completebulkload";
   public static final String MAX_FILES_PER_REGION_PER_FAMILY
@@ -138,18 +136,19 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
   }
 
   private void initialize() throws Exception {
-    if (hbAdmin == null) {
-      // make a copy, just to be sure we're not overriding someone else's config
-      setConf(HBaseConfiguration.create(getConf()));
-      Configuration conf = getConf();
-      // disable blockcache for tool invocation, see HBASE-10500
-      conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
-      this.hbAdmin = new HBaseAdmin(conf);
-      this.userProvider = UserProvider.instantiate(conf);
-      this.fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
-      assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true);
-      maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32);
+    if (initalized) {
+      return;
     }
+    // make a copy, just to be sure we're not overriding someone else's config
+    setConf(HBaseConfiguration.create(getConf()));
+    Configuration conf = getConf();
+    // disable blockcache for tool invocation, see HBASE-10500
+    conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
+    this.userProvider = UserProvider.instantiate(conf);
+    this.fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
+    assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true);
+    maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32);
+    initalized = true;
   }
 
   private void usage() {
@@ -851,10 +850,6 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     return !HFile.isReservedFileInfoKey(key);
   }
 
-  private boolean doesTableExist(TableName tableName) throws Exception {
-    return hbAdmin.tableExists(tableName);
-  }
-
   /*
    * Infers region boundaries for a new table.
    * Parameter:
@@ -889,7 +884,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
    * If the table is created for the first time, then "completebulkload" reads the files twice.
    * More modifications necessary if we want to avoid doing it.
    */
-  private void createTable(TableName tableName, String dirPath) throws Exception {
+  private void createTable(TableName tableName, String dirPath, Admin admin) throws Exception {
     final Path hfofDir = new Path(dirPath);
     final FileSystem fs = hfofDir.getFileSystem(getConf());
@@ -937,7 +932,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     });
 
     byte[][] keys = LoadIncrementalHFiles.inferBoundaries(map);
-    this.hbAdmin.createTable(htd,keys);
+    admin.createTable(htd, keys);
 
     LOG.info("Table "+ tableName +" is available!!");
   }
@@ -950,26 +945,27 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     }
 
     initialize();
-
-    String dirPath = args[0];
-    TableName tableName = TableName.valueOf(args[1]);
-
-    boolean tableExists = this.doesTableExist(tableName);
-    if (!tableExists) {
-      if ("yes".equalsIgnoreCase(getConf().get(CREATE_TABLE_CONF_KEY, "yes"))) {
-        this.createTable(tableName, dirPath);
-      } else {
-        String errorMsg = format("Table '%s' does not exist.", tableName);
-        LOG.error(errorMsg);
-        throw new TableNotFoundException(errorMsg);
+    try (Connection connection = ConnectionFactory.createConnection(getConf());
+        Admin admin = connection.getAdmin()) {
+      String dirPath = args[0];
+      TableName tableName = TableName.valueOf(args[1]);
+
+      boolean tableExists = admin.tableExists(tableName);
+      if (!tableExists) {
+        if ("yes".equalsIgnoreCase(getConf().get(CREATE_TABLE_CONF_KEY, "yes"))) {
+          this.createTable(tableName, dirPath, admin);
+        } else {
+          String errorMsg = format("Table '%s' does not exist.", tableName);
+          LOG.error(errorMsg);
+          throw new TableNotFoundException(errorMsg);
+        }
       }
-    }
 
-    Path hfofDir = new Path(dirPath);
+      Path hfofDir = new Path(dirPath);
 
-    try (Connection connection = ConnectionFactory.createConnection(getConf());
-        HTable table = (HTable) connection.getTable(tableName);) {
-      doBulkLoad(hfofDir, table);
+      try (HTable table = (HTable) connection.getTable(tableName);) {
+        doBulkLoad(hfofDir, table);
+      }
     }
 
     return 0;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
index 3aeee40..1cf0f39 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
@@ -28,7 +28,6 @@ import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
@@ -37,8 +36,9 @@ import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.ZNodeClearer;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.ServerCommandLine;
@@ -249,12 +249,12 @@ public class HMasterCommandLine extends ServerCommandLine {
 
   @SuppressWarnings("resource")
   private int stopMaster() {
-    Admin adm = null;
+    Connection connection = null;
    try {
      Configuration conf = getConf();
      // Don't try more than once
      conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
-      adm = new HBaseAdmin(getConf());
+      connection = ConnectionFactory.createConnection(conf);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running");
      return 1;
@@ -266,7 +266,7 @@ public class HMasterCommandLine extends ServerCommandLine {
       return 1;
     }
     try {
-      adm.shutdown();
+      connection.getAdmin().shutdown();
     } catch (Throwable t) {
       LOG.error("Failed to stop master", t);
       return 1;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 1fb64a2..5eb3e63 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -60,7 +60,6 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Multimap;
 import com.google.common.collect.TreeMultimap;
 import com.google.protobuf.ServiceException;
-
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
@@ -95,10 +94,10 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
@@ -3096,21 +3095,12 @@ public class HBaseFsck extends Configured implements Closeable {
   HTableDescriptor[] getHTableDescriptors(List tableNames) {
     HTableDescriptor[] htd = new HTableDescriptor[0];
-    Admin admin = null;
-    try {
       LOG.info("getHTableDescriptors == tableNames => " + tableNames);
-      admin = new HBaseAdmin(getConf());
+    try (Connection conn = ConnectionFactory.createConnection(getConf());
+        Admin admin = conn.getAdmin()) {
       htd = admin.getTableDescriptorsByTableName(tableNames);
     } catch (IOException e) {
       LOG.debug("Exception getting table descriptors", e);
-    } finally {
-      if (admin != null) {
-        try {
-          admin.close();
-        } catch (IOException e) {
-          LOG.debug("Exception closing HBaseAdmin", e);
-        }
-      }
     }
     return htd;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index 96e0d48..118e1b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -26,7 +26,6 @@ import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -36,11 +35,11 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -126,13 +125,13 @@ class HMerge {
       throw new IllegalStateException(
           "HBase instance must be running to merge a normal table");
     }
-    Admin admin = new HBaseAdmin(conf);
-    try {
+    Connection conn = ConnectionFactory.createConnection(conf);
+    try (Admin admin = conn.getAdmin()) {
       if (!admin.isTableDisabled(tableName)) {
         throw new TableNotDisabledException(tableName);
       }
     } finally {
-      admin.close();
+      conn.close();
     }
     new OnlineMerger(conf, fs, tableName).process();
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
index 9cd24f6..643ab0e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
@@ -38,7 +38,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionLocator;
 
@@ -66,11 +67,9 @@ public class RegionSizeCalculator {
    */
   @Deprecated
   public RegionSizeCalculator(HTable table) throws IOException {
-    HBaseAdmin admin = new HBaseAdmin(table.getConfiguration());
-    try {
+    try (Connection conn = ConnectionFactory.createConnection(table.getConfiguration());
+        Admin admin = conn.getAdmin()) {
       init(table.getRegionLocator(), admin);
-    } finally {
-      admin.close();
     }
   }
 
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp b/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
index 9bed70b..d47f574 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
@@ -21,7 +21,9 @@
  import="static org.apache.commons.lang.StringEscapeUtils.escapeXml"
  import="org.apache.hadoop.conf.Configuration"
  import="org.apache.hadoop.hbase.master.HMaster"
- import="org.apache.hadoop.hbase.client.HBaseAdmin"
+ import="org.apache.hadoop.hbase.client.Admin"
+ import="org.apache.hadoop.hbase.client.Connection"
+ import="org.apache.hadoop.hbase.client.ConnectionFactory"
  import="org.apache.hadoop.hbase.HTableDescriptor"
  import="org.apache.hadoop.hbase.HBaseConfiguration" %>
 <%
@@ -81,7 +83,11 @@
 
-<% HTableDescriptor[] tables = new HBaseAdmin(conf).listTables();
+<% HTableDescriptor[] tables;
+   try(Connection connection = ConnectionFactory.createConnection(conf);
+       Admin admin = connection.getAdmin()) {
+     tables = admin.listTables();
+   }
    if(tables != null && tables.length > 0) { %>
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java
index 982b977..a40d9f9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java
@@ -19,10 +19,6 @@
  */
 package org.apache.hadoop.hbase.filter;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -39,9 +35,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.ScannerCallable;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.ipc.AbstractRpcClient;
@@ -54,6 +47,10 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.experimental.categories.Category;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
 /**
  * By using this class as the super class of a set of tests you will have a HBase testing
 * cluster available that is very suitable for writing tests for scanning and filtering against.
@@ -105,8 +102,7 @@ public class FilterTestingCluster {
     conf = HBaseConfiguration.create(conf);
     conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
     try {
-      connection = ConnectionFactory.createConnection(conf);
-      admin = connection.getAdmin();
+      admin = TEST_UTIL.getHBaseAdmin();
     } catch (MasterNotRunningException e) {
       assertNull("Master is not running", e);
     } catch (ZooKeeperConnectionException e) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
index ecea98e..9732841 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
@@ -35,7 +35,6 @@ import java.util.Set;
 import java.util.concurrent.Callable;
 
 import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -383,11 +382,10 @@ public class TestHFileOutputFormat {
     util = new HBaseTestingUtility();
     Configuration conf = util.getConfiguration();
     byte[][] splitKeys = generateRandomSplitKeys(4);
-    HBaseAdmin admin = null;
     try {
       util.startMiniCluster();
       Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
-      admin = util.getHBaseAdmin();
+      HBaseAdmin admin = util.getHBaseAdmin();
       HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
       assertEquals("Should start with empty table", 0, util.countRows(table));
 
@@ -466,7 +464,6 @@ public class TestHFileOutputFormat {
       assertEquals("Data should remain after reopening of regions",
           tableDigestBefore, util.checksumRows(table));
     } finally {
-      if (admin != null) admin.close();
       util.shutdownMiniMapReduceCluster();
       util.shutdownMiniCluster();
     }
@@ -909,7 +906,7 @@ public class TestHFileOutputFormat {
     try {
       util.startMiniCluster();
       final FileSystem fs = util.getDFSCluster().getFileSystem();
-      HBaseAdmin admin = new HBaseAdmin(conf);
+      HBaseAdmin admin = util.getHBaseAdmin();
       HTable table = util.createTable(TABLE_NAME, FAMILIES);
       assertEquals("Should start with empty table", 0, util.countRows(table));
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
index 0ec410e..c7ffe25 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
@@ -36,12 +36,10 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
@@ -286,15 +284,8 @@ public class TestMasterReplication {
   }
 
   private void createTableOnClusters(HTableDescriptor table) throws Exception {
-    int numClusters = configurations.length;
-    for (int i = 0; i < numClusters; i++) {
-      Admin hbaseAdmin = null;
-      try {
-        hbaseAdmin = new HBaseAdmin(configurations[i]);
-        hbaseAdmin.createTable(table);
-      } finally {
-        close(hbaseAdmin);
-      }
+    for (HBaseTestingUtility utility : utilities) {
+      utility.getHBaseAdmin().createTable(table);
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
index 0f077ed..85da693 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import javax.crypto.spec.SecretKeySpec;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
@@ -28,8 +29,6 @@ import java.util.Properties;
 import java.util.Random;
 import java.util.concurrent.atomic.AtomicReference;
 
-import javax.crypto.spec.SecretKeySpec;
-
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -43,9 +42,9 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.crypto.Cipher;
 import org.apache.hadoop.hbase.io.crypto.Encryption;
@@ -266,45 +265,48 @@ public class LoadTestTool extends AbstractHBaseTool {
    */
   protected void applyColumnFamilyOptions(TableName tableName,
       byte[][] columnFamilies) throws IOException {
-    Admin admin = new HBaseAdmin(conf);
-    HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
-    LOG.info("Disabling table " + tableName);
-    admin.disableTable(tableName);
-    for (byte[] cf : columnFamilies) {
-      HColumnDescriptor columnDesc = tableDesc.getFamily(cf);
-      boolean isNewCf = columnDesc == null;
-      if (isNewCf) {
-        columnDesc = new HColumnDescriptor(cf);
-      }
-      if (bloomType != null) {
-        columnDesc.setBloomFilterType(bloomType);
-      }
-      if (compressAlgo != null) {
-        columnDesc.setCompressionType(compressAlgo);
-      }
-      if (dataBlockEncodingAlgo != null) {
-        columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
-      }
-      if (inMemoryCF) {
-        columnDesc.setInMemory(inMemoryCF);
-      }
-      if (cipher != null) {
-        byte[] keyBytes = new byte[cipher.getKeyLength()];
-        new SecureRandom().nextBytes(keyBytes);
-        columnDesc.setEncryptionType(cipher.getName());
-        columnDesc.setEncryptionKey(EncryptionUtil.wrapKey(conf,
-          User.getCurrent().getShortName(),
-          new SecretKeySpec(keyBytes, cipher.getName())));
-      }
-      if (isNewCf) {
-        admin.addColumn(tableName, columnDesc);
-      } else {
-        admin.modifyColumn(tableName, columnDesc);
+    try (Connection conn = ConnectionFactory.createConnection(conf);
+        Admin admin = conn.getAdmin()) {
+      HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
+      LOG.info("Disabling table " + tableName);
+      admin.disableTable(tableName);
+      for (byte[] cf : columnFamilies) {
+        HColumnDescriptor columnDesc = tableDesc.getFamily(cf);
+        boolean isNewCf = columnDesc == null;
+        if (isNewCf) {
+          columnDesc = new HColumnDescriptor(cf);
+        }
+        if (bloomType != null) {
+          columnDesc.setBloomFilterType(bloomType);
+        }
+        if (compressAlgo != null) {
+          columnDesc.setCompressionType(compressAlgo);
+        }
+        if (dataBlockEncodingAlgo != null) {
+          columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
+        }
+        if (inMemoryCF) {
+          columnDesc.setInMemory(inMemoryCF);
+        }
+        if (cipher != null) {
+          byte[] keyBytes = new byte[cipher.getKeyLength()];
+          new SecureRandom().nextBytes(keyBytes);
+          columnDesc.setEncryptionType(cipher.getName());
+          columnDesc.setEncryptionKey(
+              EncryptionUtil.wrapKey(conf,
+                  User.getCurrent().getShortName(),
+                  new SecretKeySpec(keyBytes,
+                      cipher.getName())));
+        }
+        if (isNewCf) {
+          admin.addColumn(tableName, columnDesc);
+        } else {
+          admin.modifyColumn(tableName, columnDesc);
+        }
       }
+      LOG.info("Enabling table " + tableName);
+      admin.enableTable(tableName);
     }
-    LOG.info("Enabling table " + tableName);
-    admin.enableTable(tableName);
-    admin.close();
   }
 
   @Override
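
The same replacement pattern runs through every file in this patch: instead of constructing the deprecated HBaseAdmin directly, callers create a Connection through ConnectionFactory, obtain an Admin from it, and let try-with-resources close both. A minimal, self-contained Java sketch of that idiom follows; it assumes a reachable cluster, and the class name, table name "example", and column family "f" are hypothetical placeholders rather than values taken from this change set.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AdminUsageSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Connection is the heavyweight, shareable handle; Admin is cheap to obtain from it.
        // try-with-resources closes both, replacing the old
        // "new HBaseAdmin(conf)" plus finally { admin.close(); } idiom removed by this patch.
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Admin admin = connection.getAdmin()) {
          TableName tableName = TableName.valueOf("example"); // hypothetical table name
          if (!admin.tableExists(tableName)) {
            HTableDescriptor htd = new HTableDescriptor(tableName);
            htd.addFamily(new HColumnDescriptor("f")); // hypothetical column family
            admin.createTable(htd);
          }
        }
      }
    }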