Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java (revision 562014)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java (working copy)
@@ -57,6 +57,46 @@
   }
 
   /**
+   * @return proxy connection to master server for this instance
+   * @throws MasterNotRunningException
+   */
+  public HMasterInterface getMaster() throws MasterNotRunningException {
+    return this.connection.getMaster();
+  }
+
+  /** @return true if the master server is running */
+  public boolean isMasterRunning() {
+    return this.connection.isMasterRunning();
+  }
+
+  /**
+   * @param tableName Table to check.
+   * @return True if table exists already.
+   * @throws MasterNotRunningException
+   */
+  public boolean tableExists(final Text tableName) throws MasterNotRunningException {
+    if (this.master == null) {
+      throw new MasterNotRunningException("master has been shut down");
+    }
+
+    return connection.tableExists(tableName);
+  }
+
+  /**
+   * List all the userspace tables.  In other words, scan the META table.
+   *
+   * If we wanted this to be really fast, we could implement a special
+   * catalog table that just contains table names and their descriptors.
+   * Right now, it only exists as part of the META table's region info.
+   *
+   * @return an array of HTableDescriptors
+   * @throws IOException
+   */
+  public HTableDescriptor[] listTables() throws IOException {
+    return this.connection.listTables();
+  }
+
+  /**
    * Creates a new table
    *
    * @param desc table descriptor for table
@@ -381,19 +421,6 @@
   }
 
   /**
-   * @param tableName Table to check.
-   * @return True if table exists already.
-   * @throws MasterNotRunningException
-   */
-  public boolean tableExists(final Text tableName) throws MasterNotRunningException {
-    if (this.master == null) {
-      throw new MasterNotRunningException("master has been shut down");
-    }
-    
-    return connection.tableExists(tableName);
-  }
-  
-  /**
    * Add a column to an existing table
    *
    * @param tableName name of the table to add column to
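
For reference, a minimal sketch of driving the administrative entry point this hunk introduces. The table and column names are invented; the HBaseAdmin(conf) constructor and the tableExists/createTable/listTables calls are the ones visible in this diff, and the snippet simply assumes the hbase classes are on the classpath.

    import org.apache.hadoop.hbase.*;
    import org.apache.hadoop.io.Text;

    public class AdminSketch {
      public static void main(String[] args) throws Exception {
        HBaseConfiguration conf = new HBaseConfiguration();
        HBaseAdmin admin = new HBaseAdmin(conf);   // fails if no master is running
        if (!admin.tableExists(new Text("mytable"))) {
          HTableDescriptor desc = new HTableDescriptor("mytable");
          desc.addFamily(new HColumnDescriptor("contents:"));
          admin.createTable(desc);
        }
        for (HTableDescriptor d : admin.listTables()) {
          System.out.println(d.getName());          // getName() returns a Text
        }
      }
    }
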
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java (revision 562014)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java (working copy)
@@ -94,7 +94,7 @@
 
   private Server server;
   private HServerAddress address;
-  HClient client;
+  HConnection connection;
 
   long metaRescanInterval;
 
@@ -188,7 +188,7 @@
             region.regionName);
 
         try {
-          regionServer = client.getHRegionConnection(region.server);
+          regionServer = connection.getHRegionConnection(region.server);
           scannerId = regionServer.openScanner(region.regionName,
             METACOLUMNS, FIRST_ROW, System.currentTimeMillis(), null);
 
@@ -681,7 +681,7 @@
     this.address = new HServerAddress(server.getListenerAddress());
     conf.set(MASTER_ADDRESS, address.toString());
 
-    this.client = new HClient(conf);
+    this.connection = HConnectionManager.getConnection(conf);
 
     this.metaRescanInterval =
       conf.getLong("hbase.master.meta.thread.rescanfrequency", 60 * 1000);
@@ -1478,7 +1478,7 @@
       // scanned
       return false;
     }
-    server = client.getHRegionConnection(rootRegionLocation);
+    server = connection.getHRegionConnection(rootRegionLocation);
     scannerId = -1L;
 
     try {
@@ -1523,7 +1523,7 @@
     HRegionInterface server = null;
     long scannerId = -1L;
 
-    server = client.getHRegionConnection(r.server);
+    server = connection.getHRegionConnection(r.server);
     scannerId = server.openScanner(r.regionName, columns, startRow,
       System.currentTimeMillis(), null);
 
@@ -1595,7 +1595,7 @@
         return false;
       }
       metaRegionName = HGlobals.rootRegionInfo.regionName;
-      server = client.getHRegionConnection(rootRegionLocation);
+      server = connection.getHRegionConnection(rootRegionLocation);
       onlineMetaRegions.remove(regionInfo.getStartKey());
 
     } else {
@@ -1619,7 +1619,7 @@
           onlineMetaRegions.headMap(regionInfo.getRegionName()).lastKey());
       }
       metaRegionName = r.regionName;
-      server = client.getHRegionConnection(r.server);
+      server = connection.getHRegionConnection(r.server);
     }
 
     long clientId = rand.nextLong();
@@ -1734,7 +1734,7 @@
         return false;
       }
       metaRegionName = HGlobals.rootRegionInfo.regionName;
-      server = client.getHRegionConnection(rootRegionLocation);
+      server = connection.getHRegionConnection(rootRegionLocation);
 
     } else {
       if (!rootScanned
@@ -1762,7 +1762,7 @@
           onlineMetaRegions.headMap(region.getRegionName()).lastKey());
       }
       metaRegionName = r.regionName;
-      server = client.getHRegionConnection(r.server);
+      server = connection.getHRegionConnection(r.server);
     }
     LOG.info("updating row " + region.getRegionName() + " in table " +
       metaRegionName);
@@ -1898,12 +1898,12 @@
       onlineMetaRegions.get(onlineMetaRegions.
         headMap(newRegion.getTableDesc().getName()).lastKey());
     Text metaRegionName = m.regionName;
-    HRegionInterface connection = client.getHRegionConnection(m.server);
-    long scannerid = connection.openScanner(metaRegionName,
+    HRegionInterface r = connection.getHRegionConnection(m.server);
+    long scannerid = r.openScanner(metaRegionName,
       new Text[] { COL_REGIONINFO }, tableName, System.currentTimeMillis(), null);
     try {
-      KeyedData[] data = connection.next(scannerid);
+      KeyedData[] data = r.next(scannerid);
       // Test data and that the row for the data is for our table. If
       // table does not exist, scanner will return row after where our table
       // would be inserted if it exists so look for exact match on table
@@ -1915,16 +1915,16 @@
         throw new TableExistsException(tableName.toString());
       }
     } finally {
-      connection.close(scannerid);
+      r.close(scannerid);
     }
 
     // 2. Create the HRegion
-    HRegion r = HRegion.createHRegion(newRegion.regionId, newRegion.
+    HRegion region = HRegion.createHRegion(newRegion.regionId, newRegion.
       getTableDesc(), this.dir, this.conf);
 
     // 3. Insert into meta
-    HRegionInfo info = r.getRegionInfo();
-    Text regionName = r.getRegionName();
+    HRegionInfo info = region.getRegionInfo();
+    Text regionName = region.getRegionName();
     ByteArrayOutputStream byteValue = new ByteArrayOutputStream();
     DataOutputStream s = new DataOutputStream(byteValue);
     info.write(s);
@@ -1929,16 +1929,15 @@
     DataOutputStream s = new DataOutputStream(byteValue);
     info.write(s);
     long clientId = rand.nextLong();
-    long lockid = connection.
-      startUpdate(metaRegionName, clientId, regionName);
-    connection.put(metaRegionName, clientId, lockid, COL_REGIONINFO,
+    long lockid = r.startUpdate(metaRegionName, clientId, regionName);
+    r.put(metaRegionName, clientId, lockid, COL_REGIONINFO,
       byteValue.toByteArray());
-    connection.commit(metaRegionName, clientId, lockid,
+    r.commit(metaRegionName, clientId, lockid,
       System.currentTimeMillis());
 
     // 4. Close the new region to flush it to disk.  Close its log file too.
-    r.close();
-    r.getLog().closeAndDelete();
+    region.close();
+    region.getLog().closeAndDelete();
 
     // 5. Get it assigned to a server
     unassignedRegions.put(regionName, info);
@@ -2039,7 +2038,7 @@
 
     // Get a connection to a meta server
 
-    HRegionInterface server = client.getHRegionConnection(m.server);
+    HRegionInterface server = connection.getHRegionConnection(m.server);
 
     // Open a scanner on the meta region
 
@@ -2549,8 +2548,8 @@
       if (cmd.equals("stop")) {
         try {
-          HClient client = new HClient(conf);
-          client.shutdown();
+          HBaseAdmin adm = new HBaseAdmin(conf);
+          adm.shutdown();
         } catch (Throwable t) {
           LOG.error( "Can not stop master because " +
             StringUtils.stringifyException(t) );
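
The replacement pattern above: instead of each HMaster owning a private HClient, a shared HConnection is fetched from HConnectionManager and used for both master and region-server RPC. A hedged sketch of that lookup, using only calls that appear in this patch and assuming a Configuration conf is in scope:

    HConnection connection = HConnectionManager.getConnection(conf);
    if (connection.isMasterRunning()) {
      HMasterInterface master = connection.getMaster();
      // Region-server proxies come from the same connection, as in the
      // scanner code above:
      //   HRegionInterface server = connection.getHRegionConnection(address);
    }
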
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java (revision 562014)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java (working copy)
@@ -61,9 +61,9 @@
    */
   public static void merge(Configuration conf, FileSystem fs, Text tableName)
       throws IOException {
-    
-    HClient client = new HClient(conf);
-    boolean masterIsRunning = client.isMasterRunning();
+
+    HConnection connection = HConnectionManager.getConnection(conf);
+    boolean masterIsRunning = connection.isMasterRunning();
     if(tableName.equals(META_TABLE_NAME)) {
       if(masterIsRunning) {
         throw new IllegalStateException(
@@ -76,7 +76,7 @@
         throw new IllegalStateException(
           "HBase instance must be running to merge a normal table");
       }
-      new OnlineMerger(conf, fs, client, tableName).process();
+      new OnlineMerger(conf, fs, tableName).process();
     }
   }
 
@@ -195,17 +195,16 @@
 
   /** Instantiated to compact a normal user table */
   private static class OnlineMerger extends Merger {
-    private HClient client;
+    private HTable table;
     private HScannerInterface metaScanner;
     private HRegionInfo latestRegion;
 
-    OnlineMerger(Configuration conf, FileSystem fs, HClient client,
-        Text tableName) throws IOException {
+    OnlineMerger(Configuration conf, FileSystem fs, Text tableName)
+        throws IOException {
 
       super(conf, fs, tableName);
-      this.client = client;
-      client.openTable(META_TABLE_NAME);
-      this.metaScanner = client.obtainScanner(META_COLS, new Text());
+      this.table = new HTable(conf, META_TABLE_NAME);
+      this.metaScanner = table.obtainScanner(META_COLS, new Text());
       this.latestRegion = null;
     }
 
@@ -269,11 +268,11 @@
       }
       long lockid = -1L;
       try {
-        lockid = client.startUpdate(regionsToDelete[r]);
-        client.delete(lockid, COL_REGIONINFO);
-        client.delete(lockid, COL_SERVER);
-        client.delete(lockid, COL_STARTCODE);
-        client.commit(lockid);
+        lockid = table.startUpdate(regionsToDelete[r]);
+        table.delete(lockid, COL_REGIONINFO);
+        table.delete(lockid, COL_SERVER);
+        table.delete(lockid, COL_STARTCODE);
+        table.commit(lockid);
         lockid = -1L;
 
         if(LOG.isDebugEnabled()) {
@@ -282,7 +281,7 @@
       } finally {
         try {
           if(lockid != -1L) {
-            client.abort(lockid);
+            table.abort(lockid);
           }
 
         } catch(IOException iex) {
@@ -296,9 +295,9 @@
       newRegion.getRegionInfo().write(s);
       long lockid = -1L;
       try {
-        lockid = client.startUpdate(newRegion.getRegionName());
-        client.put(lockid, COL_REGIONINFO, byteValue.toByteArray());
-        client.commit(lockid);
+        lockid = table.startUpdate(newRegion.getRegionName());
+        table.put(lockid, COL_REGIONINFO, byteValue.toByteArray());
+        table.commit(lockid);
         lockid = -1L;
 
         if(LOG.isDebugEnabled()) {
@@ -308,7 +307,7 @@
       } finally {
         try {
           if(lockid != -1L) {
-            client.abort(lockid);
+            table.abort(lockid);
          }
 
        } catch(IOException iex) {
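
OnlineMerger's META edits above follow the row-lock idiom used throughout this patch: start an update, apply puts or deletes, commit, and abort in a finally block if the commit never happened. A condensed sketch of the same idiom against a hypothetical user table, assuming conf is in scope:

    HTable t = new HTable(conf, new Text("mytable"));
    long lockid = -1L;
    try {
      lockid = t.startUpdate(new Text("row1"));
      t.put(lockid, new Text("contents:"), "value".getBytes());
      t.commit(lockid);
      lockid = -1L;            // committed, so nothing to roll back
    } finally {
      if (lockid != -1L) {
        t.abort(lockid);       // release the row lock on failure
      }
    }
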
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (revision 562014)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (working copy)
@@ -1715,7 +1715,7 @@
     meta.commit(writeid, System.currentTimeMillis());
   }
 
-  static void addRegionToMETA(final HClient client,
+  static void addRegionToMETA(final Configuration conf,
     final Text table, final HRegion region,
     final HServerAddress serverAddress,
     final long startCode)
@@ -1720,17 +1720,17 @@
     final HServerAddress serverAddress,
     final long startCode)
   throws IOException {
-    client.openTable(table);
+    HTable t = new HTable(conf, table);
     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
     DataOutputStream out = new DataOutputStream(bytes);
     region.getRegionInfo().write(out);
-    long lockid = client.startUpdate(region.getRegionName());
-    client.put(lockid, COL_REGIONINFO, bytes.toByteArray());
-    client.put(lockid, COL_SERVER,
+    long lockid = t.startUpdate(region.getRegionName());
+    t.put(lockid, COL_REGIONINFO, bytes.toByteArray());
+    t.put(lockid, COL_SERVER,
       serverAddress.toString().getBytes(UTF8_ENCODING));
-    client.put(lockid, COL_STARTCODE,
+    t.put(lockid, COL_STARTCODE,
       String.valueOf(startCode).getBytes(UTF8_ENCODING));
-    client.commit(lockid);
+    t.commit(lockid);
     if (LOG.isDebugEnabled()) {
       LOG.info("Added region " + region.getRegionName() + " to table " +
         table);
@@ -1739,7 +1739,7 @@
 
   /**
    * Delete region from META table.
-   * @param client Client to use running update.
+   * @param conf Configuration object
    * @param table META table we are to delete region from.
    * @param regionName Region to remove.
    * @throws IOException
@@ -1744,15 +1744,15 @@
    * @param regionName Region to remove.
    * @throws IOException
    */
-  static void removeRegionFromMETA(final HClient client,
+  static void removeRegionFromMETA(final Configuration conf,
     final Text table, final Text regionName)
   throws IOException {
-    client.openTable(table);
-    long lockid = client.startUpdate(regionName);
-    client.delete(lockid, COL_REGIONINFO);
-    client.delete(lockid, COL_SERVER);
-    client.delete(lockid, COL_STARTCODE);
-    client.commit(lockid);
+    HTable t = new HTable(conf, table);
+    long lockid = t.startUpdate(regionName);
+    t.delete(lockid, COL_REGIONINFO);
+    t.delete(lockid, COL_SERVER);
+    t.delete(lockid, COL_STARTCODE);
+    t.commit(lockid);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Removed " + regionName + " from table " + table);
     }
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (revision 562014)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (working copy)
@@ -102,7 +102,6 @@
 
   /** Runs periodically to determine if regions need to be compacted or split */
   class SplitOrCompactChecker implements Runnable, RegionUnavailableListener {
-    HClient client = new HClient(conf);
 
     /**
      * {@inheritDoc}
@@ -207,7 +206,7 @@
       // Remove old region from META
       for (int tries = 0; tries < numRetries; tries++) {
         try {
-          HRegion.removeRegionFromMETA(client, tableToUpdate,
+          HRegion.removeRegionFromMETA(conf, tableToUpdate,
             region.getRegionName());
           break;
         } catch (IOException e) {
@@ -224,7 +223,7 @@
       for (int i = 0; i < newRegions.length; i++) {
         for (int tries = 0; tries < numRetries; tries ++) {
           try {
-            HRegion.addRegionToMETA(client, tableToUpdate, newRegions[i],
+            HRegion.addRegionToMETA(conf, tableToUpdate, newRegions[i],
              serverInfo.getServerAddress(), serverInfo.getStartCode());
            break;
          } catch(IOException e) {
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java (revision 562014)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java (working copy)
@@ -94,6 +94,11 @@
       row : this.tableServers.headMap(row).lastKey();
     return this.tableServers.get(serverKey);
   }
+
+  /** @return the connection */
+  HConnection getConnection() {
+    return connection;
+  }
 
   /**
    * Verifies that no update is in progress
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java (revision 562014)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java (working copy)
@@ -37,7 +37,7 @@
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
 
-import org.apache.hadoop.hbase.HClient;
+import org.apache.hadoop.hbase.HTable;
 import org.apache.hadoop.hbase.HScannerInterface;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.io.KeyedData;
@@ -59,7 +59,7 @@
 
   private Text m_tableName;
   Text[] m_cols;
-  HClient m_client;
+  HTable m_table;
 
  /**
   * Iterate over an HBase table data, return (HStoreKey, KeyedDataArrayWritable) pairs
@@ -78,7 +78,7 @@
     public TableRecordReader(Text startRow, Text endRow) throws IOException {
       LOG.debug("start construct");
       m_row = new TreeMap();
-      m_scanner = m_client.obtainScanner(m_cols, startRow);
+      m_scanner = m_table.obtainScanner(m_cols, startRow);
       m_endRow = endRow;
       LOG.debug("end construct");
     }
@@ -83,9 +83,7 @@
       LOG.debug("end construct");
     }
 
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.mapred.RecordReader#close()
-     */
+    /** {@inheritDoc} */
     public void close() throws IOException {
       LOG.debug("start close");
       m_scanner.close();
@@ -110,9 +108,7 @@
       return new KeyedDataArrayWritable();
     }
 
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.mapred.RecordReader#getPos()
-     */
+    /** {@inheritDoc} */
     public long getPos() {
       // This should be the ordinal tuple in the range;
       // not clear how to calculate...
@@ -119,9 +115,7 @@
       return 0;
     }
 
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.mapred.RecordReader#getProgress()
-     */
+    /** {@inheritDoc} */
     public float getProgress() {
       // Depends on the total number of tuples and getPos
       return 0;
@@ -165,9 +159,7 @@
 
   }
 
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.mapred.InputFormat#getRecordReader(org.apache.hadoop.mapred.InputSplit, org.apache.hadoop.mapred.JobConf, org.apache.hadoop.mapred.Reporter)
-   */
+  /** {@inheritDoc} */
   public RecordReader getRecordReader(InputSplit split,
     @SuppressWarnings("unused") JobConf job,
     @SuppressWarnings("unused") Reporter reporter) throws IOException {
@@ -185,7 +177,7 @@
   public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     LOG.debug("start getSplits");
 
-    Text[] startKeys = m_client.getStartKeys();
+    Text[] startKeys = m_table.getStartKeys();
     if(startKeys == null || startKeys.length == 0) {
       throw new IOException("Expecting at least one region");
     }
@@ -199,9 +191,7 @@
     return splits;
   }
 
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.mapred.JobConfigurable#configure(org.apache.hadoop.mapred.JobConf)
-   */
+  /** {@inheritDoc} */
   public void configure(JobConf job) {
     LOG.debug("start configure");
     Path[] tableNames = job.getInputPaths();
@@ -212,10 +202,9 @@
     for(int i = 0; i < m_cols.length; i++) {
       m_cols[i] = new Text(colNames[i]);
     }
-    m_client = new HClient(job);
     try {
-      m_client.openTable(m_tableName);
-    } catch(Exception e) {
+      m_table = new HTable(job, m_tableName);
+    } catch (Exception e) {
      LOG.error(e);
     }
     LOG.debug("end configure");
@@ -221,9 +210,7 @@
     LOG.debug("end configure");
   }
 
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.mapred.InputFormat#validateInput(org.apache.hadoop.mapred.JobConf)
-   */
+  /** {@inheritDoc} */
  public void validateInput(JobConf job) throws IOException {
 
    // expecting exactly one path
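
TableRecordReader above now scans through an HTable rather than an HClient. A hedged sketch of the same scan done directly, assuming conf is in scope, invented table and column names, and the TreeMap-based next() signature the tests in this patch use:

    HTable t = new HTable(conf, new Text("mytable"));
    HScannerInterface scanner =
      t.obtainScanner(new Text[] { new Text("contents:") }, new Text());
    try {
      HStoreKey key = new HStoreKey();
      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
      while (scanner.next(key, results)) {
        // key holds the current row; results maps column name to value
        results.clear();
      }
    } finally {
      scanner.close();
    }
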
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java (revision 562014)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java (working copy)
@@ -33,7 +33,7 @@
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.util.Progressable;
 
-import org.apache.hadoop.hbase.HClient;
+import org.apache.hadoop.hbase.HTable;
 import org.apache.hadoop.hbase.io.KeyedData;
 import org.apache.hadoop.hbase.io.KeyedDataArrayWritable;
 
@@ -57,7 +57,7 @@
    * and write to an HBase table
    */
   protected class TableRecordWriter implements RecordWriter {
-    private HClient m_client;
+    private HTable m_table;
 
     /**
      * Instantiate a TableRecordWriter with the HBase HClient for writing.
@@ -62,15 +62,13 @@
     /**
      * Instantiate a TableRecordWriter with the HBase HClient for writing.
      *
-     * @param client
+     * @param table
      */
-    public TableRecordWriter(HClient client) {
-      m_client = client;
+    public TableRecordWriter(HTable table) {
+      m_table = table;
     }
 
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.mapred.RecordWriter#close(org.apache.hadoop.mapred.Reporter)
-     */
+    /** {@inheritDoc} */
     public void close(@SuppressWarnings("unused") Reporter reporter) {}
 
     /**
@@ -87,11 +85,11 @@
 
       // start transaction
 
-      long xid = m_client.startUpdate(tKey);
+      long xid = m_table.startUpdate(tKey);
 
       for(int i = 0; i < columns.length; i++) {
         KeyedData column = columns[i];
-        m_client.put(xid, column.getKey().getColumn(), column.getData());
+        m_table.put(xid, column.getKey().getColumn(), column.getData());
       }
 
       // end transaction
@@ -96,7 +94,7 @@
 
       // end transaction
 
-      m_client.commit(xid);
+      m_table.commit(xid);
 
       LOG.debug("end write");
     }
@@ -105,6 +103,7 @@
   /* (non-Javadoc)
    * @see org.apache.hadoop.mapred.OutputFormatBase#getRecordWriter(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.mapred.JobConf, java.lang.String, org.apache.hadoop.util.Progressable)
    */
+  /** {@inheritDoc} */
   @Override
   @SuppressWarnings("unused")
   public RecordWriter getRecordWriter(FileSystem ignored, JobConf job,
@@ -114,10 +113,9 @@
     LOG.debug("start get writer");
     Text tableName = new Text(job.get(OUTPUT_TABLE));
-    HClient client = null;
+    HTable table = null;
 
     try {
-      client = new HClient(job);
-      client.openTable(tableName);
+      table = new HTable(job, tableName);
     } catch(Exception e) {
       LOG.error(e);
     }
@@ -122,12 +120,10 @@
       LOG.error(e);
     }
     LOG.debug("end get writer");
-    return new TableRecordWriter(client);
+    return new TableRecordWriter(table);
   }
 
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.mapred.OutputFormatBase#checkOutputSpecs(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.mapred.JobConf)
-   */
+  /** {@inheritDoc} */
   @Override
   @SuppressWarnings("unused")
   public void checkOutputSpecs(FileSystem ignored, JobConf job)
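
A sketch of how a job might wire in this output format. OUTPUT_TABLE is the constant getRecordWriter reads above; the table name is invented and the rest is ordinary mapred boilerplate assumed rather than defined by this patch:

    JobConf job = new JobConf();
    job.set(TableOutputFormat.OUTPUT_TABLE, "mytable");
    job.setOutputFormat(TableOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(KeyedDataArrayWritable.class);
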
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java (working copy)
@@ -36,7 +36,7 @@
     StaticTestEnvironment.initialize();
   }
 
-  protected Configuration conf;
+  protected volatile Configuration conf;
 
   protected HBaseTestCase() {
     super();
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java (working copy)
@@ -102,8 +102,7 @@
       SEQUENTIAL_WRITE,
       SCAN});
 
-  private final Configuration conf;
-  private final HClient client;
+  volatile Configuration conf;
   private boolean miniCluster = false;
   private int N = 1;
   private int R = ROWS_PER_GB;
@@ -109,7 +108,7 @@
   private int R = ROWS_PER_GB;
   private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
 
-  /*
+  /**
   * Regex to parse lines in input file passed to mapreduce task.
   */
   public static final Pattern LINE_PATTERN =
@@ -116,26 +115,38 @@
     Pattern.compile("startRow=(\\d+),\\s+" +
       "perClientRunRows=(\\d+),\\s+totalRows=(\\d+),\\s+clients=(\\d+)");
 
-  /*
+  /**
   * Enum for map metrics.  Keep it out here rather than inside in the Map
   * inner-class so we can find associated properties.
  */
-  protected static enum Counter {ELAPSED_TIME, ROWS}
+  protected static enum Counter {
+    /** elapsed time */
+    ELAPSED_TIME,
+    /** number of rows */
+    ROWS}
 
-  public PerformanceEvaluation(final HBaseConfiguration c) {
+  /**
+   * Constructor
+   * @param c Configuration object
+   */
+  public PerformanceEvaluation(final Configuration c) {
     this.conf = c;
-    this.client = new HClient(conf);
   }
 
-  /*
+  /**
   * Implementations can have their status set.
   */
   static interface Status {
+    /**
+     * Sets status
+     * @param msg status message
+     * @throws IOException
+     */
     void setStatus(final String msg) throws IOException;
   }
 
-  /*
+  /**
   * MapReduce job that runs a performance evaluation client in each map task.
   */
   public static class EvaluationMapTask extends MapReduceBase
@@ -140,6 +151,7 @@
   */
   public static class EvaluationMapTask extends MapReduceBase
     implements Mapper {
+    /** configuration parameter name that contains the command */
     public final static String CMD_KEY = "EvaluationMapTask.command";
     private String cmd;
     private PerformanceEvaluation pe;
@@ -144,12 +156,15 @@
     private String cmd;
     private PerformanceEvaluation pe;
 
+    /** {@inheritDoc} */
     @Override
     public void configure(JobConf j) {
       this.cmd = j.get(CMD_KEY);
-      this.pe = new PerformanceEvaluation(new HBaseConfiguration());
+
+      this.pe = new PerformanceEvaluation(j);
     }
 
+    /** {@inheritDoc} */
     public void map(@SuppressWarnings("unused") final WritableComparable key,
       final Writable value, final OutputCollector output,
       final Reporter reporter)
@@ -160,7 +175,7 @@
       int perClientRunRows = Integer.parseInt(m.group(2));
       int totalRows = Integer.parseInt(m.group(3));
       Status status = new Status() {
-        public void setStatus(String msg) throws IOException {
+        public void setStatus(String msg) {
           reporter.setStatus(msg);
         }
       };
@@ -182,8 +197,8 @@
   * @return True if we created the table.
   * @throws IOException
   */
-  private boolean checkTable(final HClient c) throws IOException {
-    HTableDescriptor [] extantTables = c.listTables();
+  private boolean checkTable(HBaseAdmin admin) throws IOException {
+    HTableDescriptor [] extantTables = admin.listTables();
     boolean tableExists = false;
     if (extantTables.length > 0) {
       // Check to see if our table already exists.  Print warning if it does.
@@ -196,7 +211,7 @@
       }
     }
     if (!tableExists) {
-      c.createTable(tableDescriptor);
+      admin.createTable(tableDescriptor);
       LOG.info("Table " + tableDescriptor + " created");
     }
     return !tableExists;
@@ -210,7 +225,7 @@
   */
   private void runNIsMoreThanOne(final String cmd)
   throws IOException {
-    checkTable(this.client);
+    checkTable(new HBaseAdmin(conf));
 
     // Run a mapreduce job.  Run as many maps as asked-for clients.
    // Before we start up the job, write out an input file with instruction
@@ -269,7 +284,6 @@
   */
  static abstract class Test {
     protected final Random rand = new Random(System.currentTimeMillis());
-    protected final HClient client;
     protected final int startRow;
     protected final int perClientRunRows;
     protected final int totalRows;
@@ -274,11 +288,13 @@
     protected final int perClientRunRows;
     protected final int totalRows;
     private final Status status;
+    protected HBaseAdmin admin;
+    protected HTable table;
+    protected volatile Configuration conf;
 
-    Test(final HClient c, final int startRow, final int perClientRunRows,
-        final int totalRows, final Status status) {
+    Test(final Configuration conf, final int startRow,
+        final int perClientRunRows, final int totalRows, final Status status) {
       super();
-      this.client = c;
       this.startRow = startRow;
       this.perClientRunRows = perClientRunRows;
       this.totalRows = totalRows;
@@ -283,6 +299,8 @@
       this.perClientRunRows = perClientRunRows;
       this.totalRows = totalRows;
       this.status = status;
+      this.table = null;
+      this.conf = conf;
     }
 
     /*
@@ -305,9 +323,11 @@
     }
 
     void testSetup() throws IOException {
-      this.client.openTable(tableDescriptor.getName());
+      this.admin = new HBaseAdmin(conf);
+      this.table = new HTable(conf, tableDescriptor.getName());
     }
-    
+
+    @SuppressWarnings("unused")
     void testTakedown() throws IOException {
       // Empty
     }
@@ -355,9 +375,9 @@
   }
 
   class RandomReadTest extends Test {
-    RandomReadTest(final HClient c, final int startRow,
-      final int perClientRunRows, final int totalRows, final Status status) {
-      super(c, startRow, perClientRunRows, totalRows, status);
+    RandomReadTest(final Configuration conf, final int startRow,
+      final int perClientRunRows, final int totalRows, final Status status) {
+      super(conf, startRow, perClientRunRows, totalRows, status);
     }
 
     @Override
@@ -362,9 +382,10 @@
     @Override
     void testRow(@SuppressWarnings("unused") final int i) throws IOException {
-      this.client.get(getRandomRow(), COLUMN_NAME);
+      this.table.get(getRandomRow(), COLUMN_NAME);
     }
-    
+
+    @Override
     protected int getReportingPeriod() {
       //
       return this.perClientRunRows / 100;
@@ -377,9 +398,9 @@
   }
 
   class RandomWriteTest extends Test {
-    RandomWriteTest(final HClient c, final int startRow,
-      final int perClientRunRows, final int totalRows, final Status status) {
-      super(c, startRow, perClientRunRows, totalRows, status);
+    RandomWriteTest(final Configuration conf, final int startRow,
+      final int perClientRunRows, final int totalRows, final Status status) {
+      super(conf, startRow, perClientRunRows, totalRows, status);
     }
 
     @Override
@@ -385,9 +406,9 @@
     @Override
     void testRow(@SuppressWarnings("unused") final int i) throws IOException {
       Text row = getRandomRow();
-      long lockid = client.startUpdate(row);
-      client.put(lockid, COLUMN_NAME, generateValue());
-      client.commit(lockid);
+      long lockid = table.startUpdate(row);
+      table.put(lockid, COLUMN_NAME, generateValue());
+      table.commit(lockid);
     }
 
     @Override
@@ -401,9 +422,9 @@
     private HStoreKey key = new HStoreKey();
     private TreeMap results = new TreeMap();
 
-    ScanTest(final HClient c, final int startRow, final int perClientRunRows,
-        final int totalRows, final Status status) {
-      super(c, startRow, perClientRunRows, totalRows, status);
+    ScanTest(final Configuration conf, final int startRow,
+        final int perClientRunRows, final int totalRows, final Status status) {
+      super(conf, startRow, perClientRunRows, totalRows, status);
     }
 
     @Override
@@ -409,7 +430,7 @@
     @Override
     void testSetup() throws IOException {
       super.testSetup();
-      this.testScanner = client.obtainScanner(new Text[] {COLUMN_NAME},
+      this.testScanner = table.obtainScanner(new Text[] {COLUMN_NAME},
         new Text(Integer.toString(this.startRow)));
     }
 
@@ -435,9 +456,9 @@
   }
 
   class SequentialReadTest extends Test {
-    SequentialReadTest(final HClient c, final int startRow,
+    SequentialReadTest(final Configuration conf, final int startRow,
       final int perClientRunRows, final int totalRows, final Status status) {
-      super(c, startRow, perClientRunRows, totalRows, status);
+      super(conf, startRow, perClientRunRows, totalRows, status);
     }
 
     @Override
@@ -442,7 +463,7 @@
     @Override
     void testRow(final int i) throws IOException {
-      client.get(new Text(Integer.toString(i)), COLUMN_NAME);
+      table.get(new Text(Integer.toString(i)), COLUMN_NAME);
     }
 
     @Override
@@ -452,9 +473,9 @@
   }
 
   class SequentialWriteTest extends Test {
-    SequentialWriteTest(final HClient c, final int startRow,
+    SequentialWriteTest(final Configuration conf, final int startRow,
       final int perClientRunRows, final int totalRows, final Status status) {
-      super(c, startRow, perClientRunRows, totalRows, status);
+      super(conf, startRow, perClientRunRows, totalRows, status);
     }
 
     @Override
@@ -459,9 +480,9 @@
     @Override
     void testRow(final int i) throws IOException {
-      long lockid = client.startUpdate(new Text(Integer.toString(i)));
-      client.put(lockid, COLUMN_NAME, generateValue());
-      client.commit(lockid);
+      long lockid = table.startUpdate(new Text(Integer.toString(i)));
+      table.put(lockid, COLUMN_NAME, generateValue());
+      table.commit(lockid);
     }
 
     @Override
@@ -477,7 +498,7 @@
       perClientRunRows + " rows");
     long totalElapsedTime = 0;
     if (cmd.equals(RANDOM_READ)) {
-      Test t = new RandomReadTest(this.client, startRow, perClientRunRows,
+      Test t = new RandomReadTest(this.conf, startRow, perClientRunRows,
         totalRows, status);
       totalElapsedTime = t.test();
     } else if (cmd.equals(RANDOM_READ_MEM)) {
@@ -483,19 +504,19 @@
     } else if (cmd.equals(RANDOM_READ_MEM)) {
       throw new UnsupportedOperationException("Not yet implemented");
     } else if (cmd.equals(RANDOM_WRITE)) {
-      Test t = new RandomWriteTest(this.client, startRow, perClientRunRows,
+      Test t = new RandomWriteTest(this.conf, startRow, perClientRunRows,
         totalRows, status);
       totalElapsedTime = t.test();
     } else if (cmd.equals(SCAN)) {
-      Test t = new ScanTest(this.client, startRow, perClientRunRows,
+      Test t = new ScanTest(this.conf, startRow, perClientRunRows,
         totalRows, status);
       totalElapsedTime = t.test();
     } else if (cmd.equals(SEQUENTIAL_READ)) {
-      Test t = new SequentialReadTest(this.client, startRow, perClientRunRows,
+      Test t = new SequentialReadTest(this.conf, startRow, perClientRunRows,
         totalRows, status);
       totalElapsedTime = t.test();
     } else if (cmd.equals(SEQUENTIAL_WRITE)) {
-      Test t = new SequentialWriteTest(this.client, startRow, perClientRunRows,
+      Test t = new SequentialWriteTest(this.conf, startRow, perClientRunRows,
         totalRows, status);
       totalElapsedTime = t.test();
     } else {
@@ -513,9 +534,11 @@
         LOG.info(msg);
       }
     };
-    
+
+    HBaseAdmin admin = null;
     try {
-      checkTable(this.client);
+      admin = new HBaseAdmin(this.conf);
+      checkTable(admin);
 
       if (cmd.equals(RANDOM_READ) || cmd.equals(RANDOM_READ_MEM) ||
           cmd.equals(SCAN) || cmd.equals(SEQUENTIAL_READ)) {
@@ -529,7 +552,9 @@
       LOG.error("Failed", e);
     } finally {
       LOG.info("Deleting table " + tableDescriptor.getName());
-      this.client.deleteTable(tableDescriptor.getName());
+      if (admin != null) {
+        admin.deleteTable(tableDescriptor.getName());
+      }
     }
   }
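
The through-line of the PerformanceEvaluation changes: schema operations go through HBaseAdmin, data operations through HTable, and both are built from a plain Configuration so map tasks can reuse the JobConf. A hedged sketch of the setup each Test now performs, assuming conf, tableDescriptor and COLUMN_NAME are in scope as in the class above:

    HBaseAdmin admin = new HBaseAdmin(conf);
    if (!admin.tableExists(tableDescriptor.getName())) {
      admin.createTable(tableDescriptor);
    }
    HTable table = new HTable(conf, tableDescriptor.getName());
    byte[] value = table.get(new Text("row_0"), COLUMN_NAME);  // read path
    // writes use table.startUpdate / table.put / table.commit as above
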
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java (working copy)
@@ -33,7 +33,7 @@
   private byte[] value;
 
   private HTableDescriptor desc = null;
-  private HClient client = null;
+  private HTable table = null;
 
   /** constructor */
   public TestBatchUpdate() {
@@ -51,12 +51,12 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    this.client = new HClient(conf);
     this.desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
     try {
-      client.createTable(desc);
-      client.openTable(desc.getName());
+      HBaseAdmin admin = new HBaseAdmin(conf);
+      admin.createTable(desc);
+      table = new HTable(conf, desc.getName());
 
     } catch (Exception e) {
       e.printStackTrace();
@@ -67,7 +67,7 @@
   /** the test case */
   public void testBatchUpdate() {
     try {
-      client.commitBatch(-1L);
+      table.commitBatch(-1L);
 
     } catch (IllegalStateException e) {
       // expected
@@ -76,21 +76,12 @@
       fail();
     }
 
-    long lockid = client.startBatchUpdate(new Text("row1"));
+    long lockid = table.startBatchUpdate(new Text("row1"));
 
     try {
-      client.openTable(HConstants.META_TABLE_NAME);
-
-    } catch (IllegalStateException e) {
-      // expected
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
-    try {
       try {
         @SuppressWarnings("unused")
-        long dummy = client.startUpdate(new Text("row2"));
+        long dummy = table.startUpdate(new Text("row2"));
       } catch (IllegalStateException e) {
         // expected
       } catch (Exception e) {
@@ -97,16 +88,16 @@
         e.printStackTrace();
         fail();
       }
-      client.put(lockid, CONTENTS, value);
-      client.delete(lockid, CONTENTS);
-      client.commitBatch(lockid);
+      table.put(lockid, CONTENTS, value);
+      table.delete(lockid, CONTENTS);
+      table.commitBatch(lockid);
 
-      lockid = client.startBatchUpdate(new Text("row2"));
-      client.put(lockid, CONTENTS, value);
-      client.commit(lockid);
+      lockid = table.startBatchUpdate(new Text("row2"));
+      table.put(lockid, CONTENTS, value);
+      table.commit(lockid);
 
       Text[] columns = { CONTENTS };
-      HScannerInterface scanner = client.obtainScanner(columns, new Text());
+      HScannerInterface scanner = table.obtainScanner(columns, new Text());
       HStoreKey key = new HStoreKey();
       TreeMap results = new TreeMap();
       while(scanner.next(key, results)) {
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBloomFilters.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBloomFilters.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBloomFilters.java (working copy)
@@ -29,7 +29,7 @@
   private static final Text CONTENTS = new Text("contents:");
 
   private HTableDescriptor desc = null;
-  private HClient client = null;
+  private HTable table = null;
 
   private static final Text[] rows = {
     new Text("wmjwjzyv"),
@@ -150,6 +150,7 @@
     Logger.getLogger(HStore.class).setLevel(Level.DEBUG);
   }
 
+  /** {@inheritDoc} */
   @Override
   public void setUp() {
     try {
@@ -154,7 +155,6 @@
   public void setUp() {
     try {
       super.setUp();
-      this.client = new HClient(conf);
       this.desc = new HTableDescriptor("test");
       desc.addFamily(
         new HColumnDescriptor(CONTENTS, 1, HColumnDescriptor.CompressionType.NONE,
@@ -164,8 +164,9 @@
             12499,         // number of bits
             4              // number of hash functions
         )));               // false positive = 0.0000001
-      client.createTable(desc);
-      client.openTable(desc.getName());
+      HBaseAdmin admin = new HBaseAdmin(conf);
+      admin.createTable(desc);
+      table = new HTable(conf, desc.getName());
 
       // Store some values
 
@@ -172,9 +173,9 @@
       for(int i = 0; i < 100; i++) {
         Text row = rows[i];
         String value = row.toString();
-        long lockid = client.startUpdate(rows[i]);
-        client.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
-        client.commit(lockid);
+        long lockid = table.startUpdate(rows[i]);
+        table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
+        table.commit(lockid);
       }
     } catch (Exception e) {
       e.printStackTrace();
@@ -195,7 +196,7 @@
 
     try {
       for(int i = 0; i < testKeys.length; i++) {
-        byte[] value = client.get(testKeys[i], CONTENTS);
+        byte[] value = table.get(testKeys[i], CONTENTS);
         if(value != null && value.length != 0) {
           System.err.println("non existant key: " + testKeys[i] +
             " returned value: " + new String(value, HConstants.UTF8_ENCODING));
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java (working copy)
@@ -30,7 +30,7 @@
  * Tests region server failover when a region server exits.
  */
 public class TestCleanRegionServerExit extends HBaseClusterTestCase {
-  private HClient client;
+  private HTable table;
 
   /** constructor */
   public TestCleanRegionServerExit() {
@@ -48,7 +48,6 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    this.client = new HClient(conf);
   }
 
   /**
@@ -57,7 +56,8 @@
   */
   public void testCleanRegionServerExit() throws IOException {
     // When the META table can be opened, the region servers are running
-    this.client.openTable(HConstants.META_TABLE_NAME);
+    @SuppressWarnings("unused")
+    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
     // Put something into the meta table.
     String tableName = getName();
     HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -62,14 +62,15 @@
     String tableName = getName();
     HTableDescriptor desc = new HTableDescriptor(tableName);
     desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
-    this.client.createTable(desc);
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
     // put some values in the table
-    this.client.openTable(new Text(tableName));
+    this.table = new HTable(conf, new Text(tableName));
     Text row = new Text("row1");
-    long lockid = client.startUpdate(row);
-    client.put(lockid, HConstants.COLUMN_FAMILY,
+    long lockid = table.startUpdate(row);
+    table.put(lockid, HConstants.COLUMN_FAMILY,
       tableName.getBytes(HConstants.UTF8_ENCODING));
-    client.commit(lockid);
+    table.commit(lockid);
     // Start up a new region server to take over serving of root and meta
     // after we shut down the current meta/root host.
     this.cluster.startRegionServer();
@@ -81,7 +82,7 @@
     // to a different server
 
     HScannerInterface scanner =
-      client.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY, new Text());
+      table.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY, new Text());
     try {
       HStoreKey key = new HStoreKey();
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java (working copy)
@@ -32,7 +32,8 @@
 public class TestHBaseCluster extends HBaseClusterTestCase {
 
   private HTableDescriptor desc;
-  private HClient client;
+  private HBaseAdmin admin;
+  private HTable table;
 
   /** constructor */
   public TestHBaseCluster() {
@@ -38,7 +39,8 @@
   public TestHBaseCluster() {
     super(true);
     this.desc = null;
-    this.client = null;
+    this.admin = null;
+    this.table = null;
   }
 
   /**
@@ -55,6 +57,8 @@
     cleanup();
   }
 
+  /** {@inheritDoc} */
+  @Override
   public void tearDown() throws Exception {
     super.tearDown();
   }
@@ -69,11 +73,12 @@
   private static final String ANCHORSTR = "anchorstr";
 
   private void setup() throws IOException {
-    client = new HClient(conf);
     desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(CONTENTS.toString()));
     desc.addFamily(new HColumnDescriptor(ANCHOR.toString()));
-    client.createTable(desc);
+    admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+    table = new HTable(conf, desc.getName());
   }
 
   // Test basic functionality. Writes to contents:basic and anchor:anchornum-*
@@ -81,15 +86,13 @@
   private void basic() throws IOException {
     long startTime = System.currentTimeMillis();
 
-    client.openTable(desc.getName());
-
     // Write out a bunch of values
 
     for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
-      long writeid = client.startUpdate(new Text("row_" + k));
-      client.put(writeid, CONTENTS_BASIC, (CONTENTSTR + k).getBytes());
-      client.put(writeid, new Text(ANCHORNUM + k), (ANCHORSTR + k).getBytes());
-      client.commit(writeid);
+      long writeid = table.startUpdate(new Text("row_" + k));
+      table.put(writeid, CONTENTS_BASIC, (CONTENTSTR + k).getBytes());
+      table.put(writeid, new Text(ANCHORNUM + k), (ANCHORSTR + k).getBytes());
+      table.commit(writeid);
     }
     System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
       + ((System.currentTimeMillis() - startTime) / 1000.0));
@@ -102,7 +105,7 @@
 
     for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
       Text rowlabel = new Text("row_" + k);
 
-      byte bodydata[] = client.get(rowlabel, CONTENTS_BASIC);
+      byte bodydata[] = table.get(rowlabel, CONTENTS_BASIC);
       assertNotNull(bodydata);
       String bodystr = new String(bodydata).toString().trim();
       String teststr = CONTENTSTR + k;
@@ -110,7 +113,7 @@
         + "), expected: '" + teststr + "' got: '" + bodystr + "'",
         bodystr, teststr);
       collabel = new Text(ANCHORNUM + k);
-      bodydata = client.get(rowlabel, collabel);
+      bodydata = table.get(rowlabel, collabel);
       bodystr = new String(bodydata).toString().trim();
       teststr = ANCHORSTR + k;
       assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
@@ -130,7 +133,7 @@
 
     long startTime = System.currentTimeMillis();
 
-    HScannerInterface s = client.obtainScanner(cols, new Text());
+    HScannerInterface s = table.obtainScanner(cols, new Text());
     try {
 
       int contentsFetched = 0;
@@ -178,7 +181,7 @@
   }
 
   private void listTables() throws IOException {
-    HTableDescriptor[] tables = client.listTables();
+    HTableDescriptor[] tables = admin.listTables();
     assertEquals(1, tables.length);
     assertEquals(desc.getName(), tables[0].getName());
     Set families = tables[0].families().keySet();
@@ -191,6 +194,6 @@
 
     // Delete the table we created
 
-    client.deleteTable(desc.getName());
+    admin.deleteTable(desc.getName());
   }
 }
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java (working copy)
@@ -25,6 +25,7 @@
 /**
  * Test HClient.
  */
+@Deprecated
 public class TestHClient extends HBaseClusterTestCase {
   private Log LOG = LogFactory.getLog(this.getClass().getName());
   private HClient client;
@@ -29,6 +30,8 @@
   private Log LOG = LogFactory.getLog(this.getClass().getName());
   private HClient client;
 
+  /** {@inheritDoc} */
+  @Override
   public void setUp() throws Exception {
     super.setUp();
     this.client = new HClient(this.conf);
@@ -34,6 +37,9 @@
     this.client = new HClient(this.conf);
   }
 
+  /** the test
+   * @throws Exception
+   */
   public void testCommandline() throws Exception {
     final String m = "--master=" + this.conf.get(HConstants.MASTER_ADDRESS);
     LOG.info("Creating table");
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java (working copy)
@@ -21,6 +21,7 @@
 
 import org.apache.hadoop.io.Text;
 
+/** tests administrative functions */
 public class TestMasterAdmin extends HBaseClusterTestCase {
   private static final Text COLUMN_NAME = new Text("col1:");
   private static HTableDescriptor testDesc;
@@ -29,17 +30,20 @@
     testDesc.addFamily(new HColumnDescriptor(COLUMN_NAME.toString()));
   }
 
-  private HClient client;
+  private HBaseAdmin admin;
 
+  /** constructor */
   public TestMasterAdmin() {
     super(true);
-    client = new HClient(conf);
+    admin = null;
   }
 
+  /** the test */
   public void testMasterAdmin() {
     try {
-      client.createTable(testDesc);
-      client.disableTable(testDesc.getName());
+      admin = new HBaseAdmin(conf);
+      admin.createTable(testDesc);
+      admin.disableTable(testDesc.getName());
 
     } catch(Exception e) {
       e.printStackTrace();
@@ -48,7 +52,8 @@
 
     try {
       try {
-        client.openTable(testDesc.getName());
+        @SuppressWarnings("unused")
+        HTable table = new HTable(conf, testDesc.getName());
       } catch(IllegalStateException e) {
         // Expected
@@ -54,10 +59,10 @@
         // Expected
       }
 
-      client.addColumn(testDesc.getName(), new HColumnDescriptor("col2:"));
-      client.enableTable(testDesc.getName());
+      admin.addColumn(testDesc.getName(), new HColumnDescriptor("col2:"));
+      admin.enableTable(testDesc.getName());
       try {
-        client.deleteColumn(testDesc.getName(), new Text("col2:"));
+        admin.deleteColumn(testDesc.getName(), new Text("col2:"));
 
       } catch(TableNotDisabledException e) {
         // Expected
@@ -63,8 +68,8 @@
         // Expected
       }
 
-      client.disableTable(testDesc.getName());
-      client.deleteColumn(testDesc.getName(), new Text("col2:"));
+      admin.disableTable(testDesc.getName());
+      admin.deleteColumn(testDesc.getName(), new Text("col2:"));
 
     } catch(Exception e) {
       e.printStackTrace();
@@ -72,7 +77,7 @@
 
     } finally {
       try {
-        client.deleteTable(testDesc.getName());
+        admin.deleteTable(testDesc.getName());
 
       } catch(Exception e) {
         e.printStackTrace();
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java (working copy)
@@ -30,7 +30,7 @@
   private static final byte[] value = { 1, 2, 3, 4 };
 
   private HTableDescriptor desc = null;
-  private HClient client = null;
+  private HTable table = null;
 
   /**
    * {@inheritDoc}
@@ -38,12 +38,12 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    this.client = new HClient(conf);
     this.desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
     try {
-      client.createTable(desc);
-      client.openTable(desc.getName());
+      HBaseAdmin admin = new HBaseAdmin(conf);
+      admin.createTable(desc);
+      table = new HTable(conf, desc.getName());
 
     } catch (Exception e) {
       e.printStackTrace();
@@ -54,10 +54,10 @@
   /** the test */
   public void testMultipleUpdates() {
     try {
-      long lockid = client.startUpdate(new Text("row1"));
+      long lockid = table.startUpdate(new Text("row1"));
 
       try {
-        long lockid2 = client.startUpdate(new Text("row2"));
+        long lockid2 = table.startUpdate(new Text("row2"));
         throw new Exception("second startUpdate returned lock id " + lockid2);
 
       } catch (IllegalStateException i) {
@@ -64,24 +64,10 @@
         // expected
       }
 
-      try {
-        client.openTable(HConstants.META_TABLE_NAME);
-
-      } catch (IllegalStateException i) {
-        // expected
-      }
-
       long invalidid = 42;
       try {
-        client.put(invalidid, CONTENTS, value);
-
-      } catch (IllegalArgumentException i) {
-        // expected
-      }
-
-      try {
-        client.delete(invalidid, CONTENTS);
+        table.put(invalidid, CONTENTS, value);
 
       } catch (IllegalArgumentException i) {
         // expected
@@ -88,7 +74,7 @@
       }
 
       try {
-        client.put(invalidid, CONTENTS, value);
+        table.delete(invalidid, CONTENTS);
 
       } catch (IllegalArgumentException i) {
         // expected
@@ -95,7 +81,7 @@
       }
 
       try {
-        client.abort(invalidid);
+        table.abort(invalidid);
 
       } catch (IllegalArgumentException i) {
         // expected
@@ -102,7 +88,7 @@
       }
 
       try {
-        client.commit(invalidid);
+        table.commit(invalidid);
 
       } catch (IllegalArgumentException i) {
         // expected
@@ -108,7 +94,7 @@
         // expected
       }
 
-      client.abort(lockid);
+      table.abort(lockid);
 
     } catch (Exception e) {
       System.err.println("unexpected exception");
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java (working copy)
@@ -30,7 +30,7 @@
  * Tests region server failover when a region server exits.
  */
 public class TestRegionServerAbort extends HBaseClusterTestCase {
-  private HClient client;
+  private HTable table;
 
   /** constructor */
   public TestRegionServerAbort() {
@@ -48,7 +48,6 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    this.client = new HClient(conf);
   }
 
   /**
@@ -57,7 +56,8 @@
   */
   public void testRegionServerAbort() throws IOException {
     // When the META table can be opened, the region servers are running
-    this.client.openTable(HConstants.META_TABLE_NAME);
+    @SuppressWarnings("unused")
+    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
     // Put something into the meta table.
     String tableName = getName();
     HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -62,14 +62,15 @@
     String tableName = getName();
     HTableDescriptor desc = new HTableDescriptor(tableName);
     desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
-    this.client.createTable(desc);
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
     // put some values in the table
-    this.client.openTable(new Text(tableName));
+    this.table = new HTable(conf, new Text(tableName));
     Text row = new Text("row1");
-    long lockid = client.startUpdate(row);
-    client.put(lockid, HConstants.COLUMN_FAMILY,
+    long lockid = table.startUpdate(row);
+    table.put(lockid, HConstants.COLUMN_FAMILY,
       tableName.getBytes(HConstants.UTF8_ENCODING));
-    client.commit(lockid);
+    table.commit(lockid);
     // Start up a new region server to take over serving of root and meta
     // after we shut down the current meta/root host.
     this.cluster.startRegionServer();
@@ -81,7 +82,7 @@
     // to a different server
 
     HScannerInterface scanner =
-      client.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY, new Text());
+      table.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY, new Text());
     try {
       HStoreKey key = new HStoreKey();
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java (working copy)
@@ -30,6 +30,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.filter.RegExpRowFilter;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
@@ -64,7 +65,7 @@
    */
   public void testScannerFilter() throws Exception {
     // Setup HClient, ensure that it is running correctly
-    HClient client = new HClient(this.conf);
+    HBaseAdmin admin = new HBaseAdmin(conf);
 
     // Setup colkeys to be inserted
     HTableDescriptor htd = new HTableDescriptor(getName());
@@ -75,28 +76,28 @@
         (char)(FIRST_COLKEY + i), ':' }));
       htd.addFamily(new HColumnDescriptor(colKeys[i].toString()));
     }
-    client.createTable(htd);
+    admin.createTable(htd);
     assertTrue("Table with name " + tableName + " created successfully.",
-      client.tableExists(tableName));
-    assertTrue("Master is running.", client.isMasterRunning());
+      admin.tableExists(tableName));
+    assertTrue("Master is running.", admin.isMasterRunning());
 
     // Enter data
-    client.openTable(tableName);
+    HTable table = new HTable(conf, tableName);
     for (char i = FIRST_ROWKEY; i <= LAST_ROWKEY; i++) {
       Text rowKey = new Text(new String(new char[] { i }));
-      long lockID = client.startUpdate(rowKey);
+      long lockID = table.startUpdate(rowKey);
       for (char j = 0; j < colKeys.length; j++) {
-        client.put(lockID, colKeys[j], (i >= FIRST_BAD_RANGE_ROWKEY &&
+        table.put(lockID, colKeys[j], (i >= FIRST_BAD_RANGE_ROWKEY &&
           i <= LAST_BAD_RANGE_ROWKEY)? BAD_BYTES : GOOD_BYTES);
       }
-      client.commit(lockID);
+      table.commit(lockID);
     }
 
-    regExpFilterTest(client, colKeys);
-    rowFilterSetTest(client, colKeys);
+    regExpFilterTest(table, colKeys);
+    rowFilterSetTest(table, colKeys);
   }
 
-  private void regExpFilterTest(HClient client, Text[] colKeys)
+  private void regExpFilterTest(HTable table, Text[] colKeys)
   throws Exception {
     // Get the filter.  The RegExpRowFilter used should filter out vowels.
     Map colCriteria = new TreeMap();
@@ -106,7 +107,7 @@
     RowFilterInterface filter = new RegExpRowFilter("[^aeiou]", colCriteria);
 
     // Create the scanner from the filter.
-    HScannerInterface scanner = client.obtainScanner(colKeys, new Text(new
+    HScannerInterface scanner = table.obtainScanner(colKeys, new Text(new
       String(new char[] { FIRST_ROWKEY })), filter);
 
     // Iterate over the scanner, ensuring that results match the passed regex.
@@ -113,7 +114,7 @@
     iterateOnScanner(scanner, "[^aei-qu]");
   }
 
-  private void rowFilterSetTest(HClient client, Text[] colKeys)
+  private void rowFilterSetTest(HTable table, Text[] colKeys)
   throws Exception {
     // Get the filter.  The RegExpRowFilter used should filter out vowels and
     // the WhileMatchRowFilter(StopRowFilter) should filter out all rows
@@ -125,7 +126,7 @@
       new RowFilterSet(RowFilterSet.Operator.MUST_PASS_ALL, filterSet);
 
     // Create the scanner from the filter.
-    HScannerInterface scanner = client.obtainScanner(colKeys, new Text(new
+    HScannerInterface scanner = table.obtainScanner(colKeys, new Text(new
       String(new char[] { FIRST_ROWKEY })), filter);
 
     // Iterate over the scanner, ensuring that results match the passed regex.
@@ -159,9 +160,10 @@
    */
   public void testSplitDeleteOneAddTwoRegions() throws IOException {
     // First add a new table.  Its intial region will be added to META region.
-    HClient client = new HClient(this.conf);
-    client.createTable(new HTableDescriptor(getName()));
-    List regions = scan(client, HConstants.META_TABLE_NAME);
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    Text tableName = new Text(getName());
+    admin.createTable(new HTableDescriptor(tableName.toString()));
+    List regions = scan(conf, HConstants.META_TABLE_NAME);
     assertEquals("Expected one region", regions.size(), 1);
     HRegionInfo region = regions.get(0);
     assertTrue("Expected region named for test",
@@ -168,7 +170,7 @@
       region.regionName.toString().startsWith(getName()));
     // Now do what happens at split time; remove old region and then add two
     // new ones in its place.
-    HRegion.removeRegionFromMETA(client, HConstants.META_TABLE_NAME,
+    HRegion.removeRegionFromMETA(conf, HConstants.META_TABLE_NAME,
       region.regionName);
     HTableDescriptor desc = region.tableDesc;
     Path homedir = new Path(getName());
@@ -181,10 +183,10 @@
         homedir, this.conf, null));
     try {
       for (HRegion r : newRegions) {
-        HRegion.addRegionToMETA(client, HConstants.META_TABLE_NAME, r,
+        HRegion.addRegionToMETA(conf, HConstants.META_TABLE_NAME, r,
          this.cluster.getHMasterAddress(), -1L);
       }
-      regions = scan(client, HConstants.META_TABLE_NAME);
+      regions = scan(conf, HConstants.META_TABLE_NAME);
       assertEquals("Should be two regions only", 2, regions.size());
     } finally {
       for (HRegion r : newRegions) {
@@ -194,7 +196,7 @@
     }
   }
 
-  private List scan(final HClient client, final Text table)
+  private List scan(final Configuration conf, final Text table)
   throws IOException {
     List regions = new ArrayList();
     HRegionInterface regionServer = null;
@@ -200,9 +202,9 @@
     HRegionInterface regionServer = null;
     long scannerId = -1L;
     try {
-      client.openTable(table);
-      HRegionLocation rl = client.getRegionLocation(table);
-      regionServer = client.getHRegionConnection(rl.getServerAddress());
+      HTable t = new HTable(conf, table);
+      HRegionLocation rl = t.getRegionLocation(table);
+      regionServer = t.getConnection().getHRegionConnection(rl.getServerAddress());
       scannerId = regionServer.openScanner(rl.getRegionInfo().getRegionName(),
         HMaster.METACOLUMNS, new Text(), System.currentTimeMillis(), null);
       while (true) {
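
For reference, the filtered scan that TestScanner2 now drives through HTable. The two-argument RegExpRowFilter constructor and the three-argument obtainScanner overload both appear in the hunks above; conf and colKeys are assumed in scope and the table name is invented:

    Map<Text, byte[]> colCriteria = new TreeMap<Text, byte[]>();
    RowFilterInterface filter = new RegExpRowFilter("[^aeiou]", colCriteria);
    HTable table = new HTable(conf, new Text("mytable"));
    HScannerInterface scanner =
      table.obtainScanner(colKeys, new Text("a"), filter);
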
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java (working copy)
@@ -25,6 +25,7 @@
 
 /** Tests table creation restrictions*/
 public class TestTable extends HBaseClusterTestCase {
+  /** constructor */
   public TestTable() {
     super(true);
   }
@@ -29,11 +30,15 @@
     super(true);
   }
 
+  /**
+   * the test
+   * @throws IOException
+   */
   public void testCreateTable() throws IOException {
-    final HClient client = new HClient(conf);
+    final HBaseAdmin admin = new HBaseAdmin(conf);
     String msg = null;
     try {
-      client.createTable(HGlobals.rootTableDesc);
+      admin.createTable(HGlobals.rootTableDesc);
     } catch (IllegalArgumentException e) {
       msg = e.toString();
     }
@@ -43,7 +48,7 @@
 
     msg = null;
     try {
-      client.createTable(HGlobals.metaTableDesc);
+      admin.createTable(HGlobals.metaTableDesc);
     } catch(IllegalArgumentException e) {
       msg = e.toString();
     }
@@ -55,9 +60,9 @@
     msg = null;
     HTableDescriptor desc = new HTableDescriptor(getName());
     desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
-    client.createTable(desc);
+    admin.createTable(desc);
     try {
-      client.createTable(desc);
+      admin.createTable(desc);
     } catch (TableExistsException e) {
       msg = e.getMessage();
     }
@@ -78,7 +83,7 @@
       @Override
       public void run() {
         try {
-          client.createTable(threadDesc);
+          admin.createTable(threadDesc);
           successes.incrementAndGet();
         } catch (TableExistsException e) {
           failures.incrementAndGet();
@@ -111,10 +116,11 @@
   * @throws Exception
   */
   public void testTableNameClash() throws Exception {
-    HClient client = new HClient(conf);
-    client.createTable(new HTableDescriptor(getName() + "SOMEUPPERCASE"));
-    client.createTable(new HTableDescriptor(getName()));
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(new HTableDescriptor(getName() + "SOMEUPPERCASE"));
+    admin.createTable(new HTableDescriptor(getName()));
     // Before fix, below would fail throwing a NoServerForRegionException.
-    client.openTable(new Text(getName()));
+    @SuppressWarnings("unused")
+    HTable table = new HTable(conf, new Text(getName()));
   }
 }
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTableMapReduce.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTableMapReduce.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTableMapReduce.java (working copy)
@@ -225,8 +225,7 @@
   }
 
   private void scanTable(Configuration conf) throws IOException {
-    HClient client = new HClient(conf);
-    client.openTable(new Text(TABLE_NAME));
+    HTable table = new HTable(conf, new Text(TABLE_NAME));
 
     Text[] columns = {
       TEXT_INPUT_COLUMN,
@@ -233,7 +232,7 @@
       TEXT_OUTPUT_COLUMN
     };
     HScannerInterface scanner =
-      client.obtainScanner(columns, HClient.EMPTY_START_ROW);
+      table.obtainScanner(columns, HConstants.EMPTY_START_ROW);
     try {
       HStoreKey key = new HStoreKey();
Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTimestamp.java
===================================================================
--- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTimestamp.java (revision 562014)
+++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTimestamp.java (working copy)
@@ -38,7 +38,7 @@
   private static final Text TABLE = new Text(TABLE_NAME);
   private static final Text ROW = new Text("row");
 
-  private HClient client;
+  private HTable table;
 
   /** constructor */
   public TestTimestamp() {
@@ -43,7 +43,6 @@
   /** constructor */
   public TestTimestamp() {
     super();
-    client = new HClient(conf);
   }
 
   /** {@inheritDoc} */
@@ -55,7 +54,8 @@
     desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
 
     try {
-      client.createTable(desc);
+      HBaseAdmin admin = new HBaseAdmin(conf);
+      admin.createTable(desc);
 
     } catch (Exception e) {
       e.printStackTrace();
@@ -66,25 +66,25 @@
   /** the test */
   public void testTimestamp() {
     try {
-      client.openTable(TABLE);
+      table = new HTable(conf, TABLE);
 
       // store a value specifying an update time
 
-      long lockid = client.startUpdate(ROW);
-      client.put(lockid, COLUMN, VERSION1.getBytes(HConstants.UTF8_ENCODING));
-      client.commit(lockid, T0);
+      long lockid = table.startUpdate(ROW);
+      table.put(lockid, COLUMN, VERSION1.getBytes(HConstants.UTF8_ENCODING));
+      table.commit(lockid, T0);
 
       // store a value specifying 'now' as the update time
 
-      lockid = client.startUpdate(ROW);
-      client.put(lockid, COLUMN, LATEST.getBytes(HConstants.UTF8_ENCODING));
-      client.commit(lockid);
+      lockid = table.startUpdate(ROW);
+      table.put(lockid, COLUMN, LATEST.getBytes(HConstants.UTF8_ENCODING));
+      table.commit(lockid);
 
       // delete values older than T1
 
-      lockid = client.startUpdate(ROW);
-      client.delete(lockid, COLUMN);
-      client.commit(lockid, T1);
+      lockid = table.startUpdate(ROW);
+      table.delete(lockid, COLUMN);
+      table.commit(lockid, T1);
 
       // now retrieve...
@@ -90,7 +90,7 @@
 
       // the most recent version:
 
-      byte[] bytes = client.get(ROW, COLUMN);
+      byte[] bytes = table.get(ROW, COLUMN);
       assertTrue(bytes != null && bytes.length != 0);
       assertTrue(LATEST.equals(new String(bytes, HConstants.UTF8_ENCODING)));
 
@@ -96,7 +96,7 @@
 
       // any version <= time T1
 
-      byte[][] values = client.get(ROW, COLUMN, T1, 3);
+      byte[][] values = table.get(ROW, COLUMN, T1, 3);
       assertNull(values);
 
       // the version from T0
@@ -101,7 +101,7 @@
 
       // the version from T0
 
-      values = client.get(ROW, COLUMN, T0, 3);
+      values = table.get(ROW, COLUMN, T0, 3);
       assertTrue(values.length == 1
         && VERSION1.equals(new String(values[0], HConstants.UTF8_ENCODING)));
 
@@ -116,7 +116,7 @@
 
       // the most recent version:
 
-      bytes = client.get(ROW, COLUMN);
+      bytes = table.get(ROW, COLUMN);
       assertTrue(bytes != null && bytes.length != 0);
       assertTrue(LATEST.equals(new String(bytes, HConstants.UTF8_ENCODING)));
 
@@ -122,7 +122,7 @@
 
       // any version <= time T1
 
-      values = client.get(ROW, COLUMN, T1, 3);
+      values = table.get(ROW, COLUMN, T1, 3);
       assertNull(values);
 
       // the version from T0
@@ -127,7 +127,7 @@
 
       // the version from T0
 
-      values = client.get(ROW, COLUMN, T0, 3);
+      values = table.get(ROW, COLUMN, T0, 3);
       assertTrue(values.length == 1
         && VERSION1.equals(new String(values[0], HConstants.UTF8_ENCODING)));
 
@@ -133,7 +133,7 @@
 
       // three versions older than now
 
-      values = client.get(ROW, COLUMN, 3);
+      values = table.get(ROW, COLUMN, 3);
       assertTrue(values.length == 1
         && LATEST.equals(new String(values[0], HConstants.UTF8_ENCODING)));
 
@@ -140,7 +140,7 @@
 
       // Test scanners
 
       HScannerInterface scanner =
-        client.obtainScanner(COLUMNS, HClient.EMPTY_START_ROW);
+        table.obtainScanner(COLUMNS, HConstants.EMPTY_START_ROW);
       try {
         HStoreKey key = new HStoreKey();
         TreeMap results = new TreeMap();
@@ -155,7 +155,7 @@
         scanner.close();
       }
 
-      scanner = client.obtainScanner(COLUMNS, HClient.EMPTY_START_ROW, T1);
+      scanner = table.obtainScanner(COLUMNS, HConstants.EMPTY_START_ROW, T1);
       try {
         HStoreKey key = new HStoreKey();
         TreeMap results = new TreeMap();
@@ -170,7 +170,7 @@
         scanner.close();
       }
 
-      scanner = client.obtainScanner(COLUMNS, HClient.EMPTY_START_ROW, T0);
+      scanner = table.obtainScanner(COLUMNS, HConstants.EMPTY_START_ROW, T0);
       try {
         HStoreKey key = new HStoreKey();
         TreeMap results = new TreeMap();
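
Taken together, the test changes above spell out the migration recipe for code still written against the deprecated HClient. A hedged before/after sketch, assuming conf, desc, row, column, and value are in scope:

    // Before: one HClient did both administration and data access.
    //   HClient client = new HClient(conf);
    //   client.createTable(desc);
    //   client.openTable(desc.getName());
    //   long lockid = client.startUpdate(row);
    // After: HBaseAdmin for schema operations, HTable for data access.
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.createTable(desc);
    HTable table = new HTable(conf, desc.getName());
    long lockid = table.startUpdate(row);
    table.put(lockid, column, value);
    table.commit(lockid, System.currentTimeMillis());  // timestamped commit, as in TestTimestamp
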