Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java	(revision 542444)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java	(working copy)
@@ -62,7 +62,7 @@
     try {
       int colpos = column.indexOf(":") + 1;
       if(colpos == 0) {
-        throw new IllegalArgumentException("Column name has no family indicator.");
+        throw new InvalidColumnNameException("Column name has no family indicator.");
       }
 
       String columnkey = column.substring(colpos);
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java	(revision 542444)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java	(working copy)
@@ -15,6 +15,7 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.lang.Class;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -20,6 +21,7 @@
 import java.util.Collection;
 import java.util.NoSuchElementException;
 import java.util.Random;
+import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
@@ -30,6 +32,7 @@
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
 
 /**
  * HClient manages a connection to a single HRegionServer.
@@ -41,6 +44,10 @@
     COLUMN_FAMILY
   };
 
+  private static final Text[] REGIONINFO = {
+    COL_REGIONINFO
+  };
+
   private static final Text EMPTY_START_ROW = new Text();
 
   private long clientTimeout;
@@ -61,11 +68,11 @@
 
   // Map tableName -> (Map startRow -> (HRegionInfo, HServerAddress)
-  private TreeMap<Text, TreeMap<Text, TableInfo>> tablesToServers;
+  private TreeMap<Text, SortedMap<Text, TableInfo>> tablesToServers;
 
   // For the "current" table: Map startRow -> (HRegionInfo, HServerAddress)
-  private TreeMap<Text, TableInfo> tableServers;
+  private SortedMap<Text, TableInfo> tableServers;
 
   // Known region HServerAddress.toString() -> HRegionInterface
 
@@ -87,7 +94,7 @@
     this.numRetries = conf.getInt("hbase.client.retries.number", 2);
     this.master = null;
-    this.tablesToServers = new TreeMap<Text, TreeMap<Text, TableInfo>>();
+    this.tablesToServers = new TreeMap<Text, SortedMap<Text, TableInfo>>();
     this.tableServers = null;
     this.servers = new TreeMap<String, HRegionInterface>();
@@ -92,7 +99,7 @@
     this.servers = new TreeMap<String, HRegionInterface>();
 
     // For row mutation operations
-
+
     this.currentRegion = null;
     this.currentServer = null;
     this.rand = new Random();
@@ -98,10 +105,33 @@
     this.rand = new Random();
   }
 
-  /**
-   * Find the address of the master and connect to it
-   */
-  private void checkMaster() {
+  private void handleRemoteException(RemoteException e) throws IOException {
+    String msg = e.getMessage();
+    if(e.getClassName().equals("org.apache.hadoop.hbase.InvalidColumnNameException")) {
+      throw new InvalidColumnNameException(msg);
+
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.LockException")) {
+      throw new LockException(msg);
+
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.MasterNotRunningException")) {
+      throw new MasterNotRunningException(msg);
+
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.NoServerForRegionException")) {
+      throw new NoServerForRegionException(msg);
+
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.NotServingRegionException")) {
+      throw new NotServingRegionException(msg);
+
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.TableNotDisabledException")) {
+      throw new TableNotDisabledException(msg);
+
+    } else {
+      throw e;
+    }
+  }
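With the unwrapping above, HClient callers can catch typed HBase exceptions instead of string-matching on a RemoteException's message. A minimal caller-side sketch (hypothetical usage, not part of this patch; the table name is made up and client is an opened HClient):

    try {
      client.openTable(new Text("testtable"));
    } catch (MasterNotRunningException e) {
      // master is down: surface to the operator or retry later
    } catch (NoServerForRegionException e) {
      // the table's regions are not assigned yet: back off and retry
    }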
+
+  /* Find the address of the master and connect to it */
+  private void checkMaster() throws IOException {
     if (this.master != null) {
       return;
     }
@@ -136,26 +166,287 @@
       }
     }
     if(this.master == null) {
-      throw new IllegalStateException("Master is not running");
+      throw new MasterNotRunningException();
     }
   }
 
+  //////////////////////////////////////////////////////////////////////////////
+  // Administrative methods
+  //////////////////////////////////////////////////////////////////////////////
+
+  /**
+   * Creates a new table
+   *
+   * @param desc - table descriptor for table
+   *
+   * @throws IllegalArgumentException - if the table name is reserved
+   * @throws MasterNotRunningException - if master is not running
+   * @throws NoServerForRegionException - if root region is not being served
+   * @throws IOException
+   */
   public synchronized void createTable(HTableDescriptor desc) throws IOException {
-    if(desc.getName().equals(ROOT_TABLE_NAME)
-        || desc.getName().equals(META_TABLE_NAME)) {
+    checkReservedTableName(desc.getName());
+    checkMaster();
+    try {
+      this.master.createTable(desc);
 
-      throw new IllegalArgumentException(desc.getName().toString()
-          + " is a reserved table name");
+    } catch(RemoteException e) {
+      handleRemoteException(e);
     }
+
+    // Save the current table
+
+    SortedMap<Text, TableInfo> oldServers = this.tableServers;
+
+    try {
+      // Wait for new table to come on-line
+
+      findServersForTable(desc.getName());
+
+    } finally {
+      if(oldServers != null && oldServers.size() != 0) {
+        // Restore old current table if there was one
+
+        this.tableServers = oldServers;
+      }
+    }
+  }
+
+  public synchronized void deleteTable(Text tableName) throws IOException {
+    checkReservedTableName(tableName);
     checkMaster();
-    locateRootRegion();
-    this.master.createTable(desc);
+    TableInfo firstMetaServer = getFirstMetaServerForTable(tableName);
+
+    try {
+      this.master.deleteTable(tableName);
+
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+
+    // Wait until first region is deleted
+
+    HRegionInterface server = getHRegionConnection(firstMetaServer.serverAddress);
+
+    DataInputBuffer inbuf = new DataInputBuffer();
+    HStoreKey key = new HStoreKey();
+    HRegionInfo info = new HRegionInfo();
+    for(int tries = 0; tries < numRetries; tries++) {
+      long scannerId = -1L;
+      try {
+        scannerId = server.openScanner(firstMetaServer.regionInfo.regionName,
+            REGIONINFO, tableName);
+        LabelledData[] values = server.next(scannerId, key);
+        if(values == null || values.length == 0) {
+          break;
+        }
+        boolean found = false;
+        for(int j = 0; j < values.length; j++) {
+          if(values[j].getLabel().equals(COL_REGIONINFO)) {
+            byte[] bytes = new byte[values[j].getData().getSize()];
+            System.arraycopy(values[j].getData().get(), 0, bytes, 0, bytes.length);
+            inbuf.reset(bytes, bytes.length);
+            info.readFields(inbuf);
+            if(info.tableDesc.getName().equals(tableName)) {
+              found = true;
+            }
+          }
+        }
+        if(!found) {
+          break;
+        }
+
+      } finally {
+        if(scannerId != -1L) {
+          try {
+            server.close(scannerId);
+
+          } catch(Exception e) {
+            LOG.warn(e);
+          }
+        }
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Sleep. Waiting for first region to be deleted from " + tableName);
+      }
+      try {
+        Thread.sleep(clientTimeout);
+
+      } catch(InterruptedException e) {
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Waiting for first region to be deleted from " + tableName);
+      }
+    }
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("table deleted " + tableName);
+    }
   }
 
-  public synchronized void deleteTable(Text tableName) throws IOException {
+  public synchronized void addColumn(Text tableName, HColumnDescriptor column) throws IOException {
+    checkReservedTableName(tableName);
+    checkMaster();
+    try {
+      this.master.addColumn(tableName, column);
+
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+  }
+
+  public synchronized void deleteColumn(Text tableName, Text columnName) throws IOException {
+    checkReservedTableName(tableName);
+    checkMaster();
+    try {
+      this.master.deleteColumn(tableName, columnName);
+
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+  }
+
+  public synchronized void mergeRegions(Text regionName1, Text regionName2) throws IOException {
+
+  }
+
+  public synchronized void enableTable(Text tableName) throws IOException {
+    checkReservedTableName(tableName);
+    checkMaster();
+    TableInfo firstMetaServer = getFirstMetaServerForTable(tableName);
+
+    try {
+      this.master.enableTable(tableName);
+
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+
+    // Wait until first region is enabled
+
+    HRegionInterface server = getHRegionConnection(firstMetaServer.serverAddress);
+
+    DataInputBuffer inbuf = new DataInputBuffer();
+    HStoreKey key = new HStoreKey();
+    HRegionInfo info = new HRegionInfo();
+    for(int tries = 0; tries < numRetries; tries++) {
+      int valuesfound = 0;
+      long scannerId = -1L;
+      try {
+        scannerId = server.openScanner(firstMetaServer.regionInfo.regionName,
+            REGIONINFO, tableName);
+        LabelledData[] values = server.next(scannerId, key);
+        if(values == null || values.length == 0) {
+          if(valuesfound == 0) {
+            throw new NoSuchElementException("table " + tableName + " not found");
+          }
+        }
+        valuesfound += 1;
+        boolean isenabled = false;
+        for(int j = 0; j < values.length; j++) {
+          if(values[j].getLabel().equals(COL_REGIONINFO)) {
+            byte[] bytes = new byte[values[j].getData().getSize()];
+            System.arraycopy(values[j].getData().get(), 0, bytes, 0, bytes.length);
+            inbuf.reset(bytes, bytes.length);
+            info.readFields(inbuf);
+            isenabled = !info.offLine;
+          }
+        }
+        if(isenabled) {
+          break;
+        }
+
+      } finally {
+        if(scannerId != -1L) {
+          try {
+            server.close(scannerId);
+
+          } catch(Exception e) {
+            LOG.warn(e);
+          }
+        }
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Sleep. Waiting for first region to be enabled from " + tableName);
+      }
+      try {
+        Thread.sleep(clientTimeout);
+
+      } catch(InterruptedException e) {
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Waiting for first region to be enabled from " + tableName);
+      }
+    }
+  }
+
+  public synchronized void disableTable(Text tableName) throws IOException {
+    checkReservedTableName(tableName);
     checkMaster();
-    locateRootRegion();
-    this.master.deleteTable(tableName);
+    TableInfo firstMetaServer = getFirstMetaServerForTable(tableName);
+
+    try {
+      this.master.disableTable(tableName);
+
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+
+    // Wait until first region is disabled
+
+    HRegionInterface server = getHRegionConnection(firstMetaServer.serverAddress);
+
+    DataInputBuffer inbuf = new DataInputBuffer();
+    HStoreKey key = new HStoreKey();
+    HRegionInfo info = new HRegionInfo();
+    for(int tries = 0; tries < numRetries; tries++) {
+      int valuesfound = 0;
+      long scannerId = -1L;
+      try {
+        scannerId = server.openScanner(firstMetaServer.regionInfo.regionName,
+            REGIONINFO, tableName);
+        LabelledData[] values = server.next(scannerId, key);
+        if(values == null || values.length == 0) {
+          if(valuesfound == 0) {
+            throw new NoSuchElementException("table " + tableName + " not found");
+          }
+        }
+        valuesfound += 1;
+        boolean disabled = false;
+        for(int j = 0; j < values.length; j++) {
+          if(values[j].getLabel().equals(COL_REGIONINFO)) {
+            byte[] bytes = new byte[values[j].getData().getSize()];
+            System.arraycopy(values[j].getData().get(), 0, bytes, 0, bytes.length);
+            inbuf.reset(bytes, bytes.length);
+            info.readFields(inbuf);
+            disabled = info.offLine;
+          }
+        }
+        if(disabled) {
+          break;
+        }
+
+      } finally {
+        if(scannerId != -1L) {
+          try {
+            server.close(scannerId);
+
+          } catch(Exception e) {
+            LOG.warn(e);
+          }
+        }
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Sleep. Waiting for first region to be disabled from " + tableName);
+      }
+      try {
+        Thread.sleep(clientTimeout);
+
+      } catch(InterruptedException e) {
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Waiting for first region to be disabled from " + tableName);
+      }
+    }
   }
 
   public synchronized void shutdown() throws IOException {
@@ -162,7 +453,35 @@
     checkMaster();
     this.master.shutdown();
   }
+
+  /*
+   * Verifies that the specified table name is not a reserved name
+   * @param tableName - the table name to be checked
+   * @throws IllegalArgumentException - if the table name is reserved
+   */
+  private void checkReservedTableName(Text tableName) {
+    if(tableName.equals(ROOT_TABLE_NAME)
+        || tableName.equals(META_TABLE_NAME)) {
+
+      throw new IllegalArgumentException(tableName + " is a reserved table name");
+    }
+  }
+
+  private TableInfo getFirstMetaServerForTable(Text tableName) throws IOException {
+    SortedMap<Text, TableInfo> metaservers = findMetaServersForTable(tableName);
+    return metaservers.get(metaservers.firstKey());
+  }
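Taken together, the administrative methods above give HClient a create/enable/disable/delete table lifecycle, each call blocking until the first region of the table reflects the change. A hedged usage sketch (table and family names are invented; conf is an initialized Hadoop Configuration):

    HClient client = new HClient(conf);
    HTableDescriptor desc = new HTableDescriptor("testtable");
    desc.addFamily(new HColumnDescriptor("contents:"));
    client.createTable(desc);                    // waits for the first region to come on-line
    client.disableTable(new Text("testtable"));  // marks the table's regions offline
    client.enableTable(new Text("testtable"));   // brings them back on-line
    client.deleteTable(new Text("testtable"));   // removes the table's regions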
 
+  //////////////////////////////////////////////////////////////////////////////
+  // Client API
+  //////////////////////////////////////////////////////////////////////////////
+
+  /**
+   * Loads information so that a table can be manipulated.
+   *
+   * @param tableName - the table to be located
+   * @throws IOException - if the table can not be located after retrying
+   */
   public synchronized void openTable(Text tableName) throws IOException {
     if(tableName == null || tableName.getLength() == 0) {
       throw new IllegalArgumentException("table name cannot be null or zero length");
@@ -168,53 +487,59 @@
       throw new IllegalArgumentException("table name cannot be null or zero length");
     }
     this.tableServers = tablesToServers.get(tableName);
-    if(this.tableServers == null ) {        // We don't know where the table is
-      findTableInMeta(tableName);           // Load the information from meta
+    if(this.tableServers == null ) {
+      // We don't know where the table is.
+      // Load the information from meta.
+      this.tableServers = findServersForTable(tableName);
     }
   }
 
-  private void findTableInMeta(Text tableName) throws IOException {
-    TreeMap<Text, TableInfo> metaServers =
+  /*
+   * Locates a table by searching the META region
+   *
+   * @param tableName - name of table to find servers for
+   * @return - map of first row to table info for all regions in the table
+   * @throws IOException
+   */
+  private SortedMap<Text, TableInfo> findServersForTable(Text tableName)
+      throws IOException {
+
+    SortedMap<Text, TableInfo> servers = null;
+    if(tableName.equals(ROOT_TABLE_NAME)) {
+      servers = locateRootRegion();
+
+    } else if(tableName.equals(META_TABLE_NAME)) {
+      servers = loadMetaFromRoot();
+
+    } else {
+      servers = new TreeMap<Text, TableInfo>();
+      for(TableInfo t: findMetaServersForTable(tableName).values()) {
+        servers.putAll(scanOneMetaRegion(t, tableName));
+      }
+      this.tablesToServers.put(tableName, servers);
+    }
+    return servers;
+  }
+
+  /*
+   * Finds the meta servers that contain information about the specified table
+   * @param tableName - the name of the table to get information about
+   * @return - returns a SortedMap of the meta servers
+   * @throws IOException
+   */
+  private SortedMap<Text, TableInfo> findMetaServersForTable(Text tableName)
+      throws IOException {
+
+    SortedMap<Text, TableInfo> metaServers =
       this.tablesToServers.get(META_TABLE_NAME);
-    if (metaServers == null) {              // Don't know where the meta is
-      loadMetaFromRoot(tableName);
-      if (tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
-        // All we really wanted was the meta or root table
-        return;
-      }
-      metaServers = this.tablesToServers.get(META_TABLE_NAME);
+    if(metaServers == null) {               // Don't know where the meta is
+      metaServers = loadMetaFromRoot();
     }
+    Text firstMetaRegion = (metaServers.containsKey(tableName)) ?
+        tableName : metaServers.headMap(tableName).lastKey();
 
-    this.tableServers = new TreeMap<Text, TableInfo>();
-    for(int tries = 0;
-        this.tableServers.size() == 0 && tries < this.numRetries;
-        tries++) {
-      Text firstMetaRegion = (metaServers.containsKey(tableName))?
-        tableName: metaServers.headMap(tableName).lastKey();
-      for(TableInfo t: metaServers.tailMap(firstMetaRegion).values()) {
-        scanOneMetaRegion(t, tableName);
-      }
-      if (this.tableServers.size() == 0) {
-        // Table not assigned. Sleep and try again
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Sleeping. Table " + tableName
-              + " not currently being served.");
-        }
-        try {
-          Thread.sleep(this.clientTimeout);
-        } catch(InterruptedException e) {
-        }
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("Wake. Retry finding table " + tableName);
-        }
-      }
-    }
-    if (this.tableServers.size() == 0) {
-      throw new IOException("failed to scan " + META_TABLE_NAME + " after "
-          + this.numRetries + " retries");
-    }
-    this.tablesToServers.put(tableName, this.tableServers);
+    return metaServers.tailMap(firstMetaRegion);
   }
 
   /*
@@ -219,20 +544,28 @@
 
   /*
    * Load the meta table from the root table.
+   *
+   * @return map of first row to TableInfo for all meta regions
+   * @throws IOException
    */
-  private void loadMetaFromRoot(Text tableName) throws IOException {
-    locateRootRegion();
-    if(tableName.equals(ROOT_TABLE_NAME)) {   // All we really wanted was the root
-      return;
+  private TreeMap<Text, TableInfo> loadMetaFromRoot() throws IOException {
+    SortedMap<Text, TableInfo> rootRegion =
+      this.tablesToServers.get(ROOT_TABLE_NAME);
+
+    if(rootRegion == null) {
+      rootRegion = locateRootRegion();
    }
-    scanRoot();
+    return scanRoot(rootRegion.get(rootRegion.firstKey()));
  }
 
  /*
-   * Repeatedly try to find the root region by asking the HMaster for where it
-   * could be.
+   * Repeatedly try to find the root region by asking the master for where it is
+   *
+   * @return TreeMap for root region if found
+   * @throws NoServerForRegionException - if the root region can not be located after retrying
+   * @throws IOException
   */
-  private void locateRootRegion() throws IOException {
+  private TreeMap<Text, TableInfo> locateRootRegion() throws IOException {
    checkMaster();
 
    HServerAddress rootRegionLocation = null;
@@ -256,7 +589,8 @@
      }
    }
    if(rootRegionLocation == null) {
-      throw new IOException("Timed out trying to locate root region");
+      throw new NoServerForRegionException(
+          "Timed out trying to locate root region");
    }
 
    HRegionInterface rootRegion = getHRegionConnection(rootRegionLocation);
@@ -263,11 +597,6 @@
    try {
      rootRegion.getRegionInfo(HGlobals.rootRegionInfo.regionName);
-      this.tableServers = new TreeMap<Text, TableInfo>();
-      this.tableServers.put(EMPTY_START_ROW,
-          new TableInfo(HGlobals.rootRegionInfo, rootRegionLocation));
-
-      this.tablesToServers.put(ROOT_TABLE_NAME, this.tableServers);
      break;
    } catch(NotServingRegionException e) {
@@ -293,40 +622,30 @@
    }
    if (rootRegionLocation == null) {
-      throw new IOException("unable to locate root region server");
+      throw new NoServerForRegionException(
+          "unable to locate root region server");
    }
+
+    TreeMap<Text, TableInfo> rootServer = new TreeMap<Text, TableInfo>();
+    rootServer.put(EMPTY_START_ROW,
+        new TableInfo(HGlobals.rootRegionInfo, rootRegionLocation));
+
+    this.tablesToServers.put(ROOT_TABLE_NAME, rootServer);
+    return rootServer;
  }
 
-  /*
+  /*
   * Scans the root region to find all the meta regions
+   * @return - TreeMap of meta region servers
+   * @throws IOException
   */
-  private void scanRoot() throws IOException {
-    this.tableServers = new TreeMap<Text, TableInfo>();
-    TableInfo t = this.tablesToServers.get(ROOT_TABLE_NAME).get(EMPTY_START_ROW);
-    for(int tries = 0;
-        scanOneMetaRegion(t, META_TABLE_NAME) == 0 && tries < this.numRetries;
-        tries++) {
-
-      // The table is not yet being served. Sleep and retry.
-
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Sleeping. Table " + META_TABLE_NAME
-            + " not currently being served.");
-      }
-      try {
-        Thread.sleep(this.clientTimeout);
-
-      } catch(InterruptedException e) {
-      }
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Wake. Retry finding table " + META_TABLE_NAME);
-      }
-    }
-    if(this.tableServers.size() == 0) {
-      throw new IOException("failed to scan " + ROOT_TABLE_NAME + " after "
-          + this.numRetries + " retries");
-    }
-    this.tablesToServers.put(META_TABLE_NAME, this.tableServers);
+  private TreeMap<Text, TableInfo> scanRoot(TableInfo rootRegion)
+      throws IOException {
+
+    TreeMap<Text, TableInfo> metaservers =
+      scanOneMetaRegion(rootRegion, META_TABLE_NAME);
+    this.tablesToServers.put(META_TABLE_NAME, metaservers);
+    return metaservers;
  }
 
  /*
@@ -331,82 +650,121 @@
 
  /*
   * Scans a single meta region
-   * @param t the table we're going to scan
+   * @param t the meta region we're going to scan
   * @param tableName the name of the table we're looking for
-   * @return returns the number of servers that are serving the table
+   * @return returns a map of startingRow to TableInfo
+   * @throws NoSuchElementException - if table does not exist
+   * @throws IllegalStateException - if table is offline
+   * @throws NoServerForRegionException - if table can not be found after retrying
+   * @throws IOException
   */
-  private int scanOneMetaRegion(TableInfo t, Text tableName)
+  private TreeMap<Text, TableInfo> scanOneMetaRegion(TableInfo t, Text tableName)
      throws IOException {
 
    HRegionInterface server = getHRegionConnection(t.serverAddress);
-    int servers = 0;
-    long scannerId = -1L;
-    try {
-      scannerId =
-        server.openScanner(t.regionInfo.regionName, META_COLUMNS, tableName);
-
-      DataInputBuffer inbuf = new DataInputBuffer();
-      while(true) {
-        HRegionInfo regionInfo = null;
-        String serverAddress = null;
-        HStoreKey key = new HStoreKey();
-        LabelledData[] values = server.next(scannerId, key);
-        if(values.length == 0) {
-          if(servers == 0) {
-            // If we didn't find any servers then the table does not exist
-
-            throw new NoSuchElementException("table '" + tableName
-                + "' does not exist");
+    TreeMap<Text, TableInfo> servers = new TreeMap<Text, TableInfo>();
+    for(int tries = 0; servers.size() == 0 && tries < this.numRetries;
+        tries++) {
+
+      long scannerId = -1L;
+      try {
+        scannerId =
+          server.openScanner(t.regionInfo.regionName, META_COLUMNS, tableName);
+
+        DataInputBuffer inbuf = new DataInputBuffer();
+        while(true) {
+          HRegionInfo regionInfo = null;
+          String serverAddress = null;
+          HStoreKey key = new HStoreKey();
+          LabelledData[] values = server.next(scannerId, key);
+          if(values.length == 0) {
+            if(servers.size() == 0) {
+              // If we didn't find any servers then the table does not exist
+
+              throw new NoSuchElementException("table '" + tableName
                  + "' does not exist");
+            }
+
+            // We found at least one server for the table and now we're done.
+
+            break;
          }
-
-          // We found at least one server for the table and now we're done.
-
-          break;
-        }
-        byte[] bytes = null;
-        TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-        for(int i = 0; i < values.length; i++) {
-          bytes = new byte[values[i].getData().getSize()];
-          System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
-          results.put(values[i].getLabel(), bytes);
-        }
-        regionInfo = new HRegionInfo();
-        bytes = results.get(COL_REGIONINFO);
-        inbuf.reset(bytes, bytes.length);
-        regionInfo.readFields(inbuf);
+          byte[] bytes = null;
+          TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+          for(int i = 0; i < values.length; i++) {
+            bytes = new byte[values[i].getData().getSize()];
+            System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
+            results.put(values[i].getLabel(), bytes);
+          }
+          regionInfo = new HRegionInfo();
+          bytes = results.get(COL_REGIONINFO);
+          inbuf.reset(bytes, bytes.length);
+          regionInfo.readFields(inbuf);
 
-        if(!regionInfo.tableDesc.getName().equals(tableName)) {
-          // We're done
-          break;
+          if(!regionInfo.tableDesc.getName().equals(tableName)) {
+            // We're done
+            break;
+          }
+
+          if(regionInfo.offLine) {
+            throw new IllegalStateException("table offline: " + tableName);
+          }
+
+          bytes = results.get(COL_SERVER);
+          if(bytes == null || bytes.length == 0) {
+            // We need to rescan because the table we want is unassigned.
+
+            if(LOG.isDebugEnabled()) {
+              LOG.debug("no server address for " + regionInfo.toString());
+            }
+            servers.clear();
+            break;
+          }
+          serverAddress = new String(bytes, UTF8_ENCODING);
+
+          servers.put(regionInfo.startKey,
+              new TableInfo(regionInfo, new HServerAddress(serverAddress)));
        }
+      } finally {
+        if(scannerId != -1L) {
+          try {
+            server.close(scannerId);
 
-        bytes = results.get(COL_SERVER);
-        if(bytes == null || bytes.length == 0) {
-          // We need to rescan because the table we want is unassigned.
-
-          if(LOG.isDebugEnabled()) {
-            LOG.debug("no server address for " + regionInfo.toString());
+          } catch(Exception e) {
+            LOG.warn(e);
          }
-          servers = 0;
-          this.tableServers.clear();
-          break;
        }
-        servers += 1;
-        serverAddress = new String(bytes, UTF8_ENCODING);
+      }
+
+      if(servers.size() == 0 && tries == this.numRetries - 1) {
+        throw new NoServerForRegionException("failed to find server for "
+            + tableName + " after " + this.numRetries + " retries");
+      }
 
-        this.tableServers.put(regionInfo.startKey,
-            new TableInfo(regionInfo, new HServerAddress(serverAddress)));
+      // The table is not yet being served. Sleep and retry.
+
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Sleeping. Table " + tableName
+            + " not currently being served.");
      }
-      return servers;
-
-    } finally {
-      if(scannerId != -1L) {
-        server.close(scannerId);
+      try {
+        Thread.sleep(this.clientTimeout);
+
+      } catch(InterruptedException e) {
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Retry finding table " + tableName);
      }
    }
+    return servers;
  }
 
+  /*
+   * Establishes a connection to the region server at the specified address
+   * @param regionServer - the server to connect to
+   * @throws IOException
+   */
  synchronized HRegionInterface getHRegionConnection(HServerAddress regionServer)
      throws IOException {
 
@@ -436,7 +794,7 @@
      throws IOException {
    
    TreeSet<HTableDescriptor> uniqueTables = new TreeSet<HTableDescriptor>();
-    TreeMap<Text, TableInfo> metaTables =
+    SortedMap<Text, TableInfo> metaTables =
      this.tablesToServers.get(META_TABLE_NAME);
    
    if(metaTables == null) {
@@ -441,8 +799,7 @@
    if(metaTables == null) {
      // Meta is not loaded yet so go do that
-      loadMetaFromRoot(META_TABLE_NAME);
-      metaTables = tablesToServers.get(META_TABLE_NAME);
+      metaTables = loadMetaFromRoot();
    }
 
    for (TableInfo t: metaTables.values()) {
@@ -512,10 +869,11 @@
 
        // Reload information for the whole table
 
-        findTableInMeta(info.regionInfo.tableDesc.getName());
+        this.tableServers = findServersForTable(info.regionInfo.tableDesc.getName());
 
        if(this.tableServers.get(info.regionInfo.startKey) == null ) {
-          throw new IOException("region " + info.regionInfo.regionName + " does not exist");
+          throw new IOException("region " + info.regionInfo.regionName
+              + " does not exist");
        }
      }
 
@@ -879,8 +1237,7 @@
    System.err.println("               address is read from configuration.");
    System.err.println("Commands:");
    System.err.println(" shutdown     Shutdown the HBase cluster.");
-    System.err.println(" createTable  Takes table name, column families, " +
-      "and maximum versions.");
+    System.err.println(" createTable  Takes table name, column families,... ");
    System.err.println(" deleteTable  Takes a table name.");
    System.err.println(" listTables   List all tables.");
    System.err.println("Example Usage:");
@@ -928,16 +1285,14 @@
      }
      
      if (cmd.equals("createTable")) {
-        if (i + 3 > args.length) {
+        if (i + 2 > args.length) {
          throw new IllegalArgumentException("Must supply a table name " +
-            ", at least one column family and maximum number of versions");
+            "and at least one column family");
        }
-        int maxVersions = (Integer.parseInt(args[args.length - 1]));
-        HTableDescriptor desc =
-          new HTableDescriptor(args[i + 1], maxVersions);
+        HTableDescriptor desc = new HTableDescriptor(args[i + 1]);
        boolean addedFamily = false;
        for (int ii = i + 2; ii < (args.length - 1); ii++) {
-          desc.addFamily(new Text(args[ii]));
+          desc.addFamily(new HColumnDescriptor(args[ii]));
          addedFamily = true;
        }
        if (!addedFamily) {
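The region lookup in findMetaServersForTable relies on regions being sorted by start row: the region covering a key is the one with the greatest start row less than or equal to it, found with headMap()/lastKey(). A self-contained sketch of the same idiom using String keys (the patch itself uses Text):

    import java.util.SortedMap;
    import java.util.TreeMap;

    public class RegionLookup {
      /** Returns the region whose start row covers the given row key. */
      static String regionFor(SortedMap<String, String> regionsByStartRow, String row) {
        if (regionsByStartRow.containsKey(row)) {
          return regionsByStartRow.get(row);
        }
        // headMap(row) holds all regions starting strictly before row;
        // the last of those is the region that contains it.
        return regionsByStartRow.get(regionsByStartRow.headMap(row).lastKey());
      }

      public static void main(String[] args) {
        TreeMap<String, String> regions = new TreeMap<String, String>();
        regions.put("", "region-1");    // empty start row covers everything below "m"
        regions.put("m", "region-2");
        System.out.println(regionFor(regions, "hello"));  // region-1
        System.out.println(regionFor(regions, "zebra"));  // region-2
      }
    }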
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java	(revision 0)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java	(revision 0)
@@ -0,0 +1,290 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparable;
+
+/**
+ * An HColumnDescriptor contains information about a column family such as the
+ * number of versions, compression settings, etc.
+ *
+ */
+public class HColumnDescriptor implements WritableComparable {
+
+  // For future backward compatibility
+
+  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)1;
+
+  // Legal family names can only contain 'word characters' and end in a colon.
+
+  private static final Pattern LEGAL_FAMILY_NAME = Pattern.compile("\\w+:");
+
+  /**
+   * The type of compression.
+   * @see org.apache.hadoop.io.SequenceFile.Writer
+   */
+  public static enum CompressionType {
+    /** Do not compress records. */
+    NONE,
+    /** Compress values only, each separately. */
+    RECORD,
+    /** Compress sequences of records together in blocks. */
+    BLOCK
+  }
+
+  // Internal values for compression type used for serialization
+
+  private static final byte COMPRESSION_NONE = (byte)0;
+  private static final byte COMPRESSION_RECORD = (byte)1;
+  private static final byte COMPRESSION_BLOCK = (byte)2;
+
+  private static final int DEFAULT_N_VERSIONS = 3;
+
+  Text name;                              // Column family name
+  int maxVersions;                        // Number of versions to keep
+  byte compressionType;                   // Compression setting if any
+  boolean inMemory;                       // Serve reads from in-memory cache
+  int maxValueLength;                     // Maximum value size
+  boolean bloomFilterEnabled;             // True if column has a bloom filter
+  byte versionNumber;                     // Version number of this class
+
+  /**
+   * Default constructor. Must be present for Writable.
+   */
+  public HColumnDescriptor() {
+    this.name = new Text();
+    this.maxVersions = DEFAULT_N_VERSIONS;
+    this.compressionType = COMPRESSION_NONE;
+    this.inMemory = false;
+    this.maxValueLength = Integer.MAX_VALUE;
+    this.bloomFilterEnabled = false;
+    this.versionNumber = COLUMN_DESCRIPTOR_VERSION;
+  }
+
+  public HColumnDescriptor(String columnName) {
+    this();
+    this.name.set(columnName);
+  }
+
+  /**
+   * Constructor - specify all parameters.
+   * @param name - Column family name
+   * @param maxVersions - Maximum number of versions to keep
+   * @param compression - Compression type
+   * @param inMemory - If true, column data should be kept in a
+   *                   HRegionServer's cache
+   * @param maxValueLength - Restrict values to <= this value
+   * @param bloomFilter - Enable a bloom filter for this column
+   *
+   * @throws IllegalArgumentException if passed a family name that is made of
+   * other than 'word' characters: i.e. [a-zA-Z_0-9] and does not
+   * end in a :
+   * @throws IllegalArgumentException if the number of versions is <= 0
+   */
+  public HColumnDescriptor(Text name, int maxVersions, CompressionType compression,
+      boolean inMemory, int maxValueLength, boolean bloomFilter) {
+    String familyStr = name.toString();
+    Matcher m = LEGAL_FAMILY_NAME.matcher(familyStr);
+    if(m == null || !m.matches()) {
+      throw new IllegalArgumentException(
+          "Family names can only contain 'word characters' and must end with a ':'");
+    }
+    this.name = name;
+
+    if(maxVersions <= 0) {
+      // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
+      // Until there is support, consider 0 or < 0 -- a configuration error.
+      throw new IllegalArgumentException("Maximum versions must be positive");
+    }
+    this.maxVersions = maxVersions;
+
+    if(compression == CompressionType.NONE) {
+      this.compressionType = COMPRESSION_NONE;
+
+    } else if(compression == CompressionType.BLOCK) {
+      this.compressionType = COMPRESSION_BLOCK;
+
+    } else if(compression == CompressionType.RECORD) {
+      this.compressionType = COMPRESSION_RECORD;
+
+    } else {
+      assert(false);
+    }
+    this.inMemory = inMemory;
+    this.maxValueLength = maxValueLength;
+    this.bloomFilterEnabled = bloomFilter;
+    this.versionNumber = COLUMN_DESCRIPTOR_VERSION;
+  }
+
+  /**
+   * @return - name of column family
+   */
+  public Text getName() {
+    return name;
+  }
+
+  /**
+   * @return - compression type being used for the column family
+   */
+  public CompressionType getCompression() {
+    CompressionType value = null;
+
+    if(this.compressionType == COMPRESSION_NONE) {
+      value = CompressionType.NONE;
+
+    } else if(this.compressionType == COMPRESSION_BLOCK) {
+      value = CompressionType.BLOCK;
+
+    } else if(this.compressionType == COMPRESSION_RECORD) {
+      value = CompressionType.RECORD;
+
+    } else {
+      assert(false);
+    }
+    return value;
+  }
+
+  /**
+   * @return - maximum number of versions
+   */
+  public int getMaxVersions() {
+    return this.maxVersions;
+  }
+
+  @Override
+  public String toString() {
+    String compression = "none";
+    switch(compressionType) {
+    case COMPRESSION_NONE:
+      break;
+    case COMPRESSION_RECORD:
+      compression = "record";
+      break;
+    case COMPRESSION_BLOCK:
+      compression = "block";
+      break;
+    default:
+      assert(false);
+    }
+
+    return "(" + name + ", max versions: " + maxVersions + ", compression: "
+      + compression + ", in memory: " + inMemory + ", max value length: "
+      + maxValueLength + ", bloom filter:" + bloomFilterEnabled + ")";
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    return compareTo(obj) == 0;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = this.name.hashCode();
+    result ^= Integer.valueOf(this.maxVersions).hashCode();
+    result ^= Byte.valueOf(this.compressionType).hashCode();
+    result ^= Boolean.valueOf(this.inMemory).hashCode();
+    result ^= Integer.valueOf(this.maxValueLength).hashCode();
+    result ^= Boolean.valueOf(this.bloomFilterEnabled).hashCode();
+    result ^= Byte.valueOf(this.versionNumber).hashCode();
+    return result;
+  }
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Writable
+  //////////////////////////////////////////////////////////////////////////////
+
+  public void readFields(DataInput in) throws IOException {
+    this.versionNumber = in.readByte();
+    this.name.readFields(in);
+    this.maxVersions = in.readInt();
+    this.compressionType = in.readByte();
+    this.inMemory = in.readBoolean();
+    this.maxValueLength = in.readInt();
+    this.bloomFilterEnabled = in.readBoolean();
+  }
+
+  public void write(DataOutput out) throws IOException {
+    out.writeByte(this.versionNumber);
+    this.name.write(out);
+    out.writeInt(this.maxVersions);
+    out.writeByte(this.compressionType);
+    out.writeBoolean(this.inMemory);
+    out.writeInt(this.maxValueLength);
+    out.writeBoolean(this.bloomFilterEnabled);
+  }
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Comparable
+  //////////////////////////////////////////////////////////////////////////////
+
+  public int compareTo(Object o) {
+    // NOTE: we don't do anything with the version number yet.
+    // Version numbers will come into play when we introduce an incompatible
+    // change in the future such as the addition of access control lists.
+
+    HColumnDescriptor other = (HColumnDescriptor)o;
+
+    int result = this.name.compareTo(other.getName());
+
+    if(result == 0) {
+      result = Integer.valueOf(this.maxVersions).compareTo(
+          Integer.valueOf(other.maxVersions));
+    }
+
+    if(result == 0) {
+      result = Integer.valueOf(this.compressionType).compareTo(
+          Integer.valueOf(other.compressionType));
+    }
+
+    if(result == 0) {
+      if(this.inMemory == other.inMemory) {
+        result = 0;
+
+      } else if(this.inMemory) {
+        result = -1;
+
+      } else {
+        result = 1;
+      }
+    }
+
+    if(result == 0) {
+      result = other.maxValueLength - this.maxValueLength;
+    }
+
+    if(result == 0) {
+      if(this.bloomFilterEnabled == other.bloomFilterEnabled) {
+        result = 0;
+
+      } else if(this.bloomFilterEnabled) {
+        result = -1;
+
+      } else {
+        result = 1;
+      }
+    }
+
+    return result;
+  }
+
+}
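For illustration, constructing descriptors with the class above (the family names and settings are invented): the String form takes all the defaults, while the full Text-based constructor sets everything explicitly and enforces the \w+: family-name pattern.

    HColumnDescriptor simple = new HColumnDescriptor("anchor:");
    HColumnDescriptor tuned = new HColumnDescriptor(new Text("contents:"),
        1,                                          // keep one version per cell
        HColumnDescriptor.CompressionType.RECORD,   // compress each value separately
        false,                                      // do not serve from in-memory cache
        Integer.MAX_VALUE,                          // no cap on value length
        false);                                     // no bloom filter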
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HGlobals.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HGlobals.java	(revision 542444)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HGlobals.java	(working copy)
@@ -25,12 +25,14 @@
   static HTableDescriptor metaTableDesc = null;
 
   static {
-    rootTableDesc = new HTableDescriptor(ROOT_TABLE_NAME.toString(), 1);
-    rootTableDesc.addFamily(COLUMN_FAMILY);
+    rootTableDesc = new HTableDescriptor(ROOT_TABLE_NAME.toString());
+    rootTableDesc.addFamily(new HColumnDescriptor(COLUMN_FAMILY, 1,
+        HColumnDescriptor.CompressionType.NONE, false, Integer.MAX_VALUE, false));
 
     rootRegionInfo = new HRegionInfo(0L, rootTableDesc, null, null);
 
-    metaTableDesc = new HTableDescriptor(META_TABLE_NAME.toString(), 1);
-    metaTableDesc.addFamily(COLUMN_FAMILY);
+    metaTableDesc = new HTableDescriptor(META_TABLE_NAME.toString());
+    metaTableDesc.addFamily(new HColumnDescriptor(COLUMN_FAMILY, 1,
+        HColumnDescriptor.CompressionType.NONE, false, Integer.MAX_VALUE, false));
   }
 }
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java	(revision 542444)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java	(working copy)
@@ -24,6 +24,7 @@
 import java.util.Map;
 import java.util.Random;
 import java.util.SortedMap;
+import java.util.SortedSet;
 import java.util.Timer;
 import java.util.TimerTask;
 import java.util.TreeMap;
@@ -173,7 +174,9 @@
         }
 
         for (int i = 0; i < values.length; i++) {
-          results.put(values[i].getLabel(), values[i].getData().get());
+          byte[] bytes = new byte[values[i].getData().getSize()];
+          System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
+          results.put(values[i].getLabel(), bytes);
         }
 
         HRegionInfo info = getRegionInfo(COL_REGIONINFO, results, inbuf);
@@ -220,11 +223,17 @@
   }
 
   protected String getServerName(final Text key,
-      final TreeMap<Text, byte[]> data)
-  throws UnsupportedEncodingException {
+      final TreeMap<Text, byte[]> data) {
+
     byte [] bytes = data.get(key);
-    String name = (bytes != null && bytes.length != 0)?
-      new String(bytes, UTF8_ENCODING): null;
+    String name = null;
+    try {
+      name = (bytes != null && bytes.length != 0) ?
+        new String(bytes, UTF8_ENCODING): null;
+
+    } catch(UnsupportedEncodingException e) {
+      assert(false);
+    }
     return (name != null)? name.trim(): name;
   }
 
@@ -229,12 +238,20 @@
   }
 
   protected long getStartCode(final Text key,
-      final TreeMap<Text, byte[]> data)
-  throws NumberFormatException, UnsupportedEncodingException {
+      final TreeMap<Text, byte[]> data) {
+
     long startCode = -1L;
     byte [] bytes = data.get(key);
     if(bytes != null && bytes.length != 0) {
-      startCode = Long.valueOf(new String(bytes, UTF8_ENCODING).trim());
+      try {
+        startCode = Long.valueOf(new String(bytes, UTF8_ENCODING).trim());
+
+      } catch(NumberFormatException e) {
+        assert(false);
+
+      } catch(UnsupportedEncodingException e) {
+        assert(false);
+      }
     }
     return startCode;
   }
 
@@ -241,8 +258,32 @@
   protected void checkAssigned(final HRegionInfo info, final String serverName,
       final long startCode) {
+
+    // Skip region - if ...
+    if(info.offLine                                    // offline
+        || killedRegions.contains(info.regionName)     // queued for offline
+        || regionsToDelete.contains(info.regionName)) { // queued for delete
+
+      unassignedRegions.remove(info.regionName);
+      assignAttempts.remove(info.regionName);
+
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("not assigning region: " + info.regionName);
+      }
+      return;
+    }
+
     HServerInfo storedInfo = null;
     if(serverName != null) {
+      TreeMap<Text, HRegionInfo> regionsToKill = killList.get(serverName);
+      if(regionsToKill != null && regionsToKill.containsKey(info.regionName)) {
+        // Skip if region is on kill list
+
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("not assigning region (on kill list): " + info.regionName);
+        }
+        return;
+      }
      storedInfo = serversToServerInfo.get(serverName);
    }
    if(storedInfo == null || storedInfo.getStartCode() != startCode) {
@@ -310,10 +351,35 @@
  private Thread rootScannerThread;
  private Integer rootScannerLock = 0;
 
-  private static class MetaRegion {
+  private static class MetaRegion implements Comparable {
    public HServerAddress server;
    public Text regionName;
    public Text startKey;
+
+    @Override
+    public boolean equals(Object o) {
+      return this.compareTo(o) == 0;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = this.regionName.hashCode();
+      result ^= this.startKey.hashCode();
+      return result;
+    }
+
+    // Comparable
+
+    public int compareTo(Object o) {
+      MetaRegion other = (MetaRegion)o;
+
+      int result = this.regionName.compareTo(other.regionName);
+      if(result == 0) {
+        result = this.startKey.compareTo(other.startKey);
+      }
+      return result;
+    }
+
  }
 
  /** Work for the meta scanner is queued up here */
@@ -456,10 +522,21 @@
 
  private SortedMap<Text, Long> assignAttempts;
 
-  // 'killList' indicates regions that we hope to close and then never reopen
-  // (because we're merging them, say).
+  // 'killList' indicates regions that we hope to close and not reopen
+  // (because we're merging them, or taking the table offline, for example).
 
  private SortedMap<String, TreeMap<Text, HRegionInfo>> killList;
+
+  // 'killedRegions' contains regions that are in the process of being closed
+
+  private SortedSet<Text> killedRegions;
+
+  // 'regionsToDelete' contains regions that need to be deleted, but cannot be
+  // until the region server closes it
+
+  private SortedSet<Text> regionsToDelete;
+
+  // A map of known server names to server info
 
  private SortedMap<String, HServerInfo> serversToServerInfo =
    Collections.synchronizedSortedMap(new TreeMap<String, HServerInfo>());
@@ -568,6 +645,12 @@
      Collections.synchronizedSortedMap(
          new TreeMap<String, TreeMap<Text, HRegionInfo>>());
 
+    this.killedRegions =
+      Collections.synchronizedSortedSet(new TreeSet<Text>());
+
+    this.regionsToDelete =
+      Collections.synchronizedSortedSet(new TreeSet<Text>());
+
    // We're almost open for business
 
    this.closed = false;
@@ -575,8 +658,9 @@
    LOG.info("HMaster initialized on " + address.toString());
  }
 
-  public boolean isMasterRunning() {
-    return !closed;
+  /** returns the HMaster server address */
+  public HServerAddress getMasterAddress() {
+    return address;
  }
 
  public void run() {
@@ -691,25 +775,30 @@
    }
  }
 
-  /**
-   * Turn off the HMaster.  Sets a flag so that the main thread know to shut
-   * things down in an orderly fashion.
+  /**
+   * Wait until rootRegionLocation has been set or until the
+   * closed flag has been set.
+   * @return True if rootRegionLocation was populated.
   */
-  public void shutdown() {
-    TimerTask tt = new TimerTask() {
-      @Override
-      public void run() {
-        closed = true;
-        synchronized(msgQueue) {
-          msgQueue.clear();         // Empty the queue
-          msgQueue.notifyAll();     // Wake main thread
+  private synchronized boolean waitForRootRegionOrClose() {
+    while (!closed && rootRegionLocation == null) {
+      try {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Wait for root region (or close)");
+        }
+        wait();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Wake from wait for root region (or close)");
+        }
+      } catch(InterruptedException e) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Wake from wait for root region (or close) (IE)");
        }
      }
-    };
-    Timer t = new Timer("Shutdown");
-    t.schedule(tt, 10);
+    }
+    return this.rootRegionLocation == null;
  }
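waitForRootRegionOrClose() is one half of the standard guarded-wait idiom: it loops re-checking the condition around wait(). The other half is whichever thread sets rootRegionLocation (or closed) calling notifyAll() under the same monitor. A sketch of that shape only (the setter name is hypothetical, not this patch's code):

    synchronized void rootRegionIsAvailable(HServerAddress address) {
      this.rootRegionLocation = address;
      notifyAll();   // wakes waitForRootRegionOrClose() and waitForRootRegion()
    }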
-
+
   //////////////////////////////////////////////////////////////////////////////
   // HMasterRegionInterface
   //////////////////////////////////////////////////////////////////////////////
 
@@ -748,10 +837,13 @@
   /** HRegionServers call this method repeatedly. */
   public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg msgs[]) throws IOException {
     String server = serverInfo.getServerAddress().toString().trim();
+    Text serverLabel = new Text(server);
 
-    if (closed) {
-      // We're shutting down.  Tell the server to go away.
+    if(closed
+        || (msgs.length == 1 && msgs[0].getMsg() == HMsg.MSG_REPORT_EXITING)) {
+      // We're shutting down. Or the HRegionServer is.
       serversToServerInfo.remove(server);
+      serverLeases.cancelLease(serverLabel, serverLabel);
       HMsg returnMsgs[] = {new HMsg(HMsg.MSG_REGIONSERVER_STOP)};
       return returnMsgs;
     }
@@ -797,7 +889,6 @@
 
     // This will always succeed; otherwise, the fetch of serversToServerInfo
     // would have failed above.
-    Text serverLabel = new Text(server);
     serverLeases.renewLease(serverLabel, serverLabel);
 
     // Refresh the info object
@@ -899,16 +990,21 @@
         } else {
           boolean reassignRegion = true;
+          boolean deleteRegion = false;
 
-          if(regionsToKill.containsKey(region.regionName)) {
-            regionsToKill.remove(region.regionName);
-            unassignedRegions.remove(region.regionName);
-            assignAttempts.remove(region.regionName);
+          if(killedRegions.remove(region.regionName)) {
             reassignRegion = false;
           }
+
+          if(regionsToDelete.remove(region.regionName)) {
+            reassignRegion = false;
+            deleteRegion = true;
+          }
+          unassignedRegions.remove(region.regionName);
+          assignAttempts.remove(region.regionName);
 
           synchronized(msgQueue) {
-            msgQueue.add(new PendingCloseReport(region, reassignRegion));
+            msgQueue.add(new PendingCloseReport(region, reassignRegion, deleteRegion));
             msgQueue.notifyAll();
           }
@@ -943,10 +1039,9 @@
     // Process the kill list
 
     if(regionsToKill != null) {
-      for(Iterator<HRegionInfo> i = regionsToKill.values().iterator();
-          i.hasNext(); ) {
-
-        returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE_AND_DELETE, i.next()));
+      for(HRegionInfo i: regionsToKill.values()) {
+        returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE, i));
+        killedRegions.add(i.regionName);
       }
     }
 
@@ -993,48 +1088,6 @@
     notifyAll();
   }
 
-  /**
-   * Wait until rootRegionLocation has been set or until the
-   * closed flag has been set.
-   * @return True if rootRegionLocation was populated.
-   */
-  private synchronized boolean waitForRootRegionOrClose() {
-    while (!closed && rootRegionLocation == null) {
-      try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Wait for root region (or close)");
-        }
-        wait();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Wake from wait for root region (or close)");
-        }
-      } catch(InterruptedException e) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Wake from wait for root region (or close) (IE)");
-        }
-      }
-    }
-    return this.rootRegionLocation == null;
-  }
-
-  private synchronized void waitForRootRegion() {
-    while (rootRegionLocation == null) {
-      try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Wait for root region");
-        }
-        wait();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Wake from wait for root region");
-        }
-      } catch(InterruptedException e) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Wake from wait for root region (IE)");
-        }
-      }
-    }
-  }
-
   //////////////////////////////////////////////////////////////////////////////
   // Some internal classes to manage msg-passing and client operations
   //////////////////////////////////////////////////////////////////////////////
@@ -1057,6 +1110,20 @@
     private String deadServer;
     private long oldStartCode;
 
+    private class ToDoEntry {
+      boolean deleteRegion;
+      boolean regionOffline;
+      HStoreKey key;
+      HRegionInfo info;
+
+      ToDoEntry(HStoreKey key, HRegionInfo info) {
+        this.deleteRegion = false;
+        this.regionOffline = false;
+        this.key = key;
+        this.info = info;
+      }
+    }
+
     public PendingServerShutdown(HServerInfo serverInfo) {
       super();
       this.deadServer = serverInfo.getServerAddress().toString();
@@ -1066,7 +1133,7 @@
 
     private void scanMetaRegion(HRegionInterface server, long scannerId,
         Text regionName) throws IOException {
-      Vector<HStoreKey> toDoList = new Vector<HStoreKey>();
+      Vector<ToDoEntry> toDoList = new Vector<ToDoEntry>();
       TreeMap<Text, HRegionInfo> regions = new TreeMap<Text, HRegionInfo>();
 
       DataInputBuffer inbuf = new DataInputBuffer();
@@ -1092,7 +1159,9 @@
 
           TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
           for(int i = 0; i < values.length; i++) {
-            results.put(values[i].getLabel(), values[i].getData().get());
+            byte[] bytes = new byte[values[i].getData().getSize()];
+            System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
+            results.put(values[i].getLabel(), bytes);
           }
 
           byte[] bytes = results.get(COL_SERVER);
@@ -1154,11 +1223,35 @@
           if(LOG.isDebugEnabled()) {
             LOG.debug(serverName + " was serving " + info.regionName);
           }
+
+          ToDoEntry todo = new ToDoEntry(key, info);
+          toDoList.add(todo);
 
-          // Add to our to do lists
+          if(killList.containsKey(deadServer)) {
+            TreeMap<Text, HRegionInfo> regionsToKill = killList.get(deadServer);
+            if(regionsToKill.containsKey(info.regionName)) {
+              regionsToKill.remove(info.regionName);
+              killList.put(deadServer, regionsToKill);
+              unassignedRegions.remove(info.regionName);
+              assignAttempts.remove(info.regionName);
+
+              if(regionsToDelete.contains(info.regionName)) {
+                // Delete this region
+
+                regionsToDelete.remove(info.regionName);
+                todo.deleteRegion = true;
+
+              } else {
+                // Mark region offline
+
+                todo.regionOffline = true;
+              }
+            }
+          } else {
+            // Get region reassigned
 
-          toDoList.add(key);
-          regions.put(info.regionName, info);
+            regions.put(info.regionName, info);
+          }
        }
 
      } finally {
@@ -1176,7 +1269,20 @@
      // Remove server from root/meta entries
 
      for(int i = 0; i < toDoList.size(); i++) {
-        long lockid = server.startUpdate(regionName, clientId, toDoList.get(i).getRow());
+        ToDoEntry e = toDoList.get(i);
+        long lockid = server.startUpdate(regionName, clientId, e.key.getRow());
+        if(e.deleteRegion) {
+          server.delete(regionName, clientId, lockid, COL_REGIONINFO);
+
+        } else if(e.regionOffline) {
+          e.info.offLine = true;
+          ByteArrayOutputStream byteValue = new ByteArrayOutputStream();
+          DataOutputStream s = new DataOutputStream(byteValue);
+          e.info.write(s);
+
+          server.put(regionName, clientId, lockid, COL_REGIONINFO,
+              new BytesWritable(byteValue.toByteArray()));
+        }
        server.delete(regionName, clientId, lockid, COL_SERVER);
        server.delete(regionName, clientId, lockid, COL_STARTCODE);
        server.commit(regionName, clientId, lockid);
@@ -1182,12 +1288,9 @@
        server.commit(regionName, clientId, lockid);
      }
 
-      // Put all the regions we found on the unassigned region list
+      // Get regions reassigned
 
-      for(Iterator<Map.Entry<Text, HRegionInfo>> i = regions.entrySet().iterator();
-          i.hasNext(); ) {
-
-        Map.Entry<Text, HRegionInfo> e = i.next();
+      for(Map.Entry<Text, HRegionInfo> e: regions.entrySet()) {
        Text region = e.getKey();
        HRegionInfo regionInfo = e.getValue();
@@ -1257,9 +1360,12 @@
  private class PendingCloseReport extends PendingOperation {
    private HRegionInfo regionInfo;
    private boolean reassignRegion;
+    private boolean deleteRegion;
    private boolean rootRegion;
 
-    public PendingCloseReport(HRegionInfo regionInfo, boolean reassignRegion) {
+    public PendingCloseReport(HRegionInfo regionInfo, boolean reassignRegion,
+        boolean deleteRegion) {
+
      super();
 
      this.regionInfo = regionInfo;
@@ -1264,6 +1370,7 @@
      this.regionInfo = regionInfo;
      this.reassignRegion = reassignRegion;
+      this.deleteRegion = deleteRegion;
 
      // If the region closing down is a meta region then we need to update
      // the ROOT table
@@ -1312,6 +1419,18 @@
        try {
          long lockid = server.startUpdate(metaRegionName, clientId, regionInfo.regionName);
+          if(deleteRegion) {
+            server.delete(metaRegionName, clientId, lockid, COL_REGIONINFO);
+
+          } else if(!reassignRegion ) {
+            regionInfo.offLine = true;
+            ByteArrayOutputStream byteValue = new ByteArrayOutputStream();
+            DataOutputStream s = new DataOutputStream(byteValue);
+            regionInfo.write(s);
+
+            server.put(metaRegionName, clientId, lockid, COL_REGIONINFO,
+                new BytesWritable(byteValue.toByteArray()));
+          }
          server.delete(metaRegionName, clientId, lockid, COL_SERVER);
          server.delete(metaRegionName, clientId, lockid, COL_STARTCODE);
          server.commit(metaRegionName, clientId, lockid);
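The offline-marking above hand-serializes the HRegionInfo Writable into a BytesWritable cell value. The same round trip in miniature (a standalone sketch; it works for any Writable, and note getSize() rather than the padded get().length when reading back):

    ByteArrayOutputStream byteValue = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(byteValue);
    info.write(out);                                   // Writable -> bytes
    BytesWritable value = new BytesWritable(byteValue.toByteArray());

    DataInputBuffer in = new DataInputBuffer();
    in.reset(value.get(), value.getSize());            // only the first getSize() bytes are valid
    HRegionInfo copy = new HRegionInfo();
    copy.readFields(in);                               // bytes -> Writable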
@@ -1332,6 +1451,16 @@
 
           unassignedRegions.put(regionInfo.regionName, regionInfo);
           assignAttempts.put(regionInfo.regionName, 0L);
+
+        } else if(deleteRegion) {
+          try {
+            HRegion.deleteRegion(fs, dir, regionInfo.regionName);
+
+          } catch(IOException e) {
+            LOG.error("failed to delete region " + regionInfo.regionName);
+            LOG.error(e);
+            throw e;
+          }
         }
       }
     }
@@ -1425,6 +1554,24 @@
     }
   }
 
+  private synchronized void waitForRootRegion() {
+    while (rootRegionLocation == null) {
+      try {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Wait for root region");
+        }
+        wait();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Wake from wait for root region");
+        }
+      } catch(InterruptedException e) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Wake from wait for root region (IE)");
+        }
+      }
+    }
+  }
+
   //////////////////////////////////////////////////////////////////////////////
   // HMasterInterface
   //////////////////////////////////////////////////////////////////////////////
@@ -1429,9 +1576,8 @@
   // HMasterInterface
   //////////////////////////////////////////////////////////////////////////////
 
-  /** returns the HMaster server address */
-  public HServerAddress getMasterAddress() {
-    return address;
+  public boolean isMasterRunning() {
+    return !closed;
   }
 
   public void createTable(HTableDescriptor desc) throws IOException {
@@ -1436,7 +1582,7 @@
 
   public void createTable(HTableDescriptor desc) throws IOException {
     if (!isMasterRunning()) {
-      throw new IllegalStateException(MASTER_NOT_RUNNING);
+      throw new MasterNotRunningException();
     }
     HRegionInfo newRegion = new HRegionInfo(rand.nextLong(), desc, null, null);
 
@@ -1560,211 +1706,533 @@
     meta.commit(writeid);
   }
 
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#deleteTable(org.apache.hadoop.io.Text)
+   */
   public void deleteTable(Text tableName) throws IOException {
-    if (!isMasterRunning()) {
-      throw new IllegalStateException(MASTER_NOT_RUNNING);
+    new TableDelete(tableName).process();
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("deleted table: " + tableName);
     }
-
-    for(int tries = 0; tries < numRetries; tries++) {
-      try {
-        // We can not access any meta region if they have not already been
-        // assigned and scanned.
-
-        metaScanner.waitForMetaScan();
-
-        Text firstMetaRegion = null;
-        if(knownMetaRegions.size() == 1) {
-          firstMetaRegion = knownMetaRegions.firstKey();
-
-        } else if(knownMetaRegions.containsKey(tableName)) {
-          firstMetaRegion = tableName;
-
-        } else {
-          firstMetaRegion = knownMetaRegions.headMap(tableName).lastKey();
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#addColumn(org.apache.hadoop.io.Text, org.apache.hadoop.hbase.HColumnDescriptor)
+   */
+  public void addColumn(Text tableName, HColumnDescriptor column) throws IOException {
+    new AddColumn(tableName, column).process();
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#deleteColumn(org.apache.hadoop.io.Text, org.apache.hadoop.io.Text)
+   */
+  public void deleteColumn(Text tableName, Text columnName) throws IOException {
+    new DeleteColumn(tableName, HStoreKey.extractFamily(columnName)).process();
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#mergeRegions(org.apache.hadoop.io.Text, org.apache.hadoop.io.Text)
+   */
+  public void mergeRegions(Text regionName1, Text regionName2) throws IOException {
+    //TODO
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#enableTable(org.apache.hadoop.io.Text)
+   */
+  public void enableTable(Text tableName) throws IOException {
+    new ChangeTableState(tableName, true).process();
+  }
+
+  /**
+   * Turn off the HMaster.  Sets a flag so that the main thread knows to shut
+   * things down in an orderly fashion.
+   */
+  public void shutdown() {
+    TimerTask tt = new TimerTask() {
+      @Override
+      public void run() {
+        closed = true;
+        synchronized(msgQueue) {
+          msgQueue.clear();         // Empty the queue
+          msgQueue.notifyAll();     // Wake main thread
        }
+      }
+    };
+    Timer t = new Timer("Shutdown");
+    t.schedule(tt, 10);
+  }
 
-        synchronized(metaScannerLock) {     // Prevent meta scanner from running
-          for(Iterator<MetaRegion> it =
-              knownMetaRegions.tailMap(firstMetaRegion).values().iterator();
-              it.hasNext(); ) {
-
-            // Find all the regions that make up this table
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#findRootRegion()
+   */
+  public HServerAddress findRootRegion() {
+    return rootRegionLocation;
+  }
 
-            MetaRegion m = it.next();
-            HRegionInterface server = client.getHRegionConnection(m.server);
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#disableTable(org.apache.hadoop.io.Text)
+   */
+  public void disableTable(Text tableName) throws IOException {
+    new ChangeTableState(tableName, false).process();
+  }
+
+  // Helper classes for HMasterInterface
 
-            // Rows in the meta table we will need to delete
+  private abstract class TableOperation {
+    private SortedSet<MetaRegion> metaRegions;
+    protected Text tableName;
+
+    protected TreeSet<HRegionInfo> unservedRegions;
 
-            Vector<Text> rowsToDelete = new Vector<Text>();
+    protected TableOperation(Text tableName) throws IOException {
+      if (!isMasterRunning()) {
+        throw new MasterNotRunningException();
+      }
+      this.metaRegions = new TreeSet<MetaRegion>();
+      this.tableName = tableName;
+      this.unservedRegions = new TreeSet<HRegionInfo>();
+
+      // We can not access any meta region if they have not already been
+      // assigned and scanned.
We will get the HRegionServers - // to delete them for us, but we don't tell them that until after - // we are done scanning to prevent lock contention + Text firstMetaRegion = null; + if(knownMetaRegions.size() == 1) { + firstMetaRegion = knownMetaRegions.firstKey(); - TreeMap> localKillList = - new TreeMap>(); + } else if(knownMetaRegions.containsKey(tableName)) { + firstMetaRegion = tableName; - // Regions that are not being served. We will have to delete - // them ourselves + } else { + firstMetaRegion = knownMetaRegions.headMap(tableName).lastKey(); + } - TreeSet unservedRegions = new TreeSet(); + this.metaRegions.addAll(knownMetaRegions.tailMap(firstMetaRegion).values()); + } + + public void process() throws IOException { + for(int tries = 0; tries < numRetries; tries++) { + boolean tableExists = false; + try { + synchronized(metaScannerLock) { // Prevent meta scanner from running + for(MetaRegion m: metaRegions) { - long scannerId = -1L; - try { - scannerId = server.openScanner(m.regionName, METACOLUMNS, tableName); + // Get a connection to a meta server - DataInputBuffer inbuf = new DataInputBuffer(); - byte[] bytes; - while(true) { - LabelledData[] values = null; - HStoreKey key = new HStoreKey(); - values = server.next(scannerId, key); - if(values == null || values.length == 0) { - break; - } - TreeMap results = new TreeMap(); - for(int i = 0; i < values.length; i++) { - bytes = new byte[values[i].getData().getSize()]; - System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length); - results.put(values[i].getLabel(), bytes); - } - bytes = results.get(COL_REGIONINFO); - if(bytes == null || bytes.length == 0) { - break; - } - inbuf.reset(bytes, bytes.length); - HRegionInfo info = new HRegionInfo(); - info.readFields(inbuf); + HRegionInterface server = client.getHRegionConnection(m.server); - if(info.tableDesc.getName().compareTo(tableName) > 0) { - break; // Beyond any more entries for this table - } + // Open a scanner on the meta region + + long scannerId = + server.openScanner(m.regionName, METACOLUMNS, tableName); + + try { + DataInputBuffer inbuf = new DataInputBuffer(); + byte[] bytes; + while(true) { + HRegionInfo info = new HRegionInfo(); + String serverName = null; + long startCode = -1L; + + LabelledData[] values = null; + HStoreKey key = new HStoreKey(); + values = server.next(scannerId, key); + if(values == null || values.length == 0) { + break; + } + boolean haveRegionInfo = false; + for(int i = 0; i < values.length; i++) { + bytes = new byte[values[i].getData().getSize()]; + if(bytes.length == 0) { + break; + } + System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length); + + if(values[i].getLabel().equals(COL_REGIONINFO)) { + haveRegionInfo = true; + inbuf.reset(bytes, bytes.length); + info.readFields(inbuf); + + } else if(values[i].getLabel().equals(COL_SERVER)) { + try { + serverName = new String(bytes, UTF8_ENCODING); + + } catch(UnsupportedEncodingException e) { + assert(false); + } + + } else if(values[i].getLabel().equals(COL_STARTCODE)) { + try { + startCode = Long.valueOf(new String(bytes, UTF8_ENCODING)); + + } catch(UnsupportedEncodingException e) { + assert(false); + } + } + } + + if(!haveRegionInfo) { + throw new IOException(COL_REGIONINFO + " not found"); + } + + if(info.tableDesc.getName().compareTo(tableName) > 0) { + break; // Beyond any more entries for this table + } + + tableExists = true; + if(!isBeingServed(serverName, startCode)) { + unservedRegions.add(info); + } + processScanItem(serverName, startCode, info); - 
rowsToDelete.add(info.regionName); + } // while(true) + + } finally { + if(scannerId != -1L) { + try { + server.close(scannerId); - // Is it being served? + } catch(IOException e) { + LOG.error(e); + } + } + scannerId = -1L; + } + + if(!tableExists) { + throw new IOException(tableName + " does not exist"); + } + + postProcessMeta(m, server); + unservedRegions.clear(); + + } // for(MetaRegion m:) + } // synchronized(metaScannerLock) + + } catch(NotServingRegionException e) { + if(tries == numRetries - 1) { + throw e; + } + continue; + } + break; + } // for(tries...) + } + + protected boolean isBeingServed(String serverName, long startCode) { + boolean result = false; + + if(serverName != null && startCode != -1L) { + HServerInfo s = serversToServerInfo.get(serverName); + result = s != null && s.getStartCode() == startCode; + } + return result; + } + + protected boolean isEnabled(HRegionInfo info) { + return !info.offLine; + } + + protected abstract void processScanItem(String serverName, long startCode, + HRegionInfo info) throws IOException; + + protected abstract void postProcessMeta(MetaRegion m, + HRegionInterface server) throws IOException; + } + + private class ChangeTableState extends TableOperation { + private boolean online; + + protected TreeMap> servedRegions; + protected long lockid; + protected long clientId; + + public ChangeTableState(Text tableName, boolean onLine) throws IOException { + super(tableName); + this.online = onLine; + this.servedRegions = new TreeMap>(); + } + + protected void processScanItem(String serverName, long startCode, + HRegionInfo info) throws IOException { + + if(isBeingServed(serverName, startCode)) { + TreeSet regions = servedRegions.get(serverName); + if(regions == null) { + regions = new TreeSet(); + } + regions.add(info); + servedRegions.put(serverName, regions); + } + } + + protected void postProcessMeta(MetaRegion m, HRegionInterface server) + throws IOException { - bytes = results.get(COL_SERVER); - if(bytes != null && bytes.length != 0) { - String serverName = new String(bytes, UTF8_ENCODING); + // Process regions not being served + + if(LOG.isDebugEnabled()) { + LOG.debug("processing unserved regions"); + } + for(HRegionInfo i: unservedRegions) { + + // Update meta table - bytes = results.get(COL_STARTCODE); - if(bytes != null && bytes.length != 0) { - long startCode = Long.valueOf(new String(bytes, UTF8_ENCODING)); + if(LOG.isDebugEnabled()) { + LOG.debug("updating columns in row: " + i.regionName); + } - HServerInfo s = serversToServerInfo.get(serverName); - if(s != null && s.getStartCode() == startCode) { + lockid = -1L; + clientId = rand.nextLong(); + try { + lockid = server.startUpdate(m.regionName, clientId, i.regionName); + updateRegionInfo(server, m.regionName, i); + server.delete(m.regionName, clientId, lockid, COL_SERVER); + server.delete(m.regionName, clientId, lockid, COL_STARTCODE); + server.commit(m.regionName, clientId, lockid); + lockid = -1L; - // It is being served. - // Tell the server to stop it and not report back. 
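For orientation, TableOperation is a template method: process() retries the META scan up to numRetries times, invokes processScanItem() once per row of the table, collects rows with no live server into unservedRegions, and finally calls postProcessMeta() once per META region. A minimal sketch of a hypothetical read-only subclass (the class name is invented for illustration; like ChangeTableState it would live inside HMaster):

    private class ListTableRegions extends TableOperation {
      ListTableRegions(Text tableName) throws IOException {
        super(tableName);
      }

      protected void processScanItem(String serverName, long startCode,
          HRegionInfo info) throws IOException {
        // Called once per META row belonging to tableName.
        LOG.info("region " + info.regionName + " assigned to " + serverName);
      }

      protected void postProcessMeta(MetaRegion m, HRegionInterface server)
          throws IOException {
        // Called once per META region after the scan; unservedRegions holds
        // the rows whose server/startcode did not match a live region server.
        LOG.info(unservedRegions.size() + " region(s) of " + tableName
            + " currently unassigned");
      }
    }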
+ if(LOG.isDebugEnabled()) { + LOG.debug("updated columns in row: " + i.regionName); + } - TreeMap regionsToKill = - localKillList.get(serverName); + } catch(NotServingRegionException e) { + throw e; - if(regionsToKill == null) { - regionsToKill = new TreeMap(); - } - regionsToKill.put(info.regionName, info); - localKillList.put(serverName, regionsToKill); - continue; - } - } - } + } catch(IOException e) { + LOG.error("column update failed in row: " + i.regionName); + LOG.error(e); - // Region is not currently being served. - // Prevent it from getting assigned and add it to the list of - // regions we need to delete here. + } finally { + try { + if(lockid != -1L) { + server.abort(m.regionName, clientId, lockid); + } - unassignedRegions.remove(info.regionName); - assignAttempts.remove(info.regionName); - unservedRegions.add(info.regionName); - } + } catch(IOException iex) { + LOG.error(iex); + } + } - } finally { - if(scannerId != -1L) { - try { - server.close(scannerId); + if(online) { // Bring offline regions on-line + if(!unassignedRegions.containsKey(i.regionName)) { + unassignedRegions.put(i.regionName, i); + assignAttempts.put(i.regionName, 0L); + } + + } else { // Prevent region from getting assigned. + unassignedRegions.remove(i.regionName); + assignAttempts.remove(i.regionName); + } + } + + // Process regions currently being served + + if(LOG.isDebugEnabled()) { + LOG.debug("processing regions currently being served"); + } + for(Map.Entry> e: servedRegions.entrySet()) { + String serverName = e.getKey(); + + if(online) { + continue; // Already being served + } + + // Cause regions being served to be taken off-line and disabled + + TreeMap localKillList = killList.get(serverName); + if(localKillList == null) { + localKillList = new TreeMap(); + } + for(HRegionInfo i: e.getValue()) { + if(LOG.isDebugEnabled()) { + LOG.debug("adding region " + i.regionName + " to local kill list"); + } + localKillList.put(i.regionName, i); + } + if(localKillList != null && localKillList.size() > 0) { + if(LOG.isDebugEnabled()) { + LOG.debug("inserted local kill list into kill list for server " + serverName); + } + killList.put(serverName, localKillList); + } + } + servedRegions.clear(); + } + + protected void updateRegionInfo(HRegionInterface server, Text regionName, + HRegionInfo i) throws IOException { + + i.offLine = !online; + + ByteArrayOutputStream byteValue = new ByteArrayOutputStream(); + DataOutputStream s = new DataOutputStream(byteValue); + i.write(s); - } catch(IOException e) { - LOG.error(e); - } - } - scannerId = -1L; - } + server.put(regionName, clientId, lockid, COL_REGIONINFO, + new BytesWritable(byteValue.toByteArray())); + + } + } + + private class TableDelete extends ChangeTableState { + + public TableDelete(Text tableName) throws IOException { + super(tableName, false); + } + + protected void postProcessMeta(MetaRegion m, HRegionInterface server) + throws IOException { - // Wipe the existence of the regions out of the meta table + // For regions that are being served, mark them for deletion + + for(TreeSet s: servedRegions.values()) { + for(HRegionInfo i: s) { + regionsToDelete.add(i.regionName); + } + } - for(Iterator row = rowsToDelete.iterator(); row.hasNext(); ) { - Text rowName = row.next(); - if(LOG.isDebugEnabled()) { - LOG.debug("deleting columns in row: " + rowName); - } - long lockid = -1L; - long clientId = rand.nextLong(); - try { - lockid = server.startUpdate(m.regionName, clientId, rowName); - server.delete(m.regionName, clientId, lockid, COL_REGIONINFO); - 
server.delete(m.regionName, clientId, lockid, COL_SERVER); - server.delete(m.regionName, clientId, lockid, COL_STARTCODE); - server.commit(m.regionName, clientId, lockid); - lockid = -1L; - if(LOG.isDebugEnabled()) { - LOG.debug("deleted columns in row: " + rowName); - } + // Unserved regions we can delete now + + for(HRegionInfo i: unservedRegions) { + // Delete the region - } catch(IOException e) { - if(lockid != -1L) { - server.abort(m.regionName, clientId, lockid); - } - LOG.error("columns deletion failed in row: " + rowName); - LOG.error(e); - throw e; - } - } + try { + HRegion.deleteRegion(fs, dir, i.regionName); - // Notify region servers that some regions need to be closed and deleted + } catch(IOException e) { + LOG.error("failed to delete region " + i.regionName); + LOG.error(e); + } + } + super.postProcessMeta(m, server); + } + + @Override + protected void updateRegionInfo(HRegionInterface server, Text regionName, + HRegionInfo i) throws IOException { + + server.delete(regionName, clientId, lockid, COL_REGIONINFO); + } + } + + private abstract class ColumnOperation extends TableOperation { + + protected ColumnOperation(Text tableName) throws IOException { + super(tableName); + } - if(localKillList.size() != 0) { - killList.putAll(localKillList); - } + protected void processScanItem(String serverName, long startCode, + HRegionInfo info) throws IOException { + + if(isEnabled(info)) { + throw new TableNotDisabledException(tableName.toString()); + } + } - // Delete any regions that are not being served + protected void updateRegionInfo(HRegionInterface server, Text regionName, + HRegionInfo i) throws IOException { + + ByteArrayOutputStream byteValue = new ByteArrayOutputStream(); + DataOutputStream s = new DataOutputStream(byteValue); + i.write(s); - for(Iterator i = unservedRegions.iterator(); i.hasNext(); ) { - Text regionName = i.next(); - try { - HRegion.deleteRegion(fs, dir, regionName); + long lockid = -1L; + long clientId = rand.nextLong(); + try { + lockid = server.startUpdate(regionName, clientId, i.regionName); + server.put(regionName, clientId, lockid, COL_REGIONINFO, + new BytesWritable(byteValue.toByteArray())); + + server.commit(regionName, clientId, lockid); + lockid = -1L; - } catch(IOException e) { - LOG.error("failed to delete region " + regionName); - LOG.error(e); - throw e; - } - } - } + if(LOG.isDebugEnabled()) { + LOG.debug("updated columns in row: " + i.regionName); } + } catch(NotServingRegionException e) { - if(tries == numRetries - 1) { - throw e; - } - continue; + throw e; } catch(IOException e) { + LOG.error("column update failed in row: " + i.regionName); LOG.error(e); - throw e; + + } finally { + if(lockid != -1L) { + try { + server.abort(regionName, clientId, lockid); + + } catch(IOException iex) { + LOG.error(iex); + } + } } - break; } + } + + private class DeleteColumn extends ColumnOperation { + private Text columnName; + + public DeleteColumn(Text tableName, Text columnName) throws IOException { + super(tableName); + this.columnName = columnName; + } + + protected void postProcessMeta(MetaRegion m, HRegionInterface server) + throws IOException { - if(LOG.isDebugEnabled()) { - LOG.debug("deleted table: " + tableName); + for(HRegionInfo i: unservedRegions) { + i.tableDesc.families().remove(columnName); + updateRegionInfo(server, m.regionName, i); + + // Delete the directories used by the column + + try { + fs.delete(HStoreFile.getMapDir(dir, i.regionName, columnName)); + + } catch(IOException e) { + LOG.error(e); + } + + try { + 
fs.delete(HStoreFile.getInfoDir(dir, i.regionName, columnName)); + + } catch(IOException e) { + LOG.error(e); + } + + } } } - public HServerAddress findRootRegion() { - return rootRegionLocation; + private class AddColumn extends ColumnOperation { + private HColumnDescriptor newColumn; + + public AddColumn(Text tableName, HColumnDescriptor newColumn) + throws IOException { + + super(tableName); + this.newColumn = newColumn; + } + + protected void postProcessMeta(MetaRegion m, HRegionInterface server) + throws IOException { + + for(HRegionInfo i: unservedRegions) { + + //TODO: I *think* all we need to do to add a column is add it to + // the table descriptor. When the region is brought on-line, it + // should find the column missing and create it. + + i.tableDesc.addFamily(newColumn); + updateRegionInfo(server, m.regionName, i); + } + } } - + ////////////////////////////////////////////////////////////////////////////// // Managing leases ////////////////////////////////////////////////////////////////////////////// @@ -1789,6 +2257,10 @@ } } + ////////////////////////////////////////////////////////////////////////////// + // Main program + ////////////////////////////////////////////////////////////////////////////// + private static void printUsageAndExit() { System.err.println("Usage: java org.apache.hbase.HMaster " + "[--bind=hostname:port] start|stop"); Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterInterface.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterInterface.java (revision 542444) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterInterface.java (working copy) @@ -41,6 +41,14 @@ public void createTable(HTableDescriptor desc) throws IOException; public void deleteTable(Text tableName) throws IOException; + public void addColumn(Text tableName, HColumnDescriptor column) throws IOException; + public void deleteColumn(Text tableName, Text columnName) throws IOException; + + public void mergeRegions(Text regionName1, Text regionName2) throws IOException; + + public void enableTable(Text tableName) throws IOException; + public void disableTable(Text tableName) throws IOException; + /** * Shutdown an HBase cluster. */ Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java (revision 542444) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java (working copy) @@ -30,7 +30,6 @@ public static final byte MSG_CALL_SERVER_STARTUP = 4; public static final byte MSG_REGIONSERVER_STOP = 5; public static final byte MSG_REGION_CLOSE_WITHOUT_REPORT = 6; - public static final byte MSG_REGION_CLOSE_AND_DELETE = 7; public static final byte MSG_REPORT_OPEN = 100; public static final byte MSG_REPORT_CLOSE = 101; @@ -36,6 +35,7 @@ public static final byte MSG_REPORT_CLOSE = 101; public static final byte MSG_REGION_SPLIT = 102; public static final byte MSG_NEW_REGION = 103; + public static final byte MSG_REPORT_EXITING = 104; byte msg; HRegionInfo info; Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (revision 542444) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (working copy) @@ -333,12 +333,12 @@ } // Load in all the HStores. 
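In the hunk that follows, HRegion's per-family iteration switches from a set of Text names to the descriptor map, so each HStore is constructed from its full HColumnDescriptor rather than a bare (name, maxVersions) pair. With the generic type parameters that this listing elides restored (families() returns a TreeMap of Text to HColumnDescriptor), the new loop reads roughly:

    for (Map.Entry<Text, HColumnDescriptor> e :
        this.regionInfo.tableDesc.families().entrySet()) {
      // Derive the stores-map key from the descriptor's family name.
      Text colFamily = HStoreKey.extractFamily(e.getKey());
      stores.put(colFamily, new HStore(dir, this.regionInfo.regionName,
          e.getValue(), fs, oldLogFile, conf));
    }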
- for(Iterator it = this.regionInfo.tableDesc.families().iterator(); - it.hasNext(); ) { - Text colFamily = HStoreKey.extractFamily(it.next()); + for(Map.Entry e : + this.regionInfo.tableDesc.families().entrySet()) { + + Text colFamily = HStoreKey.extractFamily(e.getKey()); stores.put(colFamily, new HStore(dir, this.regionInfo.regionName, - colFamily, this.regionInfo.tableDesc.getMaxVersions(), fs, - oldLogFile, conf)); + e.getValue(), fs, oldLogFile, conf)); } // Get rid of any splits or merges that were lost in-progress @@ -378,14 +378,6 @@ return closed; } - /** Closes and deletes this HRegion. Called when doing a table deletion, for example */ - public void closeAndDelete() throws IOException { - LOG.info("deleting region: " + regionInfo.regionName); - close(); - deleteRegion(fs, dir, regionInfo.regionName); - LOG.info("region deleted: " + regionInfo.regionName); - } - /** * Close down this HRegion. Flush the cache, shut down each HStore, don't * service any more calls. Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegiondirReader.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegiondirReader.java (revision 542444) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegiondirReader.java (working copy) @@ -101,10 +101,10 @@ private HTableDescriptor getTableDescriptor(final FileSystem fs, final Path d, final String tableName) throws IOException { - HTableDescriptor desc = new HTableDescriptor(tableName, 1); + HTableDescriptor desc = new HTableDescriptor(tableName); Text [] families = getFamilies(fs, d); for (Text f: families) { - desc.addFamily(f); + desc.addFamily(new HColumnDescriptor(f.toString())); } return desc; } @@ -163,7 +163,7 @@ private void dump(final HRegionInfo info) throws IOException { HRegion r = new HRegion(this.parentdir, null, FileSystem.get(this.conf), conf, info, null, null); - Text [] families = info.tableDesc.families().toArray(new Text [] {}); + Text [] families = info.tableDesc.families().keySet().toArray(new Text [] {}); HInternalScannerInterface scanner = r.getScanner(families, new Text()); HStoreKey key = new HStoreKey(); TreeMap results = new TreeMap(); @@ -183,8 +183,8 @@ // followed by cell content. 
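HTableDescriptor no longer carries a table-wide maxVersions; per-family settings now travel on HColumnDescriptor. A minimal construction sketch (table and family names invented; judging from the expected strings in TestToString at the end of this patch, the six-argument constructor takes name, max versions, compression, in-memory flag, max value length, and bloom filter flag):

    HTableDescriptor desc = new HTableDescriptor("example");
    // String form: family name with trailing colon, all defaults.
    desc.addFamily(new HColumnDescriptor("contents:"));
    // Long form: per-family overrides.
    desc.addFamily(new HColumnDescriptor(new Text("anchor:"), 10,
        HColumnDescriptor.CompressionType.BLOCK, true, 1000, false));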
while(scanner.next(key, results)) { for (Map.Entry es: results.entrySet()) { - Text colname = es.getKey(); - BytesWritable colvalue = es.getValue(); + Text colname = es.getKey(); + BytesWritable colvalue = es.getValue(); Object value = null; byte[] bytes = new byte[colvalue.getSize()]; if (colname.toString().equals("info:regioninfo")) { @@ -219,4 +219,4 @@ } } } -} \ No newline at end of file +} Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java (revision 542444) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java (working copy) @@ -35,6 +35,7 @@ public Text startKey; public Text endKey; public Text regionName; + public boolean offLine; public HRegionInfo() { this.regionId = 0; @@ -42,16 +43,12 @@ this.startKey = new Text(); this.endKey = new Text(); this.regionName = new Text(); + this.offLine = false; } - public HRegionInfo(final byte [] serializedBytes) { + public HRegionInfo(final byte [] serializedBytes) throws IOException { this(); - try { - readFields(new DataInputStream( - new ByteArrayInputStream(serializedBytes))); - } catch (IOException e) { - throw new RuntimeException(e); - } + readFields(new DataInputStream(new ByteArrayInputStream(serializedBytes))); } public HRegionInfo(long regionId, HTableDescriptor tableDesc, @@ -79,6 +76,8 @@ this.regionName = new Text(tableDesc.getName() + "_" + (startKey == null ? "" : startKey.toString()) + "_" + regionId); + + this.offLine = false; } @Override @@ -98,6 +97,7 @@ startKey.write(out); endKey.write(out); regionName.write(out); + out.writeBoolean(offLine); } public void readFields(DataInput in) throws IOException { @@ -106,5 +106,6 @@ this.startKey.readFields(in); this.endKey.readFields(in); this.regionName.readFields(in); + this.offLine = in.readBoolean(); } } \ No newline at end of file Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (revision 542444) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (working copy) @@ -603,6 +603,13 @@ } } try { + HMsg[] exitMsg = { new HMsg(HMsg.MSG_REPORT_EXITING) }; + hbaseMaster.regionServerReport(info, exitMsg); + + } catch(IOException e) { + LOG.warn(e); + } + try { LOG.info("stopping server at: " + info.getServerAddress().toString()); // Send interrupts to wake up threads if sleeping so they notice shutdown. @@ -747,13 +754,6 @@ closeRegion(msg.getRegionInfo(), false); break; - case HMsg.MSG_REGION_CLOSE_AND_DELETE: - if (LOG.isDebugEnabled()) { - LOG.debug("MSG_REGION_CLOSE_AND_DELETE"); - } - closeAndDeleteRegion(msg.getRegionInfo()); - break; - default: throw new IOException("Impossible state during msg processing. 
Instruction: " + msg); } @@ -799,27 +799,6 @@ } } - private void closeAndDeleteRegion(HRegionInfo info) throws IOException { - this.lock.writeLock().lock(); - HRegion region = null; - try { - region = regions.remove(info.regionName); - } finally { - this.lock.writeLock().unlock(); - } - if(region != null) { - if(LOG.isDebugEnabled()) { - LOG.debug("deleting region " + info.regionName); - } - - region.closeAndDelete(); - - if(LOG.isDebugEnabled()) { - LOG.debug("region " + info.regionName + " deleted"); - } - } - } - /** Called either when the master tells us to restart or from stop() */ private void closeAllRegions() { Vector regionsToClose = new Vector(); Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (revision 542444) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (working copy) @@ -52,8 +52,9 @@ Path dir; Text regionName; - Text colFamily; - int maxVersions; + HColumnDescriptor family; + Text familyName; + SequenceFile.CompressionType compression; FileSystem fs; Configuration conf; Path mapdir; @@ -98,7 +99,7 @@ *

It's assumed that after this constructor returns, the reconstructionLog * file will be deleted (by whoever has instantiated the HStore). */ - public HStore(Path dir, Text regionName, Text colFamily, int maxVersions, + public HStore(Path dir, Text regionName, HColumnDescriptor family, FileSystem fs, Path reconstructionLog, Configuration conf) throws IOException { this.dir = dir; @@ -103,18 +104,32 @@ throws IOException { this.dir = dir; this.regionName = regionName; - this.colFamily = colFamily; - this.maxVersions = maxVersions; + this.family = family; + this.familyName = HStoreKey.extractFamily(this.family.getName()); + this.compression = SequenceFile.CompressionType.NONE; + + if(family.getCompression() != HColumnDescriptor.CompressionType.NONE) { + if(family.getCompression() == HColumnDescriptor.CompressionType.BLOCK) { + this.compression = SequenceFile.CompressionType.BLOCK; + + } else if(family.getCompression() == HColumnDescriptor.CompressionType.RECORD) { + this.compression = SequenceFile.CompressionType.RECORD; + + } else { + assert(false); + } + } + this.fs = fs; this.conf = conf; - this.mapdir = HStoreFile.getMapDir(dir, regionName, colFamily); + this.mapdir = HStoreFile.getMapDir(dir, regionName, familyName); fs.mkdirs(mapdir); - this.loginfodir = HStoreFile.getInfoDir(dir, regionName, colFamily); + this.loginfodir = HStoreFile.getInfoDir(dir, regionName, familyName); fs.mkdirs(loginfodir); if(LOG.isDebugEnabled()) { - LOG.debug("starting HStore for " + regionName + "/"+ colFamily); + LOG.debug("starting HStore for " + regionName + "/"+ familyName); } // Either restart or get rid of any leftover compaction work. Either way, @@ -123,7 +138,7 @@ this.compactdir = new Path(dir, COMPACTION_DIR); Path curCompactStore = - HStoreFile.getHStoreDir(compactdir, regionName, colFamily); + HStoreFile.getHStoreDir(compactdir, regionName, familyName); if(fs.exists(curCompactStore)) { processReadyCompaction(); fs.delete(curCompactStore); @@ -134,7 +149,7 @@ // corresponding one in 'loginfodir'. Without a corresponding log info // file, the entry in 'mapdir' must be deleted. 
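The constructor above maps the family's HColumnDescriptor.CompressionType onto the SequenceFile equivalent with a chained if/else. Assuming CompressionType is a Java 5 enum, as the == comparisons suggest, the same mapping could be written as a small helper (hypothetical method, shown only to make the translation explicit):

    private static SequenceFile.CompressionType toSequenceFileCompression(
        HColumnDescriptor family) {
      switch (family.getCompression()) {
      case BLOCK:
        return SequenceFile.CompressionType.BLOCK;
      case RECORD:
        return SequenceFile.CompressionType.RECORD;
      case NONE:
        return SequenceFile.CompressionType.NONE;
      default:
        // Like the assert(false) above: an unknown type is a programming error.
        throw new AssertionError(family.getCompression());
      }
    }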
Vector hstoreFiles - = HStoreFile.loadHStoreFiles(conf, dir, regionName, colFamily, fs); + = HStoreFile.loadHStoreFiles(conf, dir, regionName, familyName, fs); for(Iterator it = hstoreFiles.iterator(); it.hasNext(); ) { HStoreFile hsf = it.next(); mapFiles.put(hsf.loadInfo(fs), hsf); @@ -187,7 +202,7 @@ Text column = val.getColumn(); if (!key.getRegionName().equals(this.regionName) || column.equals(HLog.METACOLUMN) || - HStoreKey.extractFamily(column).equals(this.colFamily)) { + HStoreKey.extractFamily(column).equals(this.familyName)) { if (LOG.isDebugEnabled()) { LOG.debug("Passing on edit " + key.getRegionName() + ", " + key.getRegionName() + ", " + column.toString() + ": " @@ -230,7 +245,7 @@ new MapFile.Reader(fs, e.getValue().getMapFilePath().toString(), conf)); } - LOG.info("HStore online for " + this.regionName + "/" + this.colFamily); + LOG.info("HStore online for " + this.regionName + "/" + this.familyName); } /** Turn off all the MapFile readers */ @@ -235,7 +250,7 @@ /** Turn off all the MapFile readers */ public void close() throws IOException { - LOG.info("closing HStore for " + this.regionName + "/" + this.colFamily); + LOG.info("closing HStore for " + this.regionName + "/" + this.familyName); this.lock.obtainWriteLock(); try { for (MapFile.Reader map: maps.values()) { @@ -244,7 +259,7 @@ maps.clear(); mapFiles.clear(); - LOG.info("HStore closed for " + this.regionName + "/" + this.colFamily); + LOG.info("HStore closed for " + this.regionName + "/" + this.familyName); } finally { this.lock.releaseWriteLock(); } @@ -276,7 +291,7 @@ synchronized(flushLock) { if(LOG.isDebugEnabled()) { - LOG.debug("flushing HStore " + this.regionName + "/" + this.colFamily); + LOG.debug("flushing HStore " + this.regionName + "/" + this.familyName); } // A. Write the TreeMap out to the disk @@ -282,7 +297,7 @@ // A. 
Write the TreeMap out to the disk HStoreFile flushedFile - = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, colFamily, fs); + = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, familyName, fs); Path mapfile = flushedFile.getMapFilePath(); if(LOG.isDebugEnabled()) { @@ -290,7 +305,7 @@ } MapFile.Writer out = new MapFile.Writer(conf, fs, mapfile.toString(), - HStoreKey.class, BytesWritable.class); + HStoreKey.class, BytesWritable.class, compression); try { for (Map.Entry es: inputCache.entrySet()) { @@ -295,7 +310,7 @@ try { for (Map.Entry es: inputCache.entrySet()) { HStoreKey curkey = es.getKey(); - if (this.colFamily.equals(HStoreKey.extractFamily(curkey.getColumn()))) { + if (this.familyName.equals(HStoreKey.extractFamily(curkey.getColumn()))) { out.append(curkey, es.getValue()); } } @@ -300,7 +315,7 @@ } } if(LOG.isDebugEnabled()) { - LOG.debug("HStore " + this.regionName + "/" + this.colFamily + " flushed"); + LOG.debug("HStore " + this.regionName + "/" + this.familyName + " flushed"); } } finally { @@ -325,7 +340,7 @@ mapFiles.put(logCacheFlushId, flushedFile); if(LOG.isDebugEnabled()) { LOG.debug("HStore available for " + this.regionName + "/" - + this.colFamily + " flush id=" + logCacheFlushId); + + this.familyName + " flush id=" + logCacheFlushId); } } finally { @@ -373,10 +388,10 @@ void compactHelper(boolean deleteSequenceInfo) throws IOException { synchronized(compactLock) { if(LOG.isDebugEnabled()) { - LOG.debug("started compaction of " + this.regionName + "/" + this.colFamily); + LOG.debug("started compaction of " + this.regionName + "/" + this.familyName); } - Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily); + Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, familyName); fs.mkdirs(curCompactStore); try { @@ -409,11 +424,11 @@ } HStoreFile compactedOutputFile - = new HStoreFile(conf, compactdir, regionName, colFamily, -1); + = new HStoreFile(conf, compactdir, regionName, familyName, -1); if(toCompactFiles.size() == 1) { if(LOG.isDebugEnabled()) { - LOG.debug("nothing to compact for " + this.regionName + "/" + this.colFamily); + LOG.debug("nothing to compact for " + this.regionName + "/" + this.familyName); } HStoreFile hsf = toCompactFiles.elementAt(0); @@ -426,7 +441,7 @@ MapFile.Writer compactedOut = new MapFile.Writer(conf, fs, compactedOutputFile.getMapFilePath().toString(), HStoreKey.class, - BytesWritable.class); + BytesWritable.class, compression); try { @@ -507,7 +522,7 @@ timesSeen = 1; } - if(timesSeen <= maxVersions) { + if(timesSeen <= family.getMaxVersions()) { // Keep old versions until we have maxVersions worth. // Then just skip them. @@ -592,7 +607,7 @@ processReadyCompaction(); if(LOG.isDebugEnabled()) { - LOG.debug("compaction complete for " + this.regionName + "/" + this.colFamily); + LOG.debug("compaction complete for " + this.regionName + "/" + this.familyName); } } finally { @@ -625,7 +640,7 @@ // 1. 
Acquiring the write-lock - Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily); + Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, familyName); this.lock.obtainWriteLock(); try { Path doneFile = new Path(curCompactStore, COMPACTION_DONE); @@ -714,10 +729,10 @@ } HStoreFile compactedFile - = new HStoreFile(conf, compactdir, regionName, colFamily, -1); + = new HStoreFile(conf, compactdir, regionName, familyName, -1); HStoreFile finalCompactedFile - = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, colFamily, fs); + = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, familyName, fs); fs.rename(compactedFile.getMapFilePath(), finalCompactedFile.getMapFilePath()); Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (revision 542444) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (working copy) @@ -19,7 +19,9 @@ import java.io.DataOutput; import java.io.IOException; import java.util.Iterator; -import java.util.TreeSet; +import java.util.Map; +import java.util.Map.Entry; +import java.util.TreeMap; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -27,13 +29,12 @@ import org.apache.hadoop.io.WritableComparable; /** - * HTableDescriptor contains various facts about an HTable, like - * column families, maximum number of column versions, etc. + * HTableDescriptor contains the name of an HTable, and its + * column families. */ public class HTableDescriptor implements WritableComparable { Text name; - int maxVersions; - TreeSet families = new TreeSet(); + TreeMap families; /** * Legal table names can only contain 'word characters': @@ -39,7 +40,7 @@ * Legal table names can only contain 'word characters': * i.e. [a-zA-Z_0-9]. * - * Lets be restrictive until a reason to be otherwise. + * Let's be restrictive until a reason to be otherwise. */ private static final Pattern LEGAL_TABLE_NAME = Pattern.compile("[\\w-]+"); @@ -44,16 +45,9 @@ private static final Pattern LEGAL_TABLE_NAME = Pattern.compile("[\\w-]+"); - /** - * Legal family names can only contain 'word characters' and - * end in a colon. - */ - private static final Pattern LEGAL_FAMILY_NAME = - Pattern.compile("\\w+:"); - public HTableDescriptor() { this.name = new Text(); - this.families.clear(); + this.families = new TreeMap(); } /** @@ -59,7 +53,6 @@ /** * Constructor. * @param name Table name. - * @param maxVersions Number of versions of a column to keep. * @throws IllegalArgumentException if passed a table name * that is made of other than 'word' characters: i.e. * [a-zA-Z_0-9] @@ -64,21 +57,14 @@ * that is made of other than 'word' characters: i.e. * [a-zA-Z_0-9] */ - public HTableDescriptor(String name, int maxVersions) { + public HTableDescriptor(String name) { Matcher m = LEGAL_TABLE_NAME.matcher(name); if (m == null || !m.matches()) { - throw new IllegalArgumentException("Table names can only " + - "contain 'word characters': i.e. [a-zA-Z_0-9"); - } - if (maxVersions <= 0) { - // TODO: Allow maxVersion of 0 to be the way you say - // "Keep all versions". Until there is support, consider - // 0 -- or < 0 -- a configuration error. - throw new IllegalArgumentException("Maximum versions " + - "must be positive"); + throw new IllegalArgumentException( + "Table names can only contain 'word characters': i.e. 
[a-zA-Z_0-9"); } this.name = new Text(name); - this.maxVersions = maxVersions; + this.families = new TreeMap(); } public Text getName() { @@ -85,27 +71,12 @@ return name; } - public int getMaxVersions() { - return maxVersions; - } - /** * Add a column family. - * @param family Column family name to add. Column family names - * must end in a : - * @throws IllegalArgumentException if passed a table name - * that is made of other than 'word' characters: i.e. - * [a-zA-Z_0-9] + * @param family HColumnDescriptor of familyto add. */ - public void addFamily(Text family) { - String familyStr = family.toString(); - Matcher m = LEGAL_FAMILY_NAME.matcher(familyStr); - if (m == null || !m.matches()) { - throw new IllegalArgumentException("Family names can " + - "only contain 'word characters' and must end with a " + - "':'"); - } - families.add(family); + public void addFamily(HColumnDescriptor family) { + families.put(family.getName(), family); } /** Do we contain a given column? */ @@ -110,11 +81,15 @@ /** Do we contain a given column? */ public boolean hasFamily(Text family) { - return families.contains(family); + return families.containsKey(family); } - /** All the column families in this table. */ - public TreeSet families() { + /** All the column families in this table. + * + * TODO: What is this used for? Seems Dangerous to let people play with our + * private members. + */ + public TreeMap families() { return families; } @@ -120,8 +95,7 @@ @Override public String toString() { - return "name: " + this.name.toString() + - ", maxVersions: " + this.maxVersions + ", families: " + this.families; + return "name: " + this.name.toString() + ", families: " + this.families; } @Override @@ -133,10 +107,9 @@ public int hashCode() { // TODO: Cache. int result = this.name.hashCode(); - result ^= Integer.valueOf(this.maxVersions).hashCode(); if (this.families != null && this.families.size() > 0) { - for (Text family: this.families) { - result ^= family.hashCode(); + for (Map.Entry e: this.families.entrySet()) { + result ^= e.hashCode(); } } return result; @@ -148,9 +121,9 @@ public void write(DataOutput out) throws IOException { name.write(out); - out.writeInt(maxVersions); out.writeInt(families.size()); - for(Iterator it = families.iterator(); it.hasNext(); ) { + for(Iterator it = families.values().iterator(); + it.hasNext(); ) { it.next().write(out); } } @@ -157,13 +130,12 @@ public void readFields(DataInput in) throws IOException { this.name.readFields(in); - this.maxVersions = in.readInt(); int numCols = in.readInt(); families.clear(); for(int i = 0; i < numCols; i++) { - Text t = new Text(); - t.readFields(in); - families.add(t); + HColumnDescriptor c = new HColumnDescriptor(); + c.readFields(in); + families.put(c.getName(), c); } } @@ -172,24 +144,24 @@ ////////////////////////////////////////////////////////////////////////////// public int compareTo(Object o) { - HTableDescriptor htd = (HTableDescriptor) o; - int result = name.compareTo(htd.name); + HTableDescriptor other = (HTableDescriptor) o; + int result = name.compareTo(other.name); + if(result == 0) { - result = maxVersions - htd.maxVersions; + result = families.size() - other.families.size(); } - if(result == 0) { - result = families.size() - htd.families.size(); + if(result == 0 && families.size() != other.families.size()) { + result = Integer.valueOf(families.size()).compareTo( + Integer.valueOf(other.families.size())); } if(result == 0) { - Iterator it2 = htd.families.iterator(); - for(Iterator it = families.iterator(); it.hasNext(); ) { - 
Text family1 = it.next(); - Text family2 = it2.next(); - result = family1.compareTo(family2); + for(Iterator it = families.values().iterator(), + it2 = other.families.values().iterator(); it.hasNext(); ) { + result = it.next().compareTo(it2.next()); if(result != 0) { - return result; + break; } } } Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/InvalidColumnNameException.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/InvalidColumnNameException.java (revision 0) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/InvalidColumnNameException.java (revision 0) @@ -0,0 +1,29 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +public class InvalidColumnNameException extends IOException { + private static final long serialVersionUID = 1L << 29 - 1L; + public InvalidColumnNameException() { + super(); + } + + public InvalidColumnNameException(String s) { + super(s); + } +} Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/MasterNotRunningException.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/MasterNotRunningException.java (revision 0) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/MasterNotRunningException.java (revision 0) @@ -0,0 +1,29 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +public class MasterNotRunningException extends IOException { + private static final long serialVersionUID = 1L << 23 - 1L; + public MasterNotRunningException() { + super(); + } + + public MasterNotRunningException(String s) { + super(s); + } +} Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/NoServerForRegionException.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/NoServerForRegionException.java (revision 0) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/NoServerForRegionException.java (revision 0) @@ -0,0 +1,30 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +public class NoServerForRegionException extends IOException { + private static final long serialVersionUID = 1L << 11 - 1L; + + public NoServerForRegionException() { + super(); + } + + public NoServerForRegionException(String s) { + super(s); + } +} Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableNotDisabledException.java =================================================================== --- src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableNotDisabledException.java (revision 0) +++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableNotDisabledException.java (revision 0) @@ -0,0 +1,29 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +public class TableNotDisabledException extends IOException { + private static final long serialVersionUID = 1L << 19 - 1L; + public TableNotDisabledException() { + super(); + } + + public TableNotDisabledException(String s) { + super(s); + } +} Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java =================================================================== --- src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java (revision 542444) +++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java (working copy) @@ -53,8 +53,8 @@ private static HTableDescriptor tableDescriptor; static { - tableDescriptor = new HTableDescriptor("TestTable", 1); - tableDescriptor.addFamily(COLUMN_FAMILY); + tableDescriptor = new HTableDescriptor("TestTable"); + tableDescriptor.addFamily(new HColumnDescriptor(COLUMN_FAMILY.toString())); } private static enum Test {RANDOM_READ, Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java =================================================================== --- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java (revision 542444) +++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java (working copy) @@ -71,9 +71,9 @@ Path dir = new Path("/hbase"); fs.mkdirs(dir); - HTableDescriptor desc = new HTableDescriptor("test", 1); - desc.addFamily(CONTENTS); - desc.addFamily(HConstants.COLUMN_FAMILY); + HTableDescriptor desc = new HTableDescriptor("test"); + desc.addFamily(new HColumnDescriptor(CONTENTS.toString())); + desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString())); HRegionInfo info = new HRegionInfo(0L, desc, null, null); Path regionDir = HStoreFile.getHRegionDir(dir, 
info.regionName); Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java =================================================================== --- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java (revision 542444) +++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java (working copy) @@ -17,8 +17,8 @@ import java.io.IOException; import java.util.Iterator; +import java.util.Set; import java.util.TreeMap; -import java.util.TreeSet; import org.apache.hadoop.io.Text; @@ -71,9 +71,9 @@ private void setup() throws IOException { client = new HClient(conf); - desc = new HTableDescriptor("test", 3); - desc.addFamily(new Text(CONTENTS)); - desc.addFamily(new Text(ANCHOR)); + desc = new HTableDescriptor("test"); + desc.addFamily(new HColumnDescriptor(CONTENTS.toString())); + desc.addFamily(new HColumnDescriptor(ANCHOR.toString())); client.createTable(desc); } @@ -182,7 +182,7 @@ HTableDescriptor[] tables = client.listTables(); assertEquals(1, tables.length); assertEquals(desc.getName(), tables[0].getName()); - TreeSet families = tables[0].families(); + Set families = tables[0].families().keySet(); assertEquals(2, families.size()); assertTrue(families.contains(new Text(CONTENTS))); assertTrue(families.contains(new Text(ANCHOR))); @@ -193,11 +193,5 @@ // Delete the table we created client.deleteTable(desc.getName()); - try { - Thread.sleep(30000); // Wait for table to be deleted - - } catch(InterruptedException e) { - } } - } Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java =================================================================== --- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java (revision 542444) +++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java (working copy) @@ -102,9 +102,9 @@ oldlogfile = new Path(parentdir, "oldlogfile"); log = new HLog(fs, newlogdir, conf); - desc = new HTableDescriptor("test", 3); - desc.addFamily(new Text("contents:")); - desc.addFamily(new Text("anchor:")); + desc = new HTableDescriptor("test"); + desc.addFamily(new HColumnDescriptor("contents:")); + desc.addFamily(new HColumnDescriptor("anchor:")); region = new HRegion(parentdir, log, fs, conf, new HRegionInfo(1, desc, null, null), null, oldlogfile); } Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java =================================================================== --- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java (revision 0) +++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java (revision 0) @@ -0,0 +1,79 @@ +/** + * Copyright 2006 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.io.Text; + +public class TestMasterAdmin extends HBaseClusterTestCase { + private static final Text COLUMN_NAME = new Text("col1:"); + private static HTableDescriptor testDesc; + static { + testDesc = new HTableDescriptor("testadmin1"); + testDesc.addFamily(new HColumnDescriptor(COLUMN_NAME.toString())); + } + + private HClient client; + + public TestMasterAdmin() { + super(true); + client = new HClient(conf); + } + + public void testMasterAdmin() { + try { + client.createTable(testDesc); + client.disableTable(testDesc.getName()); + + } catch(Exception e) { + e.printStackTrace(); + fail(); + } + + try { + try { + client.openTable(testDesc.getName()); + + } catch(IllegalStateException e) { + // Expected + } + + client.addColumn(testDesc.getName(), new HColumnDescriptor("col2:")); + client.enableTable(testDesc.getName()); + try { + client.deleteColumn(testDesc.getName(), new Text("col2:")); + + } catch(TableNotDisabledException e) { + // Expected + } + + client.disableTable(testDesc.getName()); + client.deleteColumn(testDesc.getName(), new Text("col2:")); + + } catch(Exception e) { + e.printStackTrace(); + fail(); + + } finally { + try { + client.deleteTable(testDesc.getName()); + + } catch(Exception e) { + e.printStackTrace(); + fail(); + } + } + } +} Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java =================================================================== --- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java (revision 542444) +++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java (working copy) @@ -50,8 +50,8 @@ fail(); } - HTableDescriptor desc = new HTableDescriptor("test", 1); - desc.addFamily(HConstants.COLUMN_FAMILY); + HTableDescriptor desc = new HTableDescriptor("test"); + desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString())); try { client.createTable(desc); @@ -73,5 +73,5 @@ e.printStackTrace(); fail(); } -} + } } Index: src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java =================================================================== --- src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java (revision 542444) +++ src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java (working copy) @@ -15,15 +15,24 @@ } public void testHRegionInfo() throws Exception { - HTableDescriptor htd = new HTableDescriptor("hank", 10); - htd.addFamily(new Text("hankfamily:")); - htd.addFamily(new Text("hankotherfamily:")); - assertEquals("Table descriptor", htd.toString(), - "name: hank, maxVersions: 10, families: [hankfamily:, hankotherfamily:]"); + HTableDescriptor htd = new HTableDescriptor("hank"); + htd.addFamily(new HColumnDescriptor("hankfamily:")); + htd.addFamily(new HColumnDescriptor(new Text("hankotherfamily:"), 10, + HColumnDescriptor.CompressionType.BLOCK, true, 1000, false)); + assertEquals("Table descriptor", "name: hank, families: " + + "{hankfamily:=(hankfamily:, max versions: 3, compression: none, " + + "in memory: false, max value length: 2147483647, bloom filter:false), " + + "hankotherfamily:=(hankotherfamily:, max versions: 10, " + + "compression: block, in memory: true, max value length: 1000, " + + "bloom filter:false)}", htd.toString()); HRegionInfo hri = new HRegionInfo(-1, htd, new Text(), new Text("10")); assertEquals("HRegionInfo", - "regionname: hank__-1, startKey: <>, tableDesc: {name: hank, " + - "maxVersions: 10, families: [hankfamily:, hankotherfamily:]}", + "regionname: 
hank__-1, startKey: <>, tableDesc: {" + "name: hank, " + + "families: {hankfamily:=(hankfamily:, max versions: 3, " + + "compression: none, in memory: false, max value length: 2147483647, " + + "bloom filter:false), hankotherfamily:=(hankotherfamily:, " + + "max versions: 10, compression: block, in memory: true, max value " + + "length: 1000, bloom filter:false)}}", hri.toString()); } }
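Taken together, the new HMasterInterface calls follow the discipline TestMasterAdmin establishes: schema changes require the table to be disabled first, and deleteColumn throws TableNotDisabledException otherwise. A condensed usage sketch through HClient (names invented, exception handling elided, conf as in the tests):

    HClient client = new HClient(conf);
    HTableDescriptor desc = new HTableDescriptor("example");
    desc.addFamily(new HColumnDescriptor("col1:"));
    client.createTable(desc);

    client.disableTable(desc.getName());   // take the table's regions off-line
    client.addColumn(desc.getName(), new HColumnDescriptor("col2:"));
    client.deleteColumn(desc.getName(), new Text("col2:"));
    client.enableTable(desc.getName());    // bring the regions back on-line

    client.deleteTable(desc.getName());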