Index: src/test/org/apache/hadoop/hbase/TestTable.java =================================================================== --- src/test/org/apache/hadoop/hbase/TestTable.java (revision 677358) +++ src/test/org/apache/hadoop/hbase/TestTable.java (working copy) @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.io.BatchUpdate; /** Tests table creation restrictions*/ public class TestTable extends HBaseClusterTestCase { @@ -123,4 +124,26 @@ @SuppressWarnings("unused") HTable table = new HTable(conf, getName()); } + + /** + * Test read only tables + */ + public void testReadOnlyTable() throws Exception { + HBaseAdmin admin = new HBaseAdmin(conf); + HTableDescriptor desc = new HTableDescriptor(getName()); + byte[] colName = "test:".getBytes(); + desc.addFamily(new HColumnDescriptor(colName)); + desc.setReadOnly(true); + admin.createTable(desc); + HTable table = new HTable(conf, getName()); + try { + byte[] value = "somedata".getBytes(); + BatchUpdate update = new BatchUpdate(); + update.put(colName, value); + table.commit(update); + fail("BatchUpdate on read only table succeeded"); + } catch (Exception e) { + // expected + } + } } \ No newline at end of file Index: src/test/org/apache/hadoop/hbase/client/TestHTable.java =================================================================== --- src/test/org/apache/hadoop/hbase/client/TestHTable.java (revision 677358) +++ src/test/org/apache/hadoop/hbase/client/TestHTable.java (working copy) @@ -21,6 +21,9 @@ import java.io.IOException; import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseClusterTestCase; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -31,11 +34,14 @@ import org.apache.hadoop.hbase.io.RowResult; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Text; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; /** * Tests HTable */ public class TestHTable extends HBaseClusterTestCase implements HConstants { + private static final Log LOG = LogFactory.getLog(TestHTable.class); private static final HColumnDescriptor column = new HColumnDescriptor(COLUMN_FAMILY); @@ -45,6 +51,9 @@ private static final byte [] row = Bytes.toBytes("row"); + private static final byte [] attrName = Bytes.toBytes("TESTATTR"); + private static final byte [] attrValue = Bytes.toBytes("somevalue"); + /** * the test * @throws IOException @@ -123,7 +132,57 @@ // We can still access A through newA because it has the table information // cached. And if it needs to recalibrate, that will cause the information // to be reloaded. 
- + + // Test user metadata + + try { + // make a modifiable descriptor + HTableDescriptor desc = new HTableDescriptor(a.getMetadata()); + // offline the table + admin.disableTable(tableAname); + // add a user attribute to HTD + desc.setValue(attrName, attrValue); + // add a user attribute to HCD + for (HColumnDescriptor c: desc.getFamilies()) + c.setValue(attrName, attrValue); + // update metadata for all regions of this table + admin.modifyTableMeta(tableAname, desc); + // enable the table + admin.enableTable(tableAname); + + // Use a metascanner to avoid client API caching (HConnection has a + // metadata cache) + MetaScanner.MetaScannerVisitor visitor = new MetaScanner.MetaScannerVisitor() { + public boolean processRow( + @SuppressWarnings("unused") RowResult rowResult, + HRegionLocation regionLocation, + HRegionInfo info) { + LOG.info("visiting " + regionLocation.toString()); + HTableDescriptor desc = info.getTableDesc(); + if (Bytes.compareTo(desc.getName(), tableAname) == 0) { + // check HTD attribute + byte[] value = desc.getValue(attrName); + if (value == null) + fail("missing HTD attribute value"); + if (Bytes.compareTo(value, attrValue) != 0) + fail("HTD attribute value is incorrect"); + // check HCD attribute + for (HColumnDescriptor c: desc.getFamilies()) { + value = c.getValue(attrName); + if (value == null) + fail("missing HCD attribute value"); + if (Bytes.compareTo(value, attrValue) != 0) + fail("HCD attribute value is incorrect"); + } + } + return true; + } + }; + MetaScanner.metaScan(conf, visitor); + } catch (Exception e) { + e.printStackTrace(); + fail(); + } } /** Index: src/java/org/apache/hadoop/hbase/HColumnDescriptor.java =================================================================== --- src/java/org/apache/hadoop/hbase/HColumnDescriptor.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/HColumnDescriptor.java (working copy) @@ -22,7 +22,10 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.WritableComparable; @@ -39,10 +42,12 @@ public class HColumnDescriptor implements WritableComparable { // For future backward compatibility - // Version 3 was when column names becaome byte arrays and when we picked up + // Version 3 was when column names become byte arrays and when we picked up // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82. // Version 5 was when bloom filter descriptors were removed. - private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)5; + // Version 6 adds metadata as a map where keys and values are byte[] and also + // removes backwards compatibility for versions < 6. + private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)6; /** * The type of compression. 
@@ -57,14 +62,17 @@ BLOCK } - // Defines for jruby/shell public static final String COMPRESSION = "COMPRESSION"; public static final String IN_MEMORY = "IN_MEMORY"; public static final String BLOCKCACHE = "BLOCKCACHE"; public static final String LENGTH = "LENGTH"; public static final String TTL = "TTL"; + public static final String VERSIONS = "VERSIONS"; public static final String BLOOMFILTER = "BLOOMFILTER"; public static final String FOREVER = "FOREVER"; + public static final String MAPFILE_INDEX_INTERVAL = + "MAPFILE_INDEX_INTERVAL"; + public static final String MEMCACHE_FLUSHSIZE = "MEMCACHE_FLUSHSIZE"; /** * Default compression type. @@ -104,22 +112,18 @@ // Column family name private byte [] name; - // Number of versions to keep - private int maxVersions = DEFAULT_VERSIONS; - // Compression setting if any - private CompressionType compressionType = DEFAULT_COMPRESSION; - // Serve reads from in-memory cache - private boolean inMemory = DEFAULT_IN_MEMORY; - // Serve reads from in-memory block cache - private boolean blockCacheEnabled = DEFAULT_BLOCKCACHE; - // Maximum value size - private int maxValueLength = DEFAULT_LENGTH; - // Time to live of cell contents, in seconds from last timestamp - private int timeToLive = DEFAULT_TTL; - // True if bloom filter was specified - private boolean bloomFilter = false; /** + * Default mapfile index interval. + */ + public static final int DEFAULT_MAPFILE_INDEX_INTERVAL = 128; + + // Column metadata + protected Map values = + new HashMap(); + + + /** * Default constructor. Must be present for Writable. */ public HColumnDescriptor() { @@ -160,6 +164,21 @@ } /** + * Constructor. + * Makes a deep copy of the supplied descriptor. + * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor. + * @param desc The descriptor. + */ + public HColumnDescriptor(HColumnDescriptor desc) { + super(); + this.name = desc.name.clone(); + for (Map.Entry e: + desc.values.entrySet()) { + this.values.put(e.getKey(), e.getValue()); + } + } + + /** * Constructor * @param columnName Column family name. Must have the ':' ending. * @param maxVersions Maximum number of versions to keep @@ -188,13 +207,13 @@ // Until there is support, consider 0 or < 0 -- a configuration error. throw new IllegalArgumentException("Maximum versions must be positive"); } - this.maxVersions = maxVersions; - this.inMemory = inMemory; - this.blockCacheEnabled = blockCacheEnabled; - this.maxValueLength = maxValueLength; - this.timeToLive = timeToLive; - this.bloomFilter = bloomFilter; - this.compressionType = compression; + setMaxVersions(maxVersions); + setInMemory(inMemory); + setBlockCacheEnabled(blockCacheEnabled); + setMaxValueLength(maxValueLength); + setTimeToLive(timeToLive); + setCompressionType(compression); + setBloomfilter(bloomFilter); } private static byte [] stripColon(final byte [] n) { @@ -203,7 +222,7 @@ System.arraycopy(n, 0, result, 0, n.length - 1); return result; } - + /** * @param b Family name. * @return b @@ -238,77 +257,232 @@ } /** + * @return Name of this column family with colon as required by client API + */ + public byte [] getNameWithColon() { + String s = Bytes.toString(this.name); + if (!s.contains(":")) + s += ":"; + return Bytes.toBytes(s); + } + + /** * @return Name of this column family */ public String getNameAsString() { return Bytes.toString(this.name); } + /** + * @param key The key. + * @return The value. 
+ */ + public byte[] getValue(byte[] key) { + ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key)); + if (ibw == null) + return null; + return ibw.get(); + } + + /** + * @param key The key. + * @return The value as a string. + */ + public String getValue(String key) { + byte[] value = getValue(Bytes.toBytes(key)); + if (value == null) + return null; + return Bytes.toString(value); + } + + /** + * @param key The key. + * @param value The value. + */ + public void setValue(byte[] key, byte[] value) { + values.put(new ImmutableBytesWritable(key), + new ImmutableBytesWritable(value)); + } + + /** + * @param key The key. + * @param value The value. + */ + public void setValue(String key, String value) { + setValue(Bytes.toBytes(key), Bytes.toBytes(value)); + } + /** @return compression type being used for the column family */ public CompressionType getCompression() { - return this.compressionType; + String value = getValue(COMPRESSION); + if (value != null) { + if (value.equalsIgnoreCase("BLOCK")) + return CompressionType.BLOCK; + else if (value.equalsIgnoreCase("RECORD")) + return CompressionType.RECORD; + } + return CompressionType.NONE; } /** @return maximum number of versions */ public int getMaxVersions() { - return this.maxVersions; + String value = getValue(VERSIONS); + if (value != null) + return Integer.valueOf(value); + return DEFAULT_VERSIONS; } + + /** + * @param maxVersions maximum number of versions + */ + public void setMaxVersions(int maxVersions) { + setValue(VERSIONS, Integer.toString(maxVersions)); + } /** * @return Compression type setting. */ public CompressionType getCompressionType() { - return this.compressionType; + return getCompression(); } /** + * @param type Compression type setting. + */ + public void setCompressionType(CompressionType type) { + String compressionType; + switch (type) { + case BLOCK: compressionType = "BLOCK"; break; + case RECORD: compressionType = "RECORD"; break; + default: compressionType = "NONE"; break; + } + setValue(COMPRESSION, compressionType); + } + + /** * @return True if we are to keep all in use HRegionServer cache. */ public boolean isInMemory() { - return this.inMemory; + String value = getValue(IN_MEMORY); + if (value != null) + return Boolean.valueOf(value); + return DEFAULT_IN_MEMORY; } /** + * @param inMemory True if we are to keep all values in the HRegionServer + * cache + */ + public void setInMemory(boolean inMemory) { + setValue(IN_MEMORY, Boolean.toString(inMemory)); + } + + /** * @return Maximum value length. */ public int getMaxValueLength() { - return this.maxValueLength; + String value = getValue(LENGTH); + if (value != null) + return Integer.valueOf(value); + return DEFAULT_LENGTH; } /** + * @param maxLength Maximum value length. + */ + public void setMaxValueLength(int maxLength) { + setValue(LENGTH, Integer.toString(maxLength)); + } + + /** * @return Time to live. */ public int getTimeToLive() { - return this.timeToLive; + String value = getValue(TTL); + if (value != null) + return Integer.valueOf(value); + return DEFAULT_TTL; } /** + * @param timeToLive + */ + public void setTimeToLive(int timeToLive) { + setValue(TTL, Integer.toString(timeToLive)); + } + + /** * @return True if MapFile blocks should be cached. */ public boolean isBlockCacheEnabled() { - return blockCacheEnabled; + String value = getValue(BLOCKCACHE); + if (value != null) + return Boolean.valueOf(value); + return DEFAULT_BLOCKCACHE; } /** + * @param blockCacheEnabled True if MapFile blocks should be cached. 
+ */ + public void setBlockCacheEnabled(boolean blockCacheEnabled) { + setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled)); + } + + /** * @return true if a bloom filter is enabled */ - public boolean isBloomFilterEnabled() { - return this.bloomFilter; + public boolean isBloomfilterEnabled() { + String value = getValue(BLOOMFILTER); + if (value != null) + return Boolean.valueOf(value); + return DEFAULT_BLOOMFILTER; } + /** + * @param onOff Enable/Disable bloom filter + */ + public void setBloomfilter(final boolean onOff) { + setValue(BLOOMFILTER, Boolean.toString(onOff)); + } + + /** + * @return The number of entries that are added to the store MapFile before + * an index entry is added. + */ + public int getMapFileIndexInterval() { + String value = getValue(MAPFILE_INDEX_INTERVAL); + if (value != null) + return Integer.valueOf(value); + return DEFAULT_MAPFILE_INDEX_INTERVAL; + } + + /** + * @param interval The number of entries that are added to the store MapFile before + * an index entry is added. + */ + public void setMapFileIndexInterval(int interval) { + setValue(MAPFILE_INDEX_INTERVAL, Integer.toString(interval)); + } + /** {@inheritDoc} */ @Override public String toString() { - return "{" + HConstants.NAME + " => '" + Bytes.toString(name) + - "', " + HConstants.VERSIONS + " => " + maxVersions + - ", " + COMPRESSION + " => '" + this.compressionType + - "', " + IN_MEMORY + " => " + inMemory + - ", " + BLOCKCACHE + " => " + blockCacheEnabled + - ", " + LENGTH + " => " + maxValueLength + - ", " + TTL + " => " + - (timeToLive == HConstants.FOREVER ? "FOREVER" : - Integer.toString(timeToLive)) + - ", " + BLOOMFILTER + " => " + bloomFilter + "}"; + StringBuffer s = new StringBuffer(); + s.append('{'); + s.append(HConstants.NAME); + s.append(" => '"); + s.append(Bytes.toString(name)); + s.append("'"); + for (Map.Entry e: + values.entrySet()) { + s.append(", "); + s.append(Bytes.toString(e.getKey().get())); + s.append(" => '"); + s.append(Bytes.toString(e.getValue().get())); + s.append("'"); + } + s.append('}'); + return s.toString(); } /** {@inheritDoc} */ @@ -321,14 +495,8 @@ @Override public int hashCode() { int result = Bytes.hashCode(this.name); - result ^= Integer.valueOf(this.maxVersions).hashCode(); - result ^= this.compressionType.hashCode(); - result ^= Boolean.valueOf(this.inMemory).hashCode(); - result ^= Boolean.valueOf(this.blockCacheEnabled).hashCode(); - result ^= Integer.valueOf(this.maxValueLength).hashCode(); - result ^= Integer.valueOf(this.timeToLive).hashCode(); - result ^= Boolean.valueOf(this.bloomFilter).hashCode(); result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode(); + result ^= values.hashCode(); return result; } @@ -336,51 +504,32 @@ /** {@inheritDoc} */ public void readFields(DataInput in) throws IOException { - int versionNumber = in.readByte(); - if (versionNumber <= 2) { - Text t = new Text(); - t.readFields(in); - this.name = t.getBytes(); - if (HStoreKey.getFamilyDelimiterIndex(this.name) > 0) { - this.name = stripColon(this.name); - } - } else { - this.name = Bytes.readByteArray(in); + int version = in.readByte(); + if (version < 6) + throw new IOException("versions < 6 are no longer supported"); + // version 6+ + this.name = Bytes.readByteArray(in); + this.values.clear(); + int numValues = in.readInt(); + for (int i = 0; i < numValues; i++) { + ImmutableBytesWritable key = new ImmutableBytesWritable(); + ImmutableBytesWritable value = new ImmutableBytesWritable(); + key.readFields(in); + value.readFields(in); + values.put(key, value); } - 
this.maxVersions = in.readInt(); - int ordinal = in.readInt(); - this.compressionType = CompressionType.values()[ordinal]; - this.inMemory = in.readBoolean(); - this.maxValueLength = in.readInt(); - this.bloomFilter = in.readBoolean(); - if (this.bloomFilter && versionNumber < 5) { - // If a bloomFilter is enabled and the column descriptor is less than - // version 5, we need to skip over it to read the rest of the column - // descriptor. There are no BloomFilterDescriptors written to disk for - // column descriptors with a version number >= 5 - BloomFilterDescriptor junk = new BloomFilterDescriptor(); - junk.readFields(in); - } - if (versionNumber > 1) { - this.blockCacheEnabled = in.readBoolean(); - } - - if (versionNumber > 2) { - this.timeToLive = in.readInt(); - } } /** {@inheritDoc} */ public void write(DataOutput out) throws IOException { out.writeByte(COLUMN_DESCRIPTOR_VERSION); Bytes.writeByteArray(out, this.name); - out.writeInt(this.maxVersions); - out.writeInt(this.compressionType.ordinal()); - out.writeBoolean(this.inMemory); - out.writeInt(this.maxValueLength); - out.writeBoolean(this.bloomFilter); - out.writeBoolean(this.blockCacheEnabled); - out.writeInt(this.timeToLive); + out.writeInt(values.size()); + for (Map.Entry e: + values.entrySet()) { + e.getKey().write(out); + e.getValue().write(out); + } } // Comparable @@ -389,58 +538,14 @@ public int compareTo(Object o) { HColumnDescriptor other = (HColumnDescriptor)o; int result = Bytes.compareTo(this.name, other.getName()); - if(result == 0) { - result = Integer.valueOf(this.maxVersions).compareTo( - Integer.valueOf(other.maxVersions)); - } - - if(result == 0) { - result = this.compressionType.compareTo(other.compressionType); - } - - if(result == 0) { - if(this.inMemory == other.inMemory) { - result = 0; - - } else if(this.inMemory) { + if (result == 0) { + // punt on comparison for ordering, just calculate difference + result = this.values.hashCode() - other.values.hashCode(); + if (result < 0) result = -1; - - } else { + else if (result > 0) result = 1; - } } - - if(result == 0) { - if(this.blockCacheEnabled == other.blockCacheEnabled) { - result = 0; - - } else if(this.blockCacheEnabled) { - result = -1; - - } else { - result = 1; - } - } - - if(result == 0) { - result = other.maxValueLength - this.maxValueLength; - } - - if(result == 0) { - result = other.timeToLive - this.timeToLive; - } - - if(result == 0) { - if(this.bloomFilter == other.bloomFilter) { - result = 0; - - } else if(this.bloomFilter) { - result = -1; - - } else { - result = 1; - } - } return result; } -} \ No newline at end of file +} Index: src/java/org/apache/hadoop/hbase/BloomFilterDescriptor.java =================================================================== --- src/java/org/apache/hadoop/hbase/BloomFilterDescriptor.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/BloomFilterDescriptor.java (working copy) @@ -73,7 +73,19 @@ public BloomFilterDescriptor() { super(); } - + + /* + * Constructor. + *

+ * Creates a deep copy of the supplied BloomFilterDescriptor. + */ + public BloomFilterDescriptor(BloomFilterDescriptor desc) { + super(); + this.filterType = desc.filterType; + this.nbHash = desc.nbHash; + this.vectorSize = desc.vectorSize; + } + /** * Creates a BloomFilterDescriptor for the specified type of filter, fixes * the number of hash functions to 4 and computes a vector size using: Index: src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java =================================================================== --- src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java (working copy) @@ -67,7 +67,7 @@ col.inMemory = in.isInMemory(); col.blockCacheEnabled = in.isBlockCacheEnabled(); col.maxValueLength = in.getMaxValueLength(); - col.bloomFilterType = Boolean.toString(in.isBloomFilterEnabled()); + col.bloomFilterType = Boolean.toString(in.isBloomfilterEnabled()); return col; } Index: src/java/org/apache/hadoop/hbase/regionserver/HStore.java =================================================================== --- src/java/org/apache/hadoop/hbase/regionserver/HStore.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/regionserver/HStore.java (working copy) @@ -172,15 +172,20 @@ this.storeName = Bytes.toBytes(this.info.getEncodedName() + "/" + Bytes.toString(this.family.getName())); this.storeNameStr = Bytes.toString(this.storeName); - + // By default, we compact if an HStore has more than // MIN_COMMITS_FOR_COMPACTION map files this.compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3); // By default we split region if a file > DEFAULT_MAX_FILE_SIZE. - this.desiredMaxFileSize = - conf.getLong("hbase.hregion.max.filesize", DEFAULT_MAX_FILE_SIZE); + long maxFileSize = info.getTableDesc().getMaxFileSize(); + if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) { + maxFileSize = conf.getLong("hbase.hregion.max.filesize", + HConstants.DEFAULT_MAX_FILE_SIZE); + } + this.desiredMaxFileSize = maxFileSize; + this.storeSize = 0L; if (family.getCompression() == HColumnDescriptor.CompressionType.BLOCK) { @@ -242,11 +247,11 @@ if (first) { // Use a block cache (if configured) for the first reader only // so as to control memory usage. - r = e.getValue().getReader(this.fs, this.family.isBloomFilterEnabled(), + r = e.getValue().getReader(this.fs, this.family.isBloomfilterEnabled(), family.isBlockCacheEnabled()); first = false; } else { - r = e.getValue().getReader(this.fs, this.family.isBloomFilterEnabled(), + r = e.getValue().getReader(this.fs, this.family.isBloomfilterEnabled(), false); } this.readers.put(e.getKey(), r); @@ -582,7 +587,8 @@ HStoreFile flushedFile = new HStoreFile(conf, fs, basedir, info.getEncodedName(), family.getName(), -1L, null); MapFile.Writer out = flushedFile.getWriter(this.fs, this.compression, - this.family.isBloomFilterEnabled(), cache.size()); + this.family.isBloomfilterEnabled(), cache.size()); + out.setIndexInterval(family.getMapFileIndexInterval()); // Here we tried picking up an existing HStoreFile from disk and // interlacing the memcache flush compacting as we go. The notion was @@ -651,7 +657,7 @@ Long flushid = Long.valueOf(logCacheFlushId); // Open the map file reader. 
this.readers.put(flushid, - flushedFile.getReader(this.fs, this.family.isBloomFilterEnabled(), + flushedFile.getReader(this.fs, this.family.isBloomfilterEnabled(), this.family.isBlockCacheEnabled())); this.storefiles.put(flushid, flushedFile); // Tell listeners of the change in readers. @@ -737,9 +743,9 @@ return checkSplit(); } /* - * We create a new list of MapFile.Reader objects so we don't screw up the - * caching associated with the currently-loaded ones. Our iteration-based - * access pattern is practically designed to ruin the cache. + * We create a new list of MapFile.Reader objects so we don't screw up + * the caching associated with the currently-loaded ones. Our iteration- + * based access pattern is practically designed to ruin the cache. */ List readers = new ArrayList(); for (HStoreFile file: filesToCompact) { @@ -749,7 +755,7 @@ readers.add(reader); // Compute the size of the new bloomfilter if needed - if (this.family.isBloomFilterEnabled()) { + if (this.family.isBloomfilterEnabled()) { nrows += reader.getBloomFilterSize(); } } catch (IOException e) { @@ -775,7 +781,8 @@ FSUtils.getPath(compactedOutputFile.getMapFilePath())); } MapFile.Writer writer = compactedOutputFile.getWriter(this.fs, - this.compression, this.family.isBloomFilterEnabled(), nrows); + this.compression, this.family.isBloomfilterEnabled(), nrows); + writer.setIndexInterval(family.getMapFileIndexInterval()); try { compactHStoreFiles(writer, readers); } finally { @@ -1029,7 +1036,7 @@ // Use a block cache (if configured) for this reader since // it is the only one. finalCompactedFile.getReader(this.fs, - this.family.isBloomFilterEnabled(), + this.family.isBloomfilterEnabled(), this.family.isBlockCacheEnabled())); this.storefiles.put(orderVal, finalCompactedFile); // Tell observers that list of Readers has changed. @@ -1814,4 +1821,4 @@ return key; } } -} \ No newline at end of file +} Index: src/java/org/apache/hadoop/hbase/regionserver/HRegion.java =================================================================== --- src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy) @@ -156,6 +156,7 @@ b.getRegionInfo().getTableDesc().getNameAsString())) { throw new IOException("Regions do not belong to the same table"); } + FileSystem fs = a.getFilesystem(); // Make sure each region's cache is empty @@ -483,13 +484,19 @@ fs.delete(merges, true); } - // By default, we flush the cache when 64M. - this.memcacheFlushSize = conf.getInt("hbase.hregion.memcache.flush.size", - 1024*1024*64); + int flushSize = regionInfo.getTableDesc().getMemcacheFlushSize(); + if (flushSize == HTableDescriptor.DEFAULT_MEMCACHE_FLUSH_SIZE) { + flushSize = conf.getInt("hbase.hregion.memcache.flush.size", + HTableDescriptor.DEFAULT_MEMCACHE_FLUSH_SIZE); + } + this.memcacheFlushSize = flushSize; this.blockingMemcacheSize = this.memcacheFlushSize * conf.getInt("hbase.hregion.memcache.block.multiplier", 1); + if (this.regionInfo.getTableDesc().isReadOnly()) + this.writestate.writesEnabled = false; + // HRegion is ready to go! this.writestate.compacting = false; this.lastFlushTime = System.currentTimeMillis(); @@ -1311,6 +1318,10 @@ public void batchUpdate(BatchUpdate b) throws IOException { + if (!this.writestate.writesEnabled) { + throw new IOException("region is read only"); + } + // Do a rough check that we have resources to accept a write. 
The check is // 'rough' in that between the resource check and the call to obtain a // read lock, resources may run out. For now, the thought is that this @@ -1418,6 +1429,9 @@ public void deleteAll(final byte [] row, final byte [] column, final long ts) throws IOException { checkColumn(column); + if (!this.writestate.writesEnabled) { + throw new IOException("region is read only"); + } Integer lid = obtainRowLock(row); try { deleteMultiple(row, column, ts, ALL_VERSIONS); @@ -1434,6 +1448,9 @@ */ public void deleteAll(final byte [] row, final long ts) throws IOException { + if (!this.writestate.writesEnabled) { + throw new IOException("region is read only"); + } Integer lid = obtainRowLock(row); try { for (HStore store : stores.values()){ @@ -1461,6 +1478,9 @@ */ public void deleteFamily(byte [] row, byte [] family, long timestamp) throws IOException{ + if (!this.writestate.writesEnabled) { + throw new IOException("region is read only"); + } Integer lid = obtainRowLock(row); try { // find the HStore for the column family @@ -1493,6 +1513,9 @@ private void deleteMultiple(final byte [] row, final byte [] column, final long ts, final int versions) throws IOException { + if (!this.writestate.writesEnabled) { + throw new IOException("region is read only"); + } HStoreKey origin = new HStoreKey(row, column, ts); Set keys = getKeys(origin, versions); if (keys.size() > 0) { @@ -1520,6 +1543,9 @@ final byte [] val) throws IOException { checkColumn(key.getColumn()); + if (!this.writestate.writesEnabled) { + throw new IOException("region is read only"); + } TreeMap targets = this.targetColumns.get(lockid); if (targets == null) { targets = new TreeMap(); @@ -1541,6 +1567,9 @@ if (updatesByColumn == null || updatesByColumn.size() <= 0) { return; } + if (!this.writestate.writesEnabled) { + throw new IOException("region is read only"); + } boolean flush = false; this.updatesLock.readLock().lock(); try { Index: src/java/org/apache/hadoop/hbase/rest/TableHandler.java =================================================================== --- src/java/org/apache/hadoop/hbase/rest/TableHandler.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/rest/TableHandler.java (working copy) @@ -413,7 +413,7 @@ doElement(outputter, "name", Bytes.toString(e.getName())); doElement(outputter, "compression", e.getCompression().toString()); doElement(outputter, "bloomfilter", - Boolean.toString(e.isBloomFilterEnabled())); + Boolean.toString(e.isBloomfilterEnabled())); doElement(outputter, "max-versions", Integer.toString(e.getMaxVersions())); doElement(outputter, "maximum-cell-size", Index: src/java/org/apache/hadoop/hbase/HTableDescriptor.java =================================================================== --- src/java/org/apache/hadoop/hbase/HTableDescriptor.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/HTableDescriptor.java (working copy) @@ -29,6 +29,7 @@ import java.util.Map; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.WritableComparable; @@ -53,13 +54,32 @@ new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN, HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE, false, false, Integer.MAX_VALUE, HConstants.FOREVER, false) }); - - private boolean rootregion = false; - private boolean metaregion = false; + + // Changes prior to version 3 were not recorded here. + // Version 3 adds metadata as a map where keys and values are byte[]. 
+ public static final byte TABLE_DESCRIPTOR_VERSION = 3; + private byte [] name = HConstants.EMPTY_BYTE_ARRAY; private String nameAsString = ""; + + // Table metadata + protected Map values = + new HashMap(); public static final String FAMILIES = "FAMILIES"; + + public static final String MAX_FILESIZE = "MAX_FILESIZE"; + public static final String IN_MEMORY = "IN_MEMORY"; + public static final String READONLY = "READONLY"; + public static final String MEMCACHE_FLUSHSIZE = "MEMCACHE_FLUSHSIZE"; + public static final String IS_ROOT = "IS_ROOT"; + public static final String IS_META = "IS_META"; + + public static final boolean DEFAULT_IN_MEMORY = false; + + public static final boolean DEFAULT_READONLY = false; + + public static final int DEFAULT_MEMCACHE_FLUSH_SIZE = 1024*1024*64; // Key is hash of the family name. private final Map families = @@ -107,23 +127,81 @@ * @see HADOOP-1581 HBASE: Un-openable tablename bug */ public HTableDescriptor(final byte [] name) { - setMetaFlags(name); - this.name = this.metaregion? name: isLegalTableName(name); + super(); + this.name = this.isMetaRegion() ? name: isLegalTableName(name); this.nameAsString = Bytes.toString(this.name); + setMetaFlags(this.name); } + /** + * Constructor. + *

+ * Makes a deep copy of the supplied descriptor. + * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor. + * @param desc The descriptor. + */ + public HTableDescriptor(final HTableDescriptor desc) + { + super(); + this.name = desc.name.clone(); + this.nameAsString = Bytes.toString(this.name); + setMetaFlags(this.name); + for (HColumnDescriptor c: desc.families.values()) { + this.families.put(Bytes.mapKey(c.getName()), new HColumnDescriptor(c)); + } + for (Map.Entry e: + desc.values.entrySet()) { + this.values.put(e.getKey(), e.getValue()); + } + } + /* * Set meta flags on this table. * Called by constructors. * @param name */ private void setMetaFlags(final byte [] name) { - this.rootregion = Bytes.equals(name, HConstants.ROOT_TABLE_NAME); - this.metaregion = - this.rootregion? true: Bytes.equals(name, HConstants.META_TABLE_NAME); + setRootRegion(Bytes.equals(name, HConstants.ROOT_TABLE_NAME)); + setMetaRegion(isRootRegion() || + Bytes.equals(name, HConstants.META_TABLE_NAME)); } - + + /** @return true if this is the root region */ + public boolean isRootRegion() { + String value = getValue(IS_ROOT); + if (value != null) + return Boolean.valueOf(value); + return false; + } + + /** @param isRoot true if this is the root region */ + protected void setRootRegion(boolean isRoot) { + values.put(new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT)), + new ImmutableBytesWritable(Bytes.toBytes(Boolean.toString(isRoot)))); + } + + /** @return true if this is a meta region (part of the root or meta tables) */ + public boolean isMetaRegion() { + String value = getValue(IS_META); + if (value != null) + return Boolean.valueOf(value); + return false; + } + /** + * @param isMeta true if this is a meta region (part of the root or meta + * tables) */ + protected void setMetaRegion(boolean isMeta) { + values.put(new ImmutableBytesWritable(Bytes.toBytes(IS_META)), + new ImmutableBytesWritable(Bytes.toBytes(Boolean.toString(isMeta)))); + } + + /** @return true if table is the meta table */ + public boolean isMetaTable() { + return isMetaRegion() && !isRootRegion(); + } + + /** * Check passed buffer is legal user-space table name. * @param b Table name. * @return Returns passed b param @@ -147,21 +225,82 @@ return b; } - /** @return true if this is the root region */ - public boolean isRootRegion() { - return rootregion; + /** + * @param key The key. + * @return The value. + */ + public byte[] getValue(byte[] key) { + ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key)); + if (ibw == null) + return null; + return ibw.get(); } - - /** @return true if table is the meta table */ - public boolean isMetaTable() { - return metaregion && !rootregion; + + /** + * @param key The key. + * @return The value as a string. + */ + public String getValue(String key) { + byte[] value = getValue(Bytes.toBytes(key)); + if (value == null) + return null; + return Bytes.toString(value); } - - /** @return true if this is a meta region (part of the root or meta tables) */ - public boolean isMetaRegion() { - return metaregion; + + /** + * @param key The key. + * @param value The value. + */ + public void setValue(byte[] key, byte[] value) { + values.put(new ImmutableBytesWritable(key), + new ImmutableBytesWritable(value)); } + /** + * @param key The key. + * @param value The value. 
+ */ + public void setValue(String key, String value) { + setValue(Bytes.toBytes(key), Bytes.toBytes(value)); + } + + /** + * @return true if all columns in the table should be kept in the + * HRegionServer cache only + */ + public boolean isInMemory() { + String value = getValue(IN_MEMORY); + if (value != null) + return Boolean.valueOf(value); + return DEFAULT_IN_MEMORY; + } + + /** + * @param inMemory True if all of the columns in the table should be kept in + * the HRegionServer cache only. + */ + public void setInMemory(boolean inMemory) { + setValue(IN_MEMORY, Boolean.toString(inMemory)); + } + + /** + * @return true if all columns in the table should be read only + */ + public boolean isReadOnly() { + String value = getValue(READONLY); + if (value != null) + return Boolean.valueOf(value); + return DEFAULT_READONLY; + } + + /** + * @param readOnly True if all of the columns in the table should be read + * only. + */ + public void setReadOnly(boolean readOnly) { + setValue(READONLY, Boolean.toString(readOnly)); + } + /** @return name of table */ public byte [] getName() { return name; @@ -172,7 +311,40 @@ return this.nameAsString; } + /** @return max hregion size for table */ + public long getMaxFileSize() { + String value = getValue(MAX_FILESIZE); + if (value != null) + return Long.valueOf(value); + return HConstants.DEFAULT_MAX_FILE_SIZE; + } + /** + * @param maxFileSize The maximum file size that a store file can grow to + * before a split is triggered. + */ + public void setMaxFileSize(long maxFileSize) { + setValue(MAX_FILESIZE, Long.toString(maxFileSize)); + } + + /** + * @return memory cache flush size for each hregion + */ + public int getMemcacheFlushSize() { + String value = getValue(MEMCACHE_FLUSHSIZE); + if (value != null) + return Integer.valueOf(value); + return DEFAULT_MEMCACHE_FLUSH_SIZE; + } + + /** + * @param memcacheFlushSize memory cache flush size for each hregion + */ + public void setMemcacheFlushSize(int memcacheFlushSize) { + setValue(MEMCACHE_FLUSHSIZE, Integer.toString(memcacheFlushSize)); + } + + /** * Adds a column family. * @param family HColumnDescriptor of familyto add. */ @@ -211,10 +383,28 @@ */ @Override public String toString() { - return HConstants.NAME + " => '" + Bytes.toString(this.name) + - "', " + FAMILIES + " => " + this.families.values(); + StringBuffer s = new StringBuffer(); + s.append('{'); + s.append(HConstants.NAME); + s.append(" => '"); + s.append(Bytes.toString(name)); + s.append("'"); + for (Map.Entry e: + values.entrySet()) { + s.append(", "); + s.append(Bytes.toString(e.getKey().get())); + s.append(" => '"); + s.append(Bytes.toString(e.getValue().get())); + s.append("'"); + } + s.append(", "); + s.append(FAMILIES); + s.append(" => "); + s.append(families.values()); + s.append('}'); + return s.toString(); } - + /** {@inheritDoc} */ @Override public boolean equals(Object obj) { @@ -224,45 +414,67 @@ /** {@inheritDoc} */ @Override public int hashCode() { - // TODO: Cache. 
int result = Bytes.hashCode(this.name); + result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode(); if (this.families != null && this.families.size() > 0) { for (HColumnDescriptor e: this.families.values()) { result ^= e.hashCode(); } } + result ^= values.hashCode(); return result; } - + // Writable /** {@inheritDoc} */ + public void readFields(DataInput in) throws IOException { + int version = in.readInt(); + if (version < 3) + throw new IOException("versions < 3 are not supported (and never existed!?)"); + // version 3+ + name = Bytes.readByteArray(in); + nameAsString = Bytes.toString(this.name); + setRootRegion(in.readBoolean()); + setMetaRegion(in.readBoolean()); + values.clear(); + int numVals = in.readInt(); + for (int i = 0; i < numVals; i++) { + ImmutableBytesWritable key = new ImmutableBytesWritable(); + ImmutableBytesWritable value = new ImmutableBytesWritable(); + key.readFields(in); + value.readFields(in); + values.put(key, value); + } + families.clear(); + int numFamilies = in.readInt(); + for (int i = 0; i < numFamilies; i++) { + HColumnDescriptor c = new HColumnDescriptor(); + c.readFields(in); + families.put(Bytes.mapKey(c.getName()), c); + } + } + + /** {@inheritDoc} */ public void write(DataOutput out) throws IOException { - out.writeBoolean(rootregion); - out.writeBoolean(metaregion); + out.writeInt(TABLE_DESCRIPTOR_VERSION); Bytes.writeByteArray(out, name); + out.writeBoolean(isRootRegion()); + out.writeBoolean(isMetaRegion()); + out.writeInt(values.size()); + for (Map.Entry e: + values.entrySet()) { + e.getKey().write(out); + e.getValue().write(out); + } out.writeInt(families.size()); for(Iterator it = families.values().iterator(); it.hasNext(); ) { - it.next().write(out); + HColumnDescriptor family = it.next(); + family.write(out); } } - /** {@inheritDoc} */ - public void readFields(DataInput in) throws IOException { - this.rootregion = in.readBoolean(); - this.metaregion = in.readBoolean(); - this.name = Bytes.readByteArray(in); - this.nameAsString = Bytes.toString(this.name); - int numCols = in.readInt(); - this.families.clear(); - for (int i = 0; i < numCols; i++) { - HColumnDescriptor c = new HColumnDescriptor(); - c.readFields(in); - this.families.put(Bytes.mapKey(c.getName()), c); - } - } - // Comparable /** {@inheritDoc} */ @@ -272,12 +484,10 @@ if (result == 0) { result = families.size() - other.families.size(); } - if (result == 0 && families.size() != other.families.size()) { result = Integer.valueOf(families.size()).compareTo( Integer.valueOf(other.families.size())); } - if (result == 0) { for (Iterator it = families.values().iterator(), it2 = other.families.values().iterator(); it.hasNext(); ) { @@ -287,6 +497,14 @@ } } } + if (result == 0) { + // punt on comparison for ordering, just calculate difference + result = this.values.hashCode() - other.values.hashCode(); + if (result < 0) + result = -1; + else if (result > 0) + result = 1; + } return result; } @@ -323,4 +541,4 @@ public static Path getTableDir(Path rootdir, final byte [] tableName) { return new Path(rootdir, Bytes.toString(tableName)); } -} \ No newline at end of file +} Index: src/java/org/apache/hadoop/hbase/HRegionInfo.java =================================================================== --- src/java/org/apache/hadoop/hbase/HRegionInfo.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/HRegionInfo.java (working copy) @@ -307,7 +307,14 @@ public HTableDescriptor getTableDesc(){ return tableDesc; } - + + /** + * @param newDesc new table descriptor to use + */ + public void 
setTableDesc(HTableDescriptor newDesc) { + this.tableDesc = newDesc; + } + /** @return true if this is the root region */ public boolean isRootRegion() { return this.tableDesc.isRootRegion(); Index: src/java/org/apache/hadoop/hbase/master/HMaster.java =================================================================== --- src/java/org/apache/hadoop/hbase/master/HMaster.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/master/HMaster.java (working copy) @@ -689,6 +689,13 @@ } /** {@inheritDoc} */ + public void modifyTableMeta(final byte[] tableName, HTableDescriptor desc) + throws IOException + { + new ModifyTableMeta(this, tableName, desc).process(); + } + + /** {@inheritDoc} */ public HServerAddress findRootRegion() { return regionManager.getRootRegionLocation(); } Index: src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java =================================================================== --- src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java (revision 0) +++ src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java (revision 0) @@ -0,0 +1,79 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableNotDisabledException; +import org.apache.hadoop.hbase.ipc.HRegionInterface; +import org.apache.hadoop.hbase.io.BatchUpdate; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Writables; + +/** Instantiated to modify table descriptor metadata */ +class ModifyTableMeta extends TableOperation { + + private static Log LOG = LogFactory.getLog(ModifyTableMeta.class); + + private HTableDescriptor desc; + + ModifyTableMeta(final HMaster master, final byte [] tableName, + HTableDescriptor desc) + throws IOException { + super(master, tableName); + this.desc = desc; + LOG.debug("modifying " + Bytes.toString(tableName) + ": " + + desc.toString()); + } + + protected void updateRegionInfo(HRegionInterface server, byte [] regionName, + HRegionInfo i) + throws IOException { + BatchUpdate b = new BatchUpdate(i.getRegionName()); + b.put(COL_REGIONINFO, Writables.getBytes(i)); + server.batchUpdate(regionName, b); + LOG.debug("updated HTableDescriptor for region " + i.getRegionNameAsString()); + } + + @Override + protected void processScanItem( + @SuppressWarnings("unused") String serverName, + @SuppressWarnings("unused") long startCode, final HRegionInfo info) + throws IOException { + if (isEnabled(info)) { + throw new TableNotDisabledException(Bytes.toString(tableName)); + } + } + + @Override + protected void postProcessMeta(MetaRegion m, HRegionInterface server) + throws IOException { + for (HRegionInfo i: unservedRegions) { + i.setTableDesc(desc); + updateRegionInfo(server, m.getRegionName(), i); + } + // kick off a meta scan right away + master.regionManager.metaScannerThread.interrupt(); + } +} Index: src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java =================================================================== --- src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java (working copy) @@ -104,8 +104,17 @@ * @throws IOException */ public void disableTable(final byte [] tableName) throws IOException; - + /** + * Modify a table's metadata + * + * @param tableName + * @param desc + */ + public void modifyTableMeta(byte[] tableName, HTableDescriptor desc) + throws IOException; + + /** + * Shutdown an HBase cluster. 
* @throws IOException */ Index: src/java/org/apache/hadoop/hbase/util/Migrate.java =================================================================== --- src/java/org/apache/hadoop/hbase/util/Migrate.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/util/Migrate.java (working copy) @@ -534,7 +534,7 @@ Path tableDir = HTableDescriptor.getTableDir(rootDir, desc.getName()); for (HColumnDescriptor column: desc.getFamilies()) { - if (column.isBloomFilterEnabled()) { + if (column.isBloomfilterEnabled()) { // Column has a bloom filter migrationNeeded = true; Index: src/java/org/apache/hadoop/hbase/client/HTable.java =================================================================== --- src/java/org/apache/hadoop/hbase/client/HTable.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/client/HTable.java (working copy) @@ -243,7 +243,8 @@ * @throws IOException */ public HTableDescriptor getTableDescriptor() throws IOException { - return this.connection.getHTableDescriptor(this.tableName); + return new UnmodifyableHTableDescriptor( + this.connection.getHTableDescriptor(this.tableName)); } /** Index: src/java/org/apache/hadoop/hbase/client/MetaScanner.java =================================================================== --- src/java/org/apache/hadoop/hbase/client/MetaScanner.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/client/MetaScanner.java (working copy) @@ -82,8 +82,7 @@ /** * Visitor class called to process each row of the .META. table */ - protected interface MetaScannerVisitor { - + interface MetaScannerVisitor { /** * Visitor method that accepts a RowResult and the meta region location. * Implementations can return false to stop the region's loop if it becomes Index: src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java =================================================================== --- src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java (revision 0) +++ src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java (revision 0) @@ -0,0 +1,55 @@ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.HColumnDescriptor; + +public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { + + public UnmodifyableHColumnDescriptor (final HColumnDescriptor desc) { + super(desc); + } + + @Override + public void setValue(byte[] key, byte[] value) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + @Override + public void setValue(String key, String value) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + @Override + public void setMaxVersions(int maxVersions) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + @Override + public void setInMemory(boolean inMemory) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + @Override + public void setBlockCacheEnabled(boolean blockCacheEnabled) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + @Override + public void setMaxValueLength(int maxLength) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + @Override + public void setTimeToLive(int timeToLive) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + @Override + public void setCompressionType(CompressionType type) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } + + @Override + public void 
setMapFileIndexInterval(int interval) { + throw new UnsupportedOperationException("HColumnDescriptor is read-only"); + } +} Index: src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java =================================================================== --- src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (working copy) @@ -565,7 +565,26 @@ } } - + /** + * Modify a table's HTableDescriptor + * + * @param tableName name of table + * @param desc the updated descriptor + * @throws IOException + */ + public void modifyTableMeta(final byte [] tableName, HTableDescriptor desc) + throws IOException { + if (this.master == null) { + throw new MasterNotRunningException("master has been shut down"); + } + HTableDescriptor.isLegalTableName(tableName); + try { + this.master.modifyTableMeta(tableName, desc); + } catch (RemoteException e) { + throw RemoteExceptionHandler.decodeRemoteException(e); + } + } + /** * Shuts down the HBase instance * @throws IOException Index: src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java =================================================================== --- src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java (revision 677358) +++ src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java (working copy) @@ -39,7 +39,7 @@ UnmodifyableHTableDescriptor(final HTableDescriptor desc) { super(desc.getName()); for (HColumnDescriptor c: desc.getFamilies()) { - super.addFamily(c); + super.addFamily(new UnmodifyableHColumnDescriptor(c)); } } @@ -61,4 +61,34 @@ public HColumnDescriptor removeFamily(final byte [] column) { throw new UnsupportedOperationException("HTableDescriptor is read-only"); } + + @Override + public void setInMemory(boolean inMemory) { + throw new UnsupportedOperationException("HTableDescriptor is read-only"); + } + + @Override + public void setReadOnly(boolean readOnly) { + throw new UnsupportedOperationException("HTableDescriptor is read-only"); + } + + @Override + public void setValue(byte[] key, byte[] value) { + throw new UnsupportedOperationException("HTableDescriptor is read-only"); + } + + @Override + public void setValue(String key, String value) { + throw new UnsupportedOperationException("HTableDescriptor is read-only"); + } + + @Override + public void setMaxFileSize(long maxFileSize) { + throw new UnsupportedOperationException("HTableDescriptor is read-only"); + } + + @Override + public void setMemcacheFlushSize(int memcacheFlushSize) { + throw new UnsupportedOperationException("HTableDescriptor is read-only"); + } }
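
Below is a minimal client-side usage sketch, not part of the patch itself, showing how the API introduced above would be exercised: per-table and per-family metadata via setValue(), the per-table setMaxFileSize(), setMemcacheFlushSize() and setReadOnly() settings, and HBaseAdmin.modifyTableMeta(). The table name "example", family "info:", and attribute key "OWNER" are invented for illustration; as in the TestHTable change above, the table must be disabled before modifyTableMeta() is called.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class TableMetaUsageSketch {
  public static void main(String[] args) throws Exception {
    HBaseConfiguration conf = new HBaseConfiguration();
    HBaseAdmin admin = new HBaseAdmin(conf);

    // Create a table whose descriptor carries both built-in settings and
    // arbitrary user attributes stored in the new values map.
    HTableDescriptor desc = new HTableDescriptor("example");        // hypothetical table name
    HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("info:"));
    family.setValue("OWNER", "analytics");        // per-family user attribute (made-up key)
    desc.addFamily(family);
    desc.setValue("OWNER", "analytics");          // per-table user attribute
    desc.setMaxFileSize(512 * 1024 * 1024);       // per-table region split size
    desc.setMemcacheFlushSize(32 * 1024 * 1024);  // per-table memcache flush size
    admin.createTable(desc);

    // Later, update the metadata of the existing table. ModifyTableMeta
    // requires the table to be offline, so disable it first.
    admin.disableTable(Bytes.toBytes("example"));
    HTableDescriptor updated = new HTableDescriptor(desc);  // modifiable copy
    updated.setReadOnly(true);   // region servers will reject writes once this is applied
    admin.modifyTableMeta(Bytes.toBytes("example"), updated);
    admin.enableTable(Bytes.toBytes("example"));
  }
}

The copy constructor used for "updated" is the same one that lets a caller turn the UnmodifyableHTableDescriptor returned by HTable.getTableDescriptor() back into a descriptor that can be edited and passed to modifyTableMeta().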