Index: src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
===================================================================
--- src/java/org/apache/hadoop/hbase/HColumnDescriptor.java	(revision 673356)
+++ src/java/org/apache/hadoop/hbase/HColumnDescriptor.java	(working copy)
@@ -22,6 +22,10 @@
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
 
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.Text;
@@ -39,9 +43,10 @@
 public class HColumnDescriptor implements WritableComparable {
   // For future backward compatibility
-  // Version 3 was when column names becaome byte arrays and when we picked up
+  // Version 3 was when column names became byte arrays and when we picked up
   // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
-  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)4;
+  // Version 5 adds metadata as a map where keys and values are byte[].
+  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)5;
 
   /**
    * The type of compression.
@@ -56,14 +61,17 @@
     BLOCK
   }
 
-  // Defines for jruby/shell
   public static final String COMPRESSION = "COMPRESSION";
   public static final String IN_MEMORY = "IN_MEMORY";
   public static final String BLOCKCACHE = "BLOCKCACHE";
   public static final String LENGTH = "LENGTH";
   public static final String TTL = "TTL";
+  public static final String VERSIONS = "VERSIONS";
   public static final String BLOOMFILTER = "BLOOMFILTER";
   public static final String FOREVER = "FOREVER";
+  public static final String MAPFILE_INDEX_INTERVAL =
+    "MAPFILE_INDEX_INTERVAL";
+  public static final String MEMCACHE_FLUSHSIZE = "MEMCACHE_FLUSHSIZE";
 
   /**
    * Default compression type.
@@ -97,27 +105,23 @@
   public static final int DEFAULT_TTL = HConstants.FOREVER;
 
   /**
+   * Default mapfile index interval.
+   */
+  public static final int DEFAULT_MAPFILE_INDEX_INTERVAL = 128;
+
+  /**
    * Default bloom filter description.
    */
   public static final BloomFilterDescriptor DEFAULT_BLOOMFILTER = null;
 
   // Column family name
   private byte [] name;
-  // Number of versions to keep
-  private int maxVersions = DEFAULT_VERSIONS;
-  // Compression setting if any
-  private CompressionType compressionType = DEFAULT_COMPRESSION;
-  // Serve reads from in-memory cache
-  private boolean inMemory = DEFAULT_IN_MEMORY;
-  // Serve reads from in-memory block cache
-  private boolean blockCacheEnabled = DEFAULT_BLOCKCACHE;
-  // Maximum value size
-  private int maxValueLength = DEFAULT_LENGTH;
-  // Time to live of cell contents, in seconds from last timestamp
-  private int timeToLive = DEFAULT_TTL;
-  // True if bloom filter was specified
-  private boolean bloomFilterSpecified = false;
-  // Descriptor of bloom filter
+
+  // Column metadata
+  private SortedMap<byte [], byte []> values =
+    new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+
+  // Bloom filter
   private BloomFilterDescriptor bloomFilter = DEFAULT_BLOOMFILTER;
 
   /**
@@ -157,7 +161,7 @@
     this (columnName == null || columnName.length <= 0?
       HConstants.EMPTY_BYTE_ARRAY: columnName, DEFAULT_VERSIONS,
       DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY,
-      DEFAULT_BLOCKCACHE,
+      DEFAULT_BLOCKCACHE, Integer.MAX_VALUE,
       DEFAULT_TTL, DEFAULT_BLOOMFILTER);
   }
 
@@ -165,6 +169,26 @@
   /**
    * Constructor
    * @param columnName Column family name.  Must have the ':' ending.
+   * @param values Column metadata as a map of byte[] keys and values
+   * @param bloomFilter Enable the specified bloom filter for this column
+   *
+   * @throws IllegalArgumentException if passed a family name that is made of
+   * other than 'word' characters: i.e. [a-zA-Z_0-9] and does not
+   * end in a :
+   */
+  protected HColumnDescriptor(final byte [] columnName,
+      final SortedMap<byte [], byte []> values,
+      final BloomFilterDescriptor bloomFilter) {
+    isLegalFamilyName(columnName);
+    this.name = stripColon(columnName);
+    this.bloomFilter = bloomFilter;
+    this.values.clear();
+    this.values.putAll(values);
+  }
+
+  /**
+   * Constructor
+   * @param columnName Column family name.  Must have the ':' ending.
    * @param maxVersions Maximum number of versions to keep
    * @param compression Compression type
    * @param inMemory If true, column data should be kept in an HRegionServer's
@@ -192,14 +216,13 @@
       // Until there is support, consider 0 or < 0 -- a configuration error.
       throw new IllegalArgumentException("Maximum versions must be positive");
     }
-    this.maxVersions = maxVersions;
-    this.inMemory = inMemory;
-    this.blockCacheEnabled = blockCacheEnabled;
-    this.maxValueLength = maxValueLength;
-    this.timeToLive = timeToLive;
+    setMaxVersions(maxVersions);
+    setInMemory(inMemory);
+    setBlockCacheEnabled(blockCacheEnabled);
+    setMaxValueLength(maxValueLength);
+    setTimeToLive(timeToLive);
+    setCompressionType(compression);
     this.bloomFilter = bloomFilter;
-    this.bloomFilterSpecified = this.bloomFilter == null ? false : true;
-    this.compressionType = compression;
   }
 
   private static byte [] stripColon(final byte [] n) {
@@ -208,7 +231,7 @@
     System.arraycopy(n, 0, result, 0, n.length - 1);
     return result;
   }
-  
+
   /**
    * @param b Family name.
    * @return b
@@ -243,81 +266,230 @@
   }
 
   /**
+   * @return Name of this column family with colon as required by client API
+   */
+  public byte [] getNameWithColon() {
+    String s = Bytes.toString(this.name);
+    if (!s.contains(":"))
+      s += ":";
+    return Bytes.toBytes(s);
+  }
+
+  /**
    * @return Name of this column family
    */
   public String getNameAsString() {
     return Bytes.toString(this.name);
   }
 
+  /**
+   * @param key The key.
+   * @return The value.
+   */
+  public byte[] getValue(byte[] key) {
+    return values.get(key);
+  }
+
+  /**
+   * @param key The key.
+   * @return The value as a string.
+   */
+  public String getValue(String key) {
+    byte[] value = getValue(Bytes.toBytes(key));
+    if (value == null)
+      return null;
+    return Bytes.toString(value);
+  }
+
+  /**
+   * @param key The key.
+   * @param value The value.
+   */
+  public void setValue(byte[] key, byte[] value) {
+    values.put(key, value);
+  }
+
+  /**
+   * @param key The key.
+   * @param value The value.
+   */
+  public void setValue(String key, String value) {
+    setValue(Bytes.toBytes(key), Bytes.toBytes(value));
+  }
+
+  /**
+   * @return column family metadata as a map of byte[] keys and values
+   */
+  public SortedMap<byte [], byte []> getValues() {
+    return Collections.unmodifiableSortedMap(values);
+  }
+
   /** @return compression type being used for the column family */
   public CompressionType getCompression() {
-    return this.compressionType;
+    String value = getValue(COMPRESSION);
+    if (value != null) {
+      if (value.equalsIgnoreCase("BLOCK"))
+        return CompressionType.BLOCK;
+      else if (value.equalsIgnoreCase("RECORD"))
+        return CompressionType.RECORD;
+    }
+    return CompressionType.NONE;
   }
 
   /** @return maximum number of versions */
   public int getMaxVersions() {
-    return this.maxVersions;
+    String value = getValue(VERSIONS);
+    if (value != null)
+      return Integer.valueOf(value);
+    return DEFAULT_VERSIONS;
   }
 
   /**
+   * @param maxVersions maximum number of versions
+   */
+  public void setMaxVersions(int maxVersions) {
+    setValue(VERSIONS, Integer.toString(maxVersions));
+  }
+
+  /**
    * @return Compression type setting.
    */
   public CompressionType getCompressionType() {
-    return this.compressionType;
+    return getCompression();
   }
 
   /**
-   * @return True if we are to keep all in use HRegionServer cache.
+   * @param type Compression type setting.
    */
+  public void setCompressionType(CompressionType type) {
+    String compressionType;
+    switch (type) {
+      case BLOCK: compressionType = "BLOCK"; break;
+      case RECORD: compressionType = "RECORD"; break;
+      default: compressionType = "NONE"; break;
+    }
+    setValue(COMPRESSION, compressionType);
+  }
+
+  /**
+   * @return True if we are to keep all values in the HRegionServer cache
+   */
   public boolean isInMemory() {
-    return this.inMemory;
+    String value = getValue(IN_MEMORY);
+    if (value != null)
+      return Boolean.valueOf(value);
+    return DEFAULT_IN_MEMORY;
   }
-  
+
   /**
+   * @param inMemory True if we are to keep all values in the HRegionServer
+   * cache
+   */
+  public void setInMemory(boolean inMemory) {
+    setValue(IN_MEMORY, Boolean.toString(inMemory));
+  }
+
+  /**
    * @return Maximum value length.
    */
   public int getMaxValueLength() {
-    return this.maxValueLength;
+    String value = getValue(LENGTH);
+    if (value != null)
+      return Integer.valueOf(value);
+    return DEFAULT_LENGTH;
   }
 
   /**
+   * @param maxLength Maximum value length.
+   */
+  public void setMaxValueLength(int maxLength) {
+    setValue(LENGTH, Integer.toString(maxLength));
+  }
+
+  /**
    * @return Time to live.
    */
  public int getTimeToLive() {
-    return this.timeToLive;
+    String value = getValue(TTL);
+    if (value != null)
+      return Integer.valueOf(value);
+    return DEFAULT_TTL;
  }
 
   /**
+   * @param timeToLive Time to live of cell contents, in seconds.
+   */
+  public void setTimeToLive(int timeToLive) {
+    setValue(TTL, Integer.toString(timeToLive));
+  }
+
+  /**
    * @return True if MapFile blocks should be cached.
    */
   public boolean isBlockCacheEnabled() {
-    return blockCacheEnabled;
+    String value = getValue(BLOCKCACHE);
+    if (value != null)
+      return Boolean.valueOf(value);
+    return DEFAULT_BLOCKCACHE;
   }
 
   /**
+   * @param blockCacheEnabled True if MapFile blocks should be cached.
+   */
+  public void setBlockCacheEnabled(boolean blockCacheEnabled) {
+    setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
+  }
+
+  /**
    * @return Bloom filter descriptor or null if none set.
    */
   public BloomFilterDescriptor getBloomFilter() {
     return this.bloomFilter;
   }
 
+  /**
+   * @return The number of entries that are added to the store MapFile before
+   * an index entry is added.
+   */
+  public int getMapFileIndexInterval() {
+    String value = getValue(MAPFILE_INDEX_INTERVAL);
+    if (value != null)
+      return Integer.valueOf(value);
+    return DEFAULT_MAPFILE_INDEX_INTERVAL;
+  }
+
+  /**
+   * @param interval The number of entries that are added to the store
+   * MapFile before an index entry is added.
+   */
+  public void setMapFileIndexInterval(int interval) {
+    setValue(MAPFILE_INDEX_INTERVAL, Integer.toString(interval));
+  }
+
   /** {@inheritDoc} */
   @Override
   public String toString() {
-    return "{" + HConstants.NAME + " => '" + Bytes.toString(name) +
-      "', " + HConstants.VERSIONS + " => " + maxVersions +
-      ", " + COMPRESSION + " => '" + this.compressionType +
-      "', " + IN_MEMORY + " => " + inMemory +
-      ", " + BLOCKCACHE + " => " + blockCacheEnabled +
-      ", " + LENGTH + " => " + maxValueLength +
-      ", " + TTL + " => " +
-      (timeToLive == HConstants.FOREVER ? "FOREVER" :
-        Integer.toString(timeToLive)) +
-      ", " + BLOOMFILTER + " => " +
-      (bloomFilterSpecified ? bloomFilter.toString() : CompressionType.NONE) +
-      "}";
+    StringBuffer s = new StringBuffer();
+    s.append('{');
+    s.append(HConstants.NAME);
+    s.append(" => '");
+    s.append(Bytes.toString(name));
+    s.append("'");
+    for (Map.Entry<byte [], byte []> e: values.entrySet()) {
+      s.append(", ");
+      s.append(Bytes.toString(e.getKey()));
+      s.append(" => '");
+      s.append(Bytes.toString(e.getValue()));
+      s.append("'");
+    }
+    s.append(", ");
+    s.append(BLOOMFILTER);
+    s.append(" => ");
+    s.append(bloomFilter != null ? bloomFilter.toString() : CompressionType.NONE);
+    s.append('}');
+    return s.toString();
   }
-  
+
   /** {@inheritDoc} */
   @Override
   public boolean equals(Object obj) {
@@ -328,71 +500,76 @@
   @Override
   public int hashCode() {
     int result = Bytes.hashCode(this.name);
-    result ^= Integer.valueOf(this.maxVersions).hashCode();
-    result ^= this.compressionType.hashCode();
-    result ^= Boolean.valueOf(this.inMemory).hashCode();
-    result ^= Boolean.valueOf(this.blockCacheEnabled).hashCode();
-    result ^= Integer.valueOf(this.maxValueLength).hashCode();
-    result ^= Integer.valueOf(this.timeToLive).hashCode();
-    result ^= Boolean.valueOf(this.bloomFilterSpecified).hashCode();
     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
-    if (this.bloomFilterSpecified) {
+    result ^= Boolean.valueOf(bloomFilter != null).hashCode();
+    if (bloomFilter != null) {
       result ^= this.bloomFilter.hashCode();
     }
+    for (Map.Entry<byte [], byte []> e: values.entrySet()) {
+      result ^= Bytes.hashCode(e.getKey());
+      result ^= Bytes.hashCode(e.getValue());
+    }
     return result;
   }
-  
+
   // Writable
 
   /** {@inheritDoc} */
   public void readFields(DataInput in) throws IOException {
     int versionNumber = in.readByte();
-    if (versionNumber <= 2) {
-      Text t = new Text();
-      t.readFields(in);
-      this.name = t.getBytes();
-      if (HStoreKey.getFamilyDelimiterIndex(this.name) > 0) {
-        this.name = stripColon(this.name);
+    if (versionNumber <= 4) {
+      // versions < 5
+      if (versionNumber <= 2) {
+        Text t = new Text();
+        t.readFields(in);
+        this.name = t.getBytes();
+        if (HStoreKey.getFamilyDelimiterIndex(this.name) > 0) {
+          this.name = stripColon(this.name);
+        }
+      } else {
+        this.name = Bytes.readByteArray(in);
       }
+      setMaxVersions(in.readInt());
+      setCompressionType(CompressionType.values()[in.readInt()]);
+      setInMemory(in.readBoolean());
+      setMaxValueLength(in.readInt());
+      if (in.readBoolean()) {
+        bloomFilter = new BloomFilterDescriptor();
+        bloomFilter.readFields(in);
+      }
+      if (versionNumber > 1) {
+        setBlockCacheEnabled(in.readBoolean());
+      }
+      if (versionNumber > 2) {
+        setTimeToLive(in.readInt());
+      }
     } else {
+      // version 5+
       this.name = Bytes.readByteArray(in);
+      this.values.clear();
+      int numValues = in.readInt();
+      for (int i = 0; i < numValues; i++)
+        values.put(Bytes.readByteArray(in), Bytes.readByteArray(in));
+      boolean hasBloomFilter = in.readBoolean();
+      if (hasBloomFilter) {
+        bloomFilter = new BloomFilterDescriptor();
+        bloomFilter.readFields(in);
+      }
     }
-    this.maxVersions = in.readInt();
-    int ordinal = in.readInt();
-    this.compressionType = CompressionType.values()[ordinal];
-    this.inMemory = in.readBoolean();
-    this.maxValueLength = in.readInt();
-    this.bloomFilterSpecified = in.readBoolean();
-
-    if(bloomFilterSpecified) {
-      bloomFilter = new BloomFilterDescriptor();
-      bloomFilter.readFields(in);
-    }
-
-    if (versionNumber > 1) {
-      this.blockCacheEnabled = in.readBoolean();
-    }
-
-    if (versionNumber > 2) {
-      this.timeToLive = in.readInt();
-    }
   }
 
   /** {@inheritDoc} */
   public void write(DataOutput out) throws IOException {
     out.writeByte(COLUMN_DESCRIPTOR_VERSION);
     Bytes.writeByteArray(out, this.name);
-    out.writeInt(this.maxVersions);
-    out.writeInt(this.compressionType.ordinal());
-    out.writeBoolean(this.inMemory);
-    out.writeInt(this.maxValueLength);
-    out.writeBoolean(this.bloomFilterSpecified);
-
-    if(bloomFilterSpecified) {
+    out.writeInt(values.size());
+    for (Map.Entry<byte [], byte []> e: values.entrySet()) {
+      Bytes.writeByteArray(out, e.getKey());
+      Bytes.writeByteArray(out, e.getValue());
+    }
+    out.writeBoolean(bloomFilter != null);
+    if (bloomFilter != null)
       bloomFilter.write(out);
-    }
-    out.writeBoolean(this.blockCacheEnabled);
-    out.writeInt(this.timeToLive);
   }
 
   // Comparable
 
   /** {@inheritDoc} */
   public int compareTo(Object o) {
     HColumnDescriptor other = (HColumnDescriptor)o;
     int result = Bytes.compareTo(this.name, other.getName());
     if(result == 0) {
-      result = Integer.valueOf(this.maxVersions).compareTo(
-        Integer.valueOf(other.maxVersions));
-    }
-
-    if(result == 0) {
-      result = this.compressionType.compareTo(other.compressionType);
-    }
-
-    if(result == 0) {
-      if(this.inMemory == other.inMemory) {
-        result = 0;
-
-      } else if(this.inMemory) {
+      if ((this.bloomFilter != null) == (other.bloomFilter != null)) {
+        result = 0;
+      } else if(this.bloomFilter != null) {
         result = -1;
-
       } else {
         result = 1;
       }
     }
-
-    if(result == 0) {
-      if(this.blockCacheEnabled == other.blockCacheEnabled) {
-        result = 0;
-
-      } else if(this.blockCacheEnabled) {
-        result = -1;
-
-      } else {
-        result = 1;
-      }
+    if(result == 0 && this.bloomFilter != null) {
+      result = this.bloomFilter.compareTo(other.bloomFilter);
     }
-
-    if(result == 0) {
-      result = other.maxValueLength - this.maxValueLength;
-    }
-
-    if(result == 0) {
-      result = other.timeToLive - this.timeToLive;
-    }
-
-    if(result == 0) {
-      if(this.bloomFilterSpecified == other.bloomFilterSpecified) {
-        result = 0;
-
-      } else if(this.bloomFilterSpecified) {
-        result = -1;
-
-      } else {
-        result = 1;
+    if (result == 0) {
+      // punt on comparison for ordering, just calculate difference
+      int hashCode = 0;
+      for (Map.Entry<byte [], byte []> e: this.values.entrySet()) {
+        hashCode ^= Bytes.hashCode(e.getKey());
+        hashCode ^= Bytes.hashCode(e.getValue());
      }
+      int otherHashCode = 0;
+      for (Map.Entry<byte [], byte []> e: other.values.entrySet()) {
+        otherHashCode ^= Bytes.hashCode(e.getKey());
+        otherHashCode ^= Bytes.hashCode(e.getValue());
+      }
+      result = hashCode - otherHashCode;
     }
-
     return result;
   }
-}
\ No newline at end of file
+}
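
Every per-family setting above is now backed by the string-keyed `values` map, so the typed accessors and the raw `getValue`/`setValue` pair are two views of the same state. A minimal usage sketch (the family name and the `OWNER` key are illustrative, not part of this patch):

```java
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnMetadataExample {
  public static void main(String[] args) {
    // Family names keep the trailing ':' the client API expects.
    HColumnDescriptor family =
      new HColumnDescriptor(Bytes.toBytes("contents:"));

    // Typed setters write through to the metadata map...
    family.setMaxVersions(3);
    family.setInMemory(true);

    // ...so the same settings read back through the raw accessors.
    System.out.println(family.getValue("VERSIONS"));   // "3"
    System.out.println(family.getValue("IN_MEMORY"));  // "true"

    // Arbitrary metadata can ride along in the same map; "OWNER" is a
    // hypothetical key that nothing in the server interprets.
    family.setValue("OWNER", "crawler");
  }
}
```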
Index: src/java/org/apache/hadoop/hbase/regionserver/HStore.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/HStore.java	(revision 673356)
+++ src/java/org/apache/hadoop/hbase/regionserver/HStore.java	(working copy)
@@ -184,15 +184,20 @@
     this.storeName = Bytes.toBytes(this.info.getEncodedName() + "/" +
       Bytes.toString(this.family.getName()));
     this.storeNameStr = Bytes.toString(this.storeName);
-    
+
     // By default, we compact if an HStore has more than
     // MIN_COMMITS_FOR_COMPACTION map files
     this.compactionThreshold =
       conf.getInt("hbase.hstore.compactionThreshold", 3);
 
     // By default we split region if a file > DEFAULT_MAX_FILE_SIZE.
-    this.desiredMaxFileSize =
-      conf.getLong("hbase.hregion.max.filesize", DEFAULT_MAX_FILE_SIZE);
+    long maxFileSize = info.getTableDesc().getMaxFileSize();
+    if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) {
+      maxFileSize = conf.getLong("hbase.hregion.max.filesize",
+        HConstants.DEFAULT_MAX_FILE_SIZE);
+    }
+    this.desiredMaxFileSize = maxFileSize;
+
     this.storeSize = 0L;
 
     if (family.getCompression() == HColumnDescriptor.CompressionType.BLOCK) {
@@ -704,7 +709,8 @@
       info.getEncodedName(), family.getName(), -1L, null);
     MapFile.Writer out = flushedFile.getWriter(this.fs, this.compression,
       this.bloomFilter);
-    
+    out.setIndexInterval(family.getMapFileIndexInterval());
+
     // Here we tried picking up an existing HStoreFile from disk and
     // interlacing the memcache flush compacting as we go.  The notion was
     // that interlacing would take as long as a pure flush with the added
@@ -891,6 +897,7 @@
     }
     MapFile.Writer compactedOut = compactedOutputFile.getWriter(this.fs,
       this.compression, this.bloomFilter);
+    compactedOut.setIndexInterval(family.getMapFileIndexInterval());
     try {
       compactHStoreFiles(compactedOut, filesToCompact);
     } finally {
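
Note the precedence rule introduced here (and repeated for the memcache flush size in HRegion below): a table-level value is honored only when it differs from the compiled-in default; otherwise hbase-site.xml is consulted. One consequence is that explicitly setting a table to the default value is indistinguishable from never setting it. A sketch of the rule, with a hypothetical helper name:

```java
import org.apache.hadoop.conf.Configuration;

public class SettingPrecedence {
  // Hypothetical helper, not part of the patch: mirrors the logic used
  // above for desiredMaxFileSize and below for memcacheFlushSize.
  static long resolve(long fromTableDesc, long compiledDefault,
      Configuration conf, String confKey) {
    // A descriptor still holding the default means "not explicitly set",
    // so fall through to the site configuration.
    return fromTableDesc != compiledDefault
        ? fromTableDesc
        : conf.getLong(confKey, compiledDefault);
  }
}
```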
Index: src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(revision 673356)
+++ src/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(working copy)
@@ -157,6 +157,7 @@
         b.getRegionInfo().getTableDesc().getNameAsString())) {
       throw new IOException("Regions do not belong to the same table");
     }
+
     FileSystem fs = a.getFilesystem();
 
     // Make sure each region's cache is empty
@@ -484,13 +485,19 @@
       fs.delete(merges, true);
     }
 
-    // By default, we flush the cache when 64M.
-    this.memcacheFlushSize = conf.getInt("hbase.hregion.memcache.flush.size",
-      1024*1024*64);
+    int flushSize = regionInfo.getTableDesc().getMemcacheFlushSize();
+    if (flushSize == HTableDescriptor.DEFAULT_MEMCACHE_FLUSH_SIZE) {
+      flushSize = conf.getInt("hbase.hregion.memcache.flush.size",
+        HTableDescriptor.DEFAULT_MEMCACHE_FLUSH_SIZE);
+    }
+    this.memcacheFlushSize = flushSize;
 
     this.blockingMemcacheSize = this.memcacheFlushSize *
       conf.getInt("hbase.hregion.memcache.block.multiplier", 1);
 
+    if (this.regionInfo.getTableDesc().isReadOnly())
+      this.writestate.writesEnabled = false;
+
     // HRegion is ready to go!
     this.writestate.compacting = false;
     this.lastFlushTime = System.currentTimeMillis();
@@ -1299,6 +1306,10 @@
   public void batchUpdate(BatchUpdate b)
   throws IOException {
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
+
     // Do a rough check that we have resources to accept a write.  The check is
     // 'rough' in that between the resource check and the call to obtain a
     // read lock, resources may run out.  For now, the thought is that this
@@ -1406,6 +1417,9 @@
   public void deleteAll(final byte [] row, final byte [] column, final long ts)
   throws IOException {
     checkColumn(column);
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
     Integer lid = obtainRowLock(row);
     try {
       deleteMultiple(row, column, ts, ALL_VERSIONS);
@@ -1422,6 +1436,9 @@
    */
   public void deleteAll(final byte [] row, final long ts)
   throws IOException {
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
     Integer lid = obtainRowLock(row);
     try {
       for (HStore store : stores.values()){
@@ -1449,6 +1466,9 @@
    */
   public void deleteFamily(byte [] row, byte [] family, long timestamp)
   throws IOException{
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
     Integer lid = obtainRowLock(row);
     try {
       // find the HStore for the column family
@@ -1481,6 +1501,9 @@
   private void deleteMultiple(final byte [] row, final byte [] column,
     final long ts, final int versions)
   throws IOException {
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
     HStoreKey origin = new HStoreKey(row, column, ts);
     Set<HStoreKey> keys = getKeys(origin, versions);
     if (keys.size() > 0) {
@@ -1508,6 +1531,9 @@
     final byte [] val)
   throws IOException {
     checkColumn(key.getColumn());
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
     TreeMap<HStoreKey, byte []> targets = this.targetColumns.get(lockid);
     if (targets == null) {
       targets = new TreeMap<HStoreKey, byte []>();
@@ -1529,6 +1555,9 @@
     if (updatesByColumn == null || updatesByColumn.size() <= 0) {
       return;
     }
+    if (!this.writestate.writesEnabled) {
+      throw new IOException("region is read only");
+    }
     boolean flush = false;
     this.updatesLock.readLock().lock();
     try {
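
The client-visible effect of the new READONLY flag is that every mutation path in HRegion now fails fast. A sketch against the 0.2-era client API (the table, family, and row names are illustrative):

```java
import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadOnlyTableExample {
  public static void main(String[] args) throws IOException {
    // Assumes "mytable" has a descriptor with setReadOnly(true) applied.
    HTable table =
      new HTable(new HBaseConfiguration(), Bytes.toBytes("mytable"));
    BatchUpdate update = new BatchUpdate(Bytes.toBytes("row1"));
    update.put(Bytes.toBytes("info:status"), Bytes.toBytes("ok"));
    try {
      table.commit(update);  // rejected by the server-side batchUpdate check
    } catch (IOException e) {
      System.out.println(e.getMessage());  // "region is read only"
    }
  }
}
```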
Index: src/java/org/apache/hadoop/hbase/HTableDescriptor.java
===================================================================
--- src/java/org/apache/hadoop/hbase/HTableDescriptor.java	(revision 673356)
+++ src/java/org/apache/hadoop/hbase/HTableDescriptor.java	(working copy)
@@ -27,6 +27,8 @@
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -37,13 +39,13 @@
  * column families.
  */
 public class HTableDescriptor implements WritableComparable {
-  /** Table descriptor for -ROOT- catalog table */
+  /** Table descriptor for -ROOT- catalog table */
   public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor(
       HConstants.ROOT_TABLE_NAME,
       new HColumnDescriptor[] { new HColumnDescriptor(HConstants.COLUMN_FAMILY,
           1, HColumnDescriptor.CompressionType.NONE, false, false,
           Integer.MAX_VALUE, HConstants.FOREVER, null) });
-  
+
   /** Table descriptor for .META. catalog table */
   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
       HConstants.META_TABLE_NAME, new HColumnDescriptor[] {
@@ -53,14 +55,31 @@
           new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN,
               HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE,
               false, false, Integer.MAX_VALUE, HConstants.FOREVER, null) });
-  
+
+  public static final int CURRENT_VERSION = 3;
+
   private boolean rootregion = false;
   private boolean metaregion = false;
+  private int version = CURRENT_VERSION;
   private byte [] name = HConstants.EMPTY_BYTE_ARRAY;
   private String nameAsString = "";
-  
+
+  // Table metadata
+  private SortedMap<byte [], byte []> values =
+    new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
+
   public static final String FAMILIES = "FAMILIES";
-  
+  public static final String MAX_FILESIZE = "MAX_FILESIZE";
+  public static final String IN_MEMORY = "IN_MEMORY";
+  public static final String READONLY = "READONLY";
+  public static final String MEMCACHE_FLUSHSIZE = "MEMCACHE_FLUSHSIZE";
+
+  public static final boolean DEFAULT_IN_MEMORY = false;
+
+  public static final boolean DEFAULT_READONLY = false;
+
+  public static final int DEFAULT_MEMCACHE_FLUSH_SIZE = 1024*1024*64;
+
   // Key is hash of the family name.
   private final Map<Integer, HColumnDescriptor> families =
     new HashMap<Integer, HColumnDescriptor>();
@@ -108,7 +127,7 @@
    */
   public HTableDescriptor(final byte [] name) {
     setMetaFlags(name);
-    this.name = this.metaregion? name: isLegalTableName(name);
+    this.name = this.isMetaRegion() ? name: isLegalTableName(name);
     this.nameAsString = Bytes.toString(this.name);
   }
 
@@ -118,12 +137,39 @@
    * @param name
    */
   private void setMetaFlags(final byte [] name) {
-    this.rootregion = Bytes.equals(name, HConstants.ROOT_TABLE_NAME);
-    this.metaregion =
-      this.rootregion? true: Bytes.equals(name, HConstants.META_TABLE_NAME);
+    setRootRegion(Bytes.equals(name, HConstants.ROOT_TABLE_NAME));
+    setMetaRegion(isRootRegion() ||
+      Bytes.equals(name, HConstants.META_TABLE_NAME));
   }
-  
+
+  /** @return true if this is the root region */
+  public boolean isRootRegion() {
+    return rootregion;
+  }
+
+  /** @param isRoot true if this is the root region */
+  protected void setRootRegion(boolean isRoot) {
+    rootregion = isRoot;
+  }
+
+  /** @return true if this is a meta region (part of the root or meta tables) */
+  public boolean isMetaRegion() {
+    return metaregion;
+  }
+
+  /**
+   * @param isMeta true if this is a meta region (part of the root or meta
+   * tables) */
+  protected void setMetaRegion(boolean isMeta) {
+    metaregion = isMeta;
+  }
+
+  /** @return true if table is the meta table */
+  public boolean isMetaTable() {
+    return isMetaRegion() && !isRootRegion();
+  }
+
+  /**
    * Check passed buffer is legal user-space table name.
    * @param b Table name.
    * @return Returns passed b param
@@ -147,21 +193,85 @@
     return b;
   }
 
-  /** @return true if this is the root region */
-  public boolean isRootRegion() {
-    return rootregion;
+  /**
+   * @param key The key.
+   * @return The value.
+   */
+  public byte[] getValue(byte[] key) {
+    return values.get(key);
   }
-  
-  /** @return true if table is the meta table */
-  public boolean isMetaTable() {
-    return metaregion && !rootregion;
+
+  /**
+   * @param key The key.
+   * @return The value as a string.
+   */
+  public String getValue(String key) {
+    byte[] value = getValue(Bytes.toBytes(key));
+    if (value == null)
+      return null;
+    return Bytes.toString(value);
   }
-  
-  /** @return true if this is a meta region (part of the root or meta tables) */
-  public boolean isMetaRegion() {
-    return metaregion;
+
+  /**
+   * @param key The key.
+   * @param value The value.
+   */
+  public void setValue(byte[] key, byte[] value) {
+    values.put(key, value);
   }
 
+  /**
+   * @param key The key.
+   * @param value The value.
+   */
+  public void setValue(String key, String value) {
+    setValue(Bytes.toBytes(key), Bytes.toBytes(value));
+  }
+
+  /**
+   * @return table metadata as a map of byte[] keys and values
+   */
+  public Map<byte [], byte []> getValues() {
+    return Collections.unmodifiableMap(values);
+  }
+
+  /**
+   * @return true if all columns in the table should be kept in the
+   * HRegionServer cache only
+   */
+  public boolean isInMemory() {
+    String value = getValue(IN_MEMORY);
+    if (value != null)
+      return Boolean.valueOf(value);
+    return DEFAULT_IN_MEMORY;
+  }
+
+  /**
+   * @param inMemory True if all of the columns in the table should be kept in
+   * the HRegionServer cache only.
+   */
+  public void setInMemory(boolean inMemory) {
+    setValue(IN_MEMORY, Boolean.toString(inMemory));
+  }
+
+  /**
+   * @return true if all columns in the table should be read only
+   */
+  public boolean isReadOnly() {
+    String value = getValue(READONLY);
+    if (value != null)
+      return Boolean.valueOf(value);
+    return DEFAULT_READONLY;
+  }
+
+  /**
+   * @param readOnly True if all of the columns in the table should be read
+   * only.
+   */
+  public void setReadOnly(boolean readOnly) {
+    setValue(READONLY, Boolean.toString(readOnly));
+  }
+
   /** @return name of table */
   public byte [] getName() {
     return name;
@@ -172,7 +282,40 @@
     return this.nameAsString;
   }
 
+  /** @return max hregion size for table */
+  public long getMaxFileSize() {
+    String value = getValue(MAX_FILESIZE);
+    if (value != null)
+      return Long.valueOf(value);
+    return HConstants.DEFAULT_MAX_FILE_SIZE;
+  }
+
+  /**
+   * @param maxFileSize The maximum file size that a store file can grow to
+   * before a split is triggered.
+   */
+  public void setMaxFileSize(long maxFileSize) {
+    setValue(MAX_FILESIZE, Long.toString(maxFileSize));
+  }
+
+  /**
+   * @return memory cache flush size for each hregion
+   */
+  public int getMemcacheFlushSize() {
+    String value = getValue(MEMCACHE_FLUSHSIZE);
+    if (value != null)
+      return Integer.valueOf(value);
+    return DEFAULT_MEMCACHE_FLUSH_SIZE;
+  }
+
+  /**
+   * @param memcacheFlushSize memory cache flush size for each hregion
+   */
+  public void setMemcacheFlushSize(int memcacheFlushSize) {
+    setValue(MEMCACHE_FLUSHSIZE, Integer.toString(memcacheFlushSize));
+  }
+
   /**
    * Adds a column family.
    * @param family HColumnDescriptor of family to add.
    */
@@ -211,10 +354,27 @@
    */
   @Override
   public String toString() {
-    return HConstants.NAME + " => '" + Bytes.toString(this.name) +
-      "', " + FAMILIES + " => " + this.families.values();
+    StringBuffer s = new StringBuffer();
+    s.append('{');
+    s.append(HConstants.NAME);
+    s.append(" => '");
+    s.append(Bytes.toString(name));
+    s.append("'");
+    for (Map.Entry<byte [], byte []> e: values.entrySet()) {
+      s.append(", ");
+      s.append(Bytes.toString(e.getKey()));
+      s.append(" => '");
+      s.append(Bytes.toString(e.getValue()));
+      s.append("'");
+    }
+    s.append(", ");
+    s.append(FAMILIES);
+    s.append(" => ");
+    s.append(families.values());
+    s.append('}');
+    return s.toString();
   }
-  
+
   /** {@inheritDoc} */
   @Override
   public boolean equals(Object obj) {
@@ -231,38 +391,72 @@
       result ^= e.hashCode();
     }
   }
+    for (Map.Entry<byte [], byte []> e: values.entrySet()) {
+      result ^= Bytes.hashCode(e.getKey());
+      result ^= Bytes.hashCode(e.getValue());
+    }
     return result;
   }
-  
+
   // Writable
 
   /** {@inheritDoc} */
+  public void readFields(DataInput in) throws IOException {
+    version = in.readInt();
+    if (version <= 2) {
+      // version < 3
+      setRootRegion(in.readBoolean());
+      setMetaRegion(in.readBoolean());
+      if (this.version > 1)
+        setMaxFileSize(in.readLong());
+      name = Bytes.readByteArray(in);
+      nameAsString = Bytes.toString(this.name);
+      int numCols = in.readInt();
+      families.clear();
+      for (int i = 0; i < numCols; i++) {
+        HColumnDescriptor c = new HColumnDescriptor();
+        c.readFields(in);
+        families.put(Bytes.mapKey(c.getName()), c);
+      }
+    } else {
+      // version 3+
+      name = Bytes.readByteArray(in);
+      nameAsString = Bytes.toString(this.name);
+      setRootRegion(in.readBoolean());
+      setMetaRegion(in.readBoolean());
+      values.clear();
+      int numVals = in.readInt();
+      for (int i = 0; i < numVals; i++)
+        values.put(Bytes.readByteArray(in), Bytes.readByteArray(in));
+      families.clear();
+      int numFamilies = in.readInt();
+      for (int i = 0; i < numFamilies; i++) {
+        HColumnDescriptor c = new HColumnDescriptor();
+        c.readFields(in);
+        families.put(Bytes.mapKey(c.getName()), c);
+      }
+    }
+  }
+
+  /** {@inheritDoc} */
   public void write(DataOutput out) throws IOException {
-    out.writeBoolean(rootregion);
-    out.writeBoolean(metaregion);
+    out.writeInt(version);
     Bytes.writeByteArray(out, name);
+    out.writeBoolean(isRootRegion());
+    out.writeBoolean(isMetaRegion());
+    out.writeInt(values.size());
+    for (Map.Entry<byte [], byte []> e: values.entrySet()) {
+      Bytes.writeByteArray(out, e.getKey());
+      Bytes.writeByteArray(out, e.getValue());
+    }
     out.writeInt(families.size());
     for(Iterator<HColumnDescriptor> it = families.values().iterator();
         it.hasNext(); ) {
-      it.next().write(out);
+      HColumnDescriptor family = it.next();
+      family.write(out);
     }
   }
 
-  /** {@inheritDoc} */
-  public void readFields(DataInput in) throws IOException {
-    this.rootregion = in.readBoolean();
-    this.metaregion = in.readBoolean();
-    this.name = Bytes.readByteArray(in);
-    this.nameAsString = Bytes.toString(this.name);
-    int numCols = in.readInt();
-    this.families.clear();
-    for (int i = 0; i < numCols; i++) {
-      HColumnDescriptor c = new HColumnDescriptor();
-      c.readFields(in);
-      this.families.put(Bytes.mapKey(c.getName()), c);
-    }
-  }
-
   // Comparable
 
   /** {@inheritDoc} */
@@ -272,12 +466,10 @@
     if (result == 0) {
       result = families.size() - other.families.size();
     }
-    
     if (result == 0 && families.size() != other.families.size()) {
       result = Integer.valueOf(families.size()).compareTo(
         Integer.valueOf(other.families.size()));
     }
-    
     if (result == 0) {
       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
           it2 = other.families.values().iterator(); it.hasNext(); ) {
@@ -287,6 +479,20 @@
         }
       }
     }
+    if (result == 0) {
+      // punt on comparison for ordering, just calculate difference
+      int hashCode = 0;
+      for (Map.Entry<byte [], byte []> e: this.values.entrySet()) {
+        hashCode ^= Bytes.hashCode(e.getKey());
+        hashCode ^= Bytes.hashCode(e.getValue());
+      }
+      int otherHashCode = 0;
+      for (Map.Entry<byte [], byte []> e: other.values.entrySet()) {
+        otherHashCode ^= Bytes.hashCode(e.getKey());
+        otherHashCode ^= Bytes.hashCode(e.getValue());
+      }
+      result = hashCode - otherHashCode;
+    }
     return result;
   }
 
@@ -323,4 +529,4 @@
   public static Path getTableDir(Path rootdir, final byte [] tableName) {
     return new Path(rootdir, Bytes.toString(tableName));
   }
-}
\ No newline at end of file
+}
Index: src/java/org/apache/hadoop/hbase/HRegionInfo.java
===================================================================
--- src/java/org/apache/hadoop/hbase/HRegionInfo.java	(revision 673356)
+++ src/java/org/apache/hadoop/hbase/HRegionInfo.java	(working copy)
@@ -307,7 +307,14 @@
   public HTableDescriptor getTableDesc(){
     return tableDesc;
   }
-  
+
+  /**
+   * @param newDesc new table descriptor to use
+   */
+  public void setTableDesc(HTableDescriptor newDesc) {
+    this.tableDesc = newDesc;
+  }
+
   /** @return true if this is the root region */
   public boolean isRootRegion() {
     return this.tableDesc.isRootRegion();
Index: src/java/org/apache/hadoop/hbase/master/HMaster.java
===================================================================
--- src/java/org/apache/hadoop/hbase/master/HMaster.java	(revision 673356)
+++ src/java/org/apache/hadoop/hbase/master/HMaster.java	(working copy)
@@ -678,6 +678,13 @@
   }
 
   /** {@inheritDoc} */
+  public void modifyTableMeta(final byte[] tableName, HTableDescriptor desc)
+  throws IOException
+  {
+    new ModifyTableMeta(this, tableName, desc).process();
+  }
+
+  /** {@inheritDoc} */
   public HServerAddress findRootRegion() {
     return regionManager.getRootRegionLocation();
   }
Index: src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java
===================================================================
--- src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java	(revision 0)
+++ src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java	(revision 0)
@@ -0,0 +1,70 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
+
+/** Instantiated to modify table descriptor metadata */
+class ModifyTableMeta extends TableOperation {
+
+  private HTableDescriptor newDesc;
+
+  ModifyTableMeta(final HMaster master, final byte [] tableName,
+    HTableDescriptor newDesc)
+  throws IOException {
+    super(master, tableName);
+    this.newDesc = newDesc;
+  }
+
+  protected void updateRegionInfo(HRegionInterface server, byte [] regionName,
+    HRegionInfo i)
+  throws IOException {
+    BatchUpdate b = new BatchUpdate(i.getRegionName());
+    b.put(COL_REGIONINFO, Writables.getBytes(i));
+    server.batchUpdate(regionName, b);
+  }
+
+  @Override
+  protected void processScanItem(
+    @SuppressWarnings("unused") String serverName,
+    @SuppressWarnings("unused") long startCode, final HRegionInfo info)
+  throws IOException {
+    if (isEnabled(info)) {
+      throw new TableNotDisabledException(Bytes.toString(tableName));
+    }
+  }
+
+  @Override
+  protected void postProcessMeta(MetaRegion m, HRegionInterface server)
+  throws IOException {
+    for (HRegionInfo i: unservedRegions) {
+      i.setTableDesc(newDesc);
+      updateRegionInfo(server, m.getRegionName(), i);
+    }
+  }
+}
Index: src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
===================================================================
--- src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java	(revision 673356)
+++ src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java	(working copy)
@@ -104,8 +104,17 @@
    * @throws IOException
    */
   public void disableTable(final byte [] tableName) throws IOException;
-  
+
   /**
+   * Modify a table's metadata
+   *
+   * @param tableName
+   * @param desc
+   */
+  public void modifyTableMeta(byte[] tableName, HTableDescriptor desc)
+  throws IOException;
+
+  /**
    * Shutdown an HBase cluster.
    * @throws IOException
    */
Index: src/java/org/apache/hadoop/hbase/util/Writables.java
===================================================================
--- src/java/org/apache/hadoop/hbase/util/Writables.java	(revision 673356)
+++ src/java/org/apache/hadoop/hbase/util/Writables.java	(working copy)
@@ -25,6 +25,7 @@
 
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Writable;
 
@@ -173,4 +174,12 @@
     }
     return Bytes.toLong(c.getValue());
   }
+
+  /**
+   * @param bytes
+   * @return Bytes as a new ImmutableBytesWritable
+   */
+  public static Writable getWritable(byte[] bytes) {
+    return new ImmutableBytesWritable(bytes);
+  }
 }
\ No newline at end of file
Index: src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java
===================================================================
--- src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java	(revision 0)
+++ src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java	(revision 0)
@@ -0,0 +1,55 @@
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+
+public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
+
+  public UnmodifyableHColumnDescriptor(final HColumnDescriptor desc) {
+    super(desc.getNameWithColon(), desc.getValues(), desc.getBloomFilter());
+  }
+
+  @Override
+  public void setValue(byte[] key, byte[] value) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setValue(String key, String value) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setMaxVersions(int maxVersions) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setInMemory(boolean inMemory) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setBlockCacheEnabled(boolean blockCacheEnabled) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setMaxValueLength(int maxLength) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setTimeToLive(int timeToLive) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setCompressionType(CompressionType type) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+
+  @Override
+  public void setMapFileIndexInterval(int interval) {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+}

Property changes on: src/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java
___________________________________________________________________
Name: svn:executable
   + *
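
Because every typed mutator now funnels through setValue, the two Unmodifyable* wrappers only need to override the setters to make a descriptor effectively immutable. A sketch of the client-visible behavior; it assumes the descriptor was obtained through the client package (e.g. HTable's table-descriptor accessor), which hands out the wrapped flavor:

```java
import org.apache.hadoop.hbase.HTableDescriptor;

public class ReadOnlyDescriptorExample {
  // 'desc' is assumed to be an UnmodifyableHTableDescriptor as handed out
  // by the client API; a plain HTableDescriptor would accept the call.
  static void tryMutate(HTableDescriptor desc) {
    try {
      desc.setReadOnly(true);  // every setter throws on the wrapper
    } catch (UnsupportedOperationException e) {
      System.out.println(e.getMessage());  // "HTableDescriptor is read-only"
    }
  }
}
```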
Index: src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
===================================================================
--- src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java	(revision 673356)
+++ src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java	(working copy)
@@ -565,7 +565,26 @@
     }
   }
 
-  
+  /**
+   * Modify a table's HTableDescriptor
+   *
+   * @param tableName name of table
+   * @param desc the updated descriptor to store for the table
+   * @throws IOException
+   */
+  public void modifyTableMeta(final byte [] tableName, HTableDescriptor desc)
+  throws IOException {
+    if (this.master == null) {
+      throw new MasterNotRunningException("master has been shut down");
+    }
+    HTableDescriptor.isLegalTableName(tableName);
+    try {
+      this.master.modifyTableMeta(tableName, desc);
+    } catch (RemoteException e) {
+      throw RemoteExceptionHandler.decodeRemoteException(e);
+    }
+  }
+
   /**
    * Shuts down the HBase instance
    * @throws IOException
Index: src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
===================================================================
--- src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java	(revision 673356)
+++ src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java	(working copy)
@@ -22,17 +22,16 @@
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.util.Bytes;
 
 class UnmodifyableHTableDescriptor extends HTableDescriptor {
   /*
-   * Create an unmodifyable copy of an HTableDescriptor
+   * Create an unmodifiable copy of an HTableDescriptor
    * @param desc
    */
   UnmodifyableHTableDescriptor(final HTableDescriptor desc) {
     super(desc.getName());
     for (HColumnDescriptor c: desc.getFamilies()) {
-      super.addFamily(c);
+      super.addFamily(new UnmodifyableHColumnDescriptor(c));
     }
   }
 
@@ -54,4 +53,34 @@
   public HColumnDescriptor removeFamily(final byte [] column) {
     throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
+
+  @Override
+  public void setInMemory(boolean inMemory) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  @Override
+  public void setReadOnly(boolean readOnly) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  @Override
+  public void setValue(byte[] key, byte[] value) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  @Override
+  public void setValue(String key, String value) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  @Override
+  public void setMaxFileSize(long maxFileSize) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
+
+  @Override
+  public void setMemcacheFlushSize(int memcacheFlushSize) {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
+  }
 }
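
Taken together, the intended workflow is roughly the sketch below (hypothetical table name and sizes). Two caveats are visible in the code above: ModifyTableMeta throws TableNotDisabledException unless the table is disabled first, and setTableDesc replaces each region's descriptor wholesale, so the descriptor passed to modifyTableMeta should already carry the table's column families:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class ModifyTableMetaExample {
  public static void main(String[] args) throws Exception {
    HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
    byte [] tableName = Bytes.toBytes("mytable");  // illustrative

    // Rebuild the descriptor: families plus the new table-level settings.
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("info:")));
    desc.setMaxFileSize(512L * 1024 * 1024);  // per-table split threshold
    desc.setReadOnly(true);                   // regions reject writes

    admin.disableTable(tableName);            // required by ModifyTableMeta
    admin.modifyTableMeta(tableName, desc);
    admin.enableTable(tableName);
  }
}
```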