Index: src/java/org/apache/hadoop/hbase/HConstants.java =================================================================== --- src/java/org/apache/hadoop/hbase/HConstants.java (revision 664521) +++ src/java/org/apache/hadoop/hbase/HConstants.java (working copy) @@ -212,7 +212,13 @@ * Unlimited time-to-live. */ static final int FOREVER = -1; - + + /** + * Selector for changing HTableDescriptor.maxFileSize via + * HMasterInterface.modifyTableMeta + */ + static final int META_MAXFILESIZE = 1; + public static final String HBASE_CLIENT_RETRIES_NUMBER_KEY = "hbase.client.retries.number"; public static final int DEFAULT_CLIENT_RETRIES = 5; Index: src/java/org/apache/hadoop/hbase/regionserver/HStore.java =================================================================== --- src/java/org/apache/hadoop/hbase/regionserver/HStore.java (revision 664521) +++ src/java/org/apache/hadoop/hbase/regionserver/HStore.java (working copy) @@ -191,8 +191,15 @@ conf.getInt("hbase.hstore.compactionThreshold", 3); // By default we split region if a file > DEFAULT_MAX_FILE_SIZE. - this.desiredMaxFileSize = - conf.getLong("hbase.hregion.max.filesize", DEFAULT_MAX_FILE_SIZE); + long maxFileSize = conf.getLong("hbase.hregion.max.filesize", -1); + if (maxFileSize != -1) { + if (info.getTableDesc().getMaxFileSize() < maxFileSize) + maxFileSize = info.getTableDesc().getMaxFileSize(); + } else { + maxFileSize = info.getTableDesc().getMaxFileSize(); + } + this.desiredMaxFileSize = maxFileSize; + this.storeSize = 0L; if (family.getCompression() == HColumnDescriptor.CompressionType.BLOCK) { Index: src/java/org/apache/hadoop/hbase/HTableDescriptor.java =================================================================== --- src/java/org/apache/hadoop/hbase/HTableDescriptor.java (revision 664521) +++ src/java/org/apache/hadoop/hbase/HTableDescriptor.java (working copy) @@ -37,13 +37,13 @@ * column families. 
*/ public class HTableDescriptor implements WritableComparable { - /** Table descriptor for -ROOT- catalog table */ + /** Table descriptor for -ROOT- catalog table */ public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor( HConstants.ROOT_TABLE_NAME, new HColumnDescriptor[] { new HColumnDescriptor(HConstants.COLUMN_FAMILY, 1, HColumnDescriptor.CompressionType.NONE, false, false, Integer.MAX_VALUE, HConstants.FOREVER, null) }); - + /** Table descriptor for .META. catalog table */ public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor( HConstants.META_TABLE_NAME, new HColumnDescriptor[] { @@ -53,12 +53,16 @@ new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN, HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE, false, false, Integer.MAX_VALUE, HConstants.FOREVER, null) }); - + + public static final int CURRENT_VERSION = 2; + + private int version = CURRENT_VERSION; private boolean rootregion = false; private boolean metaregion = false; + private long maxFileSize = HConstants.DEFAULT_MAX_FILE_SIZE; private byte [] name = HConstants.EMPTY_BYTE_ARRAY; private String nameAsString = ""; - + // Key is hash of the family name. private final Map families = new HashMap(); @@ -159,6 +163,16 @@ return this.nameAsString; } + /** @return max hregion size for table */ + public long getMaxFileSize() { + return maxFileSize; + } + + /** Set max hregion size for table */ + public void setMaxFileSize(long max) { + maxFileSize = max; + } + /** * Adds a column family. * @param family HColumnDescriptor of familyto add. 
@@ -190,7 +204,7 @@ @Override public String toString() { return "name: " + Bytes.toString(this.name) + ", families: " + - this.families.values(); + this.families.values() + ", maxFileSize: " + this.maxFileSize; } /** {@inheritDoc} */ @@ -216,8 +230,11 @@ /** {@inheritDoc} */ public void write(DataOutput out) throws IOException { + out.writeInt(version); out.writeBoolean(rootregion); out.writeBoolean(metaregion); + if (version > 1) + out.writeLong(maxFileSize); Bytes.writeByteArray(out, name); out.writeInt(families.size()); for(Iterator it = families.values().iterator(); @@ -228,8 +245,11 @@ /** {@inheritDoc} */ public void readFields(DataInput in) throws IOException { + this.version = in.readInt(); this.rootregion = in.readBoolean(); this.metaregion = in.readBoolean(); + if (this.version > 1) + this.maxFileSize = in.readLong(); this.name = Bytes.readByteArray(in); this.nameAsString = Bytes.toString(this.name); int numCols = in.readInt(); @@ -255,7 +275,11 @@ result = Integer.valueOf(families.size()).compareTo( Integer.valueOf(other.families.size())); } + if (result == 0 && maxFileSize != other.maxFileSize) { + result = Long.valueOf(maxFileSize).compareTo(other.maxFileSize); + } + if (result == 0) { for (Iterator it = families.values().iterator(), it2 = other.families.values().iterator(); it.hasNext(); ) { Index: src/java/org/apache/hadoop/hbase/master/HMaster.java =================================================================== --- src/java/org/apache/hadoop/hbase/master/HMaster.java (revision 664521) +++ src/java/org/apache/hadoop/hbase/master/HMaster.java (working copy) @@ -678,6 +678,14 @@ } /** {@inheritDoc} */ + public void modifyTableMeta(final byte[] tableName, int what, + final byte[] value) + throws IOException + { + new ModifyTableMeta(this, tableName, what, value).process(); + } + + /** {@inheritDoc} */ public HServerAddress findRootRegion() { return regionManager.getRootRegionLocation(); } Index: 
src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java =================================================================== --- src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java (revision 0) +++ src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java (revision 0) @@ -0,0 +1,78 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import java.io.IOException; + +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableNotDisabledException; +import org.apache.hadoop.hbase.ipc.HRegionInterface; +import org.apache.hadoop.hbase.io.BatchUpdate; +import org.apache.hadoop.hbase.util.Writables; + +/** Instantiated to modify table descriptor metadata */ +class ModifyTableMeta extends TableOperation { + + protected int what; + protected byte[] value; + + ModifyTableMeta(final HMaster master, final byte [] tableName, + int what, final byte[] value) + throws IOException { + super(master, tableName); + this.what = what; + this.value = value; + } + + protected void updateRegionInfo(HRegionInterface server, byte [] regionName, + HRegionInfo i) + throws IOException { + BatchUpdate b = new BatchUpdate(i.getRegionName()); + b.put(COL_REGIONINFO, Writables.getBytes(i)); + server.batchUpdate(regionName, b); + } + + @Override + protected void processScanItem( + @SuppressWarnings("unused") String serverName, + @SuppressWarnings("unused") long startCode, final HRegionInfo info) + throws IOException { + if (isEnabled(info)) { + throw new TableNotDisabledException(new String(tableName)); + } + } + + @Override + protected void postProcessMeta(MetaRegion m, HRegionInterface server) + throws IOException { + for (HRegionInfo i: unservedRegions) { + HTableDescriptor desc = i.getTableDesc(); + switch (what) { + case META_MAXFILESIZE: + desc.setMaxFileSize(Long.valueOf(new String(value))); + break; + default: + break; + } + updateRegionInfo(server, m.getRegionName(), i); + } + } +} Index: src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java =================================================================== --- src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java (revision 664521) +++ src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java (working copy) @@ -104,8 +104,18 @@ * @throws 
IOException */ public void disableTable(final byte [] tableName) throws IOException; - + /** + * Modify a table's metadata + * + * @param tableName + * @param what + * @param value + */ + public void modifyTableMeta(byte[] tableName, int what, byte[] value) + throws IOException; + + /** * Shutdown an HBase cluster. * @throws IOException */ Index: src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java =================================================================== --- src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (revision 664521) +++ src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (working copy) @@ -658,7 +658,28 @@ } } - + /** + * Modify a table's HTableDescriptor + * + * @param tableName name of table + * @param what the metadata to change + * @param value the new value to use + * @throws IOException + */ + public void modifyTableMeta(final byte [] tableName, int what, + final byte[] value) + throws IOException { + if (this.master == null) { + throw new MasterNotRunningException("master has been shut down"); + } + HTableDescriptor.isLegalTableName(tableName); + try { + this.master.modifyTableMeta(tableName, what, value); + } catch (RemoteException e) { + throw RemoteExceptionHandler.decodeRemoteException(e); + } + } + /** * Shuts down the HBase instance * @throws IOException