Index: core/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
===================================================================
--- core/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java	(revision 944933)
+++ core/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java	(working copy)
@@ -346,12 +346,6 @@
     setValue(Bytes.toBytes(key), Bytes.toBytes(value));
   }
 
-  /** @return compression type being used for the column family */
-  public Compression.Algorithm getCompression() {
-    String n = getValue(COMPRESSION);
-    return Compression.Algorithm.valueOf(n.toUpperCase());
-  }
-
   /** @return maximum number of versions */
   public synchronized int getMaxVersions() {
     if (this.cachedMaxVersions == -1) {
@@ -393,7 +387,8 @@
    * @return Compression type setting.
    */
   public Compression.Algorithm getCompressionType() {
-    return getCompression();
+    String n = getValue(COMPRESSION);
+    return Compression.Algorithm.valueOf(n.toUpperCase());
   }
 
   /**
Index: core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
===================================================================
--- core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java	(revision 944933)
+++ core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java	(working copy)
@@ -119,7 +119,8 @@
    * @throws IOException
    */
   byte[][] getAllColumns(HTable table) throws IOException {
-    HColumnDescriptor[] cds = table.getTableDescriptor().getColumnFamilies();
+    HColumnDescriptor[] cds =
+      table.getTableDescriptor().getFamilies().toArray(new HColumnDescriptor[0]);
     byte[][] columns = new byte[cds.length][];
     for (int i = 0; i < cds.length; i++) {
       columns[i] = Bytes.add(cds[i].getName(),
Index: core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
===================================================================
--- core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java	(revision 944933)
+++ core/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java	(working copy)
@@ -74,7 +74,7 @@
     ColumnDescriptor col = new ColumnDescriptor();
     col.name = Bytes.add(in.getName(), KeyValue.COLUMN_FAMILY_DELIM_ARRAY);
     col.maxVersions = in.getMaxVersions();
-    col.compression = in.getCompression().toString();
+    col.compression = in.getCompressionType().toString();
     col.inMemory = in.isInMemory();
     col.blockCacheEnabled = in.isBlockCacheEnabled();
     col.bloomFilterType = Boolean.toString(in.isBloomfilter());
Index: core/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
===================================================================
--- core/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java	(revision 944933)
+++ core/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java	(working copy)
@@ -177,7 +177,7 @@
     this.conf = conf;
     this.blockcache = family.isBlockCacheEnabled();
     this.blocksize = family.getBlocksize();
-    this.compression = family.getCompression();
+    this.compression = family.getCompressionType();
     this.comparator = info.getComparator();
     this.comparatorIgnoringType = this.comparator.getComparatorIgnoringType();
     // getTimeToLive returns ttl in seconds. Convert to milliseconds.
Index: core/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
===================================================================
--- core/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java	(revision 944933)
+++ core/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java	(working copy)
@@ -630,10 +630,6 @@
     return Collections.unmodifiableSet(this.families.keySet());
   }
 
-  public HColumnDescriptor[] getColumnFamilies() {
-    return getFamilies().toArray(new HColumnDescriptor[0]);
-  }
-
   /**
    * @param column
    * @return Column descriptor for the passed family name or the family on