diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 11d71cf..8e90028 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -25,6 +25,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.List;
 import java.util.Map;
 import java.util.SortedSet;
 import java.util.UUID;
@@ -82,6 +83,7 @@ import com.google.common.collect.Ordering;
 public class StoreFile {
   private static final Log LOG = LogFactory.getLog(StoreFile.class.getName());
 
+  static final String HBASE_HFILE_PLUGINS_KEY = "hbase.hfile.plugins";
   // Keys for fileinfo values in HFile
 
   /** Max Sequence ID in FileInfo */
@@ -171,7 +173,11 @@ public class StoreFile {
    * if this metadata is set as true, the reset is skipped.
    */
   public static final byte[] SKIP_RESET_SEQ_ID = Bytes.toBytes("SKIP_RESET_SEQ_ID");
-
+
+  /**
+   * Configuration, retained for loading store file plug-ins.
+   */
+  private final Configuration conf;
   /**
    * Constructor, loads a reader and it's indices, etc. May allocate a
    * substantial amount of ram depending on the underlying files (10-20MB?).
@@ -213,6 +219,7 @@ public class StoreFile {
     this.fs = fs;
     this.fileInfo = fileInfo;
     this.cacheConf = cacheConf;
+    this.conf = conf;
 
     if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
       this.cfBloomType = cfBloomType;
@@ -232,6 +239,7 @@ public class StoreFile {
     this.fileInfo = other.fileInfo;
     this.cacheConf = other.cacheConf;
     this.cfBloomType = other.cfBloomType;
+    this.conf = other.conf;
   }
 
   /**
@@ -709,7 +718,6 @@ public class StoreFile {
    * @param comparator Comparator used to compare KVs.
    * @return The split point row, or null if splitting is not possible, or reader is null.
    */
-  @SuppressWarnings("deprecation")
   byte[] getFileSplitPoint(CellComparator comparator) throws IOException {
     if (this.reader == null) {
       LOG.warn("Storefile " + this + " Reader is null; cannot get split point");
@@ -764,6 +772,8 @@ public class StoreFile {
     protected HFile.Writer writer;
     private KeyValue.KeyOnlyKeyValue lastBloomKeyOnlyKV = null;
+
+    private List<Plugin> plugins;
 
     /**
      * Creates an HFile.Writer that also write helpful meta data.
@@ -820,6 +830,7 @@ public class StoreFile {
         if (LOG.isTraceEnabled()) LOG.trace("Delete Family Bloom filter type for " + path + ": "
             + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
       }
+      this.plugins = StoreFilePluginFactory.getPlugins(conf);
     }
 
     /**
@@ -835,6 +846,17 @@ public class StoreFile {
       writer.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(majorCompaction));
       appendTrackedTimestampsToMetadata();
+      notifyPluginsOnAppendMetadata(writer);
+    }
+
+    /**
+     * Gives each configured plug-in's MetaWriter a chance to append its
+     * metadata before the writer is closed.
+     */
+    private void notifyPluginsOnAppendMetadata(HFile.Writer w) throws IOException {
+      for (Plugin plugin : plugins) {
+        plugin.getMetaWriter().appendMetadata(w);
+      }
     }
 
     /**
@@ -851,6 +877,8 @@ public class StoreFile {
       writer.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(majorCompaction));
       writer.appendFileInfo(MOB_CELLS_COUNT, Bytes.toBytes(mobCellsCount));
       appendTrackedTimestampsToMetadata();
+      // TODO: notify plug-ins here as well; appending plug-in metadata does not
+      // currently work for MOB store files for an unknown reason.
     }
 
     /**
@@ -1001,6 +1028,16 @@ public class StoreFile {
       appendDeleteFamilyBloomFilter(cell);
       writer.append(cell);
       trackTimestamps(cell);
+      notifyPluginsOnAppend(cell);
+    }
+
+    /**
+     * Passes each appended cell to the configured plug-ins' MetaWriters.
+     */
+    private void notifyPluginsOnAppend(Cell cell) {
+      for (Plugin plugin : plugins) {
+        plugin.getMetaWriter().add(cell);
+      }
     }
 
     public Path getPath() {
@@ -1697,4 +1737,58 @@ public class StoreFile {
       }
     }
   }
+
+  /**
+   * A StoreFile plug-in supports custom code in the Writer path (and, in the
+   * future, the Reader path). The main goal is to give HBase applications a
+   * hook for storing and retrieving custom metadata associated with a store
+   * file.
+   */
+  public static abstract class Plugin {
+
+    protected Configuration conf;
+
+    /**
+     * Configure the plug-in.
+     * @param conf the configuration to use
+     */
+    public void config(Configuration conf) {
+      this.conf = conf;
+    }
+
+    /**
+     * @return the MetaWriter this plug-in contributes to the writer path
+     */
+    public abstract MetaWriter getMetaWriter();
+  }
+
+  /**
+   * Base class for plug-in meta writers: collects state from every appended
+   * cell and appends the result as store file metadata.
+   */
+  public static abstract class MetaWriter {
+
+    protected Configuration conf;
+
+    /**
+     * Configure the meta writer.
+     * @param conf the configuration to use
+     */
+    public void config(Configuration conf) {
+      this.conf = conf;
+    }
+
+    /**
+     * Called by StoreFile.Writer.append(Cell) for every cell written.
+     * @param cell the cell being appended
+     */
+    public abstract void add(Cell cell);
+
+    /**
+     * Called once, before the writer is closed, so the meta writer can append
+     * its metadata to the file info.
+     * @param writer the HFile writer to append metadata to
+     */
+    public abstract void appendMetadata(HFile.Writer writer) throws IOException;
+  }
 }
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFilePluginFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFilePluginFactory.java
new file mode 100644
index 0000000..665c985
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFilePluginFactory.java
@@ -0,0 +1,57 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Instantiates the StoreFile.Plugin implementations named by the
+ * hbase.hfile.plugins configuration key.
+ */
+public final class StoreFilePluginFactory {
+  private static final Log LOG = LogFactory.getLog(StoreFilePluginFactory.class);
+
+  private StoreFilePluginFactory() {
+    // Utility classes should not have a public or default constructor.
+  }
+
+  static List<StoreFile.Plugin> getPlugins(Configuration conf) {
+    List<StoreFile.Plugin> plugins = new ArrayList<StoreFile.Plugin>();
+    String classesStr = conf.get(StoreFile.HBASE_HFILE_PLUGINS_KEY);
+    if (classesStr != null) {
+      for (String className : classesStr.split(",")) {
+        className = className.trim();
+        try {
+          Class<?> cls = Class.forName(className);
+          StoreFile.Plugin plugin = (StoreFile.Plugin) cls.newInstance();
+          plugin.config(conf);
+          plugins.add(plugin);
+        } catch (Exception e) {
+          LOG.error("Could not instantiate plugin: " + className, e);
+        }
+      }
+    }
+    return plugins;
+  }
+}
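For reference, a minimal sketch of how plug-ins get wired in through configuration; the com.example class names are hypothetical, and StoreFilePluginFactory.getPlugins() itself is package-private, so it is invoked by StoreFile.Writer rather than by application code:

    Configuration conf = HBaseConfiguration.create();
    // Comma-separated list of StoreFile.Plugin implementations. Entries that
    // fail to load are logged by the factory and skipped, not treated as fatal.
    conf.set("hbase.hfile.plugins",
        "com.example.CellCountPlugin,com.example.MaxTimestampPlugin");

The test below exercises the same key per column family through HColumnDescriptor.setConfiguration().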
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreMetaDataPlugins.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreMetaDataPlugins.java
new file mode 100644
index 0000000..b6d6051
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreMetaDataPlugins.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
+import org.apache.hadoop.hbase.regionserver.StoreFile.MetaWriter;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * Tests the StoreFile metadata plug-in hook in the writer path.
+ */
+@Category({RegionServerTests.class, MediumTests.class})
+public class TestStoreMetaDataPlugins {
+
+  public static final byte[] KEY = Bytes.toBytes("KEY");
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static MiniHBaseCluster cluster;
+  @Rule
+  public final TestName TEST_NAME = new TestName();
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    cluster = TEST_UTIL.startMiniCluster(1, 1);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testStoreFileMetadataPlugin() throws Exception {
+    Table table = null;
+    try {
+      TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
+      byte[] fam = Bytes.toBytes("info");
+      // column names
+      byte[] qual = Bytes.toBytes("qual");
+      byte[] row1 = Bytes.toBytes("rowb");
+      byte[] row2 = Bytes.toBytes("rowc");
+
+      HTableDescriptor desc = new HTableDescriptor(tableName);
+      HColumnDescriptor colDesc = new HColumnDescriptor(fam);
+      colDesc.setConfiguration(StoreFile.HBASE_HFILE_PLUGINS_KEY,
+        TestPlugin.class.getName());
+      desc.addFamily(colDesc);
+      Admin admin = TEST_UTIL.getHBaseAdmin();
+      admin.createTable(desc);
+      byte[] value = Bytes.toBytes("value");
+      table = TEST_UTIL.getConnection().getTable(tableName);
+      Put put = new Put(row1);
+      put.addColumn(fam, qual, value);
+      table.put(put);
+
+      put = new Put(row2);
+      put.addColumn(fam, qual, value);
+      table.put(put);
+
+      admin.flush(tableName);
+
+      List<HRegion> regions = cluster.getRegions(tableName);
+      assertEquals(1, regions.size());
+      HRegion region = regions.get(0);
+      List<Store> stores = region.getStores();
+      assertEquals(1, stores.size());
+      Store store = stores.get(0);
+      Collection<StoreFile> files = store.getStorefiles();
+      assertEquals(1, files.size());
+      StoreFile file = files.iterator().next();
+      // The plug-in saw both cells during the flush.
+      int numCells = Bytes.toInt(file.getMetadataValue(KEY));
+      assertEquals(2, numCells);
+    } finally {
+      if (table != null) {
+        table.close();
+      }
+    }
+  }
+}
+
+/**
+ * Test plug-in that counts the cells written to a store file.
+ */
+class TestPlugin extends StoreFile.Plugin {
+  MetaWriter metaWriter;
+
+  @Override
+  public MetaWriter getMetaWriter() {
+    if (metaWriter == null) {
+      metaWriter = new TestMetaWriter();
+    }
+    return metaWriter;
+  }
+}
+
+class TestMetaWriter extends StoreFile.MetaWriter {
+
+  int cellCount = 0;
+
+  @Override
+  public void add(Cell cell) {
+    cellCount++;
+  }
+
+  @Override
+  public void appendMetadata(Writer writer) throws IOException {
+    writer.appendFileInfo(TestStoreMetaDataPlugins.KEY, Bytes.toBytes(cellCount));
+  }
+}
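The test above exercises a simple cell counter, but the same two callbacks support any per-file aggregate. As a further illustration, a sketch of a hypothetical MetaWriter that records the newest cell timestamp seen in a store file (the class name and the MAX_TS key are illustrative, not part of this patch):

    public class MaxTimestampMetaWriter extends StoreFile.MetaWriter {

      private long maxTs = Long.MIN_VALUE;

      @Override
      public void add(Cell cell) {
        // Invoked once per appended cell; track the largest timestamp seen.
        maxTs = Math.max(maxTs, cell.getTimestamp());
      }

      @Override
      public void appendMetadata(HFile.Writer writer) throws IOException {
        // Persisted in the file info; readable later through
        // StoreFile.getMetadataValue(Bytes.toBytes("MAX_TS")).
        writer.appendFileInfo(Bytes.toBytes("MAX_TS"), Bytes.toBytes(maxTs));
      }
    }

A matching StoreFile.Plugin subclass that returns this writer from getMetaWriter(), in the style of TestPlugin above, completes the hook-up.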