Index: src/java/org/apache/hadoop/hbase/RegionHistorian.java
===================================================================
--- src/java/org/apache/hadoop/hbase/RegionHistorian.java	(revision 0)
+++ src/java/org/apache/hadoop/hbase/RegionHistorian.java	(revision 0)
@@ -0,0 +1,265 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.GregorianCalendar;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * The RegionHistorian keeps track of every modification a region goes
+ * through. Public methods are used to update the information in the
+ * .META. table and to retrieve it.
+ */
+public class RegionHistorian implements HConstants {
+
+  static final Log LOG = LogFactory.getLog(RegionHistorian.class);
+
+  private HTable metaTable;
+
+  private GregorianCalendar cal = new GregorianCalendar();
+
+  /** Singleton reference */
+  private static RegionHistorian historian;
+
+  /** Date formatter for the timestamp in RegionHistoryInformation */
+  private static SimpleDateFormat dateFormat = new SimpleDateFormat(
+      "EEE, d MMM yyyy HH:mm:ss");
+
+  /**
+   * Default constructor. Initializes the reference to the .META. table.
+   */
+  private RegionHistorian() {
+    HBaseConfiguration conf = new HBaseConfiguration();
+    try {
+      metaTable = new HTable(conf, META_TABLE_NAME);
+      LOG.debug("Region historian is ready.");
+    } catch (IOException ioe) {
+      LOG.warn("Unable to create RegionHistorian", ioe);
+    }
+  }
+
+  /**
+   * Singleton method
+   *
+   * @return The region historian
+   */
+  public static RegionHistorian getInstance() {
+    if (historian == null) {
+      historian = new RegionHistorian();
+    }
+    return historian;
+  }
+
+  /**
+   * Returns, for a given region name, a list of all values in the historian
+   * column of the .META. table, ordered by timestamp.
+   *
+   * @param regionName Region name as a string
+   * @return List of RegionHistoryInformation
+   */
+  public List<RegionHistoryInformation> getRegionHistory(String regionName) {
+    List<RegionHistoryInformation> informations =
+        new ArrayList<RegionHistoryInformation>();
+    try {
+      /*
+       * TODO: REGION_HISTORIAN_KEYS is used because there is currently no
+       * other way to retrieve all versions together with the column key
+       * information. To be changed when HTable.getRow handles versions.
+       */
+      for (byte[] columnKey : REGION_HISTORIAN_KEYS) {
+        Cell[] cells = metaTable.get(Bytes.toBytes(regionName), columnKey,
+            ALL_VERSIONS);
+        if (cells != null) {
+          for (Cell cell : cells) {
+            informations.add(new RegionHistoryInformation(cell.getTimestamp(),
+                Bytes.toString(columnKey).split(":")[1],
+                Bytes.toString(cell.getValue())));
+          }
+        }
+      }
+    } catch (IOException ioe) {
+      LOG.warn("Unable to retrieve region history", ioe);
+    }
+    Collections.sort(informations);
+    return informations;
+  }
+
+  /**
+   * Adds a creation event to the region's row in the .META. table.
+   * @param info
+   */
+  public void addRegionCreation(HRegionInfo info) {
+    add(REGION_CREATION, "Region creation", info);
+  }
+
+  /**
+   * Adds an opening event to the region's row in the .META. table.
+   * @param info
+   * @param address
+   */
+  public void addRegionOpen(HRegionInfo info, HServerAddress address) {
+    add(REGION_OPEN, "Region opened on server : " + address.getHostname(),
+        info);
+  }
+
+  /**
+   * Adds a merge event to the new region's row in the .META. table with
+   * information from oldInfo1 and oldInfo2.
+   * @param newInfo
+   * @param oldInfo1
+   * @param oldInfo2
+   */
+  public void addRegionMerge(HRegionInfo newInfo, HRegionInfo oldInfo1,
+      HRegionInfo oldInfo2) {
+    add(REGION_MERGE, "Region merged from : "
+        + oldInfo1.getRegionNameAsString() + " and "
+        + oldInfo2.getRegionNameAsString(), newInfo);
+  }
+
+  /**
+   * Adds a split event to the daughter regions' rows in the .META. table,
+   * carrying over the history of oldInfo.
+   * @param oldInfo
+   * @param newInfo1
+   * @param newInfo2
+   */
+  public void addRegionSplit(HRegionInfo oldInfo, HRegionInfo newInfo1,
+      HRegionInfo newInfo2) {
+    HRegionInfo[] infos = new HRegionInfo[] { newInfo1, newInfo2 };
+    for (HRegionInfo info : infos) {
+      add(REGION_SPLIT, "Region split from : "
+          + oldInfo.getRegionNameAsString(), info);
+    }
+
+    List<RegionHistoryInformation> oldHist =
+        getRegionHistory(oldInfo.getRegionNameAsString());
+
+    for (RegionHistoryInformation hist : oldHist) {
+      for (HRegionInfo info : infos) {
+        add(Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR + hist.getEvent()), "["
+            + oldInfo.getRegionNameAsString() + "] " + hist.getDescription(),
+            info, hist.getTimestamp());
+      }
+    }
+  }
+
+  /**
+   * Adds a compaction event to the region's row in the .META. table.
+   * @param info
+   */
+  public void addRegionCompaction(HRegionInfo info) {
+    add(REGION_COMPACTION, "Region compaction", info);
+  }
+
+  /**
+   * Adds an event with LATEST_TIMESTAMP.
+   * @param column
+   * @param text
+   * @param info
+   */
+  private void add(byte[] column, String text, HRegionInfo info) {
+    add(column, text, info, LATEST_TIMESTAMP);
+  }
+
+  /**
+   * Adds an event with the provided timestamp.
+   * @param column
+   * @param text
+   * @param info
+   * @param timestamp
+   */
+  private void add(byte[] column, String text, HRegionInfo info, long timestamp) {
+    if (!info.isMetaRegion()) {
+      BatchUpdate batch = new BatchUpdate(info.getRegionName());
+      batch.setTimestamp(timestamp);
+      batch.put(column, Bytes.toBytes(text));
+      try {
+        metaTable.commit(batch);
+        LOG.debug("Added " + info.getRegionNameAsString() + " to "
+            + Bytes.toString(column));
+      } catch (IOException ioe) {
+        LOG.warn("Unable to add '" + text + "' to the historian", ioe);
+      }
+    }
+  }
+
+  /**
+   * Inner class that only contains information about an event.
+   */
+  public class RegionHistoryInformation implements
+      Comparable<RegionHistoryInformation> {
+
+    private long timestamp;
+
+    private String event;
+
+    private String description;
+
+    public RegionHistoryInformation(long timestamp, String event,
+        String description) {
+      this.timestamp = timestamp;
+      this.event = event;
+      this.description = description;
+    }
+
+    /**
+     * Returns the inverse of Long.compareTo so that the most recent event
+     * sorts first.
+     */
+    public int compareTo(RegionHistoryInformation otherInfo) {
+      return -1 * Long.valueOf(timestamp).compareTo(otherInfo.getTimestamp());
+    }
+
+    public String getEvent() {
+      return event;
+    }
+
+    public String getDescription() {
+      return description;
+    }
+
+    public long getTimestamp() {
+      return timestamp;
+    }
+
+    /**
+     * Returns the timestamp formatted with the date formatter.
+     * @return the formatted timestamp
+     */
+    public String getTimestampAsString() {
+      cal.setTimeInMillis(timestamp);
+      return dateFormat.format(cal.getTime());
+    }
+
+  }
+
+}
Index: src/java/org/apache/hadoop/hbase/HConstants.java
===================================================================
--- src/java/org/apache/hadoop/hbase/HConstants.java	(revision 661066)
+++ src/java/org/apache/hadoop/hbase/HConstants.java	(working copy)
@@ -128,9 +128,15 @@
 
   /** The ROOT and META column family (string) */
   static final String COLUMN_FAMILY_STR = "info:";
+
+  /** The META historian column family (string) */
+  static final String COLUMN_FAMILY_HISTORIAN_STR = "historian:";
 
   /** The ROOT and META column family */
   static final byte [] COLUMN_FAMILY = Bytes.toBytes(COLUMN_FAMILY_STR);
+
+  /** The META historian column family */
+  static final byte [] COLUMN_FAMILY_HISTORIAN = Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR);
 
   /** Array of meta column names */
   static final byte[][] COLUMN_FAMILY_ARRAY = new byte[][] {COLUMN_FAMILY};
@@ -206,4 +212,25 @@
   public static final String HBASE_CLIENT_RETRIES_NUMBER_KEY =
     "hbase.client.retries.number";
   public static final int DEFAULT_CLIENT_RETRIES = 5;
+
+  /** .META. column key for region creation */
+  public final static byte[] REGION_CREATION = Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR + "creation");
+
+  /** .META. column key for region opening */
+  public final static byte[] REGION_OPEN = Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR + "open");
+
+  /** .META. column key for region merge */
+  public final static byte[] REGION_MERGE = Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR + "merge");
+
+  /** .META. column key for region split */
+  public final static byte[] REGION_SPLIT = Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR + "split");
+
+  /** .META. column key for region compaction */
+  public final static byte[] REGION_COMPACTION = Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR + "compaction");
+
+  /** .META. column key for region assignment */
+  public final static byte[] REGION_ASSIGNMENT = Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR + "assignment");
+
+  /** Aggregate of the .META. historian column keys, used when retrieving all versions of a row */
+  public final static byte[][] REGION_HISTORIAN_KEYS = new byte[][] { REGION_CREATION, REGION_OPEN, REGION_MERGE, REGION_SPLIT, REGION_COMPACTION, REGION_ASSIGNMENT };
 }
\ No newline at end of file
Index: src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(revision 661066)
+++ src/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(working copy)
@@ -50,6 +50,7 @@
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.RegionHistorian;
 import org.apache.hadoop.hbase.WrongRegionException;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.BatchOperation;
@@ -252,6 +253,8 @@
     deleteRegion(fs, b.getRegionDir());
 
     LOG.info("merge completed. New region is " + dstRegion);
+    RegionHistorian.getInstance().addRegionMerge(dstRegion.getRegionInfo(),
+        a.getRegionInfo(), b.getRegionInfo());
 
     return dstRegion;
   }
@@ -433,6 +436,8 @@
       LOG.debug("Opening region " + this + "/" +
         this.regionInfo.getEncodedName());
     }
+    if (!regionInfo.isMetaRegion())
+      RegionHistorian.getInstance().addRegionCreation(regionInfo);
 
     this.regionCompactionDir =
       new Path(getCompactionDir(basedir), encodedNameStr);
@@ -774,6 +779,10 @@
       LOG.debug("Cleaned up " + FSUtils.getPath(splits) + " " + deleted);
     }
     HRegion regions[] = new HRegion [] {regionA, regionB};
+
+    RegionHistorian.getInstance().addRegionSplit(this.regionInfo,
+        regionA.getRegionInfo(), regionB.getRegionInfo());
+
     return regions;
   }
 }
@@ -867,6 +876,8 @@
       doRegionCompactionCleanup();
       LOG.info("compaction completed on region " + this + " in " +
           StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
+
+      RegionHistorian.getInstance().addRegionCompaction(regionInfo);
     } finally {
       synchronized (writestate) {
         writestate.compacting = false;
Index: src/java/org/apache/hadoop/hbase/HTableDescriptor.java
===================================================================
--- src/java/org/apache/hadoop/hbase/HTableDescriptor.java	(revision 661066)
+++ src/java/org/apache/hadoop/hbase/HTableDescriptor.java	(working copy)
@@ -38,18 +38,21 @@
  */
 public class HTableDescriptor implements WritableComparable {
 
   /** Table descriptor for -ROOT- catalog table */
-  public static final HTableDescriptor ROOT_TABLEDESC =
-    new HTableDescriptor(HConstants.ROOT_TABLE_NAME,
-      new HColumnDescriptor(HConstants.COLUMN_FAMILY, 1,
-        HColumnDescriptor.CompressionType.NONE, false, false,
-        Integer.MAX_VALUE, HConstants.FOREVER, null));
+  public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor(
+      HConstants.ROOT_TABLE_NAME,
+      new HColumnDescriptor[] { new HColumnDescriptor(HConstants.COLUMN_FAMILY,
+          1, HColumnDescriptor.CompressionType.NONE, false, false,
+          Integer.MAX_VALUE, HConstants.FOREVER, null) });
 
   /** Table descriptor for .META. catalog table */
-  public static final HTableDescriptor META_TABLEDESC =
-    new HTableDescriptor(HConstants.META_TABLE_NAME,
-      new HColumnDescriptor(HConstants.COLUMN_FAMILY, 1,
-        HColumnDescriptor.CompressionType.NONE, false, false,
-        Integer.MAX_VALUE, HConstants.FOREVER, null));
+  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
+      HConstants.META_TABLE_NAME, new HColumnDescriptor[] {
+          new HColumnDescriptor(HConstants.COLUMN_FAMILY, 1,
+              HColumnDescriptor.CompressionType.NONE, false, false,
+              Integer.MAX_VALUE, HConstants.FOREVER, null),
+          new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN,
+              HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE,
+              false, false, Integer.MAX_VALUE, HConstants.FOREVER, null) });
 
   private boolean rootregion = false;
   private boolean metaregion = false;
@@ -64,11 +67,13 @@
    * Private constructor used internally creating table descriptors for
    * catalog tables: e.g. .META. and -ROOT-.
    */
-  private HTableDescriptor(final byte [] name, HColumnDescriptor family) {
+  private HTableDescriptor(final byte [] name, HColumnDescriptor[] families) {
     this.name = name.clone();
     this.rootregion = Bytes.equals(name, HConstants.ROOT_TABLE_NAME);
     this.metaregion = true;
-    this.families.put(Bytes.mapKey(family.getName()), family);
+    for (HColumnDescriptor descriptor : families) {
+      this.families.put(Bytes.mapKey(descriptor.getName()), descriptor);
+    }
   }
 
   /**
Index: src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java
===================================================================
--- src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java	(revision 661066)
+++ src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java	(working copy)
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.RegionHistorian;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -81,6 +82,7 @@
       b.put(COL_SERVER, Bytes.toBytes(serverAddress.toString()));
       b.put(COL_STARTCODE, startCode);
       server.batchUpdate(metaRegionName, b);
+      RegionHistorian.getInstance().addRegionOpen(regionInfo, serverAddress);
       if (isMetaTable) {
         // It's a meta region.
         MetaRegion m = new MetaRegion(serverAddress,
Index: src/webapps/master/table.jsp
===================================================================
--- src/webapps/master/table.jsp	(revision 661066)
+++ src/webapps/master/table.jsp	(working copy)
@@ -45,7 +45,9 @@
   for (MetaRegion meta: onlineRegions.values()) {
     int infoPort = serverToServerInfos.get(meta.getServer().getBindAddress()+":"+meta.getServer().getPort()).getInfoPort();
     String url = "http://" + meta.getServer().getHostname() + ":" + infoPort + "/";%>
-<tr><td><%= meta.getRegionName() %></td><td><a href="<%= url %>"><%= meta.getServer().getHostname() %>:<%= meta.getServer().getPort() %></a></td><td>-</td><td><%= meta.getStartKey() %></td><td>-</td></tr>
+<tr><td><%= Bytes.toString(meta.getRegionName()) %></td>
+  <td><a href="<%= url %>"><%= meta.getServer().getHostname() %>:<%= meta.getServer().getPort() %></a></td>
+  <td>-</td><td><%= meta.getStartKey() %></td><td>-</td></tr>
 <% } %>
 </table>
 <%} else { %>
@@ -56,11 +58,11 @@
 if(regions != null && regions.size() > 0) { %>
 <%= tableHeader %>
 <% for(Map.Entry<HRegionInfo, HServerAddress> hriEntry : regions.entrySet()) { %>
-<% System.out.println(serverToServerInfos.keySet().toArray()[0].toString());
-   System.out.println(hriEntry.getValue().getHostname()+":"+hriEntry.getValue().getPort());
-   int infoPort = serverToServerInfos.get(hriEntry.getValue().getBindAddress()+":"+hriEntry.getValue().getPort()).getInfoPort();
-   String url = "http://" + hriEntry.getValue().getHostname().toString() + ":" + infoPort + "/"; %>
-<tr><td><%= hriEntry.getKey().getRegionNameAsString()%></td><td><a href="<%= url %>"><%= hriEntry.getValue().getHostname() %>:<%= hriEntry.getValue().getPort() %></a></td>
+<% int infoPort = serverToServerInfos.get(hriEntry.getValue().getBindAddress()+":"+hriEntry.getValue().getPort()).getInfoPort();
+   String urlRegionHistorian = "/regionhistorian.jsp?regionname="+hriEntry.getKey().getRegionNameAsString();
+   String urlRegionServer = "http://" + hriEntry.getValue().getHostname().toString() + ":" + infoPort + "/"; %>
+<tr><td><a href="<%= urlRegionHistorian %>"><%= hriEntry.getKey().getRegionNameAsString()%></a></td>
+  <td><a href="<%= urlRegionServer %>"><%= hriEntry.getValue().getHostname() %>:<%= hriEntry.getValue().getPort() %></a></td>
 <td><%= hriEntry.getKey().getEncodedName()%></td>
 <td><%= Bytes.toString(hriEntry.getKey().getStartKey())%></td><td><%= Bytes.toString(hriEntry.getKey().getEndKey())%></td></tr>
 <% } %>
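
Usage note (illustration only, not part of the patch): a minimal sketch of how the new historian API could be consumed, for example by the regionhistorian.jsp page that table.jsp now links to. The class name RegionHistoryDump and the sample region name are hypothetical; the calls mirror the public methods added in RegionHistorian.java above.

import java.util.List;

import org.apache.hadoop.hbase.RegionHistorian;
import org.apache.hadoop.hbase.RegionHistorian.RegionHistoryInformation;

public class RegionHistoryDump {
  public static void main(String[] args) {
    // Hypothetical region name; any region row present in .META. would do.
    String regionName = args.length > 0 ? args[0] : "mytable,,1211625599334";

    // The historian is a lazily created singleton backed by the .META. table.
    RegionHistorian historian = RegionHistorian.getInstance();

    // Events come back most recent first (see RegionHistoryInformation.compareTo).
    List<RegionHistoryInformation> history =
        historian.getRegionHistory(regionName);
    for (RegionHistoryInformation event : history) {
      System.out.println(event.getTimestampAsString() + " [" + event.getEvent()
          + "] " + event.getDescription());
    }
  }
}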