Index: src/java/org/apache/hadoop/hbase/RegionHistorian.java =================================================================== --- src/java/org/apache/hadoop/hbase/RegionHistorian.java (revision 0) +++ src/java/org/apache/hadoop/hbase/RegionHistorian.java (revision 0) @@ -0,0 +1,291 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import java.io.IOException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.GregorianCalendar; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.io.BatchUpdate; +import org.apache.hadoop.hbase.io.Cell; +import org.apache.hadoop.hbase.util.Bytes; +/** + * The Region Historian's task is to keep track of every modification a region + * goes through. Public methods are used to update the information in the + * .META. table and to retrieve it. + */ +public class RegionHistorian implements HConstants { + + static final Log LOG = LogFactory.getLog(RegionHistorian.class); + + private HTable metaTable; + + private GregorianCalendar cal = new GregorianCalendar(); + + /** Singleton reference */ + private static RegionHistorian historian; + + /** Date formatter for the timestamp in RegionHistoryInformation */ + private static SimpleDateFormat dateFormat = new SimpleDateFormat( + "EEE, d MMM yyyy HH:mm:ss"); + + public static enum HistorianColumnKey { + REGION_CREATION ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"creation")), + REGION_OPEN ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"open")), + REGION_SPLIT ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"split")), + REGION_COMPACTION ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"compaction")), + REGION_FLUSH ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"flush")), + REGION_ASSIGNMENT ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"assignment")); + + public byte[] key; + + HistorianColumnKey(byte[] key) { + this.key = key; + } + } + + /** + * Default constructor. Initializes a reference to the .META. table.
+ */ + private RegionHistorian() { + HBaseConfiguration conf = new HBaseConfiguration(); + + try { + metaTable = new HTable(conf, META_TABLE_NAME); + LOG.debug("Region historian is ready."); + } catch (IOException ioe) { + LOG.warn("Unable to create RegionHistorian", ioe); + } + } + + /** + * Singleton method + * + * @return The region historian + */ + public static RegionHistorian getInstance() { + if (historian == null) { + historian = new RegionHistorian(); + } + return historian; + } + + /** + * Returns, for a given region name, all values in the historian column + * family of the .META. table, ordered by timestamp. + * + * @param regionName + * Region name as a string + * @return List of RegionHistoryInformation + */ + public static List<RegionHistoryInformation> getRegionHistory( + String regionName) { + getInstance(); + List<RegionHistoryInformation> informations = new ArrayList<RegionHistoryInformation>(); + try { + /* + * TODO REGION_HISTORIAN_KEYS is used because there is currently no other + * way to retrieve all versions together with the column key information. + * To be changed when HTable.getRow handles versions. + */ + for (HistorianColumnKey keyEnu : HistorianColumnKey.values()) { + byte[] columnKey = keyEnu.key; + Cell[] cells = historian.metaTable.get(Bytes.toBytes(regionName), + columnKey, ALL_VERSIONS); + if (cells != null) { + for (Cell cell : cells) { + informations.add(historian.new RegionHistoryInformation(cell + .getTimestamp(), Bytes.toString(columnKey).split(":")[1], Bytes + .toString(cell.getValue()))); + } + } + } + } catch (IOException ioe) { + LOG.warn("Unable to retrieve region history", ioe); + } + Collections.sort(informations); + return informations; + } + + /** + * Method to add an assignment event to the row in the .META. table + * + * @param info + * @param serverName + */ + public static void addRegionAssignment(HRegionInfo info, String serverName) { + + add(HistorianColumnKey.REGION_ASSIGNMENT.key, "Region assigned to server " + + serverName, info); + } + + /** + * Method to add a creation event to the row in the .META. table + * + * @param info + */ + public static void addRegionCreation(HRegionInfo info) { + + add(HistorianColumnKey.REGION_CREATION.key, "Region creation", info); + } + + /** + * Method to add an opening event to the row in the .META. table + * + * @param info + * @param address + */ + public static void addRegionOpen(HRegionInfo info, HServerAddress address) { + + add(HistorianColumnKey.REGION_OPEN.key, "Region opened on server : " + + address.getHostname(), info); + } + + /** + * Method to add a split event to the rows in the .META. table with + * information from oldInfo.
+ * @param oldInfo + * @param newInfo1 + * @param newInfo2 + */ + public static void addRegionSplit(HRegionInfo oldInfo, HRegionInfo newInfo1, + HRegionInfo newInfo2) { + + HRegionInfo[] infos = new HRegionInfo[] { newInfo1, newInfo2 }; + for (HRegionInfo info : infos) { + add(HistorianColumnKey.REGION_SPLIT.key, "Region split from : " + + oldInfo.getRegionNameAsString(), info); + } + } + + /** + * Method to add a compaction event to the row in the .META. table + * + * @param info + * @param timeTaken + */ + public static void addRegionCompaction(HRegionInfo info, String timeTaken) { + if (LOG.isDebugEnabled()) { + add(HistorianColumnKey.REGION_COMPACTION.key, + "Region compaction completed in " + timeTaken, info); + } + } + + /** + * Method to add a flush event to the row in the .META. table + * + * @param info + * @param timeTaken + */ + public static void addRegionFlush(HRegionInfo info, String timeTaken) { + if (LOG.isDebugEnabled()) { + add(HistorianColumnKey.REGION_FLUSH.key, "Region flush completed in " + + timeTaken, info); + } + } + + /** + * Method to add an event with LATEST_TIMESTAMP. + * @param column + * @param text + * @param info + */ + private static void add(byte[] column, String text, HRegionInfo info) { + add(column, text, info, LATEST_TIMESTAMP); + } + + /** + * Method to add an event with provided information. + * @param column + * @param text + * @param info + * @param timestamp + */ + private static void add(byte[] column, String text, HRegionInfo info, long timestamp) { + if (!info.isMetaRegion()) { + getInstance(); + BatchUpdate batch = new BatchUpdate(info.getRegionName()); + batch.setTimestamp(timestamp); + batch.put(column, Bytes.toBytes(text)); + try { + historian.metaTable.commit(batch); + } catch (IOException ioe) { + LOG.warn("Unable to '" + text + "'", ioe); + } + } + } + + /** + * Inner class that only contains information about an event. + * + */ + public class RegionHistoryInformation implements + Comparable<RegionHistoryInformation> { + + private long timestamp; + + private String event; + + private String description; + + public RegionHistoryInformation(long timestamp, String event, + String description) { + this.timestamp = timestamp; + this.event = event; + this.description = description; + } + + /** + * Returns the inverse of Long.compareTo so that the most recent event sorts first. + */ + public int compareTo(RegionHistoryInformation otherInfo) { + return -1 * Long.valueOf(timestamp).compareTo(otherInfo.getTimestamp()); + } + + public String getEvent() { + return event; + } + + public String getDescription() { + return description; + } + + public long getTimestamp() { + return timestamp; + } + + /** + * Returns the timestamp formatted with the date formatter.
+ * @return + */ + public String getTimestampAsString() { + cal.setTimeInMillis(timestamp); + return dateFormat.format(cal.getTime()); + } + + } + +} Index: src/java/org/apache/hadoop/hbase/HConstants.java =================================================================== --- src/java/org/apache/hadoop/hbase/HConstants.java (revision 662499) +++ src/java/org/apache/hadoop/hbase/HConstants.java (working copy) @@ -128,9 +128,15 @@ /** The ROOT and META column family (string) */ static final String COLUMN_FAMILY_STR = "info:"; + + /** The META historian column family (string) */ + static final String COLUMN_FAMILY_HISTORIAN_STR = "historian:"; /** The ROOT and META column family */ static final byte [] COLUMN_FAMILY = Bytes.toBytes(COLUMN_FAMILY_STR); + + /** The META historian column family */ + static final byte [] COLUMN_FAMILY_HISTORIAN = Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR); /** Array of meta column names */ static final byte[][] COLUMN_FAMILY_ARRAY = new byte[][] {COLUMN_FAMILY}; @@ -206,4 +212,5 @@ public static final String HBASE_CLIENT_RETRIES_NUMBER_KEY = "hbase.client.retries.number"; public static final int DEFAULT_CLIENT_RETRIES = 5; + } \ No newline at end of file Index: src/java/org/apache/hadoop/hbase/regionserver/HRegion.java =================================================================== --- src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 662499) +++ src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy) @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.HStoreKey; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.RegionHistorian; import org.apache.hadoop.hbase.WrongRegionException; import org.apache.hadoop.hbase.filter.RowFilterInterface; import org.apache.hadoop.hbase.io.BatchOperation; @@ -433,6 +434,7 @@ LOG.debug("Opening region " + this + "/" + this.regionInfo.getEncodedName()); } + this.regionCompactionDir = new Path(getCompactionDir(basedir), encodedNameStr); @@ -774,6 +776,10 @@ LOG.debug("Cleaned up " + FSUtils.getPath(splits) + " " + deleted); } HRegion regions[] = new HRegion [] {regionA, regionB}; + + RegionHistorian.addRegionSplit(this.regionInfo, + regionA.getRegionInfo(), regionB.getRegionInfo()); + return regions; } } @@ -865,8 +871,11 @@ } } doRegionCompactionCleanup(); - LOG.info("compaction completed on region " + this + " in " + - StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime)); + String timeTaken = StringUtils.formatTimeDiff(System.currentTimeMillis(), + startTime); + LOG.info("compaction completed on region " + this + " in " + timeTaken); + + RegionHistorian.addRegionCompaction(regionInfo, timeTaken); } finally { synchronized (writestate) { writestate.compacting = false; @@ -1039,10 +1048,14 @@ } if (LOG.isDebugEnabled()) { + String timeTaken = StringUtils.formatTimeDiff(System.currentTimeMillis(), + startTime); LOG.debug("Finished memcache flush for region " + this + " in " + (System.currentTimeMillis() - startTime) + "ms, sequence id=" + sequenceId); + if (!regionInfo.isMetaRegion()) + RegionHistorian.addRegionFlush(regionInfo, timeTaken); } return true; } @@ -1915,6 +1928,8 @@ Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName()); FileSystem fs = FileSystem.get(conf); fs.mkdirs(regionDir); + if (!info.isMetaRegion()) + RegionHistorian.addRegionCreation(info); return new HRegion(tableDir, new HLog(fs, new Path(regionDir, HREGION_LOGDIR_NAME), conf, null), fs, conf, info, null, 
null); Index: src/java/org/apache/hadoop/hbase/HTableDescriptor.java =================================================================== --- src/java/org/apache/hadoop/hbase/HTableDescriptor.java (revision 662499) +++ src/java/org/apache/hadoop/hbase/HTableDescriptor.java (working copy) @@ -38,18 +38,21 @@ */ public class HTableDescriptor implements WritableComparable { /** Table descriptor for -ROOT- catalog table */ - public static final HTableDescriptor ROOT_TABLEDESC = - new HTableDescriptor(HConstants.ROOT_TABLE_NAME, - new HColumnDescriptor(HConstants.COLUMN_FAMILY, 1, - HColumnDescriptor.CompressionType.NONE, false, false, - Integer.MAX_VALUE, HConstants.FOREVER, null)); + public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor( + HConstants.ROOT_TABLE_NAME, + new HColumnDescriptor[] { new HColumnDescriptor(HConstants.COLUMN_FAMILY, + 1, HColumnDescriptor.CompressionType.NONE, false, false, + Integer.MAX_VALUE, HConstants.FOREVER, null) }); /** Table descriptor for .META. catalog table */ - public static final HTableDescriptor META_TABLEDESC = - new HTableDescriptor(HConstants.META_TABLE_NAME, - new HColumnDescriptor(HConstants.COLUMN_FAMILY, 1, - HColumnDescriptor.CompressionType.NONE, false, false, - Integer.MAX_VALUE, HConstants.FOREVER, null)); + public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor( + HConstants.META_TABLE_NAME, new HColumnDescriptor[] { + new HColumnDescriptor(HConstants.COLUMN_FAMILY, 1, + HColumnDescriptor.CompressionType.NONE, false, false, + Integer.MAX_VALUE, HConstants.FOREVER, null), + new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN, + HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE, + false, false, Integer.MAX_VALUE, HConstants.FOREVER, null) }); private boolean rootregion = false; private boolean metaregion = false; @@ -64,11 +67,13 @@ * Private constructor used internally creating table descriptors for * catalog tables: e.g. .META. and -ROOT-. */ - private HTableDescriptor(final byte [] name, HColumnDescriptor family) { + private HTableDescriptor(final byte [] name, HColumnDescriptor[] families) { this.name = name.clone(); this.rootregion = Bytes.equals(name, HConstants.ROOT_TABLE_NAME); this.metaregion = true; - this.families.put(Bytes.mapKey(family.getName()), family); + for(HColumnDescriptor descriptor : families) { + this.families.put(Bytes.mapKey(descriptor.getName()), descriptor); + } } /** Index: src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java =================================================================== --- src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java (revision 662499) +++ src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java (working copy) @@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.HServerInfo; +import org.apache.hadoop.hbase.RegionHistorian; import org.apache.hadoop.hbase.io.BatchUpdate; import org.apache.hadoop.hbase.util.Bytes; @@ -81,6 +82,7 @@ b.put(COL_SERVER, Bytes.toBytes(serverAddress.toString())); b.put(COL_STARTCODE, startCode); server.batchUpdate(metaRegionName, b); + RegionHistorian.addRegionOpen(regionInfo, serverAddress); if (isMetaTable) { // It's a meta region. 
MetaRegion m = new MetaRegion(serverAddress, Index: src/java/org/apache/hadoop/hbase/master/RegionManager.java =================================================================== --- src/java/org/apache/hadoop/hbase/master/RegionManager.java (revision 662499) +++ src/java/org/apache/hadoop/hbase/master/RegionManager.java (working copy) @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HServerLoad; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.RegionHistorian; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.HMsg; @@ -258,6 +259,7 @@ Bytes.toString(regionInfo.getRegionName())+ " to server " + serverName); unassignedRegions.put(regionInfo, Long.valueOf(now)); + RegionHistorian.addRegionAssignment(regionInfo, serverName); returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_OPEN, regionInfo)); if (--nregions <= 0) { break; } @@ -383,6 +385,7 @@ Bytes.toString(regionInfo.getRegionName()) + " to the only server " + serverName); unassignedRegions.put(regionInfo, Long.valueOf(now)); + RegionHistorian.addRegionAssignment(regionInfo, serverName); returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_OPEN, regionInfo)); } }
Index: src/java/org/apache/hadoop/hbase/util/Migrate.java =================================================================== --- src/java/org/apache/hadoop/hbase/util/Migrate.java (revision 662499) +++ src/java/org/apache/hadoop/hbase/util/Migrate.java (working copy) @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.MasterNotRunningException; @@ -249,6 +250,7 @@ private void migrateToV2(FileStatus[] rootFiles) throws IOException { LOG.info("Checking to see if file system is at revision 2."); checkForUnrecoveredLogFiles(rootFiles); + addHistorianFamilyToMeta(); } private FileStatus[] getRootDirFiles() throws IOException { @@ -436,6 +438,15 @@ } } + private void addHistorianFamilyToMeta() throws IOException { + + utils.addColumn(HConstants.META_TABLE_NAME, new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN, + HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE, + false, false, Integer.MAX_VALUE, HConstants.FOREVER, null)); + LOG.info("Historian family added to .META."); + utils.shutdown(); + } + @SuppressWarnings("static-access") private int parseArgs(String[] args) { Options opts = new Options();
Index: src/webapps/master/regionhistorian.jsp =================================================================== --- src/webapps/master/regionhistorian.jsp (revision 0) +++ src/webapps/master/regionhistorian.jsp (revision 0) @@ -0,0 +1,42 @@ +<%@ page contentType="text/html;charset=UTF-8" + import="java.util.List" + import="org.apache.hadoop.hbase.RegionHistorian" + import="org.apache.hadoop.hbase.RegionHistorian.RegionHistoryInformation" + import="org.apache.hadoop.hbase.HConstants"%><% + String regionName = request.getParameter("regionname"); + List<RegionHistoryInformation> informations = RegionHistorian.getRegionHistory(regionName); +%>
+<html> +<head> +<title>Region in <%= regionName %></title> +</head> +<body>
+<h1>Region <%= regionName %></h1>
+<%if(informations != null && informations.size() > 0) { %>
+<table>
+<tr><th>Timestamp</th><th>Event</th><th>Description</th></tr>
+<% for( RegionHistoryInformation information : informations) {%>
+<tr><td><%= information.getTimestampAsString() %></td><td><%= information.getEvent() %></td><td><%= information.getDescription()%></td></tr>
+<% } %>
+</table>
+<p>
+The master is the source of the following events: creation, open, assignment.
+Regions are the source of the following events: split, compaction, flush.
+</p>
+<%} else {%>
+<p>
+This region is no longer available. It may be due to a split, a merge or a name change.
+</p>
+<%} %> + +</body> +</html> Index: src/webapps/master/table.jsp =================================================================== --- src/webapps/master/table.jsp (revision 662499) +++ src/webapps/master/table.jsp (working copy) @@ -45,7 +45,9 @@ for (MetaRegion meta: onlineRegions.values()) { int infoPort = serverToServerInfos.get(meta.getServer().getBindAddress()+":"+meta.getServer().getPort()).getInfoPort(); String url = "http://" + meta.getServer().getHostname() + ":" + infoPort + "/";%> -<%= meta.getRegionName() %><%= meta.getServer().getHostname() %>:<%= meta.getServer().getPort() %>-<%= meta.getStartKey() %>- +<%= Bytes.toString(meta.getRegionName()) %> + <%= meta.getServer().getHostname() %>:<%= meta.getServer().getPort() %> + -<%= meta.getStartKey() %>- <% } %> <%} else { %> @@ -56,11 +58,11 @@ if(regions != null && regions.size() > 0) { %> <%= tableHeader %> <% for(Map.Entry<HRegionInfo, HServerAddress> hriEntry : regions.entrySet()) { %> -<% System.out.println(serverToServerInfos.keySet().toArray()[0].toString()); - System.out.println(hriEntry.getValue().getHostname()+":"+hriEntry.getValue().getPort()); - int infoPort = serverToServerInfos.get(hriEntry.getValue().getBindAddress()+":"+hriEntry.getValue().getPort()).getInfoPort(); - String url = "http://" + hriEntry.getValue().getHostname().toString() + ":" + infoPort + "/"; %> -<%= hriEntry.getKey().getRegionNameAsString()%><%= hriEntry.getValue().getHostname() %>:<%= hriEntry.getValue().getPort() %> +<% int infoPort = serverToServerInfos.get(hriEntry.getValue().getBindAddress()+":"+hriEntry.getValue().getPort()).getInfoPort(); + String urlRegionHistorian = "/regionhistorian.jsp?regionname="+hriEntry.getKey().getRegionNameAsString(); + String urlRegionServer = "http://" + hriEntry.getValue().getHostname().toString() + ":" + infoPort + "/"; %> +<%= hriEntry.getKey().getRegionNameAsString()%> + <%= hriEntry.getValue().getHostname() %>:<%= hriEntry.getValue().getPort() %> <%= hriEntry.getKey().getEncodedName()%> <%= Bytes.toString(hriEntry.getKey().getStartKey())%> <%= Bytes.toString(hriEntry.getKey().getEndKey())%> <% } %>
Index: src/webapps/master/WEB-INF/web.xml =================================================================== --- src/webapps/master/WEB-INF/web.xml (revision 662499) +++ src/webapps/master/WEB-INF/web.xml (working copy) @@ -10,6 +10,11 @@ <servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name> <servlet-class>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-class> @@ -19,7 +24,17 @@ <servlet-class>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-class> </servlet> + + <servlet> + <servlet-name>org.apache.hadoop.hbase.generated.master.regionhistorian_jsp</servlet-name> + <servlet-class>org.apache.hadoop.hbase.generated.master.regionhistorian_jsp</servlet-class> + </servlet> <servlet-mapping> <servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name> <url-pattern>/master.jsp</url-pattern> </servlet-mapping> @@ -29,5 +44,10 @@ <url-pattern>/table.jsp</url-pattern> </servlet-mapping> + + <servlet-mapping> + <servlet-name>org.apache.hadoop.hbase.generated.master.regionhistorian_jsp</servlet-name> + <url-pattern>/regionhistorian.jsp</url-pattern> + </servlet-mapping> +
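For reference, a minimal usage sketch (not part of the patch) showing how the static RegionHistorian entry points added above fit together. The wrapper class and method names (RegionHistorianExample, recordOpenAndPrint) are illustrative only; the HRegionInfo and HServerAddress arguments are assumed to be supplied by existing master or region server code, as in the hunks above.

// Illustrative sketch only -- not part of the patch.
package org.apache.hadoop.hbase;

import java.util.List;

public class RegionHistorianExample {

  /**
   * Records an open event for the given region and prints every historian
   * entry accumulated so far in its .META. row.
   */
  public static void recordOpenAndPrint(HRegionInfo info, HServerAddress address) {
    // Writes a "historian:open" cell into the region's row in .META.
    RegionHistorian.addRegionOpen(info, address);

    // Reads back all historian:* columns; entries come back newest first
    // because RegionHistoryInformation.compareTo inverts Long.compareTo.
    List<RegionHistorian.RegionHistoryInformation> history =
        RegionHistorian.getRegionHistory(info.getRegionNameAsString());
    for (RegionHistorian.RegionHistoryInformation event : history) {
      System.out.println(event.getTimestampAsString() + " [" + event.getEvent()
          + "] " + event.getDescription());
    }
  }
}

The regionhistorian.jsp page registered in web.xml above renders the same list through the same getRegionHistory call.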