Index: src/java/org/apache/hadoop/hbase/regionserver/LogFlusher.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/LogFlusher.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/regionserver/LogFlusher.java	(working copy)
@@ -48,6 +48,7 @@
     }
   }
 
+  @Override
   protected void chore() {
     synchronized (log) {
       HLog hlog = log.get();
Index: src/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java	(working copy)
@@ -24,8 +24,9 @@
 /**
  * Thrown if request for nonexistent column family.
  */
-@SuppressWarnings("serial")
 public class NoSuchColumnFamilyException extends DoNotRetryIOException {
+  private static final long serialVersionUID = -6569952730832331274L;
+
   /** default constructor */
   public NoSuchColumnFamilyException() {
     super();
Index: src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java	(working copy)
@@ -1588,8 +1588,7 @@
   }
 
   public void batchUpdate(final byte [] regionName, BatchUpdate b,
-      @SuppressWarnings("unused") long lockId)
-  throws IOException {
+      long lockId) throws IOException {
     if (b.getRow() == null)
       throw new IllegalArgumentException("update has null row");
 
@@ -2132,7 +2131,7 @@
   }
 
   public long getProtocolVersion(final String protocol,
-      @SuppressWarnings("unused") final long clientVersion)
+      final long clientVersion)
   throws IOException {
     if (protocol.equals(HRegionInterface.class.getName())) {
       return HBaseRPCProtocolVersion.versionID;
Index: src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java	(working copy)
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.regionserver.metrics;
 
 import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryMXBean;
 import java.lang.management.MemoryUsage;
 
 import org.apache.commons.logging.Log;
@@ -96,7 +95,7 @@
    * Since this object is a registered updater, this method will be called
    * periodically, e.g. every 5 seconds.
    */
-  public void doUpdates(@SuppressWarnings("unused") MetricsContext unused) {
+  public void doUpdates(MetricsContext unused) {
     synchronized (this) {
       this.stores.pushMetric(this.metricsRecord);
       this.storefiles.pushMetric(this.metricsRecord);
Index: src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalHLogManager.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalHLogManager.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalHLogManager.java	(working copy)
@@ -49,8 +49,6 @@
 /**
  * Responsible for writing and reading (recovering) transactional information
  * to/from the HLog.
- *
- *
  */
 class TransactionalHLogManager {
Index: src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegion.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegion.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegion.java	(working copy)
@@ -83,7 +83,7 @@
   private static final int DEFAULT_OLD_TRANSACTION_FLUSH = 100; // Do a flush if we have this many old transactions..
 
-  private static final Log LOG = LogFactory.getLog(TransactionalRegion.class);
+  static final Log LOG = LogFactory.getLog(TransactionalRegion.class);
 
   // Collection of active transactions (PENDING) keyed by id.
   private Map transactionsById = new HashMap();
Index: src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java	(working copy)
@@ -33,7 +33,7 @@
  *
  * Some Transactional edits (START, COMMIT, ABORT) will not have an associated row.
  */
-public class HLogKey implements WritableComparable {
+public class HLogKey implements WritableComparable<HLogKey> {
   private byte [] regionName;
   private byte [] tablename;
   private byte [] row;
@@ -94,7 +94,7 @@
   @Override
   public boolean equals(Object obj) {
-    return compareTo(obj) == 0;
+    return compareTo((HLogKey)obj) == 0;
   }
 
   @Override
@@ -109,19 +109,18 @@
   //
   // Comparable
   //
 
-  public int compareTo(Object o) {
-    HLogKey other = (HLogKey) o;
-    int result = Bytes.compareTo(this.regionName, other.regionName);
+  public int compareTo(HLogKey o) {
+    int result = Bytes.compareTo(this.regionName, o.regionName);
 
     if(result == 0) {
-      result = Bytes.compareTo(this.row, other.row);
+      result = Bytes.compareTo(this.row, o.row);
 
       if(result == 0) {
-        if (this.logSeqNum < other.logSeqNum) {
+        if (this.logSeqNum < o.logSeqNum) {
           result = -1;
-        } else if (this.logSeqNum > other.logSeqNum) {
+        } else if (this.logSeqNum > o.logSeqNum) {
           result = 1;
         }
       }
Index: src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/Memcache.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/regionserver/Memcache.java	(working copy)
@@ -258,7 +258,6 @@
    * @param b
    * @return Return lowest of a or b or null if both a and b are null
    */
-  @SuppressWarnings("unchecked")
   private byte [] getLowest(final byte [] a,
     final byte [] b) {
     if (a == null) {
Index: src/java/org/apache/hadoop/hbase/regionserver/HLog.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/HLog.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/regionserver/HLog.java	(working copy)
@@ -49,7 +49,6 @@
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.SequenceFile.Reader;
Index: src/java/org/apache/hadoop/hbase/regionserver/tableindexed/IndexedRegionServer.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/tableindexed/IndexedRegionServer.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/regionserver/tableindexed/IndexedRegionServer.java	(working copy)
@@ -27,8 +27,6 @@
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ipc.IndexedRegionInterface;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion;
 import org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegionServer;
 import org.apache.hadoop.util.Progressable;
Index: src/java/org/apache/hadoop/hbase/rest/Dispatcher.java
===================================================================
--- src/java/org/apache/hadoop/hbase/rest/Dispatcher.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/rest/Dispatcher.java	(working copy)
@@ -57,9 +57,8 @@
  *
  * @see Hbase REST Specification
  */
-@SuppressWarnings("serial")
-public class Dispatcher extends javax.servlet.http.HttpServlet
-implements javax.servlet.Servlet {
+public class Dispatcher extends javax.servlet.http.HttpServlet {
+  private static final long serialVersionUID = 1221888754510962313L;
   @SuppressWarnings("unused")
   private static final Log LOG = LogFactory.getLog(Dispatcher.class.getName());
   private MetaHandler metaHandler;
@@ -125,23 +124,22 @@
       // if it was a root request, it must be a create table request
       tableHandler.doPost(request, response, pathSegments);
       return;
-    } else {
-      // there should be at least two path segments (table name and row or
-      // scanner or disable/enable operation)
-      if (pathSegments.length >= 2 && pathSegments[0].length() > 0) {
-        if (pathSegments[1].toLowerCase().equals(SCANNER)
-            && pathSegments.length >= 2) {
-          scannerHandler.doPost(request, response, pathSegments);
-          return;
-        } else if (pathSegments[1].toLowerCase().equals(ROW)
-            && pathSegments.length >= 3) {
-          rowHandler.doPost(request, response, pathSegments);
-          return;
-        } else if ((pathSegments[1].toLowerCase().equals(TableHandler.DISABLE) || pathSegments[1].toLowerCase().equals(TableHandler.ENABLE))
-            && pathSegments.length == 2) {
-          tableHandler.doPost(request, response, pathSegments);
-          return;
-        }
+    }
+    // there should be at least two path segments (table name and row or
+    // scanner or disable/enable operation)
+    if (pathSegments.length >= 2 && pathSegments[0].length() > 0) {
+      if (pathSegments[1].toLowerCase().equals(SCANNER)
+          && pathSegments.length >= 2) {
+        scannerHandler.doPost(request, response, pathSegments);
+        return;
+      } else if (pathSegments[1].toLowerCase().equals(ROW)
+          && pathSegments.length >= 3) {
+        rowHandler.doPost(request, response, pathSegments);
+        return;
+      } else if ((pathSegments[1].toLowerCase().equals(TableHandler.DISABLE) || pathSegments[1].toLowerCase().equals(TableHandler.ENABLE))
+          && pathSegments.length == 2) {
+        tableHandler.doPost(request, response, pathSegments);
+        return;
       }
     }
Index: src/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java
===================================================================
--- src/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java	(working copy)
@@ -62,7 +62,7 @@
    * Since this object is a registered updater, this method will be called
    * periodically, e.g. every 5 seconds.
    */
-  public void doUpdates(@SuppressWarnings("unused") MetricsContext unused) {
+  public void doUpdates(MetricsContext unused) {
     synchronized (this) {
       synchronized(this.cluster_requests) {
         this.cluster_requests.pushMetric(metricsRecord);
Index: src/java/org/apache/hadoop/hbase/rest/RowHandler.java
===================================================================
--- src/java/org/apache/hadoop/hbase/rest/RowHandler.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/rest/RowHandler.java	(working copy)
@@ -28,8 +28,7 @@
 
 public class RowHandler extends GenericHandler {
 
-  public RowHandler(HBaseConfiguration conf, HBaseAdmin admin)
-  throws ServletException {
+  public RowHandler(HBaseConfiguration conf, HBaseAdmin admin) {
     super(conf, admin);
   }
 
@@ -34,7 +33,7 @@
   }
 
   public void doGet(HttpServletRequest request, HttpServletResponse response,
-    String[] pathSegments) throws ServletException, IOException {
+    String[] pathSegments) throws IOException {
     HTable table = getTable(pathSegments[0]);
     if (pathSegments[1].toLowerCase().equals(ROW)) {
       // get a row
@@ -56,7 +55,7 @@
 
   public void doDelete(HttpServletRequest request,
     HttpServletResponse response, String[] pathSegments)
-  throws ServletException, IOException {
+  throws IOException {
     deleteRow(request, response, pathSegments);
   }
 
@@ -116,7 +115,7 @@
       }
     }
 
-    if (result == null || result.size() == 0) {
+    if (result.size() == 0) {
       doNotFound(response, "Row not found!");
     } else {
       switch (ContentType.getContentType(request.getHeader(ACCEPT))) {
@@ -314,7 +313,7 @@
    */
   private void deleteRow(final HttpServletRequest request,
     final HttpServletResponse response, final String [] pathSegments)
-  throws IOException, ServletException {
+  throws IOException {
     // grab the table we're operating on
     HTable table = getTable(getTableName(pathSegments));
Index: src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java
===================================================================
--- src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java	(working copy)
@@ -26,7 +26,6 @@
 import java.util.List;
 import java.util.Map;
 
-import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
@@ -37,7 +36,6 @@
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JenkinsHash;
-import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
 import org.znerd.xmlenc.XMLOutputter;
 
@@ -47,8 +45,7 @@
 public class ScannerHandler extends GenericHandler {
   private static final String ROWS = "rows";
 
-  public ScannerHandler(HBaseConfiguration conf, HBaseAdmin admin)
-  throws ServletException{
+  public ScannerHandler(HBaseConfiguration conf, HBaseAdmin admin){
     super(conf, admin);
   }
 
@@ -72,9 +69,8 @@
         nextRows.add(result);
       }
       return nextRows.size() > 0;
-    } else {
-      return true;
     }
+    return true;
   }
 
   /**
@@ -99,8 +95,7 @@
     new HashMap();
 
   public void doGet(HttpServletRequest request, HttpServletResponse response,
-    String[] pathSegments)
-  throws ServletException, IOException {
+    String[] pathSegments) throws IOException {
     doMethodNotAllowed(response, "GET to a scanner not supported.");
   }
 
@@ -106,7 +101,7 @@
 
   public void doPost(HttpServletRequest request, HttpServletResponse response,
     String[] pathSegments)
-  throws ServletException, IOException {
+  throws IOException {
     if (pathSegments.length == 2) {
       // trying to create a scanner
       openScanner(request, response, pathSegments);
@@ -121,8 +116,7 @@
   }
 
   public void doPut(HttpServletRequest request, HttpServletResponse response,
-    String[] pathSegments)
-  throws ServletException, IOException {
+    String[] pathSegments) throws IOException {
     doPost(request, response, pathSegments);
   }
 
@@ -127,8 +121,7 @@
   }
 
   public void doDelete(HttpServletRequest request, HttpServletResponse response,
-    String[] pathSegments)
-  throws ServletException, IOException {
+    String[] pathSegments) throws IOException {
     deleteScanner(response, pathSegments[2]);
   }
 
@@ -209,6 +202,7 @@
     outputter.getWriter().close();
   }
 
+  //TODO Remove it?!
 //  private void outputScannerEntryMime(final HttpServletResponse response,
 //    final ScannerRecord sr)
 //  throws IOException {
@@ -261,7 +255,7 @@
    */
   private void openScanner(final HttpServletRequest request,
     final HttpServletResponse response, final String [] pathSegments)
-  throws IOException, ServletException {
+  throws IOException {
     // get the table
     HTable table = getTable(getTableName(pathSegments));
 
@@ -326,7 +320,7 @@
    */
   private void deleteScanner(final HttpServletResponse response,
     final String scannerid)
-  throws IOException, ServletException {
+  throws IOException {
     ScannerRecord sr = this.scanners.remove(scannerid);
     if (sr == null) {
       doNotFound(response, "No such scanner");
Index: src/java/org/apache/hadoop/hbase/rest/MetaHandler.java
===================================================================
--- src/java/org/apache/hadoop/hbase/rest/MetaHandler.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/rest/MetaHandler.java	(working copy)
@@ -22,7 +22,6 @@
 import java.io.IOException;
 import java.io.PrintWriter;
 
-import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
@@ -39,8 +38,7 @@
  */
 public class MetaHandler extends GenericHandler {
 
-  public MetaHandler(HBaseConfiguration conf, HBaseAdmin admin)
-  throws ServletException{
+  public MetaHandler(HBaseConfiguration conf, HBaseAdmin admin){
     super(conf, admin);
   }
 
@@ -47,7 +45,7 @@
 
   public void doGet(HttpServletRequest request, HttpServletResponse response,
     String[] pathSegments)
-  throws ServletException, IOException {
+  throws IOException {
     getTables(request, response);
   }
 
@@ -53,7 +51,7 @@
 
   public void doPost(HttpServletRequest request, HttpServletResponse response,
     String[] pathSegments)
-  throws ServletException, IOException {
+  throws IOException {
     doMethodNotAllowed(response, "POST not allowed at /");
   }
 
@@ -59,7 +57,7 @@
 
   public void doPut(HttpServletRequest request, HttpServletResponse response,
     String[] pathSegments)
-  throws ServletException, IOException {
+  throws IOException {
     doMethodNotAllowed(response, "PUT not allowed at /");
   }
 
@@ -65,7 +63,7 @@
 
   public void doDelete(HttpServletRequest request, HttpServletResponse response,
     String[] pathSegments)
-  throws ServletException, IOException {
+  throws IOException {
     doMethodNotAllowed(response, "DELETE not allowed at /");
   }
Index: src/java/org/apache/hadoop/hbase/rest/TableHandler.java
===================================================================
--- src/java/org/apache/hadoop/hbase/rest/TableHandler.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/rest/TableHandler.java	(working copy)
@@ -51,8 +51,7 @@
   public static final String DISABLE = "disable";
   public static final String ENABLE = "enable";
 
-  public TableHandler(HBaseConfiguration conf, HBaseAdmin admin)
-  throws ServletException{
+  public TableHandler(HBaseConfiguration conf, HBaseAdmin admin){
     super(conf, admin);
   }
 
@@ -57,8 +56,7 @@
   }
 
   public void doGet(HttpServletRequest request, HttpServletResponse response,
-    String[] pathSegments)
-  throws ServletException, IOException {
+    String[] pathSegments) throws IOException {
     // if it's just table name, return the metadata
     if (pathSegments.length == 1) {
       getTableMetadata(request, response, pathSegments[0]);
@@ -101,7 +99,7 @@
 
   public void doDelete(HttpServletRequest request, HttpServletResponse response,
     String[] pathSegments)
-  throws ServletException, IOException {
+  throws ServletException {
     deleteTable(request, response, pathSegments);
   }
Index: src/java/org/apache/hadoop/hbase/rest/GenericHandler.java
===================================================================
--- src/java/org/apache/hadoop/hbase/rest/GenericHandler.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/rest/GenericHandler.java	(working copy)
@@ -256,9 +256,8 @@
         }
         if (limit < 1) {
           break;
-        } else {
-          --limit;
         }
+        --limit;
         outputter.startTag(COLUMN);
         doElement(outputter, "name",
           org.apache.hadoop.hbase.util.Base64.encodeBytes(e.getKey()));
@@ -294,9 +293,8 @@
       }
       if (limit < 1) {
         break;
-      } else {
-        --limit;
       }
+      --limit;
       for (Cell c : e.getValue()) {
         outputter.startTag(COLUMN);
         doElement(outputter, "name",
@@ -335,8 +333,9 @@
   }
 
   protected String makeColumnName(String column) {
-    if (column.indexOf(':') == -1)
-      column += ':';
-    return column;
+    String columnName = column;
+    if (columnName.indexOf(':') == -1)
+      columnName += ':';
+    return columnName;
   }
 }
Index: src/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java
===================================================================
--- src/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java	(revision 736069)
+++ src/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java	(working copy)
@@ -46,6 +46,7 @@
     this.sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
   }
 
+  @Override
   public void init(String contextName, ContextFactory factory) {
     super.init(contextName, factory);
     String fileName = getAttribute(FILE_NAME_PROPERTY);
@@ -54,6 +55,7 @@
     }
   }
 
+  @Override
   public void startMonitoring() throws IOException {
     if (file == null) {
       writer = new PrintWriter(new BufferedOutputStream(System.out));
@@ -63,6 +65,7 @@
     super.startMonitoring();
   }
 
+  @Override
   public void stopMonitoring() {
     super.stopMonitoring();
     if (writer != null) {
@@ -75,6 +78,7 @@
     return this.sdf.format(new Date());
   }
 
+  @Override
   public void emitRecord(String contextName, String recordName,
       OutputRecord outRec) {
     writer.print(iso8601());
@@ -100,6 +104,7 @@
     writer.println();
   }
 
+  @Override
   public void flush() {
     writer.flush();
   }