Index: src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
===================================================================
--- src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon (revision 7615)
+++ src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon (working copy)
@@ -74,10 +74,13 @@
<a href="/logs/">Local logs</a>,
<a href="/stacks">Thread Dump</a>,
<a href="/logLevel">Log Level</a>,
-<a href="/dump">Debug dump</a>,
+<a href="/dump">Debug dump</a>
<%if HBaseConfiguration.isShowConfInServlet() %>
-<a href="/conf">HBase Configuration</a>
+,<a href="/conf">HBase Configuration</a>
</%if>
+<%if regionServer.isShowQueueDump() %>
+,<a href="/queuedump">Queue Dump</a>
+</%if>
Index: src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (revision 7615)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (working copy)
@@ -19,6 +19,8 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import java.util.Iterator;
+import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
@@ -142,6 +144,38 @@
: largeCompactions.getQueue().size())
+ ", split_queue=" + splits.getQueue().size();
}
+
+  public String dumpQueue() {
+    StringBuilder queueLists = new StringBuilder();
+
+    queueLists.append("\n\nLargeCompaction Queue:\n");
+    BlockingQueue<Runnable> lq = largeCompactions.getQueue();
+    Iterator<Runnable> it = lq.iterator();
+    while (it.hasNext()) {
+      queueLists.append(it.next().toString());
+      queueLists.append("\n");
+    }
+
+    if (smallCompactions != null) {
+      queueLists.append("\n\nSmallCompaction Queue:\n");
+      lq = smallCompactions.getQueue();
+      it = lq.iterator();
+      while (it.hasNext()) {
+        queueLists.append(it.next().toString());
+        queueLists.append("\n");
+      }
+    }
+
+    queueLists.append("\n\nSplit Queue:\n");
+    lq = splits.getQueue();
+    it = lq.iterator();
+    while (it.hasNext()) {
+      queueLists.append(it.next().toString());
+      queueLists.append("\n");
+    }
+
+    return queueLists.toString();
+  }
public synchronized boolean requestSplit(final HRegion r) {
// don't split regions that are blocking
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 7615)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (working copy)
@@ -1547,6 +1547,7 @@
this.infoServer = new InfoServer("regionserver", addr, port, false, this.conf);
this.infoServer.addServlet("status", "/rs-status", RSStatusServlet.class);
this.infoServer.addServlet("dump", "/dump", RSDumpServlet.class);
+ this.infoServer.addServlet("queuedump", "/queuedump", QueueDumpServlet.class);
this.infoServer.setAttribute(REGIONSERVER, this);
this.infoServer.start();
break;
@@ -3675,6 +3676,10 @@
HServerLoad hsl = buildServerLoad();
return hsl == null? null: hsl.getCoprocessors();
}
+
+  public boolean isShowQueueDump() {
+    return this.conf.getBoolean("hbase.regionserver.servlet.show.queuedump", false);
+  }
/**
* Register bean with platform management server
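Note: the /queuedump link is gated by the new "hbase.regionserver.servlet.show.queuedump" property read in isShowQueueDump() above. A minimal sketch of turning the flag on programmatically follows (illustrative only, not part of the patch; in a real deployment the property would normally be set in hbase-site.xml):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableQueueDumpExample {
  public static void main(String[] args) {
    // Standard HBase configuration; enable the queue dump servlet link.
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.regionserver.servlet.show.queuedump", true);
    System.out.println("queuedump link enabled: "
        + conf.getBoolean("hbase.regionserver.servlet.show.queuedump", false));
  }
}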
Index: src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java (revision 7615)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java (working copy)
@@ -466,6 +466,24 @@
}
}
+ @Override
+ public String toString() {
+ return "flush_queue="
+ + flushQueue.size();
+ }
+
+  public String dumpQueue() {
+    StringBuilder queueList = new StringBuilder();
+    queueList.append("\n\nFlush Queue:\n");
+    java.util.Iterator it = flushQueue.iterator();
+
+    while (it.hasNext()) {
+      queueList.append(it.next().toString()).append("\n");
+    }
+
+    return queueList.toString();
+  }
+
interface FlushQueueEntry extends Delayed {}
/**
Index: src/main/java/org/apache/hadoop/hbase/regionserver/QueueDumpServlet.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/QueueDumpServlet.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/QueueDumpServlet.java (working copy)
@@ -0,0 +1,51 @@
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintWriter;
+import java.util.Date;
+
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+public class QueueDumpServlet extends HttpServlet {
+  private static final long serialVersionUID = 1L;
+  private static final String LINE =
+    "===========================================================";
+
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response)
+      throws IOException {
+    HRegionServer hrs = (HRegionServer) getServletContext().getAttribute(
+        HRegionServer.REGIONSERVER);
+    assert hrs != null : "No RS in context!";
+
+    response.setContentType("text/plain");
+    OutputStream os = response.getOutputStream();
+    PrintWriter out = new PrintWriter(os);
+
+    out.println("Queue status for " + hrs.getServerName()
+        + " as of " + new Date());
+
+    // 1. Print out the compaction/split queue
+    out.println("\n\nCompaction/Split Queue summary");
+    out.println(LINE);
+    out.println(hrs.compactSplitThread.toString());
+
+    out.println("\n\nCompaction/Split Queue dump");
+    out.println(LINE);
+    out.println(hrs.compactSplitThread.dumpQueue());
+
+    // 2. Print out the flush queue
+    out.println("\n\nFlush Queue summary");
+    out.println(LINE);
+    out.println(hrs.cacheFlusher.toString());
+
+    out.println("\n\nFlush Queue dump");
+    out.println(LINE);
+    out.println(hrs.cacheFlusher.dumpQueue());
+
+    out.flush();
+  }
+}
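Once the servlet is registered at /queuedump (see the HRegionServer hunk above), the plain-text dump can be fetched from the region server's info port. A minimal client sketch, assuming a hypothetical host name and the usual default info port of 60030:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class QueueDumpClientExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical region server host; 60030 is the usual default info port.
    URL url = new URL("http://rs-host.example.com:60030/queuedump");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // compaction/split and flush queue dump text
      }
    } finally {
      conn.disconnect();
    }
  }
}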