Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java (revision 1470253)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java (working copy)
@@ -59,6 +59,10 @@
     // usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves
     // beyond that limit 'to be safe'.
     long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 900000) + startWaiting;
+    // If a stale DataNode is chosen as the primary node, a short retry interval may lead to
+    // a DDoS. We provide the following config parameter so that users can choose the interval
+    // based on the setup of the underlying HDFS cluster.
+    int retryInterval = conf.getInt("hbase.lease.recovery.retry.interval", 10000);
     boolean recovered = false;
     int nbAttempt = 0;
     while (!recovered) {
@@ -91,7 +95,7 @@
           " - continuing without the lease, but we could have a data loss.");
       } else {
         try {
-          Thread.sleep(nbAttempt < 3 ? 500 : 1000);
+          Thread.sleep(nbAttempt < 3 ? 900 : retryInterval);
         } catch (InterruptedException ie) {
           InterruptedIOException iioe = new InterruptedIOException();
           iioe.initCause(ie);
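
For illustration, here is a minimal sketch of how an operator might tune the new "hbase.lease.recovery.retry.interval" setting from client/server configuration code. The class name LeaseRecoveryIntervalExample and the 30000 ms value are hypothetical and only show the mechanics; in practice the property would normally be set in hbase-site.xml.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class LeaseRecoveryIntervalExample {
      public static void main(String[] args) {
        // Start from the standard HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();

        // Hypothetical tuning: raise the interval between lease-recovery retries to
        // 30 seconds (the default introduced by this patch is 10000 ms). A longer
        // interval reduces the load placed on HDFS when a stale DataNode has been
        // chosen as the primary node for recovery.
        conf.setInt("hbase.lease.recovery.retry.interval", 30000);

        // The overall "hbase.lease.recovery.timeout" (900000 ms by default in the
        // surrounding code) still bounds how long recovery is attempted.
        System.out.println("retry interval = "
            + conf.getInt("hbase.lease.recovery.retry.interval", 10000) + " ms");
      }
    }

The trade-off is between how quickly a region server gives up waiting for a single recovery attempt and how much repeated recoverLease traffic the underlying HDFS cluster can absorb, so the value should be chosen based on that cluster's setup.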