From 6de7a2c6509e52c6de0d655951b1d0fb0d5fb440 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Tue, 9 Jan 2018 12:49:39 -0800 Subject: [PATCH] HBASE-19694 The initialization order for a fresh cluster is incorrect Includes HBASE-19753... Become active Master before calling the super class's run method. Have the wait-on-becoming-active-Master be in-line rather than off in a background thread (i.e. undo running thread in startActiveMasterManager) Purge the fragile HBASE-16367 hackery that attempted to fix this issue previously by adding a latch to try and hold up superclass RegionServer until cluster id set by subclass Master. --- .../hadoop/hbase/zookeeper/ReadOnlyZKClient.java | 4 +- .../hadoop/hbase/zookeeper/ZooKeeperHelper.java | 43 ++++++++ .../org/apache/hadoop/hbase/master/HMaster.java | 122 +++++++++++---------- .../hadoop/hbase/regionserver/HRegionServer.java | 7 -- .../apache/hadoop/hbase/util/JVMClusterUtil.java | 7 +- .../hadoop/hbase/master/TestTableStateManager.java | 2 +- .../regionserver/TestPerColumnFamilyFlush.java | 1 - hbase-zookeeper/pom.xml | 3 + .../hadoop/hbase/zookeeper/ZKMainServer.java | 9 +- .../hbase/zookeeper/TestReadOnlyZKClient.java | 9 +- .../hadoop/hbase/zookeeper/TestZKMainServer.java | 3 +- .../hadoop/hbase/zookeeper/TestZKNodeTracker.java | 6 +- .../src/test/resources/log4j.properties | 2 +- 13 files changed, 133 insertions(+), 85 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java index 24c7112a81..192a19fc2f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java @@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.Code; @@ -284,8 +285,7 @@ public final class ReadOnlyZKClient implements Closeable { private ZooKeeper getZk() throws IOException { // may be closed when session expired if (zookeeper == null || !zookeeper.getState().isAlive()) { - zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> { - }); + zookeeper = ZooKeeperHelper.getConnectedZooKeeper(connectString, sessionTimeoutMs); } return zookeeper; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java new file mode 100644 index 0000000000..7238adb4b6 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.zookeeper; + +import org.apache.hadoop.hbase.util.Threads; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.zookeeper.ZooKeeper; + +import java.io.IOException; + +/** + * Methods that help working with ZooKeeper + */ +@InterfaceAudience.Private +public class ZooKeeperHelper { + /** + * Get a ZooKeeper instance and wait until it connected before returning. + */ + public static ZooKeeper getConnectedZooKeeper(String connectString, int sessionTimeoutMs) + throws IOException { + ZooKeeper zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> {}); + // Make sure we are connected before we hand it back. + while(!zookeeper.getState().isConnected()) { + Threads.sleep(1); + } + return zookeeper; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 945f54d7d7..ee7cd18667 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -40,7 +40,6 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; @@ -524,12 +523,9 @@ public class HMaster extends HRegionServer implements MasterServices { // Some unit tests don't need a cluster, so no zookeeper at all if (!conf.getBoolean("hbase.testing.nocluster", false)) { - setInitLatch(new CountDownLatch(1)); - activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this); - int infoPort = putUpJettyServer(); - startActiveMasterManager(infoPort); + this.activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this); } else { - activeMasterManager = null; + this.activeMasterManager = null; } } catch (Throwable t) { // Make sure we log the exception. HMaster is often started via reflection and the @@ -539,10 +535,27 @@ public class HMaster extends HRegionServer implements MasterServices { } } - // Main run loop. Calls through to the regionserver run loop. + // Main run loop. Calls through to the regionserver run loop AFTER becoming active Master; will + // block in here until then. @Override public void run() { try { + if (!conf.getBoolean("hbase.testing.nocluster", false)) { + try { + int infoPort = putUpJettyServer(); + startActiveMasterManager(infoPort); + } catch (Throwable t) { + // Make sure we log the exception. + String error = "Failed to become Active Master"; + LOG.error(error, t); + // Abort should have been called already. + if (!isAborted()) { + abort(error, t); + } + } + } + // Fall in here even if we have been aborted. Need to run the shutdown services and + // the super run call will do this for us. super.run(); } finally { if (this.clusterSchemaService != null) { @@ -757,9 +770,9 @@ public class HMaster extends HRegionServer implements MasterServices { private void finishActiveMasterInitialization(MonitoredTask status) throws IOException, InterruptedException, KeeperException, CoordinatedStateException { - activeMaster = true; Thread zombieDetector = new Thread(new InitializationMonitor(this), "ActiveMasterInitializationMonitor-" + System.currentTimeMillis()); + zombieDetector.setDaemon(true); zombieDetector.start(); /* @@ -783,10 +796,9 @@ public class HMaster extends HRegionServer implements MasterServices { this.tableDescriptors.getAll(); } - // publish cluster ID + // Publish cluster ID status.setStatus("Publishing Cluster ID in ZooKeeper"); ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId()); - this.initLatch.countDown(); this.serverManager = createServerManager(this); @@ -795,6 +807,10 @@ public class HMaster extends HRegionServer implements MasterServices { status.setStatus("Initializing ZK system trackers"); initializeZKBasedSystemTrackers(); + // Set Master as active now after we've setup zk with stuff like whether cluster is up or not. + // RegionServers won't come up if the cluster status is not up. + this.activeMaster = true; + // This is for backwards compatibility // See HBASE-11393 status.setStatus("Update TableCFs node in ZNode"); @@ -818,7 +834,9 @@ public class HMaster extends HRegionServer implements MasterServices { // Wake up this server to check in sleeper.skipSleepCycle(); - // Wait for region servers to report in + // Wait for region servers to report in. + // With this as part of master initialization, it precludes our being able to start a single + // server that is both Master and RegionServer. Needs more thought. TODO. String statusStr = "Wait for region servers to report in"; status.setStatus(statusStr); LOG.info(Objects.toString(status)); @@ -1985,57 +2003,43 @@ public class HMaster extends HRegionServer implements MasterServices { * this node for us since it is ephemeral. */ LOG.info("Adding backup master ZNode " + backupZNode); - if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode, - serverName, infoPort)) { + if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode, serverName, infoPort)) { LOG.warn("Failed create of " + backupZNode + " by " + serverName); } - - activeMasterManager.setInfoPort(infoPort); - // Start a thread to try to become the active master, so we won't block here - Threads.setDaemonThreadRunning(new Thread(new Runnable() { - @Override - public void run() { - int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, - HConstants.DEFAULT_ZK_SESSION_TIMEOUT); - // If we're a backup master, stall until a primary to writes his address - if (conf.getBoolean(HConstants.MASTER_TYPE_BACKUP, - HConstants.DEFAULT_MASTER_TYPE_BACKUP)) { - LOG.debug("HMaster started in backup mode. " - + "Stalling until master znode is written."); - // This will only be a minute or so while the cluster starts up, - // so don't worry about setting watches on the parent znode - while (!activeMasterManager.hasActiveMaster()) { - LOG.debug("Waiting for master address ZNode to be written " - + "(Also watching cluster state node)"); - Threads.sleep(timeout); - } - } - MonitoredTask status = TaskMonitor.get().createStatus("Master startup"); - status.setDescription("Master startup"); - try { - if (activeMasterManager.blockUntilBecomingActiveMaster(timeout, status)) { - finishActiveMasterInitialization(status); - } - } catch (Throwable t) { - status.setStatus("Failed to become active: " + t.getMessage()); - LOG.error(HBaseMarkers.FATAL, "Failed to become active master", t); - // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility - if (t instanceof NoClassDefFoundError && - t.getMessage() - .contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction")) { - // improved error message for this special case - abort("HBase is having a problem with its Hadoop jars. You may need to " - + "recompile HBase against Hadoop version " - + org.apache.hadoop.util.VersionInfo.getVersion() - + " or change your hadoop jars to start properly", t); - } else { - abort("Unhandled exception. Starting shutdown.", t); - } - } finally { - status.cleanup(); - } + this.activeMasterManager.setInfoPort(infoPort); + int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT); + // If we're a backup master, stall until a primary to write this address + if (conf.getBoolean(HConstants.MASTER_TYPE_BACKUP, HConstants.DEFAULT_MASTER_TYPE_BACKUP)) { + LOG.debug("HMaster started in backup mode. Stalling until master znode is written."); + // This will only be a minute or so while the cluster starts up, + // so don't worry about setting watches on the parent znode + while (!activeMasterManager.hasActiveMaster()) { + LOG.debug("Waiting for master address and cluster state znode to be written."); + Threads.sleep(timeout); + } + } + MonitoredTask status = TaskMonitor.get().createStatus("Master startup"); + status.setDescription("Master startup"); + try { + if (activeMasterManager.blockUntilBecomingActiveMaster(timeout, status)) { + finishActiveMasterInitialization(status); + } + } catch (Throwable t) { + status.setStatus("Failed to become active: " + t.getMessage()); + LOG.error(HBaseMarkers.FATAL, "Failed to become active master", t); + // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility + if (t instanceof NoClassDefFoundError && t.getMessage(). + contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction")) { + // improved error message for this special case + abort("HBase is having a problem with its Hadoop jars. You may need to recompile " + + "HBase against Hadoop version " + org.apache.hadoop.util.VersionInfo.getVersion() + + " or change your hadoop jars to start properly", t); + } else { + abort("Unhandled exception. Starting shutdown.", t); } - }, getServerName().toShortString() + ".masterManager")); + } finally { + status.cleanup(); + } } private void checkCompression(final TableDescriptor htd) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 63451c600b..c31ca1e56f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -243,7 +243,6 @@ public class HRegionServer extends HasThread implements protected MemStoreFlusher cacheFlusher; protected HeapMemoryManager hMemManager; - protected CountDownLatch initLatch = null; /** * Cluster connection to be shared by services. @@ -696,10 +695,6 @@ public class HRegionServer extends HasThread implements return null; } - protected void setInitLatch(CountDownLatch latch) { - this.initLatch = latch; - } - /* * Returns true if configured hostname should be used */ @@ -854,8 +849,6 @@ public class HRegionServer extends HasThread implements // when ready. blockAndCheckIfStopped(this.clusterStatusTracker); - doLatch(this.initLatch); - // Retrieve clusterId // Since cluster status is now up // ID should have already been set by HMaster diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java index 00410af2fa..73d66c2260 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java @@ -187,7 +187,9 @@ public class JVMClusterUtil { int startTimeout = configuration != null ? Integer.parseInt( configuration.get("hbase.master.start.timeout.localHBaseCluster", "30000")) : 30000; if (System.currentTimeMillis() > startTime + startTimeout) { - throw new RuntimeException(String.format("Master not active after %s seconds", startTimeout)); + String msg = String.format("Master not active after %s seconds", startTimeout); + Threads.printThreadInfo(System.out, "Thread dump because " + msg); + throw new RuntimeException(msg); } } @@ -216,8 +218,7 @@ public class JVMClusterUtil { } if (System.currentTimeMillis() > startTime + maxwait) { String msg = "Master not initialized after " + maxwait + "ms seconds"; - Threads.printThreadInfo(System.out, - "Thread dump because: " + msg); + Threads.printThreadInfo(System.out, "Thread dump because: " + msg); throw new RuntimeException(msg); } try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java index 1f61ee7d34..81c1dfcaa8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java @@ -58,7 +58,7 @@ public class TestTableStateManager { @Test(timeout = 60000) public void testUpgradeFromZk() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); - TEST_UTIL.startMiniCluster(2, 1); + TEST_UTIL.startMiniCluster(1, 1); TEST_UTIL.shutdownMiniHBaseCluster(); ZKWatcher watcher = TEST_UTIL.getZooKeeperWatcher(); setTableStateInZK(watcher, tableName, ZooKeeperProtos.DeprecatedTableState.State.DISABLED); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java index 2e3ff7ec08..3b5609e53b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java @@ -236,7 +236,6 @@ public class TestPerColumnFamilyFlush { // CF3 shouldn't have been touched. assertEquals(cf3MemstoreSize, oldCF3MemstoreSize); assertEquals(totalMemstoreSize, cf3MemstoreSize.getDataSize()); - assertEquals(smallestSeqInRegionCurrentMemstore, smallestSeqCF3); // What happens when we hit the memstore limit, but we are not able to find // any Column Family above the threshold? diff --git a/hbase-zookeeper/pom.xml b/hbase-zookeeper/pom.xml index 67d373013f..aff824bcb0 100644 --- a/hbase-zookeeper/pom.xml +++ b/hbase-zookeeper/pom.xml @@ -96,6 +96,9 @@ maven-surefire-plugin + + src/test/resources + listener diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java index 3a96015ffa..d9da0e597e 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java @@ -53,12 +53,15 @@ public class ZKMainServer { // Make sure we are connected before we proceed. Can take a while on some systems. If we // run the command without being connected, we get ConnectionLoss KeeperErrorConnection... Stopwatch stopWatch = Stopwatch.createStarted(); + // Make it 30seconds. We dont' have a config in this context and zk doesn't have + // a timeout until after connection. 30000ms is default for zk. + long timeout = 30000; while (!this.zk.getState().isConnected()) { Thread.sleep(1); - if (stopWatch.elapsed(TimeUnit.SECONDS) > 10) { + if (stopWatch.elapsed(TimeUnit.MILLISECONDS) > timeout) { throw new InterruptedException("Failed connect after waiting " + - stopWatch.elapsed(TimeUnit.SECONDS) + "seconds; state=" + this.zk.getState() + - "; " + this.zk); + stopWatch.elapsed(TimeUnit.MILLISECONDS) + "ms (zk session timeout); " + + this.zk); } } } diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java index 1f83536393..934a8391a4 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ZKTests; +import org.apache.hadoop.hbase.util.Threads; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.Code; @@ -67,8 +68,8 @@ public class TestReadOnlyZKClient { public static void setUp() throws Exception { PORT = UTIL.startMiniZKCluster().getClientPort(); - ZooKeeper zk = new ZooKeeper("localhost:" + PORT, 10000, e -> { - }); + ZooKeeper zk = ZooKeeperHelper. + getConnectedZooKeeper("localhost:" + PORT, 10000); DATA = new byte[10]; ThreadLocalRandom.current().nextBytes(DATA); zk.create(PATH, DATA, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); @@ -137,8 +138,8 @@ public class TestReadOnlyZKClient { UTIL.getZkCluster().getZooKeeperServers().get(0).closeSession(sessionId); // should not reach keep alive so still the same instance assertSame(zk, RO_ZK.getZooKeeper()); - - assertArrayEquals(DATA, RO_ZK.get(PATH).get()); + byte [] got = RO_ZK.get(PATH).get(); + assertArrayEquals(DATA, got); assertNotNull(RO_ZK.getZooKeeper()); assertNotSame(zk, RO_ZK.getZooKeeper()); assertNotEquals(sessionId, RO_ZK.getZooKeeper().getSessionId()); diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMainServer.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMainServer.java index bc1c240e59..d8db8aedac 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMainServer.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMainServer.java @@ -70,7 +70,8 @@ public class TestZKMainServer { public void testCommandLineWorks() throws Exception { System.setSecurityManager(new NoExitSecurityManager()); HBaseZKTestingUtility htu = new HBaseZKTestingUtility(); - htu.getConfiguration().setInt(HConstants.ZK_SESSION_TIMEOUT, 1000); + // Make it long so for sure succeeds. + htu.getConfiguration().setInt(HConstants.ZK_SESSION_TIMEOUT, 30000); htu.startMiniZKCluster(); try { ZKWatcher zkw = htu.getZooKeeperWatcher(); diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKNodeTracker.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKNodeTracker.java index 3778ca0119..9afa9d1595 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKNodeTracker.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKNodeTracker.java @@ -129,9 +129,9 @@ public class TestZKNodeTracker { // Create a completely separate zk connection for test triggers and avoid // any weird watcher interactions from the test - final ZooKeeper zkconn = - new ZooKeeper(ZKConfig.getZKQuorumServersString(TEST_UTIL.getConfiguration()), 60000, e -> { - }); + final ZooKeeper zkconn = ZooKeeperHelper. + getConnectedZooKeeper(ZKConfig.getZKQuorumServersString(TEST_UTIL.getConfiguration()), + 60000); // Add the node with data one zkconn.create(node, dataOne, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); diff --git a/hbase-zookeeper/src/test/resources/log4j.properties b/hbase-zookeeper/src/test/resources/log4j.properties index c322699ced..f599ea636e 100644 --- a/hbase-zookeeper/src/test/resources/log4j.properties +++ b/hbase-zookeeper/src/test/resources/log4j.properties @@ -55,7 +55,7 @@ log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG log4j.logger.org.apache.hadoop=WARN -log4j.logger.org.apache.zookeeper=ERROR +log4j.logger.org.apache.zookeeper=DEBUG log4j.logger.org.apache.hadoop.hbase=DEBUG #These settings are workarounds against spurious logs from the minicluster. -- 2.11.0 (Apple Git-81)