diff --git a/security/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/security/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
index 4870bb1..7a1b64d 100644
--- a/security/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
+++ b/security/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
@@ -56,11 +56,13 @@ public class ZKPermissionWatcher extends ZooKeeperListener {
 
   public void start() throws KeeperException {
     watcher.registerListener(this);
+    LOG.info("ZKPermissionWatcher start");
     if (ZKUtil.watchAndCheckExists(watcher, aclZNode)) {
       List<ZKUtil.NodeAndData> existing =
           ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode);
       if (existing != null) {
-        refreshNodes(existing);
+        int cnt = refreshNodes(existing);
+        LOG.info("refreshNodes() read " + cnt + " nodes");
       }
     }
   }
@@ -121,7 +123,8 @@ public class ZKPermissionWatcher extends ZooKeeperListener {
     }
   }
 
-  private void refreshNodes(List<ZKUtil.NodeAndData> nodes) {
+  private int refreshNodes(List<ZKUtil.NodeAndData> nodes) {
+    int cnt = 0;
     for (ZKUtil.NodeAndData n : nodes) {
       if (n.isEmpty()) continue;
       String path = n.getNode();
@@ -134,11 +137,13 @@ public class ZKPermissionWatcher extends ZooKeeperListener {
        }
        authManager.refreshCacheFromWritable(Bytes.toBytes(table),
            nodeData);
+        cnt++;
      } catch (IOException ioe) {
        LOG.error("Failed parsing permissions for table '" + table +
            "' from zk", ioe);
      }
    }
+    return cnt;
  }
 
  /***
diff --git a/security/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/security/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index f573578..e802e5f 100644
--- a/security/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/security/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.UnknownRowLockException;
 import org.apache.hadoop.hbase.client.Append;
@@ -61,6 +62,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
@@ -69,10 +71,12 @@ import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.mortbay.log.Log;
 
 /**
  * Performs authorization checks for common operations, according to different
@@ -120,7 +124,7 @@ public class TestAccessController {
       "org.apache.hadoop.hbase.master.snapshot.SnapshotLogCleaner");
     SecureTestUtil.enableSecurity(conf);
-    TEST_UTIL.startMiniCluster();
+    TEST_UTIL.startMiniCluster(2, 2);
     MasterCoprocessorHost cpHost =
       TEST_UTIL.getMiniHBaseCluster().getMaster().getCoprocessorHost();
     cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
     ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor(AccessController.class.getName());
@@ -179,6 +183,24 @@ public class TestAccessController {
     } finally {
       acl.close();
     }
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    // get all the master threads
+    List<MasterThread> masterThreads = cluster.getMasterThreads();
+    ServerName activeName = null;
+    HMaster active = null;
+    int activeIndex = -1;
+    for (int i = 0; i < masterThreads.size(); i++) {
+      if (masterThreads.get(i).getMaster().isActiveMaster()) {
+        activeIndex = i;
+        active = masterThreads.get(activeIndex).getMaster();
+        activeName = active.getServerName();
+      }
+    }
+    cluster.stopMaster(activeIndex, false);
+    Log.info("active master (" + activeIndex + ") stopped");
+    cluster.waitOnMaster(activeIndex);
+    // wait for an active master to show up and be ready
+    assertTrue(cluster.waitForActiveAndReadyMaster());
   }
 
   @AfterClass