From 5c5907d210fed0379028c03a7ecc28484a78e8c8 Mon Sep 17 00:00:00 2001 From: Sakthi Date: Sat, 23 Mar 2019 17:38:52 -0700 Subject: [PATCH] HBASE-22054: Space Quota: Compaction is not working for super user in case of NO_WRITES_COMPACTIONS --- .../hadoop/hbase/security/Superusers.java | 4 ++ .../hbase/regionserver/CompactSplit.java | 15 ++++- .../quotas/TestSuperUserQuotaPermissions.java | 57 ++++++++++++++----- 3 files changed, 59 insertions(+), 17 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/Superusers.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/Superusers.java index b5566e65be77706b6cf39873ac0ca6fffcec7d0d..8135205110c3da4ae481f3650e0e8eafeac782c2 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/Superusers.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/Superusers.java @@ -91,6 +91,10 @@ public final class Superusers { throw new IllegalStateException("Super users/super groups lists" + " have not been initialized properly."); } + if (user == null) { + LOG.trace("isSuperUser check received for a null user. 
Returned false"); + return false; + } if (superUsers.contains(user.getShortName())) { return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java index ae3cc15c730c8581ebe0d356800368402d044f46..50a8b2b11c5fedc144dad9b6e202ed96a4a25f8a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java @@ -40,6 +40,7 @@ import java.util.function.IntSupplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.conf.ConfigurationManager; import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; @@ -47,6 +48,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester; import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; +import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.StealJobQueue; @@ -112,6 +114,14 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati // compaction throughput controller this.compactionThroughputController = CompactionThroughputControllerFactory.create(server, conf); + + // if regionserver's constructor wasn't called for some reason, then Superusers would remain + // uninitialized. Hence initializing here. 
The initialization process is idempotent so we are okay. + try { + Superusers.initialize(this.conf); + } catch (IOException ioe){ + LOG.info("Caught IOException while trying to initialize superusers. " + ioe.getMessage()); + } } private void createSplitExcecutors() { @@ -341,8 +351,9 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati } RegionServerSpaceQuotaManager spaceQuotaManager = this.server.getRegionServerSpaceQuotaManager(); - if (spaceQuotaManager != null && - spaceQuotaManager.areCompactionsDisabled(region.getTableDescriptor().getTableName())) { + if (!Superusers.isSuperUser(RpcServer.getRequestUser().orElse(null)) + && spaceQuotaManager != null && spaceQuotaManager + .areCompactionsDisabled(region.getTableDescriptor().getTableName())) { String reason = "Ignoring compaction request for " + region + " as an active space quota violation " + " policy disallows compactions."; tracker.notExecuted(store, reason); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSuperUserQuotaPermissions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSuperUserQuotaPermissions.java index 01f8a2f4bbf04d45999f0656581f5f9a24a81be9..8f383226a075b7d2bfbbf2e4a0130b551200233d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSuperUserQuotaPermissions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSuperUserQuotaPermissions.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.security.access.AccessControlClient; import org.apache.hadoop.hbase.security.access.AccessController; import org.apache.hadoop.hbase.security.access.Permission.Action; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.security.UserGroupInformation; import org.junit.AfterClass; import org.junit.Before; @@ -125,10 +126,6 @@ public class TestSuperUserQuotaPermissions { try (Connection conn = getConnection()) { 
Admin admin = conn.getAdmin(); final TableName tn = helper.createTableWithRegions(admin, 5); - final long sizeLimit = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE; - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, sizeLimit, SpaceViolationPolicy.NO_WRITES_COMPACTIONS); - admin.setQuota(settings); // Grant the normal user permissions try { AccessControlClient.grant( @@ -149,12 +146,23 @@ public class TestSuperUserQuotaPermissions { @Override public Void call() throws Exception { try (Connection conn = getConnection()) { - helper.writeData(tn, 3L * SpaceQuotaHelperForTests.ONE_MEGABYTE); + // Write way more data so that we have HFiles > numRegions after flushes + // helper.writeData flushes itself, so need to flush explicitly + helper.writeData(tn, 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE); + helper.writeData(tn, 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE); return null; } } }); + final long sizeLimit = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE; + QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( + tn, sizeLimit, SpaceViolationPolicy.NO_WRITES_COMPACTIONS); + + try (Connection conn = getConnection()) { + conn.getAdmin().setQuota(settings); + } + waitForTableToEnterQuotaViolation(tn); // Should throw an exception, unprivileged users cannot compact due to the quota @@ -170,19 +178,29 @@ public class TestSuperUserQuotaPermissions { }); fail("Expected an exception trying to compact a table with a quota violation"); } catch (DoNotRetryIOException e) { - // Expected + // Expected Exception. 
+ LOG.debug("Regular User: Caught Exception: "+e.getMessage()); } - // Should not throw an exception (superuser can do anything) - doAsSuperUser(new Callable() { - @Override - public Void call() throws Exception { - try (Connection conn = getConnection()) { - conn.getAdmin().majorCompact(tn); - return null; + try{ + // Should not throw an exception (superuser can do anything) + doAsSuperUser(new Callable() { + @Override + public Void call() throws Exception { + try (Connection conn = getConnection()) { + conn.getAdmin().majorCompact(tn); + return null; + } } - } - }); + }); + } catch (Exception e) { + // Unexpected Exception. + LOG.debug("Super User: Caught Exception: " + e.getMessage()); + fail("Did not expect an exception to be thrown while a super user tries " + + "to compact a table with a quota violation"); + } + int numberOfRegions = TEST_UTIL.getAdmin().getRegions(tn).size(); + waitForHFilesCountLessorEqual(tn, Bytes.toBytes("f1"), numberOfRegions); } @Test @@ -299,4 +317,13 @@ public class TestSuperUserQuotaPermissions { } }); } + + private void waitForHFilesCountLessorEqual(TableName tn, byte[] cf, int count) throws Exception { + Waiter.waitFor(TEST_UTIL.getConfiguration(), 30 * 1000, 1000, new Predicate() { + @Override + public boolean evaluate() throws Exception { + return TEST_UTIL.getNumHFiles(tn, cf) <= count; + } + }); + } } -- 2.17.2 (Apple Git-113)