diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
index 5bec128..ebd8ef5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
@@ -118,7 +118,9 @@ public abstract class Query extends OperationWithAttributes {
   /**
    * @param cellFirstStrategy true if ACLs should be evaluated on the cell
    * level first, false if ACL should first be checked at the CF and table
-   * levels
+   * levels.
+   * Note that setting this to true means the ACL must evaluate to true at the
+   * cell level and must also evaluate to true at the CF and table levels.
    */
   public void setACLStrategy(boolean cellFirstStrategy) {
     if (cellFirstStrategy) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 977a403..b138cfc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -1379,28 +1379,16 @@ public class AccessController extends BaseRegionObserver
         // grants three times (permissionGranted above, here, and in the
         // filter) but that's the price of backwards compatibility.
         if (hasFamilyQualifierPermission(user, Action.READ, env, families)) {
-          Filter ourFilter = new AccessControlFilter(authManager, user, table,
-            query.getACLStrategy() ? AccessControlFilter.Strategy.CHECK_CELL_FIRST :
-              AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY,
-            cfVsMaxVersions);
-          // wrap any existing filter
-          if (filter != null) {
-            ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL,
-              Lists.newArrayList(ourFilter, filter));
-          }
-          authResult.setAllowed(true);;
-          authResult.setReason("Access allowed with filter");
-          switch (opType) {
-          case GET:
-          case EXISTS:
-            ((Get)query).setFilter(ourFilter);
-            break;
-          case SCAN:
-            ((Scan)query).setFilter(ourFilter);
-            break;
-          default:
-            throw new RuntimeException("Unhandled operation " + opType);
+          Filter ourFilter = null;
+          if (cellFeaturesEnabled) {
+            ourFilter = new AccessControlFilter(authManager, user, table,
+              query.getACLStrategy() ? AccessControlFilter.Strategy.CHECK_CELL_FIRST
+                : AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY, cfVsMaxVersions);
+          } else {
+            ourFilter = new AccessControlFilter(authManager, user, table,
+              AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY, cfVsMaxVersions);
           }
+          wrapAnyExistingFilter(query, opType, filter, authResult, ourFilter);
         }
       } else {
         // New behavior: Any access we might be granted is more fine-grained
@@ -1411,23 +1399,16 @@ public class AccessController extends BaseRegionObserver
           query.getACLStrategy() ?
            AccessControlFilter.Strategy.CHECK_CELL_FIRST :
            AccessControlFilter.Strategy.CHECK_CELL_DEFAULT, cfVsMaxVersions);
-        // wrap any existing filter
-        if (filter != null) {
-          ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL,
-            Lists.newArrayList(ourFilter, filter));
-        }
-        authResult.setAllowed(true);;
-        authResult.setReason("Access allowed with filter");
-        switch (opType) {
-        case GET:
-        case EXISTS:
-          ((Get)query).setFilter(ourFilter);
-          break;
-        case SCAN:
-          ((Scan)query).setFilter(ourFilter);
-          break;
-        default:
-          throw new RuntimeException("Unhandled operation " + opType);
+        wrapAnyExistingFilter(query, opType, filter, authResult, ourFilter);
+      }
+    } else {
+      if (cellFeaturesEnabled && !compatibleEarlyTermination) {
+        // Here we know the table/CF already has READ permission, so we can apply the
+        // cell-first strategy and check whether the fine-grained cell permissions are granted
+        if (query.getACLStrategy()) {
+          Filter ourFilter = new AccessControlFilter(authManager, user, table,
+            AccessControlFilter.Strategy.CHECK_CELL_FIRST, cfVsMaxVersions);
+          wrapAnyExistingFilter(query, opType, filter, authResult, ourFilter);
         }
       }
     }
@@ -1439,6 +1420,28 @@
     }
   }
 
+  private void wrapAnyExistingFilter(final Query query, OpType opType, Filter filter,
+      AuthResult authResult, Filter ourFilter) {
+    // wrap any existing filter
+    if (filter != null) {
+      ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL,
+        Lists.newArrayList(ourFilter, filter));
+    }
+    authResult.setAllowed(true);
+    authResult.setReason("Access allowed with filter");
+    switch (opType) {
+    case GET:
+    case EXISTS:
+      ((Get)query).setFilter(ourFilter);
+      break;
+    case SCAN:
+      ((Scan)query).setFilter(ourFilter);
+      break;
+    default:
+      throw new RuntimeException("Unhandled operation " + opType);
+    }
+  }
+
   @Override
   public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
       final Get get, final List<Cell> result) throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
index 4d17556..5214073 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
@@ -188,7 +188,7 @@ public class LoadTestTool extends AbstractHBaseTool {
 
   private String superUser;
 
-  private String userNames = "user1, user2, user3, user4";
+  private String userNames;
   //This file is used to read authentication information in secure clusters.
   private String authnFileName;
 
@@ -514,7 +514,7 @@ public class LoadTestTool extends AbstractHBaseTool {
           minColsPerKey, maxColsPerKey, COLUMN_FAMILY);
     }
 
-    if (User.isHBaseSecurityEnabled(conf) && userOwner != null) {
+    if (userOwner != null) {
       LOG.info("Granting permissions for user " + userOwner.getShortName());
       AccessControlProtos.Permission.Action[] actions = {
          AccessControlProtos.Permission.Action.ADMIN, AccessControlProtos.Permission.Action.CREATE,
@@ -531,22 +531,21 @@ public class LoadTestTool extends AbstractHBaseTool {
       // This will be comma separated list of expressions.
       String users[] = userNames.split(",");
       User user = null;
-      if (User.isHBaseSecurityEnabled(conf)) {
-        for (String userStr : users) {
+      for (String userStr : users) {
+        if (User.isHBaseSecurityEnabled(conf)) {
           user = User.create(loginAndReturnUGI(conf, userStr));
-          LOG.info("Granting READ permission for the user " + user.getShortName());
-          AccessControlProtos.Permission.Action[] actions = { AccessControlProtos.Permission.Action.READ };
-          try {
-            AccessControlClient.grant(conf, tableName, user.getShortName(), null, null, actions);
-          } catch (Throwable e) {
-            LOG.fatal("Error in granting READ permission for the user " + user.getShortName(), e);
-            return EXIT_FAILURE;
-          }
-        }
-      } else {
-        for (String userStr : users) {
+        } else {
           user = User.createUserForTesting(conf, userStr, new String[0]);
         }
+        LOG.info("Granting READ permission for the user " + user.getShortName());
+        AccessControlProtos.Permission.Action[] actions = { AccessControlProtos.Permission.Action.READ };
+        try {
+          // Need to grant READ permission because the readers use the cell-first ACL strategy
+          AccessControlClient.grant(conf, tableName, user.getShortName(), null, null, actions);
+        } catch (Throwable e) {
+          LOG.fatal("Error in granting READ permission for the user " + user.getShortName(), e);
+          return EXIT_FAILURE;
+        }
       }
     }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java
index 99b4f1d..219f581 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java
@@ -108,7 +108,9 @@ public class MultiThreadedReaderWithACL extends MultiThreadedReader {
           result = localTable.get(get);
         }
         boolean isNullExpected = ((((int) keyToRead % specialPermCellInsertionFactor)) == 0);
-        LOG.info("Read happening from ACL " + isNullExpected);
+        if (isNullExpected) {
+          LOG.info("Read happening from ACL " + isNullExpected);
+        }
         getResultMetricUpdation(verify, rowKey, start, result, localTable, isNullExpected);
       } catch (IOException e) {
         recordFailure(keyToRead);
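
Example usage (illustration only, not part of the patch): a minimal sketch of how a client could opt in to the cell-first ACL evaluation added above. Get and Scan both extend Query, so they inherit setACLStrategy(boolean). The class name, table "acl_demo", and column family "cf" are hypothetical, and the calling user is assumed to hold table-level READ (as granted by the LoadTestTool change), since CHECK_CELL_FIRST also checks the CF and table levels.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class CellFirstAclScanExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, TableName.valueOf("acl_demo"));
    try {
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("cf"));
      // true = evaluate ACLs at the cell level first; with this patch the
      // AccessController injects an AccessControlFilter using the
      // CHECK_CELL_FIRST strategy for this scan.
      scan.setACLStrategy(true);
      ResultScanner scanner = table.getScanner(scan);
      try {
        for (Result r : scanner) {
          System.out.println(Bytes.toString(r.getRow()));
        }
      } finally {
        scanner.close();
      }
    } finally {
      table.close();
    }
  }
}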