Index: conf/hbase-policy.xml
===================================================================
--- conf/hbase-policy.xml (revision 1335370)
+++ conf/hbase-policy.xml (working copy)
@@ -22,7 +22,7 @@
-    <name>security.client.protocol.acl</name>
+    <name>security.hbase.client.protocol.acl</name>
     <value>*</value>
     <description>ACL for ClientProtocol and AdminProtocol implementations (ie.
     clients talking to HRegionServers)
Index: security/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java (deleted)
===================================================================
Index: security/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java (deleted)
===================================================================
Index: security/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java (deleted)
===================================================================
Index: security/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java (deleted)
===================================================================
Index: security/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java (deleted)
===================================================================
Index: security/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java (deleted)
===================================================================
Index: security/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java (deleted)
===================================================================
Index: security/src/test/resources/hbase-site.xml (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationProtocol.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/access/AccessControllerProtocol.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/ipc/SecureConnectionHeader.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/ipc/SecureRpcEngine.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/ipc/SecureClient.java (deleted)
===================================================================
Index: security/src/main/java/org/apache/hadoop/hbase/ipc/SecureServer.java (deleted)
===================================================================
Index: src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java (revision 0)
+++ src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java (revision 0)
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.token;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.security.PrivilegedExceptionAction;
+import java.util.UUID;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.coprocessor.BaseEndpointCoprocessor;
+import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
+import org.apache.hadoop.hbase.ipc.HBaseRPC;
+import org.apache.hadoop.hbase.ipc.HBaseServer;
+import org.apache.hadoop.hbase.ipc.RequestContext;
+import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.Token;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Tests for authentication token creation and usage
+ */
+@Category(LargeTests.class)
+public class TestTokenAuthentication {
+ public static interface IdentityProtocol extends CoprocessorProtocol {
+ public String whoami();
+ public String getAuthMethod();
+ }
+
+ public static class IdentityCoprocessor extends BaseEndpointCoprocessor
+ implements IdentityProtocol {
+ public String whoami() {
+ return RequestContext.getRequestUserName();
+ }
+
+ public String getAuthMethod() {
+ UserGroupInformation ugi = null;
+ User user = RequestContext.getRequestUser();
+ if (user != null) {
+ ugi = user.getUGI();
+ }
+ if (ugi != null) {
+ return ugi.getAuthenticationMethod().toString();
+ }
+ return null;
+ }
+ }
+
+ private static HBaseTestingUtility TEST_UTIL;
+ private static AuthenticationTokenSecretManager secretManager;
+
+ @BeforeClass
+ public static void setupBeforeClass() throws Exception {
+ TEST_UTIL = new HBaseTestingUtility();
+ Configuration conf = TEST_UTIL.getConfiguration();
+ conf.set("hbase.coprocessor.region.classes",
+ IdentityCoprocessor.class.getName());
+ TEST_UTIL.startMiniCluster();
+ HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
+ secretManager = new AuthenticationTokenSecretManager(conf, rs.getZooKeeper(),
+ rs.getServerName().toString(),
+ conf.getLong("hbase.auth.key.update.interval", 24*60*60*1000),
+ conf.getLong("hbase.auth.token.max.lifetime", 7*24*60*60*1000));
+ secretManager.start();
+ while(secretManager.getCurrentKey() == null) {
+ Thread.sleep(1);
+ }
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testTokenCreation() throws Exception {
+ Token<AuthenticationTokenIdentifier> token =
+ secretManager.generateToken("testuser");
+
+ AuthenticationTokenIdentifier ident = new AuthenticationTokenIdentifier();
+ Writables.getWritable(token.getIdentifier(), ident);
+ assertEquals("Token username should match", "testuser",
+ ident.getUsername());
+ byte[] passwd = secretManager.retrievePassword(ident);
+ assertTrue("Token password and password from secret manager should match",
+ Bytes.equals(token.getPassword(), passwd));
+ }
+
+ // @Test - Disable due to kerberos requirement
+ public void testTokenAuthentication() throws Exception {
+ UserGroupInformation testuser =
+ UserGroupInformation.createUserForTesting("testuser", new String[]{"testgroup"});
+
+ testuser.setAuthenticationMethod(
+ UserGroupInformation.AuthenticationMethod.TOKEN);
+ final Configuration conf = TEST_UTIL.getConfiguration();
+ conf.set("hadoop.security.authentication", "kerberos");
+ conf.set("randomkey", UUID.randomUUID().toString());
+ testuser.setConfiguration(conf);
+ Token<AuthenticationTokenIdentifier> token =
+ secretManager.generateToken("testuser");
+ testuser.addToken(token);
+
+ // verify the server authenticates us as this token user
+ testuser.doAs(new PrivilegedExceptionAction<Object>() {
+ public Object run() throws Exception {
+ HTable table = new HTable(conf, ".META.");
+ IdentityProtocol prot = table.coprocessorProxy(
+ IdentityProtocol.class, HConstants.EMPTY_START_ROW);
+ assertEquals("Server should see the token user", "testuser", prot.whoami());
+ assertEquals("Authentication method should be TOKEN",
+ UserGroupInformation.AuthenticationMethod.TOKEN.toString(),
+ prot.getAuthMethod());
+ return null;
+ }
+ });
+ }
+}
Index: src/test/resources/hbase-site.xml
===================================================================
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
Index: src/main/protobuf/RPC.proto
===================================================================
--- src/main/protobuf/RPC.proto (revision 1335370)
+++ src/main/protobuf/RPC.proto (working copy)
@@ -47,7 +47,7 @@
message UserInformation {
required string effectiveUser = 1;
- required string realUser = 2;
+ optional string realUser = 2;
}
message ConnectionHeader {
@@ -94,9 +94,14 @@
/** Echo back the callId the client sent */
required int32 callId = 1;
/** Did the RPC execution encounter an error at the server */
- required bool error = 2;
+ enum Status {
+ SUCCESS = 0;
+ ERROR = 1;
+ FATAL = 2;
+ }
+ required Status status = 2;
/** Optional response bytes */
optional bytes response = 3;
/** Optional exception when error is true*/
optional RpcException exception = 4;
-}
+}
\ No newline at end of file
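For orientation only (not part of the patch): a hedged Java sketch of how an RPC responder could populate the new tri-state status in place of the old boolean error flag. The callId, result and error parameters are hypothetical inputs; the builder methods come from the generated RPCProtos classes updated below.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.RPCProtos;
    import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse;
    import org.apache.hadoop.util.StringUtils;

    static RpcResponse buildResponse(int callId, ByteString result, Throwable error) {
      RpcResponse.Builder builder = RpcResponse.newBuilder().setCallId(callId);
      if (error == null) {
        builder.setStatus(RpcResponse.Status.SUCCESS);
        if (result != null) {
          builder.setResponse(result);
        }
      } else {
        // ERROR leaves the connection usable; FATAL would tell the client to drop it
        builder.setStatus(RpcResponse.Status.ERROR);
        builder.setException(RPCProtos.RpcException.newBuilder()
            .setExceptionName(error.getClass().getName())
            .setStackTrace(StringUtils.stringifyException(error))
            .build());
      }
      return builder.build();
    }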
Index: src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java (revision 1335370)
+++ src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java (working copy)
@@ -15,7 +15,7 @@
boolean hasEffectiveUser();
String getEffectiveUser();
- // required string realUser = 2;
+ // optional string realUser = 2;
boolean hasRealUser();
String getRealUser();
}
@@ -80,7 +80,7 @@
}
}
- // required string realUser = 2;
+ // optional string realUser = 2;
public static final int REALUSER_FIELD_NUMBER = 2;
private java.lang.Object realUser_;
public boolean hasRealUser() {
@@ -125,10 +125,6 @@
memoizedIsInitialized = 0;
return false;
}
- if (!hasRealUser()) {
- memoizedIsInitialized = 0;
- return false;
- }
memoizedIsInitialized = 1;
return true;
}
@@ -406,10 +402,6 @@
return false;
}
- if (!hasRealUser()) {
-
- return false;
- }
return true;
}
@@ -488,7 +480,7 @@
onChanged();
}
- // required string realUser = 2;
+ // optional string realUser = 2;
private java.lang.Object realUser_ = "";
public boolean hasRealUser() {
return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -2081,9 +2073,9 @@
boolean hasCallId();
int getCallId();
- // required bool error = 2;
- boolean hasError();
- boolean getError();
+ // required .RpcResponse.Status status = 2;
+ boolean hasStatus();
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status getStatus();
// optional bytes response = 3;
boolean hasResponse();
@@ -2122,6 +2114,78 @@
return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.internal_static_RpcResponse_fieldAccessorTable;
}
+ public enum Status
+ implements com.google.protobuf.ProtocolMessageEnum {
+ SUCCESS(0, 0),
+ ERROR(1, 1),
+ FATAL(2, 2),
+ ;
+
+ public static final int SUCCESS_VALUE = 0;
+ public static final int ERROR_VALUE = 1;
+ public static final int FATAL_VALUE = 2;
+
+
+ public final int getNumber() { return value; }
+
+ public static Status valueOf(int value) {
+ switch (value) {
+ case 0: return SUCCESS;
+ case 1: return ERROR;
+ case 2: return FATAL;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<Status>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<Status>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<Status>() {
+ public Status findValueByNumber(int number) {
+ return Status.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final Status[] VALUES = {
+ SUCCESS, ERROR, FATAL,
+ };
+
+ public static Status valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private Status(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:RpcResponse.Status)
+ }
+
private int bitField0_;
// required int32 callId = 1;
public static final int CALLID_FIELD_NUMBER = 1;
@@ -2133,14 +2197,14 @@
return callId_;
}
- // required bool error = 2;
- public static final int ERROR_FIELD_NUMBER = 2;
- private boolean error_;
- public boolean hasError() {
+ // required .RpcResponse.Status status = 2;
+ public static final int STATUS_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status status_;
+ public boolean hasStatus() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
- public boolean getError() {
- return error_;
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status getStatus() {
+ return status_;
}
// optional bytes response = 3;
@@ -2168,7 +2232,7 @@
private void initFields() {
callId_ = 0;
- error_ = false;
+ status_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status.SUCCESS;
response_ = com.google.protobuf.ByteString.EMPTY;
exception_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcException.getDefaultInstance();
}
@@ -2181,7 +2245,7 @@
memoizedIsInitialized = 0;
return false;
}
- if (!hasError()) {
+ if (!hasStatus()) {
memoizedIsInitialized = 0;
return false;
}
@@ -2202,7 +2266,7 @@
output.writeInt32(1, callId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBool(2, error_);
+ output.writeEnum(2, status_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, response_);
@@ -2225,7 +2289,7 @@
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeBoolSize(2, error_);
+ .computeEnumSize(2, status_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
@@ -2263,10 +2327,10 @@
result = result && (getCallId()
== other.getCallId());
}
- result = result && (hasError() == other.hasError());
- if (hasError()) {
- result = result && (getError()
- == other.getError());
+ result = result && (hasStatus() == other.hasStatus());
+ if (hasStatus()) {
+ result = result &&
+ (getStatus() == other.getStatus());
}
result = result && (hasResponse() == other.hasResponse());
if (hasResponse()) {
@@ -2291,9 +2355,9 @@
hash = (37 * hash) + CALLID_FIELD_NUMBER;
hash = (53 * hash) + getCallId();
}
- if (hasError()) {
- hash = (37 * hash) + ERROR_FIELD_NUMBER;
- hash = (53 * hash) + hashBoolean(getError());
+ if (hasStatus()) {
+ hash = (37 * hash) + STATUS_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getStatus());
}
if (hasResponse()) {
hash = (37 * hash) + RESPONSE_FIELD_NUMBER;
@@ -2422,7 +2486,7 @@
super.clear();
callId_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
- error_ = false;
+ status_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status.SUCCESS;
bitField0_ = (bitField0_ & ~0x00000002);
response_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
@@ -2477,7 +2541,7 @@
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
- result.error_ = error_;
+ result.status_ = status_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
@@ -2509,8 +2573,8 @@
if (other.hasCallId()) {
setCallId(other.getCallId());
}
- if (other.hasError()) {
- setError(other.getError());
+ if (other.hasStatus()) {
+ setStatus(other.getStatus());
}
if (other.hasResponse()) {
setResponse(other.getResponse());
@@ -2527,7 +2591,7 @@
return false;
}
- if (!hasError()) {
+ if (!hasStatus()) {
return false;
}
@@ -2569,8 +2633,14 @@
break;
}
case 16: {
- bitField0_ |= 0x00000002;
- error_ = input.readBool();
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status value = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(2, rawValue);
+ } else {
+ bitField0_ |= 0x00000002;
+ status_ = value;
+ }
break;
}
case 26: {
@@ -2614,23 +2684,26 @@
return this;
}
- // required bool error = 2;
- private boolean error_ ;
- public boolean hasError() {
+ // required .RpcResponse.Status status = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status status_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status.SUCCESS;
+ public boolean hasStatus() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
- public boolean getError() {
- return error_;
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status getStatus() {
+ return status_;
}
- public Builder setError(boolean value) {
+ public Builder setStatus(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
bitField0_ |= 0x00000002;
- error_ = value;
+ status_ = value;
onChanged();
return this;
}
- public Builder clearError() {
+ public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000002);
- error_ = false;
+ status_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status.SUCCESS;
onChanged();
return this;
}
@@ -2795,17 +2868,19 @@
static {
java.lang.String[] descriptorData = {
"\n\tRPC.proto\":\n\017UserInformation\022\025\n\reffect" +
- "iveUser\030\001 \002(\t\022\020\n\010realUser\030\002 \002(\t\"w\n\020Conne" +
+ "iveUser\030\001 \002(\t\022\020\n\010realUser\030\002 \001(\t\"w\n\020Conne" +
"ctionHeader\022\"\n\010userInfo\030\001 \001(\0132\020.UserInfo" +
"rmation\022?\n\010protocol\030\002 \001(\t:-org.apache.ha" +
"doop.hbase.client.ClientProtocol\"-\n\nRpcR" +
"equest\022\016\n\006callId\030\001 \002(\005\022\017\n\007request\030\002 \001(\014\"" +
"9\n\014RpcException\022\025\n\rexceptionName\030\001 \002(\t\022\022" +
- "\n\nstackTrace\030\002 \001(\t\"`\n\013RpcResponse\022\016\n\006cal" +
- "lId\030\001 \002(\005\022\r\n\005error\030\002 \002(\010\022\020\n\010response\030\003 \001" +
- "(\014\022 \n\texception\030\004 \001(\0132\r.RpcExceptionB<\n*",
- "org.apache.hadoop.hbase.protobuf.generat" +
- "edB\tRPCProtosH\001\240\001\001"
+ "\n\nstackTrace\030\002 \001(\t\"\243\001\n\013RpcResponse\022\016\n\006ca" +
+ "llId\030\001 \002(\005\022#\n\006status\030\002 \002(\0162\023.RpcResponse" +
+ ".Status\022\020\n\010response\030\003 \001(\014\022 \n\texception\030\004",
+ " \001(\0132\r.RpcException\"+\n\006Status\022\013\n\007SUCCESS" +
+ "\020\000\022\t\n\005ERROR\020\001\022\t\n\005FATAL\020\002B<\n*org.apache.h" +
+ "adoop.hbase.protobuf.generatedB\tRPCProto" +
+ "sH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -2849,7 +2924,7 @@
internal_static_RpcResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RpcResponse_descriptor,
- new java.lang.String[] { "CallId", "Error", "Response", "Exception", },
+ new java.lang.String[] { "CallId", "Status", "Response", "Exception", },
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.class,
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Builder.class);
return null;
Index: src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationProtocol.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationProtocol.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationProtocol.java (revision 0)
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.token;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
+import org.apache.hadoop.security.token.Token;
+
+/**
+ * Defines a custom RPC protocol for obtaining authentication tokens
+ */
+public interface AuthenticationProtocol extends CoprocessorProtocol {
+ /**
+ * Obtains a token capable of authenticating as the current user for future
+ * connections.
+ * @return an authentication token for the current user
+ * @throws IOException If obtaining a token is denied or encounters an error
+ */
+ public Token<AuthenticationTokenIdentifier> getAuthenticationToken()
+ throws IOException;
+
+ /**
+ * Returns the currently authenticated username.
+ */
+ public String whoami();
+}
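A hedged client-side sketch of this protocol in use (essentially what TokenUtil.obtainToken() later in this patch does): the interface is served as a coprocessor endpoint, so a proxy against any region of .META. can answer the call.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.security.token.Token;

    static Token<AuthenticationTokenIdentifier> fetchToken() throws IOException {
      Configuration conf = HBaseConfiguration.create();
      HTable meta = new HTable(conf, ".META.");
      try {
        AuthenticationProtocol auth = meta.coprocessorProxy(
            AuthenticationProtocol.class, HConstants.EMPTY_START_ROW);
        // whoami() is answered server-side, so it reflects the authenticated caller
        return auth.getAuthenticationToken();
      } finally {
        meta.close();
      }
    }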
Index: src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java (revision 0)
@@ -0,0 +1,334 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.token;
+
+import javax.crypto.SecretKey;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.zookeeper.ClusterId;
+import org.apache.hadoop.hbase.zookeeper.ZKLeaderManager;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.Token;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * Manages an internal list of secret keys used to sign new authentication
+ * tokens as they are generated, and to validate existing tokens used for
+ * authentication.
+ *
+ *
+ * A single instance of {@code AuthenticationTokenSecretManager} will be
+ * running as the "leader" in a given HBase cluster. The leader is responsible
+ * for periodically generating new secret keys, which are then distributed to
+ * followers via ZooKeeper, and for expiring previously used secret keys that
+ * are no longer needed (as any tokens using them have expired).
+ *
+ */
+public class AuthenticationTokenSecretManager
+ extends SecretManager<AuthenticationTokenIdentifier> {
+
+ static final String NAME_PREFIX = "SecretManager-";
+
+ private static Log LOG = LogFactory.getLog(
+ AuthenticationTokenSecretManager.class);
+
+ private long lastKeyUpdate;
+ private long keyUpdateInterval;
+ private long tokenMaxLifetime;
+ private ZKSecretWatcher zkWatcher;
+ private LeaderElector leaderElector;
+ private ClusterId clusterId;
+
+ private Map<Integer,AuthenticationKey> allKeys =
+ new ConcurrentHashMap<Integer, AuthenticationKey>();
+ private AuthenticationKey currentKey;
+
+ private int idSeq;
+ private AtomicLong tokenSeq = new AtomicLong();
+ private String name;
+
+ /**
+ * Create a new secret manager instance for generating keys.
+ * @param conf Configuration to use
+ * @param zk Connection to zookeeper for handling leader elections
+ * @param keyUpdateInterval Time (in milliseconds) between rolling a new master key for token signing
+ * @param tokenMaxLifetime Maximum age (in milliseconds) before a token expires and is no longer valid
+ */
+ /* TODO: Restrict access to this constructor to make rogues instances more difficult.
+ * For the moment this class is instantiated from
+ * org.apache.hadoop.hbase.ipc.SecureServer so public access is needed.
+ */
+ public AuthenticationTokenSecretManager(Configuration conf,
+ ZooKeeperWatcher zk, String serverName,
+ long keyUpdateInterval, long tokenMaxLifetime) {
+ this.zkWatcher = new ZKSecretWatcher(conf, zk, this);
+ this.keyUpdateInterval = keyUpdateInterval;
+ this.tokenMaxLifetime = tokenMaxLifetime;
+ this.leaderElector = new LeaderElector(zk, serverName);
+ this.name = NAME_PREFIX+serverName;
+ this.clusterId = new ClusterId(zk, zk);
+ }
+
+ public void start() {
+ try {
+ // populate any existing keys
+ this.zkWatcher.start();
+ // try to become leader
+ this.leaderElector.start();
+ } catch (KeeperException ke) {
+ LOG.error("Zookeeper initialization failed", ke);
+ }
+ }
+
+ public void stop() {
+ this.leaderElector.stop("SecretManager stopping");
+ }
+
+ public boolean isMaster() {
+ return leaderElector.isMaster();
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ protected byte[] createPassword(AuthenticationTokenIdentifier identifier) {
+ long now = EnvironmentEdgeManager.currentTimeMillis();
+ AuthenticationKey secretKey = currentKey;
+ identifier.setKeyId(secretKey.getKeyId());
+ identifier.setIssueDate(now);
+ identifier.setExpirationDate(now + tokenMaxLifetime);
+ identifier.setSequenceNumber(tokenSeq.getAndIncrement());
+ return createPassword(WritableUtils.toByteArray(identifier),
+ secretKey.getKey());
+ }
+
+ @Override
+ public byte[] retrievePassword(AuthenticationTokenIdentifier identifier)
+ throws InvalidToken {
+ long now = EnvironmentEdgeManager.currentTimeMillis();
+ if (identifier.getExpirationDate() < now) {
+ throw new InvalidToken("Token has expired");
+ }
+ AuthenticationKey masterKey = allKeys.get(identifier.getKeyId());
+ if (masterKey == null) {
+ throw new InvalidToken("Unknown master key for token (id="+
+ identifier.getKeyId()+")");
+ }
+ // regenerate the password
+ return createPassword(WritableUtils.toByteArray(identifier),
+ masterKey.getKey());
+ }
+
+ @Override
+ public AuthenticationTokenIdentifier createIdentifier() {
+ return new AuthenticationTokenIdentifier();
+ }
+
+ public Token<AuthenticationTokenIdentifier> generateToken(String username) {
+ AuthenticationTokenIdentifier ident =
+ new AuthenticationTokenIdentifier(username);
+ Token<AuthenticationTokenIdentifier> token =
+ new Token<AuthenticationTokenIdentifier>(ident, this);
+ if (clusterId.hasId()) {
+ token.setService(new Text(clusterId.getId()));
+ }
+ return token;
+ }
+
+ public synchronized void addKey(AuthenticationKey key) throws IOException {
+ // ignore zk changes when running as master
+ if (leaderElector.isMaster()) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Running as master, ignoring new key "+key.getKeyId());
+ }
+ return;
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding key "+key.getKeyId());
+ }
+
+ allKeys.put(key.getKeyId(), key);
+ if (currentKey == null || key.getKeyId() > currentKey.getKeyId()) {
+ currentKey = key;
+ }
+ // update current sequence
+ if (key.getKeyId() > idSeq) {
+ idSeq = key.getKeyId();
+ }
+ }
+
+ synchronized void removeKey(Integer keyId) {
+ // ignore zk changes when running as master
+ if (leaderElector.isMaster()) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Running as master, ignoring removed key "+keyId);
+ }
+ return;
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Removing key "+keyId);
+ }
+
+ allKeys.remove(keyId);
+ }
+
+ AuthenticationKey getCurrentKey() {
+ return currentKey;
+ }
+
+ AuthenticationKey getKey(int keyId) {
+ return allKeys.get(keyId);
+ }
+
+ synchronized void removeExpiredKeys() {
+ if (!leaderElector.isMaster()) {
+ LOG.info("Skipping removeExpiredKeys() because not running as master.");
+ return;
+ }
+
+ long now = EnvironmentEdgeManager.currentTimeMillis();
+ Iterator<AuthenticationKey> iter = allKeys.values().iterator();
+ while (iter.hasNext()) {
+ AuthenticationKey key = iter.next();
+ if (key.getExpiration() < now) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Removing expired key "+key.getKeyId());
+ }
+ iter.remove();
+ zkWatcher.removeKeyFromZK(key);
+ }
+ }
+ }
+
+ synchronized boolean isCurrentKeyRolled() {
+ return currentKey != null;
+ }
+
+ synchronized void rollCurrentKey() {
+ if (!leaderElector.isMaster()) {
+ LOG.info("Skipping rollCurrentKey() because not running as master.");
+ return;
+ }
+
+ long now = EnvironmentEdgeManager.currentTimeMillis();
+ AuthenticationKey prev = currentKey;
+ AuthenticationKey newKey = new AuthenticationKey(++idSeq,
+ Long.MAX_VALUE, // don't allow to expire until it's replaced by a new key
+ generateSecret());
+ allKeys.put(newKey.getKeyId(), newKey);
+ currentKey = newKey;
+ zkWatcher.addKeyToZK(newKey);
+ lastKeyUpdate = now;
+
+ if (prev != null) {
+ // make sure previous key is still stored
+ prev.setExpiration(now + tokenMaxLifetime);
+ allKeys.put(prev.getKeyId(), prev);
+ zkWatcher.updateKeyInZK(prev);
+ }
+ }
+
+ public static SecretKey createSecretKey(byte[] raw) {
+ return SecretManager.createSecretKey(raw);
+ }
+
+ private class LeaderElector extends Thread implements Stoppable {
+ private boolean stopped = false;
+ /** Flag indicating whether we're in charge of rolling/expiring keys */
+ private boolean isMaster = false;
+ private ZKLeaderManager zkLeader;
+
+ public LeaderElector(ZooKeeperWatcher watcher, String serverName) {
+ setDaemon(true);
+ setName("ZKSecretWatcher-leaderElector");
+ zkLeader = new ZKLeaderManager(watcher,
+ ZKUtil.joinZNode(zkWatcher.getRootKeyZNode(), "keymaster"),
+ Bytes.toBytes(serverName), this);
+ }
+
+ public boolean isMaster() {
+ return isMaster;
+ }
+
+ @Override
+ public boolean isStopped() {
+ return stopped;
+ }
+
+ @Override
+ public void stop(String reason) {
+ if (stopped) {
+ return;
+ }
+
+ stopped = true;
+ // prevent further key generation when stopping
+ if (isMaster) {
+ zkLeader.stepDownAsLeader();
+ }
+ isMaster = false;
+ LOG.info("Stopping leader election, because: "+reason);
+ interrupt();
+ }
+
+ public void run() {
+ zkLeader.start();
+ zkLeader.waitToBecomeLeader();
+ isMaster = true;
+
+ while (!stopped) {
+ long now = EnvironmentEdgeManager.currentTimeMillis();
+
+ // clear any expired
+ removeExpiredKeys();
+
+ if (lastKeyUpdate + keyUpdateInterval < now) {
+ // roll a new master key
+ rollCurrentKey();
+ }
+
+ try {
+ Thread.sleep(5000);
+ } catch (InterruptedException ie) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Interrupted waiting for next update", ie);
+ }
+ }
+ }
+ }
+ }
+}
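A hedged sketch of the sign/verify round trip this secret manager supports, mirroring testTokenCreation() above; the passed-in secretManager is assumed to be started and to hold a current key.

    import java.io.IOException;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.Writables;
    import org.apache.hadoop.security.token.Token;

    static void verifyRoundTrip(AuthenticationTokenSecretManager secretManager)
        throws IOException {
      Token<AuthenticationTokenIdentifier> token = secretManager.generateToken("alice");
      AuthenticationTokenIdentifier ident = new AuthenticationTokenIdentifier();
      Writables.getWritable(token.getIdentifier(), ident);       // decode the signed identifier
      byte[] recomputed = secretManager.retrievePassword(ident);  // re-sign with the stored master key
      assert Bytes.equals(token.getPassword(), recomputed);       // holds only if key id and fields match
    }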
Index: src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java (revision 0)
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.token;
+
+import java.util.Collection;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+
+public class AuthenticationTokenSelector
+ implements TokenSelector<AuthenticationTokenIdentifier> {
+
+ public AuthenticationTokenSelector() {
+ }
+
+ @Override
+ public Token<AuthenticationTokenIdentifier> selectToken(Text serviceName,
+ Collection<Token<? extends TokenIdentifier>> tokens) {
+ if (serviceName != null) {
+ for (Token<? extends TokenIdentifier> ident : tokens) {
+ if (serviceName.equals(ident.getService()) &&
+ AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(ident.getKind())) {
+ return (Token<AuthenticationTokenIdentifier>)ident;
+ }
+ }
+ }
+ return null;
+ }
+}
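A hedged sketch of how a SASL client might use the selector to pick the HBase token for one cluster out of the caller's credentials; the "cluster-uuid" service name is a placeholder for the cluster id the secret manager writes into the token.

    import java.io.IOException;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    static Token<AuthenticationTokenIdentifier> pickToken() throws IOException {
      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
      // Returns null when no HBASE_AUTH_TOKEN for this service is present,
      // in which case the client falls back to Kerberos (or fails).
      return new AuthenticationTokenSelector()
          .selectToken(new Text("cluster-uuid"), ugi.getTokens());
    }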
Index: src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java (revision 0)
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.token;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.TokenIdentifier;
+
+/**
+ * Represents the identity information stored in an HBase authentication token.
+ */
+public class AuthenticationTokenIdentifier extends TokenIdentifier {
+ public static final byte VERSION = 1;
+ public static final Text AUTH_TOKEN_TYPE = new Text("HBASE_AUTH_TOKEN");
+
+ protected String username;
+ protected int keyId;
+ protected long issueDate;
+ protected long expirationDate;
+ protected long sequenceNumber;
+
+ public AuthenticationTokenIdentifier() {
+ }
+
+ public AuthenticationTokenIdentifier(String username) {
+ this.username = username;
+ }
+
+ public AuthenticationTokenIdentifier(String username, int keyId,
+ long issueDate, long expirationDate) {
+ this.username = username;
+ this.keyId = keyId;
+ this.issueDate = issueDate;
+ this.expirationDate = expirationDate;
+ }
+
+ @Override
+ public Text getKind() {
+ return AUTH_TOKEN_TYPE;
+ }
+
+ @Override
+ public UserGroupInformation getUser() {
+ if (username == null || "".equals(username)) {
+ return null;
+ }
+ return UserGroupInformation.createRemoteUser(username);
+ }
+
+ public String getUsername() {
+ return username;
+ }
+
+ void setUsername(String name) {
+ this.username = name;
+ }
+
+ public int getKeyId() {
+ return keyId;
+ }
+
+ void setKeyId(int id) {
+ this.keyId = id;
+ }
+
+ public long getIssueDate() {
+ return issueDate;
+ }
+
+ void setIssueDate(long timestamp) {
+ this.issueDate = timestamp;
+ }
+
+ public long getExpirationDate() {
+ return expirationDate;
+ }
+
+ void setExpirationDate(long timestamp) {
+ this.expirationDate = timestamp;
+ }
+
+ public long getSequenceNumber() {
+ return sequenceNumber;
+ }
+
+ void setSequenceNumber(long seq) {
+ this.sequenceNumber = seq;
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ out.writeByte(VERSION);
+ WritableUtils.writeString(out, username);
+ WritableUtils.writeVInt(out, keyId);
+ WritableUtils.writeVLong(out, issueDate);
+ WritableUtils.writeVLong(out, expirationDate);
+ WritableUtils.writeVLong(out, sequenceNumber);
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ byte version = in.readByte();
+ if (version != VERSION) {
+ throw new IOException("Version mismatch in deserialization: " +
+ "expected="+VERSION+", got="+version);
+ }
+ username = WritableUtils.readString(in);
+ keyId = WritableUtils.readVInt(in);
+ issueDate = WritableUtils.readVLong(in);
+ expirationDate = WritableUtils.readVLong(in);
+ sequenceNumber = WritableUtils.readVLong(in);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null) {
+ return false;
+ }
+ if (other instanceof AuthenticationTokenIdentifier) {
+ AuthenticationTokenIdentifier ident = (AuthenticationTokenIdentifier)other;
+ return sequenceNumber == ident.getSequenceNumber()
+ && keyId == ident.getKeyId()
+ && issueDate == ident.getIssueDate()
+ && expirationDate == ident.getExpirationDate()
+ && (username == null ? ident.getUsername() == null :
+ username.equals(ident.getUsername()));
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return (int)sequenceNumber;
+ }
+}
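A hedged sketch of the identifier's Writable round trip (a version byte followed by vint/vlong-encoded fields), the same path Token.getIdentifier() and Writables take; the field values are arbitrary examples.

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.WritableUtils;

    static void roundTrip() throws IOException {
      AuthenticationTokenIdentifier original = new AuthenticationTokenIdentifier(
          "alice", 3, System.currentTimeMillis(), System.currentTimeMillis() + 604800000L);
      byte[] bytes = WritableUtils.toByteArray(original);         // VERSION byte + vint/vlong fields
      AuthenticationTokenIdentifier copy = new AuthenticationTokenIdentifier();
      copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes)));
      assert original.equals(copy);                               // sequenceNumber defaults to 0 in both
    }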
Index: src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java (revision 0)
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.token;
+
+import javax.crypto.SecretKey;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+
+/**
+ * Represents a secret key used for signing and verifying authentication tokens
+ * by {@link AuthenticationTokenSecretManager}.
+ */
+public class AuthenticationKey implements Writable {
+ private int id;
+ private long expirationDate;
+ private SecretKey secret;
+
+ public AuthenticationKey() {
+ // for Writable
+ }
+
+ public AuthenticationKey(int keyId, long expirationDate, SecretKey key) {
+ this.id = keyId;
+ this.expirationDate = expirationDate;
+ this.secret = key;
+ }
+
+ public int getKeyId() {
+ return id;
+ }
+
+ public long getExpiration() {
+ return expirationDate;
+ }
+
+ public void setExpiration(long timestamp) {
+ expirationDate = timestamp;
+ }
+
+ SecretKey getKey() {
+ return secret;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || !(obj instanceof AuthenticationKey)) {
+ return false;
+ }
+ AuthenticationKey other = (AuthenticationKey)obj;
+ return id == other.getKeyId() &&
+ expirationDate == other.getExpiration() &&
+ (secret == null ? other.getKey() == null :
+ other.getKey() != null &&
+ Bytes.equals(secret.getEncoded(), other.getKey().getEncoded()));
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder buf = new StringBuilder();
+ buf.append("AuthenticationKey[ ")
+ .append("id=").append(id)
+ .append(", expiration=").append(expirationDate)
+ .append(" ]");
+ return buf.toString();
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ WritableUtils.writeVInt(out, id);
+ WritableUtils.writeVLong(out, expirationDate);
+ if (secret == null) {
+ WritableUtils.writeVInt(out, -1);
+ } else {
+ byte[] keyBytes = secret.getEncoded();
+ WritableUtils.writeVInt(out, keyBytes.length);
+ out.write(keyBytes);
+ }
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ id = WritableUtils.readVInt(in);
+ expirationDate = WritableUtils.readVLong(in);
+ int keyLength = WritableUtils.readVInt(in);
+ if (keyLength < 0) {
+ secret = null;
+ } else {
+ byte[] keyBytes = new byte[keyLength];
+ in.readFully(keyBytes);
+ secret = AuthenticationTokenSecretManager.createSecretKey(keyBytes);
+ }
+ }
+}
Index: src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java (revision 0)
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.token;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * Synchronizes token encryption keys across cluster nodes.
+ */
+public class ZKSecretWatcher extends ZooKeeperListener {
+ private static final String DEFAULT_ROOT_NODE = "tokenauth";
+ private static final String DEFAULT_KEYS_PARENT = "keys";
+ private static Log LOG = LogFactory.getLog(ZKSecretWatcher.class);
+
+ private AuthenticationTokenSecretManager secretManager;
+ private String baseKeyZNode;
+ private String keysParentZNode;
+
+ public ZKSecretWatcher(Configuration conf,
+ ZooKeeperWatcher watcher,
+ AuthenticationTokenSecretManager secretManager) {
+ super(watcher);
+ this.secretManager = secretManager;
+ String keyZNodeParent = conf.get("zookeeper.znode.tokenauth.parent", DEFAULT_ROOT_NODE);
+ this.baseKeyZNode = ZKUtil.joinZNode(watcher.baseZNode, keyZNodeParent);
+ this.keysParentZNode = ZKUtil.joinZNode(baseKeyZNode, DEFAULT_KEYS_PARENT);
+ }
+
+ public void start() throws KeeperException {
+ watcher.registerListener(this);
+ // make sure the base node exists
+ ZKUtil.createWithParents(watcher, keysParentZNode);
+
+ if (ZKUtil.watchAndCheckExists(watcher, keysParentZNode)) {
+ List<ZKUtil.NodeAndData> nodes =
+ ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode);
+ refreshNodes(nodes);
+ }
+ }
+
+ @Override
+ public void nodeCreated(String path) {
+ if (path.equals(keysParentZNode)) {
+ try {
+ List<ZKUtil.NodeAndData> nodes =
+ ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode);
+ refreshNodes(nodes);
+ } catch (KeeperException ke) {
+ LOG.fatal("Error reading data from zookeeper", ke);
+ watcher.abort("Error reading new key znode "+path, ke);
+ }
+ }
+ }
+
+ @Override
+ public void nodeDeleted(String path) {
+ if (keysParentZNode.equals(ZKUtil.getParent(path))) {
+ String keyId = ZKUtil.getNodeName(path);
+ try {
+ Integer id = new Integer(keyId);
+ secretManager.removeKey(id);
+ } catch (NumberFormatException nfe) {
+ LOG.error("Invalid znode name for key ID '"+keyId+"'", nfe);
+ }
+ }
+ }
+
+ @Override
+ public void nodeDataChanged(String path) {
+ if (keysParentZNode.equals(ZKUtil.getParent(path))) {
+ try {
+ byte[] data = ZKUtil.getDataAndWatch(watcher, path);
+ if (data == null || data.length == 0) {
+ LOG.debug("Ignoring empty node "+path);
+ return;
+ }
+
+ AuthenticationKey key = (AuthenticationKey)Writables.getWritable(data,
+ new AuthenticationKey());
+ secretManager.addKey(key);
+ } catch (KeeperException ke) {
+ LOG.fatal("Error reading data from zookeeper", ke);
+ watcher.abort("Error reading updated key znode "+path, ke);
+ } catch (IOException ioe) {
+ LOG.fatal("Error reading key writables", ioe);
+ watcher.abort("Error reading key writables from znode "+path, ioe);
+ }
+ }
+ }
+
+ @Override
+ public void nodeChildrenChanged(String path) {
+ if (path.equals(keysParentZNode)) {
+ // keys changed
+ try {
+ List<ZKUtil.NodeAndData> nodes =
+ ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode);
+ refreshNodes(nodes);
+ } catch (KeeperException ke) {
+ LOG.fatal("Error reading data from zookeeper", ke);
+ watcher.abort("Error reading changed keys from zookeeper", ke);
+ }
+ }
+ }
+
+ public String getRootKeyZNode() {
+ return baseKeyZNode;
+ }
+
+ private void refreshNodes(List<ZKUtil.NodeAndData> nodes) {
+ for (ZKUtil.NodeAndData n : nodes) {
+ String path = n.getNode();
+ String keyId = ZKUtil.getNodeName(path);
+ try {
+ byte[] data = n.getData();
+ if (data == null || data.length == 0) {
+ LOG.debug("Ignoring empty node "+path);
+ continue;
+ }
+ AuthenticationKey key = (AuthenticationKey)Writables.getWritable(
+ data, new AuthenticationKey());
+ secretManager.addKey(key);
+ } catch (IOException ioe) {
+ LOG.fatal("Failed reading new secret key for id '" + keyId +
+ "' from zk", ioe);
+ watcher.abort("Error deserializing key from znode "+path, ioe);
+ }
+ }
+ }
+
+ private String getKeyNode(int keyId) {
+ return ZKUtil.joinZNode(keysParentZNode, Integer.toString(keyId));
+ }
+
+ public void removeKeyFromZK(AuthenticationKey key) {
+ String keyZNode = getKeyNode(key.getKeyId());
+ try {
+ ZKUtil.deleteNode(watcher, keyZNode);
+ } catch (KeeperException.NoNodeException nne) {
+ LOG.error("Non-existent znode "+keyZNode+" for key "+key.getKeyId(), nne);
+ } catch (KeeperException ke) {
+ LOG.fatal("Failed removing znode "+keyZNode+" for key "+key.getKeyId(),
+ ke);
+ watcher.abort("Unhandled zookeeper error removing znode "+keyZNode+
+ " for key "+key.getKeyId(), ke);
+ }
+ }
+
+ public void addKeyToZK(AuthenticationKey key) {
+ String keyZNode = getKeyNode(key.getKeyId());
+ try {
+ byte[] keyData = Writables.getBytes(key);
+ // TODO: is there any point in retrying beyond what ZK client does?
+ ZKUtil.createSetData(watcher, keyZNode, keyData);
+ } catch (KeeperException ke) {
+ LOG.fatal("Unable to synchronize master key "+key.getKeyId()+
+ " to znode "+keyZNode, ke);
+ watcher.abort("Unable to synchronize secret key "+
+ key.getKeyId()+" in zookeeper", ke);
+ } catch (IOException ioe) {
+ // this can only happen from an error serializing the key
+ watcher.abort("Failed serializing key "+key.getKeyId(), ioe);
+ }
+ }
+
+ public void updateKeyInZK(AuthenticationKey key) {
+ String keyZNode = getKeyNode(key.getKeyId());
+ try {
+ byte[] keyData = Writables.getBytes(key);
+ try {
+ ZKUtil.updateExistingNodeData(watcher, keyZNode, keyData, -1);
+ } catch (KeeperException.NoNodeException ne) {
+ // node was somehow removed, try adding it back
+ ZKUtil.createSetData(watcher, keyZNode, keyData);
+ }
+ } catch (KeeperException ke) {
+ LOG.fatal("Unable to update master key "+key.getKeyId()+
+ " in znode "+keyZNode);
+ watcher.abort("Unable to synchronize secret key "+
+ key.getKeyId()+" in zookeeper", ke);
+ } catch (IOException ioe) {
+ // this can only happen from an error serializing the key
+ watcher.abort("Failed serializing key "+key.getKeyId(), ioe);
+ }
+ }
+}
Index: src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java (revision 0)
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.token;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.BaseEndpointCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.ipc.HBaseServer;
+import org.apache.hadoop.hbase.ipc.RequestContext;
+import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.security.AccessDeniedException;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.Token;
+
+/**
+ * Provides a service for obtaining authentication tokens via the
+ * {@link AuthenticationProtocol} coprocessor protocol.
+ */
+public class TokenProvider extends BaseEndpointCoprocessor
+ implements AuthenticationProtocol {
+
+ public static final long VERSION = 0L;
+ private static Log LOG = LogFactory.getLog(TokenProvider.class);
+
+ private AuthenticationTokenSecretManager secretManager;
+
+
+ @Override
+ public void start(CoprocessorEnvironment env) {
+ super.start(env);
+
+ // if running at region
+ if (env instanceof RegionCoprocessorEnvironment) {
+ RegionCoprocessorEnvironment regionEnv =
+ (RegionCoprocessorEnvironment)env;
+ RpcServer server = regionEnv.getRegionServerServices().getRpcServer();
+ SecretManager<?> mgr = ((HBaseServer)server).getSecretManager();
+ if (mgr instanceof AuthenticationTokenSecretManager) {
+ secretManager = (AuthenticationTokenSecretManager)mgr;
+ }
+ }
+ }
+
+ @Override
+ public Token<AuthenticationTokenIdentifier> getAuthenticationToken()
+ throws IOException {
+ if (secretManager == null) {
+ throw new IOException(
+ "No secret manager configured for token authentication");
+ }
+
+ User currentUser = RequestContext.getRequestUser();
+ UserGroupInformation ugi = null;
+ if (currentUser != null) {
+ ugi = currentUser.getUGI();
+ }
+ if (currentUser == null) {
+ throw new AccessDeniedException("No authenticated user for request!");
+ } else if (ugi.getAuthenticationMethod() !=
+ UserGroupInformation.AuthenticationMethod.KERBEROS) {
+ LOG.warn("Token generation denied for user="+currentUser.getName()
+ +", authMethod="+ugi.getAuthenticationMethod());
+ throw new AccessDeniedException(
+ "Token generation only allowed for Kerberos authenticated clients");
+ }
+
+ return secretManager.generateToken(currentUser.getName());
+ }
+
+ @Override
+ public String whoami() {
+ return RequestContext.getRequestUserName();
+ }
+
+ @Override
+ public long getProtocolVersion(String protocol, long clientVersion)
+ throws IOException {
+ if (AuthenticationProtocol.class.getName().equals(protocol)) {
+ return TokenProvider.VERSION;
+ }
+ LOG.warn("Unknown protocol requested: "+protocol);
+ return -1;
+ }
+}
Index: src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java (revision 0)
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.token;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+
+/**
+ * Utility methods for obtaining authentication tokens.
+ */
+public class TokenUtil {
+ private static Log LOG = LogFactory.getLog(TokenUtil.class);
+
+ /**
+ * Obtain and return an authentication token for the current user.
+ * @param conf The configuration for connecting to the cluster
+ * @return the authentication token instance
+ */
+ public static Token<AuthenticationTokenIdentifier> obtainToken(
+ Configuration conf) throws IOException {
+ HTable meta = null;
+ try {
+ meta = new HTable(conf, ".META.");
+ AuthenticationProtocol prot = meta.coprocessorProxy(
+ AuthenticationProtocol.class, HConstants.EMPTY_START_ROW);
+ return prot.getAuthenticationToken();
+ } finally {
+ if (meta != null) {
+ meta.close();
+ }
+ }
+ }
+
+ private static Text getClusterId(Token<AuthenticationTokenIdentifier> token)
+ throws IOException {
+ return token.getService() != null
+ ? token.getService() : new Text("default");
+ }
+
+ /**
+ * Obtain an authentication token for the given user and add it to the
+ * user's credentials.
+ * @param conf The configuration for connecting to the cluster
+ * @param user The user for whom to obtain the token
+ * @throws IOException If making a remote call to the {@link TokenProvider} fails
+ * @throws InterruptedException If executing as the given user is interrupted
+ */
+ public static void obtainAndCacheToken(final Configuration conf,
+ UserGroupInformation user)
+ throws IOException, InterruptedException {
+ try {
+ Token<AuthenticationTokenIdentifier> token =
+ user.doAs(new PrivilegedExceptionAction<Token<AuthenticationTokenIdentifier>>() {
+ public Token<AuthenticationTokenIdentifier> run() throws Exception {
+ return obtainToken(conf);
+ }
+ });
+
+ if (token == null) {
+ throw new IOException("No token returned for user "+user.getUserName());
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Obtained token "+token.getKind().toString()+" for user "+
+ user.getUserName());
+ }
+ user.addToken(token);
+ } catch (IOException ioe) {
+ throw ioe;
+ } catch (InterruptedException ie) {
+ throw ie;
+ } catch (RuntimeException re) {
+ throw re;
+ } catch (Exception e) {
+ throw new UndeclaredThrowableException(e,
+ "Unexpected exception obtaining token for user "+user.getUserName());
+ }
+ }
+
+ /**
+ * Obtain an authentication token on behalf of the given user and add it to
+ * the credentials for the given map reduce job.
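+ *
+ * <p>
+ * A typical call-site sketch (job setup code; the job name is illustrative):
+ * <pre>
+ * Job job = new Job(conf, "my-job");
+ * TokenUtil.obtainTokenForJob(conf, UserGroupInformation.getCurrentUser(), job);
+ * </pre>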
+ * @param conf The configuration for connecting to the cluster
+ * @param user The user for whom to obtain the token
+ * @param job The job instance in which the token should be stored
+ * @throws IOException If making a remote call to the {@link TokenProvider} fails
+ * @throws InterruptedException If executing as the given user is interrupted
+ */
+ public static void obtainTokenForJob(final Configuration conf,
+ UserGroupInformation user, Job job)
+ throws IOException, InterruptedException {
+ try {
+ Token<AuthenticationTokenIdentifier> token =
+ user.doAs(new PrivilegedExceptionAction<Token<AuthenticationTokenIdentifier>>() {
+ public Token<AuthenticationTokenIdentifier> run() throws Exception {
+ return obtainToken(conf);
+ }
+ });
+
+ if (token == null) {
+ throw new IOException("No token returned for user "+user.getUserName());
+ }
+ Text clusterId = getClusterId(token);
+ LOG.info("Obtained token "+token.getKind().toString()+" for user "+
+ user.getUserName() + " on cluster "+clusterId.toString());
+ job.getCredentials().addToken(clusterId, token);
+ } catch (IOException ioe) {
+ throw ioe;
+ } catch (InterruptedException ie) {
+ throw ie;
+ } catch (RuntimeException re) {
+ throw re;
+ } catch (Exception e) {
+ throw new UndeclaredThrowableException(e,
+ "Unexpected exception obtaining token for user "+user.getUserName());
+ }
+ }
+
+ /**
+ * Obtain an authentication token on behalf of the given user and add it to
+ * the credentials for the given map reduce job.
+ * @param user The user for whom to obtain the token
+ * @param job The job configuration in which the token should be stored
+ * @throws IOException If making a remote call to the {@link TokenProvider} fails
+ * @throws InterruptedException If executing as the given user is interrupted
+ */
+ public static void obtainTokenForJob(final JobConf job,
+ UserGroupInformation user)
+ throws IOException, InterruptedException {
+ try {
+ Token<AuthenticationTokenIdentifier> token =
+ user.doAs(new PrivilegedExceptionAction<Token<AuthenticationTokenIdentifier>>() {
+ public Token<AuthenticationTokenIdentifier> run() throws Exception {
+ return obtainToken(job);
+ }
+ });
+
+ if (token == null) {
+ throw new IOException("No token returned for user "+user.getUserName());
+ }
+ Text clusterId = getClusterId(token);
+ LOG.info("Obtained token "+token.getKind().toString()+" for user "+
+ user.getUserName()+" on cluster "+clusterId.toString());
+ job.getCredentials().addToken(clusterId, token);
+ } catch (IOException ioe) {
+ throw ioe;
+ } catch (InterruptedException ie) {
+ throw ie;
+ } catch (RuntimeException re) {
+ throw re;
+ } catch (Exception e) {
+ throw new UndeclaredThrowableException(e,
+ "Unexpected exception obtaining token for user "+user.getUserName());
+ }
+ }
+}
Index: src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java (revision 0)
@@ -0,0 +1,295 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Represents an authorization for access for the given actions, optionally
+ * restricted to the given column family or column qualifier, over the
+ * given table. If the family property is null, it implies
+ * full table access.
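+ *
+ * <p>
+ * A construction sketch (the table and family names below are examples only):
+ * <pre>
+ * TablePermission perm = new TablePermission(Bytes.toBytes("mytable"),
+ *     Bytes.toBytes("info"), TablePermission.Action.READ);
+ * // grants READ on any qualifier under "info", so this check passes:
+ * boolean ok = perm.implies(Bytes.toBytes("mytable"), Bytes.toBytes("info"),
+ *     Bytes.toBytes("colA"), TablePermission.Action.READ);
+ * </pre>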
+ */
+public class TablePermission extends Permission {
+ private static Log LOG = LogFactory.getLog(TablePermission.class);
+
+ private byte[] table;
+ private byte[] family;
+ private byte[] qualifier;
+
+ /** Nullary constructor for Writable, do not use */
+ public TablePermission() {
+ super();
+ }
+
+ /**
+ * Create a new permission for the given table and (optionally) column family,
+ * allowing the given actions.
+ * @param table the table
+ * @param family the family, can be null if a global permission on the table
+ * @param assigned the list of allowed actions
+ */
+ public TablePermission(byte[] table, byte[] family, Action... assigned) {
+ this(table, family, null, assigned);
+ }
+
+ /**
+ * Creates a new permission for the given table, restricted to the given
+ * column family and qualifier, allowing the assigned actions to be performed.
+ * @param table the table
+ * @param family the family, can be null if a global permission on the table
+ * @param qualifier the qualifier, can be null if a family-level permission
+ * @param assigned the list of allowed actions
+ */
+ public TablePermission(byte[] table, byte[] family, byte[] qualifier,
+ Action... assigned) {
+ super(assigned);
+ this.table = table;
+ this.family = family;
+ this.qualifier = qualifier;
+ }
+
+ /**
+ * Creates a new permission for the given table, family and column qualifier,
+ * allowing the actions matching the provided byte codes to be performed.
+ * @param table the table
+ * @param family the family, can be null if a global permission on the table
+ * @param qualifier the qualifier, can be null if a family-level permission
+ * @param actionCodes the list of allowed action codes
+ */
+ public TablePermission(byte[] table, byte[] family, byte[] qualifier,
+ byte[] actionCodes) {
+ super(actionCodes);
+ this.table = table;
+ this.family = family;
+ this.qualifier = qualifier;
+ }
+
+ public byte[] getTable() {
+ return table;
+ }
+
+ public byte[] getFamily() {
+ return family;
+ }
+
+ public byte[] getQualifier() {
+ return qualifier;
+ }
+
+ /**
+ * Checks that a given table operation is authorized by this permission
+ * instance.
+ *
+ * @param table the table where the operation is being performed
+ * @param family the column family to which the operation is restricted,
+ * if null implies "all"
+ * @param qualifier the column qualifier to which the action is restricted,
+ * if null implies "all"
+ * @param action the action being requested
+ * @return true if the action within the given scope is allowed
+ * by this permission, false otherwise
+ */
+ public boolean implies(byte[] table, byte[] family, byte[] qualifier,
+ Action action) {
+ if (!Bytes.equals(this.table, table)) {
+ return false;
+ }
+
+ if (this.family != null &&
+ (family == null ||
+ !Bytes.equals(this.family, family))) {
+ return false;
+ }
+
+ if (this.qualifier != null &&
+ (qualifier == null ||
+ !Bytes.equals(this.qualifier, qualifier))) {
+ return false;
+ }
+
+ // check actions
+ return super.implies(action);
+ }
+
+ /**
+ * Checks if this permission grants access to perform the given action on
+ * the given table and key value.
+ * @param table the table on which the operation is being performed
+ * @param kv the KeyValue on which the operation is being requested
+ * @param action the action requested
+ * @return true if the action is allowed over the given scope
+ * by this permission, otherwise false
+ */
+ public boolean implies(byte[] table, KeyValue kv, Action action) {
+ if (!Bytes.equals(this.table, table)) {
+ return false;
+ }
+
+ if (family != null &&
+ (Bytes.compareTo(family, 0, family.length,
+ kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength()) != 0)) {
+ return false;
+ }
+
+ if (qualifier != null &&
+ (Bytes.compareTo(qualifier, 0, qualifier.length,
+ kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength()) != 0)) {
+ return false;
+ }
+
+ // check actions
+ return super.implies(action);
+ }
+
+ /**
+ * Returns true if this permission matches the given column
+ * family at least. This only indicates a partial match against the table
+ * and column family, however, and does not guarantee that implies() for the
+ * same column family would return true. In the case of a
+ * column-qualifier specific permission, for example, implies() would still
+ * return false.
+ */
+ public boolean matchesFamily(byte[] table, byte[] family, Action action) {
+ if (!Bytes.equals(this.table, table)) {
+ return false;
+ }
+
+ if (this.family != null &&
+ (family == null ||
+ !Bytes.equals(this.family, family))) {
+ return false;
+ }
+
+ // ignore qualifier
+ // check actions
+ return super.implies(action);
+ }
+
+ /**
+ * Returns true if this permission matches the given table, column family and qualifier.
+ * @param table the table name to match
+ * @param family the column family to match
+ * @param qualifier the qualifier name to match
+ * @param action the action requested
+ * @return true if the table, family and qualifier match,
+ * otherwise false
+ */
+ public boolean matchesFamilyQualifier(byte[] table, byte[] family, byte[] qualifier,
+ Action action) {
+ if (!matchesFamily(table, family, action)) {
+ return false;
+ } else {
+ if (this.qualifier != null &&
+ (qualifier == null ||
+ !Bytes.equals(this.qualifier, qualifier))) {
+ return false;
+ }
+ }
+ return super.implies(action);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (!(obj instanceof TablePermission)) {
+ return false;
+ }
+ TablePermission other = (TablePermission)obj;
+
+ if (!(Bytes.equals(table, other.getTable()) &&
+ ((family == null && other.getFamily() == null) ||
+ Bytes.equals(family, other.getFamily())) &&
+ ((qualifier == null && other.getQualifier() == null) ||
+ Bytes.equals(qualifier, other.getQualifier()))
+ )) {
+ return false;
+ }
+
+ // check actions
+ return super.equals(other);
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 37;
+ int result = super.hashCode();
+ if (table != null) {
+ result = prime * result + Bytes.hashCode(table);
+ }
+ if (family != null) {
+ result = prime * result + Bytes.hashCode(family);
+ }
+ if (qualifier != null) {
+ result = prime * result + Bytes.hashCode(qualifier);
+ }
+ return result;
+ }
+
+ public String toString() {
+ StringBuilder str = new StringBuilder("[TablePermission: ")
+ .append("table=").append(Bytes.toString(table))
+ .append(", family=").append(Bytes.toString(family))
+ .append(", qualifier=").append(Bytes.toString(qualifier))
+ .append(", actions=");
+ if (actions != null) {
+ for (int i=0; i<actions.length; i++) {
+ if (i > 0)
+ str.append(",");
+ if (actions[i] != null)
+ str.append(actions[i].toString());
+ else
+ str.append("NULL");
+ }
+ }
+ str.append("]");
+
+ return str.toString();
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ table = Bytes.readByteArray(in);
+ if (in.readBoolean()) {
+ family = Bytes.readByteArray(in);
+ }
+ if (in.readBoolean()) {
+ qualifier = Bytes.readByteArray(in);
+ }
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ Bytes.writeByteArray(out, table);
+ out.writeBoolean(family != null);
+ if (family != null) {
+ Bytes.writeByteArray(out, family);
+ }
+ out.writeBoolean(qualifier != null);
+ if (qualifier != null) {
+ Bytes.writeByteArray(out, qualifier);
+ }
+ }
+}
Index: src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java (revision 0)
@@ -0,0 +1,514 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ListMultimap;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.HbaseObjectWritable;
+import org.apache.hadoop.hbase.io.hfile.Compression;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.io.Text;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * Maintains lists of permission grants to users and groups to allow for
+ * authorization checks by {@link AccessController}.
+ *
+ * <p>
+ * Access control lists are stored in an "internal" metadata table named
+ * {@code _acl_}. Each table's permission grants are stored as a separate row,
+ * keyed by the table name. KeyValues for permission assignments are stored
+ * in one of the following formats:
+ * <pre>
+ * Key                      Desc
+ * --------                 --------
+ * user                     table level permissions for a user [R=read, W=write]
+ * @group                   table level permissions for a group
+ * user,family              column family level permissions for a user
+ * @group,family            column family level permissions for a group
+ * user,family,qualifier    column qualifier level permissions for a user
+ * @group,family,qualifier  column qualifier level permissions for a group
+ * </pre>
+ * All values are encoded as byte arrays containing the codes from the
+ * {@link org.apache.hadoop.hbase.security.access.TablePermission.Action} enum.
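+ * <p>
+ * For example (the user and column names are illustrative), a row keyed by the
+ * table name with an {@code l:} column key of {@code bob,info,colA} and value
+ * {@code [R]} grants user "bob" read access to column {@code info:colA} only.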
+ *
+ */
+public class AccessControlLists {
+ /** Internal storage table for access control lists */
+ public static final String ACL_TABLE_NAME_STR = "_acl_";
+ public static final byte[] ACL_TABLE_NAME = Bytes.toBytes(ACL_TABLE_NAME_STR);
+ /** Column family used to store ACL grants */
+ public static final String ACL_LIST_FAMILY_STR = "l";
+ public static final byte[] ACL_LIST_FAMILY = Bytes.toBytes(ACL_LIST_FAMILY_STR);
+
+ /** Table descriptor for ACL internal table */
+ public static final HTableDescriptor ACL_TABLEDESC = new HTableDescriptor(
+ ACL_TABLE_NAME);
+ static {
+ ACL_TABLEDESC.addFamily(
+ new HColumnDescriptor(ACL_LIST_FAMILY,
+ 10, // Ten is arbitrary number. Keep versions to help debugging.
+ Compression.Algorithm.NONE.getName(), true, true, 8 * 1024,
+ HConstants.FOREVER, StoreFile.BloomType.NONE.toString(),
+ HConstants.REPLICATION_SCOPE_LOCAL));
+ }
+
+ /**
+ * Delimiter to separate user, column family, and qualifier in
+ * _acl_ table info: column keys */
+ public static final char ACL_KEY_DELIMITER = ',';
+ /** Prefix character to denote group names */
+ public static final String GROUP_PREFIX = "@";
+ /** Configuration key for superusers */
+ public static final String SUPERUSER_CONF_KEY = "hbase.superuser";
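+ // The value of "hbase.superuser" is a comma-separated list of users and
+ // groups, with group names prefixed by '@'. Example hbase-site.xml entry
+ // (values are illustrative only):
+ //   <property>
+ //     <name>hbase.superuser</name>
+ //     <value>admin,@hbaseadmins</value>
+ //   </property>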
+
+ private static Log LOG = LogFactory.getLog(AccessControlLists.class);
+
+ /**
+ * Check for existence of {@code _acl_} table and create it if it does not exist
+ * @param master reference to HMaster
+ */
+ static void init(MasterServices master) throws IOException {
+ if (!MetaReader.tableExists(master.getCatalogTracker(), ACL_TABLE_NAME_STR)) {
+ master.createTable(ACL_TABLEDESC, null);
+ }
+ }
+
+ /**
+ * Stores a new table permission grant in the access control lists table.
+ * @param conf the configuration
+ * @param tableName the table to which access is being granted
+ * @param username the user or group being granted the permission
+ * @param perm the details of the permission being granted
+ * @throws IOException in the case of an error accessing the metadata table
+ */
+ static void addTablePermission(Configuration conf,
+ byte[] tableName, String username, TablePermission perm)
+ throws IOException {
+
+ Put p = new Put(tableName);
+ byte[] key = Bytes.toBytes(username);
+ if (perm.getFamily() != null && perm.getFamily().length > 0) {
+ key = Bytes.add(key,
+ Bytes.add(new byte[]{ACL_KEY_DELIMITER}, perm.getFamily()));
+ if (perm.getQualifier() != null && perm.getQualifier().length > 0) {
+ key = Bytes.add(key,
+ Bytes.add(new byte[]{ACL_KEY_DELIMITER}, perm.getQualifier()));
+ }
+ }
+
+ TablePermission.Action[] actions = perm.getActions();
+ if ((actions == null) || (actions.length == 0)) {
+ LOG.warn("No actions associated with user '"+username+"'");
+ return;
+ }
+
+ byte[] value = new byte[actions.length];
+ for (int i = 0; i < actions.length; i++) {
+ value[i] = actions[i].code();
+ }
+ p.add(ACL_LIST_FAMILY, key, value);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Writing permission for table "+
+ Bytes.toString(tableName)+" "+
+ Bytes.toString(key)+": "+Bytes.toStringBinary(value)
+ );
+ }
+ HTable acls = null;
+ try {
+ acls = new HTable(conf, ACL_TABLE_NAME);
+ acls.put(p);
+ } finally {
+ if (acls != null) acls.close();
+ }
+ }
+
+ /**
+ * Removes a previously granted permission from the stored access control
+ * lists. The {@link TablePermission} being removed must exactly match what
+ * is stored -- no wildcard matching is attempted. For example, if user "bob" has
+ * been granted "READ" access to the "data" table, but only to column family
+ * plus qualifier "info:colA", then trying to call this method with only
+ * user "bob" and the table name "data" (but without specifying the
+ * column qualifier "info:colA") will have no effect.
+ *
+ * @param conf the configuration
+ * @param tableName the table of the current permission grant
+ * @param userName the user or group currently granted the permission
+ * @param perm the details of the permission to be revoked
+ * @throws IOException if there is an error accessing the metadata table
+ */
+ static void removeTablePermission(Configuration conf,
+ byte[] tableName, String userName, TablePermission perm)
+ throws IOException {
+
+ Delete d = new Delete(tableName);
+ byte[] key = null;
+ if (perm.getFamily() != null && perm.getFamily().length > 0) {
+ key = Bytes.toBytes(userName + ACL_KEY_DELIMITER +
+ Bytes.toString(perm.getFamily()));
+ if (perm.getQualifier() != null && perm.getQualifier().length > 0) {
+ key = Bytes.toBytes(userName + ACL_KEY_DELIMITER +
+ Bytes.toString(perm.getFamily()) + ACL_KEY_DELIMITER +
+ Bytes.toString(perm.getQualifier()));
+ } else {
+ key = Bytes.toBytes(userName + ACL_KEY_DELIMITER +
+ Bytes.toString(perm.getFamily()));
+ }
+ } else {
+ key = Bytes.toBytes(userName);
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Removing permission for user '" + userName+ "': "+
+ perm.toString());
+ }
+ d.deleteColumns(ACL_LIST_FAMILY, key);
+ HTable acls = null;
+ try {
+ acls = new HTable(conf, ACL_TABLE_NAME);
+ acls.delete(d);
+ } finally {
+ if (acls != null) acls.close();
+ }
+ }
+
+ /**
+ * Returns {@code true} if the given region is part of the {@code _acl_}
+ * metadata table.
+ */
+ static boolean isAclRegion(HRegion region) {
+ return Bytes.equals(ACL_TABLE_NAME, region.getTableDesc().getName());
+ }
+
+ /**
+ * Loads all of the permission grants stored in a region of the {@code _acl_}
+ * table.
+ *
+ * @param aclRegion the region of the _acl_ table to load from
+ * @return a map of table name to the permission grants stored for that table
+ * @throws IOException
+ */
+ static Map<byte[], ListMultimap<String,TablePermission>> loadAll(
+ HRegion aclRegion)
+ throws IOException {
+
+ if (!isAclRegion(aclRegion)) {
+ throw new IOException("Can only load permissions from "+ACL_TABLE_NAME_STR);
+ }
+
+ Map<byte[], ListMultimap<String,TablePermission>> allPerms =
+ new TreeMap<byte[], ListMultimap<String,TablePermission>>(Bytes.BYTES_COMPARATOR);
+
+ // do a full scan of _acl_ table
+
+ Scan scan = new Scan();
+ scan.addFamily(ACL_LIST_FAMILY);
+
+ InternalScanner iScanner = null;
+ try {
+ iScanner = aclRegion.getScanner(scan);
+
+ while (true) {
+ List<KeyValue> row = new ArrayList<KeyValue>();
+
+ boolean hasNext = iScanner.next(row);
+ ListMultimap<String,TablePermission> perms = ArrayListMultimap.create();
+ byte[] table = null;
+ for (KeyValue kv : row) {
+ if (table == null) {
+ table = kv.getRow();
+ }
+ Pair<String,TablePermission> permissionsOfUserOnTable =
+ parseTablePermissionRecord(table, kv);
+ if (permissionsOfUserOnTable != null) {
+ String username = permissionsOfUserOnTable.getFirst();
+ TablePermission permissions = permissionsOfUserOnTable.getSecond();
+ perms.put(username, permissions);
+ }
+ }
+ if (table != null) {
+ allPerms.put(table, perms);
+ }
+ if (!hasNext) {
+ break;
+ }
+ }
+ } finally {
+ if (iScanner != null) {
+ iScanner.close();
+ }
+ }
+
+ return allPerms;
+ }
+
+ /**
+ * Load all permissions from the region server holding {@code _acl_},
+ * primarily intended for testing purposes.
+ */
+ static Map<byte[], ListMultimap<String,TablePermission>> loadAll(
+ Configuration conf) throws IOException {
+ Map<byte[], ListMultimap<String,TablePermission>> allPerms =
+ new TreeMap<byte[], ListMultimap<String,TablePermission>>(Bytes.BYTES_COMPARATOR);
+
+ // do a full scan of _acl_, filtering on only first table region rows
+
+ Scan scan = new Scan();
+ scan.addFamily(ACL_LIST_FAMILY);
+
+ HTable acls = null;
+ ResultScanner scanner = null;
+ try {
+ acls = new HTable(conf, ACL_TABLE_NAME);
+ scanner = acls.getScanner(scan);
+ for (Result row : scanner) {
+ ListMultimap<String,TablePermission> resultPerms =
+ parseTablePermissions(row.getRow(), row);
+ allPerms.put(row.getRow(), resultPerms);
+ }
+ } finally {
+ if (scanner != null) scanner.close();
+ if (acls != null) acls.close();
+ }
+
+ return allPerms;
+ }
+
+ /**
+ * Reads user permission assignments stored in the l: column
+ * family of the first table row in _acl_.
+ *
+ *
+ * See {@link AccessControlLists class documentation} for the key structure
+ * used for storage.
+ *
+ */
+ static ListMultimap<String,TablePermission> getTablePermissions(
+ Configuration conf, byte[] tableName)
+ throws IOException {
+ /* TODO: -ROOT- and .META. cannot easily be handled because they must be
+ * online before _acl_ table. Can anything be done here?
+ */
+ if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME) ||
+ Bytes.equals(tableName, HConstants.META_TABLE_NAME) ||
+ Bytes.equals(tableName, AccessControlLists.ACL_TABLE_NAME)) {
+ return ArrayListMultimap.create(0,0);
+ }
+
+ // for normal user tables, we just read the table row from _acl_
+ ListMultimap<String,TablePermission> perms = ArrayListMultimap.create();
+ HTable acls = null;
+ try {
+ acls = new HTable(conf, ACL_TABLE_NAME);
+ Get get = new Get(tableName);
+ get.addFamily(ACL_LIST_FAMILY);
+ Result row = acls.get(get);
+ if (!row.isEmpty()) {
+ perms = parseTablePermissions(tableName, row);
+ } else {
+ LOG.info("No permissions found in "+ACL_TABLE_NAME_STR+
+ " for table "+Bytes.toString(tableName));
+ }
+ } finally {
+ if (acls != null) acls.close();
+ }
+
+ return perms;
+ }
+
+ /**
+ * Returns the currently granted permissions for a given table as a list of
+ * user plus associated permissions.
+ */
+ static List<UserPermission> getUserPermissions(
+ Configuration conf, byte[] tableName)
+ throws IOException {
+ ListMultimap<String,TablePermission> allPerms = getTablePermissions(
+ conf, tableName);
+
+ List<UserPermission> perms = new ArrayList<UserPermission>();
+
+ for (Map.Entry<String,TablePermission> entry : allPerms.entries()) {
+ UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
+ entry.getValue().getTable(), entry.getValue().getFamily(),
+ entry.getValue().getQualifier(), entry.getValue().getActions());
+ perms.add(up);
+ }
+ return perms;
+ }
+
+ private static ListMultimap<String,TablePermission> parseTablePermissions(
+ byte[] table, Result result) {
+ ListMultimap<String,TablePermission> perms = ArrayListMultimap.create();
+ if (result != null && result.size() > 0) {
+ for (KeyValue kv : result.raw()) {
+
+ Pair<String,TablePermission> permissionsOfUserOnTable =
+ parseTablePermissionRecord(table, kv);
+
+ if (permissionsOfUserOnTable != null) {
+ String username = permissionsOfUserOnTable.getFirst();
+ TablePermission permissions = permissionsOfUserOnTable.getSecond();
+ perms.put(username, permissions);
+ }
+ }
+ }
+ return perms;
+ }
+
+ private static Pair<String,TablePermission> parseTablePermissionRecord(
+ byte[] table, KeyValue kv) {
+ // returns a (username, TablePermission) pair decoded from the permission record kv
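+ // Illustrative key layouts (the user/family/qualifier names are examples):
+ //   "bob"           -> table-level permission for user bob
+ //   "bob,info"      -> family-level permission on family "info" for bob
+ //   "bob,info,colA" -> qualifier-level permission on "info:colA" for bob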
+ byte[] family = kv.getFamily();
+
+ if (!Bytes.equals(family, ACL_LIST_FAMILY)) {
+ return null;
+ }
+
+ byte[] key = kv.getQualifier();
+ byte[] value = kv.getValue();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Read acl: kv ["+
+ Bytes.toStringBinary(key)+": "+
+ Bytes.toStringBinary(value)+"]");
+ }
+
+ // check for a column family appended to the key
+ // TODO: avoid the string conversion to make this more efficient
+ String username = Bytes.toString(key);
+ int idx = username.indexOf(ACL_KEY_DELIMITER);
+ byte[] permFamily = null;
+ byte[] permQualifier = null;
+ if (idx > 0 && idx < username.length()-1) {
+ String remainder = username.substring(idx+1);
+ username = username.substring(0, idx);
+ idx = remainder.indexOf(ACL_KEY_DELIMITER);
+ if (idx > 0 && idx < remainder.length()-1) {
+ permFamily = Bytes.toBytes(remainder.substring(0, idx));
+ permQualifier = Bytes.toBytes(remainder.substring(idx+1));
+ } else {
+ permFamily = Bytes.toBytes(remainder);
+ }
+ }
+
+ return new Pair<String,TablePermission>(
+ username, new TablePermission(table, permFamily, permQualifier, value));
+ }
+
+ /**
+ * Writes a set of permissions as {@link org.apache.hadoop.io.Writable} instances
+ * to the given output stream.
+ * @param out
+ * @param perms
+ * @param conf
+ * @throws IOException
+ */
+ public static void writePermissions(DataOutput out,
+ ListMultimap<String,TablePermission> perms, Configuration conf)
+ throws IOException {
+ Set<String> keys = perms.keySet();
+ out.writeInt(keys.size());
+ for (String key : keys) {
+ Text.writeString(out, key);
+ HbaseObjectWritable.writeObject(out, perms.get(key), List.class, conf);
+ }
+ }
+
+ /**
+ * Writes a set of permissions as {@link org.apache.hadoop.io.Writable} instances
+ * and returns the resulting byte array.
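+ *
+ * <p>
+ * Sketch of the intended round trip with readPermissions (illustrative):
+ * <pre>
+ * byte[] data = AccessControlLists.writePermissionsAsBytes(perms, conf);
+ * ListMultimap<String,TablePermission> copy = AccessControlLists.readPermissions(
+ *     new DataInputStream(new ByteArrayInputStream(data)), conf);
+ * </pre>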
+ */
+ public static byte[] writePermissionsAsBytes(
+ ListMultimap<String,TablePermission> perms, Configuration conf) {
+ try {
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ writePermissions(new DataOutputStream(bos), perms, conf);
+ return bos.toByteArray();
+ } catch (IOException ioe) {
+ // shouldn't happen here
+ LOG.error("Error serializing permissions", ioe);
+ }
+ return null;
+ }
+
+ /**
+ * Reads a set of permissions as {@link org.apache.hadoop.io.Writable} instances
+ * from the input stream.
+ */
+ public static ListMultimap<String,TablePermission> readPermissions(
+ DataInput in, Configuration conf) throws IOException {
+ ListMultimap<String,TablePermission> perms = ArrayListMultimap.create();
+ int length = in.readInt();
+ for (int i=0; i<length; i++) {
+ String user = Text.readString(in);
+ List<TablePermission> userPerms =
+ (List<TablePermission>)HbaseObjectWritable.readObject(in, conf);
+ perms.putAll(user, userPerms);
+ }
+
+ return perms;
+ }
+
+ /**
+ * Returns whether or not the given name should be interpreted as a group
+ * principal. Currently this simply checks if the name starts with the
+ * special group prefix character ("@").
+ */
+ public static boolean isGroupPrincipal(String name) {
+ return name != null && name.startsWith(GROUP_PREFIX);
+ }
+
+ /**
+ * Returns the actual name for a group principal (stripped of the
+ * group prefix).
+ */
+ public static String getGroupName(String aclKey) {
+ if (!isGroupPrincipal(aclKey)) {
+ return aclKey;
+ }
+
+ return aclKey.substring(GROUP_PREFIX.length());
+ }
+}
Index: src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java (revision 0)
@@ -0,0 +1,482 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ListMultimap;
+import com.google.common.collect.Lists;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+import java.io.*;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentSkipListMap;
+
+/**
+ * Performs authorization checks for a given user's assigned permissions
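+ *
+ * <p>
+ * A minimal usage sketch (the watcher, conf and user values are assumed to be
+ * available from the surrounding coprocessor environment):
+ * <pre>
+ * TableAuthManager auth = TableAuthManager.get(zkWatcher, conf);
+ * boolean canRead = auth.authorize(user, tableName, family,
+ *     Permission.Action.READ);
+ * </pre>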
+ */
+public class TableAuthManager {
+ /** Key for the user and group cache maps for globally assigned permissions */
+ private static final String GLOBAL_CACHE_KEY = ".access.";
+ private static Log LOG = LogFactory.getLog(TableAuthManager.class);
+
+ private static TableAuthManager instance;
+
+ /** Cache of global user permissions */
+ private ListMultimap<String,Permission> USER_CACHE = ArrayListMultimap.create();
+ /** Cache of global group permissions */
+ private ListMultimap<String,Permission> GROUP_CACHE = ArrayListMultimap.create();
+
+ private ConcurrentSkipListMap<byte[], ListMultimap<String,TablePermission>> TABLE_USER_CACHE =
+ new ConcurrentSkipListMap<byte[], ListMultimap<String,TablePermission>>(Bytes.BYTES_COMPARATOR);
+
+ private ConcurrentSkipListMap<byte[], ListMultimap<String,TablePermission>> TABLE_GROUP_CACHE =
+ new ConcurrentSkipListMap<byte[], ListMultimap<String,TablePermission>>(Bytes.BYTES_COMPARATOR);
+
+ private Configuration conf;
+ private ZKPermissionWatcher zkperms;
+
+ private TableAuthManager(ZooKeeperWatcher watcher, Configuration conf)
+ throws IOException {
+ this.conf = conf;
+ this.zkperms = new ZKPermissionWatcher(watcher, this, conf);
+ try {
+ this.zkperms.start();
+ } catch (KeeperException ke) {
+ LOG.error("ZooKeeper initialization failed", ke);
+ }
+
+ // initialize global permissions based on configuration
+ initGlobal(conf);
+ }
+
+ private void initGlobal(Configuration conf) throws IOException {
+ User user = User.getCurrent();
+ if (user == null) {
+ throw new IOException("Unable to obtain the current user, " +
+ "authorization checks for internal operations will not work correctly!");
+ }
+ String currentUser = user.getShortName();
+
+ // the system user is always included
+ List<String> superusers = Lists.asList(currentUser, conf.getStrings(
+ AccessControlLists.SUPERUSER_CONF_KEY, new String[0]));
+ if (superusers != null) {
+ for (String name : superusers) {
+ if (AccessControlLists.isGroupPrincipal(name)) {
+ GROUP_CACHE.put(AccessControlLists.getGroupName(name),
+ new Permission(Permission.Action.values()));
+ } else {
+ USER_CACHE.put(name, new Permission(Permission.Action.values()));
+ }
+ }
+ }
+ }
+
+ public ZKPermissionWatcher getZKPermissionWatcher() {
+ return this.zkperms;
+ }
+
+ public void refreshCacheFromWritable(byte[] table, byte[] data) throws IOException {
+ if (data != null && data.length > 0) {
+ DataInput in = new DataInputStream( new ByteArrayInputStream(data) );
+ ListMultimap<String,TablePermission> perms = AccessControlLists.readPermissions(in, conf);
+ cache(table, perms);
+ } else {
+ LOG.debug("Skipping permission cache refresh because writable data is empty");
+ }
+ }
+
+ /**
+ * Updates the internal permissions cache for a single table, splitting
+ * the permissions listed into separate caches for users and groups to optimize
+ * group lookups.
+ *
+ * @param table
+ * @param tablePerms
+ */
+ private void cache(byte[] table,
+ ListMultimap<String,TablePermission> tablePerms) {
+ // split user from group assignments so we don't have to prepend the group
+ // prefix every time we query for groups
+ ListMultimap<String,TablePermission> userPerms = ArrayListMultimap.create();
+ ListMultimap<String,TablePermission> groupPerms = ArrayListMultimap.create();
+
+ if (tablePerms != null) {
+ for (Map.Entry<String,TablePermission> entry : tablePerms.entries()) {
+ if (AccessControlLists.isGroupPrincipal(entry.getKey())) {
+ groupPerms.put(
+ entry.getKey().substring(AccessControlLists.GROUP_PREFIX.length()),
+ entry.getValue());
+ } else {
+ userPerms.put(entry.getKey(), entry.getValue());
+ }
+ }
+ TABLE_GROUP_CACHE.put(table, groupPerms);
+ TABLE_USER_CACHE.put(table, userPerms);
+ }
+ }
+
+ private List<TablePermission> getUserPermissions(String username, byte[] table) {
+ ListMultimap<String,TablePermission> tablePerms = TABLE_USER_CACHE.get(table);
+ if (tablePerms != null) {
+ return tablePerms.get(username);
+ }
+
+ return null;
+ }
+
+ private List<TablePermission> getGroupPermissions(String groupName, byte[] table) {
+ ListMultimap<String,TablePermission> tablePerms = TABLE_GROUP_CACHE.get(table);
+ if (tablePerms != null) {
+ return tablePerms.get(groupName);
+ }
+
+ return null;
+ }
+
+ /**
+ * Authorizes a global permission
+ * @param perms
+ * @param action
+ * @return
+ */
+ private boolean authorize(List<Permission> perms, Permission.Action action) {
+ if (perms != null) {
+ for (Permission p : perms) {
+ if (p.implies(action)) {
+ return true;
+ }
+ }
+ } else if (LOG.isDebugEnabled()) {
+ LOG.debug("No permissions found");
+ }
+
+ return false;
+ }
+
+ /**
+ * Authorize a global permission based on ACLs for the given user and the
+ * user's groups.
+ * @param user
+ * @param action
+ * @return
+ */
+ public boolean authorize(User user, Permission.Action action) {
+ if (user == null) {
+ return false;
+ }
+
+ if (authorize(USER_CACHE.get(user.getShortName()), action)) {
+ return true;
+ }
+
+ String[] groups = user.getGroupNames();
+ if (groups != null) {
+ for (String group : groups) {
+ if (authorize(GROUP_CACHE.get(group), action)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ private boolean authorize(List<TablePermission> perms, byte[] table, byte[] family,
+ Permission.Action action) {
+ return authorize(perms, table, family, null, action);
+ }
+
+ private boolean authorize(List<TablePermission> perms, byte[] table, byte[] family,
+ byte[] qualifier, Permission.Action action) {
+ if (perms != null) {
+ for (TablePermission p : perms) {
+ if (p.implies(table, family, qualifier, action)) {
+ return true;
+ }
+ }
+ } else if (LOG.isDebugEnabled()) {
+ LOG.debug("No permissions found for table="+Bytes.toStringBinary(table));
+ }
+ return false;
+ }
+
+ public boolean authorize(User user, byte[] table, KeyValue kv,
+ TablePermission.Action action) {
+ List<TablePermission> userPerms = getUserPermissions(
+ user.getShortName(), table);
+ if (authorize(userPerms, table, kv, action)) {
+ return true;
+ }
+
+ String[] groupNames = user.getGroupNames();
+ if (groupNames != null) {
+ for (String group : groupNames) {
+ List<TablePermission> groupPerms = getGroupPermissions(group, table);
+ if (authorize(groupPerms, table, kv, action)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ private boolean authorize(List<TablePermission> perms, byte[] table, KeyValue kv,
+ TablePermission.Action action) {
+ if (perms != null) {
+ for (TablePermission p : perms) {
+ if (p.implies(table, kv, action)) {
+ return true;
+ }
+ }
+ } else if (LOG.isDebugEnabled()) {
+ LOG.debug("No permissions for authorize() check, table=" +
+ Bytes.toStringBinary(table));
+ }
+
+ return false;
+ }
+
+ /**
+ * Checks global authorization for a specific action for a user, based on the
+ * stored user permissions.
+ */
+ public boolean authorizeUser(String username, Permission.Action action) {
+ return authorize(USER_CACHE.get(username), action);
+ }
+
+ /**
+ * Checks authorization to a given table and column family for a user, based on the
+ * stored user permissions.
+ *
+ * @param username
+ * @param table
+ * @param family
+ * @param action
+ * @return
+ */
+ public boolean authorizeUser(String username, byte[] table, byte[] family,
+ Permission.Action action) {
+ return authorizeUser(username, table, family, null, action);
+ }
+
+ public boolean authorizeUser(String username, byte[] table, byte[] family,
+ byte[] qualifier, Permission.Action action) {
+ // global authorization supersedes table level
+ if (authorizeUser(username, action)) {
+ return true;
+ }
+ return authorize(getUserPermissions(username, table), table, family,
+ qualifier, action);
+ }
+
+
+ /**
+ * Checks authorization for a given action for a group, based on the stored
+ * permissions.
+ */
+ public boolean authorizeGroup(String groupName, Permission.Action action) {
+ return authorize(GROUP_CACHE.get(groupName), action);
+ }
+
+ /**
+ * Checks authorization to a given table and column family for a group, based
+ * on the stored permissions.
+ * @param groupName
+ * @param table
+ * @param family
+ * @param action
+ * @return
+ */
+ public boolean authorizeGroup(String groupName, byte[] table, byte[] family,
+ Permission.Action action) {
+ // global authorization supersedes table level
+ if (authorizeGroup(groupName, action)) {
+ return true;
+ }
+ return authorize(getGroupPermissions(groupName, table), table, family, action);
+ }
+
+ public boolean authorize(User user, byte[] table, byte[] family,
+ byte[] qualifier, Permission.Action action) {
+ if (authorizeUser(user.getShortName(), table, family, qualifier, action)) {
+ return true;
+ }
+
+ String[] groups = user.getGroupNames();
+ if (groups != null) {
+ for (String group : groups) {
+ if (authorizeGroup(group, table, family, action)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ public boolean authorize(User user, byte[] table, byte[] family,
+ Permission.Action action) {
+ return authorize(user, table, family, null, action);
+ }
+
+ /**
+ * Returns true if the given user has a {@link TablePermission} matching up
+ * to the column family portion of a permission. Note that this permission
+ * may be scoped to a given column qualifier and does not guarantee that
+ * authorize() on the same column family would return true.
+ */
+ public boolean matchPermission(User user,
+ byte[] table, byte[] family, TablePermission.Action action) {
+ List<TablePermission> userPerms = getUserPermissions(
+ user.getShortName(), table);
+ if (userPerms != null) {
+ for (TablePermission p : userPerms) {
+ if (p.matchesFamily(table, family, action)) {
+ return true;
+ }
+ }
+ }
+
+ String[] groups = user.getGroupNames();
+ if (groups != null) {
+ for (String group : groups) {
+ List<TablePermission> groupPerms = getGroupPermissions(group, table);
+ if (groupPerms != null) {
+ for (TablePermission p : groupPerms) {
+ if (p.matchesFamily(table, family, action)) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+
+ return false;
+ }
+
+ public boolean matchPermission(User user,
+ byte[] table, byte[] family, byte[] qualifier,
+ TablePermission.Action action) {
+ List<TablePermission> userPerms = getUserPermissions(
+ user.getShortName(), table);
+ if (userPerms != null) {
+ for (TablePermission p : userPerms) {
+ if (p.matchesFamilyQualifier(table, family, qualifier, action)) {
+ return true;
+ }
+ }
+ }
+
+ String[] groups = user.getGroupNames();
+ if (groups != null) {
+ for (String group : groups) {
+ List<TablePermission> groupPerms = getGroupPermissions(group, table);
+ if (groupPerms != null) {
+ for (TablePermission p : groupPerms) {
+ if (p.matchesFamilyQualifier(table, family, qualifier, action)) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+
+ return false;
+ }
+
+ public void remove(byte[] table) {
+ TABLE_USER_CACHE.remove(table);
+ TABLE_GROUP_CACHE.remove(table);
+ }
+
+ /**
+ * Overwrites the existing permission set for a given user for a table, and
+ * triggers an update for zookeeper synchronization.
+ * @param username
+ * @param table
+ * @param perms
+ */
+ public void setUserPermissions(String username, byte[] table,
+ List<TablePermission> perms) {
+ ListMultimap<String,TablePermission> tablePerms = TABLE_USER_CACHE.get(table);
+ if (tablePerms == null) {
+ tablePerms = ArrayListMultimap.create();
+ TABLE_USER_CACHE.put(table, tablePerms);
+ }
+ tablePerms.replaceValues(username, perms);
+ writeToZooKeeper(table, tablePerms, TABLE_GROUP_CACHE.get(table));
+ }
+
+ /**
+ * Overwrites the existing permission set for a group and triggers an update
+ * for zookeeper synchronization.
+ * @param group
+ * @param table
+ * @param perms
+ */
+ public void setGroupPermissions(String group, byte[] table,
+ List<TablePermission> perms) {
+ ListMultimap<String,TablePermission> tablePerms = TABLE_GROUP_CACHE.get(table);
+ if (tablePerms == null) {
+ tablePerms = ArrayListMultimap.create();
+ TABLE_GROUP_CACHE.put(table, tablePerms);
+ }
+ tablePerms.replaceValues(group, perms);
+ writeToZooKeeper(table, TABLE_USER_CACHE.get(table), tablePerms);
+ }
+
+ public void writeToZooKeeper(byte[] table,
+ ListMultimap<String,TablePermission> userPerms,
+ ListMultimap<String,TablePermission> groupPerms) {
+ ListMultimap<String,TablePermission> tmp = ArrayListMultimap.create();
+ if (userPerms != null) {
+ tmp.putAll(userPerms);
+ }
+ if (groupPerms != null) {
+ for (String group : groupPerms.keySet()) {
+ tmp.putAll(AccessControlLists.GROUP_PREFIX + group,
+ groupPerms.get(group));
+ }
+ }
+ byte[] serialized = AccessControlLists.writePermissionsAsBytes(tmp, conf);
+ zkperms.writeToZookeeper(Bytes.toString(table), serialized);
+ }
+
+ static Map<ZooKeeperWatcher,TableAuthManager> managerMap =
+ new HashMap<ZooKeeperWatcher,TableAuthManager>();
+
+ public synchronized static TableAuthManager get(
+ ZooKeeperWatcher watcher, Configuration conf) throws IOException {
+ instance = managerMap.get(watcher);
+ if (instance == null) {
+ instance = new TableAuthManager(watcher, conf);
+ managerMap.put(watcher, instance);
+ }
+ return instance;
+ }
+}
Index: src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java (revision 0)
@@ -0,0 +1,1132 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
+import org.apache.hadoop.hbase.ipc.HBaseRPC;
+import org.apache.hadoop.hbase.ipc.ProtocolSignature;
+import org.apache.hadoop.hbase.ipc.RequestContext;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.security.AccessDeniedException;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.google.common.collect.ListMultimap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.MapMaker;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+/**
+ * Provides basic authorization checks for data access and administrative
+ * operations.
+ *
+ *
+ * {@code AccessController} performs authorization checks for HBase operations
+ * based on:
+ * <ul>
+ * <li>the identity of the user performing the operation</li>
+ * <li>the scope over which the operation is performed, in increasing
+ * specificity: global, table, column family, or qualifier</li>
+ * <li>the type of action being performed (as mapped to
+ * {@link Permission.Action} values)</li>
+ * </ul>
+ * If the authorization check fails, an {@link AccessDeniedException}
+ * will be thrown for the operation.
+ *
+ *
+ *
+ * To perform authorization checks, {@code AccessController} relies on the
+ * {@link org.apache.hadoop.hbase.ipc.SecureRpcEngine} being loaded to provide
+ * the user identities for remote requests.
+ *
+ *
+ *
+ * The access control lists used for authorization can be manipulated via the
+ * exposed {@link AccessControllerProtocol} implementation, and the associated
+ * {@code grant}, {@code revoke}, and {@code user_permission} HBase shell
+ * commands.
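+ * For example (user and table names illustrative, syntax abridged; see the
+ * shell help for the authoritative form):
+ * <pre>
+ * hbase> grant 'bob', 'RW', 'mytable'
+ * hbase> user_permission 'mytable'
+ * </pre>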
+ *
+ */
+public class AccessController extends BaseRegionObserver
+ implements MasterObserver, AccessControllerProtocol {
+ /**
+ * Represents the result of an authorization check for logging and error
+ * reporting.
+ */
+ private static class AuthResult {
+ private final boolean allowed;
+ private final byte[] table;
+ private final byte[] family;
+ private final byte[] qualifier;
+ private final Permission.Action action;
+ private final String reason;
+ private final User user;
+
+ public AuthResult(boolean allowed, String reason, User user,
+ Permission.Action action, byte[] table, byte[] family, byte[] qualifier) {
+ this.allowed = allowed;
+ this.reason = reason;
+ this.user = user;
+ this.table = table;
+ this.family = family;
+ this.qualifier = qualifier;
+ this.action = action;
+ }
+
+ public boolean isAllowed() { return allowed; }
+
+ public User getUser() { return user; }
+
+ public String getReason() { return reason; }
+
+ public String toContextString() {
+ return "(user=" + (user != null ? user.getName() : "UNKNOWN") + ", " +
+ "scope=" + (table == null ? "GLOBAL" : Bytes.toString(table)) + ", " +
+ "family=" + (family != null ? Bytes.toString(family) : "") + ", " +
+ "qualifer=" + (qualifier != null ? Bytes.toString(qualifier) : "") + ", " +
+ "action=" + (action != null ? action.toString() : "") + ")";
+ }
+
+ public String toString() {
+ return new StringBuilder("AuthResult")
+ .append(toContextString()).toString();
+ }
+
+ public static AuthResult allow(String reason, User user,
+ Permission.Action action, byte[] table) {
+ return new AuthResult(true, reason, user, action, table, null, null);
+ }
+
+ public static AuthResult deny(String reason, User user,
+ Permission.Action action, byte[] table) {
+ return new AuthResult(false, reason, user, action, table, null, null);
+ }
+
+ public static AuthResult deny(String reason, User user,
+ Permission.Action action, byte[] table, byte[] family, byte[] qualifier) {
+ return new AuthResult(false, reason, user, action, table, family, qualifier);
+ }
+ }
+
+ public static final Log LOG = LogFactory.getLog(AccessController.class);
+
+ private static final Log AUDITLOG =
+ LogFactory.getLog("SecurityLogger."+AccessController.class.getName());
+
+ /**
+ * Version number for AccessControllerProtocol
+ */
+ private static final long PROTOCOL_VERSION = 1L;
+
+ TableAuthManager authManager = null;
+
+ // flags if we are running on a region of the _acl_ table
+ boolean aclRegion = false;
+
+ // defined only for Endpoint implementation, so it can have way to
+ // access region services.
+ private RegionCoprocessorEnvironment regionEnv;
+
+ /** Mapping of scanner instances to the user who created them */
+ private Map<InternalScanner,String> scannerOwners =
+ new MapMaker().weakKeys().makeMap();
+
+ void initialize(RegionCoprocessorEnvironment e) throws IOException {
+ final HRegion region = e.getRegion();
+
+ Map<byte[], ListMultimap<String,TablePermission>> tables =
+ AccessControlLists.loadAll(region);
+ // For each table, write out the table's permissions to the respective
+ // znode for that table.
+ for (Map.Entry<byte[], ListMultimap<String,TablePermission>> t:
+ tables.entrySet()) {
+ byte[] table = t.getKey();
+ String tableName = Bytes.toString(table);
+ ListMultimap<String,TablePermission> perms = t.getValue();
+ byte[] serialized = AccessControlLists.writePermissionsAsBytes(perms,
+ regionEnv.getConfiguration());
+ this.authManager.getZKPermissionWatcher().writeToZookeeper(tableName,
+ serialized);
+ }
+ }
+
+ /**
+ * Writes all table ACLs for the tables in the given Map up into ZooKeeper
+ * znodes. This is called to synchronize ACL changes following {@code _acl_}
+ * table updates.
+ */
+ void updateACL(RegionCoprocessorEnvironment e,
+ final Map<byte[], List<KeyValue>> familyMap) {
+ Set<String> tableSet = new HashSet<String>();
+ for (Map.Entry<byte[], List<KeyValue>> f : familyMap.entrySet()) {
+ List<KeyValue> kvs = f.getValue();
+ for (KeyValue kv: kvs) {
+ if (Bytes.compareTo(kv.getBuffer(), kv.getFamilyOffset(),
+ kv.getFamilyLength(), AccessControlLists.ACL_LIST_FAMILY, 0,
+ AccessControlLists.ACL_LIST_FAMILY.length) == 0) {
+ String tableName = Bytes.toString(kv.getRow());
+ tableSet.add(tableName);
+ }
+ }
+ }
+
+ for (String tableName: tableSet) {
+ try {
+ ListMultimap<String,TablePermission> perms =
+ AccessControlLists.getTablePermissions(regionEnv.getConfiguration(),
+ Bytes.toBytes(tableName));
+ byte[] serialized = AccessControlLists.writePermissionsAsBytes(
+ perms, regionEnv.getConfiguration());
+ this.authManager.getZKPermissionWatcher().writeToZookeeper(tableName,
+ serialized);
+ } catch (IOException ex) {
+ LOG.error("Failed updating permissions mirror for '" + tableName +
+ "'", ex);
+ }
+ }
+ }
+
+ /**
+ * Check the current user for authorization to perform a specific action
+ * against the given set of row data.
+ *
+ *
+ * <p>Note: Ordering of the authorization checks
+ * has been carefully optimized to short-circuit the most common requests
+ * and minimize the amount of processing required.
+ *
+ * @param user the user requesting the action
+ * @param permRequest the action being requested
+ * @param e the coprocessor environment
+ * @param families the map of column families to qualifiers present in
+ * the request
+ * @return an AuthResult indicating whether the request is allowed or denied
+ */
+ AuthResult permissionGranted(User user, TablePermission.Action permRequest,
+ RegionCoprocessorEnvironment e,
+ Map<byte[], ? extends Collection<?>> families) {
+ HRegionInfo hri = e.getRegion().getRegionInfo();
+ HTableDescriptor htd = e.getRegion().getTableDesc();
+ byte[] tableName = hri.getTableName();
+
+ // 1. All users need read access to .META. and -ROOT- tables.
+ // this is a very common operation, so deal with it quickly.
+ if ((hri.isRootRegion() || hri.isMetaRegion()) &&
+ (permRequest == TablePermission.Action.READ)) {
+ return AuthResult.allow("All users allowed", user, permRequest,
+ hri.getTableName());
+ }
+
+ if (user == null) {
+ return AuthResult.deny("No user associated with request!", null,
+ permRequest, hri.getTableName());
+ }
+
+ // 2. The table owner has full privileges
+ String owner = htd.getOwnerString();
+ if (user.getShortName().equals(owner)) {
+ // owner of the table has full access
+ return AuthResult.allow("User is table owner", user, permRequest,
+ hri.getTableName());
+ }
+
+ // 3. check for the table-level, if successful we can short-circuit
+ if (authManager.authorize(user, tableName, (byte[])null, permRequest)) {
+ return AuthResult.allow("Table permission granted", user,
+ permRequest, tableName);
+ }
+
+ // 4. check permissions against the requested families
+ if (families != null && families.size() > 0) {
+ // all families must pass
+ for (Map.Entry<byte[], ? extends Collection<?>> family : families.entrySet()) {
+ // a) check for family level access
+ if (authManager.authorize(user, tableName, family.getKey(),
+ permRequest)) {
+ continue; // family-level permission overrides per-qualifier
+ }
+
+ // b) qualifier level access can still succeed
+ if ((family.getValue() != null) && (family.getValue().size() > 0)) {
+ if (family.getValue() instanceof Set) {
+ // for each qualifier of the family
+ Set<byte[]> familySet = (Set<byte[]>)family.getValue();
+ for (byte[] qualifier : familySet) {
+ if (!authManager.authorize(user, tableName, family.getKey(),
+ qualifier, permRequest)) {
+ return AuthResult.deny("Failed qualifier check", user,
+ permRequest, tableName, family.getKey(), qualifier);
+ }
+ }
+ } else if (family.getValue() instanceof List) { // List
+ List<KeyValue> kvList = (List<KeyValue>)family.getValue();
+ for (KeyValue kv : kvList) {
+ if (!authManager.authorize(user, tableName, family.getKey(),
+ kv.getQualifier(), permRequest)) {
+ return AuthResult.deny("Failed qualifier check", user,
+ permRequest, tableName, family.getKey(), kv.getQualifier());
+ }
+ }
+ }
+ } else {
+ // no qualifiers and family-level check already failed
+ return AuthResult.deny("Failed family check", user, permRequest,
+ tableName, family.getKey(), null);
+ }
+ }
+
+ // all family checks passed
+ return AuthResult.allow("All family checks passed", user, permRequest,
+ tableName);
+ }
+
+ // 5. no families to check and table level access failed
+ return AuthResult.deny("No families to check and table permission failed",
+ user, permRequest, tableName);
+ }
+
+ private void logResult(AuthResult result) {
+ if (AUDITLOG.isTraceEnabled()) {
+ AUDITLOG.trace("Access " + (result.isAllowed() ? "allowed" : "denied") +
+ " for user " + (result.getUser() != null ? result.getUser().getShortName() : "UNKNOWN") +
+ "; reason: " + result.getReason() +
+ "; context: " + result.toContextString());
+ }
+ }
+
+ /**
+ * Returns the active user to which authorization checks should be applied.
+ * If we are in the context of an RPC call, the remote user is used,
+ * otherwise the currently logged in user is used.
+ */
+ private User getActiveUser() throws IOException {
+ User user = RequestContext.getRequestUser();
+ if (!RequestContext.isInRequestContext()) {
+ // for non-rpc handling, fallback to system user
+ user = User.getCurrent();
+ }
+ return user;
+ }
+
+ /**
+ * Authorizes that the current user has global privileges for the given action.
+ * @param perm The action being requested
+ * @throws IOException if obtaining the current user fails
+ * @throws AccessDeniedException if authorization is denied
+ */
+ private void requirePermission(Permission.Action perm) throws IOException {
+ User user = getActiveUser();
+ if (authManager.authorize(user, perm)) {
+ logResult(AuthResult.allow("Global check allowed", user, perm, null));
+ } else {
+ logResult(AuthResult.deny("Global check failed", user, perm, null));
+ throw new AccessDeniedException("Insufficient permissions for user '" +
+ (user != null ? user.getShortName() : "null") +"' (global, action=" +
+ perm.toString() + ")");
+ }
+ }
+
+ /**
+ * Authorizes that the current user has permission to perform the given
+ * action on the set of table column families.
+ * @param perm Action that is required
+ * @param env The current coprocessor environment
+ * @param families The set of column families present/required in the request
+ * @throws AccessDeniedException if the authorization check failed
+ */
+ private void requirePermission(Permission.Action perm,
+ RegionCoprocessorEnvironment env, Collection<byte[]> families)
+ throws IOException {
+ // create a map of family-qualifier
+ HashMap<byte[], Collection<byte[]>> familyMap = new HashMap<byte[], Collection<byte[]>>();
+ for (byte[] family : families) {
+ familyMap.put(family, null);
+ }
+ requirePermission(perm, env, familyMap);
+ }
+
+ /**
+ * Authorizes that the current user has permission to perform the given
+ * action on the set of table column families.
+ * @param perm Action that is required
+ * @param env The current coprocessor environment
+ * @param families The map of column families-qualifiers.
+ * @throws AccessDeniedException if the authorization check failed
+ */
+ private void requirePermission(Permission.Action perm,
+ RegionCoprocessorEnvironment env,
+ Map<byte[], ? extends Collection<?>> families)
+ throws IOException {
+ User user = getActiveUser();
+ AuthResult result = permissionGranted(user, perm, env, families);
+ logResult(result);
+
+ if (!result.isAllowed()) {
+ StringBuffer sb = new StringBuffer("");
+ if ((families != null && families.size() > 0)) {
+ for (byte[] familyName : families.keySet()) {
+ if (sb.length() != 0) {
+ sb.append(", ");
+ }
+ sb.append(Bytes.toString(familyName));
+ }
+ }
+ throw new AccessDeniedException("Insufficient permissions (table=" +
+ env.getRegion().getTableDesc().getNameAsString()+
+ ((families != null && families.size() > 0) ? ", family: " +
+ sb.toString() : "") + ", action=" +
+ perm.toString() + ")");
+ }
+ }
+
+ /**
+ * Returns true if the current user is allowed the given action
+ * over at least one of the column qualifiers in the given column families.
+ */
+ private boolean hasFamilyQualifierPermission(User user,
+ TablePermission.Action perm,
+ RegionCoprocessorEnvironment env,
+ Map<byte[], ? extends Set<byte[]>> familyMap)
+ throws IOException {
+ HRegionInfo hri = env.getRegion().getRegionInfo();
+ byte[] tableName = hri.getTableName();
+
+ if (user == null) {
+ return false;
+ }
+
+ if (familyMap != null && familyMap.size() > 0) {
+ // at least one family must be allowed
+ for (Map.Entry<byte[], ? extends Set<byte[]>> family :
+ familyMap.entrySet()) {
+ if (family.getValue() != null && !family.getValue().isEmpty()) {
+ for (byte[] qualifier : family.getValue()) {
+ if (authManager.matchPermission(user, tableName,
+ family.getKey(), qualifier, perm)) {
+ return true;
+ }
+ }
+ } else {
+ if (authManager.matchPermission(user, tableName, family.getKey(),
+ perm)) {
+ return true;
+ }
+ }
+ }
+ } else if (LOG.isDebugEnabled()) {
+ LOG.debug("Empty family map passed for permission check");
+ }
+
+ return false;
+ }
+
+ /* ---- MasterObserver implementation ---- */
+ public void start(CoprocessorEnvironment env) throws IOException {
+ // if running on HMaster
+ if (env instanceof MasterCoprocessorEnvironment) {
+ MasterCoprocessorEnvironment e = (MasterCoprocessorEnvironment)env;
+ this.authManager = TableAuthManager.get(
+ e.getMasterServices().getZooKeeper(),
+ e.getConfiguration());
+ }
+
+ // if running at region
+ if (env instanceof RegionCoprocessorEnvironment) {
+ regionEnv = (RegionCoprocessorEnvironment)env;
+ }
+ }
+
+ public void stop(CoprocessorEnvironment env) {
+
+ }
+
+ @Override
+ public void preCreateTable(ObserverContext c,
+ HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
+ requirePermission(Permission.Action.CREATE);
+
+ // default the table owner if not specified
+ User owner = getActiveUser();
+ if (desc.getOwnerString() == null ||
+ desc.getOwnerString().equals("")) {
+ desc.setOwner(owner);
+ }
+ }
+
+ @Override
+ public void postCreateTable(ObserverContext c,
+ HTableDescriptor desc, HRegionInfo[] regions) throws IOException {}
+
+ @Override
+ public void preDeleteTable(ObserverContext c,
+ byte[] tableName) throws IOException {
+ if (isActiveUserTableOwner(c.getEnvironment(), tableName)) {
+ requirePermission(Permission.Action.CREATE);
+ } else {
+ requirePermission(Permission.Action.ADMIN);
+ }
+ }
+ @Override
+ public void postDeleteTable(ObserverContext c,
+ byte[] tableName) throws IOException {}
+
+
+ @Override
+ public void preModifyTable(ObserverContext c,
+ byte[] tableName, HTableDescriptor htd) throws IOException {
+ requirePermission(Permission.Action.CREATE);
+ }
+ @Override
+ public void postModifyTable(ObserverContext c,
+ byte[] tableName, HTableDescriptor htd) throws IOException {}
+
+
+ @Override
+ public void preAddColumn(ObserverContext c,
+ byte[] tableName, HColumnDescriptor column) throws IOException {
+ requirePermission(Permission.Action.CREATE);
+ }
+ @Override
+ public void postAddColumn(ObserverContext c,
+ byte[] tableName, HColumnDescriptor column) throws IOException {}
+
+
+ @Override
+ public void preModifyColumn(ObserverContext c,
+ byte[] tableName, HColumnDescriptor descriptor) throws IOException {
+ requirePermission(Permission.Action.CREATE);
+ }
+ @Override
+ public void postModifyColumn(ObserverContext c,
+ byte[] tableName, HColumnDescriptor descriptor) throws IOException {}
+
+
+ @Override
+ public void preDeleteColumn(ObserverContext c,
+ byte[] tableName, byte[] col) throws IOException {
+ requirePermission(Permission.Action.CREATE);
+ }
+ @Override
+ public void postDeleteColumn(ObserverContext c,
+ byte[] tableName, byte[] col) throws IOException {}
+
+
+ @Override
+ public void preEnableTable(ObserverContext c,
+ byte[] tableName) throws IOException {
+ if (isActiveUserTableOwner(c.getEnvironment(), tableName)) {
+ requirePermission(Permission.Action.CREATE);
+ } else {
+ requirePermission(Permission.Action.ADMIN);
+ }
+ }
+ @Override
+ public void postEnableTable(ObserverContext c,
+ byte[] tableName) throws IOException {}
+
+ @Override
+ public void preDisableTable(ObserverContext c,
+ byte[] tableName) throws IOException {
+ if (isActiveUserTableOwner(c.getEnvironment(), tableName)) {
+ requirePermission(Permission.Action.CREATE);
+ } else {
+ requirePermission(Permission.Action.ADMIN);
+ }
+ }
+ @Override
+ public void postDisableTable(ObserverContext c,
+ byte[] tableName) throws IOException {}
+
+ @Override
+ public void preMove(ObserverContext c,
+ HRegionInfo region, ServerName srcServer, ServerName destServer)
+ throws IOException {
+ requirePermission(Permission.Action.ADMIN);
+ }
+ @Override
+ public void postMove(ObserverContext c,
+ HRegionInfo region, ServerName srcServer, ServerName destServer)
+ throws IOException {}
+
+ @Override
+ public void preAssign(ObserverContext c,
+ HRegionInfo regionInfo) throws IOException {
+ requirePermission(Permission.Action.ADMIN);
+ }
+ @Override
+ public void postAssign(ObserverContext c,
+ HRegionInfo regionInfo) throws IOException {}
+
+ @Override
+ public void preUnassign(ObserverContext c,
+ HRegionInfo regionInfo, boolean force) throws IOException {
+ requirePermission(Permission.Action.ADMIN);
+ }
+ @Override
+ public void postUnassign(ObserverContext c,
+ HRegionInfo regionInfo, boolean force) throws IOException {}
+
+ @Override
+ public void preBalance(ObserverContext c)
+ throws IOException {
+ requirePermission(Permission.Action.ADMIN);
+ }
+ @Override
+ public void postBalance(ObserverContext c)
+ throws IOException {}
+
+ @Override
+ public boolean preBalanceSwitch(ObserverContext c,
+ boolean newValue) throws IOException {
+ requirePermission(Permission.Action.ADMIN);
+ return newValue;
+ }
+ @Override
+ public void postBalanceSwitch(ObserverContext c,
+ boolean oldValue, boolean newValue) throws IOException {}
+
+ @Override
+ public void preShutdown(ObserverContext c)
+ throws IOException {
+ requirePermission(Permission.Action.ADMIN);
+ }
+
+ @Override
+ public void preStopMaster(ObserverContext c)
+ throws IOException {
+ requirePermission(Permission.Action.ADMIN);
+ }
+
+ @Override
+ public void postStartMaster(ObserverContext ctx)
+ throws IOException {
+ // initialize the ACL storage table
+ AccessControlLists.init(ctx.getEnvironment().getMasterServices());
+ }
+
+
+ /* ---- RegionObserver implementation ---- */
+
+ @Override
+ public void postOpen(ObserverContext c) {
+ RegionCoprocessorEnvironment e = c.getEnvironment();
+ final HRegion region = e.getRegion();
+ if (region == null) {
+ LOG.error("NULL region from RegionCoprocessorEnvironment in postOpen()");
+ return;
+ }
+
+ try {
+ this.authManager = TableAuthManager.get(
+ e.getRegionServerServices().getZooKeeper(),
+ regionEnv.getConfiguration());
+ } catch (IOException ioe) {
+ // pass along as a RuntimeException, so that the coprocessor is unloaded
+ throw new RuntimeException("Error obtaining TableAuthManager", ioe);
+ }
+
+ if (AccessControlLists.isAclRegion(region)) {
+ aclRegion = true;
+ try {
+ initialize(e);
+ } catch (IOException ex) {
+ // if we can't obtain permissions, it's better to fail
+ // than perform checks incorrectly
+ throw new RuntimeException("Failed to initialize permissions cache", ex);
+ }
+ }
+ }
+
+ @Override
+ public void preGetClosestRowBefore(final ObserverContext c,
+ final byte [] row, final byte [] family, final Result result)
+ throws IOException {
+ requirePermission(TablePermission.Action.READ, c.getEnvironment(),
+ (family != null ? Lists.newArrayList(family) : null));
+ }
+
+ @Override
+ public void preGet(final ObserverContext<RegionCoprocessorEnvironment> c,
+ final Get get, final List<KeyValue> result) throws IOException {
+ /*
+ if column family level checks fail, check for a qualifier level permission
+ in one of the families. If it is present, then continue with the AccessControlFilter.
+ */
+ RegionCoprocessorEnvironment e = c.getEnvironment();
+ User requestUser = getActiveUser();
+ AuthResult authResult = permissionGranted(requestUser,
+ TablePermission.Action.READ, e, get.getFamilyMap());
+ if (!authResult.isAllowed()) {
+ if (hasFamilyQualifierPermission(requestUser,
+ TablePermission.Action.READ, e, get.getFamilyMap())) {
+ byte[] table = getTableName(e);
+ AccessControlFilter filter = new AccessControlFilter(authManager,
+ requestUser, table);
+
+ // wrap any existing filter
+ if (get.getFilter() != null) {
+ FilterList wrapper = new FilterList(FilterList.Operator.MUST_PASS_ALL,
+ Lists.newArrayList(filter, get.getFilter()));
+ get.setFilter(wrapper);
+ } else {
+ get.setFilter(filter);
+ }
+ logResult(AuthResult.allow("Access allowed with filter", requestUser,
+ TablePermission.Action.READ, authResult.table));
+ } else {
+ logResult(authResult);
+ throw new AccessDeniedException("Insufficient permissions (table=" +
+ e.getRegion().getTableDesc().getNameAsString() + ", action=READ)");
+ }
+ } else {
+ // log auth success
+ logResult(authResult);
+ }
+ }
+
+ @Override
+ public boolean preExists(final ObserverContext c,
+ final Get get, final boolean exists) throws IOException {
+ requirePermission(TablePermission.Action.READ, c.getEnvironment(),
+ get.familySet());
+ return exists;
+ }
+
+ @Override
+ public void prePut(final ObserverContext c,
+ final Put put, final WALEdit edit, final boolean writeToWAL)
+ throws IOException {
+ requirePermission(TablePermission.Action.WRITE, c.getEnvironment(),
+ put.getFamilyMap());
+ }
+
+ @Override
+ public void postPut(final ObserverContext c,
+ final Put put, final WALEdit edit, final boolean writeToWAL) {
+ if (aclRegion) {
+ updateACL(c.getEnvironment(), put.getFamilyMap());
+ }
+ }
+
+ @Override
+ public void preDelete(final ObserverContext c,
+ final Delete delete, final WALEdit edit, final boolean writeToWAL)
+ throws IOException {
+ requirePermission(TablePermission.Action.WRITE, c.getEnvironment(),
+ delete.getFamilyMap());
+ }
+
+ @Override
+ public void postDelete(final ObserverContext c,
+ final Delete delete, final WALEdit edit, final boolean writeToWAL)
+ throws IOException {
+ if (aclRegion) {
+ updateACL(c.getEnvironment(), delete.getFamilyMap());
+ }
+ }
+
+ @Override
+ public boolean preCheckAndPut(final ObserverContext c,
+ final byte [] row, final byte [] family, final byte [] qualifier,
+ final CompareFilter.CompareOp compareOp,
+ final WritableByteArrayComparable comparator, final Put put,
+ final boolean result) throws IOException {
+ requirePermission(TablePermission.Action.READ, c.getEnvironment(),
+ Arrays.asList(new byte[][]{family}));
+ return result;
+ }
+
+ @Override
+ public boolean preCheckAndDelete(final ObserverContext c,
+ final byte [] row, final byte [] family, final byte [] qualifier,
+ final CompareFilter.CompareOp compareOp,
+ final WritableByteArrayComparable comparator, final Delete delete,
+ final boolean result) throws IOException {
+ requirePermission(TablePermission.Action.READ, c.getEnvironment(),
+ Arrays.asList( new byte[][] {family}));
+ return result;
+ }
+
+ @Override
+ public long preIncrementColumnValue(final ObserverContext c,
+ final byte [] row, final byte [] family, final byte [] qualifier,
+ final long amount, final boolean writeToWAL)
+ throws IOException {
+ requirePermission(TablePermission.Action.WRITE, c.getEnvironment(),
+ Arrays.asList(new byte[][]{family}));
+ return -1;
+ }
+
+ @Override
+ public Result preIncrement(final ObserverContext c,
+ final Increment increment)
+ throws IOException {
+ requirePermission(TablePermission.Action.WRITE, c.getEnvironment(),
+ increment.getFamilyMap().keySet());
+ return null;
+ }
+
+ @Override
+ public RegionScanner preScannerOpen(final ObserverContext c,
+ final Scan scan, final RegionScanner s) throws IOException {
+ /*
+ if column family level checks fail, check for a qualifier level permission
+ in one of the families. If it is present, then continue with the AccessControlFilter.
+ */
+ RegionCoprocessorEnvironment e = c.getEnvironment();
+ User user = getActiveUser();
+ AuthResult authResult = permissionGranted(user, TablePermission.Action.READ, e,
+ scan.getFamilyMap());
+ if (!authResult.isAllowed()) {
+ if (hasFamilyQualifierPermission(user, TablePermission.Action.READ, e,
+ scan.getFamilyMap())) {
+ byte[] table = getTableName(e);
+ AccessControlFilter filter = new AccessControlFilter(authManager,
+ user, table);
+
+ // wrap any existing filter
+ if (scan.hasFilter()) {
+ FilterList wrapper = new FilterList(FilterList.Operator.MUST_PASS_ALL,
+ Lists.newArrayList(filter, scan.getFilter()));
+ scan.setFilter(wrapper);
+ } else {
+ scan.setFilter(filter);
+ }
+ logResult(AuthResult.allow("Access allowed with filter", user,
+ TablePermission.Action.READ, authResult.table));
+ } else {
+ // no table/family level perms and no qualifier level perms, reject
+ logResult(authResult);
+ throw new AccessDeniedException("Insufficient permissions for user '"+
+ (user != null ? user.getShortName() : "null")+"' "+
+ "for scanner open on table " + Bytes.toString(getTableName(e)));
+ }
+ } else {
+ // log success
+ logResult(authResult);
+ }
+ return s;
+ }
+
+ @Override
+ public RegionScanner postScannerOpen(final ObserverContext c,
+ final Scan scan, final RegionScanner s) throws IOException {
+ User user = getActiveUser();
+ if (user != null && user.getShortName() != null) { // store reference to scanner owner for later checks
+ scannerOwners.put(s, user.getShortName());
+ }
+ return s;
+ }
+
+ @Override
+ public boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
+ final InternalScanner s, final List<Result> result,
+ final int limit, final boolean hasNext) throws IOException {
+ requireScannerOwner(s);
+ return hasNext;
+ }
+
+ @Override
+ public void preScannerClose(final ObserverContext c,
+ final InternalScanner s) throws IOException {
+ requireScannerOwner(s);
+ }
+
+ @Override
+ public void postScannerClose(final ObserverContext c,
+ final InternalScanner s) throws IOException {
+ // clean up any associated owner mapping
+ scannerOwners.remove(s);
+ }
+
+ /**
+ * Verify, when servicing an RPC, that the caller is the scanner owner.
+ * If so, we assume that access control is correctly enforced based on
+ * the checks performed in preScannerOpen()
+ */
+ private void requireScannerOwner(InternalScanner s)
+ throws AccessDeniedException {
+ if (RequestContext.isInRequestContext()) {
+ String owner = scannerOwners.get(s);
+ if (owner != null && !owner.equals(RequestContext.getRequestUserName())) {
+ throw new AccessDeniedException("User '"+
+ RequestContext.getRequestUserName()+"' is not the scanner owner!");
+ }
+ }
+ }
+
+ /* ---- AccessControllerProtocol implementation ---- */
+ /*
+ * These methods are only allowed to be called against the _acl_ region(s).
+ * This will be restricted by both client side and endpoint implementations.
+ */
+ @Override
+ public void grant(byte[] user, TablePermission permission)
+ throws IOException {
+ // verify it's only running at .acl.
+ if (aclRegion) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Received request to grant access permission to '"
+ + Bytes.toString(user) + "'. "
+ + permission.toString());
+ }
+
+ requirePermission(Permission.Action.ADMIN);
+
+ AccessControlLists.addTablePermission(regionEnv.getConfiguration(),
+ permission.getTable(), Bytes.toString(user), permission);
+ if (AUDITLOG.isTraceEnabled()) {
+ // audit log should store permission changes in addition to auth results
+ AUDITLOG.trace("Granted user '" + Bytes.toString(user) + "' permission "
+ + permission.toString());
+ }
+ } else {
+ throw new CoprocessorException(AccessController.class, "This method " +
+ "can only execute at " +
+ Bytes.toString(AccessControlLists.ACL_TABLE_NAME) + " table.");
+ }
+ }
+
+ @Override
+ public void revoke(byte[] user, TablePermission permission)
+ throws IOException{
+ // only allowed to be called on _acl_ region
+ if (aclRegion) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Received request to revoke access permission for '"
+ + Bytes.toString(user) + "'. "
+ + permission.toString());
+ }
+
+ requirePermission(Permission.Action.ADMIN);
+
+ AccessControlLists.removeTablePermission(regionEnv.getConfiguration(),
+ permission.getTable(), Bytes.toString(user), permission);
+ if (AUDITLOG.isTraceEnabled()) {
+ // audit log should record all permission changes
+ AUDITLOG.trace("Revoked user '" + Bytes.toString(user) + "' permission "
+ + permission.toString());
+ }
+ } else {
+ throw new CoprocessorException(AccessController.class, "This method " +
+ "can only execute at " +
+ Bytes.toString(AccessControlLists.ACL_TABLE_NAME) + " table.");
+ }
+ }
+
+ @Override
+ public List<UserPermission> getUserPermissions(final byte[] tableName)
+ throws IOException {
+ // only allowed to be called on _acl_ region
+ if (aclRegion) {
+ requirePermission(Permission.Action.ADMIN);
+
+ List<UserPermission> perms = AccessControlLists.getUserPermissions
+ (regionEnv.getConfiguration(), tableName);
+ return perms;
+ } else {
+ throw new CoprocessorException(AccessController.class, "This method " +
+ "can only execute at " +
+ Bytes.toString(AccessControlLists.ACL_TABLE_NAME) + " table.");
+ }
+ }
+
+ @Override
+ public void checkPermissions(Permission[] permissions) throws IOException {
+ byte[] tableName = regionEnv.getRegion().getTableDesc().getName();
+ for (Permission permission : permissions) {
+ if (permission instanceof TablePermission) {
+ TablePermission tperm = (TablePermission) permission;
+ for (Permission.Action action : permission.getActions()) {
+ if (!Arrays.equals(tperm.getTable(), tableName)) {
+ throw new CoprocessorException(AccessController.class, String.format("This method "
+ + "can only execute at the table specified in TablePermission. " +
+ "Table of the region:%s , requested table:%s", Bytes.toString(tableName),
+ Bytes.toString(tperm.getTable())));
+ }
+
+ HashMap<byte[], Set<byte[]>> familyMap = Maps.newHashMapWithExpectedSize(1);
+ if (tperm.getFamily() != null) {
+ if (tperm.getQualifier() != null) {
+ familyMap.put(tperm.getFamily(), Sets.newHashSet(tperm.getQualifier()));
+ } else {
+ familyMap.put(tperm.getFamily(), null);
+ }
+ }
+
+ requirePermission(action, regionEnv, familyMap);
+ }
+
+ } else {
+ for (Permission.Action action : permission.getActions()) {
+ requirePermission(action);
+ }
+ }
+ }
+ }
+
+ @Override
+ public long getProtocolVersion(String protocol, long clientVersion) throws IOException {
+ return PROTOCOL_VERSION;
+ }
+
+ @Override
+ public ProtocolSignature getProtocolSignature(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+ if (AccessControllerProtocol.class.getName().equals(protocol)) {
+ return new ProtocolSignature(PROTOCOL_VERSION, null);
+ }
+ throw new HBaseRPC.UnknownProtocolException(
+ "Unexpected protocol requested: "+protocol);
+ }
+
+ private byte[] getTableName(RegionCoprocessorEnvironment e) {
+ HRegion region = e.getRegion();
+ byte[] tableName = null;
+
+ if (region != null) {
+ HRegionInfo regionInfo = region.getRegionInfo();
+ if (regionInfo != null) {
+ tableName = regionInfo.getTableName();
+ }
+ }
+ return tableName;
+ }
+
+ private String getTableOwner(MasterCoprocessorEnvironment e,
+ byte[] tableName) throws IOException {
+ HTableDescriptor htd = e.getTable(tableName).getTableDescriptor();
+ return htd.getOwnerString();
+ }
+
+ private boolean isActiveUserTableOwner(MasterCoprocessorEnvironment e,
+ byte[] tableName) throws IOException {
+ String activeUser = getActiveUser().getShortName();
+ return activeUser.equals(getTableOwner(e, tableName));
+ }
+
+ @Override
+ public void preCreateTableHandler(ObserverContext ctx,
+ HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
+ }
+
+ @Override
+ public void postCreateTableHandler(ObserverContext ctx,
+ HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
+ }
+
+ @Override
+ public void preDeleteTableHandler(ObserverContext ctx,
+ byte[] tableName) throws IOException {
+ }
+
+ @Override
+ public void postDeleteTableHandler(ObserverContext ctx,
+ byte[] tableName) throws IOException {
+ }
+
+ @Override
+ public void preModifyTableHandler(ObserverContext ctx,
+ byte[] tableName, HTableDescriptor htd) throws IOException {
+ }
+
+ @Override
+ public void postModifyTableHandler(ObserverContext ctx,
+ byte[] tableName, HTableDescriptor htd) throws IOException {
+ }
+
+ @Override
+ public void preAddColumnHandler(ObserverContext ctx,
+ byte[] tableName, HColumnDescriptor column) throws IOException {
+ }
+
+ @Override
+ public void postAddColumnHandler(ObserverContext ctx,
+ byte[] tableName, HColumnDescriptor column) throws IOException {
+ }
+
+ @Override
+ public void preModifyColumnHandler(ObserverContext ctx,
+ byte[] tableName, HColumnDescriptor descriptor) throws IOException {
+ }
+
+ @Override
+ public void postModifyColumnHandler(ObserverContext ctx,
+ byte[] tableName, HColumnDescriptor descriptor) throws IOException {
+ }
+
+ @Override
+ public void preDeleteColumnHandler(ObserverContext ctx,
+ byte[] tableName, byte[] c) throws IOException {
+ }
+
+ @Override
+ public void postDeleteColumnHandler(ObserverContext ctx,
+ byte[] tableName, byte[] c) throws IOException {
+ }
+
+ @Override
+ public void preEnableTableHandler(ObserverContext ctx,
+ byte[] tableName) throws IOException {
+ }
+
+ @Override
+ public void postEnableTableHandler(ObserverContext ctx,
+ byte[] tableName) throws IOException {
+ }
+
+ @Override
+ public void preDisableTableHandler(ObserverContext ctx,
+ byte[] tableName) throws IOException {
+ }
+
+ @Override
+ public void postDisableTableHandler(ObserverContext ctx,
+ byte[] tableName) throws IOException {
+ }
+}
Index: src/main/java/org/apache/hadoop/hbase/security/access/AccessControllerProtocol.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/access/AccessControllerProtocol.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/access/AccessControllerProtocol.java (revision 0)
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
+
+/**
+ * A custom protocol defined for maintaining and querying access control lists.
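+ * <p>
+ * A client would typically obtain a proxy for this protocol against the
+ * {@code _acl_} table and invoke the methods remotely. The sketch below is
+ * illustrative only (the row key, user and table names are examples, and it
+ * assumes the standard {@code HTable#coprocessorProxy} mechanism and an
+ * existing {@code Configuration conf}):
+ * <pre>
+ *   HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME);
+ *   AccessControllerProtocol protocol = acl.coprocessorProxy(
+ *       AccessControllerProtocol.class, Bytes.toBytes("testtable"));
+ *   protocol.grant(Bytes.toBytes("bob"),
+ *       new TablePermission(Bytes.toBytes("testtable"), null,
+ *           TablePermission.Action.READ));
+ * </pre>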
+ */
+public interface AccessControllerProtocol extends CoprocessorProtocol {
+
+ public static final long VERSION = 1L;
+
+ /**
+ * Grants the given user or group the privilege to perform the given actions
+ * over the specified scope contained in {@link TablePermission}
+ * @param user the user name, or, if prefixed with "@", group name receiving
+ * the grant
+ * @param permission the details of the provided permissions
+ * @throws IOException if the grant could not be applied
+ */
+ public void grant(byte[] user, TablePermission permission)
+ throws IOException;
+
+ /**
+ * Revokes a previously granted privilege from a user or group.
+ * Note that the provided {@link TablePermission} details must exactly match
+ * a stored grant. For example, if user "bob" has been granted "READ" access
+ * to table "data", over column family and qualifer "info:colA", then the
+ * table, column family and column qualifier must all be specified.
+ * Attempting to revoke permissions over just the "data" table will have
+ * no effect.
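+ * <p>
+ * For example, given a proxy {@code protocol} for this interface, a grant
+ * and its matching revoke specify the same table, family and qualifier
+ * (values here are illustrative only):
+ * <pre>
+ *   TablePermission perm = new TablePermission(Bytes.toBytes("data"),
+ *       Bytes.toBytes("info"), Bytes.toBytes("colA"),
+ *       TablePermission.Action.READ);
+ *   protocol.grant(Bytes.toBytes("bob"), perm);
+ *   protocol.revoke(Bytes.toBytes("bob"), perm);
+ * </pre>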
+ * @param user the user name, or, if prefixed with "@", group name whose
+ * privileges are being revoked
+ * @param permission the details of the previously granted permission to revoke
+ * @throws IOException if the revocation could not be performed
+ */
+ public void revoke(byte[] user, TablePermission permission)
+ throws IOException;
+
+ /**
+ * Queries the permissions currently stored for the given table, returning
+ * a list of currently granted permissions, along with the user or group
+ * each is associated with.
+ * @param tableName the table of the permission grants to return
+ * @return a list of the currently granted permissions, with associated user
+ * or group names
+ * @throws IOException if there is an error querying the permissions
+ */
+ public List<UserPermission> getUserPermissions(byte[] tableName)
+ throws IOException;
+
+ /**
+ * Checks whether the given Permissions will pass the access checks for the
+ * current user. Global permissions can be checked from the {@code _acl_}
+ * table or any other table; TablePermissions, however, can only be checked
+ * by the regions of the table they refer to. If an access control check
+ * fails, this method throws an AccessDeniedException.
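+ * <p>
+ * For example, given a proxy {@code protocol} for this interface (the table,
+ * family and action below are illustrative only):
+ * <pre>
+ *   protocol.checkPermissions(new Permission[] {
+ *       new TablePermission(Bytes.toBytes("testtable"),
+ *           Bytes.toBytes("info"), Permission.Action.READ) });
+ * </pre>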
+ * @param permissions to check for. Permission subclasses can be used
+ * to do more specific checks at the table/family/column level.
+ * @throws IOException if there is an error checking the permissions
+ */
+ public void checkPermissions(Permission[] permissions)
+ throws IOException;
+
+}
Index: src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java (revision 0)
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Represents an authorization for access over the given table, column family
+ * plus qualifier, for the given user.
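+ * <p>
+ * A minimal construction sketch (user, table and family names are
+ * illustrative only):
+ * <pre>
+ *   UserPermission perm = new UserPermission(Bytes.toBytes("bob"),
+ *       Bytes.toBytes("testtable"), Bytes.toBytes("info"),
+ *       TablePermission.Action.READ, TablePermission.Action.WRITE);
+ * </pre>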
+ */
+public class UserPermission extends TablePermission {
+ private static Log LOG = LogFactory.getLog(UserPermission.class);
+
+ private byte[] user;
+
+ /** Nullary constructor for Writable, do not use */
+ public UserPermission() {
+ super();
+ }
+
+ /**
+ * Creates a new instance for the given user, table and column family.
+ * @param user the user
+ * @param table the table
+ * @param family the family, can be null if action is allowed over the entire
+ * table
+ * @param assigned the list of allowed actions
+ */
+ public UserPermission(byte[] user, byte[] table, byte[] family,
+ Action... assigned) {
+ super(table, family, assigned);
+ this.user = user;
+ }
+
+ /**
+ * Creates a new permission for the given user, table, column family and
+ * column qualifier.
+ * @param user the user
+ * @param table the table
+ * @param family the family, can be null if action is allowed over the entire
+ * table
+ * @param qualifier the column qualifier, can be null if action is allowed
+ * over the entire column family
+ * @param assigned the list of allowed actions
+ */
+ public UserPermission(byte[] user, byte[] table, byte[] family,
+ byte[] qualifier, Action... assigned) {
+ super(table, family, qualifier, assigned);
+ this.user = user;
+ }
+
+ /**
+ * Creates a new instance for the given user, table, column family and
+ * qualifier, matching the actions with the given codes.
+ * @param user the user
+ * @param table the table
+ * @param family the family, can be null if action is allowed over the entire
+ * table
+ * @param qualifier the column qualifier, can be null if action is allowed
+ * over the entire column family
+ * @param actionCodes the list of allowed action codes
+ */
+ public UserPermission(byte[] user, byte[] table, byte[] family,
+ byte[] qualifier, byte[] actionCodes) {
+ super(table, family, qualifier, actionCodes);
+ this.user = user;
+ }
+
+ public byte[] getUser() {
+ return user;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (!(obj instanceof UserPermission)) {
+ return false;
+ }
+ UserPermission other = (UserPermission)obj;
+
+ if ((Bytes.equals(user, other.getUser()) &&
+ super.equals(obj))) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 37;
+ int result = super.hashCode();
+ if (user != null) {
+ result = prime * result + Bytes.hashCode(user);
+ }
+ return result;
+ }
+
+ public String toString() {
+ StringBuilder str = new StringBuilder("UserPermission: ")
+ .append("user=").append(Bytes.toString(user))
+ .append(", ").append(super.toString());
+ return str.toString();
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ user = Bytes.readByteArray(in);
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ Bytes.writeByteArray(out, user);
+ }
+}
Index: src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java (revision 0)
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.filter.FilterBase;
+import org.apache.hadoop.hbase.security.User;
+
+/**
+ * NOTE: for internal use only by AccessController implementation
+ *
+ *
+ * TODO: There is room for further performance optimization here.
+ * Calling TableAuthManager.authorize() per KeyValue imposes a fair amount of
+ * overhead. A more optimized solution might look at the qualifiers where
+ * permissions are actually granted and explicitly limit the scan to those.
+ *
+ *
+ * We should aim to use this _only_ when access to the requested column families
+ * is not granted at the column family levels. If table or column family
+ * access succeeds, then there is no need to impose the overhead of this filter.
+ *
+ */
+class AccessControlFilter extends FilterBase {
+
+ private TableAuthManager authManager;
+ private byte[] table;
+ private User user;
+
+ /**
+ * For Writable
+ */
+ AccessControlFilter() {
+ }
+
+ AccessControlFilter(TableAuthManager mgr, User ugi,
+ byte[] tableName) {
+ authManager = mgr;
+ table = tableName;
+ user = ugi;
+ }
+
+ @Override
+ public ReturnCode filterKeyValue(KeyValue kv) {
+ if (authManager.authorize(user, table, kv, TablePermission.Action.READ)) {
+ return ReturnCode.INCLUDE;
+ }
+ return ReturnCode.NEXT_COL;
+ }
+
+ @Override
+ public void write(DataOutput dataOutput) throws IOException {
+ // no implementation, server-side use only
+ throw new UnsupportedOperationException(
+ "Serialization not supported. Intended for server-side use only.");
+ }
+
+ @Override
+ public void readFields(DataInput dataInput) throws IOException {
+ // no implementation, server-side use only
+ throw new UnsupportedOperationException(
+ "Serialization not supported. Intended for server-side use only.");
+ }
+}
Index: src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/access/Permission.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/access/Permission.java (revision 0)
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import com.google.common.collect.Maps;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.VersionedWritable;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+
+/**
+ * Base permissions instance representing the ability to perform a given set
+ * of actions.
+ *
+ * @see TablePermission
+ */
+public class Permission extends VersionedWritable {
+ protected static final byte VERSION = 0;
+ public enum Action {
+ READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A');
+
+ private byte code;
+ Action(char code) {
+ this.code = (byte)code;
+ }
+
+ public byte code() { return code; }
+ }
+
+ private static Log LOG = LogFactory.getLog(Permission.class);
+ protected static Map<Byte,Action> ACTION_BY_CODE = Maps.newHashMap();
+
+ protected Action[] actions;
+
+ static {
+ for (Action a : Action.values()) {
+ ACTION_BY_CODE.put(a.code(), a);
+ }
+ }
+
+ /** Empty constructor for Writable implementation. Do not use. */
+ public Permission() {
+ super();
+ }
+
+ public Permission(Action... assigned) {
+ if (assigned != null && assigned.length > 0) {
+ actions = Arrays.copyOf(assigned, assigned.length);
+ }
+ }
+
+ public Permission(byte[] actionCodes) {
+ if (actionCodes != null) {
+ Action acts[] = new Action[actionCodes.length];
+ int j = 0;
+ for (int i=0; i<actionCodes.length; i++) {
+ Action a = ACTION_BY_CODE.get(actionCodes[i]);
+ if (a == null) {
+ continue; // skip unrecognized action codes
+ }
+ acts[j++] = a;
+ }
+ this.actions = Arrays.copyOf(acts, j);
+ }
+ }
+
+ public String toString() {
+ StringBuilder str = new StringBuilder("[Permission: actions=");
+ if (actions != null) {
+ for (int i=0; i<actions.length; i++) {
+ if (i > 0)
+ str.append(",");
+ if (actions[i] != null)
+ str.append(actions[i].toString());
+ else
+ str.append("NULL");
+ }
+ }
+ str.append("]");
+
+ return str.toString();
+ }
+
+ /** @return the object version number */
+ public byte getVersion() {
+ return VERSION;
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ int length = (int)in.readByte();
+ if (length > 0) {
+ actions = new Action[length];
+ for (int i = 0; i < length; i++) {
+ byte b = in.readByte();
+ Action a = ACTION_BY_CODE.get(b);
+ if (a == null) {
+ throw new IOException("Unknown action code '"+
+ Bytes.toStringBinary(new byte[]{b})+"' in input");
+ }
+ this.actions[i] = a;
+ }
+ } else {
+ actions = new Action[0];
+ }
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ out.writeByte(actions != null ? actions.length : 0);
+ if (actions != null) {
+ for (Action a: actions) {
+ out.writeByte(a.code());
+ }
+ }
+ }
+}
Index: src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java (revision 0)
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Handles synchronization of access control list entries and updates
+ * throughout all nodes in the cluster. The {@link AccessController} instance
+ * on the {@code _acl_} table regions creates a znode for each table as
+ * {@code /hbase/acl/tablename}, with the znode data containing a serialized
+ * list of the permissions granted for the table. The {@code AccessController}
+ * instances on all other cluster hosts watch the znodes for updates, which
+ * trigger updates in the {@link TableAuthManager} permission cache.
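+ * <p>
+ * The parent znode name defaults to {@code acl} and can be overridden with
+ * the {@code zookeeper.znode.acl.parent} configuration property, giving a
+ * layout such as {@code /hbase/acl/testtable} for a table named
+ * {@code testtable} (the table name here is illustrative only).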
+ */
+public class ZKPermissionWatcher extends ZooKeeperListener {
+ private static Log LOG = LogFactory.getLog(ZKPermissionWatcher.class);
+ // parent node for permissions lists
+ static final String ACL_NODE = "acl";
+ TableAuthManager authManager;
+ String aclZNode;
+
+ public ZKPermissionWatcher(ZooKeeperWatcher watcher,
+ TableAuthManager authManager, Configuration conf) {
+ super(watcher);
+ this.authManager = authManager;
+ String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", ACL_NODE);
+ this.aclZNode = ZKUtil.joinZNode(watcher.baseZNode, aclZnodeParent);
+ }
+
+ public void start() throws KeeperException {
+ watcher.registerListener(this);
+ if (ZKUtil.watchAndCheckExists(watcher, aclZNode)) {
+ List<ZKUtil.NodeAndData> existing =
+ ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode);
+ if (existing != null) {
+ refreshNodes(existing);
+ }
+ }
+ }
+
+ @Override
+ public void nodeCreated(String path) {
+ if (path.equals(aclZNode)) {
+ try {
+ List<ZKUtil.NodeAndData> nodes =
+ ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode);
+ refreshNodes(nodes);
+ } catch (KeeperException ke) {
+ LOG.error("Error reading data from zookeeper", ke);
+ // only option is to abort
+ watcher.abort("Zookeeper error obtaining acl node children", ke);
+ }
+ }
+ }
+
+ @Override
+ public void nodeDeleted(String path) {
+ if (aclZNode.equals(ZKUtil.getParent(path))) {
+ String table = ZKUtil.getNodeName(path);
+ authManager.remove(Bytes.toBytes(table));
+ }
+ }
+
+ @Override
+ public void nodeDataChanged(String path) {
+ if (aclZNode.equals(ZKUtil.getParent(path))) {
+ // update cache on an existing table node
+ String table = ZKUtil.getNodeName(path);
+ try {
+ byte[] data = ZKUtil.getDataAndWatch(watcher, path);
+ authManager.refreshCacheFromWritable(Bytes.toBytes(table), data);
+ } catch (KeeperException ke) {
+ LOG.error("Error reading data from zookeeper for node "+table, ke);
+ // only option is to abort
+ watcher.abort("Zookeeper error getting data for node " + table, ke);
+ } catch (IOException ioe) {
+ LOG.error("Error reading permissions writables", ioe);
+ }
+ }
+ }
+
+ @Override
+ public void nodeChildrenChanged(String path) {
+ if (path.equals(aclZNode)) {
+ // table permissions changed
+ try {
+ List<ZKUtil.NodeAndData> nodes =
+ ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode);
+ refreshNodes(nodes);
+ } catch (KeeperException ke) {
+ LOG.error("Error reading data from zookeeper for path "+path, ke);
+ watcher.abort("Zookeeper error get node children for path "+path, ke);
+ }
+ }
+ }
+
+ private void refreshNodes(List<ZKUtil.NodeAndData> nodes) {
+ for (ZKUtil.NodeAndData n : nodes) {
+ if (n.isEmpty()) continue;
+ String path = n.getNode();
+ String table = ZKUtil.getNodeName(path);
+ try {
+ byte[] nodeData = n.getData();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Updating permissions cache from node "+table+" with data: "+
+ Bytes.toStringBinary(nodeData));
+ }
+ authManager.refreshCacheFromWritable(Bytes.toBytes(table),
+ nodeData);
+ } catch (IOException ioe) {
+ LOG.error("Failed parsing permissions for table '" + table +
+ "' from zk", ioe);
+ }
+ }
+ }
+
+ /**
+ * Write a table's access controls to the permissions mirror in zookeeper.
+ * @param tableName the name of the table whose permissions are being written
+ * @param permsData the serialized list of permissions for the table
+ */
+ public void writeToZookeeper(String tableName,
+ byte[] permsData) {
+ String zkNode =
+ ZKUtil.joinZNode(ZKUtil.joinZNode(watcher.baseZNode, ACL_NODE),
+ tableName);
+ try {
+ ZKUtil.createWithParents(watcher, zkNode);
+ ZKUtil.updateExistingNodeData(watcher, zkNode,
+ permsData, -1);
+ } catch (KeeperException e) {
+ LOG.error("Failed updating permissions for table '" + tableName +
+ "'", e);
+ watcher.abort("Failed writing node "+zkNode+" to zookeeper", e);
+ }
+ }
+}
Index: src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java (revision 0)
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import javax.security.auth.callback.Callback;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.auth.callback.NameCallback;
+import javax.security.auth.callback.PasswordCallback;
+import javax.security.auth.callback.UnsupportedCallbackException;
+import javax.security.sasl.AuthorizeCallback;
+import javax.security.sasl.RealmCallback;
+import javax.security.sasl.Sasl;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ipc.HBaseServer;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+
+/**
+ * A utility class for dealing with SASL on the RPC server side.
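+ * <p>
+ * The SASL quality of protection is selected through the
+ * {@code hbase.rpc.protection} configuration property before {@link #init}
+ * is called. A minimal sketch (the value shown is just one of the supported
+ * options, see {@link QualityOfProtection}):
+ * <pre>
+ *   conf.set("hbase.rpc.protection", "privacy");
+ *   HBaseSaslRpcServer.init(conf);
+ * </pre>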
+ */
+public class HBaseSaslRpcServer {
+ public static final Log LOG = LogFactory.getLog(HBaseSaslRpcServer.class);
+ public static final String SASL_DEFAULT_REALM = "default";
+ public static final Map<String, String> SASL_PROPS =
+ new TreeMap<String, String>();
+
+ public static final int SWITCH_TO_SIMPLE_AUTH = -88;
+
+ public static enum QualityOfProtection {
+ AUTHENTICATION("auth"),
+ INTEGRITY("auth-int"),
+ PRIVACY("auth-conf");
+
+ public final String saslQop;
+
+ private QualityOfProtection(String saslQop) {
+ this.saslQop = saslQop;
+ }
+
+ public String getSaslQop() {
+ return saslQop;
+ }
+ }
+
+ public static void init(Configuration conf) {
+ QualityOfProtection saslQOP = QualityOfProtection.AUTHENTICATION;
+ String rpcProtection = conf.get("hbase.rpc.protection",
+ QualityOfProtection.AUTHENTICATION.name().toLowerCase());
+ if (QualityOfProtection.INTEGRITY.name().toLowerCase()
+ .equals(rpcProtection)) {
+ saslQOP = QualityOfProtection.INTEGRITY;
+ } else if (QualityOfProtection.PRIVACY.name().toLowerCase().equals(
+ rpcProtection)) {
+ saslQOP = QualityOfProtection.PRIVACY;
+ }
+
+ SASL_PROPS.put(Sasl.QOP, saslQOP.getSaslQop());
+ SASL_PROPS.put(Sasl.SERVER_AUTH, "true");
+ }
+
+ static String encodeIdentifier(byte[] identifier) {
+ return new String(Base64.encodeBase64(identifier));
+ }
+
+ static byte[] decodeIdentifier(String identifier) {
+ return Base64.decodeBase64(identifier.getBytes());
+ }
+
+ public static <T extends TokenIdentifier> T getIdentifier(String id,
+ SecretManager<T> secretManager) throws InvalidToken {
+ byte[] tokenId = decodeIdentifier(id);
+ T tokenIdentifier = secretManager.createIdentifier();
+ try {
+ tokenIdentifier.readFields(new DataInputStream(new ByteArrayInputStream(
+ tokenId)));
+ } catch (IOException e) {
+ throw (InvalidToken) new InvalidToken(
+ "Can't de-serialize tokenIdentifier").initCause(e);
+ }
+ return tokenIdentifier;
+ }
+
+ static char[] encodePassword(byte[] password) {
+ return new String(Base64.encodeBase64(password)).toCharArray();
+ }
+
+ /** Splitting fully qualified Kerberos name into parts */
+ public static String[] splitKerberosName(String fullName) {
+ return fullName.split("[/@]");
+ }
+
+ public enum SaslStatus {
+ SUCCESS (0),
+ ERROR (1);
+
+ public final int state;
+ private SaslStatus(int state) {
+ this.state = state;
+ }
+ }
+
+ /** Authentication method */
+ public static enum AuthMethod {
+ SIMPLE((byte) 80, "", AuthenticationMethod.SIMPLE),
+ KERBEROS((byte) 81, "GSSAPI", AuthenticationMethod.KERBEROS),
+ DIGEST((byte) 82, "DIGEST-MD5", AuthenticationMethod.TOKEN);
+
+ /** The code for this method. */
+ public final byte code;
+ public final String mechanismName;
+ public final AuthenticationMethod authenticationMethod;
+
+ private AuthMethod(byte code, String mechanismName,
+ AuthenticationMethod authMethod) {
+ this.code = code;
+ this.mechanismName = mechanismName;
+ this.authenticationMethod = authMethod;
+ }
+
+ private static final int FIRST_CODE = values()[0].code;
+
+ /** Return the object represented by the code. */
+ private static AuthMethod valueOf(byte code) {
+ final int i = (code & 0xff) - FIRST_CODE;
+ return i < 0 || i >= values().length ? null : values()[i];
+ }
+
+ /** Return the SASL mechanism name */
+ public String getMechanismName() {
+ return mechanismName;
+ }
+
+ /** Read from in */
+ public static AuthMethod read(DataInput in) throws IOException {
+ return valueOf(in.readByte());
+ }
+
+ /** Write to out */
+ public void write(DataOutput out) throws IOException {
+ out.write(code);
+ }
+ };
+
+ /** CallbackHandler for SASL DIGEST-MD5 mechanism */
+ public static class SaslDigestCallbackHandler implements CallbackHandler {
+ private SecretManager<TokenIdentifier> secretManager;
+ private HBaseServer.Connection connection;
+
+ public SaslDigestCallbackHandler(
+ SecretManager<TokenIdentifier> secretManager,
+ HBaseServer.Connection connection) {
+ this.secretManager = secretManager;
+ this.connection = connection;
+ }
+
+ private char[] getPassword(TokenIdentifier tokenid) throws InvalidToken {
+ return encodePassword(secretManager.retrievePassword(tokenid));
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void handle(Callback[] callbacks) throws InvalidToken,
+ UnsupportedCallbackException {
+ NameCallback nc = null;
+ PasswordCallback pc = null;
+ AuthorizeCallback ac = null;
+ for (Callback callback : callbacks) {
+ if (callback instanceof AuthorizeCallback) {
+ ac = (AuthorizeCallback) callback;
+ } else if (callback instanceof NameCallback) {
+ nc = (NameCallback) callback;
+ } else if (callback instanceof PasswordCallback) {
+ pc = (PasswordCallback) callback;
+ } else if (callback instanceof RealmCallback) {
+ continue; // realm is ignored
+ } else {
+ throw new UnsupportedCallbackException(callback,
+ "Unrecognized SASL DIGEST-MD5 Callback");
+ }
+ }
+ if (pc != null) {
+ TokenIdentifier tokenIdentifier = getIdentifier(nc.getDefaultName(), secretManager);
+ char[] password = getPassword(tokenIdentifier);
+ UserGroupInformation user = null;
+ user = tokenIdentifier.getUser(); // may throw exception
+ connection.attemptingUser = user;
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("SASL server DIGEST-MD5 callback: setting password "
+ + "for client: " + tokenIdentifier.getUser());
+ }
+ pc.setPassword(password);
+ }
+ if (ac != null) {
+ String authid = ac.getAuthenticationID();
+ String authzid = ac.getAuthorizationID();
+ if (authid.equals(authzid)) {
+ ac.setAuthorized(true);
+ } else {
+ ac.setAuthorized(false);
+ }
+ if (ac.isAuthorized()) {
+ if (LOG.isDebugEnabled()) {
+ String username =
+ getIdentifier(authzid, secretManager).getUser().getUserName();
+ LOG.debug("SASL server DIGEST-MD5 callback: setting "
+ + "canonicalized client ID: " + username);
+ }
+ ac.setAuthorizedID(authzid);
+ }
+ }
+ }
+ }
+
+ /** CallbackHandler for SASL GSSAPI Kerberos mechanism */
+ public static class SaslGssCallbackHandler implements CallbackHandler {
+
+ /** {@inheritDoc} */
+ @Override
+ public void handle(Callback[] callbacks) throws
+ UnsupportedCallbackException {
+ AuthorizeCallback ac = null;
+ for (Callback callback : callbacks) {
+ if (callback instanceof AuthorizeCallback) {
+ ac = (AuthorizeCallback) callback;
+ } else {
+ throw new UnsupportedCallbackException(callback,
+ "Unrecognized SASL GSSAPI Callback");
+ }
+ }
+ if (ac != null) {
+ String authid = ac.getAuthenticationID();
+ String authzid = ac.getAuthorizationID();
+ if (authid.equals(authzid)) {
+ ac.setAuthorized(true);
+ } else {
+ ac.setAuthorized(false);
+ }
+ if (ac.isAuthorized()) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("SASL server GSSAPI callback: setting "
+ + "canonicalized client ID: " + authzid);
+ ac.setAuthorizedID(authzid);
+ }
+ }
+ }
+ }
+}
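
Illustrative sketch (not part of the patch): the AuthMethod enum above travels as a single byte in the connection preamble, and the read/write/getMechanismName members shown here are all that is needed to round-trip it.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.AuthMethod;

    public class AuthMethodRoundTrip {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        // Client side: announce the chosen method as one byte after the RPC header.
        AuthMethod.KERBEROS.write(new DataOutputStream(bytes));
        // Server side: read the byte back and pick the SASL mechanism to create.
        AuthMethod method = AuthMethod.read(
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(method + " -> SASL mechanism " + method.getMechanismName());
      }
    }
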
Index: src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java (revision 0)
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.security;
+
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+
+/**
+ * Exception thrown by access-related methods.
+ */
+public class AccessDeniedException extends DoNotRetryIOException {
+ private static final long serialVersionUID = 1913879564363001780L;
+
+ public AccessDeniedException() {
+ super();
+ }
+
+ public AccessDeniedException(Class<?> clazz, String s) {
+ super("AccessDenied [" + clazz.getName() + "]: " + s);
+ }
+
+ public AccessDeniedException(String s) {
+ super(s);
+ }
+}
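
Illustrative sketch (not part of the patch): a server-side check might surface authorization failures through AccessDeniedException roughly as below; the requirePermission helper and its ACL lookup are hypothetical placeholders, not APIs added by this patch.

    import org.apache.hadoop.hbase.security.AccessDeniedException;
    import org.apache.hadoop.hbase.security.User;

    public class AccessCheckSketch {
      // Throws if the caller lacks the named permission. Because the exception
      // extends DoNotRetryIOException, clients fail fast instead of retrying.
      public static void requirePermission(User user, String permission)
          throws AccessDeniedException {
        boolean allowed = false; // placeholder: a real check would consult stored ACLs
        if (!allowed) {
          throw new AccessDeniedException(AccessCheckSketch.class,
              "Insufficient permissions for user '"
              + (user != null ? user.getShortName() : "<null>")
              + "', need " + permission);
        }
      }
    }
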
Index: src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java (revision 0)
@@ -0,0 +1,279 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import javax.security.auth.callback.Callback;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.auth.callback.NameCallback;
+import javax.security.auth.callback.PasswordCallback;
+import javax.security.auth.callback.UnsupportedCallbackException;
+import javax.security.sasl.RealmCallback;
+import javax.security.sasl.RealmChoiceCallback;
+import javax.security.sasl.Sasl;
+import javax.security.sasl.SaslException;
+import javax.security.sasl.SaslClient;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.AuthMethod;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslStatus;
+import org.apache.hadoop.security.SaslInputStream;
+import org.apache.hadoop.security.SaslOutputStream;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+
+/**
+ * A utility class that encapsulates SASL logic for RPC client.
+ * Copied from org.apache.hadoop.security
+ */
+public class HBaseSaslRpcClient {
+ public static final Log LOG = LogFactory.getLog(HBaseSaslRpcClient.class);
+
+ private final SaslClient saslClient;
+
+ /**
+ * Create a HBaseSaslRpcClient for an authentication method
+ *
+ * @param method
+ * the requested authentication method
+ * @param token
+ * token to use if needed by the authentication method
+ */
+ public HBaseSaslRpcClient(AuthMethod method,
+ Token<? extends TokenIdentifier> token, String serverPrincipal)
+ throws IOException {
+ switch (method) {
+ case DIGEST:
+ if (LOG.isDebugEnabled())
+ LOG.debug("Creating SASL " + AuthMethod.DIGEST.getMechanismName()
+ + " client to authenticate to service at " + token.getService());
+ saslClient = Sasl.createSaslClient(new String[] { AuthMethod.DIGEST
+ .getMechanismName() }, null, null, HBaseSaslRpcServer.SASL_DEFAULT_REALM,
+ HBaseSaslRpcServer.SASL_PROPS, new SaslClientCallbackHandler(token));
+ break;
+ case KERBEROS:
+ if (LOG.isDebugEnabled()) {
+ LOG
+ .debug("Creating SASL " + AuthMethod.KERBEROS.getMechanismName()
+ + " client. Server's Kerberos principal name is "
+ + serverPrincipal);
+ }
+ if (serverPrincipal == null || serverPrincipal.length() == 0) {
+ throw new IOException(
+ "Failed to specify server's Kerberos principal name");
+ }
+ String names[] = HBaseSaslRpcServer.splitKerberosName(serverPrincipal);
+ if (names.length != 3) {
+ throw new IOException(
+ "Kerberos principal does not have the expected format: "
+ + serverPrincipal);
+ }
+ saslClient = Sasl.createSaslClient(new String[] { AuthMethod.KERBEROS
+ .getMechanismName() }, null, names[0], names[1],
+ HBaseSaslRpcServer.SASL_PROPS, null);
+ break;
+ default:
+ throw new IOException("Unknown authentication method " + method);
+ }
+ if (saslClient == null)
+ throw new IOException("Unable to find SASL client implementation");
+ }
+
+ private static void readStatus(DataInputStream inStream) throws IOException {
+ int status = inStream.readInt(); // read status
+ if (status != SaslStatus.SUCCESS.state) {
+ throw new RemoteException(WritableUtils.readString(inStream),
+ WritableUtils.readString(inStream));
+ }
+ }
+
+ /**
+ * Do client side SASL authentication with server via the given InputStream
+ * and OutputStream
+ *
+ * @param inS
+ * InputStream to use
+ * @param outS
+ * OutputStream to use
+ * @return true if the connection is set up, or false if it needs to switch
+ * to simple auth.
+ * @throws IOException
+ */
+ public boolean saslConnect(InputStream inS, OutputStream outS)
+ throws IOException {
+ DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS));
+ DataOutputStream outStream = new DataOutputStream(new BufferedOutputStream(
+ outS));
+
+ try {
+ byte[] saslToken = new byte[0];
+ if (saslClient.hasInitialResponse())
+ saslToken = saslClient.evaluateChallenge(saslToken);
+ if (saslToken != null) {
+ outStream.writeInt(saslToken.length);
+ outStream.write(saslToken, 0, saslToken.length);
+ outStream.flush();
+ if (LOG.isDebugEnabled())
+ LOG.debug("Have sent token of size " + saslToken.length
+ + " from initSASLContext.");
+ }
+ if (!saslClient.isComplete()) {
+ readStatus(inStream);
+ int len = inStream.readInt();
+ if (len == HBaseSaslRpcServer.SWITCH_TO_SIMPLE_AUTH) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Server asks us to fall back to simple auth.");
+ saslClient.dispose();
+ return false;
+ }
+ saslToken = new byte[len];
+ if (LOG.isDebugEnabled())
+ LOG.debug("Will read input token of size " + saslToken.length
+ + " for processing by initSASLContext");
+ inStream.readFully(saslToken);
+ }
+
+ while (!saslClient.isComplete()) {
+ saslToken = saslClient.evaluateChallenge(saslToken);
+ if (saslToken != null) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Will send token of size " + saslToken.length
+ + " from initSASLContext.");
+ outStream.writeInt(saslToken.length);
+ outStream.write(saslToken, 0, saslToken.length);
+ outStream.flush();
+ }
+ if (!saslClient.isComplete()) {
+ readStatus(inStream);
+ saslToken = new byte[inStream.readInt()];
+ if (LOG.isDebugEnabled())
+ LOG.debug("Will read input token of size " + saslToken.length
+ + " for processing by initSASLContext");
+ inStream.readFully(saslToken);
+ }
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("SASL client context established. Negotiated QoP: "
+ + saslClient.getNegotiatedProperty(Sasl.QOP));
+ }
+ return true;
+ } catch (IOException e) {
+ try {
+ saslClient.dispose();
+ } catch (SaslException ignored) {
+ // ignore further exceptions during cleanup
+ }
+ throw e;
+ }
+ }
+
+ /**
+ * Get a SASL wrapped InputStream. Can be called only after saslConnect() has
+ * been called.
+ *
+ * @param in
+ * the InputStream to wrap
+ * @return a SASL wrapped InputStream
+ * @throws IOException
+ */
+ public InputStream getInputStream(InputStream in) throws IOException {
+ if (!saslClient.isComplete()) {
+ throw new IOException("Sasl authentication exchange hasn't completed yet");
+ }
+ return new SaslInputStream(in, saslClient);
+ }
+
+ /**
+ * Get a SASL wrapped OutputStream. Can be called only after saslConnect() has
+ * been called.
+ *
+ * @param out
+ * the OutputStream to wrap
+ * @return a SASL wrapped OutputStream
+ * @throws IOException
+ */
+ public OutputStream getOutputStream(OutputStream out) throws IOException {
+ if (!saslClient.isComplete()) {
+ throw new IOException("Sasl authentication exchange hasn't completed yet");
+ }
+ return new SaslOutputStream(out, saslClient);
+ }
+
+ /** Release resources used by wrapped saslClient */
+ public void dispose() throws SaslException {
+ saslClient.dispose();
+ }
+
+ private static class SaslClientCallbackHandler implements CallbackHandler {
+ private final String userName;
+ private final char[] userPassword;
+
+ public SaslClientCallbackHandler(Token<? extends TokenIdentifier> token) {
+ this.userName = HBaseSaslRpcServer.encodeIdentifier(token.getIdentifier());
+ this.userPassword = HBaseSaslRpcServer.encodePassword(token.getPassword());
+ }
+
+ public void handle(Callback[] callbacks)
+ throws UnsupportedCallbackException {
+ NameCallback nc = null;
+ PasswordCallback pc = null;
+ RealmCallback rc = null;
+ for (Callback callback : callbacks) {
+ if (callback instanceof RealmChoiceCallback) {
+ continue;
+ } else if (callback instanceof NameCallback) {
+ nc = (NameCallback) callback;
+ } else if (callback instanceof PasswordCallback) {
+ pc = (PasswordCallback) callback;
+ } else if (callback instanceof RealmCallback) {
+ rc = (RealmCallback) callback;
+ } else {
+ throw new UnsupportedCallbackException(callback,
+ "Unrecognized SASL client callback");
+ }
+ }
+ if (nc != null) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("SASL client callback: setting username: " + userName);
+ nc.setName(userName);
+ }
+ if (pc != null) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("SASL client callback: setting userPassword");
+ pc.setPassword(userPassword);
+ }
+ if (rc != null) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("SASL client callback: setting realm: "
+ + rc.getDefaultText());
+ rc.setText(rc.getDefaultText());
+ }
+ }
+ }
+}
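
Illustrative sketch (not part of the patch): the intended calling sequence for HBaseSaslRpcClient on a token-authenticated (DIGEST-MD5) connection; socket setup and token acquisition are assumed to have happened elsewhere.

    import java.io.InputStream;
    import java.io.OutputStream;
    import java.net.Socket;

    import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;
    import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.AuthMethod;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    public class SaslClientSketch {
      public static void handshake(Socket socket,
          Token<? extends TokenIdentifier> token) throws Exception {
        // No server principal is needed for DIGEST; it only matters for KERBEROS.
        HBaseSaslRpcClient client =
            new HBaseSaslRpcClient(AuthMethod.DIGEST, token, null);
        InputStream rawIn = socket.getInputStream();
        OutputStream rawOut = socket.getOutputStream();
        if (client.saslConnect(rawIn, rawOut)) {
          // Handshake succeeded: subsequent RPC traffic should use the SASL-wrapped
          // streams so any negotiated integrity/privacy QoP is applied.
          InputStream in = client.getInputStream(rawIn);
          OutputStream out = client.getOutputStream(rawOut);
          // ... continue the RPC protocol over in/out ...
        } else {
          // Server asked to fall back to simple auth; keep using the raw streams.
        }
      }
    }
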
Index: src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java (revision 0)
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.security;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.AdminProtocol;
+import org.apache.hadoop.hbase.client.ClientProtocol;
+import org.apache.hadoop.hbase.ipc.HMasterInterface;
+import org.apache.hadoop.hbase.ipc.RegionServerStatusProtocol;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.Service;
+import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
+
+/**
+ * Implementation of secure Hadoop policy provider for mapping
+ * protocol interfaces to hbase-policy.xml entries.
+ */
+public class HBasePolicyProvider extends PolicyProvider {
+ protected static Service[] services = {
+ new Service("security.hbase.client.protocol.acl", ClientProtocol.class),
+ new Service("security.hbase.client.protocol.acl", AdminProtocol.class),
+ new Service("security.admin.protocol.acl", HMasterInterface.class),
+ new Service("security.masterregion.protocol.acl", RegionServerStatusProtocol.class)
+ };
+
+ @Override
+ public Service[] getServices() {
+ return services;
+ }
+
+ public static void init(Configuration conf,
+ ServiceAuthorizationManager authManager) {
+ // set service-level authorization security policy
+ conf.set("hadoop.policy.file", "hbase-policy.xml");
+ if (conf.getBoolean(
+ ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
+ authManager.refresh(conf, new HBasePolicyProvider());
+ }
+ }
+}
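
Illustrative sketch (not part of the patch): HBasePolicyProvider.init() only refreshes the ACLs when service-level authorization is enabled, so wiring it up from server code might look like this.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.security.HBasePolicyProvider;
    import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;

    public class PolicyInitSketch {
      public static ServiceAuthorizationManager newAuthManager(Configuration conf) {
        ServiceAuthorizationManager authManager = new ServiceAuthorizationManager();
        // Turn on service-level authorization, then load ACLs from hbase-policy.xml.
        conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);
        HBasePolicyProvider.init(conf, authManager);
        return authManager;
      }
    }
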
Index: src/main/java/org/apache/hadoop/hbase/security/User.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/security/User.java (revision 1335370)
+++ src/main/java/org/apache/hadoop/hbase/security/User.java (working copy)
@@ -22,17 +22,14 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader;
-import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;
import org.apache.hadoop.hbase.util.Methods;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.security.UserGroupInformation;
import java.io.IOException;
-import java.lang.reflect.Constructor;
import java.lang.reflect.UndeclaredThrowableException;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
@@ -50,24 +47,12 @@
* HBase, but can be extended as needs change.
*
*/
-@InterfaceAudience.Private
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
public abstract class User {
public static final String HBASE_SECURITY_CONF_KEY =
"hbase.security.authentication";
- /**
- * Flag to differentiate between API-incompatible changes to
- * {@link org.apache.hadoop.security.UserGroupInformation} between vanilla
- * Hadoop 0.20.x and secure Hadoop 0.20+.
- */
- private static boolean IS_SECURE_HADOOP = true;
- static {
- try {
- UserGroupInformation.class.getMethod("isSecurityEnabled");
- } catch (NoSuchMethodException nsme) {
- IS_SECURE_HADOOP = false;
- }
- }
private static Log LOG = LogFactory.getLog(User.class);
protected UserGroupInformation ugi;
@@ -138,12 +123,7 @@
* Returns the {@code User} instance within current execution context.
*/
public static User getCurrent() throws IOException {
- User user;
- if (IS_SECURE_HADOOP) {
- user = new SecureHadoopUser();
- } else {
- user = new HadoopUser();
- }
+ User user = new SecureHadoopUser();
if (user.getUGI() == null) {
return null;
}
@@ -159,40 +139,9 @@
if (ugi == null) {
return null;
}
-
- if (IS_SECURE_HADOOP) {
- return new SecureHadoopUser(ugi);
- }
- return new HadoopUser(ugi);
+ return new SecureHadoopUser(ugi);
}
- public static User createUser(ConnectionHeader head) {
- UserGroupInformation ugi = null;
-
- if (!head.hasUserInfo()) {
- return create(null);
- }
- UserInformation userInfoProto = head.getUserInfo();
- String effectiveUser = null;
- if (userInfoProto.hasEffectiveUser()) {
- effectiveUser = userInfoProto.getEffectiveUser();
- }
- String realUser = null;
- if (userInfoProto.hasRealUser()) {
- realUser = userInfoProto.getRealUser();
- }
- if (effectiveUser != null) {
- if (realUser != null) {
- UserGroupInformation realUserUgi =
- UserGroupInformation.createRemoteUser(realUser);
- ugi = UserGroupInformation.createProxyUser(effectiveUser, realUserUgi);
- } else {
- ugi = UserGroupInformation.createRemoteUser(effectiveUser);
- }
- }
- return create(ugi);
- }
-
/**
* Generates a new {@code User} instance specifically for use in test code.
* @param name the full username
@@ -201,10 +150,7 @@
*/
public static User createUserForTesting(Configuration conf,
String name, String[] groups) {
- if (IS_SECURE_HADOOP) {
- return SecureHadoopUser.createUserForTesting(conf, name, groups);
- }
- return HadoopUser.createUserForTesting(conf, name, groups);
+ return SecureHadoopUser.createUserForTesting(conf, name, groups);
}
/**
@@ -225,11 +171,7 @@
*/
public static void login(Configuration conf, String fileConfKey,
String principalConfKey, String localhost) throws IOException {
- if (IS_SECURE_HADOOP) {
- SecureHadoopUser.login(conf, fileConfKey, principalConfKey, localhost);
- } else {
- HadoopUser.login(conf, fileConfKey, principalConfKey, localhost);
- }
+ SecureHadoopUser.login(conf, fileConfKey, principalConfKey, localhost);
}
/**
@@ -239,11 +181,7 @@
* {@code UserGroupInformation.isSecurityEnabled()}.
*/
public static boolean isSecurityEnabled() {
- if (IS_SECURE_HADOOP) {
- return SecureHadoopUser.isSecurityEnabled();
- } else {
- return HadoopUser.isSecurityEnabled();
- }
+ return SecureHadoopUser.isSecurityEnabled();
}
/**
@@ -258,160 +196,6 @@
/* Concrete implementations */
/**
- * Bridges {@link User} calls to invocations of the appropriate methods
- * in {@link org.apache.hadoop.security.UserGroupInformation} in regular
- * Hadoop 0.20 (ASF Hadoop and other versions without the backported security
- * features).
- */
- private static class HadoopUser extends User {
-
- private HadoopUser() {
- try {
- ugi = (UserGroupInformation) callStatic("getCurrentUGI");
- if (ugi == null) {
- // Secure Hadoop UGI will perform an implicit login if the current
- // user is null. Emulate the same behavior here for consistency
- Configuration conf = HBaseConfiguration.create();
- ugi = (UserGroupInformation) callStatic("login",
- new Class[]{ Configuration.class }, new Object[]{ conf });
- if (ugi != null) {
- callStatic("setCurrentUser",
- new Class[]{ UserGroupInformation.class }, new Object[]{ ugi });
- }
- }
- } catch (RuntimeException re) {
- throw re;
- } catch (Exception e) {
- throw new UndeclaredThrowableException(e,
- "Unexpected exception HadoopUser");
- }
- }
-
- private HadoopUser(UserGroupInformation ugi) {
- this.ugi = ugi;
- }
-
- @Override
- public String getShortName() {
- return ugi != null ? ugi.getUserName() : null;
- }
-
- @Override
- public <T> T runAs(PrivilegedAction<T> action) {
- T result = null;
- UserGroupInformation previous = null;
- try {
- previous = (UserGroupInformation) callStatic("getCurrentUGI");
- try {
- if (ugi != null) {
- callStatic("setCurrentUser", new Class[]{UserGroupInformation.class},
- new Object[]{ugi});
- }
- result = action.run();
- } finally {
- callStatic("setCurrentUser", new Class[]{UserGroupInformation.class},
- new Object[]{previous});
- }
- } catch (RuntimeException re) {
- throw re;
- } catch (Exception e) {
- throw new UndeclaredThrowableException(e,
- "Unexpected exception in runAs()");
- }
- return result;
- }
-
- @Override
- public <T> T runAs(PrivilegedExceptionAction<T> action)
- throws IOException, InterruptedException {
- T result = null;
- try {
- UserGroupInformation previous =
- (UserGroupInformation) callStatic("getCurrentUGI");
- try {
- if (ugi != null) {
- callStatic("setCurrentUGI", new Class[]{UserGroupInformation.class},
- new Object[]{ugi});
- }
- result = action.run();
- } finally {
- callStatic("setCurrentUGI", new Class[]{UserGroupInformation.class},
- new Object[]{previous});
- }
- } catch (Exception e) {
- if (e instanceof IOException) {
- throw (IOException)e;
- } else if (e instanceof InterruptedException) {
- throw (InterruptedException)e;
- } else if (e instanceof RuntimeException) {
- throw (RuntimeException)e;
- } else {
- throw new UndeclaredThrowableException(e, "Unknown exception in runAs()");
- }
- }
- return result;
- }
-
- @Override
- public void obtainAuthTokenForJob(Configuration conf, Job job)
- throws IOException, InterruptedException {
- // this is a no-op. token creation is only supported for kerberos
- // authenticated clients
- }
-
- @Override
- public void obtainAuthTokenForJob(JobConf job)
- throws IOException, InterruptedException {
- // this is a no-op. token creation is only supported for kerberos
- // authenticated clients
- }
-
- /** @see User#createUserForTesting(org.apache.hadoop.conf.Configuration, String, String[]) */
- public static User createUserForTesting(Configuration conf,
- String name, String[] groups) {
- try {
- Class c = Class.forName("org.apache.hadoop.security.UnixUserGroupInformation");
- Constructor constructor = c.getConstructor(String.class, String[].class);
- if (constructor == null) {
- throw new NullPointerException(
- );
- }
- UserGroupInformation newUser =
- (UserGroupInformation)constructor.newInstance(name, groups);
- // set user in configuration -- hack for regular hadoop
- conf.set("hadoop.job.ugi", newUser.toString());
- return new HadoopUser(newUser);
- } catch (ClassNotFoundException cnfe) {
- throw new RuntimeException(
- "UnixUserGroupInformation not found, is this secure Hadoop?", cnfe);
- } catch (NoSuchMethodException nsme) {
- throw new RuntimeException(
- "No valid constructor found for UnixUserGroupInformation!", nsme);
- } catch (RuntimeException re) {
- throw re;
- } catch (Exception e) {
- throw new UndeclaredThrowableException(e,
- "Unexpected exception instantiating new UnixUserGroupInformation");
- }
- }
-
- /**
- * No-op since we're running on a version of Hadoop that doesn't support
- * logins.
- * @see User#login(org.apache.hadoop.conf.Configuration, String, String, String)
- */
- public static void login(Configuration conf, String fileConfKey,
- String principalConfKey, String localhost) throws IOException {
- LOG.info("Skipping login, not running on secure Hadoop");
- }
-
- /** Always returns {@code false}. */
- public static boolean isSecurityEnabled() {
- return false;
- }
- }
-
- /**
* Bridges {@code User} invocations to underlying calls to
* {@link org.apache.hadoop.security.UserGroupInformation} for secure Hadoop
* 0.20 and versions 0.21 and above.
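
Illustrative sketch (not part of the patch): with the reflection-based HadoopUser bridge removed, callers keep using the same public User API; for example:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.hbase.security.User;

    public class RunAsSketch {
      public static String whoAmI() throws IOException, InterruptedException {
        User user = User.getCurrent();   // now always backed by SecureHadoopUser
        return user.runAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws IOException {
            // Work executed within the caller's security context.
            return User.getCurrent().getShortName();
          }
        });
      }
    }
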
Index: src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java (revision 1335370)
+++ src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java (working copy)
@@ -23,7 +23,7 @@
import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.security.TokenInfo;
-import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.hbase.security.KerberosInfo;
/**
* Protocol that a HBase client uses to communicate with a region server.
Index: src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java (revision 1335370)
+++ src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java (working copy)
@@ -22,7 +22,7 @@
import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.security.TokenInfo;
-import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.hbase.security.KerberosInfo;
/**
* Protocol that a HBase client uses to communicate with a region server.
Index: src/main/java/org/apache/hadoop/hbase/ipc/RegionServerStatusProtocol.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/ipc/RegionServerStatusProtocol.java (revision 1335370)
+++ src/main/java/org/apache/hadoop/hbase/ipc/RegionServerStatusProtocol.java (working copy)
@@ -23,7 +23,7 @@
import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
import org.apache.hadoop.hbase.security.TokenInfo;
-import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.hbase.security.KerberosInfo;
/**
* Protocol that a RegionServer uses to communicate its status to the Master.
Index: src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java (revision 1335370)
+++ src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java (working copy)
@@ -50,8 +50,11 @@
import org.apache.hadoop.io.*;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
+import org.apache.hadoop.hbase.security.HBasePolicyProvider;
import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.*;
@@ -252,9 +255,6 @@
  private Class<?>[] ifaces;
private boolean verbose;
- // for JSON encoding
- private static ObjectMapper mapper = new ObjectMapper();
-
private static final String WARN_RESPONSE_TIME =
"hbase.ipc.warn.response.time";
private static final String WARN_RESPONSE_SIZE =
@@ -310,7 +310,37 @@
DEFAULT_WARN_RESPONSE_SIZE);
}
+ public AuthenticationTokenSecretManager createSecretManager(){
+ if (!User.isSecurityEnabled() ||
+ !(instance instanceof org.apache.hadoop.hbase.Server)) {
+ return null;
+ }
+ org.apache.hadoop.hbase.Server server =
+ (org.apache.hadoop.hbase.Server)instance;
+ Configuration conf = server.getConfiguration();
+ long keyUpdateInterval =
+ conf.getLong("hbase.auth.key.update.interval", 24*60*60*1000);
+ long maxAge =
+ conf.getLong("hbase.auth.token.max.lifetime", 7*24*60*60*1000);
+ return new AuthenticationTokenSecretManager(conf, server.getZooKeeper(),
+ server.getServerName().toString(), keyUpdateInterval, maxAge);
+ }
+
@Override
+ public void startThreads() {
+ AuthenticationTokenSecretManager mgr = createSecretManager();
+ if (mgr != null) {
+ setSecretManager(mgr);
+ mgr.start();
+ }
+ this.authManager = new ServiceAuthorizationManager();
+ HBasePolicyProvider.init(conf, authManager);
+
+ // continue with base startup
+ super.startThreads();
+ }
+
+ @Override
  public Writable call(Class<? extends VersionedProtocol> protocol,
Writable param, long receivedTime, MonitoredRPCHandler status)
throws IOException {
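
Illustrative sketch (not part of the patch): the key-rotation and token-lifetime intervals read by createSecretManager() above are ordinary Configuration settings; the values below are arbitrary examples, not recommendations.

    import org.apache.hadoop.conf.Configuration;

    public class TokenConfigSketch {
      public static Configuration tuneTokenLifetimes(Configuration conf) {
        // Rotate the token-signing key every 12 hours (default in the patch: 24h).
        conf.setLong("hbase.auth.key.update.interval", 12 * 60 * 60 * 1000L);
        // Cap issued token lifetime at 3 days (default in the patch: 7 days).
        conf.setLong("hbase.auth.token.max.lifetime", 3 * 24 * 60 * 60 * 1000L);
        return conf;
      }
    }
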
Index: src/main/java/org/apache/hadoop/hbase/ipc/ConnectionHeader.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/ipc/ConnectionHeader.java (revision 1335370)
+++ src/main/java/org/apache/hadoop/hbase/ipc/ConnectionHeader.java (working copy)
@@ -1,77 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.ipc;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.security.User;
-
-/**
- * The IPC connection header sent by the client to the server
- * on connection establishment.
- */
-@InterfaceAudience.Private
-class ConnectionHeader implements Writable {
- protected String protocol;
-
- public ConnectionHeader() {}
-
- /**
- * Create a new {@link ConnectionHeader} with the given protocol
- * and {@link User}.
- * @param protocol protocol used for communication between the IPC client
- * and the server
- * @param user {@link User} of the client communicating with
- * the server
- */
- public ConnectionHeader(String protocol, User user) {
- this.protocol = protocol;
- }
-
- @Override
- public void readFields(DataInput in) throws IOException {
- protocol = Text.readString(in);
- if (protocol.isEmpty()) {
- protocol = null;
- }
- }
-
- @Override
- public void write(DataOutput out) throws IOException {
- Text.writeString(out, (protocol == null) ? "" : protocol);
- }
-
- public String getProtocol() {
- return protocol;
- }
-
- public User getUser() {
- return null;
- }
-
- public String toString() {
- return protocol;
- }
-}
Index: src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (revision 1335370)
+++ src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (working copy)
@@ -20,6 +20,8 @@
package org.apache.hadoop.hbase.ipc;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION;
+
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
@@ -34,6 +36,7 @@
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.nio.channels.CancelledKeyException;
+import java.nio.channels.Channels;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.SelectionKey;
@@ -41,6 +44,7 @@
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.nio.channels.WritableByteChannel;
+import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@@ -56,6 +60,10 @@
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
+import javax.security.sasl.Sasl;
+import javax.security.sasl.SaslException;
+import javax.security.sasl.SaslServer;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -68,14 +76,34 @@
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcRequest;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcException;
+import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status;
+import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcServer;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.AuthMethod;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslStatus;
import org.apache.hadoop.hbase.util.ByteBufferOutputStream;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.RPC.VersionMismatch;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
@@ -96,7 +124,8 @@
*/
@InterfaceAudience.Private
public abstract class HBaseServer implements RpcServer {
-
+ private final boolean authorize;
+ private boolean isSecurityEnabled;
/**
* The first four bytes of Hadoop RPC connections
*/
@@ -130,6 +159,13 @@
protected static final Log TRACELOG =
LogFactory.getLog("org.apache.hadoop.ipc.HBaseServer.trace");
+ private static final String AUTH_FAILED_FOR = "Auth failed for ";
+ private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for ";
+ private static final Log AUDITLOG =
+ LogFactory.getLog("SecurityLogger."+Server.class.getName());
+ protected SecretManager<TokenIdentifier> secretManager;
+ protected ServiceAuthorizationManager authManager;
+
  protected static final ThreadLocal<RpcServer> SERVER =
      new ThreadLocal<RpcServer>();
private volatile boolean started = false;
@@ -303,11 +339,12 @@
return param.toString() + " from " + connection.toString();
}
+ protected synchronized void setSaslTokenResponse(ByteBuffer response) {
+ this.response = response;
+ }
+
protected synchronized void setResponse(Object value, Status status,
String errorClass, String error) {
- // Avoid overwriting an error value in the response. This can happen if
- // endDelayThrowing is called by another thread before the actual call
- // returning.
if (this.isError)
return;
if (errorClass != null) {
@@ -328,8 +365,7 @@
if (result instanceof WritableWithSize) {
// get the size hint.
WritableWithSize ohint = (WritableWithSize) result;
- long hint = ohint.getWritableSize() + Bytes.SIZEOF_BYTE +
- (2 * Bytes.SIZEOF_INT);
+ long hint = ohint.getWritableSize() + 2*Bytes.SIZEOF_INT;
if (hint > Integer.MAX_VALUE) {
// oops, new problem.
IOException ioe =
@@ -342,12 +378,11 @@
}
ByteBufferOutputStream buf = new ByteBufferOutputStream(size);
- DataOutputStream out = new DataOutputStream(buf);
try {
RpcResponse.Builder builder = RpcResponse.newBuilder();
// Call id.
builder.setCallId(this.id);
- builder.setError(error != null);
+ builder.setStatus(status);
if (error != null) {
RpcException.Builder b = RpcException.newBuilder();
b.setExceptionName(errorClass);
@@ -359,8 +394,10 @@
byte[] response = d.getData();
builder.setResponse(ByteString.copyFrom(response));
}
- builder.build().writeDelimitedTo(
- DataOutputOutputStream.constructOutputStream(out));
+ builder.build().writeDelimitedTo(buf);
+ if (connection.useWrap) {
+ wrapWithSasl(buf);
+ }
} catch (IOException e) {
LOG.warn("Exception while creating response " + e);
}
@@ -369,6 +406,28 @@
this.response = bb;
}
+ private void wrapWithSasl(ByteBufferOutputStream response)
+ throws IOException {
+ if (connection.useSasl) {
+ // getByteBuffer calls flip()
+ ByteBuffer buf = response.getByteBuffer();
+ byte[] token;
+ // synchronization may be needed since there can be multiple Handler
+ // threads using saslServer to wrap responses.
+ synchronized (connection.saslServer) {
+ token = connection.saslServer.wrap(buf.array(),
+ buf.arrayOffset(), buf.remaining());
+ }
+ if (LOG.isDebugEnabled())
+ LOG.debug("Adding saslServer wrapped token of size " + token.length
+ + " as call response.");
+ buf.clear();
+ DataOutputStream saslOut = new DataOutputStream(response);
+ saslOut.writeInt(token.length);
+ saslOut.write(token, 0, token.length);
+ }
+ }
+
@Override
public synchronized void endDelay(Object result) throws IOException {
assert this.delayResponse;
@@ -1046,8 +1105,8 @@
}
/** Reads calls from a connection and queues them for handling. */
- protected class Connection {
- private boolean versionRead = false; //if initial signature and
+ public class Connection {
+ private boolean rpcHeaderRead = false; //if initial signature and
//version are read
private boolean headerRead = false; //if the connection header that
//follows version is read.
@@ -1058,6 +1117,7 @@
private volatile int rpcCount = 0; // number of outstanding rpcs
private long lastContact;
private int dataLength;
+ private InetAddress addr;
protected Socket socket;
// Cache the remote host & port info so that even if the socket is
// disconnected, we can say where it used to connect to.
@@ -1065,8 +1125,27 @@
protected int remotePort;
ConnectionHeader header;
  Class<? extends VersionedProtocol> protocol;
- protected User user = null;
+ protected UserGroupInformation user = null;
+ private AuthMethod authMethod;
+ private boolean saslContextEstablished;
+ private boolean skipInitialSaslHandshake;
+ private ByteBuffer rpcHeaderBuffer;
+ private ByteBuffer unwrappedData;
+ private ByteBuffer unwrappedDataLengthBuffer;
+ boolean useSasl;
+ SaslServer saslServer;
+ private boolean useWrap = false;
+ // Fake 'call' for failed authorization response
+ private final int AUTHORIZATION_FAILED_CALLID = -1;
+ private final Call authFailedCall =
+ new Call(AUTHORIZATION_FAILED_CALLID, null, this, null, 0);
+ private ByteArrayOutputStream authFailedResponse =
+ new ByteArrayOutputStream();
+ // Fake 'call' for SASL context setup
+ private static final int SASL_CALLID = -33;
+ private final Call saslCall = new Call(SASL_CALLID, null, this, null, 0);
+ public UserGroupInformation attemptingUser = null; // user name before auth
public Connection(SocketChannel channel, long lastContact) {
this.channel = channel;
this.lastContact = lastContact;
@@ -1100,6 +1179,10 @@
return hostAddress;
}
+ public InetAddress getHostInetAddress() {
+ return addr;
+ }
+
public int getRemotePort() {
return remotePort;
}
@@ -1131,39 +1214,218 @@
return isIdle() && currentTime - lastContact > maxIdleTime;
}
+ private UserGroupInformation getAuthorizedUgi(String authorizedId)
+ throws IOException {
+ if (authMethod == AuthMethod.DIGEST) {
+ TokenIdentifier tokenId = HBaseSaslRpcServer.getIdentifier(authorizedId,
+ secretManager);
+ UserGroupInformation ugi = tokenId.getUser();
+ if (ugi == null) {
+ throw new AccessControlException(
+ "Can't retrieve username from tokenIdentifier.");
+ }
+ ugi.addTokenIdentifier(tokenId);
+ return ugi;
+ } else {
+ return UserGroupInformation.createRemoteUser(authorizedId);
+ }
+ }
+
+ private void saslReadAndProcess(byte[] saslToken) throws IOException,
+ InterruptedException {
+ if (saslContextEstablished) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Have read input token of size " + saslToken.length
+ + " for processing by saslServer.unwrap()");
+
+ if (!useWrap) {
+ processOneRpc(saslToken);
+ } else {
+ byte[] plaintextData = saslServer.unwrap(saslToken, 0,
+ saslToken.length);
+ processUnwrappedData(plaintextData);
+ }
+ } else {
+ byte[] replyToken = null;
+ try {
+ if (saslServer == null) {
+ switch (authMethod) {
+ case DIGEST:
+ if (secretManager == null) {
+ throw new AccessControlException(
+ "Server is not configured to do DIGEST authentication.");
+ }
+ saslServer = Sasl.createSaslServer(AuthMethod.DIGEST
+ .getMechanismName(), null, HBaseSaslRpcServer.SASL_DEFAULT_REALM,
+ HBaseSaslRpcServer.SASL_PROPS, new SaslDigestCallbackHandler(
+ secretManager, this));
+ break;
+ default:
+ UserGroupInformation current = UserGroupInformation
+ .getCurrentUser();
+ String fullName = current.getUserName();
+ if (LOG.isDebugEnabled())
+ LOG.debug("Kerberos principal name is " + fullName);
+ final String names[] = HBaseSaslRpcServer.splitKerberosName(fullName);
+ if (names.length != 3) {
+ throw new AccessControlException(
+ "Kerberos principal name does NOT have the expected "
+ + "hostname part: " + fullName);
+ }
+ current.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws SaslException {
+ saslServer = Sasl.createSaslServer(AuthMethod.KERBEROS
+ .getMechanismName(), names[0], names[1],
+ HBaseSaslRpcServer.SASL_PROPS, new SaslGssCallbackHandler());
+ return null;
+ }
+ });
+ }
+ if (saslServer == null)
+ throw new AccessControlException(
+ "Unable to find SASL server implementation for "
+ + authMethod.getMechanismName());
+ if (LOG.isDebugEnabled())
+ LOG.debug("Created SASL server with mechanism = "
+ + authMethod.getMechanismName());
+ }
+ if (LOG.isDebugEnabled())
+ LOG.debug("Have read input token of size " + saslToken.length
+ + " for processing by saslServer.evaluateResponse()");
+ replyToken = saslServer.evaluateResponse(saslToken);
+ } catch (IOException e) {
+ IOException sendToClient = e;
+ Throwable cause = e;
+ while (cause != null) {
+ if (cause instanceof InvalidToken) {
+ sendToClient = (InvalidToken) cause;
+ break;
+ }
+ cause = cause.getCause();
+ }
+ doRawSaslReply(SaslStatus.ERROR, null, sendToClient.getClass().getName(),
+ sendToClient.getLocalizedMessage());
+ rpcMetrics.authenticationFailures.inc();
+ String clientIP = this.toString();
+ // attempting user could be null
+ AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser);
+ throw e;
+ }
+ if (replyToken != null) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Will send token of size " + replyToken.length
+ + " from saslServer.");
+ doRawSaslReply(SaslStatus.SUCCESS, new BytesWritable(replyToken), null,
+ null);
+ }
+ if (saslServer.isComplete()) {
+ String qop = (String) saslServer.getNegotiatedProperty(Sasl.QOP);
+ useWrap = qop != null && !"auth".equalsIgnoreCase(qop);
+ user = getAuthorizedUgi(saslServer.getAuthorizationID());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("SASL server context established. Authenticated client: "
+ + user + ". Negotiated QoP is "
+ + saslServer.getNegotiatedProperty(Sasl.QOP));
+ }
+ rpcMetrics.authenticationSuccesses.inc();
+ AUDITLOG.trace(AUTH_SUCCESSFUL_FOR + user);
+ saslContextEstablished = true;
+ }
+ }
+ }
+ /**
+ * No protobuf encoding of raw sasl messages
+ */
+ private void doRawSaslReply(SaslStatus status, Writable rv,
+ String errorClass, String error) throws IOException {
+ // In testing, SASL messages have usually been in the ballpark of
+ // 100-200 bytes, hence the initial capacity of 256.
+ ByteBufferOutputStream saslResponse = new ByteBufferOutputStream(256);
+ DataOutputStream out = new DataOutputStream(saslResponse);
+ out.writeInt(status.state); // write status
+ if (status == SaslStatus.SUCCESS) {
+ rv.write(out);
+ } else {
+ WritableUtils.writeString(out, errorClass);
+ WritableUtils.writeString(out, error);
+ }
+ saslCall.setSaslTokenResponse(saslResponse.getByteBuffer());
+ saslCall.responder = responder;
+ saslCall.sendResponseIfReady();
+ }
+
+ private void disposeSasl() {
+ if (saslServer != null) {
+ try {
+ saslServer.dispose();
+ saslServer = null;
+ } catch (SaslException ignored) {
+ }
+ }
+ }
+
public int readAndProcess() throws IOException, InterruptedException {
while (true) {
/* Read at most one RPC. If the header is not read completely yet
* then iterate until we read first RPC or until there is no data left.
*/
- int count;
+ int count = -1;
if (dataLengthBuffer.remaining() > 0) {
count = channelRead(channel, dataLengthBuffer);
if (count < 0 || dataLengthBuffer.remaining() > 0)
return count;
}
- if (!versionRead) {
+ if (!rpcHeaderRead) {
//Every connection is expected to send the header.
- ByteBuffer versionBuffer = ByteBuffer.allocate(1);
- count = channelRead(channel, versionBuffer);
- if (count <= 0) {
+ if (rpcHeaderBuffer == null) {
+ rpcHeaderBuffer = ByteBuffer.allocate(2);
+ }
+ count = channelRead(channel, rpcHeaderBuffer);
+ if (count < 0 || rpcHeaderBuffer.remaining() > 0) {
return count;
}
- int version = versionBuffer.get(0);
-
+ int version = rpcHeaderBuffer.get(0);
+ byte[] method = new byte[] {rpcHeaderBuffer.get(1)};
+ authMethod = AuthMethod.read(new DataInputStream(
+ new ByteArrayInputStream(method)));
dataLengthBuffer.flip();
if (!HEADER.equals(dataLengthBuffer) || version != CURRENT_VERSION) {
- //Warning is ok since this is not supposed to happen.
- LOG.warn("Incorrect header or version mismatch from " +
- hostAddress + ":" + remotePort +
- " got version " + version +
- " expected version " + CURRENT_VERSION);
+ LOG.warn("Incorrect header or version mismatch from " +
+ hostAddress + ":" + remotePort +
+ " got version " + version +
+ " expected version " + CURRENT_VERSION);
setupBadVersionResponse(version);
return -1;
}
dataLengthBuffer.clear();
- versionRead = true;
+ if (authMethod == null) {
+ throw new IOException("Unable to read authentication method");
+ }
+ if (isSecurityEnabled && authMethod == AuthMethod.SIMPLE) {
+ AccessControlException ae = new AccessControlException(
+ "Authentication is required");
+ setupResponse(authFailedResponse, authFailedCall, Status.FATAL,
+ null, ae.getClass().getName(), ae.getMessage());
+ responder.doRespond(authFailedCall);
+ throw ae;
+ }
+ if (!isSecurityEnabled && authMethod != AuthMethod.SIMPLE) {
+ doRawSaslReply(SaslStatus.SUCCESS, new IntWritable(
+ HBaseSaslRpcServer.SWITCH_TO_SIMPLE_AUTH), null, null);
+ authMethod = AuthMethod.SIMPLE;
+ // client has already sent the initial Sasl message and we
+ // should ignore it. Both client and server should fall back
+ // to simple auth from now on.
+ skipInitialSaslHandshake = true;
+ }
+ if (authMethod != AuthMethod.SIMPLE) {
+ useSasl = true;
+ }
+
+ rpcHeaderBuffer = null;
+ rpcHeaderRead = true;
continue;
}
@@ -1172,9 +1434,15 @@
dataLength = dataLengthBuffer.getInt();
if (dataLength == HBaseClient.PING_CALL_ID) {
- dataLengthBuffer.clear();
- return 0; //ping message
+ if(!useWrap) { //covers the !useSasl too
+ dataLengthBuffer.clear();
+ return 0; //ping message
+ }
}
+ if (dataLength < 0) {
+ throw new IllegalArgumentException("Unexpected data length "
+ + dataLength + "!! from " + getHostAddress());
+ }
data = ByteBuffer.allocate(dataLength);
incRpcCount(); // Increment the rpc count
}
@@ -1184,15 +1452,21 @@
if (data.remaining() == 0) {
dataLengthBuffer.clear();
data.flip();
- if (headerRead) {
- processData(data.array());
+ if (skipInitialSaslHandshake) {
data = null;
- return count;
+ skipInitialSaslHandshake = false;
+ continue;
}
- processHeader();
- headerRead = true;
+ boolean isHeaderRead = headerRead;
+ if (useSasl) {
+ saslReadAndProcess(data.array());
+ } else {
+ processOneRpc(data.array());
+ }
data = null;
- continue;
+ if (!isHeaderRead) {
+ continue;
+ }
}
return count;
}
@@ -1228,18 +1502,106 @@
}
/// Reads the connection header following version
- private void processHeader() throws IOException {
- header = ConnectionHeader.parseFrom(new ByteArrayInputStream(data.array()));
+ private void processHeader(byte[] buf) throws IOException {
+ DataInputStream in =
+ new DataInputStream(new ByteArrayInputStream(buf));
+ header = ConnectionHeader.parseFrom(in);
try {
String protocolClassName = header.getProtocol();
- protocol = getProtocolClass(protocolClassName, conf);
+ if (protocolClassName != null) {
+ protocol = getProtocolClass(header.getProtocol(), conf);
+ }
} catch (ClassNotFoundException cnfe) {
throw new IOException("Unknown protocol: " + header.getProtocol());
}
- user = User.createUser(header);
+ UserGroupInformation protocolUser = createUser(header);
+ if (!useSasl) {
+ user = protocolUser;
+ if (user != null) {
+ user.setAuthenticationMethod(AuthMethod.SIMPLE.authenticationMethod);
+ }
+ } else {
+ // user is authenticated
+ user.setAuthenticationMethod(authMethod.authenticationMethod);
+ //Now we check if this is a proxy user case. If the protocol user is
+ //different from the 'user', it is a proxy user scenario. However,
+ //this is not allowed if user authenticated with DIGEST.
+ if ((protocolUser != null)
+ && (!protocolUser.getUserName().equals(user.getUserName()))) {
+ if (authMethod == AuthMethod.DIGEST) {
+ // Not allowed to doAs if token authentication is used
+ throw new AccessControlException("Authenticated user (" + user
+ + ") doesn't match what the client claims to be ("
+ + protocolUser + ")");
+ } else {
+ // Effective user can be different from authenticated user
+ // for simple auth or kerberos auth
+ // The user is the real user. Now we create a proxy user
+ UserGroupInformation realUser = user;
+ user = UserGroupInformation.createProxyUser(protocolUser
+ .getUserName(), realUser);
+ // Now the user is a proxy user, set Authentication method Proxy.
+ user.setAuthenticationMethod(AuthenticationMethod.PROXY);
+ }
+ }
+ }
}
+ private void processUnwrappedData(byte[] inBuf) throws IOException,
+ InterruptedException {
+ ReadableByteChannel ch = Channels.newChannel(new ByteArrayInputStream(
+ inBuf));
+ // Read all RPCs contained in the inBuf, even partial ones
+ while (true) {
+ int count = -1;
+ if (unwrappedDataLengthBuffer.remaining() > 0) {
+ count = channelRead(ch, unwrappedDataLengthBuffer);
+ if (count <= 0 || unwrappedDataLengthBuffer.remaining() > 0)
+ return;
+ }
+
+ if (unwrappedData == null) {
+ unwrappedDataLengthBuffer.flip();
+ int unwrappedDataLength = unwrappedDataLengthBuffer.getInt();
+
+ if (unwrappedDataLength == HBaseClient.PING_CALL_ID) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Received ping message");
+ unwrappedDataLengthBuffer.clear();
+ continue; // ping message
+ }
+ unwrappedData = ByteBuffer.allocate(unwrappedDataLength);
+ }
+
+ count = channelRead(ch, unwrappedData);
+ if (count <= 0 || unwrappedData.remaining() > 0)
+ return;
+
+ if (unwrappedData.remaining() == 0) {
+ unwrappedDataLengthBuffer.clear();
+ unwrappedData.flip();
+ processOneRpc(unwrappedData.array());
+ unwrappedData = null;
+ }
+ }
+ }
+
+ private void processOneRpc(byte[] buf) throws IOException,
+ InterruptedException {
+ if (headerRead) {
+ processData(buf);
+ } else {
+ processHeader(buf);
+ headerRead = true;
+ if (!authorizeConnection()) {
+ throw new AccessControlException("Connection from " + this
+ + " for protocol " + header.getProtocol()
+ + " is unauthorized for user " + user);
+ }
+ }
+ }
+
protected void processData(byte[] buf) throws IOException, InterruptedException {
RpcRequest request = RpcRequest.parseFrom(buf);
int id = request.getCallId();
@@ -1293,7 +1655,34 @@
}
}
+ private boolean authorizeConnection() throws IOException {
+ try {
+ // If auth method is DIGEST, the token was obtained by the
+ // real user for the effective user, therefore not required to
+ // authorize real user. doAs is allowed only for simple or kerberos
+ // authentication
+ if (user != null && user.getRealUser() != null
+ && (authMethod != AuthMethod.DIGEST)) {
+ ProxyUsers.authorize(user, this.getHostAddress(), conf);
+ }
+ authorize(user, header, getHostInetAddress());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Successfully authorized " + header);
+ }
+ rpcMetrics.authorizationSuccesses.inc();
+ } catch (AuthorizationException ae) {
+ LOG.debug("Connection authorization failed: "+ae.getMessage(), ae);
+ rpcMetrics.authorizationFailures.inc();
+ setupResponse(authFailedResponse, authFailedCall, Status.FATAL, null,
+ ae.getClass().getName(), ae.getMessage());
+ responder.doRespond(authFailedCall);
+ return false;
+ }
+ return true;
+ }
+
protected synchronized void close() {
+ disposeSasl();
data = null;
dataLengthBuffer = null;
if (!channel.isOpen())
@@ -1304,6 +1693,33 @@
}
try {socket.close();} catch(Exception ignored) {}
}
+
+ private UserGroupInformation createUser(ConnectionHeader head) {
+ UserGroupInformation ugi = null;
+
+ if (!head.hasUserInfo()) {
+ return null;
+ }
+ UserInformation userInfoProto = head.getUserInfo();
+ String effectiveUser = null;
+ if (userInfoProto.hasEffectiveUser()) {
+ effectiveUser = userInfoProto.getEffectiveUser();
+ }
+ String realUser = null;
+ if (userInfoProto.hasRealUser()) {
+ realUser = userInfoProto.getRealUser();
+ }
+ if (effectiveUser != null) {
+ if (realUser != null) {
+ UserGroupInformation realUserUgi =
+ UserGroupInformation.createRemoteUser(realUser);
+ ugi = UserGroupInformation.createProxyUser(effectiveUser, realUserUgi);
+ } else {
+ ugi = UserGroupInformation.createRemoteUser(effectiveUser);
+ }
+ }
+ return ugi;
+ }
}
/**
@@ -1367,15 +1783,16 @@
throw new ServerNotRunningYetException("Server is not running yet");
if (LOG.isDebugEnabled()) {
- User remoteUser = call.connection.user;
+ UserGroupInformation remoteUser = call.connection.user;
LOG.debug(getName() + ": call #" + call.id + " executing as "
- + (remoteUser == null ? "NULL principal" : remoteUser.getName()));
+ + (remoteUser == null ? "NULL principal" :
+ remoteUser.getUserName()));
}
- RequestContext.set(call.connection.user, getRemoteIp(),
+ RequestContext.set(User.create(call.connection.user), getRemoteIp(),
call.connection.protocol);
// make the call
- value = call(call.connection.protocol, call.param, call.timestamp,
+ value = call(call.connection.protocol, call.param, call.timestamp,
status);
} catch (Throwable e) {
LOG.debug(getName()+", call "+call+": error: " + e, e);
@@ -1507,6 +1924,12 @@
// Create the responder here
responder = new Responder();
+ this.authorize =
+ conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false);
+ this.isSecurityEnabled = UserGroupInformation.isSecurityEnabled();
+ if (isSecurityEnabled) {
+ HBaseSaslRpcServer.init(conf);
+ }
}
/**
@@ -1562,6 +1985,10 @@
rpcMetrics.numOpenConnections.set(numConnections);
}
+ Configuration getConf() {
+ return conf;
+ }
+
/** Sets the socket buffer size used for responding to RPCs.
* @param size send size
*/
@@ -1607,6 +2034,14 @@
}
}
+ public SecretManager<? extends TokenIdentifier> getSecretManager() {
+ return this.secretManager;
+ }
+
+ public void setSecretManager(SecretManager<? extends TokenIdentifier> secretManager) {
+ this.secretManager = (SecretManager<TokenIdentifier>) secretManager;
+ }
+
/** Stops the service. No new calls will be handled after this is called. */
@Override
public synchronized void stop() {
@@ -1673,6 +2108,31 @@
}
/**
+ * Authorize the incoming client connection.
+ *
+ * @param user client user
+ * @param connection incoming connection
+ * @param addr InetAddress of incoming connection
+ * @throws org.apache.hadoop.security.authorize.AuthorizationException when the client isn't authorized to talk the protocol
+ */
+ public void authorize(UserGroupInformation user,
+ ConnectionHeader connection,
+ InetAddress addr
+ ) throws AuthorizationException {
+ if (authorize) {
+ Class<?> protocol = null;
+ try {
+ protocol = getProtocolClass(connection.getProtocol(), getConf());
+ } catch (ClassNotFoundException cfne) {
+ throw new AuthorizationException("Unknown protocol: " +
+ connection.getProtocol());
+ }
+ authManager.authorize(user,
+ protocol, getConf(), addr);
+ }
+ }
+
+ /**
* When the read or write buffer size is larger than this limit, i/o will be
* done in chunks of this size. Most RPC requests and responses would be
* be smaller.
Index: src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java (revision 1335370)
+++ src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java (working copy)
@@ -28,13 +28,18 @@
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
+import java.io.OutputStream;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.net.UnknownHostException;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
import java.util.Iterator;
+import java.util.Map;
+import java.util.Random;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -48,18 +53,32 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos;
+import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse.Status;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcRequest;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;
+import org.apache.hadoop.hbase.security.KerberosInfo;
+import org.apache.hadoop.hbase.security.TokenInfo;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.AuthMethod;
import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
+import org.apache.hadoop.hbase.security.token.AuthenticationTokenSelector;
import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.hbase.util.PoolMap.PoolType;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.hbase.io.DataOutputOutputStream;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.protobuf.ByteString;
@@ -213,7 +232,12 @@
return this.startTime;
}
}
-
+ protected static Map<String, TokenSelector<? extends TokenIdentifier>> tokenHandlers =
+ new HashMap<String, TokenSelector<? extends TokenIdentifier>>();
+ static {
+ tokenHandlers.put(AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.toString(),
+ new AuthenticationTokenSelector());
+ }
/** Thread that reads responses and notifies callers. Each connection owns a
* socket connected to a remote address. Calls are multiplexed through this
* socket: responses may be delivered out of order. */
@@ -223,6 +247,13 @@
protected Socket socket = null; // connected socket
protected DataInputStream in;
protected DataOutputStream out;
+ private InetSocketAddress server; // server ip:port
+ private String serverPrincipal; // server's krb5 principal name
+ private AuthMethod authMethod; // authentication method
+ private boolean useSasl;
+ private Token<? extends TokenIdentifier> token;
+ private HBaseSaslRpcClient saslRpcClient;
+ private int reloginMaxBackoff; // max pause before relogin on sasl failure
// currently active calls
protected final ConcurrentSkipListMap<Integer, Call> calls = new ConcurrentSkipListMap<Integer, Call>();
@@ -235,20 +266,89 @@
throw new UnknownHostException("unknown host: " +
remoteId.getAddress().getHostName());
}
+ this.server = remoteId.getAddress();
+
+ UserGroupInformation ticket = remoteId.getTicket().getUGI();
+ Class<? extends VersionedProtocol> protocol = remoteId.getProtocol();
+ this.useSasl = UserGroupInformation.isSecurityEnabled();
+ if (useSasl && protocol != null) {
+ TokenInfo tokenInfo = protocol.getAnnotation(TokenInfo.class);
+ if (tokenInfo != null) {
+ TokenSelector<? extends TokenIdentifier> tokenSelector =
+ tokenHandlers.get(tokenInfo.value());
+ if (tokenSelector != null) {
+ token = tokenSelector.selectToken(new Text(clusterId),
+ ticket.getTokens());
+ } else if (LOG.isDebugEnabled()) {
+ LOG.debug("No token selector found for type "+tokenInfo.value());
+ }
+ }
+ KerberosInfo krbInfo = protocol.getAnnotation(KerberosInfo.class);
+ if (krbInfo != null) {
+ String serverKey = krbInfo.serverPrincipal();
+ if (serverKey == null) {
+ throw new IOException(
+ "Can't obtain server Kerberos config key from KerberosInfo");
+ }
+ serverPrincipal = SecurityUtil.getServerPrincipal(
+ conf.get(serverKey), server.getAddress().getCanonicalHostName().toLowerCase());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("RPC Server Kerberos principal name for protocol="
+ + protocol.getCanonicalName() + " is " + serverPrincipal);
+ }
+ }
+ }
+
+ if (!useSasl) {
+ authMethod = AuthMethod.SIMPLE;
+ } else if (token != null) {
+ authMethod = AuthMethod.DIGEST;
+ } else {
+ authMethod = AuthMethod.KERBEROS;
+ }
+
+ if (LOG.isDebugEnabled())
+ LOG.debug("Use " + authMethod + " authentication for protocol "
+ + protocol.getSimpleName());
+
+ reloginMaxBackoff = conf.getInt("hbase.security.relogin.maxbackoff", 5000);
this.remoteId = remoteId;
- User ticket = remoteId.getTicket();
- Class<? extends VersionedProtocol> protocol = remoteId.getProtocol();
ConnectionHeader.Builder builder = ConnectionHeader.newBuilder();
builder.setProtocol(protocol == null ? "" : protocol.getName());
+ UserInformation userInfoPB;
+ if ((userInfoPB = getUserInfoPB(ticket)) != null) {
+ builder.setUserInfo(userInfoPB);
+ }
this.header = builder.build();
this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " +
remoteId.getAddress().toString() +
- ((ticket==null)?" from an unknown user": (" from " + ticket.getName())));
+ ((ticket==null)?" from an unknown user": (" from "
+ + ticket.getUserName())));
this.setDaemon(true);
}
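The SASL choices in this constructor are driven entirely by annotations on the protocol interface: @TokenInfo names the token kind to look up in tokenHandlers, and @KerberosInfo names the configuration key holding the server principal. A sketch of how a secured protocol might be declared; the token kind string and the principal key below are illustrative values, not taken from this hunk:

    import org.apache.hadoop.hbase.ipc.VersionedProtocol;
    import org.apache.hadoop.hbase.security.KerberosInfo;
    import org.apache.hadoop.hbase.security.TokenInfo;

    // "HBASE_AUTH_TOKEN" must match the token kind registered in tokenHandlers;
    // the serverPrincipal key must resolve to the server's Kerberos principal.
    @KerberosInfo(serverPrincipal = "hbase.regionserver.kerberos.principal")
    @TokenInfo("HBASE_AUTH_TOKEN")
    public interface SecuredClientProtocol extends VersionedProtocol {
      // With these annotations the client picks DIGEST auth when a matching
      // delegation token is in the caller's UGI, and KERBEROS otherwise.
    }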
+ private UserInformation getUserInfoPB(UserGroupInformation ugi) {
+ UserInformation.Builder userInfoPB = UserInformation.newBuilder();
+ if (ugi != null) {
+ if (authMethod == AuthMethod.KERBEROS) {
+ // Send effective user for Kerberos auth
+ userInfoPB.setEffectiveUser(ugi.getUserName());
+ } else if (authMethod == AuthMethod.SIMPLE) {
+ //Send both effective user and real user for simple auth
+ userInfoPB.setEffectiveUser(ugi.getUserName());
+ if (ugi.getRealUser() != null) {
+ userInfoPB.setRealUser(ugi.getRealUser().getUserName());
+ }
+ }
+ }
+ return userInfoPB.build();
+ }
+
/** Update lastActivity with the current time. */
protected void touch() {
lastActivity.set(System.currentTimeMillis());
@@ -352,42 +452,6 @@
}
}
- /** Connect to the server and set up the I/O streams. It then sends
- * a header to the server and starts
- * the connection thread that waits for responses.
- * @throws java.io.IOException e
- */
- protected synchronized void setupIOstreams()
- throws IOException, InterruptedException {
-
- if (socket != null || shouldCloseConnection.get()) {
- return;
- }
-
- try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Connecting to "+remoteId);
- }
- setupConnection();
- this.in = new DataInputStream(new BufferedInputStream
- (new PingInputStream(NetUtils.getInputStream(socket))));
- this.out = new DataOutputStream
- (new BufferedOutputStream(NetUtils.getOutputStream(socket)));
- writeHeader();
-
- // update last activity time
- touch();
-
- // start the receiver thread after the socket connection has been set up
- start();
- } catch (IOException e) {
- markClosed(e);
- close();
-
- throw e;
- }
- }
-
protected void closeConnection() {
// close the current connection
if (socket != null) {
@@ -437,16 +501,6 @@
" time(s).");
}
- /* Write the header for each connection
- * Out is not synchronized because only the first thread does this.
- */
- private void writeHeader() throws IOException {
- out.write(HBaseServer.HEADER.array());
- out.write(HBaseServer.CURRENT_VERSION);
- out.writeInt(header.getSerializedSize());
- header.writeTo(out);
- }
-
/* wait till someone signals us to start reading RPC response or
* it is idle too long, it is marked as to be closed,
* or the client is marked as not running.
@@ -519,6 +573,230 @@
+ connections.size());
}
+ private synchronized void disposeSasl() {
+ if (saslRpcClient != null) {
+ try {
+ saslRpcClient.dispose();
+ saslRpcClient = null;
+ } catch (IOException ioe) {
+ LOG.error("Error disposing of SASL client", ioe);
+ }
+ }
+ }
+
+ private synchronized boolean shouldAuthenticateOverKrb() throws IOException {
+ UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
+ UserGroupInformation currentUser =
+ UserGroupInformation.getCurrentUser();
+ UserGroupInformation realUser = currentUser.getRealUser();
+ return authMethod == AuthMethod.KERBEROS &&
+ loginUser != null &&
+ //Make sure user logged in using Kerberos either keytab or TGT
+ loginUser.hasKerberosCredentials() &&
+ // relogin only in case it is the login user (e.g. JT)
+ // or superuser (like oozie).
+ (loginUser.equals(currentUser) || loginUser.equals(realUser));
+ }
+
+ private synchronized boolean setupSaslConnection(final InputStream in2,
+ final OutputStream out2) throws IOException {
+ saslRpcClient = new HBaseSaslRpcClient(authMethod, token, serverPrincipal);
+ return saslRpcClient.saslConnect(in2, out2);
+ }
+
+ /**
+ * If multiple clients with the same principal try to connect
+ * to the same server at the same time, the server assumes a
+ * replay attack is in progress. This is a feature of kerberos.
+ * In order to work around this, what is done is that the client
+ * backs off randomly and tries to initiate the connection
+ * again.
+ * The other problem is to do with ticket expiry. To handle that,
+ * a relogin is attempted.
+ */
+ private synchronized void handleSaslConnectionFailure(
+ final int currRetries,
+ final int maxRetries, final Exception ex, final Random rand,
+ final UserGroupInformation user)
+ throws IOException, InterruptedException{
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ public Object run() throws IOException, InterruptedException {
+ closeConnection();
+ if (shouldAuthenticateOverKrb()) {
+ if (currRetries < maxRetries) {
+ LOG.debug("Exception encountered while connecting to " +
+ "the server : " + ex);
+ //try re-login
+ if (UserGroupInformation.isLoginKeytabBased()) {
+ UserGroupInformation.getLoginUser().reloginFromKeytab();
+ } else {
+ UserGroupInformation.getLoginUser().reloginFromTicketCache();
+ }
+ disposeSasl();
+ //have granularity of milliseconds
+ //we are sleeping with the Connection lock held but since this
+ //connection instance is being used for connecting to the server
+ //in question, it is okay
+ Thread.sleep((rand.nextInt(reloginMaxBackoff) + 1));
+ return null;
+ } else {
+ String msg = "Couldn't setup connection for " +
+ UserGroupInformation.getLoginUser().getUserName() +
+ " to " + serverPrincipal;
+ LOG.warn(msg);
+ throw (IOException) new IOException(msg).initCause(ex);
+ }
+ } else {
+ LOG.warn("Exception encountered while connecting to " +
+ "the server : " + ex);
+ }
+ if (ex instanceof RemoteException)
+ throw (RemoteException)ex;
+ throw new IOException(ex);
+ }
+ });
+ }
+
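handleSaslConnectionFailure() can only re-login if the process originally authenticated from a keytab or a ticket cache. A minimal sketch of the keytab login a long-running daemon performs at startup so that reloginFromKeytab() has credentials to refresh (the principal and keytab path are placeholders):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;

    public class KeytabLoginSketch {
      static void login(Configuration conf) throws IOException {
        UserGroupInformation.setConfiguration(conf);
        // Placeholder principal and keytab; real deployments take these from
        // their own configuration keys.
        UserGroupInformation.loginUserFromKeytab(
            "hbase/host.example.com@EXAMPLE.COM",
            "/etc/security/keytabs/hbase.keytab");
        // Afterwards isLoginKeytabBased() is true, so the failure handler's
        // reloginFromKeytab() path can renew expired tickets.
      }
    }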
+ protected synchronized void setupIOstreams()
+ throws IOException, InterruptedException {
+ if (socket != null || shouldCloseConnection.get()) {
+ return;
+ }
+
+ try {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Connecting to "+server);
+ }
+ short numRetries = 0;
+ final short MAX_RETRIES = 5;
+ Random rand = null;
+ while (true) {
+ setupConnection();
+ InputStream inStream = NetUtils.getInputStream(socket);
+ OutputStream outStream = NetUtils.getOutputStream(socket);
+ writeRpcHeader(outStream);
+ if (useSasl) {
+ final InputStream in2 = inStream;
+ final OutputStream out2 = outStream;
+ UserGroupInformation ticket = remoteId.getTicket().getUGI();
+ if (authMethod == AuthMethod.KERBEROS) {
+ if (ticket != null && ticket.getRealUser() != null) {
+ ticket = ticket.getRealUser();
+ }
+ }
+ boolean continueSasl = false;
+ try {
+ continueSasl =
+ ticket.doAs(new PrivilegedExceptionAction<Boolean>() {
+ @Override
+ public Boolean run() throws IOException {
+ return setupSaslConnection(in2, out2);
+ }
+ });
+ } catch (Exception ex) {
+ if (rand == null) {
+ rand = new Random();
+ }
+ handleSaslConnectionFailure(numRetries++, MAX_RETRIES, ex, rand,
+ ticket);
+ continue;
+ }
+ if (continueSasl) {
+ // Sasl connect is successful. Let's set up Sasl i/o streams.
+ inStream = saslRpcClient.getInputStream(inStream);
+ outStream = saslRpcClient.getOutputStream(outStream);
+ } else {
+ // fall back to simple auth because server told us so.
+ authMethod = AuthMethod.SIMPLE;
+ useSasl = false;
+ }
+ }
+ this.in = new DataInputStream(new BufferedInputStream
+ (new PingInputStream(inStream)));
+ this.out = new DataOutputStream
+ (new BufferedOutputStream(outStream));
+ writeHeader();
+
+ // update last activity time
+ touch();
+
+ // start the receiver thread after the socket connection has been set up
+ start();
+ return;
+ }
+ } catch (IOException e) {
+ markClosed(e);
+ close();
+
+ throw e;
+ }
+ }
+
+ /* Write the RPC header */
+ private void writeRpcHeader(OutputStream outStream) throws IOException {
+ DataOutputStream out = new DataOutputStream(new BufferedOutputStream(outStream));
+ // Write out the header, version and authentication method
+ out.write(HBaseServer.HEADER.array());
+ out.write(HBaseServer.CURRENT_VERSION);
+ authMethod.write(out);
+ out.flush();
+ }
+
+ /**
+ * Write the protocol header for each connection
+ * Out is not synchronized because only the first thread does this.
+ */
+ private void writeHeader() throws IOException {
+ // Write out the ConnectionHeader
+ out.writeInt(header.getSerializedSize());
+ header.writeTo(out);
+ }
+
+ /** Close the connection. */
+ protected synchronized void close() {
+ if (!shouldCloseConnection.get()) {
+ LOG.error("The connection is not in the closed state");
+ return;
+ }
+
+ // release the resources
+ // first thing to do; take the connection out of the connection list
+ synchronized (connections) {
+ if (connections.get(remoteId) == this) {
+ connections.remove(remoteId);
+ }
+ }
+
+ // close the streams and therefore the socket
+ IOUtils.closeStream(out);
+ IOUtils.closeStream(in);
+ disposeSasl();
+
+ // clean up all calls
+ if (closeException == null) {
+ if (!calls.isEmpty()) {
+ LOG.warn(
+ "A connection is closed for no cause and calls are not empty. " +
+ "#Calls: " + calls.size());
+
+ // clean up calls anyway
+ closeException = new IOException("Unexpected closed connection");
+ cleanupCalls();
+ }
+ } else {
+ // log the info
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("closing ipc connection to " + server + ": " +
+ closeException.getMessage(),closeException);
+ }
+
+ // cleanup calls
+ cleanupCalls();
+ }
+ if (LOG.isDebugEnabled())
+ LOG.debug(getName() + ": closed");
+ }
+
/* Initiates a call by sending the parameter to the remote server.
* Note: this is not called from the Connection thread, but by other
* threads.
@@ -575,15 +853,8 @@
LOG.debug(getName() + " got value #" + id);
Call call = calls.remove(id);
- boolean isError = response.getError();
- if (isError) {
- if (call != null) {
- //noinspection ThrowableInstanceNeverThrown
- call.setException(new RemoteException(
- response.getException().getExceptionName(),
- response.getException().getStackTrace()));
- }
- } else {
+ Status status = response.getStatus();
+ if (status == Status.SUCCESS) {
ByteString responseObj = response.getResponse();
DataInputStream dis =
new DataInputStream(responseObj.newInput());
@@ -594,6 +865,18 @@
if (call != null) {
call.setValue(value);
}
+ } else if (status == Status.ERROR) {
+ if (call != null) {
+ //noinspection ThrowableInstanceNeverThrown
+ call.setException(new RemoteException(
+ response.getException().getExceptionName(),
+ response.getException().getStackTrace()));
+ }
+ } else if (status == Status.FATAL) {
+ // Close the connection
+ markClosed(new RemoteException(
+ response.getException().getExceptionName(),
+ response.getException().getStackTrace()));
}
} catch (IOException e) {
if (e instanceof SocketTimeoutException && remoteId.rpcTimeout > 0) {
@@ -620,47 +903,6 @@
}
}
- /** Close the connection. */
- protected synchronized void close() {
- if (!shouldCloseConnection.get()) {
- LOG.error("The connection is not in the closed state");
- return;
- }
-
- // release the resources
- // first thing to do;take the connection out of the connection list
- synchronized (connections) {
- connections.remove(remoteId, this);
- }
-
- // close the streams and therefore the socket
- IOUtils.closeStream(out);
- IOUtils.closeStream(in);
-
- // clean up all calls
- if (closeException == null) {
- if (!calls.isEmpty()) {
- LOG.warn(
- "A connection is closed for no cause and calls are not empty");
-
- // clean up calls anyway
- closeException = new IOException("Unexpected closed connection");
- cleanupCalls();
- }
- } else {
- // log the info
- if (LOG.isDebugEnabled()) {
- LOG.debug("closing ipc connection to " + remoteId.address + ": " +
- closeException.getMessage(),closeException);
- }
-
- // cleanup calls
- cleanupCalls();
- }
- if (LOG.isDebugEnabled())
- LOG.debug(getName() + ": closed");
- }
-
/* Cleanup all calls and mark them as done */
protected void cleanupCalls() {
cleanupCalls(0);
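On the client side, DIGEST authentication is only selected when a matching token is already in the caller's credentials, so processes that cannot authenticate with Kerberos (for example MapReduce tasks) must obtain and attach an HBase delegation token up front. A rough sketch, assuming the security code's TokenUtil.obtainToken(Configuration) keeps that signature:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
    import org.apache.hadoop.hbase.security.token.TokenUtil;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    public class DelegationTokenSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed helper: asks the cluster for a delegation token while still
        // authenticated (e.g. via Kerberos).
        Token<AuthenticationTokenIdentifier> token = TokenUtil.obtainToken(conf);
        // Attach it to the current user's credentials; the connection setup
        // above will then find it via AuthenticationTokenSelector and use DIGEST.
        UserGroupInformation.getCurrentUser().addToken(token);
      }
    }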
Index: src/main/java/org/apache/hadoop/hbase/ipc/Status.java (deleted)
===================================================================
Index: pom.xml
===================================================================
--- pom.xml (revision 1335370)
+++ pom.xml (working copy)
@@ -1624,64 +1624,6 @@
-
-    <profile>
-      <id>security</id>
-      <properties>
-        <hadoop.version>1.0.2</hadoop.version>
-      </properties>
-      <build>
-        <finalName>${project.artifactId}-${project.version}-security</finalName>
-        <plugins>
-          <plugin>
-            <groupId>org.codehaus.mojo</groupId>
-            <artifactId>build-helper-maven-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>add-source</id>
-                <goals>
-                  <goal>add-source</goal>
-                </goals>
-                <configuration>
-                  <sources>
-                    <source>${project.basedir}/security/src/main/java</source>
-                  </sources>
-                </configuration>
-              </execution>
-              <execution>
-                <id>add-test-source</id>
-                <goals>
-                  <goal>add-test-source</goal>
-                </goals>
-                <configuration>
-                  <sources>
-                    <source>${project.basedir}/security/src/test/java</source>
-                  </sources>
-                </configuration>
-              </execution>
-              <execution>
-                <id>add-test-resource</id>
-                <goals>
-                  <goal>add-test-resource</goal>
-                </goals>
-                <configuration>
-                  <resources>
-                    <resource>
-                      <directory>${project.basedir}/security/src/test/resources</directory>
-                      <includes>
-                        <include>hbase-site.xml</include>
-                      </includes>
-                    </resource>
-                  </resources>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>