Index: shims/ivy.xml
===================================================================
--- shims/ivy.xml (revision 1228525)
+++ shims/ivy.xml (working copy)
@@ -27,11 +27,8 @@
-
+
-
-
-
Index: shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenSelector23.java
===================================================================
--- shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenSelector23.java (revision 1228525)
+++ shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenSelector23.java (working copy)
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.thrift;
-
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
-
-/**
- * A delegation token that is specialized for Hive
- */
-
-public class DelegationTokenSelector23
- extends AbstractDelegationTokenSelector<DelegationTokenIdentifier23>{
-
- public DelegationTokenSelector23() {
- super(DelegationTokenIdentifier23.HIVE_DELEGATION_KIND);
- }
-}
Index: shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenIdentifier23.java
===================================================================
--- shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenIdentifier23.java (revision 1228525)
+++ shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenIdentifier23.java (working copy)
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.thrift;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
-
-/**
- * A delegation token identifier that is specific to Hive.
- */
-public class DelegationTokenIdentifier23
- extends AbstractDelegationTokenIdentifier {
- public static final Text HIVE_DELEGATION_KIND = new Text("HIVE_DELEGATION_TOKEN");
-
- /**
- * Create an empty delegation token identifier for reading into.
- */
- public DelegationTokenIdentifier23() {
- }
-
- /**
- * Create a new delegation token identifier
- * @param owner the effective username of the token owner
- * @param renewer the username of the renewer
- * @param realUser the real username of the token owner
- */
- public DelegationTokenIdentifier23(Text owner, Text renewer, Text realUser) {
- super(owner, renewer, realUser);
- }
-
- @Override
- public Text getKind() {
- return HIVE_DELEGATION_KIND;
- }
-
-}
Index: shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
===================================================================
--- shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java (revision 1228525)
+++ shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java (working copy)
@@ -17,505 +17,22 @@
*/
package org.apache.hadoop.hive.shims;
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.security.auth.login.LoginException;
-
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hive.io.HiveIOExceptionHandlerChain;
-import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
import org.apache.hadoop.hive.shims.HadoopShims.JobTrackerState;
-import org.apache.hadoop.hive.thrift.DelegationTokenSelector23;
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hive.shims.HadoopShimsSecure;
import org.apache.hadoop.mapred.ClusterStatus;
-import org.apache.hadoop.mapred.FileInputFormat;
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobContext;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.OutputCommitter;
-import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.RunningJob;
-import org.apache.hadoop.mapred.TaskAttemptContext;
-import org.apache.hadoop.mapred.TaskCompletionEvent;
-import org.apache.hadoop.mapred.TaskID;
-import org.apache.hadoop.mapred.lib.CombineFileInputFormat;
-import org.apache.hadoop.mapred.lib.CombineFileSplit;
-import org.apache.hadoop.mapred.lib.NullOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenSelector;
-import org.apache.hadoop.tools.HadoopArchives;
import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.ToolRunner;
/**
* Implementation of shims against Hadoop 0.23.0.
*/
-public class Hadoop23Shims implements HadoopShims {
- public boolean usesJobShell() {
- return false;
- }
+public class Hadoop23Shims extends HadoopShimsSecure {
- public boolean fileSystemDeleteOnExit(FileSystem fs, Path path)
- throws IOException {
-
- return fs.deleteOnExit(path);
- }
-
- public void inputFormatValidateInput(InputFormat fmt, JobConf conf)
- throws IOException {
- // gone in 0.18+
- }
-
- public boolean isJobPreparing(RunningJob job) throws IOException {
- return job.getJobState() == JobStatus.PREP;
- }
- /**
- * Workaround for hadoop-17 - jobclient only looks at commandlineconfig.
- */
- public void setTmpFiles(String prop, String files) {
- // gone in 20+
- }
-
- public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf,
- int numDataNodes,
- boolean format,
- String[] racks) throws IOException {
- return new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks));
- }
-
- /**
- * MiniDFSShim.
- *
- */
- public class MiniDFSShim implements HadoopShims.MiniDFSShim {
- private final MiniDFSCluster cluster;
-
- public MiniDFSShim(MiniDFSCluster cluster) {
- this.cluster = cluster;
- }
-
- public FileSystem getFileSystem() throws IOException {
- return cluster.getFileSystem();
- }
-
- public void shutdown() {
- cluster.shutdown();
- }
- }
-
- /**
- * We define this function here to make the code compatible between
- * hadoop 0.17 and hadoop 0.20.
- *
- * Hive binary that compiled Text.compareTo(Text) with hadoop 0.20 won't
- * work with hadoop 0.17 because in hadoop 0.20, Text.compareTo(Text) is
- * implemented in org.apache.hadoop.io.BinaryComparable, and Java compiler
- * references that class, which is not available in hadoop 0.17.
- */
- public int compareText(Text a, Text b) {
- return a.compareTo(b);
- }
-
@Override
- public long getAccessTime(FileStatus file) {
- return file.getAccessTime();
- }
-
- public HadoopShims.CombineFileInputFormatShim getCombineFileInputFormat() {
- return new CombineFileInputFormatShim() {
- @Override
- public RecordReader getRecordReader(InputSplit split,
- JobConf job, Reporter reporter) throws IOException {
- throw new IOException("CombineFileInputFormat.getRecordReader not needed.");
- }
- };
- }
-
- public static class InputSplitShim extends CombineFileSplit implements HadoopShims.InputSplitShim {
- long shrinkedLength;
- boolean _isShrinked;
- public InputSplitShim() {
- super();
- _isShrinked = false;
- }
-
- public InputSplitShim(CombineFileSplit old) throws IOException {
- super(old);
- _isShrinked = false;
- }
-
- @Override
- public void shrinkSplit(long length) {
- _isShrinked = true;
- shrinkedLength = length;
- }
-
- public boolean isShrinked() {
- return _isShrinked;
- }
-
- public long getShrinkedLength() {
- return shrinkedLength;
- }
-
- @Override
- public void readFields(DataInput in) throws IOException {
- super.readFields(in);
- _isShrinked = in.readBoolean();
- if (_isShrinked) {
- shrinkedLength = in.readLong();
- }
- }
-
- @Override
- public void write(DataOutput out) throws IOException {
- super.write(out);
- out.writeBoolean(_isShrinked);
- if (_isShrinked) {
- out.writeLong(shrinkedLength);
- }
- }
- }
-
- /* This class should be replaced with org.apache.hadoop.mapred.lib.CombineFileRecordReader class, once
- * https://issues.apache.org/jira/browse/MAPREDUCE-955 is fixed. This code should be removed - it is a copy
- * of org.apache.hadoop.mapred.lib.CombineFileRecordReader
- */
- public static class CombineFileRecordReader<K, V> implements RecordReader<K, V> {
-
- static final Class[] constructorSignature = new Class[] {
- InputSplit.class,
- Configuration.class,
- Reporter.class,
- Integer.class
- };
-
- protected CombineFileSplit split;
- protected JobConf jc;
- protected Reporter reporter;
- protected Class<RecordReader<K, V>> rrClass;
- protected Constructor<RecordReader<K, V>> rrConstructor;
- protected FileSystem fs;
-
- protected int idx;
- protected long progress;
- protected RecordReader<K, V> curReader;
- protected boolean isShrinked;
- protected long shrinkedLength;
-
- public boolean next(K key, V value) throws IOException {
-
- while ((curReader == null)
- || !doNextWithExceptionHandler((K) ((CombineHiveKey) key).getKey(),
- value)) {
- if (!initNextRecordReader(key)) {
- return false;
- }
- }
- return true;
- }
-
- public K createKey() {
- K newKey = curReader.createKey();
- return (K)(new CombineHiveKey(newKey));
- }
-
- public V createValue() {
- return curReader.createValue();
- }
-
- /**
- * Return the amount of data processed.
- */
- public long getPos() throws IOException {
- return progress;
- }
-
- public void close() throws IOException {
- if (curReader != null) {
- curReader.close();
- curReader = null;
- }
- }
-
- /**
- * Return progress based on the amount of data processed so far.
- */
- public float getProgress() throws IOException {
- return Math.min(1.0f, progress / (float) (split.getLength()));
- }
-
- /**
- * A generic RecordReader that can hand out different recordReaders
- * for each chunk in the CombineFileSplit.
- */
- public CombineFileRecordReader(JobConf job, CombineFileSplit split,
- Reporter reporter,
- Class<RecordReader<K, V>> rrClass)
- throws IOException {
- this.split = split;
- this.jc = job;
- this.rrClass = rrClass;
- this.reporter = reporter;
- this.idx = 0;
- this.curReader = null;
- this.progress = 0;
-
- isShrinked = false;
-
- assert (split instanceof InputSplitShim);
- if (((InputSplitShim) split).isShrinked()) {
- isShrinked = true;
- shrinkedLength = ((InputSplitShim) split).getShrinkedLength();
- }
-
- try {
- rrConstructor = rrClass.getDeclaredConstructor(constructorSignature);
- rrConstructor.setAccessible(true);
- } catch (Exception e) {
- throw new RuntimeException(rrClass.getName() +
- " does not have valid constructor", e);
- }
- initNextRecordReader(null);
- }
-
- /**
- * do next and handle exception inside it.
- * @param key
- * @param value
- * @return
- * @throws IOException
- */
- private boolean doNextWithExceptionHandler(K key, V value) throws IOException {
- try {
- return curReader.next(key, value);
- } catch (Exception e) {
- return HiveIOExceptionHandlerUtil.handleRecordReaderNextException(e, jc);
- }
- }
-
- /**
- * Get the record reader for the next chunk in this CombineFileSplit.
- */
- protected boolean initNextRecordReader(K key) throws IOException {
-
- if (curReader != null) {
- curReader.close();
- curReader = null;
- if (idx > 0) {
- progress += split.getLength(idx - 1); // done processing so far
- }
- }
-
- // if all chunks have been processed or reached the length, nothing more to do.
- if (idx == split.getNumPaths() || (isShrinked && progress > shrinkedLength)) {
- return false;
- }
-
- // get a record reader for the idx-th chunk
- try {
- curReader = rrConstructor.newInstance(new Object[]
- {split, jc, reporter, Integer.valueOf(idx)});
-
- // change the key if need be
- if (key != null) {
- K newKey = curReader.createKey();
- ((CombineHiveKey)key).setKey(newKey);
- }
-
- // setup some helper config variables.
- jc.set("map.input.file", split.getPath(idx).toString());
- jc.setLong("map.input.start", split.getOffset(idx));
- jc.setLong("map.input.length", split.getLength(idx));
- } catch (Exception e) {
- curReader=HiveIOExceptionHandlerUtil.handleRecordReaderCreationException(e, jc);
- }
- idx++;
- return true;
- }
- }
-
- public abstract static class CombineFileInputFormatShim<K, V> extends
- CombineFileInputFormat<K, V>
- implements HadoopShims.CombineFileInputFormatShim<K, V> {
-
- public Path[] getInputPathsShim(JobConf conf) {
- try {
- return FileInputFormat.getInputPaths(conf);
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
-
- @Override
- public void createPool(JobConf conf, PathFilter... filters) {
- super.createPool(conf, filters);
- }
-
- @Override
- public InputSplitShim[] getSplits(JobConf job, int numSplits) throws IOException {
- long minSize = job.getLong("mapred.min.split.size", 0);
-
- // For backward compatibility, let the above parameter be used
- if (job.getLong("mapred.min.split.size.per.node", 0) == 0) {
- super.setMinSplitSizeNode(minSize);
- }
-
- if (job.getLong("mapred.min.split.size.per.rack", 0) == 0) {
- super.setMinSplitSizeRack(minSize);
- }
-
- if (job.getLong("mapred.max.split.size", 0) == 0) {
- super.setMaxSplitSize(minSize);
- }
-
- InputSplit[] splits = super.getSplits(job, numSplits);
-
- InputSplitShim[] isplits = new InputSplitShim[splits.length];
- for (int pos = 0; pos < splits.length; pos++) {
- isplits[pos] = new InputSplitShim((CombineFileSplit) splits[pos]);
- }
-
- return isplits;
- }
-
- public InputSplitShim getInputSplitShim() throws IOException {
- return new InputSplitShim();
- }
-
- public RecordReader getRecordReader(JobConf job, HadoopShims.InputSplitShim split,
- Reporter reporter,
- Class<RecordReader<K, V>> rrClass)
- throws IOException {
- CombineFileSplit cfSplit = (CombineFileSplit) split;
- return new CombineFileRecordReader(job, cfSplit, reporter, rrClass);
- }
-
- }
-
- public String getInputFormatClassName() {
- return "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat";
- }
-
- String[] ret = new String[2];
-
- @Override
- public String[] getTaskJobIDs(TaskCompletionEvent t) {
- TaskID tid = t.getTaskAttemptId().getTaskID();
- ret[0] = tid.toString();
- ret[1] = tid.getJobID().toString();
- return ret;
- }
-
- public void setFloatConf(Configuration conf, String varName, float val) {
- conf.setFloat(varName, val);
- }
-
- @Override
- public int createHadoopArchive(Configuration conf, Path sourceDir, Path destDir,
- String archiveName) throws Exception {
-
- HadoopArchives har = new HadoopArchives(conf);
- List<String> args = new ArrayList<String>();
-
- if (conf.get("hive.archive.har.parentdir.settable") == null) {
- throw new RuntimeException("hive.archive.har.parentdir.settable is not set");
- }
- boolean parentSettable =
- conf.getBoolean("hive.archive.har.parentdir.settable", false);
-
- if (parentSettable) {
- args.add("-archiveName");
- args.add(archiveName);
- args.add("-p");
- args.add(sourceDir.toString());
- args.add(destDir.toString());
- } else {
- args.add("-archiveName");
- args.add(archiveName);
- args.add(sourceDir.toString());
- args.add(destDir.toString());
- }
-
- return ToolRunner.run(har, args.toArray(new String[0]));
- }
-
- public static class NullOutputCommitter extends OutputCommitter {
- @Override
- public void setupJob(JobContext jobContext) { }
- @Override
- public void cleanupJob(JobContext jobContext) { }
-
- @Override
- public void setupTask(TaskAttemptContext taskContext) { }
- @Override
- public boolean needsTaskCommit(TaskAttemptContext taskContext) {
- return false;
- }
- @Override
- public void commitTask(TaskAttemptContext taskContext) { }
- @Override
- public void abortTask(TaskAttemptContext taskContext) { }
- }
-
- public void setNullOutputFormat(JobConf conf) {
- conf.setOutputFormat(NullOutputFormat.class);
- conf.setOutputCommitter(Hadoop23Shims.NullOutputCommitter.class);
-
- // option to bypass job setup and cleanup was introduced in hadoop-21 (MAPREDUCE-463)
- // but can be backported. So we disable setup/cleanup in all versions >= 0.19
- conf.setBoolean("mapred.committer.job.setup.cleanup.needed", false);
-
- // option to bypass task cleanup task was introduced in hadoop-23 (MAPREDUCE-2206)
- // but can be backported. So we disable setup/cleanup in all versions >= 0.19
- conf.setBoolean("mapreduce.job.committer.task.cleanup.needed", false);
- }
-
- @Override
- public UserGroupInformation getUGIForConf(Configuration conf) throws IOException {
- return UserGroupInformation.getCurrentUser();
- }
-
- @Override
- public boolean isSecureShimImpl() {
- return true;
- }
-
- @Override
- public String getShortUserName(UserGroupInformation ugi) {
- return ugi.getShortUserName();
- }
-
- @Override
- public String getTokenStrForm(String tokenSignature) throws IOException {
- UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
- TokenSelector<? extends TokenIdentifier> tokenSelector = new DelegationTokenSelector23();
-
- Token<? extends TokenIdentifier> token = tokenSelector.selectToken(
- tokenSignature == null ? new Text() : new Text(tokenSignature), ugi.getTokens());
- return token != null ? token.encodeToUrlString() : null;
- }
-
- @Override
public JobTrackerState getJobTrackerState(ClusterStatus clusterStatus) throws Exception {
JobTrackerState state;
switch (clusterStatus.getJobTrackerStatus()) {
@@ -528,7 +45,7 @@
throw new Exception(errorMsg);
}
}
-
+
@Override
public org.apache.hadoop.mapreduce.TaskAttemptContext newTaskAttemptContext(Configuration conf, final Progressable progressable) {
return new TaskAttemptContextImpl(conf, new TaskAttemptID()) {
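For reference, the delegation-token lookup removed from this class (getTokenStrForm) selects the Hive token matching a signature from the caller's UGI and URL-encodes it. A minimal sketch of that pattern follows; the shared DelegationTokenSelector class name is an assumption about where the logic lives after the move, not something this patch shows.

import java.io.IOException;

import org.apache.hadoop.hive.thrift.DelegationTokenSelector;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenSelector;

public class TokenStrFormSketch {
  // Mirrors the removed getTokenStrForm(): pick the Hive delegation token for
  // the given signature out of the current user's credentials and return its
  // URL-safe string form, or null if no matching token is present.
  public static String getTokenStrForm(String tokenSignature) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // DelegationTokenSelector is assumed to be the shared (non-"23") selector.
    TokenSelector<? extends TokenIdentifier> selector = new DelegationTokenSelector();
    Token<? extends TokenIdentifier> token = selector.selectToken(
        tokenSignature == null ? new Text() : new Text(tokenSignature), ugi.getTokens());
    return token != null ? token.encodeToUrlString() : null;
  }
}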
Index: shims/src/common/java/org/apache/hadoop/hive/shims/ShimLoader.java
===================================================================
--- shims/src/common/java/org/apache/hadoop/hive/shims/ShimLoader.java (revision 1228525)
+++ shims/src/common/java/org/apache/hadoop/hive/shims/ShimLoader.java (working copy)
@@ -79,7 +79,7 @@
}
public static synchronized HadoopThriftAuthBridge getHadoopThriftAuthBridge() {
- if ("0.20S".equals(getMajorVersion())) {
+ if (getHadoopShims().isSecureShimImpl()) {
return createShim("org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge20S",
HadoopThriftAuthBridge.class);
} else {
@@ -87,7 +87,6 @@
}
}
- @SuppressWarnings("unchecked")
private static <T> T loadShims(Map<String, String> classMap, Class<T> xface) {
String vers = getMajorVersion();
String className = classMap.get(vers);
@@ -96,7 +95,7 @@
private static <T> T createShim(String className, Class<T> xface) {
try {
- Class clazz = Class.forName(className);
+ Class<?> clazz = Class.forName(className);
return xface.cast(clazz.newInstance());
} catch (Exception e) {
throw new RuntimeException("Could not load shims in class " +
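The change above swaps the hard-coded "0.20S" version check for a capability query, so any shim whose isSecureShimImpl() returns true gets the secure Thrift auth bridge. A minimal sketch of that dispatch together with the reflective createShim() it relies on; the non-secure bridge class name and the Runnable demo in main() are illustrative only.

public class ShimDispatchSketch {

  // Mirrors ShimLoader.createShim() in the patch: load the implementation by
  // name, construct it with the no-arg constructor, and cast it to the
  // requested shim interface.
  static <T> T createShim(String className, Class<T> xface) {
    try {
      Class<?> clazz = Class.forName(className);
      return xface.cast(clazz.newInstance());
    } catch (Exception e) {
      throw new RuntimeException("Could not load shims in class " + className, e);
    }
  }

  static String chooseBridgeClassName(boolean isSecure) {
    // Both the 0.20S and 0.23 shims can now report isSecureShimImpl() == true,
    // so they share the secure bridge; the non-secure class name is assumed.
    return isSecure
        ? "org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge20S"
        : "org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge";
  }

  public static void main(String[] args) {
    System.out.println(chooseBridgeClassName(true));
    // Neutral demonstration of createShim without Hive on the classpath:
    Runnable r = createShim("java.lang.Thread", Runnable.class);
    r.run();
  }
}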
Index: shims/src/0.20S/java/org/apache/hadoop/security/token/delegation/HiveDelegationTokenSupport.java
===================================================================
--- shims/src/0.20S/java/org/apache/hadoop/security/token/delegation/HiveDelegationTokenSupport.java (revision 1228525)
+++ shims/src/0.20S/java/org/apache/hadoop/security/token/delegation/HiveDelegationTokenSupport.java (working copy)
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.security.token.delegation;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
-
-/**
- * Workaround for serialization of {@link DelegationTokenInformation} through package access.
- * Future version of Hadoop should add this to DelegationTokenInformation itself.
- */
-public final class HiveDelegationTokenSupport {
-
- private HiveDelegationTokenSupport() {}
-
- public static byte[] encodeDelegationTokenInformation(DelegationTokenInformation token) {
- try {
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- DataOutputStream out = new DataOutputStream(bos);
- WritableUtils.writeVInt(out, token.password.length);
- out.write(token.password);
- out.writeLong(token.renewDate);
- out.flush();
- return bos.toByteArray();
- } catch (IOException ex) {
- throw new RuntimeException("Failed to encode token.", ex);
- }
- }
-
- public static DelegationTokenInformation decodeDelegationTokenInformation(byte[] tokenBytes)
- throws IOException {
- DataInputStream in = new DataInputStream(new ByteArrayInputStream(tokenBytes));
- DelegationTokenInformation token = new DelegationTokenInformation(0, null);
- int len = WritableUtils.readVInt(in);
- token.password = new byte[len];
- in.readFully(token.password);
- token.renewDate = in.readLong();
- return token;
- }
-
- public static void rollMasterKey(
- AbstractDelegationTokenSecretManager<? extends AbstractDelegationTokenIdentifier> mgr)
- throws IOException {
- mgr.rollMasterKey();
- }
-
-}
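The removed helper serializes a DelegationTokenInformation as a vint length, the raw password bytes, then the renew date. A round-trip sketch of that wire format; the local Entry class is a stand-in for DelegationTokenInformation, whose fields are package-private in Hadoop.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.io.WritableUtils;

public class TokenInfoCodecSketch {

  static class Entry {          // stand-in for DelegationTokenInformation
    byte[] password;
    long renewDate;
  }

  static byte[] encode(Entry e) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    WritableUtils.writeVInt(out, e.password.length);   // length prefix
    out.write(e.password);                             // raw password bytes
    out.writeLong(e.renewDate);                        // renewal deadline
    out.flush();
    return bos.toByteArray();
  }

  static Entry decode(byte[] bytes) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
    Entry e = new Entry();
    e.password = new byte[WritableUtils.readVInt(in)];
    in.readFully(e.password);
    e.renewDate = in.readLong();
    return e;
  }

  public static void main(String[] args) throws IOException {
    Entry e = new Entry();
    e.password = "secret".getBytes();
    e.renewDate = 123456789L;
    Entry back = decode(encode(e));
    System.out.println(Arrays.equals(e.password, back.password) && e.renewDate == back.renewDate);
  }
}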
Index: shims/src/0.20S/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge20S.java
===================================================================
--- shims/src/0.20S/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge20S.java (revision 1228525)
+++ shims/src/0.20S/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge20S.java (working copy)
@@ -1,563 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.thrift;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.Socket;
-import java.security.PrivilegedAction;
-import java.security.PrivilegedExceptionAction;
-
-import javax.security.auth.callback.Callback;
-import javax.security.auth.callback.CallbackHandler;
-import javax.security.auth.callback.NameCallback;
-import javax.security.auth.callback.PasswordCallback;
-import javax.security.auth.callback.UnsupportedCallbackException;
-import javax.security.sasl.AuthorizeCallback;
-import javax.security.sasl.RealmCallback;
-import javax.security.sasl.RealmChoiceCallback;
-import javax.security.sasl.SaslException;
-import javax.security.sasl.SaslServer;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.thrift.client.TUGIAssumingTransport;
-import org.apache.hadoop.security.SaslRpcServer;
-import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.TProcessor;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.transport.TSaslClientTransport;
-import org.apache.thrift.transport.TSaslServerTransport;
-import org.apache.thrift.transport.TSocket;
-import org.apache.thrift.transport.TTransport;
-import org.apache.thrift.transport.TTransportException;
-import org.apache.thrift.transport.TTransportFactory;
-
-
- /**
- * Functions that bridge Thrift's SASL transports to Hadoop's
- * SASL callback handlers and authentication classes.
- */
- public class HadoopThriftAuthBridge20S extends HadoopThriftAuthBridge {
- static final Log LOG = LogFactory.getLog(HadoopThriftAuthBridge.class);
-
- @Override
- public Client createClient() {
- return new Client();
- }
-
- @Override
- public Server createServer(String keytabFile, String principalConf) throws TTransportException {
- return new Server(keytabFile, principalConf);
- }
-
- public static class Client extends HadoopThriftAuthBridge.Client {
- /**
- * Create a client-side SASL transport that wraps an underlying transport.
- *
- * @param method The authentication method to use. Currently only KERBEROS is
- * supported.
- * @param serverPrincipal The Kerberos principal of the target server.
- * @param underlyingTransport The underlying transport mechanism, usually a TSocket.
- */
-
- @Override
- public TTransport createClientTransport(
- String principalConfig, String host,
- String methodStr, String tokenStrForm, TTransport underlyingTransport)
- throws IOException {
- AuthMethod method = AuthMethod.valueOf(AuthMethod.class, methodStr);
-
- TTransport saslTransport = null;
- switch (method) {
- case DIGEST:
- Token<DelegationTokenIdentifier> t= new Token<DelegationTokenIdentifier>();
- t.decodeFromUrlString(tokenStrForm);
- saslTransport = new TSaslClientTransport(
- method.getMechanismName(),
- null,
- null, SaslRpcServer.SASL_DEFAULT_REALM,
- SaslRpcServer.SASL_PROPS, new SaslClientCallbackHandler(t),
- underlyingTransport);
- return new TUGIAssumingTransport(saslTransport, UserGroupInformation.getCurrentUser());
-
- case KERBEROS:
- String serverPrincipal = SecurityUtil.getServerPrincipal(principalConfig, host);
- String names[] = SaslRpcServer.splitKerberosName(serverPrincipal);
- if (names.length != 3) {
- throw new IOException(
- "Kerberos principal name does NOT have the expected hostname part: "
- + serverPrincipal);
- }
- try {
- saslTransport = new TSaslClientTransport(
- method.getMechanismName(),
- null,
- names[0], names[1],
- SaslRpcServer.SASL_PROPS, null,
- underlyingTransport);
- return new TUGIAssumingTransport(saslTransport, UserGroupInformation.getCurrentUser());
- } catch (SaslException se) {
- throw new IOException("Could not instantiate SASL transport", se);
- }
-
- default:
- throw new IOException("Unsupported authentication method: " + method);
- }
- }
- private static class SaslClientCallbackHandler implements CallbackHandler {
- private final String userName;
- private final char[] userPassword;
-
- public SaslClientCallbackHandler(Token<? extends TokenIdentifier> token) {
- this.userName = encodeIdentifier(token.getIdentifier());
- this.userPassword = encodePassword(token.getPassword());
- }
-
- public void handle(Callback[] callbacks)
- throws UnsupportedCallbackException {
- NameCallback nc = null;
- PasswordCallback pc = null;
- RealmCallback rc = null;
- for (Callback callback : callbacks) {
- if (callback instanceof RealmChoiceCallback) {
- continue;
- } else if (callback instanceof NameCallback) {
- nc = (NameCallback) callback;
- } else if (callback instanceof PasswordCallback) {
- pc = (PasswordCallback) callback;
- } else if (callback instanceof RealmCallback) {
- rc = (RealmCallback) callback;
- } else {
- throw new UnsupportedCallbackException(callback,
- "Unrecognized SASL client callback");
- }
- }
- if (nc != null) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("SASL client callback: setting username: " + userName);
- }
- nc.setName(userName);
- }
- if (pc != null) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("SASL client callback: setting userPassword");
- }
- pc.setPassword(userPassword);
- }
- if (rc != null) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("SASL client callback: setting realm: "
- + rc.getDefaultText());
- }
- rc.setText(rc.getDefaultText());
- }
- }
-
- static String encodeIdentifier(byte[] identifier) {
- return new String(Base64.encodeBase64(identifier));
- }
-
- static char[] encodePassword(byte[] password) {
- return new String(Base64.encodeBase64(password)).toCharArray();
- }
- }
- }
-
- public static class Server extends HadoopThriftAuthBridge.Server {
- final UserGroupInformation realUgi;
- DelegationTokenSecretManager secretManager;
- private final static long DELEGATION_TOKEN_GC_INTERVAL = 3600000; // 1 hour
- //Delegation token related keys
- public static final String DELEGATION_KEY_UPDATE_INTERVAL_KEY =
- "hive.cluster.delegation.key.update-interval";
- public static final long DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT =
- 24*60*60*1000; // 1 day
- public static final String DELEGATION_TOKEN_RENEW_INTERVAL_KEY =
- "hive.cluster.delegation.token.renew-interval";
- public static final long DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT =
- 24*60*60*1000; // 1 day
- public static final String DELEGATION_TOKEN_MAX_LIFETIME_KEY =
- "hive.cluster.delegation.token.max-lifetime";
- public static final long DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT =
- 7*24*60*60*1000; // 7 days
- public static final String DELEGATION_TOKEN_STORE_CLS =
- "hive.cluster.delegation.token.store.class";
- public static final String DELEGATION_TOKEN_STORE_ZK_CONNECT_STR =
- "hive.cluster.delegation.token.store.zookeeper.connectString";
- public static final String DELEGATION_TOKEN_STORE_ZK_ROOT_NODE =
- "hive.cluster.delegation.token.store.zookeeper.rootNode";
- public static final String DELEGATION_TOKEN_STORE_ZK_ROOT_NODE_DEFAULT =
- "/hive/cluster/delegation";
-
- public Server() throws TTransportException {
- try {
- realUgi = UserGroupInformation.getCurrentUser();
- } catch (IOException ioe) {
- throw new TTransportException(ioe);
- }
- }
- /**
- * Create a server with a kerberos keytab/principal.
- */
- private Server(String keytabFile, String principalConf)
- throws TTransportException {
- if (keytabFile == null || keytabFile.isEmpty()) {
- throw new TTransportException("No keytab specified");
- }
- if (principalConf == null || principalConf.isEmpty()) {
- throw new TTransportException("No principal specified");
- }
-
- // Login from the keytab
- String kerberosName;
- try {
- kerberosName =
- SecurityUtil.getServerPrincipal(principalConf, "0.0.0.0");
- UserGroupInformation.loginUserFromKeytab(
- kerberosName, keytabFile);
- realUgi = UserGroupInformation.getLoginUser();
- assert realUgi.isFromKeytab();
- } catch (IOException ioe) {
- throw new TTransportException(ioe);
- }
- }
-
- /**
- * Create a TTransportFactory that, upon connection of a client socket,
- * negotiates a Kerberized SASL transport. The resulting TTransportFactory
- * can be passed as both the input and output transport factory when
- * instantiating a TThreadPoolServer, for example.
- *
- */
- @Override
- public TTransportFactory createTransportFactory() throws TTransportException
- {
- // Parse out the kerberos principal, host, realm.
- String kerberosName = realUgi.getUserName();
- final String names[] = SaslRpcServer.splitKerberosName(kerberosName);
- if (names.length != 3) {
- throw new TTransportException("Kerberos principal should have 3 parts: " + kerberosName);
- }
-
- TSaslServerTransport.Factory transFactory = new TSaslServerTransport.Factory();
- transFactory.addServerDefinition(
- AuthMethod.KERBEROS.getMechanismName(),
- names[0], names[1], // two parts of kerberos principal
- SaslRpcServer.SASL_PROPS,
- new SaslRpcServer.SaslGssCallbackHandler());
- transFactory.addServerDefinition(AuthMethod.DIGEST.getMechanismName(),
- null, SaslRpcServer.SASL_DEFAULT_REALM,
- SaslRpcServer.SASL_PROPS, new SaslDigestCallbackHandler(secretManager));
-
- return new TUGIAssumingTransportFactory(transFactory, realUgi);
- }
-
- /**
- * Wrap a TProcessor in such a way that, before processing any RPC, it
- * assumes the UserGroupInformation of the user authenticated by
- * the SASL transport.
- */
- @Override
- public TProcessor wrapProcessor(TProcessor processor) {
- return new TUGIAssumingProcessor(processor, secretManager);
- }
-
- protected TokenStoreDelegationTokenSecretManager.TokenStore getTokenStore(Configuration conf)
- throws IOException {
- String tokenStoreClassName = conf.get(DELEGATION_TOKEN_STORE_CLS, "");
- if (StringUtils.isBlank(tokenStoreClassName)) {
- return new MemoryTokenStore();
- }
- try {
- Class<? extends TokenStoreDelegationTokenSecretManager.TokenStore> storeClass = Class
- .forName(tokenStoreClassName).asSubclass(
- TokenStoreDelegationTokenSecretManager.TokenStore.class);
- return ReflectionUtils.newInstance(storeClass, conf);
- } catch (ClassNotFoundException e) {
- throw new IOException("Error initializing delegation token store: " + tokenStoreClassName,
- e);
- }
- }
-
- @Override
- public void startDelegationTokenSecretManager(Configuration conf)
- throws IOException{
- long secretKeyInterval =
- conf.getLong(DELEGATION_KEY_UPDATE_INTERVAL_KEY,
- DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
- long tokenMaxLifetime =
- conf.getLong(DELEGATION_TOKEN_MAX_LIFETIME_KEY,
- DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
- long tokenRenewInterval =
- conf.getLong(DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
- DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
-
- secretManager = new TokenStoreDelegationTokenSecretManager(secretKeyInterval,
- tokenMaxLifetime,
- tokenRenewInterval,
- DELEGATION_TOKEN_GC_INTERVAL, getTokenStore(conf));
- secretManager.startThreads();
- }
-
- @Override
- public String getDelegationToken(final String owner, final String renewer)
- throws IOException, InterruptedException {
- if (!authenticationMethod.get().equals(AuthenticationMethod.KERBEROS)) {
- throw new AuthorizationException(
- "Delegation Token can be issued only with kerberos authentication");
- }
- //if the user asking the token is same as the 'owner' then don't do
- //any proxy authorization checks. For cases like oozie, where it gets
- //a delegation token for another user, we need to make sure oozie is
- //authorized to get a delegation token.
- //Do all checks on short names
- UserGroupInformation currUser = UserGroupInformation.getCurrentUser();
- UserGroupInformation ownerUgi = UserGroupInformation.createRemoteUser(owner);
- if (!ownerUgi.getShortUserName().equals(currUser.getShortUserName())) {
- //in the case of proxy users, the getCurrentUser will return the
- //real user (for e.g. oozie) due to the doAs that happened just before the
- //server started executing the method getDelegationToken in the MetaStore
- ownerUgi = UserGroupInformation.createProxyUser(owner,
- UserGroupInformation.getCurrentUser());
- InetAddress remoteAddr = getRemoteAddress();
- ProxyUsers.authorize(ownerUgi,remoteAddr.getHostAddress(), null);
- }
- return ownerUgi.doAs(new PrivilegedExceptionAction<String>() {
- public String run() throws IOException {
- return secretManager.getDelegationToken(renewer);
- }
- });
- }
-
- @Override
- public long renewDelegationToken(String tokenStrForm) throws IOException {
- if (!authenticationMethod.get().equals(AuthenticationMethod.KERBEROS)) {
- throw new AuthorizationException(
- "Delegation Token can be issued only with kerberos authentication");
- }
- return secretManager.renewDelegationToken(tokenStrForm);
- }
-
- @Override
- public void cancelDelegationToken(String tokenStrForm) throws IOException {
- secretManager.cancelDelegationToken(tokenStrForm);
- }
-
- final static ThreadLocal<InetAddress> remoteAddress =
- new ThreadLocal<InetAddress>() {
- @Override
- protected synchronized InetAddress initialValue() {
- return null;
- }
- };
-
- @Override
- public InetAddress getRemoteAddress() {
- return remoteAddress.get();
- }
-
- final static ThreadLocal<AuthenticationMethod> authenticationMethod =
- new ThreadLocal<AuthenticationMethod>() {
- @Override
- protected synchronized AuthenticationMethod initialValue() {
- return AuthenticationMethod.TOKEN;
- }
- };
-
- /** CallbackHandler for SASL DIGEST-MD5 mechanism */
- // This code is pretty much completely based on Hadoop's
- // SaslRpcServer.SaslDigestCallbackHandler - the only reason we could not
- // use that Hadoop class as-is was because it needs a Server.Connection object
- // which is relevant in hadoop rpc but not here in the metastore - so the
- // code below does not deal with the Connection Server.object.
- static class SaslDigestCallbackHandler implements CallbackHandler {
- private final DelegationTokenSecretManager secretManager;
-
- public SaslDigestCallbackHandler(
- DelegationTokenSecretManager secretManager) {
- this.secretManager = secretManager;
- }
-
- private char[] getPassword(DelegationTokenIdentifier tokenid) throws InvalidToken {
- return encodePassword(secretManager.retrievePassword(tokenid));
- }
-
- private char[] encodePassword(byte[] password) {
- return new String(Base64.encodeBase64(password)).toCharArray();
- }
- /** {@inheritDoc} */
- @Override
- public void handle(Callback[] callbacks) throws InvalidToken,
- UnsupportedCallbackException {
- NameCallback nc = null;
- PasswordCallback pc = null;
- AuthorizeCallback ac = null;
- for (Callback callback : callbacks) {
- if (callback instanceof AuthorizeCallback) {
- ac = (AuthorizeCallback) callback;
- } else if (callback instanceof NameCallback) {
- nc = (NameCallback) callback;
- } else if (callback instanceof PasswordCallback) {
- pc = (PasswordCallback) callback;
- } else if (callback instanceof RealmCallback) {
- continue; // realm is ignored
- } else {
- throw new UnsupportedCallbackException(callback,
- "Unrecognized SASL DIGEST-MD5 Callback");
- }
- }
- if (pc != null) {
- DelegationTokenIdentifier tokenIdentifier = SaslRpcServer.
- getIdentifier(nc.getDefaultName(), secretManager);
- char[] password = getPassword(tokenIdentifier);
-
- if (LOG.isDebugEnabled()) {
- LOG.debug("SASL server DIGEST-MD5 callback: setting password "
- + "for client: " + tokenIdentifier.getUser());
- }
- pc.setPassword(password);
- }
- if (ac != null) {
- String authid = ac.getAuthenticationID();
- String authzid = ac.getAuthorizationID();
- if (authid.equals(authzid)) {
- ac.setAuthorized(true);
- } else {
- ac.setAuthorized(false);
- }
- if (ac.isAuthorized()) {
- if (LOG.isDebugEnabled()) {
- String username =
- SaslRpcServer.getIdentifier(authzid, secretManager).getUser().getUserName();
- LOG.debug("SASL server DIGEST-MD5 callback: setting "
- + "canonicalized client ID: " + username);
- }
- ac.setAuthorizedID(authzid);
- }
- }
- }
- }
-
- /**
- * Processor that pulls the SaslServer object out of the transport, and
- * assumes the remote user's UGI before calling through to the original
- * processor.
- *
- * This is used on the server side to set the UGI for each specific call.
- */
- private class TUGIAssumingProcessor implements TProcessor {
- final TProcessor wrapped;
- DelegationTokenSecretManager secretManager;
- TUGIAssumingProcessor(TProcessor wrapped, DelegationTokenSecretManager secretManager) {
- this.wrapped = wrapped;
- this.secretManager = secretManager;
- }
-
- public boolean process(final TProtocol inProt, final TProtocol outProt) throws TException {
- TTransport trans = inProt.getTransport();
- if (!(trans instanceof TSaslServerTransport)) {
- throw new TException("Unexpected non-SASL transport " + trans.getClass());
- }
- TSaslServerTransport saslTrans = (TSaslServerTransport)trans;
- SaslServer saslServer = saslTrans.getSaslServer();
- String authId = saslServer.getAuthorizationID();
- authenticationMethod.set(AuthenticationMethod.KERBEROS);
- LOG.debug("AUTH ID ======>" + authId);
- String endUser = authId;
-
- if(saslServer.getMechanismName().equals("DIGEST-MD5")) {
- try {
- TokenIdentifier tokenId = SaslRpcServer.getIdentifier(authId,
- secretManager);
- endUser = tokenId.getUser().getUserName();
- authenticationMethod.set(AuthenticationMethod.TOKEN);
- } catch (InvalidToken e) {
- throw new TException(e.getMessage());
- }
- }
- Socket socket = ((TSocket)(saslTrans.getUnderlyingTransport())).getSocket();
- remoteAddress.set(socket.getInetAddress());
- try {
- UserGroupInformation clientUgi = UserGroupInformation.createProxyUser(
- endUser, UserGroupInformation.getLoginUser());
- return clientUgi.doAs(new PrivilegedExceptionAction<Boolean>() {
- public Boolean run() {
- try {
- return wrapped.process(inProt, outProt);
- } catch (TException te) {
- throw new RuntimeException(te);
- }
- }
- });
- } catch (RuntimeException rte) {
- if (rte.getCause() instanceof TException) {
- throw (TException)rte.getCause();
- }
- throw rte;
- } catch (InterruptedException ie) {
- throw new RuntimeException(ie); // unexpected!
- } catch (IOException ioe) {
- throw new RuntimeException(ioe); // unexpected!
- }
- }
- }
-
- /**
- * A TransportFactory that wraps another one, but assumes a specified UGI
- * before calling through.
- *
- * This is used on the server side to assume the server's Principal when accepting
- * clients.
- */
- static class TUGIAssumingTransportFactory extends TTransportFactory {
- private final UserGroupInformation ugi;
- private final TTransportFactory wrapped;
-
- public TUGIAssumingTransportFactory(TTransportFactory wrapped, UserGroupInformation ugi) {
- assert wrapped != null;
- assert ugi != null;
-
- this.wrapped = wrapped;
- this.ugi = ugi;
- }
-
- @Override
- public TTransport getTransport(final TTransport trans) {
- return ugi.doAs(new PrivilegedAction<TTransport>() {
- public TTransport run() {
- return wrapped.getTransport(trans);
- }
- });
- }
- }
- }
- }
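Per the Javadoc of the removed bridge, the transport factory it builds is meant to be installed on a TThreadPoolServer, with the processor wrapped so each RPC runs as the authenticated client. A usage sketch of that wiring; the keytab, principal, port, and raw processor are caller-supplied, and this is illustrative plumbing rather than Hive's actual metastore startup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
import org.apache.thrift.TProcessor;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TTransportFactory;

public class SecureThriftServerSketch {
  public static TServer build(Configuration conf, String keytabFile, String principal,
      int port, TProcessor rawProcessor) throws Exception {
    // Log in from the keytab and start issuing/renewing Hive delegation tokens.
    HadoopThriftAuthBridge.Server bridge =
        ShimLoader.getHadoopThriftAuthBridge().createServer(keytabFile, principal);
    bridge.startDelegationTokenSecretManager(conf);
    // Kerberized/DIGEST SASL transport factory plus a processor wrapper that
    // assumes the authenticated client's UGI for each call.
    TTransportFactory transFactory = bridge.createTransportFactory();
    TProcessor processor = bridge.wrapProcessor(rawProcessor);
    return new TThreadPoolServer(
        new TThreadPoolServer.Args(new TServerSocket(port))
            .transportFactory(transFactory)
            .processor(processor));
  }
}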
Index: shims/src/0.20S/java/org/apache/hadoop/hive/thrift/DelegationTokenIdentifier.java
===================================================================
--- shims/src/0.20S/java/org/apache/hadoop/hive/thrift/DelegationTokenIdentifier.java (revision 1228525)
+++ shims/src/0.20S/java/org/apache/hadoop/hive/thrift/DelegationTokenIdentifier.java (working copy)
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.thrift;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
-
-/**
- * A delegation token identifier that is specific to Hive.
- */
-public class DelegationTokenIdentifier
- extends AbstractDelegationTokenIdentifier {
- public static final Text HIVE_DELEGATION_KIND = new Text("HIVE_DELEGATION_TOKEN");
-
- /**
- * Create an empty delegation token identifier for reading into.
- */
- public DelegationTokenIdentifier() {
- }
-
- /**
- * Create a new delegation token identifier
- * @param owner the effective username of the token owner
- * @param renewer the username of the renewer
- * @param realUser the real username of the token owner
- */
- public DelegationTokenIdentifier(Text owner, Text renewer, Text realUser) {
- super(owner, renewer, realUser);
- }
-
- @Override
- public Text getKind() {
- return HIVE_DELEGATION_KIND;
- }
-
-}
Index: shims/src/0.20S/java/org/apache/hadoop/hive/thrift/ZooKeeperTokenStore.java
===================================================================
--- shims/src/0.20S/java/org/apache/hadoop/hive/thrift/ZooKeeperTokenStore.java (revision 1228525)
+++ shims/src/0.20S/java/org/apache/hadoop/hive/thrift/ZooKeeperTokenStore.java (working copy)
@@ -1,323 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.thrift;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.thrift.TokenStoreDelegationTokenSecretManager.TokenStoreError;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
-import org.apache.hadoop.security.token.delegation.HiveDelegationTokenSupport;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.ZooKeeper.States;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * ZooKeeper token store implementation.
- */
-public class ZooKeeperTokenStore implements TokenStoreDelegationTokenSecretManager.TokenStore {
-
- private static final Logger LOGGER =
- LoggerFactory.getLogger(ZooKeeperTokenStore.class.getName());
-
- private static final String ZK_SEQ_FORMAT = "%010d";
- private static final String NODE_KEYS = "/keys";
- private static final String NODE_TOKENS = "/tokens";
-
- private String rootNode = "";
- private volatile ZooKeeper zkSession;
- private String zkConnectString;
- private final int zkSessionTimeout = 3000;
-
- private class ZooKeeperWatcher implements Watcher {
- public void process(org.apache.zookeeper.WatchedEvent event) {
- LOGGER.info(event.toString());
- if (event.getState() == Watcher.Event.KeeperState.Expired) {
- LOGGER.warn("ZooKeeper session expired, discarding connection");
- try {
- zkSession.close();
- } catch (Throwable e) {
- LOGGER.warn("Failed to close connection on expired session", e);
- }
- }
- }
- }
-
- /**
- * Default constructor for dynamic instantiation w/ Configurable
- * (ReflectionUtils does not support Configuration constructor injection).
- */
- protected ZooKeeperTokenStore() {
- }
-
- public ZooKeeperTokenStore(String hostPort) {
- this.zkConnectString = hostPort;
- init();
- }
-
- private ZooKeeper getSession() {
- if (zkSession == null || zkSession.getState() == States.CLOSED) {
- synchronized (this) {
- if (zkSession == null || zkSession.getState() == States.CLOSED) {
- try {
- zkSession = new ZooKeeper(this.zkConnectString, this.zkSessionTimeout,
- new ZooKeeperWatcher());
- } catch (IOException ex) {
- throw new TokenStoreError("Token store error.", ex);
- }
- }
- }
- }
- return zkSession;
- }
-
- private static String ensurePath(ZooKeeper zk, String path) throws KeeperException,
- InterruptedException {
- String[] pathComps = StringUtils.splitByWholeSeparator(path, "/");
- String currentPath = "";
- for (String pathComp : pathComps) {
- currentPath += "/" + pathComp;
- try {
- String node = zk.create(currentPath, new byte[0], Ids.OPEN_ACL_UNSAFE,
- CreateMode.PERSISTENT);
- LOGGER.info("Created path: " + node);
- } catch (KeeperException.NodeExistsException e) {
- }
- }
- return currentPath;
- }
-
- private void init() {
- if (this.zkConnectString == null) {
- throw new IllegalStateException("Not initialized");
- }
-
- if (this.zkSession != null) {
- try {
- this.zkSession.close();
- } catch (InterruptedException ex) {
- LOGGER.warn("Failed to close existing session.", ex);
- }
- }
-
- ZooKeeper zk = getSession();
- try {
- ensurePath(zk, rootNode + NODE_KEYS);
- ensurePath(zk, rootNode + NODE_TOKENS);
- } catch (Exception e) {
- throw new TokenStoreError("Failed to validate token path.", e);
- }
- }
-
- @Override
- public void setConf(Configuration conf) {
- if (conf != null) {
- this.zkConnectString = conf.get(
- HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_CONNECT_STR, null);
- this.rootNode = conf.get(
- HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_ROOT_NODE,
- HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_ROOT_NODE_DEFAULT);
- }
- init();
- }
-
- @Override
- public Configuration getConf() {
- return null; // not required
- }
-
- private Map<Integer, byte[]> getAllKeys() throws KeeperException,
- InterruptedException {
-
- String masterKeyNode = rootNode + NODE_KEYS;
- ZooKeeper zk = getSession();
- List<String> nodes = zk.getChildren(masterKeyNode, false);
- Map<Integer, byte[]> result = new HashMap<Integer, byte[]>();
- for (String node : nodes) {
- byte[] data = zk.getData(masterKeyNode + "/" + node, false, null);
- if (data != null) {
- result.put(getSeq(node), data);
- }
- }
- return result;
- }
-
- private int getSeq(String path) {
- String[] pathComps = path.split("/");
- return Integer.parseInt(pathComps[pathComps.length-1]);
- }
-
- @Override
- public int addMasterKey(String s) {
- try {
- ZooKeeper zk = getSession();
- String newNode = zk.create(rootNode + NODE_KEYS + "/", s.getBytes(), Ids.OPEN_ACL_UNSAFE,
- CreateMode.PERSISTENT_SEQUENTIAL);
- LOGGER.info("Added key {}", newNode);
- return getSeq(newNode);
- } catch (KeeperException ex) {
- throw new TokenStoreError(ex);
- } catch (InterruptedException ex) {
- throw new TokenStoreError(ex);
- }
- }
-
- @Override
- public void updateMasterKey(int keySeq, String s) {
- try {
- ZooKeeper zk = getSession();
- zk.setData(rootNode + NODE_KEYS + "/" + String.format(ZK_SEQ_FORMAT, keySeq), s.getBytes(),
- -1);
- } catch (KeeperException ex) {
- throw new TokenStoreError(ex);
- } catch (InterruptedException ex) {
- throw new TokenStoreError(ex);
- }
- }
-
- @Override
- public boolean removeMasterKey(int keySeq) {
- try {
- ZooKeeper zk = getSession();
- zk.delete(rootNode + NODE_KEYS + "/" + String.format(ZK_SEQ_FORMAT, keySeq), -1);
- return true;
- } catch (KeeperException.NoNodeException ex) {
- return false;
- } catch (KeeperException ex) {
- throw new TokenStoreError(ex);
- } catch (InterruptedException ex) {
- throw new TokenStoreError(ex);
- }
- }
-
- @Override
- public String[] getMasterKeys() {
- try {
- Map<Integer, byte[]> allKeys = getAllKeys();
- String[] result = new String[allKeys.size()];
- int resultIdx = 0;
- for (byte[] keyBytes : allKeys.values()) {
- result[resultIdx++] = new String(keyBytes);
- }
- return result;
- } catch (KeeperException ex) {
- throw new TokenStoreError(ex);
- } catch (InterruptedException ex) {
- throw new TokenStoreError(ex);
- }
- }
-
-
- private String getTokenPath(DelegationTokenIdentifier tokenIdentifier) {
- try {
- return rootNode + NODE_TOKENS + "/"
- + TokenStoreDelegationTokenSecretManager.encodeWritable(tokenIdentifier);
- } catch (IOException ex) {
- throw new TokenStoreError("Failed to encode token identifier", ex);
- }
- }
-
- @Override
- public boolean addToken(DelegationTokenIdentifier tokenIdentifier,
- DelegationTokenInformation token) {
- try {
- ZooKeeper zk = getSession();
- byte[] tokenBytes = HiveDelegationTokenSupport.encodeDelegationTokenInformation(token);
- String newNode = zk.create(getTokenPath(tokenIdentifier),
- tokenBytes, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
- LOGGER.info("Added token: {}", newNode);
- return true;
- } catch (KeeperException.NodeExistsException ex) {
- return false;
- } catch (KeeperException ex) {
- throw new TokenStoreError(ex);
- } catch (InterruptedException ex) {
- throw new TokenStoreError(ex);
- }
- }
-
- @Override
- public boolean removeToken(DelegationTokenIdentifier tokenIdentifier) {
- try {
- ZooKeeper zk = getSession();
- zk.delete(getTokenPath(tokenIdentifier), -1);
- return true;
- } catch (KeeperException.NoNodeException ex) {
- return false;
- } catch (KeeperException ex) {
- throw new TokenStoreError(ex);
- } catch (InterruptedException ex) {
- throw new TokenStoreError(ex);
- }
- }
-
- @Override
- public DelegationTokenInformation getToken(DelegationTokenIdentifier tokenIdentifier) {
- try {
- ZooKeeper zk = getSession();
- byte[] tokenBytes = zk.getData(getTokenPath(tokenIdentifier), false, null);
- try {
- return HiveDelegationTokenSupport.decodeDelegationTokenInformation(tokenBytes);
- } catch (Exception ex) {
- throw new TokenStoreError("Failed to decode token", ex);
- }
- } catch (KeeperException.NoNodeException ex) {
- return null;
- } catch (KeeperException ex) {
- throw new TokenStoreError(ex);
- } catch (InterruptedException ex) {
- throw new TokenStoreError(ex);
- }
- }
-
- @Override
- public List<DelegationTokenIdentifier> getAllDelegationTokenIdentifiers() {
- String containerNode = rootNode + NODE_TOKENS;
- final List<String> nodes;
- try {
- nodes = getSession().getChildren(containerNode, false);
- } catch (KeeperException ex) {
- throw new TokenStoreError(ex);
- } catch (InterruptedException ex) {
- throw new TokenStoreError(ex);
- }
- List<DelegationTokenIdentifier> result = new java.util.ArrayList<DelegationTokenIdentifier>(
- nodes.size());
- for (String node : nodes) {
- DelegationTokenIdentifier id = new DelegationTokenIdentifier();
- try {
- TokenStoreDelegationTokenSecretManager.decodeWritable(id, node);
- result.add(id);
- } catch (Exception e) {
- LOGGER.warn("Failed to decode token '{}'", node);
- }
- }
- return result;
- }
-
-}
Index: shims/src/0.20S/java/org/apache/hadoop/hive/thrift/MemoryTokenStore.java
===================================================================
--- shims/src/0.20S/java/org/apache/hadoop/hive/thrift/MemoryTokenStore.java (revision 1228525)
+++ shims/src/0.20S/java/org/apache/hadoop/hive/thrift/MemoryTokenStore.java (working copy)
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.thrift;
-
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
-
-/**
- * Default in-memory token store implementation.
- */
-public class MemoryTokenStore implements TokenStoreDelegationTokenSecretManager.TokenStore {
-
- private final java.util.concurrent.ConcurrentHashMap<Integer, String> masterKeys
- = new java.util.concurrent.ConcurrentHashMap<Integer, String>();
-
- private final java.util.concurrent.ConcurrentHashMap<DelegationTokenIdentifier, DelegationTokenInformation> tokens
- = new java.util.concurrent.ConcurrentHashMap<DelegationTokenIdentifier, DelegationTokenInformation>();
-
- private final AtomicInteger masterKeySeq = new AtomicInteger();
-
- @Override
- public void setConf(Configuration conf) {
- }
-
- @Override
- public Configuration getConf() {
- return null;
- }
-
- @Override
- public int addMasterKey(String s) {
- int keySeq = masterKeySeq.getAndIncrement();
- masterKeys.putIfAbsent(keySeq, s);
- return keySeq;
- }
-
- @Override
- public void updateMasterKey(int keySeq, String s) {
- masterKeys.put(keySeq, s);
- }
-
- @Override
- public boolean removeMasterKey(int keySeq) {
- return masterKeys.remove(keySeq) != null;
- }
-
- @Override
- public String[] getMasterKeys() {
- return masterKeys.values().toArray(new String[0]);
- }
-
- @Override
- public boolean addToken(DelegationTokenIdentifier tokenIdentifier,
- DelegationTokenInformation token) {
- DelegationTokenInformation tokenInfo = tokens.putIfAbsent(tokenIdentifier, token);
- return (tokenInfo == null);
- }
-
- @Override
- public boolean removeToken(DelegationTokenIdentifier tokenIdentifier) {
- DelegationTokenInformation tokenInfo = tokens.remove(tokenIdentifier);
- return tokenInfo != null;
- }
-
- @Override
- public DelegationTokenInformation getToken(DelegationTokenIdentifier tokenIdentifier) {
- return tokens.get(tokenIdentifier);
- }
-
- @Override
- public List<DelegationTokenIdentifier> getAllDelegationTokenIdentifiers() {
- List<DelegationTokenIdentifier> result = new java.util.ArrayList<DelegationTokenIdentifier>(
- tokens.size());
- for (DelegationTokenIdentifier id : tokens.keySet()) {
- result.add(id);
- }
- return result;
- }
-
-}
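
For context (not part of the patch), the master-key half of the TokenStore contract can be exercised directly against the in-memory implementation above; the sequence number returned by addMasterKey() is what callers embed back into the key via updateMasterKey(). A minimal sketch, assuming this class (or its common-secure replacement) is on the classpath:

import org.apache.hadoop.hive.thrift.MemoryTokenStore;

public class MemoryTokenStoreSketch {
  public static void main(String[] args) {
    MemoryTokenStore store = new MemoryTokenStore();

    // The store assigns the sequence number; the caller re-writes the key under that id.
    int seq = store.addMasterKey("opaque-encoded-key");
    store.updateMasterKey(seq, "opaque-encoded-key-with-id-" + seq);

    for (String key : store.getMasterKeys()) {
      System.out.println(key);
    }

    // true on the first removal, false once the key is gone.
    System.out.println(store.removeMasterKey(seq));
    System.out.println(store.removeMasterKey(seq));
  }
}
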
Index: shims/src/0.20S/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java
===================================================================
--- shims/src/0.20S/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java (revision 1228525)
+++ shims/src/0.20S/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java (working copy)
@@ -1,407 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.thrift;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
-import org.apache.hadoop.security.token.delegation.DelegationKey;
-import org.apache.hadoop.security.token.delegation.HiveDelegationTokenSupport;
-import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Extension of {@link DelegationTokenSecretManager} to support alternative to default in-memory
- * token management for fail-over and clustering through plug-able token store (ZooKeeper etc.).
- * Delegation tokens will be retrieved from the store on-demand and (unlike base class behavior) not
- * cached in memory. This avoids complexities related to token expiration. The security token is
- * needed only at the time the transport is opened (as opposed to per interface operation). The
- * assumption therefore is low cost of interprocess token retrieval (for random read efficient store
- * such as ZooKeeper) compared to overhead of synchronizing per-process in-memory token caches.
- * The wrapper incorporates the token store abstraction within the limitations of current
- * Hive/Hadoop dependency (.20S) with minimum code duplication.
- * Eventually this should be supported by Hadoop security directly.
- */
-public class TokenStoreDelegationTokenSecretManager extends DelegationTokenSecretManager {
-
- private static final Logger LOGGER =
- LoggerFactory.getLogger(TokenStoreDelegationTokenSecretManager.class.getName());
-
- /**
- * Exception for internal token store errors that typically cannot be handled by the caller.
- */
- public static class TokenStoreError extends RuntimeException {
- private static final long serialVersionUID = -8693819817623074083L;
-
- public TokenStoreError(Throwable cause) {
- super(cause);
- }
-
- public TokenStoreError(String message, Throwable cause) {
- super(message, cause);
- }
- }
-
- /**
- * Interface for pluggable token store that can be implemented as shared store with external
- * storage (for example with ZooKeeper for HA).
- * Internal, store specific errors are translated into {@link TokenStoreError}.
- */
- public static interface TokenStore extends Configurable {
- /**
- * Add new master key. The token store assigns and returns the sequence number.
- * Caller needs to use the identifier to update the key (since it is embedded in the key).
- *
- * @param s
- * @return sequence number for new key
- */
- int addMasterKey(String s) throws TokenStoreError;
-
- void updateMasterKey(int keySeq, String s) throws TokenStoreError;
-
- /**
- * Remove key for given id.
- * @param keySeq
- * @return false if key no longer present, true otherwise.
- */
- boolean removeMasterKey(int keySeq);
-
- String[] getMasterKeys() throws TokenStoreError;
-
- /**
- * Add token. If identifier is already present, token won't be added.
- * @param tokenIdentifier
- * @param token
- * @return true if token was added, false for existing identifier
- */
- boolean addToken(DelegationTokenIdentifier tokenIdentifier,
- DelegationTokenInformation token) throws TokenStoreError;
-
- /**
- * Get token. Returns null if the token does not exist.
- * @param tokenIdentifier
- * @return
- */
- DelegationTokenInformation getToken(DelegationTokenIdentifier tokenIdentifier)
- throws TokenStoreError;
-
- /**
- * Remove token. Ignores token does not exist.
- * @param tokenIdentifier
- */
- boolean removeToken(DelegationTokenIdentifier tokenIdentifier) throws TokenStoreError;
-
- /**
- * List of all token identifiers in the store. This is used to remove expired tokens
- * and a potential scalability improvement would be to partition by master key id
- * @return
- */
- List<DelegationTokenIdentifier> getAllDelegationTokenIdentifiers();
-
- }
-
- final private long keyUpdateInterval;
- final private long tokenRemoverScanInterval;
- private Thread tokenRemoverThread;
-
- final private TokenStore tokenStore;
-
- public TokenStoreDelegationTokenSecretManager(long delegationKeyUpdateInterval,
- long delegationTokenMaxLifetime, long delegationTokenRenewInterval,
- long delegationTokenRemoverScanInterval, TokenStore sharedStore) {
- super(delegationKeyUpdateInterval, delegationTokenMaxLifetime, delegationTokenRenewInterval,
- delegationTokenRemoverScanInterval);
- this.keyUpdateInterval = delegationKeyUpdateInterval;
- this.tokenRemoverScanInterval = delegationTokenRemoverScanInterval;
-
- this.tokenStore = sharedStore;
- }
-
- protected DelegationTokenIdentifier getTokenIdentifier(Token<DelegationTokenIdentifier> token)
- throws IOException {
- // turn bytes back into identifier for cache lookup
- ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
- DataInputStream in = new DataInputStream(buf);
- DelegationTokenIdentifier id = createIdentifier();
- id.readFields(in);
- return id;
- }
-
- protected Map<Integer, DelegationKey> reloadKeys() {
- // read keys from token store
- String[] allKeys = tokenStore.getMasterKeys();
- Map<Integer, DelegationKey> keys
- = new java.util.HashMap<Integer, DelegationKey>(allKeys.length);
- for (String keyStr : allKeys) {
- DelegationKey key = new DelegationKey();
- try {
- decodeWritable(key, keyStr);
- keys.put(key.getKeyId(), key);
- } catch (IOException ex) {
- LOGGER.error("Failed to load master key.", ex);
- }
- }
- synchronized (this) {
- super.allKeys.clear();
- super.allKeys.putAll(keys);
- }
- return keys;
- }
-
- @Override
- public byte[] retrievePassword(DelegationTokenIdentifier identifier)
- throws org.apache.hadoop.security.token.SecretManager.InvalidToken {
- DelegationTokenInformation info = this.tokenStore.getToken(identifier);
- if (info == null) {
- throw new InvalidToken("token expired or does not exist: " + identifier);
- }
- // must reuse super as info.getPassword is not accessible
- synchronized (this) {
- try {
- super.currentTokens.put(identifier, info);
- return super.retrievePassword(identifier);
- } finally {
- super.currentTokens.remove(identifier);
- }
- }
- }
-
- @Override
- public DelegationTokenIdentifier cancelToken(Token<DelegationTokenIdentifier> token,
- String canceller) throws IOException {
- DelegationTokenIdentifier id = getTokenIdentifier(token);
- LOGGER.info("Token cancelation requested for identifier: "+id);
- this.tokenStore.removeToken(id);
- return id;
- }
-
- /**
- * Create the password and add it to shared store.
- */
- @Override
- protected byte[] createPassword(DelegationTokenIdentifier id) {
- byte[] password;
- DelegationTokenInformation info;
- synchronized (this) {
- password = super.createPassword(id);
- // add new token to shared store
- // need to persist expiration along with password
- info = super.currentTokens.remove(id);
- if (info == null) {
- throw new IllegalStateException("Failed to retrieve token after creation");
- }
- }
- this.tokenStore.addToken(id, info);
- return password;
- }
-
- @Override
- public long renewToken(Token<DelegationTokenIdentifier> token,
- String renewer) throws InvalidToken, IOException {
- // since renewal is KERBEROS authenticated token may not be cached
- final DelegationTokenIdentifier id = getTokenIdentifier(token);
- DelegationTokenInformation tokenInfo = this.tokenStore.getToken(id);
- if (tokenInfo == null) {
- throw new InvalidToken("token does not exist: " + id); // no token found
- }
- // ensure associated master key is available
- if (!super.allKeys.containsKey(id.getMasterKeyId())) {
- LOGGER.info("Unknown master key (id={}), (re)loading keys from token store.",
- id.getMasterKeyId());
- reloadKeys();
- }
- // reuse super renewal logic
- synchronized (this) {
- super.currentTokens.put(id, tokenInfo);
- try {
- return super.renewToken(token, renewer);
- } finally {
- super.currentTokens.remove(id);
- }
- }
- }
-
- public static String encodeWritable(Writable key) throws IOException {
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- DataOutputStream dos = new DataOutputStream(bos);
- key.write(dos);
- dos.flush();
- return Base64.encodeBase64URLSafeString(bos.toByteArray());
- }
-
- public static void decodeWritable(Writable w, String idStr) throws IOException {
- DataInputStream in = new DataInputStream(new ByteArrayInputStream(Base64.decodeBase64(idStr)));
- w.readFields(in);
- }
-
- /**
- * Synchronize master key updates / sequence generation for multiple nodes.
- * NOTE: {@link AbstractDelegationTokenSecretManager} keeps currentKey private, so we need
- * to utilize this "hook" to manipulate the key through the object reference.
- * This .20S workaround should cease to exist when Hadoop supports token store.
- */
- @Override
- protected void logUpdateMasterKey(DelegationKey key) throws IOException {
- int keySeq = this.tokenStore.addMasterKey(encodeWritable(key));
- // update key with assigned identifier
- DelegationKey keyWithSeq = new DelegationKey(keySeq, key.getExpiryDate(), key.getKey());
- String keyStr = encodeWritable(keyWithSeq);
- this.tokenStore.updateMasterKey(keySeq, keyStr);
- decodeWritable(key, keyStr);
- LOGGER.info("New master key with key id={}", key.getKeyId());
- super.logUpdateMasterKey(key);
- }
-
- @Override
- public synchronized void startThreads() throws IOException {
- try {
- // updateCurrentKey needs to be called to initialize the master key
- // (there should be a null check added in the future in rollMasterKey)
- // updateCurrentKey();
- Method m = AbstractDelegationTokenSecretManager.class.getDeclaredMethod("updateCurrentKey");
- m.setAccessible(true);
- m.invoke(this);
- } catch (Exception e) {
- throw new IOException("Failed to initialize master key", e);
- }
- running = true;
- tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
- tokenRemoverThread.start();
- }
-
- @Override
- public synchronized void stopThreads() {
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("Stopping expired delegation token remover thread");
- }
- running = false;
- if (tokenRemoverThread != null) {
- tokenRemoverThread.interrupt();
- }
- }
-
- /**
- * Remove expired tokens. Replaces logic in {@link AbstractDelegationTokenSecretManager}
- * that cannot be reused due to private method access. Logic here can more efficiently
- * deal with external token store by only loading into memory the minimum data needed.
- */
- protected void removeExpiredTokens() {
- long now = System.currentTimeMillis();
- Iterator<DelegationTokenIdentifier> i = tokenStore.getAllDelegationTokenIdentifiers()
- .iterator();
- while (i.hasNext()) {
- DelegationTokenIdentifier id = i.next();
- if (now > id.getMaxDate()) {
- this.tokenStore.removeToken(id); // no need to look at token info
- } else {
- // get token info to check renew date
- DelegationTokenInformation tokenInfo = tokenStore.getToken(id);
- if (tokenInfo != null) {
- if (now > tokenInfo.getRenewDate()) {
- this.tokenStore.removeToken(id);
- }
- }
- }
- }
- }
-
- /**
- * Extension of rollMasterKey to remove expired keys from store.
- * @throws IOException
- */
- protected void rollMasterKeyExt() throws IOException {
- Map<Integer, DelegationKey> keys = reloadKeys();
- int currentKeyId = super.currentId;
- HiveDelegationTokenSupport.rollMasterKey(TokenStoreDelegationTokenSecretManager.this);
- List<DelegationKey> keysAfterRoll = Arrays.asList(getAllKeys());
- for (DelegationKey key : keysAfterRoll) {
- keys.remove(key.getKeyId());
- if (key.getKeyId() == currentKeyId) {
- tokenStore.updateMasterKey(currentKeyId, encodeWritable(key));
- }
- }
- for (DelegationKey expiredKey : keys.values()) {
- LOGGER.info("Removing expired key id={}", expiredKey.getKeyId());
- tokenStore.removeMasterKey(expiredKey.getKeyId());
- }
- }
-
-
- /**
- * Cloned from {@link AbstractDelegationTokenSecretManager} to deal with private access
- * restriction (there would not be an need to clone the remove thread if the remove logic was
- * protected/extensible).
- */
- protected class ExpiredTokenRemover extends Thread {
- private long lastMasterKeyUpdate;
- private long lastTokenCacheCleanup;
-
- @Override
- public void run() {
- LOGGER.info("Starting expired delegation token remover thread, "
- + "tokenRemoverScanInterval=" + tokenRemoverScanInterval
- / (60 * 1000) + " min(s)");
- try {
- while (running) {
- long now = System.currentTimeMillis();
- if (lastMasterKeyUpdate + keyUpdateInterval < now) {
- try {
- rollMasterKeyExt();
- lastMasterKeyUpdate = now;
- } catch (IOException e) {
- LOGGER.error("Master key updating failed. "
- + StringUtils.stringifyException(e));
- }
- }
- if (lastTokenCacheCleanup + tokenRemoverScanInterval < now) {
- removeExpiredTokens();
- lastTokenCacheCleanup = now;
- }
- try {
- Thread.sleep(5000); // 5 seconds
- } catch (InterruptedException ie) {
- LOGGER
- .error("InterruptedException received for ExpiredTokenRemover thread "
- + ie);
- }
- }
- } catch (Throwable t) {
- LOGGER.error("ExpiredTokenRemover thread received unexpected exception. "
- + t, t);
- Runtime.getRuntime().exit(-1);
- }
- }
- }
-
-}
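
As a reference for the public encodeWritable()/decodeWritable() helpers above (sketch only, not part of the patch): any Writable, such as a DelegationKey, round-trips through the URL-safe Base64 string form used when persisting keys and identifiers in an external store. The class name WritableCodecSketch is assumed:

import java.io.IOException;
import org.apache.hadoop.hive.thrift.TokenStoreDelegationTokenSecretManager;
import org.apache.hadoop.security.token.delegation.DelegationKey;

public class WritableCodecSketch {
  public static void main(String[] args) throws IOException {
    DelegationKey key = new DelegationKey();            // empty key, as in reloadKeys()
    String encoded = TokenStoreDelegationTokenSecretManager.encodeWritable(key);

    DelegationKey copy = new DelegationKey();
    TokenStoreDelegationTokenSecretManager.decodeWritable(copy, encoded);
    System.out.println("key id after round trip: " + copy.getKeyId());
  }
}
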
Index: shims/src/0.20S/java/org/apache/hadoop/hive/thrift/DelegationTokenSelector.java
===================================================================
--- shims/src/0.20S/java/org/apache/hadoop/hive/thrift/DelegationTokenSelector.java (revision 1228525)
+++ shims/src/0.20S/java/org/apache/hadoop/hive/thrift/DelegationTokenSelector.java (working copy)
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.thrift;
-
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
-
-/**
- * A delegation token that is specialized for Hive
- */
-
-public class DelegationTokenSelector
- extends AbstractDelegationTokenSelector<DelegationTokenIdentifier>{
-
- public DelegationTokenSelector() {
- super(DelegationTokenIdentifier.HIVE_DELEGATION_KIND);
- }
-}
Index: shims/src/0.20S/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java
===================================================================
--- shims/src/0.20S/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java (revision 1228525)
+++ shims/src/0.20S/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java (working copy)
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.thrift;
-
-import java.io.IOException;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
-
-/**
- * A Hive specific delegation token secret manager.
- * The secret manager is responsible for generating and accepting the password
- * for each token.
- */
-public class DelegationTokenSecretManager
- extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
-
- /**
- * Create a secret manager
- * @param delegationKeyUpdateInterval the number of seconds for rolling new
- * secret keys.
- * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
- * tokens
- * @param delegationTokenRenewInterval how often the tokens must be renewed
- * @param delegationTokenRemoverScanInterval how often the tokens are scanned
- * for expired tokens
- */
- public DelegationTokenSecretManager(long delegationKeyUpdateInterval,
- long delegationTokenMaxLifetime,
- long delegationTokenRenewInterval,
- long delegationTokenRemoverScanInterval) {
- super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
- delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
- }
-
- @Override
- public DelegationTokenIdentifier createIdentifier() {
- return new DelegationTokenIdentifier();
- }
-
- public synchronized void cancelDelegationToken(String tokenStrForm) throws IOException {
- Token<DelegationTokenIdentifier> t= new Token<DelegationTokenIdentifier>();
- t.decodeFromUrlString(tokenStrForm);
- String user = UserGroupInformation.getCurrentUser().getUserName();
- cancelToken(t, user);
- }
-
- public synchronized long renewDelegationToken(String tokenStrForm) throws IOException {
- Token<DelegationTokenIdentifier> t= new Token<DelegationTokenIdentifier>();
- t.decodeFromUrlString(tokenStrForm);
- String user = UserGroupInformation.getCurrentUser().getUserName();
- return renewToken(t, user);
- }
-
- public synchronized String getDelegationToken(String renewer) throws IOException {
- UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
- Text owner = new Text(ugi.getUserName());
- Text realUser = null;
- if (ugi.getRealUser() != null) {
- realUser = new Text(ugi.getRealUser().getUserName());
- }
- DelegationTokenIdentifier ident =
- new DelegationTokenIdentifier(owner, new Text(renewer), realUser);
- Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>(
- ident, this);
- return t.encodeToUrlString();
- }
-}
-
Index: shims/src/0.20S/java/org/apache/hadoop/hive/thrift/client/TUGIAssumingTransport.java
===================================================================
--- shims/src/0.20S/java/org/apache/hadoop/hive/thrift/client/TUGIAssumingTransport.java (revision 1228525)
+++ shims/src/0.20S/java/org/apache/hadoop/hive/thrift/client/TUGIAssumingTransport.java (working copy)
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.thrift.client;
-
-import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
-
-import org.apache.hadoop.hive.thrift.TFilterTransport;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.thrift.transport.TTransport;
-import org.apache.thrift.transport.TTransportException;
-
-/**
- * The Thrift SASL transports call Sasl.createSaslServer and Sasl.createSaslClient
- * inside open(). So, we need to assume the correct UGI when the transport is opened
- * so that the SASL mechanisms have access to the right principal. This transport
- * wraps the Sasl transports to set up the right UGI context for open().
- *
- * This is used on the client side, where the API explicitly opens a transport to
- * the server.
- */
- public class TUGIAssumingTransport extends TFilterTransport {
- protected UserGroupInformation ugi;
-
- public TUGIAssumingTransport(TTransport wrapped, UserGroupInformation ugi) {
- super(wrapped);
- this.ugi = ugi;
- }
-
- @Override
- public void open() throws TTransportException {
- try {
- ugi.doAs(new PrivilegedExceptionAction<Void>() {
- public Void run() {
- try {
- wrapped.open();
- } catch (TTransportException tte) {
- // Wrap the transport exception in an RTE, since UGI.doAs() then goes
- // and unwraps this for us out of the doAs block. We then unwrap one
- // more time in our catch clause to get back the TTE. (ugh)
- throw new RuntimeException(tte);
- }
- return null;
- }
- });
- } catch (IOException ioe) {
- throw new RuntimeException("Received an ioe we never threw!", ioe);
- } catch (InterruptedException ie) {
- throw new RuntimeException("Received an ie we never threw!", ie);
- } catch (RuntimeException rte) {
- if (rte.getCause() instanceof TTransportException) {
- throw (TTransportException)rte.getCause();
- } else {
- throw rte;
- }
- }
- }
- }
\ No newline at end of file
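
A possible client-side usage of the wrapper above (sketch, not part of the patch): in the bridge code the wrapped transport is a TSaslClientTransport, but wrapping a bare TSocket keeps the example self-contained; the host name and port are placeholders:

import org.apache.hadoop.hive.thrift.client.TUGIAssumingTransport;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;

public class TUGIAssumingTransportSketch {
  public static void main(String[] args) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // "metastore-host" and 9083 are placeholder connection details.
    TTransport socket = new TSocket("metastore-host", 9083);
    TTransport transport = new TUGIAssumingTransport(socket, ugi);
    try {
      transport.open();   // runs the wrapped open() inside ugi.doAs(...)
    } catch (TTransportException e) {
      System.err.println("open failed: " + e.getMessage());
    } finally {
      transport.close();
    }
  }
}
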
Index: shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
===================================================================
--- shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java (revision 1228525)
+++ shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java (working copy)
@@ -17,512 +17,19 @@
*/
package org.apache.hadoop.hive.shims;
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.List;
-
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
-import org.apache.hadoop.hive.thrift.DelegationTokenSelector;
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hive.shims.HadoopShimsSecure;
import org.apache.hadoop.mapred.ClusterStatus;
-import org.apache.hadoop.mapred.FileInputFormat;
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobContext;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.OutputCommitter;
-import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.RunningJob;
-import org.apache.hadoop.mapred.TaskAttemptContext;
-import org.apache.hadoop.mapred.TaskCompletionEvent;
-import org.apache.hadoop.mapred.TaskID;
-import org.apache.hadoop.mapred.lib.CombineFileInputFormat;
-import org.apache.hadoop.mapred.lib.CombineFileSplit;
-import org.apache.hadoop.mapred.lib.NullOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskAttemptID;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenSelector;
-import org.apache.hadoop.tools.HadoopArchives;
import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.ToolRunner;
/**
- * Implemention of shims against Hadoop 0.20.0.
+ * Implementation of shims against Hadoop 0.20 with Security.
*/
-public class Hadoop20SShims implements HadoopShims {
- public boolean usesJobShell() {
- return false;
- }
+public class Hadoop20SShims extends HadoopShimsSecure {
- public boolean fileSystemDeleteOnExit(FileSystem fs, Path path)
- throws IOException {
-
- return fs.deleteOnExit(path);
- }
-
- public void inputFormatValidateInput(InputFormat fmt, JobConf conf)
- throws IOException {
- // gone in 0.18+
- }
-
- public boolean isJobPreparing(RunningJob job) throws IOException {
- return job.getJobState() == JobStatus.PREP;
- }
- /**
- * Workaround for hadoop-17 - jobclient only looks at commandlineconfig.
- */
- public void setTmpFiles(String prop, String files) {
- // gone in 20+
- }
-
- public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf,
- int numDataNodes,
- boolean format,
- String[] racks) throws IOException {
- return new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks));
- }
-
- /**
- * MiniDFSShim.
- *
- */
- public class MiniDFSShim implements HadoopShims.MiniDFSShim {
- private final MiniDFSCluster cluster;
-
- public MiniDFSShim(MiniDFSCluster cluster) {
- this.cluster = cluster;
- }
-
- public FileSystem getFileSystem() throws IOException {
- return cluster.getFileSystem();
- }
-
- public void shutdown() {
- cluster.shutdown();
- }
- }
-
- /**
- * We define this function here to make the code compatible between
- * hadoop 0.17 and hadoop 0.20.
- *
- * Hive binary that compiled Text.compareTo(Text) with hadoop 0.20 won't
- * work with hadoop 0.17 because in hadoop 0.20, Text.compareTo(Text) is
- * implemented in org.apache.hadoop.io.BinaryComparable, and Java compiler
- * references that class, which is not available in hadoop 0.17.
- */
- public int compareText(Text a, Text b) {
- return a.compareTo(b);
- }
-
@Override
- public long getAccessTime(FileStatus file) {
- return file.getAccessTime();
- }
-
- public HadoopShims.CombineFileInputFormatShim getCombineFileInputFormat() {
- return new CombineFileInputFormatShim() {
- @Override
- public RecordReader getRecordReader(InputSplit split,
- JobConf job, Reporter reporter) throws IOException {
- throw new IOException("CombineFileInputFormat.getRecordReader not needed.");
- }
- };
- }
-
- public static class InputSplitShim extends CombineFileSplit implements HadoopShims.InputSplitShim {
- long shrinkedLength;
- boolean _isShrinked;
- public InputSplitShim() {
- super();
- _isShrinked = false;
- }
-
- public InputSplitShim(CombineFileSplit old) throws IOException {
- super(old);
- _isShrinked = false;
- }
-
- @Override
- public void shrinkSplit(long length) {
- _isShrinked = true;
- shrinkedLength = length;
- }
-
- public boolean isShrinked() {
- return _isShrinked;
- }
-
- public long getShrinkedLength() {
- return shrinkedLength;
- }
-
- @Override
- public void readFields(DataInput in) throws IOException {
- super.readFields(in);
- _isShrinked = in.readBoolean();
- if (_isShrinked) {
- shrinkedLength = in.readLong();
- }
- }
-
- @Override
- public void write(DataOutput out) throws IOException {
- super.write(out);
- out.writeBoolean(_isShrinked);
- if (_isShrinked) {
- out.writeLong(shrinkedLength);
- }
- }
- }
-
- /* This class should be replaced with org.apache.hadoop.mapred.lib.CombineFileRecordReader class, once
- * https://issues.apache.org/jira/browse/MAPREDUCE-955 is fixed. This code should be removed - it is a copy
- * of org.apache.hadoop.mapred.lib.CombineFileRecordReader
- */
- public static class CombineFileRecordReader<K, V> implements RecordReader<K, V> {
-
- static final Class[] constructorSignature = new Class[] {
- InputSplit.class,
- Configuration.class,
- Reporter.class,
- Integer.class
- };
-
- protected CombineFileSplit split;
- protected JobConf jc;
- protected Reporter reporter;
- protected Class<RecordReader<K, V>> rrClass;
- protected Constructor<RecordReader<K, V>> rrConstructor;
- protected FileSystem fs;
-
- protected int idx;
- protected long progress;
- protected RecordReader<K, V> curReader;
- protected boolean isShrinked;
- protected long shrinkedLength;
-
- public boolean next(K key, V value) throws IOException {
-
- while ((curReader == null)
- || !doNextWithExceptionHandler((K) ((CombineHiveKey) key).getKey(),
- value)) {
- if (!initNextRecordReader(key)) {
- return false;
- }
- }
- return true;
- }
-
- public K createKey() {
- K newKey = curReader.createKey();
- return (K)(new CombineHiveKey(newKey));
- }
-
- public V createValue() {
- return curReader.createValue();
- }
-
- /**
- * Return the amount of data processed.
- */
- public long getPos() throws IOException {
- return progress;
- }
-
- public void close() throws IOException {
- if (curReader != null) {
- curReader.close();
- curReader = null;
- }
- }
-
- /**
- * Return progress based on the amount of data processed so far.
- */
- public float getProgress() throws IOException {
- return Math.min(1.0f, progress / (float) (split.getLength()));
- }
-
- /**
- * A generic RecordReader that can hand out different recordReaders
- * for each chunk in the CombineFileSplit.
- */
- public CombineFileRecordReader(JobConf job, CombineFileSplit split,
- Reporter reporter,
- Class<RecordReader<K, V>> rrClass)
- throws IOException {
- this.split = split;
- this.jc = job;
- this.rrClass = rrClass;
- this.reporter = reporter;
- this.idx = 0;
- this.curReader = null;
- this.progress = 0;
-
- isShrinked = false;
-
- assert (split instanceof Hadoop20Shims.InputSplitShim);
- if (((InputSplitShim) split).isShrinked()) {
- isShrinked = true;
- shrinkedLength = ((InputSplitShim) split).getShrinkedLength();
- }
-
- try {
- rrConstructor = rrClass.getDeclaredConstructor(constructorSignature);
- rrConstructor.setAccessible(true);
- } catch (Exception e) {
- throw new RuntimeException(rrClass.getName() +
- " does not have valid constructor", e);
- }
- initNextRecordReader(null);
- }
-
- /**
- * do next and handle exception inside it.
- * @param key
- * @param value
- * @return
- * @throws IOException
- */
- private boolean doNextWithExceptionHandler(K key, V value) throws IOException {
- try {
- return curReader.next(key, value);
- } catch (Exception e) {
- return HiveIOExceptionHandlerUtil
- .handleRecordReaderNextException(e, jc);
- }
- }
-
- /**
- * Get the record reader for the next chunk in this CombineFileSplit.
- */
- protected boolean initNextRecordReader(K key) throws IOException {
-
- if (curReader != null) {
- curReader.close();
- curReader = null;
- if (idx > 0) {
- progress += split.getLength(idx - 1); // done processing so far
- }
- }
-
- // if all chunks have been processed, nothing more to do.
- if (idx == split.getNumPaths() || (isShrinked && progress > shrinkedLength)) {
- return false;
- }
-
- // get a record reader for the idx-th chunk
- try {
- curReader = rrConstructor.newInstance(new Object[]
- {split, jc, reporter, Integer.valueOf(idx)});
-
- // change the key if need be
- if (key != null) {
- K newKey = curReader.createKey();
- ((CombineHiveKey)key).setKey(newKey);
- }
-
- // setup some helper config variables.
- jc.set("map.input.file", split.getPath(idx).toString());
- jc.setLong("map.input.start", split.getOffset(idx));
- jc.setLong("map.input.length", split.getLength(idx));
- } catch (Exception e) {
- curReader = HiveIOExceptionHandlerUtil.handleRecordReaderCreationException(
- e, jc);
- }
- idx++;
- return true;
- }
- }
-
- public abstract static class CombineFileInputFormatShim<K, V> extends
- CombineFileInputFormat<K, V>
- implements HadoopShims.CombineFileInputFormatShim<K, V> {
-
- public Path[] getInputPathsShim(JobConf conf) {
- try {
- return FileInputFormat.getInputPaths(conf);
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
-
- @Override
- public void createPool(JobConf conf, PathFilter... filters) {
- super.createPool(conf, filters);
- }
-
- @Override
- public InputSplitShim[] getSplits(JobConf job, int numSplits) throws IOException {
- long minSize = job.getLong("mapred.min.split.size", 0);
-
- // For backward compatibility, let the above parameter be used
- if (job.getLong("mapred.min.split.size.per.node", 0) == 0) {
- super.setMinSplitSizeNode(minSize);
- }
-
- if (job.getLong("mapred.min.split.size.per.rack", 0) == 0) {
- super.setMinSplitSizeRack(minSize);
- }
-
- if (job.getLong("mapred.max.split.size", 0) == 0) {
- super.setMaxSplitSize(minSize);
- }
-
- CombineFileSplit[] splits = (CombineFileSplit[]) super.getSplits(job, numSplits);
-
- InputSplitShim[] isplits = new InputSplitShim[splits.length];
- for (int pos = 0; pos < splits.length; pos++) {
- isplits[pos] = new InputSplitShim(splits[pos]);
- }
-
- return isplits;
- }
-
- public InputSplitShim getInputSplitShim() throws IOException {
- return new InputSplitShim();
- }
-
- public RecordReader getRecordReader(JobConf job, HadoopShims.InputSplitShim split,
- Reporter reporter,
- Class<RecordReader<K, V>> rrClass)
- throws IOException {
- CombineFileSplit cfSplit = (CombineFileSplit) split;
- return new CombineFileRecordReader(job, cfSplit, reporter, rrClass);
- }
-
- }
-
- public String getInputFormatClassName() {
- return "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat";
- }
-
- String[] ret = new String[2];
-
- @Override
- public String[] getTaskJobIDs(TaskCompletionEvent t) {
- TaskID tid = t.getTaskAttemptId().getTaskID();
- ret[0] = tid.toString();
- ret[1] = tid.getJobID().toString();
- return ret;
- }
-
- public void setFloatConf(Configuration conf, String varName, float val) {
- conf.setFloat(varName, val);
- }
-
- @Override
- public int createHadoopArchive(Configuration conf, Path sourceDir, Path destDir,
- String archiveName) throws Exception {
-
- HadoopArchives har = new HadoopArchives(conf);
- List<String> args = new ArrayList<String>();
-
- if (conf.get("hive.archive.har.parentdir.settable") == null) {
- throw new RuntimeException("hive.archive.har.parentdir.settable is not set");
- }
- boolean parentSettable =
- conf.getBoolean("hive.archive.har.parentdir.settable", false);
-
- if (parentSettable) {
- args.add("-archiveName");
- args.add(archiveName);
- args.add("-p");
- args.add(sourceDir.toString());
- args.add(destDir.toString());
- } else {
- args.add("-archiveName");
- args.add(archiveName);
- args.add(sourceDir.toString());
- args.add(destDir.toString());
- }
-
- return ToolRunner.run(har, args.toArray(new String[0]));
- }
-
- public static class NullOutputCommitter extends OutputCommitter {
- @Override
- public void setupJob(JobContext jobContext) { }
- @Override
- public void cleanupJob(JobContext jobContext) { }
-
- @Override
- public void setupTask(TaskAttemptContext taskContext) { }
- @Override
- public boolean needsTaskCommit(TaskAttemptContext taskContext) {
- return false;
- }
- @Override
- public void commitTask(TaskAttemptContext taskContext) { }
- @Override
- public void abortTask(TaskAttemptContext taskContext) { }
- }
-
- public void setNullOutputFormat(JobConf conf) {
- conf.setOutputFormat(NullOutputFormat.class);
- conf.setOutputCommitter(Hadoop20Shims.NullOutputCommitter.class);
-
- // option to bypass job setup and cleanup was introduced in hadoop-21 (MAPREDUCE-463)
- // but can be backported. So we disable setup/cleanup in all versions >= 0.19
- conf.setBoolean("mapred.committer.job.setup.cleanup.needed", false);
-
- // option to bypass task cleanup task was introduced in hadoop-23 (MAPREDUCE-2206)
- // but can be backported. So we disable setup/cleanup in all versions >= 0.19
- conf.setBoolean("mapreduce.job.committer.task.cleanup.needed", false);
- }
-
- @Override
- public UserGroupInformation getUGIForConf(Configuration conf) throws IOException {
- return UserGroupInformation.getCurrentUser();
- }
-
- @Override
- public boolean isSecureShimImpl() {
- return true;
- }
-
- @Override
- public String getShortUserName(UserGroupInformation ugi) {
- return ugi.getShortUserName();
- }
-
- @Override
- public String getTokenStrForm(String tokenSignature) throws IOException {
- UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
- TokenSelector<? extends TokenIdentifier> tokenSelector = new DelegationTokenSelector();
-
- Token<? extends TokenIdentifier> token = tokenSelector.selectToken(
- tokenSignature == null ? new Text() : new Text(tokenSignature), ugi.getTokens());
- return token != null ? token.encodeToUrlString() : null;
- }
-
- @Override
- public void doAs(UserGroupInformation ugi, PrivilegedExceptionAction<Void> pvea) throws IOException, InterruptedException {
- ugi.doAs(pvea);
- }
-
- @Override
- public UserGroupInformation createRemoteUser(String userName, List<String> groupNames) {
- return UserGroupInformation.createRemoteUser(userName);
- }
-
- @Override
public JobTrackerState getJobTrackerState(ClusterStatus clusterStatus) throws Exception {
JobTrackerState state;
switch (clusterStatus.getJobTrackerState()) {
@@ -535,7 +42,7 @@
throw new Exception(errorMsg);
}
}
-
+
@Override
public org.apache.hadoop.mapreduce.TaskAttemptContext newTaskAttemptContext(Configuration conf, final Progressable progressable) {
return new org.apache.hadoop.mapreduce.TaskAttemptContext(conf, new TaskAttemptID()) {
Index: shims/src/common-secure/java/org/apache/hadoop/security/token/delegation/HiveDelegationTokenSupport.java
===================================================================
--- shims/src/common-secure/java/org/apache/hadoop/security/token/delegation/HiveDelegationTokenSupport.java (revision 0)
+++ shims/src/common-secure/java/org/apache/hadoop/security/token/delegation/HiveDelegationTokenSupport.java (working copy)
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
+
+/**
+ * Workaround for serialization of {@link DelegationTokenInformation} through package access.
+ * Future version of Hadoop should add this to DelegationTokenInformation itself.
+ */
+public final class HiveDelegationTokenSupport {
+
+ private HiveDelegationTokenSupport() {}
+
+ public static byte[] encodeDelegationTokenInformation(DelegationTokenInformation token) {
+ try {
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ DataOutputStream out = new DataOutputStream(bos);
+ WritableUtils.writeVInt(out, token.password.length);
+ out.write(token.password);
+ out.writeLong(token.renewDate);
+ out.flush();
+ return bos.toByteArray();
+ } catch (IOException ex) {
+ throw new RuntimeException("Failed to encode token.", ex);
+ }
+ }
+
+ public static DelegationTokenInformation decodeDelegationTokenInformation(byte[] tokenBytes)
+ throws IOException {
+ DataInputStream in = new DataInputStream(new ByteArrayInputStream(tokenBytes));
+ DelegationTokenInformation token = new DelegationTokenInformation(0, null);
+ int len = WritableUtils.readVInt(in);
+ token.password = new byte[len];
+ in.readFully(token.password);
+ token.renewDate = in.readLong();
+ return token;
+ }
+
+ public static void rollMasterKey(
+ AbstractDelegationTokenSecretManager<? extends AbstractDelegationTokenIdentifier> mgr)
+ throws IOException {
+ mgr.rollMasterKey();
+ }
+
+}
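
A sketch (not part of the patch) of the encode/decode round trip this helper enables, assuming the public DelegationTokenInformation(long, byte[]) constructor of the targeted Hadoop versions; the class name TokenInfoCodecSketch is assumed:

import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
import org.apache.hadoop.security.token.delegation.HiveDelegationTokenSupport;

public class TokenInfoCodecSketch {
  public static void main(String[] args) throws Exception {
    long renewDate = System.currentTimeMillis() + 24L * 60 * 60 * 1000;
    DelegationTokenInformation info =
        new DelegationTokenInformation(renewDate, "secret".getBytes("UTF-8"));

    byte[] encoded = HiveDelegationTokenSupport.encodeDelegationTokenInformation(info);
    DelegationTokenInformation decoded =
        HiveDelegationTokenSupport.decodeDelegationTokenInformation(encoded);

    // getRenewDate() is public; the password itself stays package-private.
    System.out.println(decoded.getRenewDate() == renewDate);
  }
}
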
Index: shims/src/common-secure/java/org/apache/hadoop/hive/thrift/DelegationTokenSelector.java
===================================================================
--- shims/src/common-secure/java/org/apache/hadoop/hive/thrift/DelegationTokenSelector.java (revision 0)
+++ shims/src/common-secure/java/org/apache/hadoop/hive/thrift/DelegationTokenSelector.java (working copy)
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.thrift;
+
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
+
+/**
+ * A delegation token that is specialized for Hive
+ */
+
+public class DelegationTokenSelector
+ extends AbstractDelegationTokenSelector<DelegationTokenIdentifier>{
+
+ public DelegationTokenSelector() {
+ super(DelegationTokenIdentifier.HIVE_DELEGATION_KIND);
+ }
+}
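
A sketch (not part of the patch) of how this selector is typically used to pull a Hive delegation token out of the current user's credentials, mirroring getTokenStrForm() in the removed Hadoop20SShims:

import java.io.IOException;
import org.apache.hadoop.hive.thrift.DelegationTokenSelector;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenSelectorSketch {
  public static String tokenStrForm(String tokenSignature) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    DelegationTokenSelector selector = new DelegationTokenSelector();
    // An empty Text selects a token regardless of service signature.
    Token<? extends TokenIdentifier> token = selector.selectToken(
        tokenSignature == null ? new Text() : new Text(tokenSignature), ugi.getTokens());
    return token != null ? token.encodeToUrlString() : null; // null when no token is present
  }
}
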
Index: shims/src/common-secure/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java
===================================================================
--- shims/src/common-secure/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java (revision 0)
+++ shims/src/common-secure/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java (working copy)
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.thrift;
+
+import java.io.IOException;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+
+/**
+ * A Hive specific delegation token secret manager.
+ * The secret manager is responsible for generating and accepting the password
+ * for each token.
+ */
+public class DelegationTokenSecretManager
+ extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
+
+ /**
+ * Create a secret manager
+ * @param delegationKeyUpdateInterval the number of seconds for rolling new
+ * secret keys.
+ * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
+ * tokens
+ * @param delegationTokenRenewInterval how often the tokens must be renewed
+ * @param delegationTokenRemoverScanInterval how often the tokens are scanned
+ * for expired tokens
+ */
+ public DelegationTokenSecretManager(long delegationKeyUpdateInterval,
+ long delegationTokenMaxLifetime,
+ long delegationTokenRenewInterval,
+ long delegationTokenRemoverScanInterval) {
+ super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
+ delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
+ }
+
+ @Override
+ public DelegationTokenIdentifier createIdentifier() {
+ return new DelegationTokenIdentifier();
+ }
+
+ public synchronized void cancelDelegationToken(String tokenStrForm) throws IOException {
+ Token<DelegationTokenIdentifier> t= new Token<DelegationTokenIdentifier>();
+ t.decodeFromUrlString(tokenStrForm);
+ String user = UserGroupInformation.getCurrentUser().getUserName();
+ cancelToken(t, user);
+ }
+
+ public synchronized long renewDelegationToken(String tokenStrForm) throws IOException {
+ Token<DelegationTokenIdentifier> t= new Token<DelegationTokenIdentifier>();
+ t.decodeFromUrlString(tokenStrForm);
+ String user = UserGroupInformation.getCurrentUser().getUserName();
+ return renewToken(t, user);
+ }
+
+ public synchronized String getDelegationToken(String renewer) throws IOException {
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ Text owner = new Text(ugi.getUserName());
+ Text realUser = null;
+ if (ugi.getRealUser() != null) {
+ realUser = new Text(ugi.getRealUser().getUserName());
+ }
+ DelegationTokenIdentifier ident =
+ new DelegationTokenIdentifier(owner, new Text(renewer), realUser);
+ Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>(
+ ident, this);
+ return t.encodeToUrlString();
+ }
+}
+
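
A sketch (not part of the patch) of the issue/renew/cancel flow this secret manager exposes; the interval values are illustrative and startThreads() is required so an initial master key exists:

import org.apache.hadoop.hive.thrift.DelegationTokenSecretManager;
import org.apache.hadoop.security.UserGroupInformation;

public class SecretManagerSketch {
  public static void main(String[] args) throws Exception {
    DelegationTokenSecretManager mgr = new DelegationTokenSecretManager(
        24L * 60 * 60 * 1000,      // key update interval (illustrative)
        7L * 24 * 60 * 60 * 1000,  // max token lifetime (illustrative)
        24L * 60 * 60 * 1000,      // renew interval (illustrative)
        60L * 60 * 1000);          // expired-token scan interval (illustrative)
    mgr.startThreads();            // creates the initial master key, starts the remover thread
    try {
      String renewer = UserGroupInformation.getCurrentUser().getUserName();
      String tokenStrForm = mgr.getDelegationToken(renewer);
      mgr.renewDelegationToken(tokenStrForm);   // current user is both owner and renewer here
      mgr.cancelDelegationToken(tokenStrForm);
    } finally {
      mgr.stopThreads();
    }
  }
}
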
Index: shims/src/common-secure/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge20S.java
===================================================================
--- shims/src/common-secure/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge20S.java (revision 0)
+++ shims/src/common-secure/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge20S.java (working copy)
@@ -0,0 +1,563 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.thrift;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.Socket;
+import java.security.PrivilegedAction;
+import java.security.PrivilegedExceptionAction;
+
+import javax.security.auth.callback.Callback;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.auth.callback.NameCallback;
+import javax.security.auth.callback.PasswordCallback;
+import javax.security.auth.callback.UnsupportedCallbackException;
+import javax.security.sasl.AuthorizeCallback;
+import javax.security.sasl.RealmCallback;
+import javax.security.sasl.RealmChoiceCallback;
+import javax.security.sasl.SaslException;
+import javax.security.sasl.SaslServer;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.thrift.client.TUGIAssumingTransport;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.TProcessor;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.transport.TSaslClientTransport;
+import org.apache.thrift.transport.TSaslServerTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.apache.thrift.transport.TTransportFactory;
+
+
+ /**
+ * Functions that bridge Thrift's SASL transports to Hadoop's
+ * SASL callback handlers and authentication classes.
+ */
+ public class HadoopThriftAuthBridge20S extends HadoopThriftAuthBridge {
+ static final Log LOG = LogFactory.getLog(HadoopThriftAuthBridge.class);
+
+ @Override
+ public Client createClient() {
+ return new Client();
+ }
+
+ @Override
+ public Server createServer(String keytabFile, String principalConf) throws TTransportException {
+ return new Server(keytabFile, principalConf);
+ }
+
+ public static class Client extends HadoopThriftAuthBridge.Client {
+ /**
+ * Create a client-side SASL transport that wraps an underlying transport.
+ *
+ * @param method The authentication method to use. Currently only KERBEROS is
+ * supported.
+ * @param serverPrincipal The Kerberos principal of the target server.
+ * @param underlyingTransport The underlying transport mechanism, usually a TSocket.
+ */
+
+ @Override
+ public TTransport createClientTransport(
+ String principalConfig, String host,
+ String methodStr, String tokenStrForm, TTransport underlyingTransport)
+ throws IOException {
+ AuthMethod method = AuthMethod.valueOf(AuthMethod.class, methodStr);
+
+ TTransport saslTransport = null;
+ switch (method) {
+ case DIGEST:
+ Token<DelegationTokenIdentifier> t= new Token<DelegationTokenIdentifier>();
+ t.decodeFromUrlString(tokenStrForm);
+ saslTransport = new TSaslClientTransport(
+ method.getMechanismName(),
+ null,
+ null, SaslRpcServer.SASL_DEFAULT_REALM,
+ SaslRpcServer.SASL_PROPS, new SaslClientCallbackHandler(t),
+ underlyingTransport);
+ return new TUGIAssumingTransport(saslTransport, UserGroupInformation.getCurrentUser());
+
+ case KERBEROS:
+ String serverPrincipal = SecurityUtil.getServerPrincipal(principalConfig, host);
+ String names[] = SaslRpcServer.splitKerberosName(serverPrincipal);
+ if (names.length != 3) {
+ throw new IOException(
+ "Kerberos principal name does NOT have the expected hostname part: "
+ + serverPrincipal);
+ }
+ try {
+ saslTransport = new TSaslClientTransport(
+ method.getMechanismName(),
+ null,
+ names[0], names[1],
+ SaslRpcServer.SASL_PROPS, null,
+ underlyingTransport);
+ return new TUGIAssumingTransport(saslTransport, UserGroupInformation.getCurrentUser());
+ } catch (SaslException se) {
+ throw new IOException("Could not instantiate SASL transport", se);
+ }
+
+ default:
+ throw new IOException("Unsupported authentication method: " + method);
+ }
+ }
+ private static class SaslClientCallbackHandler implements CallbackHandler {
+ private final String userName;
+ private final char[] userPassword;
+
+ public SaslClientCallbackHandler(Token<? extends TokenIdentifier> token) {
+ this.userName = encodeIdentifier(token.getIdentifier());
+ this.userPassword = encodePassword(token.getPassword());
+ }
+
+ public void handle(Callback[] callbacks)
+ throws UnsupportedCallbackException {
+ NameCallback nc = null;
+ PasswordCallback pc = null;
+ RealmCallback rc = null;
+ for (Callback callback : callbacks) {
+ if (callback instanceof RealmChoiceCallback) {
+ continue;
+ } else if (callback instanceof NameCallback) {
+ nc = (NameCallback) callback;
+ } else if (callback instanceof PasswordCallback) {
+ pc = (PasswordCallback) callback;
+ } else if (callback instanceof RealmCallback) {
+ rc = (RealmCallback) callback;
+ } else {
+ throw new UnsupportedCallbackException(callback,
+ "Unrecognized SASL client callback");
+ }
+ }
+ if (nc != null) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("SASL client callback: setting username: " + userName);
+ }
+ nc.setName(userName);
+ }
+ if (pc != null) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("SASL client callback: setting userPassword");
+ }
+ pc.setPassword(userPassword);
+ }
+ if (rc != null) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("SASL client callback: setting realm: "
+ + rc.getDefaultText());
+ }
+ rc.setText(rc.getDefaultText());
+ }
+ }
+
+ static String encodeIdentifier(byte[] identifier) {
+ return new String(Base64.encodeBase64(identifier));
+ }
+
+ static char[] encodePassword(byte[] password) {
+ return new String(Base64.encodeBase64(password)).toCharArray();
+ }
+ }
+ }
+
+ public static class Server extends HadoopThriftAuthBridge.Server {
+ final UserGroupInformation realUgi;
+ DelegationTokenSecretManager secretManager;
+ private final static long DELEGATION_TOKEN_GC_INTERVAL = 3600000; // 1 hour
+ //Delegation token related keys
+ public static final String DELEGATION_KEY_UPDATE_INTERVAL_KEY =
+ "hive.cluster.delegation.key.update-interval";
+ public static final long DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT =
+ 24*60*60*1000; // 1 day
+ public static final String DELEGATION_TOKEN_RENEW_INTERVAL_KEY =
+ "hive.cluster.delegation.token.renew-interval";
+ public static final long DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT =
+ 24*60*60*1000; // 1 day
+ public static final String DELEGATION_TOKEN_MAX_LIFETIME_KEY =
+ "hive.cluster.delegation.token.max-lifetime";
+ public static final long DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT =
+ 7*24*60*60*1000; // 7 days
+ public static final String DELEGATION_TOKEN_STORE_CLS =
+ "hive.cluster.delegation.token.store.class";
+ public static final String DELEGATION_TOKEN_STORE_ZK_CONNECT_STR =
+ "hive.cluster.delegation.token.store.zookeeper.connectString";
+ public static final String DELEGATION_TOKEN_STORE_ZK_ROOT_NODE =
+ "hive.cluster.delegation.token.store.zookeeper.rootNode";
+ public static final String DELEGATION_TOKEN_STORE_ZK_ROOT_NODE_DEFAULT =
+ "/hive/cluster/delegation";
+
+ public Server() throws TTransportException {
+ try {
+ realUgi = UserGroupInformation.getCurrentUser();
+ } catch (IOException ioe) {
+ throw new TTransportException(ioe);
+ }
+ }
+ /**
+ * Create a server with a kerberos keytab/principal.
+ */
+ private Server(String keytabFile, String principalConf)
+ throws TTransportException {
+ if (keytabFile == null || keytabFile.isEmpty()) {
+ throw new TTransportException("No keytab specified");
+ }
+ if (principalConf == null || principalConf.isEmpty()) {
+ throw new TTransportException("No principal specified");
+ }
+
+ // Login from the keytab
+ String kerberosName;
+ try {
+ kerberosName =
+ SecurityUtil.getServerPrincipal(principalConf, "0.0.0.0");
+ UserGroupInformation.loginUserFromKeytab(
+ kerberosName, keytabFile);
+ realUgi = UserGroupInformation.getLoginUser();
+ assert realUgi.isFromKeytab();
+ } catch (IOException ioe) {
+ throw new TTransportException(ioe);
+ }
+ }
+
+ /**
+ * Create a TTransportFactory that, upon connection of a client socket,
+ * negotiates a Kerberized SASL transport. The resulting TTransportFactory
+ * can be passed as both the input and output transport factory when
+ * instantiating a TThreadPoolServer, for example.
+ *
+ */
+ @Override
+ public TTransportFactory createTransportFactory() throws TTransportException
+ {
+ // Parse out the kerberos principal, host, realm.
+ String kerberosName = realUgi.getUserName();
+ final String names[] = SaslRpcServer.splitKerberosName(kerberosName);
+ if (names.length != 3) {
+ throw new TTransportException("Kerberos principal should have 3 parts: " + kerberosName);
+ }
+
+ TSaslServerTransport.Factory transFactory = new TSaslServerTransport.Factory();
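+ // Register both mechanisms: GSSAPI for Kerberos clients and DIGEST-MD5 for
+ // clients that authenticate with a delegation token.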
+ transFactory.addServerDefinition(
+ AuthMethod.KERBEROS.getMechanismName(),
+ names[0], names[1], // two parts of kerberos principal
+ SaslRpcServer.SASL_PROPS,
+ new SaslRpcServer.SaslGssCallbackHandler());
+ transFactory.addServerDefinition(AuthMethod.DIGEST.getMechanismName(),
+ null, SaslRpcServer.SASL_DEFAULT_REALM,
+ SaslRpcServer.SASL_PROPS, new SaslDigestCallbackHandler(secretManager));
+
+ return new TUGIAssumingTransportFactory(transFactory, realUgi);
+ }
+
+ /**
+ * Wrap a TProcessor in such a way that, before processing any RPC, it
+ * assumes the UserGroupInformation of the user authenticated by
+ * the SASL transport.
+ */
+ @Override
+ public TProcessor wrapProcessor(TProcessor processor) {
+ return new TUGIAssumingProcessor(processor, secretManager);
+ }
+
+ protected TokenStoreDelegationTokenSecretManager.TokenStore getTokenStore(Configuration conf)
+ throws IOException {
+ String tokenStoreClassName = conf.get(DELEGATION_TOKEN_STORE_CLS, "");
+ if (StringUtils.isBlank(tokenStoreClassName)) {
+ return new MemoryTokenStore();
+ }
+ try {
+ Class<? extends TokenStoreDelegationTokenSecretManager.TokenStore> storeClass = Class
+ .forName(tokenStoreClassName).asSubclass(
+ TokenStoreDelegationTokenSecretManager.TokenStore.class);
+ return ReflectionUtils.newInstance(storeClass, conf);
+ } catch (ClassNotFoundException e) {
+ throw new IOException("Error initializing delegation token store: " + tokenStoreClassName,
+ e);
+ }
+ }
+
+ @Override
+ public void startDelegationTokenSecretManager(Configuration conf)
+ throws IOException{
+ long secretKeyInterval =
+ conf.getLong(DELEGATION_KEY_UPDATE_INTERVAL_KEY,
+ DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
+ long tokenMaxLifetime =
+ conf.getLong(DELEGATION_TOKEN_MAX_LIFETIME_KEY,
+ DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
+ long tokenRenewInterval =
+ conf.getLong(DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
+ DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
+
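+ // Token persistence is delegated to the pluggable TokenStore returned by
+ // getTokenStore(conf); tokens are not cached in this process.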
+ secretManager = new TokenStoreDelegationTokenSecretManager(secretKeyInterval,
+ tokenMaxLifetime,
+ tokenRenewInterval,
+ DELEGATION_TOKEN_GC_INTERVAL, getTokenStore(conf));
+ secretManager.startThreads();
+ }
+
+ @Override
+ public String getDelegationToken(final String owner, final String renewer)
+ throws IOException, InterruptedException {
+ if (!authenticationMethod.get().equals(AuthenticationMethod.KERBEROS)) {
+ throw new AuthorizationException(
+ "Delegation Token can be issued only with kerberos authentication");
+ }
+ //if the user asking for the token is the same as the 'owner' then don't do
+ //any proxy authorization checks. For cases like oozie, where it gets
+ //a delegation token for another user, we need to make sure oozie is
+ //authorized to get a delegation token.
+ //Do all checks on short names
+ UserGroupInformation currUser = UserGroupInformation.getCurrentUser();
+ UserGroupInformation ownerUgi = UserGroupInformation.createRemoteUser(owner);
+ if (!ownerUgi.getShortUserName().equals(currUser.getShortUserName())) {
+ //in the case of proxy users, the getCurrentUser will return the
+ //real user (e.g. oozie) due to the doAs that happened just before the
+ //server started executing the method getDelegationToken in the MetaStore
+ ownerUgi = UserGroupInformation.createProxyUser(owner,
+ UserGroupInformation.getCurrentUser());
+ InetAddress remoteAddr = getRemoteAddress();
+ ProxyUsers.authorize(ownerUgi,remoteAddr.getHostAddress(), null);
+ }
+ return ownerUgi.doAs(new PrivilegedExceptionAction<String>() {
+ public String run() throws IOException {
+ return secretManager.getDelegationToken(renewer);
+ }
+ });
+ }
+
+ @Override
+ public long renewDelegationToken(String tokenStrForm) throws IOException {
+ if (!authenticationMethod.get().equals(AuthenticationMethod.KERBEROS)) {
+ throw new AuthorizationException(
+ "Delegation Token can be issued only with kerberos authentication");
+ }
+ return secretManager.renewDelegationToken(tokenStrForm);
+ }
+
+ @Override
+ public void cancelDelegationToken(String tokenStrForm) throws IOException {
+ secretManager.cancelDelegationToken(tokenStrForm);
+ }
+
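+ // Per-call state recorded by TUGIAssumingProcessor; read by getRemoteAddress()
+ // and the delegation token methods above.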
+ final static ThreadLocal<InetAddress> remoteAddress =
+ new ThreadLocal<InetAddress>() {
+ @Override
+ protected synchronized InetAddress initialValue() {
+ return null;
+ }
+ };
+
+ @Override
+ public InetAddress getRemoteAddress() {
+ return remoteAddress.get();
+ }
+
+ final static ThreadLocal<AuthenticationMethod> authenticationMethod =
+ new ThreadLocal<AuthenticationMethod>() {
+ @Override
+ protected synchronized AuthenticationMethod initialValue() {
+ return AuthenticationMethod.TOKEN;
+ }
+ };
+
+ /** CallbackHandler for SASL DIGEST-MD5 mechanism */
+ // This code is pretty much completely based on Hadoop's
+ // SaslRpcServer.SaslDigestCallbackHandler - the only reason we could not
+ // use that Hadoop class as-is was because it needs a Server.Connection object
+ // which is relevant in hadoop rpc but not here in the metastore - so the
+ // code below does not deal with the Server.Connection object.
+ static class SaslDigestCallbackHandler implements CallbackHandler {
+ private final DelegationTokenSecretManager secretManager;
+
+ public SaslDigestCallbackHandler(
+ DelegationTokenSecretManager secretManager) {
+ this.secretManager = secretManager;
+ }
+
+ private char[] getPassword(DelegationTokenIdentifier tokenid) throws InvalidToken {
+ return encodePassword(secretManager.retrievePassword(tokenid));
+ }
+
+ private char[] encodePassword(byte[] password) {
+ return new String(Base64.encodeBase64(password)).toCharArray();
+ }
+ /** {@inheritDoc} */
+ @Override
+ public void handle(Callback[] callbacks) throws InvalidToken,
+ UnsupportedCallbackException {
+ NameCallback nc = null;
+ PasswordCallback pc = null;
+ AuthorizeCallback ac = null;
+ for (Callback callback : callbacks) {
+ if (callback instanceof AuthorizeCallback) {
+ ac = (AuthorizeCallback) callback;
+ } else if (callback instanceof NameCallback) {
+ nc = (NameCallback) callback;
+ } else if (callback instanceof PasswordCallback) {
+ pc = (PasswordCallback) callback;
+ } else if (callback instanceof RealmCallback) {
+ continue; // realm is ignored
+ } else {
+ throw new UnsupportedCallbackException(callback,
+ "Unrecognized SASL DIGEST-MD5 Callback");
+ }
+ }
+ if (pc != null) {
+ DelegationTokenIdentifier tokenIdentifier = SaslRpcServer.
+ getIdentifier(nc.getDefaultName(), secretManager);
+ char[] password = getPassword(tokenIdentifier);
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("SASL server DIGEST-MD5 callback: setting password "
+ + "for client: " + tokenIdentifier.getUser());
+ }
+ pc.setPassword(password);
+ }
+ if (ac != null) {
+ String authid = ac.getAuthenticationID();
+ String authzid = ac.getAuthorizationID();
+ if (authid.equals(authzid)) {
+ ac.setAuthorized(true);
+ } else {
+ ac.setAuthorized(false);
+ }
+ if (ac.isAuthorized()) {
+ if (LOG.isDebugEnabled()) {
+ String username =
+ SaslRpcServer.getIdentifier(authzid, secretManager).getUser().getUserName();
+ LOG.debug("SASL server DIGEST-MD5 callback: setting "
+ + "canonicalized client ID: " + username);
+ }
+ ac.setAuthorizedID(authzid);
+ }
+ }
+ }
+ }
+
+ /**
+ * Processor that pulls the SaslServer object out of the transport, and
+ * assumes the remote user's UGI before calling through to the original
+ * processor.
+ *
+ * This is used on the server side to set the UGI for each specific call.
+ */
+ private class TUGIAssumingProcessor implements TProcessor {
+ final TProcessor wrapped;
+ DelegationTokenSecretManager secretManager;
+ TUGIAssumingProcessor(TProcessor wrapped, DelegationTokenSecretManager secretManager) {
+ this.wrapped = wrapped;
+ this.secretManager = secretManager;
+ }
+
+ public boolean process(final TProtocol inProt, final TProtocol outProt) throws TException {
+ TTransport trans = inProt.getTransport();
+ if (!(trans instanceof TSaslServerTransport)) {
+ throw new TException("Unexpected non-SASL transport " + trans.getClass());
+ }
+ TSaslServerTransport saslTrans = (TSaslServerTransport)trans;
+ SaslServer saslServer = saslTrans.getSaslServer();
+ String authId = saslServer.getAuthorizationID();
+ authenticationMethod.set(AuthenticationMethod.KERBEROS);
+ LOG.debug("AUTH ID ======>" + authId);
+ String endUser = authId;
+
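+ // For DIGEST-MD5 the authorization id is the encoded token identifier; resolve
+ // it to the token owner and record that this call was token-authenticated.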
+ if(saslServer.getMechanismName().equals("DIGEST-MD5")) {
+ try {
+ TokenIdentifier tokenId = SaslRpcServer.getIdentifier(authId,
+ secretManager);
+ endUser = tokenId.getUser().getUserName();
+ authenticationMethod.set(AuthenticationMethod.TOKEN);
+ } catch (InvalidToken e) {
+ throw new TException(e.getMessage());
+ }
+ }
+ Socket socket = ((TSocket)(saslTrans.getUnderlyingTransport())).getSocket();
+ remoteAddress.set(socket.getInetAddress());
+ try {
+ UserGroupInformation clientUgi = UserGroupInformation.createProxyUser(
+ endUser, UserGroupInformation.getLoginUser());
+ return clientUgi.doAs(new PrivilegedExceptionAction<Boolean>() {
+ public Boolean run() {
+ try {
+ return wrapped.process(inProt, outProt);
+ } catch (TException te) {
+ throw new RuntimeException(te);
+ }
+ }
+ });
+ } catch (RuntimeException rte) {
+ if (rte.getCause() instanceof TException) {
+ throw (TException)rte.getCause();
+ }
+ throw rte;
+ } catch (InterruptedException ie) {
+ throw new RuntimeException(ie); // unexpected!
+ } catch (IOException ioe) {
+ throw new RuntimeException(ioe); // unexpected!
+ }
+ }
+ }
+
+ /**
+ * A TransportFactory that wraps another one, but assumes a specified UGI
+ * before calling through.
+ *
+ * This is used on the server side to assume the server's Principal when accepting
+ * clients.
+ */
+ static class TUGIAssumingTransportFactory extends TTransportFactory {
+ private final UserGroupInformation ugi;
+ private final TTransportFactory wrapped;
+
+ public TUGIAssumingTransportFactory(TTransportFactory wrapped, UserGroupInformation ugi) {
+ assert wrapped != null;
+ assert ugi != null;
+
+ this.wrapped = wrapped;
+ this.ugi = ugi;
+ }
+
+ @Override
+ public TTransport getTransport(final TTransport trans) {
+ return ugi.doAs(new PrivilegedAction<TTransport>() {
+ public TTransport run() {
+ return wrapped.getTransport(trans);
+ }
+ });
+ }
+ }
+ }
+ }
Index: shims/src/common-secure/java/org/apache/hadoop/hive/thrift/DelegationTokenIdentifier.java
===================================================================
--- shims/src/common-secure/java/org/apache/hadoop/hive/thrift/DelegationTokenIdentifier.java (revision 0)
+++ shims/src/common-secure/java/org/apache/hadoop/hive/thrift/DelegationTokenIdentifier.java (working copy)
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.thrift;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+
+/**
+ * A delegation token identifier that is specific to Hive.
+ */
+public class DelegationTokenIdentifier
+ extends AbstractDelegationTokenIdentifier {
+ public static final Text HIVE_DELEGATION_KIND = new Text("HIVE_DELEGATION_TOKEN");
+
+ /**
+ * Create an empty delegation token identifier for reading into.
+ */
+ public DelegationTokenIdentifier() {
+ }
+
+ /**
+ * Create a new delegation token identifier
+ * @param owner the effective username of the token owner
+ * @param renewer the username of the renewer
+ * @param realUser the real username of the token owner
+ */
+ public DelegationTokenIdentifier(Text owner, Text renewer, Text realUser) {
+ super(owner, renewer, realUser);
+ }
+
+ @Override
+ public Text getKind() {
+ return HIVE_DELEGATION_KIND;
+ }
+
+}
Index: shims/src/common-secure/java/org/apache/hadoop/hive/thrift/ZooKeeperTokenStore.java
===================================================================
--- shims/src/common-secure/java/org/apache/hadoop/hive/thrift/ZooKeeperTokenStore.java (revision 0)
+++ shims/src/common-secure/java/org/apache/hadoop/hive/thrift/ZooKeeperTokenStore.java (working copy)
@@ -0,0 +1,323 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.thrift;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.thrift.TokenStoreDelegationTokenSecretManager.TokenStoreError;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
+import org.apache.hadoop.security.token.delegation.HiveDelegationTokenSupport;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.ZooKeeper.States;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * ZooKeeper token store implementation.
+ */
+public class ZooKeeperTokenStore implements TokenStoreDelegationTokenSecretManager.TokenStore {
+
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(ZooKeeperTokenStore.class.getName());
+
+ private static final String ZK_SEQ_FORMAT = "%010d";
+ private static final String NODE_KEYS = "/keys";
+ private static final String NODE_TOKENS = "/tokens";
+
+ private String rootNode = "";
+ private volatile ZooKeeper zkSession;
+ private String zkConnectString;
+ private final int zkSessionTimeout = 3000;
+
+ private class ZooKeeperWatcher implements Watcher {
+ public void process(org.apache.zookeeper.WatchedEvent event) {
+ LOGGER.info(event.toString());
+ if (event.getState() == Watcher.Event.KeeperState.Expired) {
+ LOGGER.warn("ZooKeeper session expired, discarding connection");
+ try {
+ zkSession.close();
+ } catch (Throwable e) {
+ LOGGER.warn("Failed to close connection on expired session", e);
+ }
+ }
+ }
+ }
+
+ /**
+ * Default constructor for dynamic instantiation w/ Configurable
+ * (ReflectionUtils does not support Configuration constructor injection).
+ */
+ protected ZooKeeperTokenStore() {
+ }
+
+ public ZooKeeperTokenStore(String hostPort) {
+ this.zkConnectString = hostPort;
+ init();
+ }
+
+ private ZooKeeper getSession() {
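+ // Lazily (re)create the ZooKeeper session using double-checked locking so a
+ // closed or expired session is replaced on next use.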
+ if (zkSession == null || zkSession.getState() == States.CLOSED) {
+ synchronized (this) {
+ if (zkSession == null || zkSession.getState() == States.CLOSED) {
+ try {
+ zkSession = new ZooKeeper(this.zkConnectString, this.zkSessionTimeout,
+ new ZooKeeperWatcher());
+ } catch (IOException ex) {
+ throw new TokenStoreError("Token store error.", ex);
+ }
+ }
+ }
+ }
+ return zkSession;
+ }
+
+ private static String ensurePath(ZooKeeper zk, String path) throws KeeperException,
+ InterruptedException {
+ String[] pathComps = StringUtils.splitByWholeSeparator(path, "/");
+ String currentPath = "";
+ for (String pathComp : pathComps) {
+ currentPath += "/" + pathComp;
+ try {
+ String node = zk.create(currentPath, new byte[0], Ids.OPEN_ACL_UNSAFE,
+ CreateMode.PERSISTENT);
+ LOGGER.info("Created path: " + node);
+ } catch (KeeperException.NodeExistsException e) {
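+ // node already exists - nothing to do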
+ }
+ }
+ return currentPath;
+ }
+
+ private void init() {
+ if (this.zkConnectString == null) {
+ throw new IllegalStateException("Not initialized");
+ }
+
+ if (this.zkSession != null) {
+ try {
+ this.zkSession.close();
+ } catch (InterruptedException ex) {
+ LOGGER.warn("Failed to close existing session.", ex);
+ }
+ }
+
+ ZooKeeper zk = getSession();
+ try {
+ ensurePath(zk, rootNode + NODE_KEYS);
+ ensurePath(zk, rootNode + NODE_TOKENS);
+ } catch (Exception e) {
+ throw new TokenStoreError("Failed to validate token path.", e);
+ }
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ if (conf != null) {
+ this.zkConnectString = conf.get(
+ HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_CONNECT_STR, null);
+ this.rootNode = conf.get(
+ HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_ROOT_NODE,
+ HadoopThriftAuthBridge20S.Server.DELEGATION_TOKEN_STORE_ZK_ROOT_NODE_DEFAULT);
+ }
+ init();
+ }
+
+ @Override
+ public Configuration getConf() {
+ return null; // not required
+ }
+
+ private Map<Integer, byte[]> getAllKeys() throws KeeperException,
+ InterruptedException {
+
+ String masterKeyNode = rootNode + NODE_KEYS;
+ ZooKeeper zk = getSession();
+ List<String> nodes = zk.getChildren(masterKeyNode, false);
+ Map<Integer, byte[]> result = new HashMap<Integer, byte[]>();
+ for (String node : nodes) {
+ byte[] data = zk.getData(masterKeyNode + "/" + node, false, null);
+ if (data != null) {
+ result.put(getSeq(node), data);
+ }
+ }
+ return result;
+ }
+
+ private int getSeq(String path) {
+ String[] pathComps = path.split("/");
+ return Integer.parseInt(pathComps[pathComps.length-1]);
+ }
+
+ @Override
+ public int addMasterKey(String s) {
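+ // PERSISTENT_SEQUENTIAL: ZooKeeper appends the sequence number to the node
+ // name, which becomes the key id returned to the caller.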
+ try {
+ ZooKeeper zk = getSession();
+ String newNode = zk.create(rootNode + NODE_KEYS + "/", s.getBytes(), Ids.OPEN_ACL_UNSAFE,
+ CreateMode.PERSISTENT_SEQUENTIAL);
+ LOGGER.info("Added key {}", newNode);
+ return getSeq(newNode);
+ } catch (KeeperException ex) {
+ throw new TokenStoreError(ex);
+ } catch (InterruptedException ex) {
+ throw new TokenStoreError(ex);
+ }
+ }
+
+ @Override
+ public void updateMasterKey(int keySeq, String s) {
+ try {
+ ZooKeeper zk = getSession();
+ zk.setData(rootNode + NODE_KEYS + "/" + String.format(ZK_SEQ_FORMAT, keySeq), s.getBytes(),
+ -1);
+ } catch (KeeperException ex) {
+ throw new TokenStoreError(ex);
+ } catch (InterruptedException ex) {
+ throw new TokenStoreError(ex);
+ }
+ }
+
+ @Override
+ public boolean removeMasterKey(int keySeq) {
+ try {
+ ZooKeeper zk = getSession();
+ zk.delete(rootNode + NODE_KEYS + "/" + String.format(ZK_SEQ_FORMAT, keySeq), -1);
+ return true;
+ } catch (KeeperException.NoNodeException ex) {
+ return false;
+ } catch (KeeperException ex) {
+ throw new TokenStoreError(ex);
+ } catch (InterruptedException ex) {
+ throw new TokenStoreError(ex);
+ }
+ }
+
+ @Override
+ public String[] getMasterKeys() {
+ try {
+ Map<Integer, byte[]> allKeys = getAllKeys();
+ String[] result = new String[allKeys.size()];
+ int resultIdx = 0;
+ for (byte[] keyBytes : allKeys.values()) {
+ result[resultIdx++] = new String(keyBytes);
+ }
+ return result;
+ } catch (KeeperException ex) {
+ throw new TokenStoreError(ex);
+ } catch (InterruptedException ex) {
+ throw new TokenStoreError(ex);
+ }
+ }
+
+
+ private String getTokenPath(DelegationTokenIdentifier tokenIdentifier) {
+ try {
+ return rootNode + NODE_TOKENS + "/"
+ + TokenStoreDelegationTokenSecretManager.encodeWritable(tokenIdentifier);
+ } catch (IOException ex) {
+ throw new TokenStoreError("Failed to encode token identifier", ex);
+ }
+ }
+
+ @Override
+ public boolean addToken(DelegationTokenIdentifier tokenIdentifier,
+ DelegationTokenInformation token) {
+ try {
+ ZooKeeper zk = getSession();
+ byte[] tokenBytes = HiveDelegationTokenSupport.encodeDelegationTokenInformation(token);
+ String newNode = zk.create(getTokenPath(tokenIdentifier),
+ tokenBytes, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+ LOGGER.info("Added token: {}", newNode);
+ return true;
+ } catch (KeeperException.NodeExistsException ex) {
+ return false;
+ } catch (KeeperException ex) {
+ throw new TokenStoreError(ex);
+ } catch (InterruptedException ex) {
+ throw new TokenStoreError(ex);
+ }
+ }
+
+ @Override
+ public boolean removeToken(DelegationTokenIdentifier tokenIdentifier) {
+ try {
+ ZooKeeper zk = getSession();
+ zk.delete(getTokenPath(tokenIdentifier), -1);
+ return true;
+ } catch (KeeperException.NoNodeException ex) {
+ return false;
+ } catch (KeeperException ex) {
+ throw new TokenStoreError(ex);
+ } catch (InterruptedException ex) {
+ throw new TokenStoreError(ex);
+ }
+ }
+
+ @Override
+ public DelegationTokenInformation getToken(DelegationTokenIdentifier tokenIdentifier) {
+ try {
+ ZooKeeper zk = getSession();
+ byte[] tokenBytes = zk.getData(getTokenPath(tokenIdentifier), false, null);
+ try {
+ return HiveDelegationTokenSupport.decodeDelegationTokenInformation(tokenBytes);
+ } catch (Exception ex) {
+ throw new TokenStoreError("Failed to decode token", ex);
+ }
+ } catch (KeeperException.NoNodeException ex) {
+ return null;
+ } catch (KeeperException ex) {
+ throw new TokenStoreError(ex);
+ } catch (InterruptedException ex) {
+ throw new TokenStoreError(ex);
+ }
+ }
+
+ @Override
+ public List<DelegationTokenIdentifier> getAllDelegationTokenIdentifiers() {
+ String containerNode = rootNode + NODE_TOKENS;
+ final List<String> nodes;
+ try {
+ nodes = getSession().getChildren(containerNode, false);
+ } catch (KeeperException ex) {
+ throw new TokenStoreError(ex);
+ } catch (InterruptedException ex) {
+ throw new TokenStoreError(ex);
+ }
+ List<DelegationTokenIdentifier> result = new java.util.ArrayList<DelegationTokenIdentifier>(
+ nodes.size());
+ for (String node : nodes) {
+ DelegationTokenIdentifier id = new DelegationTokenIdentifier();
+ try {
+ TokenStoreDelegationTokenSecretManager.decodeWritable(id, node);
+ result.add(id);
+ } catch (Exception e) {
+ LOGGER.warn("Failed to decode token '{}'", node);
+ }
+ }
+ return result;
+ }
+
+}
Index: shims/src/common-secure/java/org/apache/hadoop/hive/thrift/MemoryTokenStore.java
===================================================================
--- shims/src/common-secure/java/org/apache/hadoop/hive/thrift/MemoryTokenStore.java (revision 0)
+++ shims/src/common-secure/java/org/apache/hadoop/hive/thrift/MemoryTokenStore.java (working copy)
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.thrift;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
+
+/**
+ * Default in-memory token store implementation.
+ */
+public class MemoryTokenStore implements TokenStoreDelegationTokenSecretManager.TokenStore {
+
+ private final java.util.concurrent.ConcurrentHashMap<Integer, String> masterKeys
+ = new java.util.concurrent.ConcurrentHashMap<Integer, String>();
+
+ private final java.util.concurrent.ConcurrentHashMap<DelegationTokenIdentifier, DelegationTokenInformation> tokens
+ = new java.util.concurrent.ConcurrentHashMap<DelegationTokenIdentifier, DelegationTokenInformation>();
+
+ private final AtomicInteger masterKeySeq = new AtomicInteger();
+
+ @Override
+ public void setConf(Configuration conf) {
+ }
+
+ @Override
+ public Configuration getConf() {
+ return null;
+ }
+
+ @Override
+ public int addMasterKey(String s) {
+ int keySeq = masterKeySeq.getAndIncrement();
+ masterKeys.putIfAbsent(keySeq, s);
+ return keySeq;
+ }
+
+ @Override
+ public void updateMasterKey(int keySeq, String s) {
+ masterKeys.put(keySeq, s);
+ }
+
+ @Override
+ public boolean removeMasterKey(int keySeq) {
+ return masterKeys.remove(keySeq) != null;
+ }
+
+ @Override
+ public String[] getMasterKeys() {
+ return masterKeys.values().toArray(new String[0]);
+ }
+
+ @Override
+ public boolean addToken(DelegationTokenIdentifier tokenIdentifier,
+ DelegationTokenInformation token) {
+ DelegationTokenInformation tokenInfo = tokens.putIfAbsent(tokenIdentifier, token);
+ return (tokenInfo == null);
+ }
+
+ @Override
+ public boolean removeToken(DelegationTokenIdentifier tokenIdentifier) {
+ DelegationTokenInformation tokenInfo = tokens.remove(tokenIdentifier);
+ return tokenInfo != null;
+ }
+
+ @Override
+ public DelegationTokenInformation getToken(DelegationTokenIdentifier tokenIdentifier) {
+ return tokens.get(tokenIdentifier);
+ }
+
+ @Override
+ public List<DelegationTokenIdentifier> getAllDelegationTokenIdentifiers() {
+ List<DelegationTokenIdentifier> result = new java.util.ArrayList<DelegationTokenIdentifier>(
+ tokens.size());
+ for (DelegationTokenIdentifier id : tokens.keySet()) {
+ result.add(id);
+ }
+ return result;
+ }
+
+}
Index: shims/src/common-secure/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java
===================================================================
--- shims/src/common-secure/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java (revision 0)
+++ shims/src/common-secure/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java (working copy)
@@ -0,0 +1,407 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.thrift;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.apache.hadoop.security.token.delegation.DelegationKey;
+import org.apache.hadoop.security.token.delegation.HiveDelegationTokenSupport;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Extension of {@link DelegationTokenSecretManager} that supports an alternative to the default
+ * in-memory token management, enabling fail-over and clustering through a pluggable token store (ZooKeeper etc.).
+ * Delegation tokens will be retrieved from the store on-demand and (unlike base class behavior) not
+ * cached in memory. This avoids complexities related to token expiration. The security token is
+ * needed only at the time the transport is opened (as opposed to per interface operation). The
+ * assumption therefore is low cost of interprocess token retrieval (for random read efficient store
+ * such as ZooKeeper) compared to overhead of synchronizing per-process in-memory token caches.
+ * The wrapper incorporates the token store abstraction within the limitations of current
+ * Hive/Hadoop dependency (.20S) with minimum code duplication.
+ * Eventually this should be supported by Hadoop security directly.
+ */
+public class TokenStoreDelegationTokenSecretManager extends DelegationTokenSecretManager {
+
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(TokenStoreDelegationTokenSecretManager.class.getName());
+
+ /**
+ * Exception for internal token store errors that typically cannot be handled by the caller.
+ */
+ public static class TokenStoreError extends RuntimeException {
+ private static final long serialVersionUID = -8693819817623074083L;
+
+ public TokenStoreError(Throwable cause) {
+ super(cause);
+ }
+
+ public TokenStoreError(String message, Throwable cause) {
+ super(message, cause);
+ }
+ }
+
+ /**
+ * Interface for pluggable token store that can be implemented as shared store with external
+ * storage (for example with ZooKeeper for HA).
+ * Internal, store specific errors are translated into {@link TokenStoreError}.
+ */
+ public static interface TokenStore extends Configurable {
+ /**
+ * Add new master key. The token store assigns and returns the sequence number.
+ * Caller needs to use the identifier to update the key (since it is embedded in the key).
+ *
+ * @param s
+ * @return sequence number for new key
+ */
+ int addMasterKey(String s) throws TokenStoreError;
+
+ void updateMasterKey(int keySeq, String s) throws TokenStoreError;
+
+ /**
+ * Remove key for given id.
+ * @param keySeq
+ * @return false if key no longer present, true otherwise.
+ */
+ boolean removeMasterKey(int keySeq);
+
+ String[] getMasterKeys() throws TokenStoreError;
+
+ /**
+ * Add token. If identifier is already present, token won't be added.
+ * @param tokenIdentifier
+ * @param token
+ * @return true if token was added, false for existing identifier
+ */
+ boolean addToken(DelegationTokenIdentifier tokenIdentifier,
+ DelegationTokenInformation token) throws TokenStoreError;
+
+ /**
+ * Get token. Returns null if the token does not exist.
+ * @param tokenIdentifier
+ * @return token information, or null if the token does not exist
+ */
+ DelegationTokenInformation getToken(DelegationTokenIdentifier tokenIdentifier)
+ throws TokenStoreError;
+
+ /**
+ * Remove token. Ignores token does not exist.
+ * @param tokenIdentifier
+ */
+ boolean removeToken(DelegationTokenIdentifier tokenIdentifier) throws TokenStoreError;
+
+ /**
+ * List of all token identifiers in the store. This is used to remove expired tokens
+ * and a potential scalability improvement would be to partition by master key id.
+ * @return list of all token identifiers
+ */
+ List<DelegationTokenIdentifier> getAllDelegationTokenIdentifiers();
+
+ }
+
+ final private long keyUpdateInterval;
+ final private long tokenRemoverScanInterval;
+ private Thread tokenRemoverThread;
+
+ final private TokenStore tokenStore;
+
+ public TokenStoreDelegationTokenSecretManager(long delegationKeyUpdateInterval,
+ long delegationTokenMaxLifetime, long delegationTokenRenewInterval,
+ long delegationTokenRemoverScanInterval, TokenStore sharedStore) {
+ super(delegationKeyUpdateInterval, delegationTokenMaxLifetime, delegationTokenRenewInterval,
+ delegationTokenRemoverScanInterval);
+ this.keyUpdateInterval = delegationKeyUpdateInterval;
+ this.tokenRemoverScanInterval = delegationTokenRemoverScanInterval;
+
+ this.tokenStore = sharedStore;
+ }
+
+ protected DelegationTokenIdentifier getTokenIdentifier(Token<DelegationTokenIdentifier> token)
+ throws IOException {
+ // turn bytes back into identifier for cache lookup
+ ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
+ DataInputStream in = new DataInputStream(buf);
+ DelegationTokenIdentifier id = createIdentifier();
+ id.readFields(in);
+ return id;
+ }
+
+ protected Map<Integer, DelegationKey> reloadKeys() {
+ // read keys from token store
+ String[] allKeys = tokenStore.getMasterKeys();
+ Map<Integer, DelegationKey> keys
+ = new java.util.HashMap<Integer, DelegationKey>(allKeys.length);
+ for (String keyStr : allKeys) {
+ DelegationKey key = new DelegationKey();
+ try {
+ decodeWritable(key, keyStr);
+ keys.put(key.getKeyId(), key);
+ } catch (IOException ex) {
+ LOGGER.error("Failed to load master key.", ex);
+ }
+ }
+ synchronized (this) {
+ super.allKeys.clear();
+ super.allKeys.putAll(keys);
+ }
+ return keys;
+ }
+
+ @Override
+ public byte[] retrievePassword(DelegationTokenIdentifier identifier)
+ throws org.apache.hadoop.security.token.SecretManager.InvalidToken {
+ DelegationTokenInformation info = this.tokenStore.getToken(identifier);
+ if (info == null) {
+ throw new InvalidToken("token expired or does not exist: " + identifier);
+ }
+ // must reuse super as info.getPassword is not accessible
+ synchronized (this) {
+ try {
+ super.currentTokens.put(identifier, info);
+ return super.retrievePassword(identifier);
+ } finally {
+ super.currentTokens.remove(identifier);
+ }
+ }
+ }
+
+ @Override
+ public DelegationTokenIdentifier cancelToken(Token<DelegationTokenIdentifier> token,
+ String canceller) throws IOException {
+ DelegationTokenIdentifier id = getTokenIdentifier(token);
+ LOGGER.info("Token cancelation requested for identifier: "+id);
+ this.tokenStore.removeToken(id);
+ return id;
+ }
+
+ /**
+ * Create the password and add it to shared store.
+ */
+ @Override
+ protected byte[] createPassword(DelegationTokenIdentifier id) {
+ byte[] password;
+ DelegationTokenInformation info;
+ synchronized (this) {
+ password = super.createPassword(id);
+ // add new token to shared store
+ // need to persist expiration along with password
+ info = super.currentTokens.remove(id);
+ if (info == null) {
+ throw new IllegalStateException("Failed to retrieve token after creation");
+ }
+ }
+ this.tokenStore.addToken(id, info);
+ return password;
+ }
+
+ @Override
+ public long renewToken(Token<DelegationTokenIdentifier> token,
+ String renewer) throws InvalidToken, IOException {
+ // since renewal is KERBEROS authenticated token may not be cached
+ final DelegationTokenIdentifier id = getTokenIdentifier(token);
+ DelegationTokenInformation tokenInfo = this.tokenStore.getToken(id);
+ if (tokenInfo == null) {
+ throw new InvalidToken("token does not exist: " + id); // no token found
+ }
+ // ensure associated master key is available
+ if (!super.allKeys.containsKey(id.getMasterKeyId())) {
+ LOGGER.info("Unknown master key (id={}), (re)loading keys from token store.",
+ id.getMasterKeyId());
+ reloadKeys();
+ }
+ // reuse super renewal logic
+ synchronized (this) {
+ super.currentTokens.put(id, tokenInfo);
+ try {
+ return super.renewToken(token, renewer);
+ } finally {
+ super.currentTokens.remove(id);
+ }
+ }
+ }
+
+ public static String encodeWritable(Writable key) throws IOException {
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ DataOutputStream dos = new DataOutputStream(bos);
+ key.write(dos);
+ dos.flush();
+ return Base64.encodeBase64URLSafeString(bos.toByteArray());
+ }
+
+ public static void decodeWritable(Writable w, String idStr) throws IOException {
+ DataInputStream in = new DataInputStream(new ByteArrayInputStream(Base64.decodeBase64(idStr)));
+ w.readFields(in);
+ }
+
+ /**
+ * Synchronize master key updates / sequence generation for multiple nodes.
+ * NOTE: {@link AbstractDelegationTokenSecretManager} keeps currentKey private, so we need
+ * to utilize this "hook" to manipulate the key through the object reference.
+ * This .20S workaround should cease to exist when Hadoop supports token store.
+ */
+ @Override
+ protected void logUpdateMasterKey(DelegationKey key) throws IOException {
+ int keySeq = this.tokenStore.addMasterKey(encodeWritable(key));
+ // update key with assigned identifier
+ DelegationKey keyWithSeq = new DelegationKey(keySeq, key.getExpiryDate(), key.getKey());
+ String keyStr = encodeWritable(keyWithSeq);
+ this.tokenStore.updateMasterKey(keySeq, keyStr);
+ decodeWritable(key, keyStr);
+ LOGGER.info("New master key with key id={}", key.getKeyId());
+ super.logUpdateMasterKey(key);
+ }
+
+ @Override
+ public synchronized void startThreads() throws IOException {
+ try {
+ // updateCurrentKey needs to be called to initialize the master key
+ // (there should be a null check added in the future in rollMasterKey)
+ // updateCurrentKey();
+ Method m = AbstractDelegationTokenSecretManager.class.getDeclaredMethod("updateCurrentKey");
+ m.setAccessible(true);
+ m.invoke(this);
+ } catch (Exception e) {
+ throw new IOException("Failed to initialize master key", e);
+ }
+ running = true;
+ tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
+ tokenRemoverThread.start();
+ }
+
+ @Override
+ public synchronized void stopThreads() {
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Stopping expired delegation token remover thread");
+ }
+ running = false;
+ if (tokenRemoverThread != null) {
+ tokenRemoverThread.interrupt();
+ }
+ }
+
+ /**
+ * Remove expired tokens. Replaces logic in {@link AbstractDelegationTokenSecretManager}
+ * that cannot be reused due to private method access. Logic here can more efficiently
+ * deal with external token store by only loading into memory the minimum data needed.
+ */
+ protected void removeExpiredTokens() {
+ long now = System.currentTimeMillis();
+ Iterator<DelegationTokenIdentifier> i = tokenStore.getAllDelegationTokenIdentifiers()
+ .iterator();
+ while (i.hasNext()) {
+ DelegationTokenIdentifier id = i.next();
+ if (now > id.getMaxDate()) {
+ this.tokenStore.removeToken(id); // no need to look at token info
+ } else {
+ // get token info to check renew date
+ DelegationTokenInformation tokenInfo = tokenStore.getToken(id);
+ if (tokenInfo != null) {
+ if (now > tokenInfo.getRenewDate()) {
+ this.tokenStore.removeToken(id);
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Extension of rollMasterKey to remove expired keys from store.
+ * @throws IOException
+ */
+ protected void rollMasterKeyExt() throws IOException {
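+ // Roll the master key via the superclass, refresh the current key in the store,
+ // and remove store keys that are no longer present in memory after the roll.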
+ Map<Integer, DelegationKey> keys = reloadKeys();
+ int currentKeyId = super.currentId;
+ HiveDelegationTokenSupport.rollMasterKey(TokenStoreDelegationTokenSecretManager.this);
+ List<DelegationKey> keysAfterRoll = Arrays.asList(getAllKeys());
+ for (DelegationKey key : keysAfterRoll) {
+ keys.remove(key.getKeyId());
+ if (key.getKeyId() == currentKeyId) {
+ tokenStore.updateMasterKey(currentKeyId, encodeWritable(key));
+ }
+ }
+ for (DelegationKey expiredKey : keys.values()) {
+ LOGGER.info("Removing expired key id={}", expiredKey.getKeyId());
+ tokenStore.removeMasterKey(expiredKey.getKeyId());
+ }
+ }
+
+
+ /**
+ * Cloned from {@link AbstractDelegationTokenSecretManager} to deal with private access
+ * restriction (there would not be a need to clone the remove thread if the remove logic was
+ * protected/extensible).
+ */
+ protected class ExpiredTokenRemover extends Thread {
+ private long lastMasterKeyUpdate;
+ private long lastTokenCacheCleanup;
+
+ @Override
+ public void run() {
+ LOGGER.info("Starting expired delegation token remover thread, "
+ + "tokenRemoverScanInterval=" + tokenRemoverScanInterval
+ / (60 * 1000) + " min(s)");
+ try {
+ while (running) {
+ long now = System.currentTimeMillis();
+ if (lastMasterKeyUpdate + keyUpdateInterval < now) {
+ try {
+ rollMasterKeyExt();
+ lastMasterKeyUpdate = now;
+ } catch (IOException e) {
+ LOGGER.error("Master key updating failed. "
+ + StringUtils.stringifyException(e));
+ }
+ }
+ if (lastTokenCacheCleanup + tokenRemoverScanInterval < now) {
+ removeExpiredTokens();
+ lastTokenCacheCleanup = now;
+ }
+ try {
+ Thread.sleep(5000); // 5 seconds
+ } catch (InterruptedException ie) {
+ LOGGER
+ .error("InterruptedException received for ExpiredTokenRemover thread "
+ + ie);
+ }
+ }
+ } catch (Throwable t) {
+ LOGGER.error("ExpiredTokenRemover thread received unexpected exception. "
+ + t, t);
+ Runtime.getRuntime().exit(-1);
+ }
+ }
+ }
+
+}
Index: shims/src/common-secure/java/org/apache/hadoop/hive/thrift/client/TUGIAssumingTransport.java
===================================================================
--- shims/src/common-secure/java/org/apache/hadoop/hive/thrift/client/TUGIAssumingTransport.java (revision 0)
+++ shims/src/common-secure/java/org/apache/hadoop/hive/thrift/client/TUGIAssumingTransport.java (working copy)
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.thrift.client;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.hadoop.hive.thrift.TFilterTransport;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+
+/**
+ * The Thrift SASL transports call Sasl.createSaslServer and Sasl.createSaslClient
+ * inside open(). So, we need to assume the correct UGI when the transport is opened
+ * so that the SASL mechanisms have access to the right principal. This transport
+ * wraps the Sasl transports to set up the right UGI context for open().
+ *
+ * This is used on the client side, where the API explicitly opens a transport to
+ * the server.
+ */
+ public class TUGIAssumingTransport extends TFilterTransport {
+ protected UserGroupInformation ugi;
+
+ public TUGIAssumingTransport(TTransport wrapped, UserGroupInformation ugi) {
+ super(wrapped);
+ this.ugi = ugi;
+ }
+
+ @Override
+ public void open() throws TTransportException {
+ try {
+ ugi.doAs(new PrivilegedExceptionAction<Void>() {
+ public Void run() {
+ try {
+ wrapped.open();
+ } catch (TTransportException tte) {
+ // Wrap the transport exception in an RTE, since UGI.doAs() then goes
+ // and unwraps this for us out of the doAs block. We then unwrap one
+ // more time in our catch clause to get back the TTE. (ugh)
+ throw new RuntimeException(tte);
+ }
+ return null;
+ }
+ });
+ } catch (IOException ioe) {
+ throw new RuntimeException("Received an ioe we never threw!", ioe);
+ } catch (InterruptedException ie) {
+ throw new RuntimeException("Received an ie we never threw!", ie);
+ } catch (RuntimeException rte) {
+ if (rte.getCause() instanceof TTransportException) {
+ throw (TTransportException)rte.getCause();
+ } else {
+ throw rte;
+ }
+ }
+ }
+ }
Index: shims/src/common-secure/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
===================================================================
--- shims/src/common-secure/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java (revision 0)
+++ shims/src/common-secure/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java (working copy)
@@ -0,0 +1,532 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.shims;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
+import org.apache.hadoop.hive.thrift.DelegationTokenSelector;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.ClusterStatus;
+import org.apache.hadoop.mapred.FileInputFormat;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobContext;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.OutputCommitter;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hadoop.mapred.TaskAttemptContext;
+import org.apache.hadoop.mapred.TaskCompletionEvent;
+import org.apache.hadoop.mapred.TaskID;
+import org.apache.hadoop.mapred.lib.CombineFileInputFormat;
+import org.apache.hadoop.mapred.lib.CombineFileSplit;
+import org.apache.hadoop.mapred.lib.NullOutputFormat;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.apache.hadoop.tools.HadoopArchives;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * Base implementation for shims against secure Hadoop 0.20.3/0.23.
+ */
+public abstract class HadoopShimsSecure implements HadoopShims {
+ public boolean usesJobShell() {
+ return false;
+ }
+
+ public boolean fileSystemDeleteOnExit(FileSystem fs, Path path)
+ throws IOException {
+
+ return fs.deleteOnExit(path);
+ }
+
+ public void inputFormatValidateInput(InputFormat fmt, JobConf conf)
+ throws IOException {
+ // gone in 0.18+
+ }
+
+ public boolean isJobPreparing(RunningJob job) throws IOException {
+ return job.getJobState() == JobStatus.PREP;
+ }
+ /**
+ * Workaround for hadoop-17 - jobclient only looks at commandlineconfig.
+ */
+ public void setTmpFiles(String prop, String files) {
+ // gone in 20+
+ }
+
+ public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf,
+ int numDataNodes,
+ boolean format,
+ String[] racks) throws IOException {
+ return new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks));
+ }
+
+ /**
+ * MiniDFSShim.
+ *
+ */
+ public class MiniDFSShim implements HadoopShims.MiniDFSShim {
+ private final MiniDFSCluster cluster;
+
+ public MiniDFSShim(MiniDFSCluster cluster) {
+ this.cluster = cluster;
+ }
+
+ public FileSystem getFileSystem() throws IOException {
+ return cluster.getFileSystem();
+ }
+
+ public void shutdown() {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * We define this function here to make the code compatible between
+ * hadoop 0.17 and hadoop 0.20.
+ *
+ * Hive binary that compiled Text.compareTo(Text) with hadoop 0.20 won't
+ * work with hadoop 0.17 because in hadoop 0.20, Text.compareTo(Text) is
+ * implemented in org.apache.hadoop.io.BinaryComparable, and Java compiler
+ * references that class, which is not available in hadoop 0.17.
+ */
+ public int compareText(Text a, Text b) {
+ return a.compareTo(b);
+ }
+
+ @Override
+ public long getAccessTime(FileStatus file) {
+ return file.getAccessTime();
+ }
+
+ public HadoopShims.CombineFileInputFormatShim getCombineFileInputFormat() {
+ return new CombineFileInputFormatShim() {
+ @Override
+ public RecordReader getRecordReader(InputSplit split,
+ JobConf job, Reporter reporter) throws IOException {
+ throw new IOException("CombineFileInputFormat.getRecordReader not needed.");
+ }
+ };
+ }
+
+ public static class InputSplitShim extends CombineFileSplit implements HadoopShims.InputSplitShim {
+ long shrinkedLength;
+ boolean _isShrinked;
+ public InputSplitShim() {
+ super();
+ _isShrinked = false;
+ }
+
+ public InputSplitShim(CombineFileSplit old) throws IOException {
+ super(old);
+ _isShrinked = false;
+ }
+
+ @Override
+ public void shrinkSplit(long length) {
+ _isShrinked = true;
+ shrinkedLength = length;
+ }
+
+ public boolean isShrinked() {
+ return _isShrinked;
+ }
+
+ public long getShrinkedLength() {
+ return shrinkedLength;
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ _isShrinked = in.readBoolean();
+ if (_isShrinked) {
+ shrinkedLength = in.readLong();
+ }
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ out.writeBoolean(_isShrinked);
+ if (_isShrinked) {
+ out.writeLong(shrinkedLength);
+ }
+ }
+ }
+
+ /* This class should be replaced with org.apache.hadoop.mapred.lib.CombineFileRecordReader class, once
+ * https://issues.apache.org/jira/browse/MAPREDUCE-955 is fixed. This code should be removed - it is a copy
+ * of org.apache.hadoop.mapred.lib.CombineFileRecordReader
+ */
+ public static class CombineFileRecordReader<K, V> implements RecordReader<K, V> {
+
+ static final Class[] constructorSignature = new Class[] {
+ InputSplit.class,
+ Configuration.class,
+ Reporter.class,
+ Integer.class
+ };
+
+ protected CombineFileSplit split;
+ protected JobConf jc;
+ protected Reporter reporter;
+ protected Class<RecordReader<K, V>> rrClass;
+ protected Constructor<RecordReader<K, V>> rrConstructor;
+ protected FileSystem fs;
+
+ protected int idx;
+ protected long progress;
+ protected RecordReader<K, V> curReader;
+ protected boolean isShrinked;
+ protected long shrinkedLength;
+
+ public boolean next(K key, V value) throws IOException {
+
+ while ((curReader == null)
+ || !doNextWithExceptionHandler((K) ((CombineHiveKey) key).getKey(),
+ value)) {
+ if (!initNextRecordReader(key)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public K createKey() {
+ K newKey = curReader.createKey();
+ return (K)(new CombineHiveKey(newKey));
+ }
+
+ public V createValue() {
+ return curReader.createValue();
+ }
+
+ /**
+ * Return the amount of data processed.
+ */
+ public long getPos() throws IOException {
+ return progress;
+ }
+
+ public void close() throws IOException {
+ if (curReader != null) {
+ curReader.close();
+ curReader = null;
+ }
+ }
+
+ /**
+ * Return progress based on the amount of data processed so far.
+ */
+ public float getProgress() throws IOException {
+ return Math.min(1.0f, progress / (float) (split.getLength()));
+ }
+
+ /**
+ * A generic RecordReader that can hand out different recordReaders
+ * for each chunk in the CombineFileSplit.
+ */
+ public CombineFileRecordReader(JobConf job, CombineFileSplit split,
+ Reporter reporter,
+ Class<RecordReader<K, V>> rrClass)
+ throws IOException {
+ this.split = split;
+ this.jc = job;
+ this.rrClass = rrClass;
+ this.reporter = reporter;
+ this.idx = 0;
+ this.curReader = null;
+ this.progress = 0;
+
+ isShrinked = false;
+
+ assert (split instanceof InputSplitShim);
+ if (((InputSplitShim) split).isShrinked()) {
+ isShrinked = true;
+ shrinkedLength = ((InputSplitShim) split).getShrinkedLength();
+ }
+
+ try {
+ rrConstructor = rrClass.getDeclaredConstructor(constructorSignature);
+ rrConstructor.setAccessible(true);
+ } catch (Exception e) {
+ throw new RuntimeException(rrClass.getName() +
+ " does not have valid constructor", e);
+ }
+ initNextRecordReader(null);
+ }
+
+ /**
+ * Call next() on the current reader and route any exception through the Hive IO exception handler.
+ * @param key
+ * @param value
+ * @return
+ * @throws IOException
+ */
+ private boolean doNextWithExceptionHandler(K key, V value) throws IOException {
+ try {
+ return curReader.next(key, value);
+ } catch (Exception e) {
+ return HiveIOExceptionHandlerUtil
+ .handleRecordReaderNextException(e, jc);
+ }
+ }
+
+ /**
+ * Get the record reader for the next chunk in this CombineFileSplit.
+ */
+ protected boolean initNextRecordReader(K key) throws IOException {
+
+ if (curReader != null) {
+ curReader.close();
+ curReader = null;
+ if (idx > 0) {
+ progress += split.getLength(idx - 1); // done processing so far
+ }
+ }
+
+ // if all chunks have been processed, nothing more to do.
+ if (idx == split.getNumPaths() || (isShrinked && progress > shrinkedLength)) {
+ return false;
+ }
+
+ // get a record reader for the idx-th chunk
+ try {
+ curReader = rrConstructor.newInstance(new Object[]
+ {split, jc, reporter, Integer.valueOf(idx)});
+
+ // change the key if need be
+ if (key != null) {
+ K newKey = curReader.createKey();
+ ((CombineHiveKey)key).setKey(newKey);
+ }
+
+ // setup some helper config variables.
+ jc.set("map.input.file", split.getPath(idx).toString());
+ jc.setLong("map.input.start", split.getOffset(idx));
+ jc.setLong("map.input.length", split.getLength(idx));
+ } catch (Exception e) {
+ curReader = HiveIOExceptionHandlerUtil.handleRecordReaderCreationException(
+ e, jc);
+ }
+ idx++;
+ return true;
+ }
+ }
+
+ public abstract static class CombineFileInputFormatShim<K, V> extends
+ CombineFileInputFormat<K, V>
+ implements HadoopShims.CombineFileInputFormatShim<K, V> {
+
+ public Path[] getInputPathsShim(JobConf conf) {
+ try {
+ return FileInputFormat.getInputPaths(conf);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public void createPool(JobConf conf, PathFilter... filters) {
+ super.createPool(conf, filters);
+ }
+
+ @Override
+ public InputSplitShim[] getSplits(JobConf job, int numSplits) throws IOException {
+ long minSize = job.getLong("mapred.min.split.size", 0);
+
+ // For backward compatibility, let the above parameter be used
+ if (job.getLong("mapred.min.split.size.per.node", 0) == 0) {
+ super.setMinSplitSizeNode(minSize);
+ }
+
+ if (job.getLong("mapred.min.split.size.per.rack", 0) == 0) {
+ super.setMinSplitSizeRack(minSize);
+ }
+
+ if (job.getLong("mapred.max.split.size", 0) == 0) {
+ super.setMaxSplitSize(minSize);
+ }
+
+ CombineFileSplit[] splits = (CombineFileSplit[]) super.getSplits(job, numSplits);
+
+ InputSplitShim[] isplits = new InputSplitShim[splits.length];
+ for (int pos = 0; pos < splits.length; pos++) {
+ isplits[pos] = new InputSplitShim(splits[pos]);
+ }
+
+ return isplits;
+ }
+
+ public InputSplitShim getInputSplitShim() throws IOException {
+ return new InputSplitShim();
+ }
+
+ public RecordReader getRecordReader(JobConf job, HadoopShims.InputSplitShim split,
+ Reporter reporter,
+ Class<RecordReader<K, V>> rrClass)
+ throws IOException {
+ CombineFileSplit cfSplit = (CombineFileSplit) split;
+ return new CombineFileRecordReader(job, cfSplit, reporter, rrClass);
+ }
+
+ }
+
+ public String getInputFormatClassName() {
+ return "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat";
+ }
+
+ String[] ret = new String[2];
+
+ @Override
+ public String[] getTaskJobIDs(TaskCompletionEvent t) {
+ TaskID tid = t.getTaskAttemptId().getTaskID();
+ ret[0] = tid.toString();
+ ret[1] = tid.getJobID().toString();
+ return ret;
+ }
+
+ public void setFloatConf(Configuration conf, String varName, float val) {
+ conf.setFloat(varName, val);
+ }
+
+ @Override
+ public int createHadoopArchive(Configuration conf, Path sourceDir, Path destDir,
+ String archiveName) throws Exception {
+
+ HadoopArchives har = new HadoopArchives(conf);
+ List<String> args = new ArrayList<String>();
+
+ if (conf.get("hive.archive.har.parentdir.settable") == null) {
+ throw new RuntimeException("hive.archive.har.parentdir.settable is not set");
+ }
+ boolean parentSettable =
+ conf.getBoolean("hive.archive.har.parentdir.settable", false);
+
+ if (parentSettable) {
+ args.add("-archiveName");
+ args.add(archiveName);
+ args.add("-p");
+ args.add(sourceDir.toString());
+ args.add(destDir.toString());
+ } else {
+ args.add("-archiveName");
+ args.add(archiveName);
+ args.add(sourceDir.toString());
+ args.add(destDir.toString());
+ }
+
+ return ToolRunner.run(har, args.toArray(new String[0]));
+ }
+
+ public static class NullOutputCommitter extends OutputCommitter {
+ @Override
+ public void setupJob(JobContext jobContext) { }
+ @Override
+ public void cleanupJob(JobContext jobContext) { }
+
+ @Override
+ public void setupTask(TaskAttemptContext taskContext) { }
+ @Override
+ public boolean needsTaskCommit(TaskAttemptContext taskContext) {
+ return false;
+ }
+ @Override
+ public void commitTask(TaskAttemptContext taskContext) { }
+ @Override
+ public void abortTask(TaskAttemptContext taskContext) { }
+ }
+
+ public void setNullOutputFormat(JobConf conf) {
+ conf.setOutputFormat(NullOutputFormat.class);
+ conf.setOutputCommitter(NullOutputCommitter.class);
+
+ // option to bypass job setup and cleanup was introduced in hadoop-21 (MAPREDUCE-463)
+ // but can be backported. So we disable setup/cleanup in all versions >= 0.19
+ conf.setBoolean("mapred.committer.job.setup.cleanup.needed", false);
+
+ // option to bypass task cleanup task was introduced in hadoop-23 (MAPREDUCE-2206)
+ // but can be backported. So we disable setup/cleanup in all versions >= 0.19
+ conf.setBoolean("mapreduce.job.committer.task.cleanup.needed", false);
+ }
+
+ @Override
+ public UserGroupInformation getUGIForConf(Configuration conf) throws IOException {
+ return UserGroupInformation.getCurrentUser();
+ }
+
+ @Override
+ public boolean isSecureShimImpl() {
+ return true;
+ }
+
+ @Override
+ public String getShortUserName(UserGroupInformation ugi) {
+ return ugi.getShortUserName();
+ }
+
+ @Override
+ public String getTokenStrForm(String tokenSignature) throws IOException {
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ TokenSelector<? extends TokenIdentifier> tokenSelector = new DelegationTokenSelector();
+
+ Token<? extends TokenIdentifier> token = tokenSelector.selectToken(
+ tokenSignature == null ? new Text() : new Text(tokenSignature), ugi.getTokens());
+ return token != null ? token.encodeToUrlString() : null;
+ }
+
+ @Override
+ public void doAs(UserGroupInformation ugi, PrivilegedExceptionAction<Void> pvea) throws IOException, InterruptedException {
+ ugi.doAs(pvea);
+ }
+
+ @Override
+ public UserGroupInformation createRemoteUser(String userName, List<String> groupNames) {
+ return UserGroupInformation.createRemoteUser(userName);
+ }
+
+ @Override
+ abstract public JobTrackerState getJobTrackerState(ClusterStatus clusterStatus) throws Exception;
+
+ @Override
+ abstract public org.apache.hadoop.mapreduce.TaskAttemptContext newTaskAttemptContext(Configuration conf, final Progressable progressable);
+
+ @Override
+ abstract public org.apache.hadoop.mapreduce.JobContext newJobContext(Job job);
+}
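
For reference, a minimal usage sketch of the secure-shim entry points added above, assuming the shim is obtained through Hive's ShimLoader; the class name and the token signature string below are illustrative and not part of this patch.

import java.io.IOException;

import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.mapred.JobConf;

public class SecureShimUsageSketch {
  public static void main(String[] args) throws IOException {
    // Pick up whichever shim matches the Hadoop version on the classpath.
    HadoopShims shims = ShimLoader.getHadoopShims();

    // Only the secure shims (0.20S, 0.23) return true here.
    if (shims.isSecureShimImpl()) {
      // Look up the Hive delegation token for the current user; null means
      // no token with this (illustrative) signature was found in the UGI.
      String tokenStr = shims.getTokenStrForm("exampleTokenSignature");
      System.out.println("delegation token: " + tokenStr);
    }

    // Configure a job to produce no output and skip committer setup/cleanup,
    // as setNullOutputFormat above arranges.
    JobConf job = new JobConf();
    shims.setNullOutputFormat(job);
  }
}
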
Index: shims/build.xml
===================================================================
--- shims/build.xml (revision 1228525)
+++ shims/build.xml (working copy)
@@ -51,50 +51,49 @@
-
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
-
-
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+ Building shims @{shimName}
+
+
+
+
+
+
-
@@ -133,9 +132,8 @@
-
+
Index: build.properties
===================================================================
--- build.properties (revision 1228525)
+++ build.properties (working copy)
@@ -10,9 +10,11 @@
javac.args=
javac.args.warnings=
-hadoop.version=0.20.1
-hadoop.security.version=0.20.3-CDH3-SNAPSHOT
-hadoop.security.version.prefix=0.20S
+hadoop-0.20.version=0.20.1
+hadoop-0.20S.version=0.20.3-CDH3-SNAPSHOT
+hadoop-0.23.version=0.23.0
+hadoop.version=${hadoop-0.20.version}
+hadoop.security.version=${hadoop-0.20S.version}
hadoop.mirror=http://mirror.facebook.net/facebook/hive-deps
hadoop.mirror2=http://archive.cloudera.com/hive-deps
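
For illustration only (the version number here is hypothetical), the per-shim properties introduced above can be overridden at build time through Ant's standard -D property mechanism rather than by editing build.properties:

  ant -Dhadoop-0.23.version=0.23.1 package
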
Index: build-common.xml
===================================================================
--- build-common.xml (revision 1228525)
+++ build-common.xml (working copy)
@@ -115,14 +115,14 @@
log="${ivyresolvelog}"/>
-
-
-
+
+
-