diff --git .gitignore .gitignore
index c5decaf..099e0c9 100644
--- .gitignore
+++ .gitignore
@@ -27,3 +27,5 @@ hcatalog/webhcat/java-client/target
hcatalog/storage-handlers/hbase/target
hcatalog/webhcat/svr/target
conf/hive-default.xml.template
+data/files/test.jks
+data/files/.test.jks.crc
diff --git data/scripts/q_test_cleanup_for_encryption.sql data/scripts/q_test_cleanup_for_encryption.sql
new file mode 100644
index 0000000..d28f406
--- /dev/null
+++ data/scripts/q_test_cleanup_for_encryption.sql
@@ -0,0 +1,6 @@
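+-- Drop the test databases created by q_test_init_for_encryption.sql.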
+DROP DATABASE encryptedWith128BitsKeyDB;
+
+DROP DATABASE encryptedWith256BitsKeyDB;
+
+DROP DATABASE unencryptedDB;
\ No newline at end of file
diff --git data/scripts/q_test_init_for_encryption.sql data/scripts/q_test_init_for_encryption.sql
new file mode 100644
index 0000000..9245508
--- /dev/null
+++ data/scripts/q_test_init_for_encryption.sql
@@ -0,0 +1,6 @@
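+-- Databases used by the encrypted HDFS qfile tests; QTestUtil creates encryption zones over the two encrypted databases.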
+CREATE DATABASE encryptedWith128BitsKeyDB;
+
+CREATE DATABASE encryptedWith256BitsKeyDB;
+
+CREATE DATABASE unencryptedDB;
\ No newline at end of file
diff --git itests/qtest/pom.xml itests/qtest/pom.xml
index 376f4a9..43c950f 100644
--- itests/qtest/pom.xml
+++ itests/qtest/pom.xml
@@ -532,6 +532,24 @@
hadoopVersion="${active.hadoop.version}"
initScript="q_test_init.sql"
cleanupScript="q_test_cleanup.sql"/>
+
+
diff --git itests/src/test/resources/testconfiguration.properties itests/src/test/resources/testconfiguration.properties
index 3ae001d..ed5cb7b 100644
--- itests/src/test/resources/testconfiguration.properties
+++ itests/src/test/resources/testconfiguration.properties
@@ -271,6 +271,8 @@ minitez.query.files=bucket_map_join_tez1.q,\
tez_smb_1.q,\
vectorized_dynamic_partition_pruning.q
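+# qfile tests that run against the encrypted HDFS mini cluster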
+encrypted.query.files.shared=create_encrypted_table.q
+
beeline.positive.exclude=add_part_exist.q,\
alter1.q,\
alter2.q,\
diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 31d5c29..d18de31 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -39,6 +39,7 @@
import java.io.Serializable;
import java.io.StringWriter;
import java.net.URL;
+import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -76,6 +77,7 @@
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
@@ -102,6 +104,16 @@
public class QTestUtil {
public static final String UTF_8 = "UTF-8";
+
+  // names of the databases pre-created for the encrypted-database tests
+ public static final String encryptedWith128BitsKeyDBName = "encryptedwith128bitskeydb";
+ public static final String encryptedWith256BitsKeyDBName = "encryptedwith256bitskeydb";
+ public static final String unencryptedDBName = "unencrypteddb";
+
+  // names of the keys used to encrypt the HDFS paths of the test databases
+ private final String keyNameIn128 = "k128";
+ private final String keyNameIn256 = "k256";
+
private static final Log LOG = LogFactory.getLog("QTestUtil");
private static final String QTEST_LEAVE_FILES = "QTEST_LEAVE_FILES";
private final String defaultInitScript = "q_test_init.sql";
@@ -130,6 +142,7 @@
private CliDriver cliDriver;
private HadoopShims.MiniMrShim mr = null;
private HadoopShims.MiniDFSShim dfs = null;
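+  // shim for managing HDFS encryption keys and zones; only set when clusterType is encrypted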
+ private HadoopShims.HdfsEncryptionShim hes = null;
private boolean miniMr = false;
private String hadoopVer = null;
private QTestSetup setup = null;
@@ -245,6 +258,16 @@ private String getHadoopMainVersion(String input) {
return null;
}
+ public void initEncryptionRelatedConfIfNeeded() {
+ if (clusterType != MiniClusterType.encrypted) {
+ return;
+ }
+ HadoopShims shims = ShimLoader.getHadoopShims();
+    // configure the key provider URI and default cipher for the encrypted HDFS cluster
+ conf.set(shims.getHadoopConfNames().get("HADOOPSECURITYKEYPROVIDER"), getKeyProviderURI());
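+    // AES/CTR/NoPadding is the cipher suite used by HDFS transparent encryption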
+ conf.set(shims.getHadoopConfNames().get("HADOOPSECURITYKEYCIPHER"), "AES/CTR/NoPadding");
+ }
+
public void initConf() throws Exception {
String vectorizationEnabled = System.getProperty("test.vectorization.enabled");
@@ -280,6 +303,7 @@ public void initConf() throws Exception {
public enum MiniClusterType {
mr,
tez,
+ encrypted,
none;
public static MiniClusterType valueForString(String type) {
@@ -287,6 +311,8 @@ public static MiniClusterType valueForString(String type) {
return mr;
} else if (type.equals("tez")) {
return tez;
+ } else if (type.equals("encrypted")) {
+ return encrypted;
} else {
return none;
}
@@ -299,6 +325,17 @@ public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, Stri
this(outDir, logDir, clusterType, null, hadoopVer, initScript, cleanupScript);
}
+ private String getKeyProviderURI() {
+    // fall back to <cwd>/data/files when test.data.files is not set
+ String dataDir = conf.get("test.data.files");
+ if (dataDir == null) {
+ dataDir = new File(".").getAbsolutePath() + "/data/files";
+ }
+
+    // the JKS keystore is kept under the test data path for test purposes only
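+    // a "jceks://file" URI points Hadoop's KeyProviderFactory at a local Java keystore file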
+ return "jceks://file" + new Path(dataDir, "test.jks").toUri();
+ }
+
public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
String confDir, String hadoopVer, String initScript, String cleanupScript)
throws Exception {
@@ -323,8 +360,19 @@ public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
int numberOfDataNodes = 4;
if (clusterType != MiniClusterType.none) {
+ FileSystem fs;
+
+ initEncryptionRelatedConfIfNeeded();
+
dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null);
- FileSystem fs = dfs.getFileSystem();
+ fs = dfs.getFileSystem();
+
+ if (clusterType == MiniClusterType.encrypted) {
+      // create the shim that manages encryption keys and zones on the mini cluster
+ hes = shims.createHdfsEncryptionShim(fs, conf);
+ System.out.println("key provider is initialized");
+ }
+
String uriString = WindowsPathUtil.getHdfsUriString(fs.getUri().toString());
if (clusterType == MiniClusterType.tez) {
mr = shims.getMiniTezCluster(conf, 4, uriString, 1);
@@ -340,7 +388,6 @@ public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
if (dataDir == null) {
dataDir = new File(".").getAbsolutePath() + "/data/files";
}
-
testFiles = dataDir;
// Use the current directory if it is not specified
@@ -368,7 +415,7 @@ public void shutdown() throws Exception {
if (System.getenv(QTEST_LEAVE_FILES) == null) {
cleanUp();
}
-
+
setup.tearDown();
if (mr != null) {
mr.shutdown();
@@ -538,6 +585,19 @@ public void clearPostTestEffects() throws Exception {
}
/**
+   * On an encrypted cluster, the pre-created encryption test databases (and the default database) must be preserved between tests.
+ */
+  private boolean isReservedDatabase(String dbName) {
+ if (clusterType == MiniClusterType.encrypted) {
+ return (DEFAULT_DATABASE_NAME.equals(dbName) ||
+ encryptedWith128BitsKeyDBName.equals(dbName) ||
+ encryptedWith256BitsKeyDBName.equals(dbName) || unencryptedDBName.equals(dbName));
+ } else {
+ return DEFAULT_DATABASE_NAME.equals(dbName);
+ }
+ }
+
+ /**
* Clear out any side effects of running tests
*/
public void clearTestSideEffects() throws Exception {
@@ -545,11 +605,11 @@ public void clearTestSideEffects() throws Exception {
return;
}
// Delete any tables other than the source tables
- // and any databases other than the default database.
+  // and any databases other than the default database (or, in encrypted mode, the reserved test databases).
for (String dbName : db.getAllDatabases()) {
SessionState.get().setCurrentDatabase(dbName);
for (String tblName : db.getAllTables()) {
- if (!DEFAULT_DATABASE_NAME.equals(dbName) || !srcTables.contains(tblName)) {
+        if (!isReservedDatabase(dbName) || !srcTables.contains(tblName)) {
Table tblObj = db.getTable(tblName);
// dropping index table can not be dropped directly. Dropping the base
// table will automatically drop all its index table
@@ -567,7 +627,7 @@ public void clearTestSideEffects() throws Exception {
}
}
}
- if (!DEFAULT_DATABASE_NAME.equals(dbName)) {
+      if (!isReservedDatabase(dbName)) {
// Drop cascade, may need to drop functions
db.dropDatabase(dbName, true, true, true);
}
@@ -593,11 +653,15 @@ public void clearTestSideEffects() throws Exception {
db.dropRole(roleName);
}
}
- // allocate and initialize a new conf since a test can
- // modify conf by using 'set' commands
- conf = new HiveConf (Driver.class);
- initConf();
- db = Hive.get(conf); // propagate new conf to meta store
+
+ if (clusterType != MiniClusterType.encrypted) {
+      // allocate and initialize a new conf since a test can modify it with
+      // 'set' commands; skipped on encrypted clusters so that the key
+      // provider configuration set at startup is preserved
+      conf = new HiveConf(Driver.class);
+      initConf();
+      db = Hive.get(conf); // propagate the new conf to the metastore
+ }
setup.preTest(conf);
}
@@ -685,6 +749,8 @@ public void createSources() throws Exception {
cliDriver.processLine(initCommands);
conf.setBoolean("hive.test.init.phase", false);
+
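+    // wrap the pre-created encrypted databases in HDFS encryption zones (no-op unless the cluster is encrypted)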
+    initEncryptionZonesIfNeeded();
}
public void init() throws Exception {
@@ -705,6 +771,21 @@ public void init() throws Exception {
sem = new SemanticAnalyzer(conf);
}
+  private void initEncryptionZonesIfNeeded()
+ throws IOException, NoSuchAlgorithmException, HiveException {
+ if (clusterType != MiniClusterType.encrypted) {
+ return;
+ }
+    // create an encryption zone backed by a 128-bit key for the first encrypted database
+    hes.createKey(keyNameIn128, "128", "AES/CTR/NoPadding", conf);
+    hes.createEncryptionZone(new Path(db.getDatabase(encryptedWith128BitsKeyDBName).getLocationUri()),
+        keyNameIn128);
+    // create an encryption zone backed by a 256-bit key for the second encrypted database
+    hes.createKey(keyNameIn256, "256", "AES/CTR/NoPadding", conf);
+    hes.createEncryptionZone(new Path(db.getDatabase(encryptedWith256BitsKeyDBName).getLocationUri()),
+        keyNameIn256);
+ }
+
public void init(String tname) throws Exception {
cleanUp();
createSources();
@@ -819,7 +900,7 @@ public int execute(String tname) {
try {
return drv.run(qMap.get(tname)).getResponseCode();
} catch (CommandNeedRetryException e) {
- // TODO Auto-generated catch block
+ System.out.println("driver failed to run the command: " + tname);
e.printStackTrace();
return -1;
}
@@ -865,7 +946,7 @@ public void convertSequenceFileToTextFile() throws Exception {
// Move all data from dest4_sequencefile to dest4
drv
- .run("FROM dest4_sequencefile INSERT OVERWRITE TABLE dest4 SELECT dest4_sequencefile.*");
+ .run("FROM dest4_sequencefile INSERT OVERWRITE TABLE dest4 SELECT dest4_sequencefile.*");
// Drop dest4_sequencefile
db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "dest4_sequencefile",
@@ -1578,8 +1659,10 @@ public static boolean queryListRunnerMultiThreaded(File[] qfiles, QTestUtil[] qt
}
public static void outputTestFailureHelpMessage() {
- System.err.println("See ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, "
- + "or check ./ql/target/surefire-reports or ./itests/qtest/target/surefire-reports/ for specific test cases logs.");
+ System.err.println(
+ "See ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, or check " +
+ "./ql/target/surefire-reports or ./itests/qtest/target/surefire-reports/ for specific " +
+ "test cases logs.");
System.err.flush();
}
diff --git ql/src/test/queries/clientpositive/create_encrypted_table.q ql/src/test/queries/clientpositive/create_encrypted_table.q
new file mode 100644
index 0000000..b411b11
--- /dev/null
+++ ql/src/test/queries/clientpositive/create_encrypted_table.q
@@ -0,0 +1,3 @@
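+-- Verify that a table can be created inside an encryption zone.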
+USE encryptedWith128BitsKeyDB;
+CREATE TABLE a(k string, v string);
diff --git ql/src/test/templates/TestEncrytedHDFSCliDriver.vm ql/src/test/templates/TestEncrytedHDFSCliDriver.vm
new file mode 100644
index 0000000..0b49c97
--- /dev/null
+++ ql/src/test/templates/TestEncrytedHDFSCliDriver.vm
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.cli;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import java.io.*;
+import java.util.*;
+
+import org.apache.hadoop.hive.ql.QTestUtil;
+import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
+import org.apache.hadoop.hive.ql.session.SessionState;
+
+public class $className extends TestCase {
+
+ private static final String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
+ private static QTestUtil qt;
+
+ static {
+
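+    // the cluster mode below is filled in at template-generation time (expected to be "encrypted" for this driver)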
+ MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
+ String hiveConfDir = "$hiveConfDir";
+ String initScript = "$initScript";
+ String cleanupScript = "$cleanupScript";
+ try {
+ String hadoopVer = "$hadoopVersion";
+ qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR,
+ hiveConfDir, hadoopVer, initScript, cleanupScript);
+
+ // do a one time initialization
+ qt.cleanUp();
+ qt.createSources();
+
+ } catch (Exception e) {
+ System.err.println("Exception: " + e.getMessage());
+ e.printStackTrace();
+ System.err.flush();
+ fail("Unexpected exception in static initialization " + e.getMessage());
+ }
+ }
+
+ public $className(String name) {
+ super(name);
+ }
+
+ @Override
+ protected void setUp() {
+ try {
+ qt.clearTestSideEffects();
+ } catch (Exception e) {
+ System.err.println("Exception: " + e.getMessage());
+ e.printStackTrace();
+ System.err.flush();
+ fail("Unexpected exception in setup");
+ }
+ }
+
+ /**
+   * Dummy last test. This is only meant to shut down qt.
+ */
+ public void testCliDriver_shutdown() {
+ System.err.println ("Cleaning up " + "$className");
+ }
+
+ @Override
+ protected void tearDown() {
+ try {
+ qt.clearPostTestEffects();
+ if (getName().equals("testCliDriver_shutdown"))
+ qt.shutdown();
+ }
+ catch (Exception e) {
+ System.err.println("Exception: " + e.getMessage());
+ e.printStackTrace();
+ System.err.flush();
+ fail("Unexpected exception in tearDown");
+ }
+ }
+
+ public static Test suite() {
+    Set<String> qFilesToExecute = new HashSet<String>();
+ String qFiles = System.getProperty("qfile", "").trim();
+ if(!qFiles.isEmpty()) {
+ for(String qFile : qFiles.split(",")) {
+ qFile = qFile.trim();
+ if(!qFile.isEmpty()) {
+ qFilesToExecute.add(qFile);
+ }
+ }
+ }
+ TestSuite suite = new TestSuite();
+#foreach ($qf in $qfiles)
+ #set ($fname = $qf.getName())
+ #set ($eidx = $fname.indexOf('.'))
+ #set ($tname = $fname.substring(0, $eidx))
+ if(qFilesToExecute.isEmpty() || qFilesToExecute.contains("$fname")) {
+ suite.addTest(new $className("testCliDriver_$tname"));
+ }
+#end
+ suite.addTest(new $className("testCliDriver_shutdown"));
+ return suite;
+ }
+
+ static String debugHint = "\nSee ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, "
+ + "or check ./ql/target/surefire-reports or ./itests/qtest/target/surefire-reports/ for specific test cases logs.";
+
+#foreach ($qf in $qfiles)
+ #set ($fname = $qf.getName())
+ #set ($eidx = $fname.indexOf('.'))
+ #set ($tname = $fname.substring(0, $eidx))
+ #set ($fpath = $qfilesMap.get($fname))
+ public void testCliDriver_$tname() throws Exception {
+ runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
+ }
+
+#end
+
+ private void runTest(String tname, String fname, String fpath) throws Exception {
+ long startTime = System.currentTimeMillis();
+ try {
+ System.err.println("Begin query: " + fname);
+
+ qt.addFile(fpath);
+
+ if (qt.shouldBeSkipped(fname)) {
+ return;
+ }
+
+ qt.cliInit(fname, false);
+ int ecode = qt.executeClient(fname);
+ if (ecode != 0) {
+ qt.failed(ecode, fname, debugHint);
+ }
+ ecode = qt.checkCliDriverResults(fname);
+ if (ecode != 0) {
+ qt.failedDiff(ecode, fname, debugHint);
+ }
+ }
+ catch (Throwable e) {
+ qt.failed(e, fname, debugHint);
+ }
+
+ long elapsedTime = System.currentTimeMillis() - startTime;
+ System.err.println("Done query: " + fname + " elapsedTime=" + elapsedTime/1000 + "s");
+ assertTrue("Test passed", true);
+ }
+}
diff --git shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
index ff7a82c..d6d91b5 100644
--- shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
+++ shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
@@ -869,7 +869,9 @@ public FileSystem createProxyFileSystem(FileSystem fs, URI uri) {
ret.put("HADOOPSPECULATIVEEXECREDUCERS", "mapred.reduce.tasks.speculative.execution");
ret.put("MAPREDSETUPCLEANUPNEEDED", "mapred.committer.job.setup.cleanup.needed");
ret.put("MAPREDTASKCLEANUPNEEDED", "mapreduce.job.committer.task.cleanup.needed");
- ret.put("HADOOPSECURITYKEYPROVIDER", "hadoop.encryption.is.not.supported");
+ ret.put("HADOOPSECURITYKEYPROVIDER", "dfs.encryption.key.provider.uri");
+ ret.put("HADOOPSECURITYKEYCIPHER", "hadoop.security.key.default.cipher");
+ ret.put("HADOOPSECURITYKEYBITLENGTH", "hadoop.security.key.default.bitlength");
return ret;
}
diff --git shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
index 2e00d93..ee50026 100644
--- shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
+++ shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
@@ -494,7 +494,9 @@ public FileSystem createProxyFileSystem(FileSystem fs, URI uri) {
ret.put("HADOOPSPECULATIVEEXECREDUCERS", "mapred.reduce.tasks.speculative.execution");
ret.put("MAPREDSETUPCLEANUPNEEDED", "mapred.committer.job.setup.cleanup.needed");
ret.put("MAPREDTASKCLEANUPNEEDED", "mapreduce.job.committer.task.cleanup.needed");
- ret.put("HADOOPSECURITYKEYPROVIDER", "hadoop.encryption.is.not.supported");
+ ret.put("HADOOPSECURITYKEYPROVIDER", "dfs.encryption.key.provider.uri");
+ ret.put("HADOOPSECURITYKEYCIPHER", "hadoop.security.key.default.cipher");
+ ret.put("HADOOPSECURITYKEYBITLENGTH", "hadoop.security.key.default.bitlength");
return ret;
}
diff --git shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index 8161fc1..0ed99c5 100644
--- shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -24,8 +24,8 @@
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URI;
-import java.net.URISyntaxException;
import java.security.AccessControlException;
+import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
@@ -36,7 +36,7 @@
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.crypto.key.KeyProvider.Options;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.DefaultFileAccess;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -57,7 +57,7 @@
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.io.LongWritable;
@@ -96,7 +96,8 @@
public class Hadoop23Shims extends HadoopShimsSecure {
HadoopShims.MiniDFSShim cluster = null;
-
+  MiniDFSCluster miniDFSCluster = null;
+  KeyProvider keyProvider = null;
final boolean zeroCopy;
public Hadoop23Shims() {
@@ -380,7 +381,9 @@ public void setupConfiguration(Configuration conf) {
int numDataNodes,
boolean format,
String[] racks) throws IOException {
- cluster = new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks));
+ miniDFSCluster = new MiniDFSCluster(conf, numDataNodes, format, racks);
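+    // capture the KeyProvider backing the mini cluster's NameNode so tests can create encryption keys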
+ keyProvider = miniDFSCluster.getNameNode().getNamesystem().getProvider();
+ cluster = new MiniDFSShim(miniDFSCluster);
return cluster;
}
@@ -742,7 +745,9 @@ public FileSystem createProxyFileSystem(FileSystem fs, URI uri) {
ret.put("HADOOPSPECULATIVEEXECREDUCERS", "mapreduce.reduce.speculative");
ret.put("MAPREDSETUPCLEANUPNEEDED", "mapreduce.job.committer.setup.cleanup.needed");
ret.put("MAPREDTASKCLEANUPNEEDED", "mapreduce.job.committer.task.cleanup.needed");
- ret.put("HADOOPSECURITYKEYPROVIDER", "hadoop.security.key.provider.path");
+ ret.put("HADOOPSECURITYKEYPROVIDER", "dfs.encryption.key.provider.uri");
+ ret.put("HADOOPSECURITYKEYCIPHER", "hadoop.security.key.default.cipher");
+ ret.put("HADOOPSECURITYKEYBITLENGTH", "hadoop.security.key.default.bitlength");
return ret;
}
@@ -938,12 +943,7 @@ public boolean runDistCp(Path src, Path dst, Configuration conf) throws IOExcept
return (0 == rc);
}
- public static class HdfsEncryptionShim implements HadoopShims.HdfsEncryptionShim {
- /**
- * Gets information about key encryption metadata
- */
- private KeyProvider keyProvider = null;
-
+ public class HdfsEncryptionShim implements HadoopShims.HdfsEncryptionShim {
/**
* Gets information about HDFS encryption zones
*/
@@ -951,17 +951,6 @@ public boolean runDistCp(Path src, Path dst, Configuration conf) throws IOExcept
public HdfsEncryptionShim(URI uri, Configuration conf) throws IOException {
hdfsAdmin = new HdfsAdmin(uri, conf);
-
- try {
- String keyProviderPath = conf.get(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPSECURITYKEYPROVIDER"), null);
- if (keyProviderPath != null) {
- keyProvider = KeyProviderFactory.get(new URI(keyProviderPath), conf);
- }
- } catch (URISyntaxException e) {
- throw new IOException("Invalid HDFS security key provider path", e);
- } catch (Exception e) {
- throw new IOException("Cannot create HDFS security object: ", e);
- }
}
@Override
@@ -1003,6 +992,27 @@ public int comparePathKeyStrength(Path path1, Path path2) throws IOException {
return compareKeyStrength(zone1.getKeyName(), zone2.getKeyName());
}
+ @Override
+ public void createEncryptionZone(Path path, String keyName) throws IOException {
+ hdfsAdmin.createEncryptionZone(path, keyName);
+ }
+
+ @Override
+ public void createKey(String keyName, String length, String cipher, Configuration conf)
+ throws IOException, NoSuchAlgorithmException {
+
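+      // keyProvider is captured from the mini DFS cluster in getMiniDfs(); this method is test-only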
+ if (keyProvider.getMetadata(keyName) != null) {
+ LOG.info("key " + keyName + " has already exists");
+ return;
+ }
+      Map<String, String> confMap = ShimLoader.getHadoopShims().getHadoopConfNames();
+ conf.set(confMap.get("HADOOPSECURITYKEYCIPHER"), cipher);
+ conf.set(confMap.get("HADOOPSECURITYKEYBITLENGTH"), length);
+ Options options = new Options(conf);
+ keyProvider.createKey(keyName, options);
+ keyProvider.flush();
+ }
+
/**
* Compares two encryption key strengths.
*
diff --git shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
index fa66a4a..9ea1398 100644
--- shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
+++ shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
@@ -26,6 +26,7 @@
import java.net.URISyntaxException;
import java.nio.ByteBuffer;
import java.security.AccessControlException;
+import java.security.NoSuchAlgorithmException;
import java.security.PrivilegedExceptionAction;
import java.util.Comparator;
import java.util.List;
@@ -34,6 +35,7 @@
import javax.security.auth.login.LoginException;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -400,7 +402,11 @@ public void refreshDefaultQueue(Configuration conf, String userName)
/**
* Verify proxy access to given UGI for given user
- * @param ugi
+ * @param proxyUser
+ * @param realUserUgi
+ * @param ipAddress
+ * @param conf
+ * @throws IOException
*/
public void authorizeProxyAccess(String proxyUser, UserGroupInformation realUserUgi,
String ipAddress, Configuration conf) throws IOException;
@@ -819,6 +825,19 @@ public void checkFileAccess(FileSystem fs, FileStatus status, FsAction action)
* @throws IOException If an error occurred attempting to get encryption/key metadata
*/
public int comparePathKeyStrength(Path path1, Path path2) throws IOException;
+
+ /**
+   * Creates an encryption zone at the given HDFS path, protected by the named key.
+   * @param path HDFS path at which to create the encryption zone
+   * @param keyName name of an existing encryption key
+   * @throws IOException if the encryption zone cannot be created
+ */
+ @VisibleForTesting
+ public void createEncryptionZone(Path path, String keyName) throws IOException;
+
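+  /**
+   * Creates an encryption key with the given name, bit length, and cipher,
+   * using the key provider resolved from the configuration.
+   * @param keyName name of the key to create
+   * @param length key bit length as a string, e.g. "128" or "256"
+   * @param cipher cipher suite, e.g. "AES/CTR/NoPadding"
+   * @param conf configuration carrying the key provider settings
+   * @throws IOException if the key cannot be created
+   * @throws NoSuchAlgorithmException if the requested algorithm is unavailable
+   */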
+ @VisibleForTesting
+ public void createKey(String keyName, String length, String cipher, Configuration conf)
+ throws IOException, NoSuchAlgorithmException;
}
/**
@@ -842,6 +861,16 @@ public int comparePathKeyStrength(Path path1, Path path2) throws IOException {
/* not supported */
return 0;
}
+
+ @Override
+ public void createEncryptionZone(Path path, String keyName) {
+ /* not supported */
+ }
+
+ @Override
+ public void createKey(String keyName, String length, String cipher, Configuration conf) {
+ /* not supported */
+ }
}
/**