diff --git itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
index 751d8ea..dad4ebb 100644
--- itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
+++ itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
@@ -81,6 +81,7 @@
     private boolean isHTTPTransMode = false;
     private boolean isMetastoreRemote;
     private boolean usePortsFromConf = false;
+    private boolean isHA = false;
 
     public Builder() {
     }
@@ -107,6 +108,11 @@ public Builder withConf(HiveConf hiveConf) {
       return this;
     }
 
+    public Builder withHA() {
+      this.isHA = true;
+      return this;
+    }
+
     /**
      * Start HS2 with HTTP transport mode, default is binary mode
      * @return this Builder
@@ -127,7 +133,7 @@ public MiniHS2 build() throws Exception {
         hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, HS2_BINARY_MODE);
       }
       return new MiniHS2(hiveConf, miniClusterType, useMiniKdc, serverPrincipal, serverKeytab,
-          isMetastoreRemote, usePortsFromConf);
+          isMetastoreRemote, usePortsFromConf, isHA);
     }
   }
@@ -165,7 +171,7 @@ public boolean isUseMiniKdc() {
 
   private MiniHS2(HiveConf hiveConf, MiniClusterType miniClusterType, boolean useMiniKdc,
       String serverPrincipal, String serverKeytab, boolean isMetastoreRemote,
-      boolean usePortsFromConf) throws Exception {
+      boolean usePortsFromConf, boolean isHA) throws Exception {
     super(hiveConf, "localhost",
         (usePortsFromConf ? hiveConf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT) : MetaStoreUtils.findFreePort()),
         (usePortsFromConf ? hiveConf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT) : MetaStoreUtils.findFreePort()));
@@ -179,7 +185,7 @@ private MiniHS2(HiveConf hiveConf, MiniClusterType miniClusterType, boolean useM
 
     if (miniClusterType != MiniClusterType.DFS_ONLY) {
       // Initialize dfs
-      dfs = ShimLoader.getHadoopShims().getMiniDfs(hiveConf, 4, true, null);
+      dfs = ShimLoader.getHadoopShims().getMiniDfs(hiveConf, 4, true, null, isHA);
       fs = dfs.getFileSystem();
       String uriString = WindowsPathUtil.getHdfsUriString(fs.getUri().toString());
@@ -254,7 +260,7 @@ public MiniHS2(HiveConf hiveConf, MiniClusterType clusterType) throws Exception
 
   public MiniHS2(HiveConf hiveConf, MiniClusterType clusterType, boolean usePortsFromConf)
       throws Exception {
-    this(hiveConf, clusterType, false, null, null, false, usePortsFromConf);
+    this(hiveConf, clusterType, false, null, null, false, usePortsFromConf, false);
  }
 
   public void start(Map<String, String> confOverlay) throws Exception {
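With the builder change above, a test can opt into an HA-backed mini cluster. Here is a minimal usage sketch, not part of the patch: the class and variable names and the empty confOverlay map are illustrative, and stop() is assumed from the existing MiniHS2 API.

import java.util.HashMap;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.jdbc.miniHS2.MiniHS2;

public class MiniHS2HAUsageSketch {
  public static void main(String[] args) throws Exception {
    HiveConf hiveConf = new HiveConf();
    // withHA() sets the new isHA flag, which the private constructor passes
    // through to getMiniDfs(), so the backing MiniDFSCluster comes up with
    // two NameNodes instead of one.
    MiniHS2 miniHS2 = new MiniHS2.Builder()
        .withConf(hiveConf)
        .withHA()
        .build();
    miniHS2.start(new HashMap<String, String>());
    // ... run JDBC tests against miniHS2 here ...
    miniHS2.stop();
  }
}
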
diff --git shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index 9a3a31c..b6d8e32 100644
--- shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -64,6 +64,7 @@
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
@@ -574,6 +575,14 @@ public void setupConfiguration(Configuration conf) {
     }
   }
 
+  @Override
+  public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf,
+      int numDataNodes,
+      boolean format,
+      String[] racks) throws IOException {
+    return getMiniDfs(conf, numDataNodes, format, racks, false);
+  }
+
   // Don't move this code to the parent class. There's a binary
   // incompatibility between hadoop 1 and 2 wrt MiniDFSCluster and we
   // need to have two different shim classes even though they are
@@ -582,16 +591,32 @@ public void setupConfiguration(Configuration conf) {
   public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf,
       int numDataNodes,
       boolean format,
-      String[] racks) throws IOException {
+      String[] racks,
+      boolean isHA) throws IOException {
     configureImpersonation(conf);
-    MiniDFSCluster miniDFSCluster = new MiniDFSCluster(conf, numDataNodes, format, racks);
+    MiniDFSCluster miniDFSCluster;
+    if (isHA) {
+      MiniDFSNNTopology topo = new MiniDFSNNTopology()
+          .addNameservice(new MiniDFSNNTopology.NSConf("minidfs").addNN(
+              new MiniDFSNNTopology.NNConf("nn1")).addNN(
+              new MiniDFSNNTopology.NNConf("nn2")));
+      miniDFSCluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(numDataNodes).format(format)
+          .racks(racks).nnTopology(topo).build();
+      miniDFSCluster.waitActive();
+      miniDFSCluster.transitionToActive(0);
+    } else {
+      miniDFSCluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(numDataNodes).format(format)
+          .racks(racks).build();
+    }
     // Need to set the client's KeyProvider to the NN's for JKS,
     // else the updates do not get flushed properly
-    KeyProviderCryptoExtension keyProvider = miniDFSCluster.getNameNode().getNamesystem().getProvider();
+    KeyProviderCryptoExtension keyProvider = miniDFSCluster.getNameNode(0).getNamesystem().getProvider();
     if (keyProvider != null) {
       try {
-        setKeyProvider(miniDFSCluster.getFileSystem().getClient(), keyProvider);
+        setKeyProvider(miniDFSCluster.getFileSystem(0).getClient(), keyProvider);
       } catch (Exception err) {
         throw new IOException(err);
       }
@@ -631,7 +656,7 @@ public MiniDFSShim(MiniDFSCluster cluster) {
 
     @Override
     public FileSystem getFileSystem() throws IOException {
-      return cluster.getFileSystem();
+      return cluster.getFileSystem(0);
     }
 
     @Override
diff --git shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
index 7a5a9b5..f3dc54e 100644
--- shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
+++ shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
@@ -113,6 +113,12 @@ MiniDFSShim getMiniDfs(Configuration conf,
       boolean format,
       String[] racks) throws IOException;
 
+  MiniDFSShim getMiniDfs(Configuration conf,
+      int numDataNodes,
+      boolean format,
+      String[] racks,
+      boolean isHA) throws IOException;
+
   /**
    * Shim around the functions in MiniDFSCluster that Hive uses.
    */
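For completeness, a sketch of exercising the new five-argument overload directly through the shim layer. Again illustrative rather than part of the patch: the class and variable names are hypothetical, while getMiniDfs, getFileSystem, and shutdown are the shim methods shown in the diffs above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;

public class HAMiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // isHA = true builds the two-NameNode "minidfs" nameservice (nn1/nn2),
    // waits for the cluster to come up, and transitions NameNode 0 to
    // active before returning.
    HadoopShims.MiniDFSShim dfs =
        ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null, true);
    FileSystem fs = dfs.getFileSystem(); // delegates to cluster.getFileSystem(0)
    System.out.println("HA mini DFS is up at " + fs.getUri());
    dfs.shutdown();
  }
}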