diff --git build-common.xml build-common.xml index e68ecea..3049ec2 100644 --- build-common.xml +++ build-common.xml @@ -443,7 +443,7 @@ - + diff --git build.properties build.properties index 2d293a6..8c9c2e6 100644 --- build.properties +++ build.properties @@ -131,6 +131,9 @@ datanucleus.repo=http://www.datanucleus.org/downloads/maven2 # JVM arguments jvm.args=-XX:-UseSplitVerifier +# junit jvm args +junit.jvm.args=-XX:-UseSplitVerifier -XX:+CMSClassUnloadingEnabled -XX:+CMSPermGenSweepingEnabled -XX:MaxPermSize=128M + # # Eclipse Properties # diff --git shims/ivy.xml shims/ivy.xml index a18634b..9126abe 100644 --- shims/ivy.xml +++ shims/ivy.xml @@ -91,6 +91,36 @@ + + + + + + + + + + + + + + + + + + + + + + + diff --git shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java index b7515a5..6d9d2ee 100644 --- shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java +++ shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Trash; import org.apache.hadoop.hive.shims.HadoopShimsSecure; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.mapred.ClusterStatus; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.TaskLogServlet; @@ -119,4 +120,35 @@ public long getDefaultBlockSize(FileSystem fs, Path path) { public short getDefaultReplication(FileSystem fs, Path path) { return fs.getDefaultReplication(); } + + // Don't move this code to the parent class. There's a binary + // incompatibility between hadoop 1 and 2 wrt MiniDFSCluster and we + // need to have two different shim classes even though they are + // exactly the same. 
+ public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf, + int numDataNodes, + boolean format, + String[] racks) throws IOException { + return new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks)); + } + + /** + * MiniDFSShim. + * + */ + public class MiniDFSShim implements HadoopShims.MiniDFSShim { + private final MiniDFSCluster cluster; + + public MiniDFSShim(MiniDFSCluster cluster) { + this.cluster = cluster; + } + + public FileSystem getFileSystem() throws IOException { + return cluster.getFileSystem(); + } + + public void shutdown() { + cluster.shutdown(); + } + } } diff --git shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java index 9a22355..fa2040b 100644 --- shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java +++ shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.Trash; import org.apache.hadoop.hive.shims.HadoopShims.JobTrackerState; import org.apache.hadoop.hive.shims.HadoopShimsSecure; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.mapred.ClusterStatus; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.Job; @@ -132,4 +133,35 @@ public boolean moveToAppropriateTrash(FileSystem fs, Path path, Configuration co throws IOException { return Trash.moveToAppropriateTrash(fs, path, conf); } + + // Don't move this code to the parent class. There's a binary + // incompatibility between hadoop 1 and 2 wrt MiniDFSCluster and we + // need to have two different shim classes even though they are + // exactly the same. + public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf, + int numDataNodes, + boolean format, + String[] racks) throws IOException { + return new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks)); + } + + /** + * MiniDFSShim.
+ * + */ + public class MiniDFSShim implements HadoopShims.MiniDFSShim { + private final MiniDFSCluster cluster; + + public MiniDFSShim(MiniDFSCluster cluster) { + this.cluster = cluster; + } + + public FileSystem getFileSystem() throws IOException { + return cluster.getFileSystem(); + } + + public void shutdown() { + cluster.shutdown(); + } + } } diff --git shims/src/common-secure/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java shims/src/common-secure/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java index 2d32b07..23ff543 100644 --- shims/src/common-secure/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java +++ shims/src/common-secure/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java @@ -37,7 +37,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil; import org.apache.hadoop.hive.thrift.DelegationTokenSelector; import org.apache.hadoop.http.HtmlQuoting; @@ -104,33 +103,6 @@ public void setTmpFiles(String prop, String files) { // gone in 20+ } - public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf, - int numDataNodes, - boolean format, - String[] racks) throws IOException { - return new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks)); - } - - /** - * MiniDFSShim. - * - */ - public class MiniDFSShim implements HadoopShims.MiniDFSShim { - private final MiniDFSCluster cluster; - - public MiniDFSShim(MiniDFSCluster cluster) { - this.cluster = cluster; - } - - public FileSystem getFileSystem() throws IOException { - return cluster.getFileSystem(); - } - - public void shutdown() { - cluster.shutdown(); - } - } - /** * We define this function here to make the code compatible between * hadoop 0.17 and hadoop 0.20.