diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
index 03d4075..c12f51e 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
@@ -409,7 +409,18 @@ public CliAdapter getCliAdapter() {
   }
 
   protected void setMetastoreType(MetastoreType mt) {
-    metastoreType=mt;
+    String metaStoreTypeProperty = getSysPropValue("metaStoreType");
+    if (metaStoreTypeProperty != null) {
+      if (metaStoreTypeProperty.equalsIgnoreCase("sql")) {
+        metastoreType = MetastoreType.sql;
+      } else if (metaStoreTypeProperty.equalsIgnoreCase("hbase")) {
+        metastoreType = MetastoreType.hbase;
+      } else {
+        throw new IllegalArgumentException("Unknown metastore type: " + metaStoreTypeProperty);
+      }
+    } else {
+      metastoreType = mt;
+    }
   }
 
   public MetastoreType getMetastoreType() {
diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 909d7f6..ebc93a9 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -50,6 +50,7 @@
 import java.util.Collection;
 import java.util.Comparator;
 import java.util.Deque;
+import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -152,7 +153,6 @@
   private String testWarehouse;
 
   private final String testFiles;
-  private final boolean localMode;
   protected final String outDir;
   protected final String logDir;
   private final TreeMap<String, String> qMap;
@@ -411,6 +411,11 @@ private void createRemoteDirs() {
     }
   }
 
+  private enum CoreClusterType {
+    mr,
+    tez,
+    spark
+  }
 
   public enum FsType {
     local,
@@ -420,35 +425,48 @@
 
   public enum MiniClusterType {
-    mr(FsType.hdfs),
-    tez(FsType.hdfs),
-    spark(FsType.local),
-    miniSparkOnYarn(FsType.hdfs),
-    llap(FsType.hdfs),
-    none(FsType.local);
+    mr(CoreClusterType.mr, FsType.hdfs),
+    tez(CoreClusterType.tez, FsType.hdfs),
+    tez_local(CoreClusterType.tez, FsType.local),
+    spark(CoreClusterType.spark, FsType.local),
+    miniSparkOnYarn(CoreClusterType.spark, FsType.hdfs),
+    llap(CoreClusterType.tez, FsType.hdfs),
+    llap_local(CoreClusterType.tez, FsType.local),
+    none(CoreClusterType.mr, FsType.local);
 
+    private final CoreClusterType coreClusterType;
     private final FsType defaultFsType;
 
-    MiniClusterType(FsType defaultFsType) {
+    MiniClusterType(CoreClusterType coreClusterType, FsType defaultFsType) {
+      this.coreClusterType = coreClusterType;
       this.defaultFsType = defaultFsType;
     }
 
+    public CoreClusterType getCoreClusterType() {
+      return coreClusterType;
+    }
+
     public FsType getDefaultFsType() {
       return defaultFsType;
     }
 
     public static MiniClusterType valueForString(String type) {
+      // Replace this with valueOf.
if (type.equals("miniMR")) { return mr; } else if (type.equals("tez")) { return tez; + } else if (type.equals("tez_local")) { + return tez_local; } else if (type.equals("spark")) { return spark; } else if (type.equals("miniSparkOnYarn")) { return miniSparkOnYarn; } else if (type.equals("llap")) { return llap; + } else if (type.equals("llap_local")) { + return llap_local; } else { return none; } @@ -487,25 +505,24 @@ public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, String confDir, String hadoopVer, String initScript, String cleanupScript, boolean useHBaseMetastore, boolean withLlapIo) throws Exception { this(outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript, - useHBaseMetastore, withLlapIo, false, null); + useHBaseMetastore, withLlapIo, null); } public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, String confDir, String hadoopVer, String initScript, String cleanupScript, - boolean useHBaseMetastore, boolean withLlapIo, boolean localMode, FsType fsType) + boolean useHBaseMetastore, boolean withLlapIo, FsType fsType) throws Exception { LOG.info("Setting up QTestUtil with outDir={}, logDir={}, clusterType={}, confDir={}," + " hadoopVer={}, initScript={}, cleanupScript={}, useHbaseMetaStore={}, withLlapIo={}," + - " localMode={}, fsType={}" + " fsType={}" , outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript, - useHBaseMetastore, withLlapIo, localMode, fsType); + useHBaseMetastore, withLlapIo, fsType); Preconditions.checkNotNull(clusterType, "ClusterType cannot be null"); if (fsType != null) { this.fsType = fsType; } else { this.fsType = clusterType.getDefaultFsType(); } - this.localMode = localMode; this.outDir = outDir; this.logDir = logDir; this.useHBaseMetastore = useHBaseMetastore; @@ -604,28 +621,22 @@ private void setupFileSystem(HadoopShims shims) throws IOException { private void setupMiniCluster(HadoopShims shims, String confDir) throws IOException { - if (localMode) { - Preconditions - .checkState(clusterType == MiniClusterType.tez || clusterType == MiniClusterType.llap, - "localMode can currently only be set for tez or llap"); - } - String uriString = WindowsPathUtil.getHdfsUriString(fs.getUri().toString()); - if (clusterType == MiniClusterType.tez || clusterType == MiniClusterType.llap) { + if (clusterType.getCoreClusterType() == CoreClusteType.tez) { if (confDir != null && !confDir.isEmpty()) { conf.addResource(new URL("file://" + new File(confDir).toURI().getPath() + "/tez-site.xml")); } int numTrackers; - if (clusterType == MiniClusterType.tez) { - numTrackers = 4; - } else { + if (EnumSet.of(MiniClusterType.llap, MiniClusterType.llap_local).contains(clusterType)) { llapCluster = LlapItUtils.startAndGetMiniLlapCluster(conf, setup.zooKeeperCluster, confDir); numTrackers = 2; + } else { + numTrackers = 4; } - if (localMode) { - mr = shims.getLocalMiniTezCluster(conf, clusterType == MiniClusterType.llap); + if (EnumSet.of(MiniClusterType.llap_local, MiniClusterType.tez_local).contains(clusterType)) { + mr = shims.getLocalMiniTezCluster(conf, clusterType == MiniClusterType.llap_local); } else { mr = shims.getMiniTezCluster(conf, numTrackers, uriString); } @@ -642,7 +653,7 @@ public void shutdown() throws Exception { cleanUp(); } - if (clusterType == MiniClusterType.tez || clusterType == MiniClusterType.llap) { + if (clusterType.getCoreClusterType() == CoreClusteType.tez) { SessionState.get().getTezSession().close(false); } setup.tearDown(); @@ -1152,8 +1163,7 @@ public String 
 
     SessionState oldSs = SessionState.get();
 
     boolean canReuseSession = !qNoSessionReuseQuerySet.contains(tname);
-    if (oldSs != null && canReuseSession
-        && (clusterType == MiniClusterType.tez || clusterType == MiniClusterType.llap)) {
+    if (oldSs != null && canReuseSession && clusterType.getCoreClusterType() == CoreClusterType.tez) {
       // Copy the tezSessionState from the old CliSessionState.
       tezSessionState = oldSs.getTezSession();
       oldSs.setTezSession(null);
@@ -1161,8 +1171,7 @@ public String cliInit(String tname, boolean recreate) throws Exception {
       oldSs.close();
     }
 
-    if (oldSs != null && (clusterType == MiniClusterType.spark
-        || clusterType == MiniClusterType.miniSparkOnYarn)) {
+    if (oldSs != null && clusterType.getCoreClusterType() == CoreClusterType.spark) {
       sparkSession = oldSs.getSparkSession();
       ss.setSparkSession(sparkSession);
       oldSs.setSparkSession(null);
@@ -1225,8 +1234,7 @@ private CliSessionState startSessionState(boolean canReuseSession)
     ss.err = System.out;
 
     SessionState oldSs = SessionState.get();
-    if (oldSs != null && canReuseSession
-        && (clusterType == MiniClusterType.tez || clusterType == MiniClusterType.llap)) {
+    if (oldSs != null && canReuseSession && clusterType.getCoreClusterType() == CoreClusterType.tez) {
       // Copy the tezSessionState from the old CliSessionState.
       tezSessionState = oldSs.getTezSession();
       ss.setTezSession(tezSessionState);
@@ -1234,8 +1242,7 @@ private CliSessionState startSessionState(boolean canReuseSession)
       oldSs.close();
     }
 
-    if (oldSs != null && (clusterType == MiniClusterType.spark
-        || clusterType == MiniClusterType.miniSparkOnYarn)) {
+    if (oldSs != null && clusterType.getCoreClusterType() == CoreClusterType.spark) {
       sparkSession = oldSs.getSparkSession();
       ss.setSparkSession(sparkSession);
       oldSs.setSparkSession(null);
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index 53a2295..752e6ee 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -116,8 +116,6 @@ public LlapDaemon(Configuration daemonConf, int numExecutors, long executorMemor
       int mngPort, int shufflePort, int webPort, String appName) {
     super("LlapDaemon");
 
-    initializeLogging(daemonConf);
-
     printAsciiArt();
 
     Preconditions.checkArgument(numExecutors > 0);
@@ -278,7 +276,7 @@ public LlapDaemon(Configuration daemonConf, int numExecutors, long executorMemor
     addIfService(amReporter);
   }
 
-  private void initializeLogging(final Configuration conf) {
+  private static void initializeLogging(final Configuration conf) {
     long start = System.currentTimeMillis();
     URL llap_l4j2 = LlapDaemon.class.getClassLoader().getResource(
         LlapConstants.LOG4j2_PROPERTIES_FILE);
@@ -450,6 +448,7 @@ public static void main(String[] args) throws Exception {
       long ioMemoryBytes = HiveConf.getSizeVar(daemonConf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
       boolean isDirectCache = HiveConf.getBoolVar(daemonConf, ConfVars.LLAP_ALLOCATOR_DIRECT);
       boolean isLlapIo = HiveConf.getBoolVar(daemonConf, HiveConf.ConfVars.LLAP_IO_ENABLED, true);
+      LlapDaemon.initializeLogging(daemonConf);
       llapDaemon = new LlapDaemon(daemonConf, numExecutors, executorMemoryBytes, isLlapIo,
           isDirectCache, ioMemoryBytes, localDirs, rpcPort, mngPort, shufflePort, webPort,
           appName);
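
For review context, here is a self-contained sketch that distills the refactoring this patch applies to QTestUtil: each MiniClusterType constant now carries its execution engine (CoreClusterType) and its default filesystem, so call sites test a single attribute instead of enumerating constants such as tez || llap or spark || miniSparkOnYarn. The enum names mirror the patch, but the ClusterTypeSketch class and its main() driver are hypothetical scaffolding added only to make the example runnable; they are not code from the patch.

// Illustrative only, not part of the patch: a reduction of the
// enum-with-attributes pattern introduced in QTestUtil.
public class ClusterTypeSketch {

  // The execution engine behind a mini cluster flavor.
  private enum CoreClusterType { mr, tez, spark }

  // The filesystem a flavor runs against by default.
  private enum FsType { local, hdfs }

  public enum MiniClusterType {
    mr(CoreClusterType.mr, FsType.hdfs),
    tez(CoreClusterType.tez, FsType.hdfs),
    tez_local(CoreClusterType.tez, FsType.local),
    spark(CoreClusterType.spark, FsType.local),
    miniSparkOnYarn(CoreClusterType.spark, FsType.hdfs),
    llap(CoreClusterType.tez, FsType.hdfs),
    llap_local(CoreClusterType.tez, FsType.local),
    none(CoreClusterType.mr, FsType.local);

    private final CoreClusterType coreClusterType;
    private final FsType defaultFsType;

    MiniClusterType(CoreClusterType coreClusterType, FsType defaultFsType) {
      this.coreClusterType = coreClusterType;
      this.defaultFsType = defaultFsType;
    }

    public CoreClusterType getCoreClusterType() {
      return coreClusterType;
    }

    public FsType getDefaultFsType() {
      return defaultFsType;
    }
  }

  public static void main(String[] args) {
    for (MiniClusterType t : MiniClusterType.values()) {
      // The old code spelled this out per constant (tez || llap); with the
      // attribute, tez_local and llap_local need no new call-site branches.
      boolean usesTezSession = t.getCoreClusterType() == CoreClusterType.tez;
      System.out.printf("%-16s engine=%-5s fs=%-5s usesTezSession=%b%n",
          t, t.getCoreClusterType(), t.getDefaultFsType(), usesTezSession);
    }
  }
}

With the attribute in place, adding a local variant is a one-line enum change rather than an audit of every clusterType comparison, which is what lets the patch delete the localMode flag and its Preconditions.checkState guard.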