diff --git a/itests/hive-unit-hadoop2/src/test/java/org/apache/hive/jdbc/TestSchedulerQueue.java b/itests/hive-unit-hadoop2/src/test/java/org/apache/hive/jdbc/TestSchedulerQueue.java
index 0c5f8a5..7507d47 100644
--- a/itests/hive-unit-hadoop2/src/test/java/org/apache/hive/jdbc/TestSchedulerQueue.java
+++ b/itests/hive-unit-hadoop2/src/test/java/org/apache/hive/jdbc/TestSchedulerQueue.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
import org.apache.hive.jdbc.miniHS2.MiniHS2;
+import org.apache.hive.jdbc.miniHS2.MiniHS2.MiniClusterType;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -76,7 +77,7 @@ public static void beforeTest() throws Exception {
@Before
public void setUp() throws Exception {
DriverManager.setLoginTimeout(0);
- miniHS2 = new MiniHS2(conf, true);
+ miniHS2 = new MiniHS2(conf, MiniClusterType.MR);
miniHS2.setConfProperty(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false");
miniHS2.setConfProperty(HiveConf.ConfVars.HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE.varname,
"true");
diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml
index 1a2f131..aef2aa4 100644
--- a/itests/hive-unit/pom.xml
+++ b/itests/hive-unit/pom.xml
@@ -128,6 +128,49 @@
<version>${mockito-all.version}</version>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-tests</artifactId>
+ <version>${hadoop-23.version}</version>
+ <scope>test</scope>
+ <classifier>tests</classifier>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-client</artifactId>
+ <version>${hadoop-23.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.tez</groupId>
+ <artifactId>tez-tests</artifactId>
+ <version>${tez.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.tez</groupId>
+ <artifactId>tez-api</artifactId>
+ <version>${tez.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.tez</groupId>
+ <artifactId>tez-runtime-library</artifactId>
+ <version>${tez.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.tez</groupId>
+ <artifactId>tez-mapreduce</artifactId>
+ <version>${tez.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.tez</groupId>
+ <artifactId>tez-dag</artifactId>
+ <version>${tez.version}</version>
+ <scope>test</scope>
+ </dependency>
diff --git a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
index 32b971c..adb8a71 100644
--- a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
+++ b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.WindowsPathUtil;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim;
import org.apache.hadoop.hive.shims.HadoopShims.MiniMrShim;
@@ -59,15 +60,24 @@
private MiniMrShim mr;
private MiniDFSShim dfs;
private FileSystem localFS;
- private boolean useMiniMR = false;
private boolean useMiniKdc = false;
private final String serverPrincipal;
- private final String serverKeytab;
private final boolean isMetastoreRemote;
+ private MiniClusterType miniClusterType = MiniClusterType.DFS_ONLY;
+
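+ /**
+ * The kind of mini cluster backing this MiniHS2: a MiniMR cluster, a MiniTez
+ * cluster, or no compute cluster at all (local file system only).
+ */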
+ public enum MiniClusterType {
+ MR,
+ TEZ,
+ DFS_ONLY;
+ }
public static class Builder {
private HiveConf hiveConf = new HiveConf();
- private boolean useMiniMR = false;
+ private MiniClusterType miniClusterType = MiniClusterType.DFS_ONLY;
private boolean useMiniKdc = false;
private String serverPrincipal;
private String serverKeytab;
@@ -78,7 +84,7 @@ public Builder() {
}
public Builder withMiniMR() {
- this.useMiniMR = true;
+ this.miniClusterType = MiniClusterType.MR;
return this;
}
@@ -110,7 +116,7 @@ public Builder withHTTPTransport(){
public MiniHS2 build() throws Exception {
- if (useMiniMR && useMiniKdc) {
+ if (miniClusterType == MiniClusterType.MR && useMiniKdc) {
throw new IOException("Can't create secure miniMr ... yet");
}
if (isHTTPTransMode) {
@@ -118,7 +124,7 @@ public MiniHS2 build() throws Exception {
} else {
hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, HS2_BINARY_MODE);
}
- return new MiniHS2(hiveConf, useMiniMR, useMiniKdc, serverPrincipal, serverKeytab,
+ return new MiniHS2(hiveConf, miniClusterType, useMiniKdc, serverPrincipal, serverKeytab,
isMetastoreRemote);
}
}
@@ -143,38 +149,52 @@ public FileSystem getLocalFS() {
return localFS;
}
- public boolean isUseMiniMR() {
- return useMiniMR;
+ public MiniClusterType getMiniClusterType() {
+ return miniClusterType;
}
- public void setUseMiniMR(boolean useMiniMR) {
- this.useMiniMR = useMiniMR;
+ public void setMiniClusterType(MiniClusterType miniClusterType) {
+ this.miniClusterType = miniClusterType;
}
public boolean isUseMiniKdc() {
return useMiniKdc;
}
- private MiniHS2(HiveConf hiveConf, boolean useMiniMR, boolean useMiniKdc,
+ private MiniHS2(HiveConf hiveConf, MiniClusterType miniClusterType, boolean useMiniKdc,
String serverPrincipal, String serverKeytab, boolean isMetastoreRemote) throws Exception {
super(hiveConf, "localhost", MetaStoreUtils.findFreePort(), MetaStoreUtils.findFreePort());
- this.useMiniMR = useMiniMR;
+ this.miniClusterType = miniClusterType;
this.useMiniKdc = useMiniKdc;
this.serverPrincipal = serverPrincipal;
- this.serverKeytab = serverKeytab;
this.isMetastoreRemote = isMetastoreRemote;
baseDir = Files.createTempDir();
localFS = FileSystem.getLocal(hiveConf);
FileSystem fs;
- if (useMiniMR) {
+
+ if (miniClusterType != MiniClusterType.DFS_ONLY) {
+ // Initialize dfs
dfs = ShimLoader.getHadoopShims().getMiniDfs(hiveConf, 4, true, null);
fs = dfs.getFileSystem();
- mr = ShimLoader.getHadoopShims().getMiniMrCluster(hiveConf, 4,
- fs.getUri().toString(), 1);
+ String uriString = WindowsPathUtil.getHdfsUriString(fs.getUri().toString());
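+ // Normalize the HDFS URI so the mini cluster also works with Windows paths.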
+
+ // Initialize the execution engine based on cluster type
+ switch (miniClusterType) {
+ case TEZ:
+ mr = ShimLoader.getHadoopShims().getMiniTezCluster(hiveConf, 4, uriString, 1);
+ break;
+ case MR:
+ mr = ShimLoader.getHadoopShims().getMiniMrCluster(hiveConf, 4, uriString, 1);
+ break;
+ default:
+ throw new IllegalArgumentException("Unsupported cluster type " + miniClusterType);
+ }
// store the config in system properties
mr.setupConfiguration(getHiveConf());
baseDfsDir = new Path(new Path(fs.getUri()), "/base");
} else {
+ // This is DFS only mode, just initialize the dfs root directory.
fs = FileSystem.getLocal(hiveConf);
baseDfsDir = new Path("file://"+ baseDir.toURI().getPath());
}
@@ -213,11 +232,11 @@ private MiniHS2(HiveConf hiveConf, boolean useMiniMR, boolean useMiniKdc,
}
public MiniHS2(HiveConf hiveConf) throws Exception {
- this(hiveConf, false);
+ this(hiveConf, MiniClusterType.DFS_ONLY);
}
- public MiniHS2(HiveConf hiveConf, boolean useMiniMR) throws Exception {
- this(hiveConf, useMiniMR, false, null, null, false);
+ public MiniHS2(HiveConf hiveConf, MiniClusterType clusterType) throws Exception {
+ this(hiveConf, clusterType, false, null, null, false);
}
public void start(Map<String, String> confOverlay) throws Exception {
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java
index ef64483..f649fc2 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hive.jdbc.miniHS2.MiniHS2;
+import org.apache.hive.jdbc.miniHS2.MiniHS2.MiniClusterType;
import org.apache.hive.service.cli.HiveSQLException;
import org.apache.hive.service.cli.session.HiveSessionHook;
import org.apache.hive.service.cli.session.HiveSessionHookContext;
@@ -82,7 +83,7 @@ public static void beforeTest() throws Exception {
dataFilePath = new Path(dataFileDir, "kv1.txt");
DriverManager.setLoginTimeout(0);
conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
- miniHS2 = new MiniHS2(conf, true);
+ miniHS2 = new MiniHS2(conf, MiniClusterType.MR);
Map<String, String> overlayProps = new HashMap<String, String>();
overlayProps.put(ConfVars.HIVE_SERVER2_SESSION_HOOK.varname,
LocalClusterSparkSessionHook.class.getName());
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java
index e459c4e..bcd65a9 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java
@@ -37,6 +37,7 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hive.jdbc.miniHS2.MiniHS2;
+import org.apache.hive.jdbc.miniHS2.MiniHS2.MiniClusterType;
import org.apache.hive.service.cli.HiveSQLException;
import org.apache.hive.service.cli.session.HiveSessionHook;
import org.apache.hive.service.cli.session.HiveSessionHookContext;
@@ -73,7 +74,7 @@ public static void beforeTest() throws Exception {
dataFilePath = new Path(dataFileDir, "kv1.txt");
DriverManager.setLoginTimeout(0);
conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
- miniHS2 = new MiniHS2(conf, true);
+ miniHS2 = new MiniHS2(conf, MiniClusterType.MR);
Map<String, String> overlayProps = new HashMap<String, String>();
overlayProps.put(ConfVars.HIVE_SERVER2_SESSION_HOOK.varname,
MiniMrTestSessionHook.class.getName());
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java
index e7383c9..0c3479d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hive.jdbc.miniHS2.MiniHS2;
+import org.apache.hive.jdbc.miniHS2.MiniHS2.MiniClusterType;
import org.apache.hive.service.cli.HiveSQLException;
import org.apache.hive.service.cli.session.HiveSessionHook;
import org.apache.hive.service.cli.session.HiveSessionHookContext;
@@ -91,7 +92,7 @@ public static void beforeTest() throws Exception {
dataFilePath = new Path(dataFileDir, "kv1.txt");
DriverManager.setLoginTimeout(0);
conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
- miniHS2 = new MiniHS2(conf, true);
+ miniHS2 = new MiniHS2(conf, MiniClusterType.MR);
Map<String, String> overlayProps = new HashMap<String, String>();
overlayProps.put(ConfVars.HIVE_SERVER2_SESSION_HOOK.varname,
LocalClusterSparkSessionHook.class.getName());
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java
deleted file mode 100644
index 21487b0..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java
+++ /dev/null
@@ -1,379 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hive.service.cli.operation;
-
-import java.io.File;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hive.jdbc.miniHS2.MiniHS2;
-import org.apache.hive.service.cli.CLIServiceClient;
-import org.apache.hive.service.cli.FetchOrientation;
-import org.apache.hive.service.cli.FetchType;
-import org.apache.hive.service.cli.HiveSQLException;
-import org.apache.hive.service.cli.OperationHandle;
-import org.apache.hive.service.cli.OperationState;
-import org.apache.hive.service.cli.OperationStatus;
-import org.apache.hive.service.cli.RowSet;
-import org.apache.hive.service.cli.SessionHandle;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * TestOperationLoggingAPI
- * Test the FetchResults of TFetchType.LOG in thrift level.
- */
-public class TestOperationLoggingAPI {
- private static HiveConf hiveConf;
- private final String tableName = "testOperationLoggingAPI_table";
- private File dataFile;
- private CLIServiceClient client;
- private static MiniHS2 miniHS2 = null;
- private static Map<String, String> confOverlay;
- private SessionHandle sessionHandle;
- private final String sql = "select * from " + tableName;
- private final String sqlCntStar = "select count(*) from " + tableName;
- private final String[] expectedLogs = {
- "Parsing command",
- "Parse Completed",
- "Starting Semantic Analysis",
- "Semantic Analysis Completed",
- "Starting command"
- };
- private final String[] expectedLogsExecution = {
- "Number of reduce tasks determined at compile time",
- "number of splits",
- "Submitting tokens for job",
- "Ended Job"
- };
- private final String[] expectedLogsPerformance = {
- "",
- "",
- "",
- ""
- };
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- hiveConf = new HiveConf();
- hiveConf.set(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose");
- // We need to set the below parameter to test performance level logging
- hiveConf.set("hive.ql.log.PerfLogger.level", "INFO,DRFA");
- miniHS2 = new MiniHS2(hiveConf);
- confOverlay = new HashMap<String, String>();
- confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
- miniHS2.start(confOverlay);
- }
-
- /**
- * Open a session, and create a table for cases usage
- * @throws Exception
- */
- @Before
- public void setUp() throws Exception {
- dataFile = new File(hiveConf.get("test.data.files"), "kv1.txt");
- client = miniHS2.getServiceClient();
- sessionHandle = setupSession();
- }
-
- @After
- public void tearDown() throws Exception {
- // Cleanup
- String queryString = "DROP TABLE " + tableName;
- client.executeStatement(sessionHandle, queryString, null);
-
- client.closeSession(sessionHandle);
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- miniHS2.stop();
- }
-
- @Test
- public void testFetchResultsOfLog() throws Exception {
- // verify whether the sql operation log is generated and fetch correctly.
- OperationHandle operationHandle = client.executeStatement(sessionHandle, sql, null);
- RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
- FetchType.LOG);
- verifyFetchedLog(rowSetLog, expectedLogs);
- }
-
- @Test
- public void testFetchResultsOfLogAsync() throws Exception {
- // verify whether the sql operation log is generated and fetch correctly in async mode.
- OperationHandle operationHandle = client.executeStatementAsync(sessionHandle, sql, null);
-
- // Poll on the operation status till the query is completed
- boolean isQueryRunning = true;
- long pollTimeout = System.currentTimeMillis() + 100000;
- OperationStatus opStatus;
- OperationState state = null;
- RowSet rowSetAccumulated = null;
- StringBuilder logs = new StringBuilder();
-
- while (isQueryRunning) {
- // Break if polling times out
- if (System.currentTimeMillis() > pollTimeout) {
- break;
- }
- opStatus = client.getOperationStatus(operationHandle);
- Assert.assertNotNull(opStatus);
- state = opStatus.getState();
-
- rowSetAccumulated = client.fetchResults(operationHandle, FetchOrientation.FETCH_NEXT, 2000,
- FetchType.LOG);
- for (Object[] row : rowSetAccumulated) {
- logs.append(row[0]);
- }
-
- if (state == OperationState.CANCELED ||
- state == OperationState.CLOSED ||
- state == OperationState.FINISHED ||
- state == OperationState.ERROR) {
- isQueryRunning = false;
- }
- Thread.sleep(10);
- }
- // The sql should be completed now.
- Assert.assertEquals("Query should be finished", OperationState.FINISHED, state);
-
- // Verify the accumulated logs
- verifyFetchedLogPost(logs.toString(), expectedLogs, true);
-
- // Verify the fetched logs from the beginning of the log file
- RowSet rowSet = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 2000,
- FetchType.LOG);
- verifyFetchedLog(rowSet, expectedLogs);
- }
-
- @Test
- public void testFetchResultsOfLogWithOrientation() throws Exception {
- // (FETCH_FIRST) execute a sql, and fetch its sql operation log as expected value
- OperationHandle operationHandle = client.executeStatement(sessionHandle, sql, null);
- RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
- FetchType.LOG);
- int expectedLogLength = rowSetLog.numRows();
-
- // (FETCH_NEXT) execute the same sql again,
- // and fetch the sql operation log with FETCH_NEXT orientation
- OperationHandle operationHandleWithOrientation = client.executeStatement(sessionHandle, sql,
- null);
- RowSet rowSetLogWithOrientation;
- int logLength = 0;
- int maxRows = calculateProperMaxRows(expectedLogLength);
- do {
- rowSetLogWithOrientation = client.fetchResults(operationHandleWithOrientation,
- FetchOrientation.FETCH_NEXT, maxRows, FetchType.LOG);
- logLength += rowSetLogWithOrientation.numRows();
- } while (rowSetLogWithOrientation.numRows() == maxRows);
- Assert.assertEquals(expectedLogLength, logLength);
-
- // (FETCH_FIRST) fetch again from the same operation handle with FETCH_FIRST orientation
- rowSetLogWithOrientation = client.fetchResults(operationHandleWithOrientation,
- FetchOrientation.FETCH_FIRST, 1000, FetchType.LOG);
- verifyFetchedLog(rowSetLogWithOrientation, expectedLogs);
- }
-
- @Test
- public void testFetchResultsOfLogWithVerboseMode() throws Exception {
- String queryString = "set hive.server2.logging.operation.level=verbose";
- client.executeStatement(sessionHandle, queryString, null);
- // verify whether the sql operation log is generated and fetch correctly.
- OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
- RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
- FetchType.LOG);
- // Verbose Logs should contain everything, including execution and performance
- verifyFetchedLog(rowSetLog, expectedLogs);
- verifyFetchedLog(rowSetLog, expectedLogsExecution);
- verifyFetchedLog(rowSetLog, expectedLogsPerformance);
- }
-
- @Test
- public void testFetchResultsOfLogWithPerformanceMode() throws Exception {
- try {
- String queryString = "set hive.server2.logging.operation.level=performance";
- client.executeStatement(sessionHandle, queryString, null);
- // verify whether the sql operation log is generated and fetch correctly.
- OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
- RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
- FetchType.LOG);
- // rowSetLog should contain execution as well as performance logs
- verifyFetchedLog(rowSetLog, expectedLogsExecution);
- verifyFetchedLog(rowSetLog, expectedLogsPerformance);
- verifyMissingContentsInFetchedLog(rowSetLog, expectedLogs);
- } finally {
- // Restore everything to default setup to avoid discrepancy between junit test runs
- String queryString2 = "set hive.server2.logging.operation.level=verbose";
- client.executeStatement(sessionHandle, queryString2, null);
- }
- }
-
- @Test
- public void testFetchResultsOfLogWithExecutionMode() throws Exception {
- try {
- String queryString = "set hive.server2.logging.operation.level=execution";
- client.executeStatement(sessionHandle, queryString, null);
- // verify whether the sql operation log is generated and fetch correctly.
- OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
- RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
- FetchType.LOG);
- verifyFetchedLog(rowSetLog, expectedLogsExecution);
- verifyMissingContentsInFetchedLog(rowSetLog, expectedLogsPerformance);
- verifyMissingContentsInFetchedLog(rowSetLog, expectedLogs);
- } finally {
- // Restore everything to default setup to avoid discrepancy between junit test runs
- String queryString2 = "set hive.server2.logging.operation.level=verbose";
- client.executeStatement(sessionHandle, queryString2, null);
- }
- }
-
- @Test
- public void testFetchResultsOfLogWithNoneMode() throws Exception {
- try {
- String queryString = "set hive.server2.logging.operation.level=none";
- client.executeStatement(sessionHandle, queryString, null);
- // verify whether the sql operation log is generated and fetch correctly.
- OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
- RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
- FetchType.LOG);
- // We should not get any rows.
- assert(rowSetLog.numRows() == 0);
- } finally {
- // Restore everything to default setup to avoid discrepancy between junit test runs
- String queryString2 = "set hive.server2.logging.operation.level=verbose";
- client.executeStatement(sessionHandle, queryString2, null);
- }
- }
-
- @Test
- public void testFetchResultsOfLogCleanup() throws Exception {
- // Verify cleanup functionality.
- // Open a new session, since this case needs to close the session in the end.
- SessionHandle sessionHandleCleanup = setupSession();
-
- // prepare
- OperationHandle operationHandle = client.executeStatement(sessionHandleCleanup, sql, null);
- RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
- FetchType.LOG);
- verifyFetchedLog(rowSetLog, expectedLogs);
-
- File sessionLogDir = new File(
- hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION) +
- File.separator + sessionHandleCleanup.getHandleIdentifier());
- File operationLogFile = new File(sessionLogDir, operationHandle.getHandleIdentifier().toString());
-
- // check whether exception is thrown when fetching log from a closed operation.
- client.closeOperation(operationHandle);
- try {
- client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000, FetchType.LOG);
- Assert.fail("Fetch should fail");
- } catch (HiveSQLException e) {
- Assert.assertTrue(e.getMessage().contains("Invalid OperationHandle:"));
- }
-
- // check whether operation log file is deleted.
- if (operationLogFile.exists()) {
- Assert.fail("Operation log file should be deleted.");
- }
-
- // check whether session log dir is deleted after session is closed.
- client.closeSession(sessionHandleCleanup);
- if (sessionLogDir.exists()) {
- Assert.fail("Session log dir should be deleted.");
- }
- }
-
- private SessionHandle setupSession() throws Exception {
- // Open a session
- SessionHandle sessionHandle = client.openSession(null, null, null);
-
- // Change lock manager to embedded mode
- String queryString = "SET hive.lock.manager=" +
- "org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager";
- client.executeStatement(sessionHandle, queryString, null);
-
- // Drop the table if it exists
- queryString = "DROP TABLE IF EXISTS " + tableName;
- client.executeStatement(sessionHandle, queryString, null);
-
- // Create a test table
- queryString = "create table " + tableName + " (key int, value string)";
- client.executeStatement(sessionHandle, queryString, null);
-
- // Load data
- queryString = "load data local inpath '" + dataFile + "' into table " + tableName;
- client.executeStatement(sessionHandle, queryString, null);
-
- // Precondition check: verify whether the table is created and data is fetched correctly.
- OperationHandle operationHandle = client.executeStatement(sessionHandle, sql, null);
- RowSet rowSetResult = client.fetchResults(operationHandle);
- Assert.assertEquals(500, rowSetResult.numRows());
- Assert.assertEquals(238, rowSetResult.iterator().next()[0]);
- Assert.assertEquals("val_238", rowSetResult.iterator().next()[1]);
-
- return sessionHandle;
- }
-
- // Since the log length of the sql operation may vary during HIVE dev, calculate a proper maxRows.
- private int calculateProperMaxRows(int len) {
- if (len < 10) {
- return 1;
- } else if (len < 100) {
- return 10;
- } else {
- return 100;
- }
- }
-
- private String verifyFetchedLogPre(RowSet rowSet, String[] el) {
- StringBuilder stringBuilder = new StringBuilder();
-
- for (Object[] row : rowSet) {
- stringBuilder.append(row[0]);
- }
-
- return stringBuilder.toString();
- }
-
- private void verifyFetchedLog(RowSet rowSet, String[] el) {
- String logs = verifyFetchedLogPre(rowSet, el);
- verifyFetchedLogPost(logs, el, true);
- }
-
- private void verifyMissingContentsInFetchedLog(RowSet rowSet, String[] el) {
- String logs = verifyFetchedLogPre(rowSet, el);
- verifyFetchedLogPost(logs, el, false);
- }
-
- private void verifyFetchedLogPost(String logs, String[] el, boolean contains) {
- for (String log : el) {
- if (contains) {
- Assert.assertTrue("Checking for presence of " + log, logs.contains(log));
- } else {
- Assert.assertFalse("Checking for absence of " + log, logs.contains(log));
- }
- }
- }
-}
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIBase.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIBase.java
new file mode 100644
index 0000000..b1cc4b7
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIBase.java
@@ -0,0 +1,255 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.service.cli.operation;
+
+import java.io.File;
+import java.util.Map;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hive.jdbc.miniHS2.MiniHS2;
+import org.apache.hive.service.cli.CLIServiceClient;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.FetchType;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationHandle;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.SessionHandle;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * TestOperationLoggingAPIBase
+ * Base class for tests that verify the FetchResults of TFetchType.LOG at the
+ * Thrift level; engine-specific subclasses supply the expected log lines.
+ */
+public abstract class TestOperationLoggingAPIBase {
+ protected static HiveConf hiveConf;
+ protected static String tableName;
+ private File dataFile;
+ protected CLIServiceClient client;
+ protected static MiniHS2 miniHS2 = null;
+ protected static Map<String, String> confOverlay;
+ protected SessionHandle sessionHandle;
+ protected final String sql = "select * from " + tableName;
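+ // A count(*) query launches a real MR/Tez job (a simple select may be served
+ // by a fetch task), so its log should include execution and performance entries.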
+ private final String sqlCntStar = "select count(*) from " + tableName;
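+ // Expected log fragments are populated by each engine-specific subclass in
+ // its @BeforeClass method, since MR and Tez emit different log lines.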
+ protected static String[] expectedLogsVerbose;
+ protected static String[] expectedLogsExecution;
+ protected static String[] expectedLogsPerformance;
+
+ /**
+ * Open a session and create a table for the test cases to use.
+ * @throws Exception
+ */
+ @Before
+ public void setUp() throws Exception {
+ dataFile = new File(hiveConf.get("test.data.files"), "kv1.txt");
+ client = miniHS2.getServiceClient();
+ sessionHandle = setupSession();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ // Cleanup
+ String queryString = "DROP TABLE " + tableName;
+ client.executeStatement(sessionHandle, queryString, null);
+
+ client.closeSession(sessionHandle);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ miniHS2.stop();
+ }
+
+ @Test
+ public void testFetchResultsOfLogWithVerboseMode() throws Exception {
+ String queryString = "set hive.server2.logging.operation.level=verbose";
+ client.executeStatement(sessionHandle, queryString, null);
+ // verify whether the sql operation log is generated and fetched correctly.
+ OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
+ RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
+ FetchType.LOG);
+ // Verbose Logs should contain everything, including execution and performance
+ verifyFetchedLog(rowSetLog, expectedLogsVerbose);
+ verifyFetchedLog(rowSetLog, expectedLogsExecution);
+ verifyFetchedLog(rowSetLog, expectedLogsPerformance);
+ }
+
+ @Test
+ public void testFetchResultsOfLogWithPerformanceMode() throws Exception {
+ try {
+ String queryString = "set hive.server2.logging.operation.level=performance";
+ client.executeStatement(sessionHandle, queryString, null);
+ // verify whether the sql operation log is generated and fetched correctly.
+ OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
+ RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
+ FetchType.LOG);
+ // rowSetLog should contain execution as well as performance logs
+ verifyFetchedLog(rowSetLog, expectedLogsExecution);
+ verifyFetchedLog(rowSetLog, expectedLogsPerformance);
+ verifyMissingContentsInFetchedLog(rowSetLog, expectedLogsVerbose);
+ } finally {
+ // Restore everything to default setup to avoid discrepancy between junit test runs
+ String queryString2 = "set hive.server2.logging.operation.level=verbose";
+ client.executeStatement(sessionHandle, queryString2, null);
+ }
+ }
+
+ @Test
+ public void testFetchResultsOfLogWithExecutionMode() throws Exception {
+ try {
+ String queryString = "set hive.server2.logging.operation.level=execution";
+ client.executeStatement(sessionHandle, queryString, null);
+ // verify whether the sql operation log is generated and fetched correctly.
+ OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
+ RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
+ FetchType.LOG);
+ verifyFetchedLog(rowSetLog, expectedLogsExecution);
+ verifyMissingContentsInFetchedLog(rowSetLog, expectedLogsPerformance);
+ verifyMissingContentsInFetchedLog(rowSetLog, expectedLogsVerbose);
+ } finally {
+ // Restore everything to default setup to avoid discrepancy between junit test runs
+ String queryString2 = "set hive.server2.logging.operation.level=verbose";
+ client.executeStatement(sessionHandle, queryString2, null);
+ }
+ }
+
+ @Test
+ public void testFetchResultsOfLogWithNoneMode() throws Exception {
+ try {
+ String queryString = "set hive.server2.logging.operation.level=none";
+ client.executeStatement(sessionHandle, queryString, null);
+ // verify whether the sql operation log is generated and fetched correctly.
+ OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
+ RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
+ FetchType.LOG);
+ // We should not get any rows.
+ Assert.assertEquals(0, rowSetLog.numRows());
+ } finally {
+ // Restore everything to default setup to avoid discrepancy between junit test runs
+ String queryString2 = "set hive.server2.logging.operation.level=verbose";
+ client.executeStatement(sessionHandle, queryString2, null);
+ }
+ }
+
+ @Test
+ public void testFetchResultsOfLogCleanup() throws Exception {
+ // Verify cleanup functionality.
+ // Open a new session, since this case needs to close the session in the end.
+ SessionHandle sessionHandleCleanup = setupSession();
+
+ // prepare
+ OperationHandle operationHandle = client.executeStatement(sessionHandleCleanup, sql, null);
+ RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
+ FetchType.LOG);
+ verifyFetchedLog(rowSetLog, expectedLogsVerbose);
+
+ File sessionLogDir = new File(
+ hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION) +
+ File.separator + sessionHandleCleanup.getHandleIdentifier());
+ File operationLogFile = new File(sessionLogDir, operationHandle.getHandleIdentifier().toString());
+
+ // check whether exception is thrown when fetching log from a closed operation.
+ client.closeOperation(operationHandle);
+ try {
+ client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000, FetchType.LOG);
+ Assert.fail("Fetch should fail");
+ } catch (HiveSQLException e) {
+ Assert.assertTrue(e.getMessage().contains("Invalid OperationHandle:"));
+ }
+
+ // check whether operation log file is deleted.
+ if (operationLogFile.exists()) {
+ Assert.fail("Operation log file should be deleted.");
+ }
+
+ // check whether session log dir is deleted after session is closed.
+ client.closeSession(sessionHandleCleanup);
+ if (sessionLogDir.exists()) {
+ Assert.fail("Session log dir should be deleted.");
+ }
+ }
+
+ private SessionHandle setupSession() throws Exception {
+ // Open a session
+ SessionHandle sessionHandle = client.openSession(null, null, null);
+
+ // Change lock manager to embedded mode
+ String queryString = "SET hive.lock.manager=" +
+ "org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager";
+ client.executeStatement(sessionHandle, queryString, null);
+
+ // Drop the table if it exists
+ queryString = "DROP TABLE IF EXISTS " + tableName;
+ client.executeStatement(sessionHandle, queryString, null);
+
+ // Create a test table
+ queryString = "create table " + tableName + " (key int, value string)";
+ client.executeStatement(sessionHandle, queryString, null);
+
+ // Load data
+ queryString = "load data local inpath '" + dataFile + "' into table " + tableName;
+ client.executeStatement(sessionHandle, queryString, null);
+
+ // Precondition check: verify whether the table is created and data is fetched correctly.
+ OperationHandle operationHandle = client.executeStatement(sessionHandle, sql, null);
+ RowSet rowSetResult = client.fetchResults(operationHandle);
+ Assert.assertEquals(500, rowSetResult.numRows());
+ Assert.assertEquals(238, rowSetResult.iterator().next()[0]);
+ Assert.assertEquals("val_238", rowSetResult.iterator().next()[1]);
+
+ return sessionHandle;
+ }
+
+ private String verifyFetchedLogPre(RowSet rowSet, String[] el) {
+ StringBuilder stringBuilder = new StringBuilder();
+
+ for (Object[] row : rowSet) {
+ stringBuilder.append(row[0]);
+ }
+
+ return stringBuilder.toString();
+ }
+
+ protected void verifyFetchedLog(RowSet rowSet, String[] el) {
+ String logs = verifyFetchedLogPre(rowSet, el);
+ verifyFetchedLogPost(logs, el, true);
+ }
+
+ private void verifyMissingContentsInFetchedLog(RowSet rowSet, String[] el) {
+ String logs = verifyFetchedLogPre(rowSet, el);
+ verifyFetchedLogPost(logs, el, false);
+ }
+
+ protected void verifyFetchedLogPost(String logs, String[] el, boolean contains) {
+ for (String log : el) {
+ if (contains) {
+ Assert.assertTrue("Checking for presence of " + log, logs.contains(log));
+ } else {
+ Assert.assertFalse("Checking for absence of " + log, logs.contains(log));
+ }
+ }
+ }
+}
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java
new file mode 100644
index 0000000..a5fd9c6
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.service.cli.operation;
+
+import java.util.HashMap;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hive.jdbc.miniHS2.MiniHS2;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.FetchType;
+import org.apache.hive.service.cli.OperationHandle;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationStatus;
+import org.apache.hive.service.cli.RowSet;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * TestOperationLoggingAPIWithMr
+ * Tests the FetchResults of TFetchType.LOG at the Thrift level in MR mode.
+ */
+public class TestOperationLoggingAPIWithMr extends TestOperationLoggingAPIBase {
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ tableName = "testOperationLoggingAPIWithMr_table";
+ expectedLogsVerbose = new String[]{
+ "Parsing command",
+ "Parse Completed",
+ "Starting Semantic Analysis",
+ "Semantic Analysis Completed",
+ "Starting command"
+ };
+ expectedLogsExecution = new String[]{
+ "Number of reduce tasks determined at compile time",
+ "number of splits",
+ "Submitting tokens for job",
+ "Ended Job"
+ };
+ expectedLogsPerformance = new String[]{
+ "",
+ "",
+ "",
+ ""
+ };
+ hiveConf = new HiveConf();
+ hiveConf.set(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose");
+ // We need to set the below parameter to test performance level logging
+ hiveConf.set("hive.ql.log.PerfLogger.level", "INFO,DRFA");
+ miniHS2 = new MiniHS2(hiveConf);
+ confOverlay = new HashMap<String, String>();
+ confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+ miniHS2.start(confOverlay);
+ }
+
+ @Test
+ public void testFetchResultsOfLog() throws Exception {
+ // verify whether the sql operation log is generated and fetched correctly.
+ OperationHandle operationHandle = client.executeStatement(sessionHandle, sql, null);
+ RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
+ FetchType.LOG);
+ verifyFetchedLog(rowSetLog, expectedLogsVerbose);
+ }
+
+ @Test
+ public void testFetchResultsOfLogAsync() throws Exception {
+ // verify whether the sql operation log is generated and fetched correctly in async mode.
+ OperationHandle operationHandle = client.executeStatementAsync(sessionHandle, sql, null);
+
+ // Poll on the operation status till the query is completed
+ boolean isQueryRunning = true;
+ long pollTimeout = System.currentTimeMillis() + 100000;
+ OperationStatus opStatus;
+ OperationState state = null;
+ RowSet rowSetAccumulated = null;
+ StringBuilder logs = new StringBuilder();
+
+ while (isQueryRunning) {
+ // Break if polling times out
+ if (System.currentTimeMillis() > pollTimeout) {
+ break;
+ }
+ opStatus = client.getOperationStatus(operationHandle);
+ Assert.assertNotNull(opStatus);
+ state = opStatus.getState();
+
+ rowSetAccumulated = client.fetchResults(operationHandle, FetchOrientation.FETCH_NEXT, 2000,
+ FetchType.LOG);
+ for (Object[] row : rowSetAccumulated) {
+ logs.append(row[0]);
+ }
+
+ if (state == OperationState.CANCELED ||
+ state == OperationState.CLOSED ||
+ state == OperationState.FINISHED ||
+ state == OperationState.ERROR) {
+ isQueryRunning = false;
+ }
+ Thread.sleep(10);
+ }
+ // The sql should be completed now.
+ Assert.assertEquals("Query should be finished", OperationState.FINISHED, state);
+
+ // Verify the accumulated logs
+ verifyFetchedLogPost(logs.toString(), expectedLogsVerbose, true);
+
+ // Verify the fetched logs from the beginning of the log file
+ RowSet rowSet = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 2000,
+ FetchType.LOG);
+ verifyFetchedLog(rowSet, expectedLogsVerbose);
+ }
+
+ @Test
+ public void testFetchResultsOfLogWithOrientation() throws Exception {
+ // (FETCH_FIRST) execute a sql statement, and fetch its operation log as the expected value
+ OperationHandle operationHandle = client.executeStatement(sessionHandle, sql, null);
+ RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
+ FetchType.LOG);
+ int expectedLogLength = rowSetLog.numRows();
+
+ // (FETCH_NEXT) execute the same sql again,
+ // and fetch the sql operation log with FETCH_NEXT orientation
+ OperationHandle operationHandleWithOrientation = client.executeStatement(sessionHandle, sql,
+ null);
+ RowSet rowSetLogWithOrientation;
+ int logLength = 0;
+ int maxRows = calculateProperMaxRows(expectedLogLength);
+ do {
+ rowSetLogWithOrientation = client.fetchResults(operationHandleWithOrientation,
+ FetchOrientation.FETCH_NEXT, maxRows, FetchType.LOG);
+ logLength += rowSetLogWithOrientation.numRows();
+ } while (rowSetLogWithOrientation.numRows() == maxRows);
+ Assert.assertEquals(expectedLogLength, logLength);
+
+ // (FETCH_FIRST) fetch again from the same operation handle with FETCH_FIRST orientation
+ rowSetLogWithOrientation = client.fetchResults(operationHandleWithOrientation,
+ FetchOrientation.FETCH_FIRST, 1000, FetchType.LOG);
+ verifyFetchedLog(rowSetLogWithOrientation, expectedLogsVerbose);
+ }
+
+ // Since the log length of the sql operation may vary during Hive development, calculate a suitable maxRows.
+ private int calculateProperMaxRows(int len) {
+ if (len < 10) {
+ return 1;
+ } else if (len < 100) {
+ return 10;
+ } else {
+ return 100;
+ }
+ }
+}
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java
new file mode 100644
index 0000000..65b0895
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.service.cli.operation;
+
+import java.util.HashMap;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hive.jdbc.miniHS2.MiniHS2;
+import org.apache.hive.jdbc.miniHS2.MiniHS2.MiniClusterType;
+import org.junit.BeforeClass;
+
+/**
+ * TestOperationLoggingAPIWithTez
+ * Tests the FetchResults of TFetchType.LOG at the Thrift level in Tez mode.
+ */
+public class TestOperationLoggingAPIWithTez extends TestOperationLoggingAPIBase {
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ tableName = "testOperationLoggingAPIWithTez_table";
+ expectedLogsVerbose = new String[]{
+ "Parsing command",
+ "Parse Completed",
+ "Starting Semantic Analysis",
+ "Semantic Analysis Completed",
+ "Starting command"
+ };
+ expectedLogsExecution = new String[]{
+ "Executing on YARN cluster with App id",
+ "Setting Tez DAG access"
+ };
+ expectedLogsPerformance = new String[]{
+ "",
+ "",
+ "",
+ "from=org.apache.hadoop.hive.ql.exec.tez.TezJobMonitor",
+ "org.apache.tez.common.counters.DAGCounter",
+ "NUM_SUCCEEDED_TASKS",
+ "TOTAL_LAUNCHED_TASKS",
+ "CPU_TIME_MILLIS"
+ };
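+ // The TezJobMonitor and DAG counter entries above show up because this patch
+ // also prints the DAG summary when perf-or-above operation logging is enabled.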
+ hiveConf = new HiveConf();
+ hiveConf.set(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose");
+ // We need to set the below parameter to test performance level logging
+ hiveConf.set("hive.ql.log.PerfLogger.level", "INFO,DRFA");
+ // Change the engine to tez
+ hiveConf.setVar(ConfVars.HIVE_EXECUTION_ENGINE, "tez");
+ // Set tez execution summary to false.
+ hiveConf.setBoolVar(ConfVars.TEZ_EXEC_SUMMARY, false);
+ miniHS2 = new MiniHS2(hiveConf, MiniClusterType.TEZ);
+ confOverlay = new HashMap<String, String>();
+ confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+ miniHS2.start(confOverlay);
+ }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 6956393..970ea38 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -3821,4 +3821,16 @@ public static String getQualifiedPath(HiveConf conf, Path path) throws HiveExcep
public static boolean isDefaultNameNode(HiveConf conf) {
return !conf.getChangedProperties().containsKey(HiveConf.ConfVars.HADOOPFS.varname);
}
+
+ /**
+ * Checks whether the current HiveServer2 operation logging level is
+ * PERFORMANCE or higher (i.e. PERFORMANCE or VERBOSE).
+ * @param conf Hive configuration.
+ * @return true if operation logging is enabled at PERFORMANCE or VERBOSE level, false otherwise.
+ */
+ public static boolean isPerfOrAboveLogging(HiveConf conf) {
+ String loggingLevel = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL);
+ return conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED) &&
+ (loggingLevel.equalsIgnoreCase("PERFORMANCE") || loggingLevel.equalsIgnoreCase("VERBOSE"));
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
index f1a7f46..4423cd1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.hive.ql.exec.Heartbeater;
import org.apache.hadoop.hive.ql.exec.MapOperator;
import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
+import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.log.PerfLogger;
import org.apache.hadoop.hive.ql.session.SessionState;
@@ -264,7 +265,10 @@ public int monitorExecution(final DAGClient dagClient, HiveTxnManager txnMgr, Hi
Set<StatusGetOpts> opts = new HashSet<StatusGetOpts>();
Heartbeater heartbeater = new Heartbeater(txnMgr, conf);
long startTime = 0;
- boolean isProfileEnabled = conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY);
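+ // Also print the summary when HS2 operation logging is at performance or
+ // verbose level, so the DAG counters reach the fetched operation log.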
+ boolean isProfileEnabled = conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY) ||
+ Utilities.isPerfOrAboveLogging(conf);
boolean inPlaceUpdates = conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_INPLACE_PROGRESS);
boolean wideTerminal = false;
boolean isTerminal = inPlaceUpdates == true ? isUnixTerminal() : false;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
index 6b1f57d..2ea83ab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
@@ -176,7 +176,8 @@ public int execute(DriverContext driverContext) {
TezSessionPoolManager.getInstance().returnSession(session);
if (LOG.isInfoEnabled() && counters != null
- && conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY)) {
+ && (conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY) ||
+ Utilities.isPerfOrAboveLogging(conf))) {
for (CounterGroup group: counters) {
LOG.info(group.getDisplayName() +":");
for (TezCounter counter: group) {