diff --git a/itests/hive-unit-hadoop2/src/test/java/org/apache/hive/jdbc/TestSchedulerQueue.java b/itests/hive-unit-hadoop2/src/test/java/org/apache/hive/jdbc/TestSchedulerQueue.java index 0c5f8a5..4b1411c 100644 --- a/itests/hive-unit-hadoop2/src/test/java/org/apache/hive/jdbc/TestSchedulerQueue.java +++ b/itests/hive-unit-hadoop2/src/test/java/org/apache/hive/jdbc/TestSchedulerQueue.java @@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration; import org.apache.hive.jdbc.miniHS2.MiniHS2; +import org.apache.hive.jdbc.miniHS2.MiniHS2.MiniClusterType; import org.junit.After; import org.junit.Before; import org.junit.BeforeClass; @@ -76,7 +77,7 @@ public static void beforeTest() throws Exception { @Before public void setUp() throws Exception { DriverManager.setLoginTimeout(0); - miniHS2 = new MiniHS2(conf, true); + miniHS2 = new MiniHS2(conf, MiniClusterType.mr); miniHS2.setConfProperty(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false"); miniHS2.setConfProperty(HiveConf.ConfVars.HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE.varname, "true"); diff --git a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java index 32b971c..d6c2975 100644 --- a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java +++ b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.ql.WindowsPathUtil; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim; import org.apache.hadoop.hive.shims.HadoopShims.MiniMrShim; @@ -59,15 +60,23 @@ private MiniMrShim mr; private MiniDFSShim dfs; private FileSystem 
localFS; - private boolean useMiniMR = false; private boolean useMiniKdc = false; private final String serverPrincipal; - private final String serverKeytab; private final boolean isMetastoreRemote; + private MiniClusterType miniClusterType = MiniClusterType.none; + + public enum MiniClusterType { + mr, + tez, + spark, + encrypted, + miniSparkOnYarn, + none; + } public static class Builder { private HiveConf hiveConf = new HiveConf(); - private boolean useMiniMR = false; + private MiniClusterType miniClusterType = MiniClusterType.none; private boolean useMiniKdc = false; private String serverPrincipal; private String serverKeytab; @@ -78,7 +87,7 @@ public Builder() { } public Builder withMiniMR() { - this.useMiniMR = true; + this.miniClusterType = MiniClusterType.mr; return this; } @@ -110,7 +119,7 @@ public Builder withHTTPTransport(){ public MiniHS2 build() throws Exception { - if (useMiniMR && useMiniKdc) { + if (miniClusterType == MiniClusterType.mr && useMiniKdc) { throw new IOException("Can't create secure miniMr ... 
yet"); } if (isHTTPTransMode) { @@ -118,7 +127,7 @@ public MiniHS2 build() throws Exception { } else { hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, HS2_BINARY_MODE); } - return new MiniHS2(hiveConf, useMiniMR, useMiniKdc, serverPrincipal, serverKeytab, + return new MiniHS2(hiveConf, miniClusterType, useMiniKdc, serverPrincipal, serverKeytab, isMetastoreRemote); } } @@ -143,34 +152,39 @@ public FileSystem getLocalFS() { return localFS; } - public boolean isUseMiniMR() { - return useMiniMR; + public MiniClusterType getMiniClusterType() { + return miniClusterType; } - public void setUseMiniMR(boolean useMiniMR) { - this.useMiniMR = useMiniMR; + public void setMiniClusterType(MiniClusterType miniClusterType) { + this.miniClusterType = miniClusterType; } public boolean isUseMiniKdc() { return useMiniKdc; } - private MiniHS2(HiveConf hiveConf, boolean useMiniMR, boolean useMiniKdc, + private MiniHS2(HiveConf hiveConf, MiniClusterType miniClusterType, boolean useMiniKdc, String serverPrincipal, String serverKeytab, boolean isMetastoreRemote) throws Exception { super(hiveConf, "localhost", MetaStoreUtils.findFreePort(), MetaStoreUtils.findFreePort()); - this.useMiniMR = useMiniMR; + this.miniClusterType = miniClusterType; this.useMiniKdc = useMiniKdc; this.serverPrincipal = serverPrincipal; - this.serverKeytab = serverKeytab; this.isMetastoreRemote = isMetastoreRemote; baseDir = Files.createTempDir(); localFS = FileSystem.getLocal(hiveConf); FileSystem fs; - if (useMiniMR) { + if (miniClusterType != MiniClusterType.none) { dfs = ShimLoader.getHadoopShims().getMiniDfs(hiveConf, 4, true, null); fs = dfs.getFileSystem(); - mr = ShimLoader.getHadoopShims().getMiniMrCluster(hiveConf, 4, - fs.getUri().toString(), 1); + String uriString = WindowsPathUtil.getHdfsUriString(fs.getUri().toString()); + if (miniClusterType == MiniClusterType.tez) { + mr = ShimLoader.getHadoopShims().getMiniTezCluster(hiveConf, 4, uriString, 1); + } else if (miniClusterType == 
MiniClusterType.miniSparkOnYarn) { + mr = ShimLoader.getHadoopShims().getMiniSparkCluster(hiveConf, 4, uriString, 1); + } else { + mr = ShimLoader.getHadoopShims().getMiniMrCluster(hiveConf, 4, uriString, 1); + } // store the config in system properties mr.setupConfiguration(getHiveConf()); baseDfsDir = new Path(new Path(fs.getUri()), "/base"); @@ -213,11 +227,11 @@ private MiniHS2(HiveConf hiveConf, boolean useMiniMR, boolean useMiniKdc, } public MiniHS2(HiveConf hiveConf) throws Exception { - this(hiveConf, false); + this(hiveConf, MiniClusterType.none); } - public MiniHS2(HiveConf hiveConf, boolean useMiniMR) throws Exception { - this(hiveConf, useMiniMR, false, null, null, false); + public MiniHS2(HiveConf hiveConf, MiniClusterType clusterType) throws Exception { + this(hiveConf, clusterType, false, null, null, false); } public void start(Map confOverlay) throws Exception { diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java index ef64483..8763e28 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hive.jdbc.miniHS2.MiniHS2; +import org.apache.hive.jdbc.miniHS2.MiniHS2.MiniClusterType; import org.apache.hive.service.cli.HiveSQLException; import org.apache.hive.service.cli.session.HiveSessionHook; import org.apache.hive.service.cli.session.HiveSessionHookContext; @@ -82,7 +83,7 @@ public static void beforeTest() throws Exception { dataFilePath = new Path(dataFileDir, "kv1.txt"); DriverManager.setLoginTimeout(0); conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - miniHS2 = new MiniHS2(conf, true); + miniHS2 = new MiniHS2(conf, MiniClusterType.mr); 
Map overlayProps = new HashMap(); overlayProps.put(ConfVars.HIVE_SERVER2_SESSION_HOOK.varname, LocalClusterSparkSessionHook.class.getName()); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java index e459c4e..ea4a385 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hive.jdbc.miniHS2.MiniHS2; +import org.apache.hive.jdbc.miniHS2.MiniHS2.MiniClusterType; import org.apache.hive.service.cli.HiveSQLException; import org.apache.hive.service.cli.session.HiveSessionHook; import org.apache.hive.service.cli.session.HiveSessionHookContext; @@ -73,7 +74,7 @@ public static void beforeTest() throws Exception { dataFilePath = new Path(dataFileDir, "kv1.txt"); DriverManager.setLoginTimeout(0); conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - miniHS2 = new MiniHS2(conf, true); + miniHS2 = new MiniHS2(conf, MiniClusterType.mr); Map overlayProps = new HashMap(); overlayProps.put(ConfVars.HIVE_SERVER2_SESSION_HOOK.varname, MiniMrTestSessionHook.class.getName()); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java index e7383c9..6770fe4 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java @@ -34,10 +34,12 @@ import java.util.concurrent.Future; import com.google.common.util.concurrent.ThreadFactoryBuilder; + import org.apache.hadoop.fs.Path; import 
org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hive.jdbc.miniHS2.MiniHS2; +import org.apache.hive.jdbc.miniHS2.MiniHS2.MiniClusterType; import org.apache.hive.service.cli.HiveSQLException; import org.apache.hive.service.cli.session.HiveSessionHook; import org.apache.hive.service.cli.session.HiveSessionHookContext; @@ -91,7 +93,7 @@ public static void beforeTest() throws Exception { dataFilePath = new Path(dataFileDir, "kv1.txt"); DriverManager.setLoginTimeout(0); conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - miniHS2 = new MiniHS2(conf, true); + miniHS2 = new MiniHS2(conf, MiniClusterType.mr); Map overlayProps = new HashMap(); overlayProps.put(ConfVars.HIVE_SERVER2_SESSION_HOOK.varname, LocalClusterSparkSessionHook.class.getName()); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java index 21487b0..86f1dc0 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hive.jdbc.miniHS2.MiniHS2; +import org.apache.hive.jdbc.miniHS2.MiniHS2.MiniClusterType; import org.apache.hive.service.cli.CLIServiceClient; import org.apache.hive.service.cli.FetchOrientation; import org.apache.hive.service.cli.FetchType; @@ -80,7 +81,7 @@ public static void setUpBeforeClass() throws Exception { hiveConf.set(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose"); // We need to set the below parameter to test performance level logging hiveConf.set("hive.ql.log.PerfLogger.level", "INFO,DRFA"); - miniHS2 = new MiniHS2(hiveConf); + miniHS2 = new MiniHS2(hiveConf, 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hive.service.cli.operation;

import java.io.File;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hive.jdbc.miniHS2.MiniHS2;
import org.apache.hive.jdbc.miniHS2.MiniHS2.MiniClusterType;
import org.apache.hive.service.cli.CLIServiceClient;
import org.apache.hive.service.cli.FetchOrientation;
import org.apache.hive.service.cli.FetchType;
import org.apache.hive.service.cli.OperationHandle;
import org.apache.hive.service.cli.RowSet;
import org.apache.hive.service.cli.SessionHandle;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

/**
 * Verifies that HiveServer2 operation logs can be fetched at each operation
 * logging level (verbose, performance, execution, none) when queries run on a
 * mini Tez cluster. Each test switches the level via
 * {@code set hive.server2.logging.operation.level=...}, runs a query, fetches
 * the operation log with {@link FetchType#LOG}, and checks which log fragments
 * are present or absent.
 */
public class TestOperationLoggingAPIWithTez {
  private static HiveConf hiveConf;
  private static MiniHS2 miniHS2 = null;
  private static Map<String, String> confOverlay;

  private final String tableName = "testOperationLoggingAPIWithTez_table";
  private File dataFile;
  private CLIServiceClient client;
  private SessionHandle sessionHandle;
  private final String sql = "select * from " + tableName;
  private final String sqlCntStar = "select count(*) from " + tableName;

  // Log fragments emitted during compilation; visible only at "verbose" level.
  private final String[] expectedLogs = {
      "Parsing command",
      "Parse Completed",
      "Starting Semantic Analysis",
      "Semantic Analysis Completed",
      "Starting command"
  };
  // Log fragments expected at "execution" level and above.
  // TODO(review): currently empty, so all execution-level assertions are
  // vacuous; add real Tez execution-log markers to make them meaningful.
  private final String[] expectedLogsExecution = {
  };
  // Log fragments expected at "performance" level and above.
  // TODO(review): entries are empty placeholders; add real PerfLogger markers
  // (e.g. "<PERFLOG") to make these checks meaningful. Empty strings are
  // skipped by verifyFetchedLogPost because String.contains("") is always
  // true, which would otherwise make absence checks fail unconditionally.
  private final String[] expectedLogsPerformance = {
      "",
      "",
      "",
  };

  /**
   * Starts a MiniHS2 backed by a mini Tez cluster with verbose operation
   * logging and performance logging routed into the operation log.
   */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    hiveConf = new HiveConf();
    hiveConf.set(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose");
    // We need to set the below parameter to test performance level logging
    hiveConf.set("hive.ql.log.PerfLogger.level", "INFO,DRFA");
    // Set tez execution summary to false.
    hiveConf.setBoolVar(ConfVars.TEZ_EXEC_SUMMARY, false);
    miniHS2 = new MiniHS2(hiveConf, MiniClusterType.tez);
    confOverlay = new HashMap<String, String>();
    confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    miniHS2.start(confOverlay);
  }

  /**
   * Open a session, and create a table for cases usage
   * @throws Exception
   */
  @Before
  public void setUp() throws Exception {
    dataFile = new File(hiveConf.get("test.data.files"), "kv1.txt");
    client = miniHS2.getServiceClient();
    sessionHandle = setupSession();
  }

  @After
  public void tearDown() throws Exception {
    // Cleanup
    String queryString = "DROP TABLE " + tableName;
    client.executeStatement(sessionHandle, queryString, null);

    client.closeSession(sessionHandle);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    miniHS2.stop();
  }

  @Test
  public void testFetchResultsOfLogWithVerboseMode() throws Exception {
    String queryString = "set hive.server2.logging.operation.level=verbose";
    client.executeStatement(sessionHandle, queryString, null);
    // verify whether the sql operation log is generated and fetch correctly.
    OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
    RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
        FetchType.LOG);
    // Verbose Logs should contain everything, including execution and performance
    verifyFetchedLog(rowSetLog, expectedLogs);
    verifyFetchedLog(rowSetLog, expectedLogsExecution);
    verifyFetchedLog(rowSetLog, expectedLogsPerformance);
  }

  @Test
  public void testFetchResultsOfLogWithPerformanceMode() throws Exception {
    try {
      String queryString = "set hive.server2.logging.operation.level=performance";
      client.executeStatement(sessionHandle, queryString, null);
      // verify whether the sql operation log is generated and fetch correctly.
      OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
      RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
          FetchType.LOG);
      // rowSetLog should contain execution as well as performance logs
      verifyFetchedLog(rowSetLog, expectedLogsExecution);
      verifyFetchedLog(rowSetLog, expectedLogsPerformance);
      verifyMissingContentsInFetchedLog(rowSetLog, expectedLogs);
    } finally {
      // Restore everything to default setup to avoid discrepancy between junit test runs
      String queryString2 = "set hive.server2.logging.operation.level=verbose";
      client.executeStatement(sessionHandle, queryString2, null);
    }
  }

  @Test
  public void testFetchResultsOfLogWithExecutionMode() throws Exception {
    try {
      String queryString = "set hive.server2.logging.operation.level=execution";
      client.executeStatement(sessionHandle, queryString, null);
      // verify whether the sql operation log is generated and fetch correctly.
      OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
      RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
          FetchType.LOG);
      verifyFetchedLog(rowSetLog, expectedLogsExecution);
      verifyMissingContentsInFetchedLog(rowSetLog, expectedLogsPerformance);
      verifyMissingContentsInFetchedLog(rowSetLog, expectedLogs);
    } finally {
      // Restore everything to default setup to avoid discrepancy between junit test runs
      String queryString2 = "set hive.server2.logging.operation.level=verbose";
      client.executeStatement(sessionHandle, queryString2, null);
    }
  }

  @Test
  public void testFetchResultsOfLogWithNoneMode() throws Exception {
    try {
      String queryString = "set hive.server2.logging.operation.level=none";
      client.executeStatement(sessionHandle, queryString, null);
      // verify whether the sql operation log is generated and fetch correctly.
      OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
      RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
          FetchType.LOG);
      // We should not get any rows. Use a JUnit assertion rather than the Java
      // "assert" keyword: the latter is a no-op unless the JVM runs with -ea,
      // so the original check could silently pass.
      Assert.assertEquals("Expected no log rows at level=none", 0, rowSetLog.numRows());
    } finally {
      // Restore everything to default setup to avoid discrepancy between junit test runs
      String queryString2 = "set hive.server2.logging.operation.level=verbose";
      client.executeStatement(sessionHandle, queryString2, null);
    }
  }

  /**
   * Opens a session, switches to the embedded lock manager, (re)creates the
   * test table, loads kv1.txt into it, and sanity-checks the loaded data.
   *
   * @return the handle of the newly opened session
   */
  private SessionHandle setupSession() throws Exception {
    // Open a session
    SessionHandle sessionHandle = client.openSession(null, null, null);

    // Change lock manager to embedded mode
    String queryString = "SET hive.lock.manager=" +
        "org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager";
    client.executeStatement(sessionHandle, queryString, null);

    // Drop the table if it exists
    queryString = "DROP TABLE IF EXISTS " + tableName;
    client.executeStatement(sessionHandle, queryString, null);

    // Create a test table
    queryString = "create table " + tableName + " (key int, value string)";
    client.executeStatement(sessionHandle, queryString, null);

    // Load data
    queryString = "load data local inpath '" + dataFile + "' into table " + tableName;
    client.executeStatement(sessionHandle, queryString, null);

    // Precondition check: verify whether the table is created and data is fetched correctly.
    OperationHandle operationHandle = client.executeStatement(sessionHandle, sql, null);
    RowSet rowSetResult = client.fetchResults(operationHandle);
    Assert.assertEquals(500, rowSetResult.numRows());
    // Inspect a single first row instead of creating two fresh iterators.
    Object[] firstRow = rowSetResult.iterator().next();
    Assert.assertEquals(238, firstRow[0]);
    Assert.assertEquals("val_238", firstRow[1]);

    return sessionHandle;
  }

  /** Concatenates the first column of every fetched log row into one string. */
  private String getFetchedLogContents(RowSet rowSet) {
    StringBuilder stringBuilder = new StringBuilder();

    for (Object[] row : rowSet) {
      stringBuilder.append(row[0]);
    }

    return stringBuilder.toString();
  }

  /** Asserts that every expected fragment appears in the fetched log. */
  private void verifyFetchedLog(RowSet rowSet, String[] expected) {
    String logs = getFetchedLogContents(rowSet);
    verifyFetchedLogPost(logs, expected, true);
  }

  /** Asserts that none of the given fragments appear in the fetched log. */
  private void verifyMissingContentsInFetchedLog(RowSet rowSet, String[] expected) {
    String logs = getFetchedLogContents(rowSet);
    verifyFetchedLogPost(logs, expected, false);
  }

  /**
   * Checks the presence ({@code contains == true}) or absence of each expected
   * fragment in {@code logs}. Empty fragments are skipped: String.contains("")
   * always returns true, so they would make presence checks vacuous and
   * absence checks fail unconditionally.
   */
  private void verifyFetchedLogPost(String logs, String[] expected, boolean contains) {
    System.out.println("LOGS are : " + logs);
    for (String log : expected) {
      if (log.isEmpty()) {
        continue;
      }
      if (contains) {
        Assert.assertTrue("Checking for presence of " + log, logs.contains(log));
      } else {
        Assert.assertFalse("Checking for absence of " + log, logs.contains(log));
      }
    }
  }

}
+ * @return true if current HiveServer2 logging operation level is >= PERFORMANCE. + * Else, false. + */ + public static boolean isPerfOrAboveLogging(HiveConf conf) { + String loggingLevel = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL); + return conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED) && + (loggingLevel.equalsIgnoreCase("PERFORMANCE") || loggingLevel.equalsIgnoreCase("VERBOSE")); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java index f1a7f46..4423cd1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hive.ql.exec.Heartbeater; import org.apache.hadoop.hive.ql.exec.MapOperator; import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.log.PerfLogger; import org.apache.hadoop.hive.ql.session.SessionState; @@ -264,7 +265,8 @@ public int monitorExecution(final DAGClient dagClient, HiveTxnManager txnMgr, Hi Set opts = new HashSet(); Heartbeater heartbeater = new Heartbeater(txnMgr, conf); long startTime = 0; - boolean isProfileEnabled = conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY); + boolean isProfileEnabled = conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY) || + Utilities.isPerfOrAboveLogging(conf); boolean inPlaceUpdates = conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_INPLACE_PROGRESS); boolean wideTerminal = false; boolean isTerminal = inPlaceUpdates == true ? 
isUnixTerminal() : false; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java index 2356345..7673e0d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java @@ -175,7 +175,8 @@ public int execute(DriverContext driverContext) { TezSessionPoolManager.getInstance().returnSession(session); if (LOG.isInfoEnabled() && counters != null - && conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY)) { + && (conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY) || + Utilities.isPerfOrAboveLogging(conf))) { for (CounterGroup group: counters) { LOG.info(group.getDisplayName() +":"); for (TezCounter counter: group) {