commit d08e3d79927821050487c824c6023af7f0987b01
Author: Daniel Dai
Date:   Thu Sep 13 12:11:10 2018 -0700

    kill api

    Allow tagging a query via hive.query.tag and killing it by that tag;
    restrict kill to admins and the query owner.

diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index aa58d74..ec53360 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1517,6 +1517,8 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
     HIVEQUERYID("hive.query.id", "", "ID for query being executed (might be multiple per a session)"),
 
+    HIVEQUERYTAG("hive.query.tag", null, "tag for the query"),
+
     HIVESPARKJOBNAMELENGTH("hive.spark.jobname.length", 100000, "max jobname length for Hive on " +
         "Spark queries"),
     HIVEJOBNAMELENGTH("hive.jobname.length", 50, "max jobname length"),
@@ -5421,6 +5423,7 @@ public ZoneId getLocalTimeZone() {
       ConfVars.SHOW_JOB_FAIL_DEBUG_INFO.varname,
       ConfVars.TASKLOG_DEBUG_TIMEOUT.varname,
       ConfVars.HIVEQUERYID.varname,
+      ConfVars.HIVEQUERYTAG.varname,
   };

   /**
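The new hive.query.tag property labels a query so it can be killed later even when the query id is unknown, for example after a HiveServer2 restart (queries are still tagged with their query id by default). A minimal sketch of tagging queries over JDBC; the URL, user, and table name are hypothetical:

    // Sketch: label the queries on a session via the new hive.query.tag property.
    // The JDBC URL, user, and table are hypothetical; requires the Hive JDBC driver.
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class TagQueryExample {
      public static void main(String[] args) throws Exception {
        try (Connection con = DriverManager.getConnection(
                 "jdbc:hive2://localhost:10000/default", "alice", "");
             Statement stmt = con.createStatement()) {
          stmt.execute("set hive.query.tag = nightly_etl");   // label subsequent queries
          stmt.execute("select count(*) from sample_table");  // runs tagged as nightly_etl
        }
      }
    }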
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java
index 4942ed9..a9bc741 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java
@@ -41,8 +41,8 @@ import org.apache.hive.jdbc.miniHS2.MiniHS2;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;

 /**
  * TestJdbcWithMiniLlap for Arrow format
@@ -52,6 +52,7 @@
   private static final String tableName = "testJdbcMinihs2Tbl";
   private static String dataFileDir;
   private static final String testDbName = "testJdbcMinihs2";
+  private static final String tag = "mytag";

   private static class ExceptionHolder {
     Throwable throwable;
@@ -61,6 +62,12 @@ public static void beforeTest() throws Exception {
     HiveConf conf = defaultConf();
     conf.setBoolVar(ConfVars.LLAP_OUTPUT_FORMAT_ARROW, true);
+    conf.setVar(ConfVars.HIVE_AUTHENTICATOR_MANAGER, "org.apache.hadoop.hive.ql.security"
+        + ".SessionStateUserAuthenticator");
+    conf.setVar(ConfVars.USERS_IN_ADMIN_ROLE, System.getProperty("user.name"));
+    conf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
+    conf.setVar(ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST_APPEND,
+        ConfVars.HIVE_SUPPORT_CONCURRENCY.varname + "|" + ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname);
     MiniHS2.cleanupLocalDir();
     miniHS2 = BaseJdbcWithMiniLlap.beforeTest(conf);
     dataFileDir = conf.get("test.data.files").replace('\\', '/').replace("c:", "");
@@ -68,8 +75,19 @@ public static void beforeTest() throws Exception {
     Connection conDefault = BaseJdbcWithMiniLlap.getConnection(miniHS2.getJdbcURL(),
         System.getProperty("user.name"), "bar");
     Statement stmt = conDefault.createStatement();
+    String tblName = testDbName + "." + tableName;
+    Path dataFilePath = new Path(dataFileDir, "kv1.txt");
+    String udfName = SleepMsUDF.class.getName();
     stmt.execute("drop database if exists " + testDbName + " cascade");
     stmt.execute("create database " + testDbName);
+    stmt.execute("set role admin");
+    stmt.execute("dfs -put " + dataFilePath.toString() + " " + "kv1.txt");
+    stmt.execute("use " + testDbName);
+    stmt.execute("create table " + tblName + " (int_col int, value string) ");
+    stmt.execute("load data inpath 'kv1.txt' into table " + tblName);
+    stmt.execute("create function sleepMsUDF as '" + udfName + "'");
+    stmt.execute("grant select on table " + tblName + " to role public");
+
     stmt.close();
     conDefault.close();
   }
@@ -286,29 +304,16 @@ public Integer evaluate(int value, int ms) {
    * that runs for a sufficiently long time.
    * @throws Exception
    */
-  @Test
-  public void testKillQuery() throws Exception {
-    Connection con = BaseJdbcWithMiniLlap.getConnection(miniHS2.getJdbcURL(testDbName),
-        System.getProperty("user.name"), "bar");
+  private void testKillQueryInternal(String user, String killUser, boolean useTag,
+      final ExceptionHolder stmtHolder, final ExceptionHolder killHolder) throws Exception {
+    Connection con1 = BaseJdbcWithMiniLlap.getConnection(miniHS2.getJdbcURL(testDbName),
+        user, "bar");
     Connection con2 = BaseJdbcWithMiniLlap.getConnection(miniHS2.getJdbcURL(testDbName),
-        System.getProperty("user.name"), "bar");
+        killUser, "bar");

-    String udfName = SleepMsUDF.class.getName();
-    Statement stmt1 = con.createStatement();
     final Statement stmt2 = con2.createStatement();
-    Path dataFilePath = new Path(dataFileDir, "kv1.txt");
-
-    String tblName = testDbName + "." + tableName;
-
-    stmt1.execute("create temporary function sleepMsUDF as '" + udfName + "'");
-    stmt1.execute("create table " + tblName + " (int_col int, value string) ");
-    stmt1.execute("load data local inpath '" + dataFilePath.toString() + "' into table " + tblName);
-
-
-    stmt1.close();
-    final Statement stmt = con.createStatement();
-    final ExceptionHolder tExecuteHolder = new ExceptionHolder();
-    final ExceptionHolder tKillHolder = new ExceptionHolder();
+    final Statement stmt = con1.createStatement();

     // Thread executing the query
     Thread tExecute = new Thread(new Runnable() {
@@ -317,12 +322,14 @@ public void run() {
         try {
           System.out.println("Executing query: ");
           stmt.execute("set hive.llap.execution.mode = none");
-
+          if (useTag) {
+            stmt.execute("set hive.query.tag = " + tag);
+          }
           // The test table has 500 rows, so total query time should be ~ 500*500ms
           stmt.executeQuery("select sleepMsUDF(t1.int_col, 100), t1.int_col, t2.int_col " +
               "from " + tableName + " t1 join " + tableName + " t2 on t1.int_col = t2.int_col");
         } catch (SQLException e) {
-          tExecuteHolder.throwable = e;
+          stmtHolder.throwable = e;
         }
       }
     });
@@ -332,12 +339,20 @@ public void run() {
       public void run() {
         try {
           Thread.sleep(5000);
-          String queryId = ((HiveStatement) stmt).getQueryId();
+          String queryId;
+          if (useTag) {
+            queryId = tag;
+          } else {
+            queryId = ((HiveStatement) stmt).getQueryId();
+          }
           System.out.println("Killing query: " + queryId);
+          if (killUser.equals(System.getProperty("user.name"))) {
+            stmt2.execute("set role admin");
+          }
           stmt2.execute("kill query '" + queryId + "'");
           stmt2.close();
         } catch (Exception e) {
-          tKillHolder.throwable = e;
+          killHolder.throwable = e;
         }
       }
     });
@@ -347,12 +362,51 @@ public void run() {
     tExecute.join();
     tKill.join();
     stmt.close();
+    con1.close();
     con2.close();
-    con.close();
+  }
+
+  @Test
+  @Override
+  public void testKillQuery() throws Exception {
+    testKillQueryById();
+    testKillQueryByTagNegative();
+    testKillQueryByTagAdmin();
+    testKillQueryByTagOwner();
+  }

+  public void testKillQueryById() throws Exception {
+    ExceptionHolder tExecuteHolder = new ExceptionHolder();
+    ExceptionHolder tKillHolder = new ExceptionHolder();
+    testKillQueryInternal(System.getProperty("user.name"), System.getProperty("user.name"), false,
+        tExecuteHolder, tKillHolder);
     assertNotNull("tExecute", tExecuteHolder.throwable);
     assertNull("tCancel", tKillHolder.throwable);
   }

+  public void testKillQueryByTagNegative() throws Exception {
+    ExceptionHolder tExecuteHolder = new ExceptionHolder();
+    ExceptionHolder tKillHolder = new ExceptionHolder();
+    testKillQueryInternal("user1", "user2", true, tExecuteHolder, tKillHolder);
+    assertNull("tExecute", tExecuteHolder.throwable);
+    assertNotNull("tCancel", tKillHolder.throwable);
+    assertTrue(tKillHolder.throwable.getMessage(),
+        tKillHolder.throwable.getMessage().contains("No privilege"));
+  }
+
+  public void testKillQueryByTagAdmin() throws Exception {
+    ExceptionHolder tExecuteHolder = new ExceptionHolder();
+    ExceptionHolder tKillHolder = new ExceptionHolder();
+    testKillQueryInternal("user1", System.getProperty("user.name"), true, tExecuteHolder, tKillHolder);
+    assertNotNull("tExecute", tExecuteHolder.throwable);
+    assertNull("tCancel", tKillHolder.throwable);
+  }
+
+  public void testKillQueryByTagOwner() throws Exception {
+    ExceptionHolder tExecuteHolder = new ExceptionHolder();
+    ExceptionHolder tKillHolder = new ExceptionHolder();
+    testKillQueryInternal("user1", "user1", true, tExecuteHolder, tKillHolder);
+    assertNotNull("tExecute", tExecuteHolder.throwable);
+    assertNull("tCancel", tKillHolder.throwable);
+  }
 }
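Outside the test harness, the kill side is symmetric: a second session issues KILL QUERY with the tag instead of the query id. Per this patch, the kill succeeds only for an admin (after set role admin) or for the owner of the tagged query. A minimal sketch with a hypothetical URL and user:

    // Sketch: kill a tagged query from a second session (URL and user are hypothetical).
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class KillByTagExample {
      public static void main(String[] args) throws Exception {
        try (Connection con = DriverManager.getConnection(
                 "jdbc:hive2://localhost:10000/default", "admin_user", "");
             Statement stmt = con.createStatement()) {
          stmt.execute("set role admin");           // needed unless we own the query
          stmt.execute("kill query 'nightly_etl'"); // a tag, not a query id
        }
      }
    }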
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index dad2035..d426e6d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -707,7 +707,12 @@ public void run() {
     try {
       perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
-      doAuthorization(queryState.getHiveOperation(), sem, command);
+      // The authorization check for KILL QUERY happens in KillQueryImpl instead,
+      // because either an admin or the operation owner may kill a query, a rule
+      // the authorizer cannot express directly.
+      if (queryState.getHiveOperation() != HiveOperation.KILL_QUERY) {
+        doAuthorization(queryState.getHiveOperation(), sem, command);
+      }
     } catch (AuthorizationException authExp) {
       console.printError("Authorization failed:" + authExp.getMessage()
           + ". Use SHOW GRANT to get more details.");
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
index 028dd60..40f9e9c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
@@ -23,7 +23,9 @@
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.session.LineageState;
+import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.tez.dag.api.TezConfiguration;

 /**
  * The class to store query level info such as queryId. Multiple queries can run
@@ -55,10 +57,6 @@
    */
   private long numModifiedRows = 0;

-  // Holds the tag supplied by user to uniquely identify the query. Can be used to kill the query if the query
-  // id cannot be queried for some reason like hive server restart.
-  private String queryTag = null;
-
   /**
    * Private constructor, use QueryState.Builder instead.
    * @param conf The query specific configuration object
@@ -120,21 +118,25 @@ public void setNumModifiedRows(long numModifiedRows) {
   }

   public String getQueryTag() {
-    return queryTag;
+    return HiveConf.getVar(this.queryConf, HiveConf.ConfVars.HIVEQUERYTAG);
   }

   public void setQueryTag(String queryTag) {
-    this.queryTag = queryTag;
+    HiveConf.setVar(this.queryConf, HiveConf.ConfVars.HIVEQUERYTAG, queryTag);
   }

-  public static void setMapReduceJobTag(HiveConf queryConf, String queryTag) {
-    String jobTag = queryConf.get(MRJobConfig.JOB_TAGS);
-    if (jobTag == null) {
+  public static void setApplicationTag(HiveConf queryConf, String queryTag) {
+    String jobTag = HiveConf.getVar(queryConf, HiveConf.ConfVars.HIVEQUERYTAG);
+    if (jobTag == null || jobTag.isEmpty()) {
       jobTag = queryTag;
     } else {
       jobTag = jobTag.concat("," + queryTag);
     }
+    if (SessionState.get() != null) {
+      jobTag = jobTag.concat("," + "userid=" + SessionState.get().getUserName());
+    }
     queryConf.set(MRJobConfig.JOB_TAGS, jobTag);
+    queryConf.set(TezConfiguration.TEZ_APPLICATION_TAGS, jobTag);
   }

   /**
@@ -246,7 +248,7 @@ public QueryState build() {
       if (generateNewQueryId) {
         String queryId = QueryPlan.makeQueryId();
         queryConf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId);
-        setMapReduceJobTag(queryConf, queryId);
+        setApplicationTag(queryConf, queryId);

         // FIXME: druid storage handler relies on query.id to maintain some staging directories
         // expose queryid to session level
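Since the tag now lives in the per-query HiveConf, setApplicationTag can compose a single tag string and stamp it on both MR and Tez applications. A rough sketch of the composition, assuming a user tag of mytag and a session user alice (the query id shown is made up):

    // Sketch of the tag string setApplicationTag builds; all values are made up.
    public class TagComposition {
      public static void main(String[] args) {
        String queryId = "hive_20180913121110_0001"; // generated per query
        String userTag = "mytag";                    // from hive.query.tag (may be null/empty)
        String jobTag = (userTag == null || userTag.isEmpty())
            ? queryId : userTag + "," + queryId;
        jobTag = jobTag + ",userid=alice";           // appended when a SessionState exists
        // jobTag -> "mytag,hive_20180913121110_0001,userid=alice"
        // QueryState writes this to both mapreduce.job.tags and tez.application.tags
        System.out.println(jobTag);
      }
    }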
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
index e4186c4..448c5f8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
@@ -358,7 +358,7 @@ private void setConfigs(ASTNode node) throws SemanticException {
       if (key.equalsIgnoreCase(HIVEQUERYID.varname)) {
         String queryTag = config.getValue();
         if (!StringUtils.isEmpty(queryTag)) {
-          QueryState.setMapReduceJobTag(conf, queryTag);
+          QueryState.setApplicationTag(conf, queryTag);
         }
         queryState.setQueryTag(queryTag);
       } else {
diff --git a/ql/src/test/queries/clientnegative/authorization_kill_query.q b/ql/src/test/queries/clientnegative/authorization_kill_query.q
deleted file mode 100644
index 5379f87..0000000
--- a/ql/src/test/queries/clientnegative/authorization_kill_query.q
+++ /dev/null
@@ -1,15 +0,0 @@
-set hive.security.authorization.enabled=true;
-set hive.test.authz.sstd.hs2.mode=true;
-set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
-set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
-
-set user.name=hive_admin_user;
-set role ADMIN;
-explain authorization kill query 'dummyqueryid';
-kill query 'dummyqueryid';
-
-set user.name=ruser1;
-
--- kill query as non-admin should fail
-explain authorization kill query 'dummyqueryid';
-kill query 'dummyqueryid';
diff --git a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
index 8db6a29..61d5e88 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
@@ -22,12 +22,18 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;

+import com.google.common.collect.Multimap;
+import com.google.common.collect.MultimapBuilder;
+import com.google.common.collect.Multimaps;
+import com.google.common.collect.SetMultimap;
 import org.apache.hadoop.hive.common.metrics.common.Metrics;
 import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
@@ -62,7 +68,8 @@
       new ConcurrentHashMap<OperationHandle, Operation>();
   private final ConcurrentHashMap<String, Operation> queryIdOperation =
       new ConcurrentHashMap<String, Operation>();
-  private final ConcurrentHashMap<String, String> queryTagToIdMap = new ConcurrentHashMap<>();
+  private final SetMultimap<String, String> queryTagToIdMap =
+      Multimaps.synchronizedSetMultimap(MultimapBuilder.hashKeys().hashSetValues().build());

   //Following fields for displaying queries on WebUI
   private Object webuiLock = new Object();
@@ -205,12 +212,7 @@ private void addOperation(Operation operation) {
   public void updateQueryTag(String queryId, String queryTag) {
     Operation operation = queryIdOperation.get(queryId);
     if (operation != null) {
-      String queryIdTemp = queryTagToIdMap.get(queryTag);
-      if (queryIdTemp != null) {
-        throw new RuntimeException("tag " + queryTag + " is already applied for query " + queryIdTemp);
-      }
       queryTagToIdMap.put(queryTag, queryId);
-      LOG.info("Query " + queryId + " is updated with tag " + queryTag);
       return;
     }
     LOG.info("Query id is missing during query tag updation");
@@ -225,7 +227,7 @@ private Operation removeOperation(OperationHandle opHandle) {
     queryIdOperation.remove(queryId);
     String queryTag = operation.getQueryTag();
     if (queryTag != null) {
-      queryTagToIdMap.remove(queryTag);
+      queryTagToIdMap.remove(queryTag, queryId);
     }
     LOG.info("Removed queryId: {} corresponding to operation: {} with tag: {}", queryId, opHandle, queryTag);
     if (operation instanceof SQLOperation) {
@@ -442,11 +444,14 @@ public Operation getOperationByQueryId(String queryId) {
     return queryIdOperation.get(queryId);
   }

-  public Operation getOperationByQueryTag(String queryTag) {
-    String queryId = queryTagToIdMap.get(queryTag);
-    if (queryId != null) {
-      return getOperationByQueryId(queryId);
+  public Set<Operation> getOperationsByQueryTag(String queryTag) {
+    Set<String> queryIds = queryTagToIdMap.get(queryTag);
+    Set<Operation> result = new HashSet<Operation>();
+    for (String queryId : queryIds) {
+      if (queryId != null && getOperationByQueryId(queryId) != null) {
+        result.add(getOperationByQueryId(queryId));
+      }
     }
-    return null;
+    return result;
   }
 }
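Swapping the one-to-one tag map for a Guava SetMultimap is what allows several concurrent queries to share a tag (the old code threw if a tag was reused), and removing by (tag, id) pair keeps the tag resolvable for queries that are still running. A minimal sketch of those semantics with made-up ids:

    // Sketch of the tag -> queryIds semantics OperationManager now relies on.
    import com.google.common.collect.MultimapBuilder;
    import com.google.common.collect.Multimaps;
    import com.google.common.collect.SetMultimap;

    public class TagMultimapDemo {
      public static void main(String[] args) {
        SetMultimap<String, String> tagToIds = Multimaps.synchronizedSetMultimap(
            MultimapBuilder.hashKeys().<String, String>hashSetValues().build());
        tagToIds.put("mytag", "query_1");          // two running queries share one tag
        tagToIds.put("mytag", "query_2");
        tagToIds.remove("mytag", "query_1");       // query_1 finishes
        System.out.println(tagToIds.get("mytag")); // [query_2]: the tag still resolves
      }
    }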
diff --git a/service/src/java/org/apache/hive/service/server/KillQueryImpl.java b/service/src/java/org/apache/hive/service/server/KillQueryImpl.java
index 490a04d..e96de0a 100644
--- a/service/src/java/org/apache/hive/service/server/KillQueryImpl.java
+++ b/service/src/java/org/apache/hive/service/server/KillQueryImpl.java
@@ -22,7 +22,11 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
 import org.apache.hadoop.hive.ql.session.KillQuery;
+import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
@@ -40,6 +44,7 @@
 import org.slf4j.LoggerFactory;

 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
@@ -49,6 +54,7 @@
   private final static Logger LOG = LoggerFactory.getLogger(KillQueryImpl.class);

   private final OperationManager operationManager;
+  private enum TagOrId { TAG, ID, UNKNOWN }

   public KillQueryImpl(OperationManager operationManager) {
     this.operationManager = operationManager;
@@ -64,7 +70,9 @@ public KillQueryImpl(OperationManager operationManager) {
       GetApplicationsResponse apps = proxy.getApplications(gar);
       List<ApplicationReport> appsList = apps.getApplicationList();
       for (ApplicationReport appReport : appsList) {
-        childYarnJobs.add(appReport.getApplicationId());
+        if (isAdmin() || appReport.getApplicationTags().contains("userid=" + SessionState.get().getUserName())) {
+          childYarnJobs.add(appReport.getApplicationId());
+        }
       }

       if (childYarnJobs.isEmpty()) {
@@ -81,6 +89,7 @@ public static void killChildYarnJobs(Configuration conf, String tag) {
     if (tag == null) {
       return;
     }
+    LOG.info("Killing yarn jobs using query tag:" + tag);
     Set<ApplicationId> childYarnJobs = getChildYarnJobs(conf, tag);
     if (!childYarnJobs.isEmpty()) {
       YarnClient yarnClient = YarnClient.createYarnClient();
@@ -91,44 +100,87 @@ public static void killChildYarnJobs(Configuration conf, String tag) {
         }
       }
     } catch (IOException | YarnException ye) {
-      throw new RuntimeException("Exception occurred while killing child job(s)", ye);
+      LOG.warn("Exception occurred while killing child job(s)", ye);
+    }
+  }
+
+  private static boolean isAdmin() {
+    boolean isAdmin = false;
+    if (SessionState.get().getAuthorizerV2() != null) {
+      try {
+        SessionState.get().getAuthorizerV2().checkPrivileges(HiveOperationType.KILL_QUERY,
+            new ArrayList<HivePrivilegeObject>(), new ArrayList<HivePrivilegeObject>(),
+            new HiveAuthzContext.Builder().build());
+        isAdmin = true;
+      } catch (Exception e) {
+      }
+    }
+    return isAdmin;
+  }
+
+  private boolean cancelOperation(Operation operation, boolean isAdmin, String errMsg)
+      throws HiveSQLException {
+    if (isAdmin || operation.getParentSession().getUserName().equals(
+        SessionState.get().getAuthenticator().getUserName())) {
+      OperationHandle handle = operation.getHandle();
+      operationManager.cancelOperation(handle, errMsg);
+      return true;
+    } else {
+      return false;
     }
   }

   @Override
-  public void killQuery(String queryId, String errMsg, HiveConf conf) throws HiveException {
+  public void killQuery(String queryIdOrTag, String errMsg, HiveConf conf) throws HiveException {
     try {
-      String queryTag = null;
-
-      Operation operation = operationManager.getOperationByQueryId(queryId);
-      if (operation == null) {
-        // Check if user has passed the query tag to kill the operation. This is possible if the application
-        // restarts and it does not have the proper query id. The tag can be used in that case to kill the query.
-        operation = operationManager.getOperationByQueryTag(queryId);
-        if (operation == null) {
-          LOG.info("Query not found: " + queryId);
-        }
+      TagOrId tagOrId = TagOrId.UNKNOWN;
+      Set<Operation> operationsToKill = new HashSet<Operation>();
+      if (operationManager.getOperationByQueryId(queryIdOrTag) != null) {
+        operationsToKill.add(operationManager.getOperationByQueryId(queryIdOrTag));
+        tagOrId = TagOrId.ID;
       } else {
-        // This is the normal flow, where the query is tagged and user wants to kill the query using the query id.
-        queryTag = operation.getQueryTag();
+        operationsToKill.addAll(operationManager.getOperationsByQueryTag(queryIdOrTag));
+        if (!operationsToKill.isEmpty()) {
+          tagOrId = TagOrId.TAG;
+        }
       }
-
-      if (queryTag == null) {
-        //use query id as tag if user wanted to kill only the yarn jobs after hive server restart. The yarn jobs are
-        //tagged with query id by default. This will cover the case where the application after restarts wants to kill
-        //the yarn jobs with query tag. The query tag can be passed as query id.
-        queryTag = queryId;
+      if (operationsToKill.isEmpty()) {
+        LOG.info("Query not found: " + queryIdOrTag);
       }
-
-      LOG.info("Killing yarn jobs for query id : " + queryId + " using tag :" + queryTag);
-      killChildYarnJobs(conf, queryTag);
-
-      if (operation != null) {
-        OperationHandle handle = operation.getHandle();
-        operationManager.cancelOperation(handle, errMsg);
+      boolean admin = isAdmin();
+      switch (tagOrId) {
+        case ID:
+          Operation operation = operationsToKill.iterator().next();
+          boolean canceled = cancelOperation(operation, admin, errMsg);
+          if (canceled) {
+            String queryTag = operation.getQueryTag();
+            if (queryTag == null) {
+              queryTag = queryIdOrTag;
+            }
+            killChildYarnJobs(conf, queryTag);
+          } else {
+            // no privilege to cancel
+            throw new HiveSQLException("No privilege");
+          }
+          break;
+        case TAG:
+          int numCanceled = 0;
+          for (Operation operationToKill : operationsToKill) {
+            if (cancelOperation(operationToKill, admin, errMsg)) {
+              numCanceled++;
+            }
+          }
+          killChildYarnJobs(conf, queryIdOrTag);
+          if (numCanceled == 0) {
+            throw new HiveSQLException("No privilege");
+          }
+          break;
+        case UNKNOWN:
+          killChildYarnJobs(conf, queryIdOrTag);
+          break;
       }
     } catch (HiveSQLException e) {
-      LOG.error("Kill query failed for query " + queryId, e);
+      LOG.error("Kill query failed for query " + queryIdOrTag, e);
       throw new HiveException(e.getMessage(), e);
     }
   }
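The privilege rule the rewritten killQuery enforces reduces to: an admin may cancel any matching operation, a non-admin only operations owned by their own user, and a kill that cancels nothing it was entitled to cancel surfaces as a "No privilege" error. A condensed, self-contained sketch of that rule; the helper below is illustrative, not part of the patch, and "isAdmin" stands for passing the authorizer check for HiveOperationType.KILL_QUERY:

    // Condensed sketch of the owner-or-admin rule applied by cancelOperation above.
    public class MayCancelDemo {
      static boolean mayCancel(boolean isAdmin, String operationOwner, String sessionUser) {
        return isAdmin || operationOwner.equals(sessionUser);
      }

      public static void main(String[] args) {
        // Mirrors the three tag test cases in TestJdbcWithMiniLlapArrow:
        System.out.println(mayCancel(false, "user1", "user2")); // false -> "No privilege"
        System.out.println(mayCancel(true,  "user1", "admin")); // true  -> admin kill
        System.out.println(mayCancel(false, "user1", "user1")); // true  -> owner kill
      }
    }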