diff --git ql/src/java/org/apache/hadoop/hive/ql/Compiler.java ql/src/java/org/apache/hadoop/hive/ql/Compiler.java
index a559d90..cc435be 100644
--- ql/src/java/org/apache/hadoop/hive/ql/Compiler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/Compiler.java
@@ -193,7 +193,7 @@
     HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
     if (executeHooks) {
       hookCtx.setConf(driverContext.getConf());
-      hookCtx.setUserName(SessionState.get().getUserName());
+      hookCtx.setUserName(SessionState.get().getUserName1());
       hookCtx.setIpAddress(SessionState.get().getUserIpAddress());
       hookCtx.setCommand(context.getCmd());
       hookCtx.setHiveOperation(driverContext.getQueryState().getHiveOperation());
diff --git ql/src/java/org/apache/hadoop/hive/ql/DriverUtils.java ql/src/java/org/apache/hadoop/hive/ql/DriverUtils.java
index 21e5f72..770b88f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/DriverUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/DriverUtils.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
+import org.apache.hadoop.hive.ql.scheduled.XL1;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import org.apache.hadoop.util.StringUtils;
@@ -94,7 +95,8 @@
     if (sessionState == null) {
       // Note: we assume that workers run on the same threads repeatedly, so we can set up
       // the session here and it will be reused without explicitly storing in the worker.
-      sessionState = new SessionState(conf, user);
+      XL1.setupCC(conf, user);
+      sessionState = new SessionState(conf);
       if (doStart) {
         // TODO: Required due to SessionState.getHDFSSessionPath. Why wasn't it required before?
         sessionState.setIsHiveServerQuery(true);
diff --git ql/src/java/org/apache/hadoop/hive/ql/Executor.java ql/src/java/org/apache/hadoop/hive/ql/Executor.java
index e9909a9..af37a1f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/Executor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/Executor.java
@@ -97,9 +97,8 @@
     try {
       LOG.info("Executing command(queryId=" + driverContext.getQueryId() + "): " + driverContext.getQueryString());
-      // TODO: should this use getUserFromAuthenticator?
       hookContext = new PrivateHookContext(driverContext.getPlan(), driverContext.getQueryState(),
-          context.getPathToCS(), SessionState.get().getUserName(), SessionState.get().getUserIpAddress(),
+          context.getPathToCS(), SessionState.get().getUserName1(), SessionState.get().getUserIpAddress(),
           InetAddress.getLocalHost().getHostAddress(), driverContext.getOperationId(),
           SessionState.get().getSessionId(), Thread.currentThread().getName(),
           SessionState.get().isHiveServerQuery(), SessionState.getPerfLogger(),
           driverContext.getQueryInfo(), context);
diff --git ql/src/java/org/apache/hadoop/hive/ql/QueryState.java ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
index 280b7a4..7f29ad6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
+++ ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
@@ -135,7 +135,7 @@
       jobTag = jobTag.concat("," + queryTag);
     }
     if (SessionState.get() != null) {
-      jobTag = jobTag.concat("," + USERID_TAG + "=" + SessionState.get().getUserName());
+      jobTag = jobTag.concat("," + USERID_TAG + "=" + SessionState.get().getUserName1());
     }
     queryConf.set(MRJobConfig.JOB_TAGS, jobTag);
     queryConf.set(TezConfiguration.TEZ_APPLICATION_TAGS, jobTag);
diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/function/create/CreateFunctionOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/function/create/CreateFunctionOperation.java
index 9489675..7759669 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ddl/function/create/CreateFunctionOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ddl/function/create/CreateFunctionOperation.java
@@ -188,8 +188,7 @@
   private boolean addToMetastore(String dbName, String functionName, String registeredName) throws HiveException {
     try {
-      // TODO: should this use getUserFromAuthenticator instead of SessionState.get().getUserName()?
-      Function function = new Function(functionName, dbName, desc.getClassName(), SessionState.get().getUserName(),
+      Function function = new Function(functionName, dbName, desc.getClassName(), SessionState.get().getUserName1(),
           PrincipalType.USER, (int) (System.currentTimeMillis() / 1000), FunctionType.JAVA, desc.getResources());
       context.getDb().createFunction(function);
       return true;
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/KillTriggerActionHandler.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/KillTriggerActionHandler.java
index cb27998..87d37fb 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/KillTriggerActionHandler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/KillTriggerActionHandler.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.scheduled.XL1;
 import org.apache.hadoop.hive.ql.session.KillQuery;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.wm.Trigger;
@@ -43,8 +44,9 @@
       TezSessionState sessionState = entry.getKey();
       String queryId = sessionState.getWmContext().getQueryId();
       try {
-        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-        SessionState ss = new SessionState(new HiveConf(), ugi.getShortUserName());
+        HiveConf sessionConf = new HiveConf();
+        XL1.setupCC(sessionConf, UserGroupInformation.getCurrentUser().getShortUserName());
+        SessionState ss = new SessionState(sessionConf);
         ss.setIsHiveServerQuery(true);
         SessionState.start(ss);
         KillQuery killQuery = sessionState.getKillQuery();
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java
index 7c0a1fe..752487d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java
@@ -251,8 +251,9 @@
     SessionState ss = SessionState.get();
     String userName = null;
     if (ss != null) {
+      //FIXME remove crap
       userName = ss.getAuthenticator() != null
-          ? ss.getAuthenticator().getUserName() : ss.getUserName();
+          ? ss.getAuthenticator().getUserName() : ss.getUserName1();
     }
     if (userName == null) {
       userName = Utils.getUGI().getShortUserName();
@@ -451,7 +452,7 @@
       return (queueName == null) ? confQueueName == null : queueName.equals(confQueueName);
     } else {
       // this session should never be a default session unless something has messed up.
-      throw new HiveException("The pool session " + session + " should have been returned to the pool");
+      throw new HiveException("The pool session " + session + " should have been returned to the pool");
     }
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
index 25dd970..54bb4ba 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
@@ -329,9 +329,12 @@
     // This should be removed when authenticator and the 2-username mess is cleaned up.
     if (ss.getAuthenticator() != null) {
       String userName = ss.getAuthenticator().getUserName();
-      if (userName != null) return userName;
+      if (userName != null) {
+        return userName;
+      }
     }
-    return ss.getUserName();
+    //FIXME crap
+    return ss.getUserName1();
   }

   private void closeDagClientOnCancellation(DAGClient dagClient) {
@@ -347,7 +350,9 @@
   private void logResources(List additionalLr) {
     // log which resources we're adding (apart from the hive exec)
-    if (!LOG.isDebugEnabled()) return;
+    if (!LOG.isDebugEnabled()) {
+      return;
+    }
     if (additionalLr == null || additionalLr.size() == 0) {
       LOG.debug("No local resources to process (other than hive-exec)");
     } else {
@@ -686,7 +691,9 @@
     }
     LOG.info("Shutting down Tez task " + this + " " + ((dagClient == null) ? " before submit" : ""));
-    if (dagClient == null) return;
+    if (dagClient == null) {
+      return;
+    }
     closeDagClientOnCancellation(dagClient);
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
index 1aa133e..0d0053a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
@@ -68,6 +68,7 @@
 import org.apache.hadoop.hive.ql.exec.tez.UserPoolMapping.MappingInput;
 import org.apache.hadoop.hive.ql.exec.tez.WmEvent.EventType;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.scheduled.XL1;
 import org.apache.hadoop.hive.ql.session.KillQuery;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.wm.ExecutionTrigger;
@@ -248,7 +249,9 @@
   }

   private static int determineQueryParallelism(WMFullResourcePlan plan) {
-    if (plan == null) return 0;
+    if (plan == null) {
+      return 0;
+    }
     int result = 0;
     for (WMPool pool : plan.getPools()) {
       result += pool.getQueryParallelism();
@@ -440,8 +443,9 @@
     WmEvent wmEvent = new WmEvent(WmEvent.EventType.KILL);
     LOG.info("Invoking KillQuery for " + queryId + ": " + reason);
     try {
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      SessionState ss = new SessionState(new HiveConf(), ugi.getShortUserName());
+      HiveConf sessionConf = new HiveConf();
+      XL1.setupCC(sessionConf, UserGroupInformation.getCurrentUser().getShortUserName());
+      SessionState ss = new SessionState(sessionConf);
       ss.setIsHiveServerQuery(true);
       SessionState.start(ss);
       kq.killQuery(queryId, reason, toKill.getConf());
@@ -1069,7 +1073,10 @@
     LOG.info("Updating with " + totalQueryParallelism + " total query parallelism");
     int deltaSessions = totalQueryParallelism - this.totalQueryParallelism;
     this.totalQueryParallelism = totalQueryParallelism;
-    if (deltaSessions == 0) return; // Nothing to do.
+    if (deltaSessions == 0)
+    {
+      return; // Nothing to do.
+    }
     if (deltaSessions < 0) {
       // First, see if we have sessions that we were planning to restart/kill; get rid of those.
       deltaSessions = transferSessionsToDestroy(
@@ -1086,7 +1093,9 @@
       List toDestroy, int deltaSessions) {
     // We were going to kill some queries and reuse the sessions, or maybe restart and put the new
     // ones back into the AM pool. However, the AM pool has shrunk, so we will close them instead.
-    if (deltaSessions >= 0) return deltaSessions;
+    if (deltaSessions >= 0) {
+      return deltaSessions;
+    }
     int toTransfer = Math.min(-deltaSessions, source.size());
     Iterator iter = source.iterator();
     for (int i = 0; i < toTransfer; ++i) {
@@ -1175,7 +1184,10 @@
   private void processPoolChangesOnMasterThread(
       String poolName, boolean hasRequeues, WmThreadSyncWork syncWork) throws Exception {
     PoolState pool = pools.get(poolName);
-    if (pool == null) return; // Might be from before the new resource plan.
+    if (pool == null)
+    {
+      return; // Might be from before the new resource plan.
+    }

     // 1. First, start the queries from the queue.
     int queriesToStart = Math.min(pool.queue.size(),
@@ -1231,7 +1243,9 @@
   private void returnSessionOnFailedReuse(
       GetRequest req, WmThreadSyncWork syncWork, HashSet poolsToRedistribute) {
     WmTezSession session = req.sessionToReuse;
-    if (session == null) return;
+    if (session == null) {
+      return;
+    }
     req.sessionToReuse = null;
     session.setQueryId(null);
     if (poolsToRedistribute != null) {
@@ -1241,7 +1255,9 @@
       // the current iteration, so we would have cleared sessionToReuse when killing this.
       boolean isOk = (rr == RemoveSessionResult.OK);
       assert isOk || rr == RemoveSessionResult.IGNORE;
-      if (!isOk) return;
+      if (!isOk) {
+        return;
+      }
     }
     WmEvent wmEvent = new WmEvent(WmEvent.EventType.RETURN);
     if (!tezAmPool.returnSessionAsync(session)) {
@@ -1383,7 +1399,9 @@
   private final static class GetRequest {
     public static final Comparator ORDER_COMPARATOR = (o1, o2) -> {
-      if (o1.order == o2.order) return 0;
+      if (o1.order == o2.order) {
+        return 0;
+      }
       return o1.order < o2.order ? -1 : 1;
     };
     private final long order;
@@ -1506,7 +1524,9 @@
     try {
       Integer existing = current.updateErrors.get(wmTezSession);
       // Only store the latest error, if there are multiple.
-      if (existing != null && existing >= endpointVersion) return;
+      if (existing != null && existing >= endpointVersion) {
+        return;
+      }
       current.updateErrors.put(wmTezSession, endpointVersion);
       notifyWmThreadUnderLock();
     } finally {
@@ -1616,13 +1636,17 @@
   // ======= VARIOUS UTILITY METHOD

   private void notifyWmThreadUnderLock() {
-    if (hasChanges) return;
+    if (hasChanges) {
+      return;
+    }
     hasChanges = true;
     hasChangesCondition.signalAll();
   }

   private WmTezSession checkSessionForReuse(TezSessionState session) throws Exception {
-    if (session == null) return null;
+    if (session == null) {
+      return null;
+    }
     WmTezSession result = null;
     if (session instanceof WmTezSession) {
       result = (WmTezSession) session;
@@ -1811,7 +1835,9 @@
       switch (schedulingPolicy) {
       case FAIR:
         int totalSessions = sessions.size() + initializingSessions.size();
-        if (totalSessions == 0) return 0;
+        if (totalSessions == 0) {
+          return 0;
+        }
         double allocation = finalFractionRemaining / totalSessions;
         for (WmTezSession session : sessions) {
           updateSessionAllocationWithEvent(session, allocation);
@@ -1820,7 +1846,9 @@
         // we expect init to be fast.
         return finalFractionRemaining - allocation * initializingSessions.size();
       case FIFO:
-        if (sessions.isEmpty()) return 0;
+        if (sessions.isEmpty()) {
+          return 0;
+        }
         boolean isFirst = true;
         for (WmTezSession session : sessions) {
           updateSessionAllocationWithEvent(session, isFirst ? finalFractionRemaining : 0);
@@ -2063,7 +2091,9 @@
   }

   public void discardSessionOnFailure(WmTezSession session) {
-    if (session == null) return;
+    if (session == null) {
+      return;
+    }
     session.clearWm();
     session.setQueryId(null);
     // We can just restart the session if we have received one.
@@ -2106,7 +2136,9 @@
       List results, List toDelete) {
     lock.lock();
     try {
-      if (state != SessionInitState.DONE) return false;
+      if (state != SessionInitState.DONE) {
+        return false;
+      }
       this.state = SessionInitState.CANCELED;
       if (pathToDelete != null) {
         toDelete.add(pathToDelete);
@@ -2189,7 +2221,10 @@
       session.setIsIrrelevantForWm(reason);
       return KillQueryResult.RESTART_REQUIRED;
     }
-    if (!isUserDone || !isKillDone) return KillQueryResult.IN_PROGRESS; // Someone is not done.
+    if (!isUserDone || !isKillDone)
+    {
+      return KillQueryResult.IN_PROGRESS; // Someone is not done.
+    }
     // Both user and the kill have returned.
     if (hasUserFailed && hasKillFailed) {
       // If the kill failed and the user also thinks the session is invalid, restart it.
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ScheduledQueryAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ScheduledQueryAnalyzer.java
index 7e78aca..8525151 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ScheduledQueryAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ScheduledQueryAnalyzer.java
@@ -107,10 +107,11 @@
   private String getUserName() {
     SessionState sessionState = SessionState.get();
+    // FIXME remove crap like this from everywhere:
     if (sessionState.getAuthenticator() != null && sessionState.getAuthenticator().getUserName() != null) {
       return sessionState.getAuthenticator().getUserName();
     }
-    String userName = sessionState.getUserName();
+    String userName = sessionState.getUserName1();
     if(userName == null) {
       throw new RuntimeException("userName is unset; this is unexpected");
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java
index 717a452..ab698a8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java
+++ ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java
@@ -32,7 +32,6 @@
 import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
-import org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -110,9 +109,9 @@
       try {
         HiveConf conf = new HiveConf(context.conf);
         conf.set(Constants.HIVE_QUERY_EXCLUSIVE_LOCK, lockNameFor(q.getScheduleKey()));
-        conf.setVar(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER, SessionStateUserAuthenticator.class.getName());
         conf.unset(HiveConf.ConfVars.HIVESESSIONID.varname);
-        state = new SessionState(conf, q.getUser());
+        XL1.setupCC(conf, q.getUser());
+        state = new SessionState(conf);
         SessionState.start(state);
         reportQueryProgress();
         try (
diff --git ql/src/java/org/apache/hadoop/hive/ql/scheduled/XL1.java ql/src/java/org/apache/hadoop/hive/ql/scheduled/XL1.java
new file mode 100644
index 0000000..740fc1f
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/scheduled/XL1.java
@@ -0,0 +1,13 @@
+package org.apache.hadoop.hive.ql.scheduled;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+public class XL1 {
+
+  public static void setupCC(HiveConf conf, String user) {
+    conf.setVar(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER, SessionStateConfigUserAuthenticator.class.getName());
+    conf.set("user.name", user);
+  }
+
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/SessionStateUserAuthenticator.java ql/src/java/org/apache/hadoop/hive/ql/security/SessionStateUserAuthenticator.java
index 226fbe0..acbd015 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/SessionStateUserAuthenticator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/SessionStateUserAuthenticator.java
@@ -29,24 +29,28 @@
  * Authenticator that returns the userName set in SessionState. For use when authorizing with HS2
  * so that HS2 can set the user for the session through SessionState
  */
+@Deprecated
 public class SessionStateUserAuthenticator implements HiveAuthenticationProvider {

   protected Configuration conf;
   private SessionState sessionState;
   private List groups;

+  public SessionStateUserAuthenticator() {
+    throw new RuntimeException("dont use this!");
+  }
+
   @Override
   public List getGroupNames() {
-    // In case of embedded hs2, sessionState.getUserName()=null
-    if (groups == null && sessionState.getUserName() != null) {
-      groups = UserGroupInformation.createRemoteUser(sessionState.getUserName()).getGroups();
+    if (groups == null) {
+      groups = UserGroupInformation.createRemoteUser(sessionState.getUserName1()).getGroups();
     }
     return groups;
   }

   @Override
   public String getUserName() {
-    return sessionState.getUserName();
+    return sessionState.getUserName1();
   }

   @Override
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java
index c889321..b00ce0e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java
@@ -366,7 +366,7 @@
   @Override
   public List getCurrentRoleNames() throws HiveAuthzPluginException {
-    String userName = SessionState.get().getUserName();
+    String userName = SessionState.get().getUserName1();
     if (userName == null) {
       userName = SessionState.getUserFromAuthenticator();
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 072758b..a87cc01 100644
--- ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -260,8 +260,6 @@
    */
   private Map erasureCodingShims;

-  private final String userName;
-
   /**
    * scratch path to use for all non-local (ie. hdfs) file system tmp folders
    * @return Path for Scratch path for the current session
@@ -401,16 +399,8 @@
   }

   public SessionState(HiveConf conf) {
-    this(conf, null);
-  }
-
-  public SessionState(HiveConf conf, String userName) {
     this.sessionConf = conf;
-    this.userName = userName;
     this.registry = new Registry(false);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("SessionState user: " + userName);
-    }
     isSilent = conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONSILENT);
     resourceMaps = new ResourceMaps();
     // Must be deterministic order map for consistent q-test output across Java versions
@@ -1909,10 +1899,6 @@
     }
   }

-  public String getUserName() {
-    return userName;
-  }
-
   /**
    * If authorization mode is v2, then pass it through authorizer so that it can apply
    * any security configuration changes.
@@ -2078,6 +2064,10 @@
   public String getNewSparkSessionId() {
     return getSessionId() + "_" + Long.toString(this.sparkSessionId.getAndIncrement());
   }
+
+  public String getUserName1() {
+    return getUserFromAuthenticator();
+  }
 }

 class ResourceMaps {
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLoggedInUser.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLoggedInUser.java
index 3d36e0f..a8a43a1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLoggedInUser.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLoggedInUser.java
@@ -45,8 +45,7 @@
     }

     if (loggedInUser == null) {
-      // TODO: getUserFromAuthenticator?
-      String loggedInUserName = SessionState.get().getUserName();
+      String loggedInUserName = SessionState.get().getUserName1();
       if (loggedInUserName != null) {
         loggedInUser = new Text(loggedInUserName);
       }
diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestSessionUserName.java ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestSessionUserName.java
index 61d755d..51c7275 100644
--- ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestSessionUserName.java
+++ ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestSessionUserName.java
@@ -67,22 +67,6 @@
   }

   /**
-   * Test if the authorization factory gets the username set in the SessionState constructor
-   * @throws Exception
-   */
-  @Test
-  public void testSessionConstructorUser() throws Exception {
-    final String USER_NAME = "authtestuser";
-    SessionState ss = new SessionState(getAuthV2HiveConf(), USER_NAME);
-    setupDataNucleusFreeHive(ss.getConf());
-    SessionState.start(ss);
-    ss.getAuthenticator();
-
-    Assert.assertEquals("check username", USER_NAME,
-        HiveAuthorizerStoringUserNameFactory.username);
-  }
-
-  /**
    * Test that the groupNames are retrieved properly from UGI
    * @throws Exception
    */
@@ -92,7 +76,9 @@
     final List testGroups = Arrays.asList("group1", "group2");
     UserGroupInformation.createUserForTesting(testUser, testGroups.toArray(new String[0]));

-    SessionState ss = new SessionState(getAuthV2HiveConf(), testUser);
+    HiveConf conf = getAuthV2HiveConf();
+    conf.set("user.name", testUser);
+    SessionState ss = new SessionState(conf);
     setupDataNucleusFreeHive(ss.getConf());
     assertEquals("check groups", testGroups, ss.getAuthenticator().getGroupNames());
   }
@@ -104,7 +90,7 @@
    */
   @Test
   public void testSessionNullUser() throws Exception {
-    SessionState ss = new SessionState(getAuthV2HiveConf(), null);
+    SessionState ss = new SessionState(getAuthV2HiveConf());
     setupDataNucleusFreeHive(ss.getConf());
     SessionState.start(ss);
diff --git ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java
index 84827d1..a2d57be 100644
--- ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java
+++ ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java
@@ -618,12 +618,13 @@
   }

   private void drainWorkQueue(StatsUpdaterThread su) throws InterruptedException {
-    while (su.runOneWorkerIteration(ss, ss.getUserName(), ss.getConf(), false)) {}
+    while (su.runOneWorkerIteration(ss, ss.getUserName1(), ss.getConf(), false)) {
+    }
   }

   private void drainWorkQueue(StatsUpdaterThread su, int expectedReqs) throws InterruptedException {
     int actualReqs = 0;
-    while (su.runOneWorkerIteration(ss, ss.getUserName(), ss.getConf(), false)) {
+    while (su.runOneWorkerIteration(ss, ss.getUserName1(), ss.getConf(), false)) {
       ++actualReqs;
     }
     assertEquals(expectedReqs, actualReqs);
@@ -716,7 +717,7 @@
   }

   private void executeQuery(String query) throws HiveException {
-    DriverUtils.runOnDriver(hiveConf, ss.getUserName(), ss, query);
+    DriverUtils.runOnDriver(hiveConf, ss.getUserName1(), ss, query);
   }

   private StatsUpdaterThread createUpdater() throws MetaException {
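
For reference, a minimal sketch (not part of the patch) of how a background worker would establish a per-user session under the pattern this patch introduces: XL1.setupCC(...) wires up SessionStateConfigUserAuthenticator and records the user in the conf, and getUserName1() then resolves the name through the authenticator rather than a constructor-captured field. The user name below is a hypothetical example.

    HiveConf conf = new HiveConf();
    // Route user resolution through the config-based authenticator (what setupCC configures).
    XL1.setupCC(conf, "etl_user");
    SessionState ss = new SessionState(conf);
    SessionState.start(ss);
    // After start(), the authenticator is initialized; getUserName1() delegates to it,
    // so it is expected to return "etl_user" under this setup.
    String user = ss.getUserName1();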