diff --git cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java index e57412aca0..68741f67da 100644 --- cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java +++ cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java @@ -181,18 +181,23 @@ public int processCmd(String cmd) { } } else { // local mode try { - CommandProcessor proc = CommandProcessorFactory.get(tokens, (HiveConf) conf); - if (proc instanceof IDriver) { - // Let Driver strip comments using sql parser - ret = processLocalCmd(cmd, proc, ss); - } else { - ret = processLocalCmd(cmd_trimmed, proc, ss); + + try (CommandProcessor proc = CommandProcessorFactory.get(tokens, (HiveConf) conf)) { + if (proc instanceof IDriver) { + // Let Driver strip comments using sql parser + ret = processLocalCmd(cmd, proc, ss); + } else { + ret = processLocalCmd(cmd_trimmed, proc, ss); + } } } catch (SQLException e) { console.printError("Failed processing command " + tokens[0] + " " + e.getLocalizedMessage(), org.apache.hadoop.util.StringUtils.stringifyException(e)); ret = 1; } + catch (Exception e) { + throw new RuntimeException(e); + } } ss.resetThreadName(); @@ -270,10 +275,7 @@ int processLocalCmd(String cmd, CommandProcessor proc, CliSessionState ss) { ret = 1; } - int cret = qp.close(); - if (ret == 0) { - ret = cret; - } + qp.close(); if (out instanceof FetchConverter) { ((FetchConverter) out).fetchFinished(); @@ -402,11 +404,9 @@ public void handle(Signal signal) { lastRet = ret; boolean ignoreErrors = HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIIGNOREERRORS); if (ret != 0 && !ignoreErrors) { - CommandProcessorFactory.clean((HiveConf) conf); return ret; } } - CommandProcessorFactory.clean((HiveConf) conf); return lastRet; } finally { // Once we are done processing the line, restore the old handler diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatCli.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatCli.java index a36b0db3a5..d7a9bb094d 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatCli.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatCli.java @@ -144,9 +144,6 @@ public static void main(String[] args) { // -D : process these first, so that we can instantiate SessionState appropriately. setConfProperties(conf, cmdLine.getOptionProperties("D")); - // Now that the properties are in, we can instantiate SessionState. - SessionState.start(ss); - // -h if (cmdLine.hasOption('h')) { printUsage(options, ss.out); @@ -176,6 +173,9 @@ public static void main(String[] args) { conf.set(HCatConstants.HCAT_GROUP, grp); } + // Now that the properties are in, we can instantiate SessionState. + SessionState.start(ss); + // all done parsing, let's run stuff! 
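// NOTE: two related changes meet here: CliDriver (above) now scopes each
// CommandProcessor to a single command with try-with-resources (CommandProcessor
// becomes AutoCloseable later in this patch), e.g.
//   try (CommandProcessor proc = CommandProcessorFactory.get(tokens, conf)) { ... }
// and HCatCli defers SessionState.start(ss) until the -p/-g options have been
// applied, so the session is created from a fully-populated conf.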
if (execString != null) { @@ -286,7 +286,7 @@ private static int processCmd(String cmd) { return new DfsProcessor(ss.getConf()).run(cmd.substring(firstToken.length()).trim()).getResponseCode(); } - HCatDriver driver = new HCatDriver(); + HCatDriver driver = new HCatDriver(ss.getConf()); int ret = driver.run(cmd).getResponseCode(); diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java index e112412f75..6a7b9390de 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java @@ -21,10 +21,10 @@ import java.io.IOException; import java.util.ArrayList; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; @@ -39,8 +39,8 @@ private IDriver driver; - public HCatDriver() { - driver = DriverFactory.newDriver(); + public HCatDriver(HiveConf hiveConf) { + driver = DriverFactory.newDriver(hiveConf); } public CommandProcessorResponse run(String command) { @@ -52,7 +52,8 @@ public CommandProcessorResponse run(String command) { if (cpr.getResponseCode() == 0) { // Only attempt to do this, if cmd was successful. - int rc = setFSPermsNGrp(ss); + // FIXME: it would probably be better to move this to an after-execution step + int rc = setFSPermsNGrp(ss, driver.getConf()); cpr = new CommandProcessorResponse(rc); } // reset conf vars @@ -62,9 +63,7 @@ public CommandProcessorResponse run(String command) { return cpr; } - private int setFSPermsNGrp(SessionState ss) { - - Configuration conf = ss.getConf(); + private int setFSPermsNGrp(SessionState ss, HiveConf conf) { String tblName = conf.get(HCatConstants.HCAT_CREATE_TBL_NAME, ""); if (tblName.isEmpty()) { @@ -145,7 +144,8 @@ private int setFSPermsNGrp(SessionState ss) { } public int close() { - return driver.close(); + driver.close(); + return 0; } public boolean getResults(ArrayList<String> res) throws IOException { diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java index 4dbf7acb9d..d78ab78b90 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java @@ -25,8 +25,6 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import junit.framework.TestCase; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.conf.HiveConf; @@ -44,7 +42,6 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.Type; -import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.ql.metadata.Hive; @@ -58,6 +55,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import junit.framework.TestCase; + public class TestPermsGrp extends TestCase { private boolean isServerRunning = false; @@ -120,12 +119,13 @@ public void testCustomPerms() throws Exception {
// Next user did specify perms. try { callHCatCli(new String[]{"-e", "create table simptbl (name string) stored as RCFILE", "-p", "rwx-wx---"}); + fail(); } catch (Exception e) { assertTrue(e instanceof ExitException); assertEquals(((ExitException) e).getStatus(), 0); } dfsPath = clientWH.getDefaultTablePath(db, tblName); - assertTrue(dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath).getPermission().equals(FsPermission.valueOf("drwx-wx---"))); + assertEquals(FsPermission.valueOf("drwx-wx---"), dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath).getPermission()); cleanupTbl(dbName, tblName, typeName); @@ -134,7 +134,7 @@ public void testCustomPerms() throws Exception { // make sure create table fails. try { callHCatCli(new String[]{"-e", "create table simptbl (name string) stored as RCFILE", "-p", "rwx"}); - assert false; + fail(); } catch (Exception me) { assertTrue(me instanceof ExitException); } @@ -142,7 +142,7 @@ public void testCustomPerms() throws Exception { dfsPath = clientWH.getDefaultTablePath(db, tblName); try { dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath); - assert false; + fail(); } catch (Exception fnfe) { assertTrue(fnfe instanceof FileNotFoundException); } @@ -150,7 +150,7 @@ public void testCustomPerms() throws Exception { // And no metadata gets created. try { msc.getTable(Warehouse.DEFAULT_DATABASE_NAME, tblName); - assert false; + fail(); } catch (Exception e) { assertTrue(e instanceof NoSuchObjectException); assertEquals("default.simptbl table not found", e.getMessage()); @@ -163,7 +163,7 @@ public void testCustomPerms() throws Exception { try { // create table must fail. callHCatCli(new String[]{"-e", "create table simptbl (name string) stored as RCFILE", "-p", "rw-rw-rw-", "-g", "THIS_CANNOT_BE_A_VALID_GRP_NAME_EVER"}); - assert false; + fail(); } catch (Exception me) { assertTrue(me instanceof SecurityException); } @@ -171,7 +171,7 @@ public void testCustomPerms() throws Exception { try { // no metadata should get created. msc.getTable(dbName, tblName); - assert false; + fail(); } catch (Exception e) { assertTrue(e instanceof NoSuchObjectException); assertEquals("default.simptbl table not found", e.getMessage()); @@ -179,7 +179,7 @@ public void testCustomPerms() throws Exception { try { // neither dir should get created. 
dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath); - assert false; + fail(); } catch (Exception e) { assertTrue(e instanceof FileNotFoundException); } diff --git hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java index 1560571931..72ed8dfcba 100644 --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java @@ -79,6 +79,7 @@ @RunWith(Parameterized.class) public class TestHCatLoaderEncryption { + private static final AtomicInteger salt = new AtomicInteger(new Random().nextInt()); private static final Logger LOG = LoggerFactory.getLogger(TestHCatLoaderEncryption.class); private final String TEST_DATA_DIR = HCatUtil.makePathASafeFileName(System.getProperty @@ -177,12 +178,12 @@ public void setup() throws Exception { "_" + salt.getAndIncrement() + "/dfs/"); } - driver = DriverFactory.newDriver(hiveConf); - initEncryptionShim(hiveConf); String encryptedTablePath = TEST_WAREHOUSE_DIR + "/encryptedTable"; SessionState.start(new CliSessionState(hiveConf)); + driver = DriverFactory.newDriver(hiveConf); + SessionState.get().out = System.out; createTable(BASIC_TABLE, "a int, b string"); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java index 89812232d4..d69696c391 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java @@ -121,7 +121,8 @@ public static void beforeTest() throws Exception { conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); UtilsForTest.setNewDerbyDbLocation(conf, TestHiveAuthorizerShowFilters.class.getSimpleName()); - SessionState.start(conf); + SessionState ss = SessionState.start(conf); + ss.applyAuthorizationPolicy(); driver = DriverFactory.newDriver(conf); runCmd("create table " + tableName1 + " (i int, j int, k string) partitioned by (city string, `date` string) "); diff --git ql/src/java/org/apache/hadoop/hive/ql/Driver.java ql/src/java/org/apache/hadoop/hive/ql/Driver.java index c6f7d6459e..8f7291d182 100644 --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -40,6 +40,7 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.ValidReadTxnList; @@ -172,7 +173,7 @@ private LockedDriverState lDrvState = new LockedDriverState(); // Query specific info - private QueryState queryState; + private final QueryState queryState; // Query hooks that execute before compilation and after execution private QueryLifeTimeHookRunner queryLifeTimeHookRunner; @@ -371,25 +372,17 @@ public void setMaxRows(int maxRows) { this.maxRows = maxRows; } - public Driver() { - this(getNewQueryState((SessionState.get() != null) ? 
- SessionState.get().getConf() : new HiveConf()), null); - } - public Driver(HiveConf conf) { - this(getNewQueryState(conf), null); + this(new QueryState.Builder().withGenerateNewQueryId(true).withHiveConf(conf).build(), null); } // Pass lineageState when a driver instantiates another Driver to run // or compile another query + // NOTE: only used from index-related classes public Driver(HiveConf conf, LineageState lineageState) { this(getNewQueryState(conf, lineageState), null); } - public Driver(HiveConf conf, HiveTxnManager txnMgr) { - this(getNewQueryState(conf), null, null, txnMgr); - } - // Pass lineageState when a driver instantiates another Driver to run // or compile another query public Driver(HiveConf conf, Context ctx, LineageState lineageState) { @@ -397,10 +390,6 @@ public Driver(HiveConf conf, Context ctx, LineageState lineageState) { this.ctx = ctx; } - public Driver(HiveConf conf, String userName) { - this(getNewQueryState(conf), userName, null); - } - // Pass lineageState when a driver instantiates another Driver to run // or compile another query public Driver(HiveConf conf, String userName, LineageState lineageState) { @@ -411,10 +400,6 @@ public Driver(QueryState queryState, String userName) { this(queryState, userName, new HooksLoader(queryState.getConf()), null, null); } - public Driver(HiveConf conf, HooksLoader hooksLoader) { - this(getNewQueryState(conf), null, hooksLoader, null, null); - } - public Driver(QueryState queryState, String userName, QueryInfo queryInfo) { this(queryState, userName, new HooksLoader(queryState.getConf()), queryInfo, null); } @@ -438,17 +423,6 @@ public Driver(QueryState queryState, String userName, HooksLoader hooksLoader, Q /** * Generating the new QueryState object. Making sure, that the new queryId is generated. * @param conf The HiveConf which should be used - * @return The new QueryState object - */ - // move to driverFactory ; with those constructors... - @Deprecated - private static QueryState getNewQueryState(HiveConf conf) { - return new QueryState.Builder().withGenerateNewQueryId(true).withHiveConf(conf).build(); - } - - /** - * Generating the new QueryState object. Making sure, that the new queryId is generated. - * @param conf The HiveConf which should be used * @param lineageState a LineageState to be set in the new QueryState object * @return The new QueryState object */ @@ -542,6 +516,9 @@ private void compile(String command, boolean resetTaskIds, boolean deferClose) t LOG.info("Compiling command(queryId=" + queryId + "): " + queryStr); + conf.setQueryString(queryStr); + // FIXME: side effect will leave the last query set at the session level + SessionState.get().getConf().setQueryString(queryStr); SessionState.get().setupQueryCurrentTimestamp(); // Whether any error occurred during query compilation. Used for query lifetime hook.
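Note: with the zero-arg and single-purpose HiveConf constructors removed, every Driver is now built from an explicit QueryState. A minimal sketch of the new instantiation path, assuming only what this patch shows (the query text, null userName and class name are illustrative, not part of the patch):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.QueryState;
import org.apache.hadoop.hive.ql.session.SessionState;

public class DriverBootstrapSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    SessionState.start(conf);            // Driver still requires a started session
    // The Builder clones the conf by default now ("isolated"), so per-query
    // mutations no longer leak back into the session-level HiveConf.
    QueryState queryState = new QueryState.Builder()
        .withGenerateNewQueryId(true)
        .withHiveConf(conf)
        .build();
    Driver driver = new Driver(queryState, /* userName */ null);
    int rc = driver.run("show databases").getResponseCode();
    driver.close();                      // close() is void now and ends in destroy()
    System.exit(rc);
  }
}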
@@ -556,6 +533,9 @@ private void compile(String command, boolean resetTaskIds, boolean deferClose) t } else { queryTxnMgr = SessionState.get().initTxnMgr(conf); } + if (queryTxnMgr instanceof Configurable) { + ((Configurable) queryTxnMgr).setConf(conf); + } queryState.setTxnManager(queryTxnMgr); // In case when user Ctrl-C twice to kill Hive CLI JVM, we want to release locks @@ -660,7 +640,6 @@ public void run() { plan = new QueryPlan(queryStr, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId, queryState.getHiveOperation(), schema); - conf.setQueryString(queryStr); conf.set("mapreduce.workflow.id", "hive_" + queryId); conf.set("mapreduce.workflow.name", queryStr); @@ -1203,6 +1182,11 @@ private static HiveOperationType getHiveOperationType(HiveOperation op) { return HiveOperationType.valueOf(op.name()); } + @Override + public HiveConf getConf() { + return conf; + } + /** * @return The current query plan associated with this Driver, if any. */ @@ -1379,7 +1363,7 @@ public void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnMa * Release some resources after a query is executed * while keeping the result around. */ - private void releaseResources() { + public void releaseResources() { releasePlan(); releaseDriverContext(); } @@ -2498,14 +2482,13 @@ private int closeInProcess(boolean destroyed) { // is called to stop the query if it is running, clean query results, and release resources. @Override - public int close() { + public void close() { lDrvState.stateLock.lock(); try { releaseDriverContext(); if (lDrvState.driverState == DriverState.COMPILING || lDrvState.driverState == DriverState.EXECUTING) { lDrvState.abort(); - return 0; } releasePlan(); releaseCachedResult(); @@ -2517,7 +2500,7 @@ public int close() { lDrvState.stateLock.unlock(); LockedDriverState.removeLockedDriverState(); } - return 0; + destroy(); } // is usually called after close() to commit or rollback a query and end the driver life cycle. @@ -2571,19 +2554,6 @@ public void setOperationId(String opId) { this.operationId = opId; } - /** - * Resets QueryState to get new queryId on Driver reuse. - */ - - @Override - public void resetQueryState() { - // Note: Driver cleanup for reuse at this point is not very clear. The assumption here is that - // repeated compile/execute calls create new contexts, plan, etc., so we don't need to worry - // propagating queryState into those existing fields, or resetting them. 
- releaseResources(); - this.queryState = getNewQueryState(queryState.getConf()); - } - public QueryState getQueryState() { return queryState; } diff --git ql/src/java/org/apache/hadoop/hive/ql/DriverFactory.java ql/src/java/org/apache/hadoop/hive/ql/DriverFactory.java index 49d2bf5f33..60e8de8fd4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/DriverFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/DriverFactory.java @@ -19,17 +19,12 @@ package org.apache.hadoop.hive.ql; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.session.SessionState; /** * Constructs a driver for ql clients */ public class DriverFactory { - public static IDriver newDriver(HiveConf conf) { - return newDriver(getNewQueryState(conf), null, null); - } - enum ExecutionStrategy { none { @Override @@ -41,22 +36,16 @@ IDriver build(QueryState queryState, String userName, QueryInfo queryInfo) { abstract IDriver build(QueryState queryState, String userName, QueryInfo queryInfo); } + public static IDriver newDriver(HiveConf conf) { + return newDriver(getNewQueryState(conf), null, null); + } + public static IDriver newDriver(QueryState queryState, String userName, QueryInfo queryInfo) { ExecutionStrategy strategy = ExecutionStrategy.none; return strategy.build(queryState, userName, queryInfo); } private static QueryState getNewQueryState(HiveConf conf) { - // FIXME: isolate hiveConf used for a single query return new QueryState.Builder().withGenerateNewQueryId(true).withHiveConf(conf).build(); } - - // FIXME: remove this method ; and use the conf at the callsite... - @Deprecated - public static IDriver newDriver() { - // only CLIDriver enter at this point - HiveConf conf = (SessionState.get() != null) ? SessionState.get().getConf() : new HiveConf(); - return newDriver(conf); - } - } diff --git ql/src/java/org/apache/hadoop/hive/ql/IDriver.java ql/src/java/org/apache/hadoop/hive/ql/IDriver.java index d4494cc72e..77996eb255 100644 --- ql/src/java/org/apache/hadoop/hive/ql/IDriver.java +++ ql/src/java/org/apache/hadoop/hive/ql/IDriver.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.List; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.Schema; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.processors.CommandProcessor; @@ -60,8 +61,10 @@ void resetFetch() throws IOException; // close&destroy is used in seq coupling most of the time - the difference is either not clear; or not relevant - remove? - int close(); + @Override + void close(); void destroy(); - void resetQueryState(); + // FIXME: remove after moving hcat fsperms to a hook + HiveConf getConf(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/QueryState.java ql/src/java/org/apache/hadoop/hive/ql/QueryState.java index d8d19e8606..6bc9630c43 100644 --- ql/src/java/org/apache/hadoop/hive/ql/QueryState.java +++ ql/src/java/org/apache/hadoop/hive/ql/QueryState.java @@ -19,11 +19,15 @@ package org.apache.hadoop.hive.ql; import java.util.Map; +import java.util.Set; +import com.google.common.collect.Sets; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.session.LineageState; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The class to store query level info such as queryId.
Multiple queries can run @@ -106,7 +110,7 @@ public void setTxnManager(HiveTxnManager txnManager) { */ public static class Builder { private Map<String, String> confOverlay = null; - private boolean runAsync = false; + private boolean isolated = true; private boolean generateNewQueryId = false; private HiveConf hiveConf = null; private LineageState lineageState = null; @@ -118,17 +122,6 @@ public Builder() { } /** - * Set this to true if the configuration should be detached from the original config. If not - * set the default value is false. - * @param runAsync If the configuration should be detached - * @return The builder - */ - public Builder withRunAsync(boolean runAsync) { - this.runAsync = runAsync; - return this; - } - - /** * Set this if there are specific configuration values which should be added to the original * config. If at least one value is set, then the configuration will be detached from the * original one. @@ -141,6 +134,16 @@ public Builder withConfOverlay(Map<String, String> confOverlay) { } /** + * Disable configuration isolation. + * + * For internal use / testing purposes only. + */ + public Builder nonIsolated() { + isolated = false; + return this; + } + + /** * Set this to true if new queryId should be generated, otherwise the original one will be kept. * If not set the default value is false. * @param generateNewQueryId If new queryId should be generated @@ -173,6 +176,57 @@ public Builder withLineageState(LineageState lineageState) { return this; } + private static final Logger LOG = LoggerFactory.getLogger(QueryState.class); + + public class HiveConf1 extends HiveConf { + + Set<String> protected1 = Sets.newHashSet("hive.txn.valid.txns", "fs.permissions.umask-mode", "fs.scheme.class", + "hive.conf.restricted.list", "hive.exec.dynamic.partition.mode", "hive.execution.engine", "_hive.hdfs.session.path", + "hive.internal.ss.authz.settings.applied.marker", "_hive.local.session.path", "hive.mapred.mode", "hive.metastore.filter.hook", + "hive.metastore.rawstore.impl", "hive.security.authenticator.manager", "hive.security.authorization.createtable.owner.grants", + "hive.session.id", "hive.test.init.phase", "hive.test.shutdown.phase", "_hive.tmp_table_space", "hive.zookeeper.client.port", + "hive.zookeeper.quorum", "io.file.buffer.size", "mapreduce.job.name", "mapreduce.workflow.name", "test.data.dir", + "hive.query.id", + + // "hive.doing.acid", + + "mapreduce.workflow.adjacency.Stage-0", "mapreduce.workflow.id", "mapreduce.workflow.node.name" + + ); + + private final HiveConf pc; + + public HiveConf1() { + this(new HiveConf()); + } + + public HiveConf1(HiveConf hiveConf) { + super(hiveConf); + pc = hiveConf; + } + + @Override + public void set(String arg0, String arg1, String arg2) { + LOG.info("set3|{}|{}|{}", arg0, arg1, arg2); + super.set(arg0, arg1, arg2); + if (!protected1.contains(arg0)) { + pc.set(arg0, arg1, arg2); + LOG.info("BYP3|{}|{}|{}", arg0, arg1, arg2); + } + } + + @Override + public void set(String name, String value) { + LOG.info("set2|{}|{}", name, value); + super.set(name, value); + if (!protected1.contains(name)) { + pc.set(name, value); + LOG.info("BYP2|{}|{}", name, value); + } + } + + } + /** * Creates the QueryState object.
The default values are: * - runAsync false @@ -182,14 +238,17 @@ public Builder withLineageState(LineageState lineageState) { * @return The generated QueryState object */ public QueryState build() { - HiveConf queryConf = hiveConf; - - if (queryConf == null) { - // Generate a new conf if necessary - queryConf = new HiveConf(); - } else if (runAsync || (confOverlay != null && !confOverlay.isEmpty())) { - // Detach the original conf if necessary - queryConf = new HiveConf(queryConf); + HiveConf queryConf; + + if (isolated) { + // isolate query conf + if (hiveConf == null) { + queryConf = new HiveConf(); + } else { + queryConf = new HiveConf(hiveConf); + } + } else { + queryConf = hiveConf; } // Set the specific parameters if needed @@ -206,7 +265,13 @@ public QueryState build() { // Generate the new queryId if needed if (generateNewQueryId) { - queryConf.setVar(HiveConf.ConfVars.HIVEQUERYID, QueryPlan.makeQueryId()); + String queryId = QueryPlan.makeQueryId(); + queryConf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId); + // FIXME: druid storage handler relies on query.id to maintain some staging directories + // expose queryid to session level + if (hiveConf != null) { + hiveConf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId); + } } QueryState queryState = new QueryState(queryConf); diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/HooksLoader.java ql/src/java/org/apache/hadoop/hive/ql/hooks/HooksLoader.java index 5a370e89a9..8c19338bcf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/hooks/HooksLoader.java +++ ql/src/java/org/apache/hadoop/hive/ql/hooks/HooksLoader.java @@ -58,8 +58,8 @@ public HooksLoader(HiveConf conf) { * @throws IllegalAccessException if the specified class names could not be accessed * @throws InstantiationException if the specified class names could not be instantiated */ - public final List getHooks(HiveConf.ConfVars hookConfVar, SessionState.LogHelper console, Class clazz) - throws IllegalAccessException, InstantiationException, ClassNotFoundException { + public final List getHooks(HiveConf.ConfVars hookConfVar, SessionState.LogHelper console, + Class clazz) throws IllegalAccessException, InstantiationException, ClassNotFoundException { try { return getHooks(hookConfVar, clazz); } catch (ClassNotFoundException e) { @@ -85,8 +85,8 @@ public HooksLoader(HiveConf conf) { * @throws IllegalAccessException if the specified class names could not be accessed * @throws InstantiationException if the specified class names could not be instantiated */ - public List getHooks(HiveConf.ConfVars hookConfVar, Class clazz) - throws InstantiationException, IllegalAccessException, ClassNotFoundException { + public List getHooks(HiveConf.ConfVars hookConfVar, Class clazz) + throws InstantiationException, IllegalAccessException, ClassNotFoundException { String csHooks = conf.getVar(hookConfVar); ImmutableList.Builder hooks = ImmutableList.builder(); if (csHooks == null) { diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java index cf8bc7f256..fca640859b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java @@ -47,6 +47,8 @@ private HiveLockManager lockMgr; + private HiveLockManagerCtx lockManagerCtx; + @Override public long openTxn(Context ctx, String user) throws LockException { // No-op @@ -81,7 +83,8 @@ public HiveLockManager getLockManager() throws LockException { LOG.info("Creating lock manager 
of type " + lockMgrName); lockMgr = (HiveLockManager)ReflectionUtils.newInstance( conf.getClassByName(lockMgrName), conf); - lockMgr.setContext(new HiveLockManagerCtx(conf)); + lockManagerCtx = new HiveLockManagerCtx(conf); + lockMgr.setContext(lockManagerCtx); } catch (Exception e) { // set hiveLockMgr to null just in case this invalid manager got set to // next query's ctx. @@ -103,6 +106,7 @@ public HiveLockManager getLockManager() throws LockException { } // Force a re-read of the configuration file. This is done because // different queries in the session may be using the same lock manager. + lockManagerCtx.setConf(conf); lockMgr.refresh(); return lockMgr; } @@ -119,7 +123,9 @@ public void acquireLocks(QueryPlan plan, Context ctx, String username, LockedDri // If the lock manager is still null, then it means we aren't using a // lock manager - if (lockMgr == null) return; + if (lockMgr == null) { + return; + } List lockObjects = new ArrayList(); @@ -234,6 +240,7 @@ public boolean supportsAcid() { } + @Override protected void destruct() { if (lockMgr != null) { try { diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java index d750e77215..c8cafa2a68 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManagerImpl.java @@ -21,6 +21,8 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.Driver.LockedDriverState; @@ -42,12 +44,22 @@ * transaction managers need to implement but that we don't want to expose to * outside. */ -abstract class HiveTxnManagerImpl implements HiveTxnManager { +abstract class HiveTxnManagerImpl implements HiveTxnManager, Configurable { protected HiveConf conf; void setHiveConf(HiveConf c) { - conf = c; + setConf(c); + } + + @Override + public void setConf(Configuration c) { + conf = (HiveConf) c; + } + + @Override + public Configuration getConf() { + return conf; } abstract protected void destruct(); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 8e587f1cf6..cde505e9dc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -7504,6 +7504,7 @@ These props are now enabled elsewhere (see commit diffs). It would be better in backwards incompatible. 
*/ conf.set(AcidUtils.CONF_ACID_KEY, "true"); + SessionState.get().getConf().set(AcidUtils.CONF_ACID_KEY, "true"); } /** diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/AddResourceProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/AddResourceProcessor.java index 5fcbd69644..d2286821f4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/AddResourceProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/AddResourceProcessor.java @@ -75,4 +75,8 @@ public CommandProcessorResponse run(String command) { return new CommandProcessorResponse(0); } + @Override + public void close() throws Exception { + } + } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessor.java index c7532648f4..4d73181659 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessor.java @@ -18,6 +18,6 @@ package org.apache.hadoop.hive.ql.processors; -public interface CommandProcessor { +public interface CommandProcessor extends AutoCloseable { CommandProcessorResponse run(String command); } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java index dcf8d31eaf..74a34b3ffb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java @@ -21,18 +21,15 @@ import static org.apache.commons.lang.StringUtils.isBlank; import java.sql.SQLException; -import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; -import java.util.Map; import java.util.Set; +import javax.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.DriverFactory; -import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.metadata.*; import org.apache.hadoop.hive.ql.session.SessionState; @@ -46,13 +43,6 @@ private CommandProcessorFactory() { // prevent instantiation } - private static final Map mapDrivers = Collections.synchronizedMap(new HashMap()); - - public static CommandProcessor get(String cmd) - throws SQLException { - return get(new String[]{cmd}, null); - } - public static CommandProcessor getForHiveCommand(String[] cmd, HiveConf conf) throws SQLException { return getForHiveCommandInternal(cmd, conf, false); @@ -111,8 +101,8 @@ public static CommandProcessor getForHiveCommandInternal(String[] cmd, HiveConf } static Logger LOG = LoggerFactory.getLogger(CommandProcessorFactory.class); - public static CommandProcessor get(String[] cmd, HiveConf conf) - throws SQLException { + + public static CommandProcessor get(String[] cmd, @Nonnull HiveConf conf) throws SQLException { CommandProcessor result = getForHiveCommand(cmd, conf); if (result != null) { return result; @@ -120,27 +110,7 @@ public static CommandProcessor get(String[] cmd, HiveConf conf) if (isBlank(cmd[0])) { return null; } else { - if (conf == null) { - return new Driver(); - } - IDriver drv = mapDrivers.get(conf); - if (drv == null) { - // FIXME: why this method didn't use the conf constructor? 
- drv = DriverFactory.newDriver(); - mapDrivers.put(conf, drv); - } else { - drv.resetQueryState(); - } - return drv; + return DriverFactory.newDriver(conf); } } - - public static void clean(HiveConf conf) { - IDriver drv = mapDrivers.get(conf); - if (drv != null) { - drv.destroy(); - } - - mapDrivers.remove(conf); - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CompileProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/CompileProcessor.java index fad4f52c03..7b96b33ba3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/CompileProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/CompileProcessor.java @@ -309,4 +309,8 @@ public String getCommand() { super(s); } } + + @Override + public void close() throws Exception { + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java index d1202f92bb..d2a864a9be 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java @@ -176,4 +176,8 @@ private void deleteEncryptionKey(String[] params) throws Exception { writeTestOutput("Encryption key deleted: '" + keyName + "'"); } + + @Override + public void close() throws Exception { + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/DeleteResourceProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/DeleteResourceProcessor.java index 54a7d4b0de..bac020d5a7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/DeleteResourceProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/DeleteResourceProcessor.java @@ -72,4 +72,8 @@ public CommandProcessorResponse run(String command) { return new CommandProcessorResponse(0); } + + @Override + public void close() throws Exception { + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java index 62a1725114..0b334e14c4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java @@ -114,7 +114,7 @@ public CommandProcessorResponse run(String command) { switch(x) { case ' ': - if ((int) y == 0) { + if (y == 0) { String str = command.substring(start, i).trim(); if (!str.equals("")) { paras.add(str); @@ -123,7 +123,7 @@ public CommandProcessorResponse run(String command) { } break; case '"': - if ((int) y == 0) { + if (y == 0) { y = x; start = i + 1; } else if ('"' == y) { @@ -133,7 +133,7 @@ public CommandProcessorResponse run(String command) { } break; case '\'': - if ((int) y == 0) { + if (y == 0) { y = x; start = i + 1; } else if ('\'' == y) { @@ -150,7 +150,7 @@ public CommandProcessorResponse run(String command) { } } - if ((int) y != 0) { + if (y != 0) { String message = "Syntax error on hadoop options: dfs " + command; console.printError(message); throw new HiveException(message); @@ -159,4 +159,7 @@ public CommandProcessorResponse run(String command) { return paras.toArray(new String[paras.size()]); } + @Override + public void close() throws Exception { + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/ListResourceProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/ListResourceProcessor.java index 91a6aba62f..afd604aa67 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/ListResourceProcessor.java +++ 
ql/src/java/org/apache/hadoop/hive/ql/processors/ListResourceProcessor.java @@ -62,4 +62,8 @@ public CommandProcessorResponse run(String command) { } return new CommandProcessorResponse(0, null, null, SCHEMA); } + + @Override + public void close() throws Exception { + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/ReloadProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/ReloadProcessor.java index 4caab916ff..bcbc03083e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/ReloadProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/ReloadProcessor.java @@ -41,4 +41,8 @@ public CommandProcessorResponse run(String command) { } return new CommandProcessorResponse(0); } + + @Override + public void close() throws Exception { + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java index ca39ff98ce..ce65ccd4bc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java @@ -156,4 +156,8 @@ private static CommandProcessorResponse resetToDefault(SessionState ss, String v } return confVars; } + + @Override + public void close() throws Exception { + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java index 1ff4b3c947..db0fef1684 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java @@ -433,4 +433,7 @@ private Schema getSchema() { return sch; } + @Override + public void close() throws Exception { + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 26c6700d99..e5ebf9ad43 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hive.metastore.txn.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsUtils; @@ -237,7 +238,7 @@ public static StatsUpdater init(CompactionInfo ci, List columnListForSta private final HiveConf conf; private final String userName; private final CompactionInfo ci; - + private StatsUpdater(CompactionInfo ci, List columnListForStats, HiveConf conf, String userName) { this.conf = conf; @@ -287,7 +288,7 @@ void gatherStats() throws IOException { } sb.setLength(sb.length() - 1); //remove trailing , LOG.info("running '" + sb.toString() + "'"); - Driver d = new Driver(conf, userName); + Driver d = new Driver(new QueryState.Builder().withGenerateNewQueryId(true).withHiveConf(conf).build(), userName); SessionState localSession = null; if(SessionState.get() == null) { localSession = SessionState.start(new SessionState(conf)); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java index 1e83799e03..128b66ce45 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java +++ 
ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java @@ -61,6 +61,7 @@ import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.QueryPlan; +import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; @@ -242,7 +243,7 @@ public PlanFragment createPlanFragment(String query, int num, ApplicationId spli // So initialize the new Driver with a new TxnManager so that it does not use the // Session TxnManager that is already in use. HiveTxnManager txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); - Driver driver = new Driver(conf, txnManager); + Driver driver = new Driver(new QueryState.Builder().withHiveConf(conf).nonIsolated().build(), null, null, txnManager); DriverCleanup driverCleanup = new DriverCleanup(driver, txnManager, splitsAppId.toString()); boolean needsCleanup = true; try { @@ -267,8 +268,7 @@ public PlanFragment createPlanFragment(String query, int num, ApplicationId spli String ctas = "create temporary table " + tableName + " as " + query; LOG.info("Materializing the query for LLAPIF; CTAS: " + ctas); - - driver.resetQueryState(); + driver.releaseResources(); HiveConf.setVar(conf, ConfVars.HIVE_EXECUTION_MODE, originalMode); cpr = driver.run(ctas, false); diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java index 048215aa37..bab6d5e71c 100644 --- ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java +++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java @@ -138,8 +138,9 @@ void setUpWithTableProperties(String tableProperties) throws Exception { if (!(new File(TEST_WAREHOUSE_DIR).mkdirs())) { throw new RuntimeException("Could not create " + TEST_WAREHOUSE_DIR); } - SessionState.start(new SessionState(hiveConf)); - d = new Driver(hiveConf); + SessionState ss = SessionState.start(hiveConf); + ss.applyAuthorizationPolicy(); + d = new Driver(new QueryState.Builder().withHiveConf(hiveConf).nonIsolated().build(), null); d.setMaxRows(10000); dropTables(); runStatementOnDriver("create table " + Table.ACIDTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES (" + tableProperties + ")"); diff --git ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java index 93074e998d..12083fd79a 100644 --- ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java +++ ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java @@ -25,7 +25,6 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; @@ -92,8 +91,9 @@ void setUpInternal() throws Exception { if (!(new File(getWarehouseDir()).mkdirs())) { throw new RuntimeException("Could not create " + getWarehouseDir()); } - SessionState.start(new SessionState(hiveConf)); - d = new Driver(hiveConf); + SessionState ss = SessionState.start(hiveConf); + ss.applyAuthorizationPolicy(); + d = new Driver(new 
QueryState.Builder().withHiveConf(hiveConf).nonIsolated().build(), null); d.setMaxRows(10000); dropTables(); runStatementOnDriver("create table " + Table.ACIDTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='true')"); @@ -136,7 +136,7 @@ String getWarehouseDir() { String makeValuesClause(int[][] rows) { return TestTxnCommands2.makeValuesClause(rows); } - + void runWorker(HiveConf hiveConf) throws MetaException { TestTxnCommands2.runWorker(hiveConf); } diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java index df19d72411..4fd67dbccb 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java @@ -413,7 +413,7 @@ public void testFetchOperatorContext() throws Exception { "inputformat 'org.apache.hadoop.hive.ql.exec.TestOperators$CustomInFmt' " + "outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' " + "tblproperties ('myprop1'='val1', 'myprop2' = 'val2')"; - Driver driver = new Driver(); + Driver driver = new Driver(conf); CommandProcessorResponse response = driver.run(cmd); assertEquals(0, response.getResponseCode()); List result = new ArrayList(); diff --git ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java index 06a96d5a39..492b63d93e 100644 --- ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java +++ ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.BeforeClass; @@ -135,7 +136,8 @@ private Driver createDriver(QueryLifeTimeHook mockHook) throws IllegalAccessExce Lists.newArrayList(mockHook)); SessionState.start(conf); - Driver driver = new Driver(conf, mockLoader); + + Driver driver = new Driver(new QueryState.Builder().withGenerateNewQueryId(true).withHiveConf(conf).build(), null, mockLoader, null, null); return driver; } diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java index 71d960f4c9..7650917509 100644 --- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.Before; @@ -63,17 +64,17 @@ * code path that CLI would but with the advantage that you can create a 2nd HiveTxnManager and then * simulate interleaved transactional/locking operations but all from within a single thread. * The later not only controls concurrency precisely but is the only way to run in UT env with DerbyDB. - * + * * A slightly different (and simpler) approach is to use "start transaction/(commit/rollback)" * command with the Driver.run(). This allows you to "see" the state of the Lock Manager after * each statement and can also simulate concurrent (but very controlled) work but w/o forking any * threads. 
The limitation here is that not all statements are allowed in an explicit transaction. * For example, "drop table foo". This approach will also cause the query to execute which will * make tests slower but will exericise the code path that is much closer to the actual user calls. - * + * * In either approach, each logical "session" should use it's own Transaction Manager. This requires * using {@link #swapTxnManager(HiveTxnManager)} since in the SessionState the TM is associated with - * each thread. + * each thread. */ public class TestDbTxnManager2 { private static final Logger LOG = LoggerFactory.getLogger(TestDbTxnManager2.class); @@ -94,7 +95,7 @@ public TestDbTxnManager2() throws Exception { public void setUp() throws Exception { SessionState.start(conf); ctx = new Context(conf); - driver = new Driver(conf); + driver = new Driver(new QueryState.Builder().withHiveConf(conf).nonIsolated().build(), null); TxnDbUtil.cleanDb(conf); TxnDbUtil.prepDb(conf); SessionState ss = SessionState.get(); @@ -663,7 +664,7 @@ public void checkExpectedLocks() throws Exception { checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "acidPart", "p=1", locks); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "_dummy_database", "_dummy_table", null, locks); txnMgr.rollbackTxn(); - + cpr = driver.compileAndRespond("update acidPart set b = 17 where a = 1"); checkCmdOnDriver(cpr); lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false); diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java index 2f5fc2fff1..8f7505dae0 100644 --- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java @@ -59,6 +59,9 @@ HiveLockManager mockLockManager; @Mock + HiveLockManagerCtx mockLockManagerCtx; + + @Mock QueryPlan mockQueryPlan; @Before @@ -73,11 +76,17 @@ public void setUp() throws Exception { txnMgr = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); Assert.assertTrue(txnMgr instanceof DummyTxnManager); + // Use reflection to set LockManager since creating the object using the // relection in DummyTxnManager won't take Mocked object Field field = DummyTxnManager.class.getDeclaredField("lockMgr"); field.setAccessible(true); field.set(txnMgr, mockLockManager); + + Field field2 = DummyTxnManager.class.getDeclaredField("lockManagerCtx"); + field2.setAccessible(true); + field2.set(txnMgr, mockLockManagerCtx); + } @After diff --git ql/src/test/queries/clientpositive/driver_conf_isolation.q ql/src/test/queries/clientpositive/driver_conf_isolation.q new file mode 100644 index 0000000000..3a8356c1d1 --- /dev/null +++ ql/src/test/queries/clientpositive/driver_conf_isolation.q @@ -0,0 +1,5 @@ +set hive.mapred.mode=strict; +select "${hiveconf:hive.mapred.mode}"; +create table t (a int); +analyze table t compute statistics; +select "${hiveconf:hive.mapred.mode}"; diff --git ql/src/test/queries/clientpositive/special_character_in_tabnames_1.q ql/src/test/queries/clientpositive/special_character_in_tabnames_1.q index c017172fd1..adc23e99b3 100644 --- ql/src/test/queries/clientpositive/special_character_in_tabnames_1.q +++ ql/src/test/queries/clientpositive/special_character_in_tabnames_1.q @@ -2,6 +2,7 @@ set hive.cbo.enable=true; set hive.exec.check.crossproducts=false; set hive.stats.fetch.column.stats=true; set hive.auto.convert.join=false; +set hive.strict.checks.cartesian.product=false; 
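-- NOTE: this explicit set is new in this patch: with per-query conf isolation,
-- a .q file can no longer rely on settings that earlier statements or test
-- machinery left behind on the shared session conf, so (presumably for the
-- cross-product queries in this file) the strict cartesian-product check has
-- to be relaxed here explicitly.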
-- SORT_QUERY_RESULTS diff --git ql/src/test/results/clientpositive/driver_conf_isolation.q.out ql/src/test/results/clientpositive/driver_conf_isolation.q.out new file mode 100644 index 0000000000..298c24ff60 --- /dev/null +++ ql/src/test/results/clientpositive/driver_conf_isolation.q.out @@ -0,0 +1,34 @@ +PREHOOK: query: select "strict" +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select "strict" +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +strict +PREHOOK: query: create table t (a int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t +POSTHOOK: query: create table t (a int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t +PREHOOK: query: analyze table t compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t +PREHOOK: Output: default@t +POSTHOOK: query: analyze table t compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t +POSTHOOK: Output: default@t +PREHOOK: query: select "strict" +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select "strict" +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +strict diff --git ql/src/test/results/clientpositive/input39.q.out ql/src/test/results/clientpositive/input39.q.out index 3000404448..6a5b82de0d 100644 --- ql/src/test/results/clientpositive/input39.q.out +++ ql/src/test/results/clientpositive/input39.q.out @@ -165,4 +165,4 @@ POSTHOOK: Input: default@t2@ds=1 #### A masked pattern was here #### 18 mapreduce.framework.name=yarn -mapreduce.jobtracker.address=local +mapreduce.jobtracker.address=localhost:58 diff --git service/src/java/org/apache/hive/service/cli/operation/Operation.java service/src/java/org/apache/hive/service/cli/operation/Operation.java index 2ef1479540..51e21650b5 100644 --- service/src/java/org/apache/hive/service/cli/operation/Operation.java +++ service/src/java/org/apache/hive/service/cli/operation/Operation.java @@ -80,14 +80,9 @@ protected Operation(HiveSession parentSession, OperationType opType) { this(parentSession, null, opType); } - - protected Operation(HiveSession parentSession, Map confOverlay, - OperationType opType) { - this(parentSession, confOverlay, opType, false); - } protected Operation(HiveSession parentSession, - Map confOverlay, OperationType opType, boolean isAsyncQueryState) { + Map confOverlay, OperationType opType) { this.parentSession = parentSession; this.opHandle = new OperationHandle(opType, parentSession.getProtocolVersion()); beginTime = System.currentTimeMillis(); @@ -99,7 +94,6 @@ protected Operation(HiveSession parentSession, MetricsConstant.COMPLETED_OPERATION_PREFIX, state); queryState = new QueryState.Builder() .withConfOverlay(confOverlay) - .withRunAsync(isAsyncQueryState) .withGenerateNewQueryId(true) .withHiveConf(parentSession.getHiveConf()) .build();
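Note: Operation.java (above) drops the withRunAsync flag because detaching the conf is no longer an async-only special case; isolation is QueryState's default. A condensed, hypothetical restatement of the conf-resolution rule the Builder now implements (class and method names here are illustrative; the authoritative code is in QueryState.java earlier in this patch):

import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;

public class QueryConfResolutionSketch {
  static HiveConf resolveQueryConf(HiveConf sessionConf, boolean isolated,
                                   Map<String, String> confOverlay) {
    // isolated (the default): the query always gets its own copy of the conf
    HiveConf queryConf = isolated
        ? (sessionConf == null ? new HiveConf() : new HiveConf(sessionConf))
        : sessionConf;                        // nonIsolated(): share the instance
    if (confOverlay != null && !confOverlay.isEmpty()) {
      confOverlay.forEach(queryConf::set);    // e.g. the overlay HS2's Operation passes
    }
    return queryConf;
  }
}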
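And a rough API-level restatement of what the new driver_conf_isolation.q golden file asserts (hypothetical scaffold; the statements and the hive.mapred.mode value mirror the .q file):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.QueryState;
import org.apache.hadoop.hive.ql.session.SessionState;

public class ConfIsolationSketch {
  public static void main(String[] args) throws Exception {
    HiveConf sessionConf = new HiveConf();
    sessionConf.set("hive.mapred.mode", "strict");    // the session-level set
    SessionState.start(sessionConf);

    QueryState queryState = new QueryState.Builder()
        .withGenerateNewQueryId(true)
        .withHiveConf(sessionConf)                    // isolated copy by default
        .build();
    Driver driver = new Driver(queryState, null);
    driver.run("create table t (a int)");
    driver.run("analyze table t compute statistics"); // may adjust its own query conf
    driver.close();

    // Still prints "strict": per-query changes stayed on the query's conf copy,
    // which is what the golden file checks via ${hiveconf:hive.mapred.mode}.
    System.out.println(sessionConf.get("hive.mapred.mode"));
  }
}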