diff --git cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java index 779fb66350..c6f8946247 100644 --- cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java +++ cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java @@ -63,6 +63,7 @@ import org.apache.hadoop.hive.ql.parse.CalcitePlanner; import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.processors.CommandProcessor; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; @@ -116,7 +117,7 @@ public CliDriver() { console = new LogHelper(LOG); } - public CommandProcessorResponse processCmd(String cmd) { + public CommandProcessorResponse processCmd(String cmd) throws CommandProcessorException { CliSessionState ss = (CliSessionState) SessionState.get(); ss.setLastCommand(cmd); @@ -126,7 +127,7 @@ public CommandProcessorResponse processCmd(String cmd) { ss.err.flush(); String cmd_trimmed = HiveStringUtils.removeComments(cmd).trim(); String[] tokens = tokenizeCmd(cmd_trimmed); - CommandProcessorResponse response = new CommandProcessorResponse(0); + CommandProcessorResponse response = new CommandProcessorResponse(); if (cmd_trimmed.toLowerCase().equals("quit") || cmd_trimmed.toLowerCase().equals("exit")) { @@ -148,14 +149,14 @@ public CommandProcessorResponse processCmd(String cmd) { File sourceFile = new File(cmd_1); if (! sourceFile.isFile()){ console.printError("File: "+ cmd_1 + " is not a file."); - response = new CommandProcessorResponse(1); + throw new CommandProcessorException(1); } else { try { response = processFile(cmd_1); } catch (IOException e) { console.printError("Failed processing file "+ cmd_1 +" "+ e.getLocalizedMessage(), stringifyException(e)); - response = new CommandProcessorResponse(1); + throw new CommandProcessorException(1); } } } else if (cmd_trimmed.startsWith("!")) { @@ -171,14 +172,17 @@ public CommandProcessorResponse processCmd(String cmd) { // shell_cmd = "/bin/bash -c \'" + shell_cmd + "\'"; try { ShellCmdExecutor executor = new ShellCmdExecutor(shell_cmd, ss.out, ss.err); - response = new CommandProcessorResponse(executor.execute()); - if (response.getResponseCode() != 0) { + int responseCode = executor.execute(); + if (responseCode != 0) { console.printError("Command failed with exit code = " + response); + ss.resetThreadName(); + throw new CommandProcessorException(responseCode); } + response = new CommandProcessorResponse(); } catch (Exception e) { console.printError("Exception raised from Shell command " + e.getLocalizedMessage(), stringifyException(e)); - response = new CommandProcessorResponse(1); + throw new CommandProcessorException(1); } } else { // local mode try { @@ -194,9 +198,10 @@ public CommandProcessorResponse processCmd(String cmd) { } catch (SQLException e) { console.printError("Failed processing command " + tokens[0] + " " + e.getLocalizedMessage(), org.apache.hadoop.util.StringUtils.stringifyException(e)); - response = new CommandProcessorResponse(1); - } - catch (Exception e) { + throw new CommandProcessorException(1); + } catch (CommandProcessorException e) { + throw e; + } catch (Exception e) { throw new RuntimeException(e); } } @@ -224,9 +229,10 @@ private String getFirstCmd(String cmd, int length) { return cmd.split("\\s+"); } - CommandProcessorResponse processLocalCmd(String cmd, 
CommandProcessor proc, CliSessionState ss) { + CommandProcessorResponse processLocalCmd(String cmd, CommandProcessor proc, CliSessionState ss) + throws CommandProcessorException { boolean escapeCRLF = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CLI_PRINT_ESCAPE_CRLF); - CommandProcessorResponse response = new CommandProcessorResponse(0); + CommandProcessorResponse response = new CommandProcessorResponse(); if (proc != null) { if (proc instanceof IDriver) { @@ -239,12 +245,12 @@ CommandProcessorResponse processLocalCmd(String cmd, CommandProcessor proc, CliS // Set HDFS CallerContext to queryId and reset back to sessionId after the query is done ShimLoader.getHadoopShims().setHadoopQueryContext(qp.getQueryState().getQueryId()); - response = qp.run(cmd); - - if (response.getResponseCode() != 0) { + try { + response = qp.run(cmd); + } catch (CommandProcessorException e) { qp.close(); ShimLoader.getHadoopShims().setHadoopSessionContext(ss.getSessionId()); - return response; + throw e; } // query has run capture the time @@ -277,18 +283,18 @@ CommandProcessorResponse processLocalCmd(String cmd, CommandProcessor proc, CliS } catch (IOException e) { console.printError("Failed with exception " + e.getClass().getName() + ":" + e.getMessage(), "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e)); - response = new CommandProcessorResponse(1); - } + throw new CommandProcessorException(1); + } finally { + qp.close(); + ShimLoader.getHadoopShims().setHadoopSessionContext(ss.getSessionId()); - qp.close(); - ShimLoader.getHadoopShims().setHadoopSessionContext(ss.getSessionId()); + if (out instanceof FetchConverter) { + ((FetchConverter) out).fetchFinished(); + } - if (out instanceof FetchConverter) { - ((FetchConverter) out).fetchFinished(); + console.printInfo( + "Time taken: " + timeTaken + " seconds" + (counter == 0 ? "" : ", Fetched: " + counter + " row(s)")); } - - console.printInfo( - "Time taken: " + timeTaken + " seconds" + (counter == 0 ? 
"" : ", Fetched: " + counter + " row(s)")); } else { String firstToken = tokenizeCmd(cmd.trim())[0]; String cmd_1 = getFirstCmd(cmd.trim(), firstToken.length()); @@ -296,17 +302,17 @@ CommandProcessorResponse processLocalCmd(String cmd, CommandProcessor proc, CliS if (ss.getIsVerbose()) { ss.out.println(firstToken + " " + cmd_1); } - CommandProcessorResponse res = proc.run(cmd_1); - if (res.getResponseCode() != 0) { - ss.out - .println("Query returned non-zero code: " + res.getResponseCode() + ", cause: " + res.getErrorMessage()); - } - if (res.getConsoleMessages() != null) { - for (String consoleMsg : res.getConsoleMessages()) { - console.printInfo(consoleMsg); + + try { + CommandProcessorResponse res = proc.run(cmd_1); + if (res.getMessage() != null) { + console.printInfo(res.getMessage()); } + return res; + } catch (CommandProcessorException e) { + ss.out.println("Query returned non-zero code: " + e.getResponseCode() + ", cause: " + e.getErrorMessage()); + throw e; } - return res; } } return response; @@ -336,7 +342,7 @@ private void printHeader(IDriver qp, PrintStream out) { } } - public CommandProcessorResponse processLine(String line) { + public CommandProcessorResponse processLine(String line) throws CommandProcessorException { return processLine(line, false); } @@ -350,7 +356,7 @@ public CommandProcessorResponse processLine(String line) { * returning -1 * @return 0 if ok */ - public CommandProcessorResponse processLine(String line, boolean allowInterrupting) { + public CommandProcessorResponse processLine(String line, boolean allowInterrupting) throws CommandProcessorException { SignalHandler oldSignal = null; Signal interruptSignal = null; @@ -386,7 +392,7 @@ public void handle(Signal signal) { } try { - CommandProcessorResponse lastRet = new CommandProcessorResponse(0); + CommandProcessorResponse lastRet = new CommandProcessorResponse(); CommandProcessorResponse ret; // we can not use "split" function directly as ";" may be quoted @@ -405,12 +411,16 @@ public void handle(Signal signal) { continue; } - ret = processCmd(command.toString()); - command.setLength(0);; - lastRet = ret; - boolean ignoreErrors = HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIIGNOREERRORS); - if (ret.getResponseCode() != 0 && !ignoreErrors) { - return ret; + try { + ret = processCmd(command.toString()); + lastRet = ret; + } catch (CommandProcessorException e) { + boolean ignoreErrors = HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIIGNOREERRORS); + if (!ignoreErrors) { + throw e; + } + } finally { + command.setLength(0); } } return lastRet; @@ -474,7 +484,7 @@ public void handle(Signal signal) { return ret; } - public CommandProcessorResponse processReader(BufferedReader r) throws IOException { + public CommandProcessorResponse processReader(BufferedReader r) throws IOException, CommandProcessorException { String line; StringBuilder qsb = new StringBuilder(); @@ -488,7 +498,7 @@ public CommandProcessorResponse processReader(BufferedReader r) throws IOExcepti return (processLine(qsb.toString())); } - public CommandProcessorResponse processFile(String fileName) throws IOException { + public CommandProcessorResponse processFile(String fileName) throws IOException, CommandProcessorException { Path path = new Path(fileName); FileSystem fs; if (!path.toUri().isAbsolute()) { @@ -507,20 +517,18 @@ public CommandProcessorResponse processFile(String fileName) throws IOException } } - public void processInitFiles(CliSessionState ss) throws IOException { + public void processInitFiles(CliSessionState ss) throws 
IOException, CommandProcessorException { boolean saveSilent = ss.getIsSilent(); ss.setIsSilent(true); for (String initFile : ss.initFiles) { - CommandProcessorResponse response = processFile(initFile); - exitOnFailure(response); + processFileExitOnFailure(initFile); } if (ss.initFiles.size() == 0) { if (System.getenv("HIVE_HOME") != null) { String hivercDefault = System.getenv("HIVE_HOME") + File.separator + "bin" + File.separator + HIVERCFILE; if (new File(hivercDefault).exists()) { - CommandProcessorResponse response = processFile(hivercDefault); - exitOnFailure(response); + processFileExitOnFailure(hivercDefault); console.printError("Putting the global hiverc in " + "$HIVE_HOME/bin/.hiverc is deprecated. Please "+ "use $HIVE_CONF_DIR/.hiverc instead."); @@ -530,34 +538,40 @@ public void processInitFiles(CliSessionState ss) throws IOException { String hivercDefault = System.getenv("HIVE_CONF_DIR") + File.separator + HIVERCFILE; if (new File(hivercDefault).exists()) { - CommandProcessorResponse response = processFile(hivercDefault); - exitOnFailure(response); + processFileExitOnFailure(hivercDefault); } } if (System.getProperty("user.home") != null) { String hivercUser = System.getProperty("user.home") + File.separator + HIVERCFILE; if (new File(hivercUser).exists()) { - CommandProcessorResponse response = processFile(hivercUser); - exitOnFailure(response); + processFileExitOnFailure(hivercUser); } } } ss.setIsSilent(saveSilent); } - private void exitOnFailure(CommandProcessorResponse response) { - int rc = response.getResponseCode(); - if (rc != 0) { - System.exit(rc); + private void processFileExitOnFailure(String fileName) throws IOException { + try { + processFile(fileName); + } catch (CommandProcessorException e) { + System.exit(e.getResponseCode()); + } + } + + private void processLineExitOnFailure(String command) throws IOException { + try { + processLine(command); + } catch (CommandProcessorException e) { + System.exit(e.getResponseCode()); } } - public void processSelectDatabase(CliSessionState ss) throws IOException { + public void processSelectDatabase(CliSessionState ss) throws IOException, CommandProcessorException { String database = ss.database; if (database != null) { - CommandProcessorResponse response = processLine("use " + database + ";"); - exitOnFailure(response); + processLineExitOnFailure("use " + database + ";"); } } @@ -774,7 +788,10 @@ public int run(String[] args) throws Exception { // execute cli driver work try { - return executeDriver(ss, conf, oproc).getResponseCode(); + executeDriver(ss, conf, oproc); + return 0; + } catch (CommandProcessorException e) { + return e.getResponseCode(); } finally { ss.resetThreadName(); ss.close(); @@ -811,7 +828,7 @@ private CommandProcessorResponse executeDriver(CliSessionState ss, HiveConf conf } } catch (FileNotFoundException e) { System.err.println("Could not open input file for reading. 
(" + e.getMessage() + ")"); - return new CommandProcessorResponse(3); + throw new CommandProcessorException(3); } if ("mr".equals(HiveConf.getVar(conf, ConfVars.HIVE_EXECUTION_ENGINE))) { console.printInfo(HiveConf.generateMrDeprecationWarning()); @@ -820,7 +837,7 @@ private CommandProcessorResponse executeDriver(CliSessionState ss, HiveConf conf setupConsoleReader(); String line; - CommandProcessorResponse response = new CommandProcessorResponse(0); + CommandProcessorResponse response = new CommandProcessorResponse(); StringBuilder prefix = new StringBuilder(); String curDB = getFormattedDb(conf, ss); String curPrompt = prompt + curDB; diff --git cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java index a3ea32671d..bbf2b04979 100644 --- cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java +++ cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hive.metastore.api.Schema; import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.junit.Test; import static org.junit.Assert.assertTrue; @@ -89,7 +90,7 @@ public void tearDown() { // If the command has an associated schema, make sure it gets printed to use @Test - public void testThatCliDriverPrintsHeaderForCommandsWithSchema() { + public void testThatCliDriverPrintsHeaderForCommandsWithSchema() throws CommandProcessorException { Schema mockSchema = mock(Schema.class); List fieldSchemas = new ArrayList(); String fieldName = "FlightOfTheConchords"; @@ -104,7 +105,7 @@ public void testThatCliDriverPrintsHeaderForCommandsWithSchema() { // If the command has no schema, make sure nothing is printed @Test - public void testThatCliDriverPrintsNoHeaderForCommandsWithNoSchema() { + public void testThatCliDriverPrintsNoHeaderForCommandsWithNoSchema() throws CommandProcessorException { Schema mockSchema = mock(Schema.class); when(mockSchema.getFieldSchemas()).thenReturn(null); @@ -136,12 +137,14 @@ public void testThatCliDriverDoesNotStripComments() throws Exception { // Save output as yo cannot print it while System.out and System.err are weird String message; String errors; - CommandProcessorResponse response; try { CliSessionState.start(ss); CliDriver cliDriver = new CliDriver(); // issue a command with bad options - response = cliDriver.processCmd("!ls --abcdefghijklmnopqrstuvwxyz123456789"); + cliDriver.processCmd("!ls --abcdefghijklmnopqrstuvwxyz123456789"); + assertTrue("Comments with '--; should not have been stripped, so command should fail", false); + } catch (CommandProcessorException e) { + // this is expected to happen } finally { // restore System.out and System.err System.setOut(oldOut); @@ -149,8 +152,6 @@ public void testThatCliDriverDoesNotStripComments() throws Exception { } message = dataOut.toString("UTF-8"); errors = dataErr.toString("UTF-8"); - assertTrue("Comments with '--; should not have been stripped," - + " so command should fail", response.getResponseCode() != 0); assertTrue("Comments with '--; should not have been stripped," + " so we should have got an error in the output: '" + errors + "'.", errors.contains("option")); @@ -163,10 +164,11 @@ public void testThatCliDriverDoesNotStripComments() throws Exception { * @param mockSchema * Schema to throw against test * @return Output that would have been 
sent to the user + * @throws CommandProcessorException * @throws CommandNeedRetryException * won't actually be thrown */ - private PrintStream headerPrintingTestDriver(Schema mockSchema) { + private PrintStream headerPrintingTestDriver(Schema mockSchema) throws CommandProcessorException { CliDriver cliDriver = new CliDriver(); // We want the driver to try to print the header... @@ -180,7 +182,6 @@ private PrintStream headerPrintingTestDriver(Schema mockSchema) { CommandProcessorResponse cpr = mock(CommandProcessorResponse.class); QueryState queryState = new QueryState.Builder().withGenerateNewQueryId(true).build(); - when(cpr.getResponseCode()).thenReturn(0); when(proc.run(anyString())).thenReturn(cpr); when(proc.getQueryState()).thenReturn(queryState); diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatCli.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatCli.java index 7a5896442b..08e328a173 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatCli.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatCli.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hive.common.LogUtils.LogInitializationException; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.processors.DfsProcessor; import org.apache.hadoop.hive.ql.processors.SetProcessor; import org.apache.hadoop.hive.ql.session.SessionState; @@ -281,20 +282,30 @@ private static int processCmd(String cmd) { String firstToken = cmd.split("\\s+")[0].trim(); if (firstToken.equalsIgnoreCase("set")) { - return new SetProcessor().run(cmd.substring(firstToken.length()).trim()).getResponseCode(); + try { + new SetProcessor().run(cmd.substring(firstToken.length()).trim()); + return 0; + } catch (CommandProcessorException e) { + return e.getResponseCode(); + } } else if (firstToken.equalsIgnoreCase("dfs")) { - return new DfsProcessor(ss.getConf()).run(cmd.substring(firstToken.length()).trim()).getResponseCode(); + try { + new DfsProcessor(ss.getConf()).run(cmd.substring(firstToken.length()).trim()); + return 0; + } catch (CommandProcessorException e) { + return e.getResponseCode(); + } } HCatDriver driver = new HCatDriver(ss.getConf()); - - int ret = driver.run(cmd).getResponseCode(); - - if (ret != 0) { + try { + driver.run(cmd); + } catch (CommandProcessorException e) { driver.close(); - sysExit(ss, ret); + sysExit(ss, e.getResponseCode()); } + int ret = 0; ArrayList res = new ArrayList(); try { while (driver.getResults(res)) { diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java index 6a7b9390de..aced7442d6 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.hcatalog.common.HCatConstants; @@ -43,22 +44,24 @@ public HCatDriver(HiveConf hiveConf) { driver = DriverFactory.newDriver(hiveConf); } - public CommandProcessorResponse run(String 
command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { + SessionState ss = SessionState.get(); CommandProcessorResponse cpr = null; - cpr = driver.run(command); - - SessionState ss = SessionState.get(); + try { + cpr = driver.run(command); + } finally { + // reset conf vars + ss.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, ""); + ss.getConf().set(HCatConstants.HCAT_CREATE_TBL_NAME, ""); + } - if (cpr.getResponseCode() == 0) { - // Only attempt to do this, if cmd was successful. - // FIXME: it would be probably better to move this to an after-execution - int rc = setFSPermsNGrp(ss, driver.getConf()); - cpr = new CommandProcessorResponse(rc); + // Only attempt to do this, if cmd was successful. + // FIXME: it would be probably better to move this to an after-execution + int rc = setFSPermsNGrp(ss, driver.getConf()); + if (rc != 0) { + throw new CommandProcessorException(rc); } - // reset conf vars - ss.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, ""); - ss.getConf().set(HCatConstants.HCAT_CREATE_TBL_NAME, ""); return cpr; } diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java index d6386ab67e..220eecddde 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.ql.io.RCFileInputFormat; import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.mapred.TextInputFormat; @@ -46,7 +47,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; public class TestSemanticAnalysis extends HCatBaseTest { @@ -77,9 +77,8 @@ public void setUpHCatDriver() throws IOException { @Test public void testDescDB() throws Exception { hcatDriver.run("drop database mydb cascade"); - assertEquals(0, hcatDriver.run("create database mydb").getResponseCode()); + hcatDriver.run("create database mydb"); CommandProcessorResponse resp = hcatDriver.run("describe database mydb"); - assertEquals(0, resp.getResponseCode()); ArrayList result = new ArrayList(); hcatDriver.getResults(result); assertTrue(result.get(0).contains("mydb")); // location is not shown in test mode @@ -90,8 +89,6 @@ public void testDescDB() throws Exception { public void testCreateTblWithLowerCasePartNames() throws Exception { driver.run("drop table junit_sem_analysis"); CommandProcessorResponse resp = driver.run("create table junit_sem_analysis (a int) partitioned by (B string) stored as TEXTFILE"); - assertEquals(resp.getResponseCode(), 0); - assertEquals(null, resp.getErrorMessage()); Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME); assertEquals("Partition key name case problem", "b", tbl.getPartitionKeys().get(0).getName()); driver.run("drop table junit_sem_analysis"); @@ -121,8 +118,12 @@ public void testAlterTblFFpart() throws Exception { @Test public void testUsNonExistentDB() throws Exception { - CommandProcessorResponse resp = hcatDriver.run("use no_such_db"); - 
assertEquals(ErrorMsg.DATABASE_NOT_EXISTS.getErrorCode(), resp.getResponseCode()); + try { + hcatDriver.run("use no_such_db"); + assert false; + } catch (CommandProcessorException e) { + assertEquals(ErrorMsg.DATABASE_NOT_EXISTS.getErrorCode(), e.getResponseCode()); + } } @Test @@ -133,22 +134,22 @@ public void testDatabaseOperations() throws Exception { String testDb2 = "testdatabaseoperatons2"; if (dbs.contains(testDb1.toLowerCase())) { - assertEquals(0, hcatDriver.run("drop database " + testDb1).getResponseCode()); + hcatDriver.run("drop database " + testDb1); } if (dbs.contains(testDb2.toLowerCase())) { - assertEquals(0, hcatDriver.run("drop database " + testDb2).getResponseCode()); + hcatDriver.run("drop database " + testDb2); } - assertEquals(0, hcatDriver.run("create database " + testDb1).getResponseCode()); + hcatDriver.run("create database " + testDb1); assertTrue(client.getAllDatabases().contains(testDb1)); - assertEquals(0, hcatDriver.run("create database if not exists " + testDb1).getResponseCode()); + hcatDriver.run("create database if not exists " + testDb1); assertTrue(client.getAllDatabases().contains(testDb1)); - assertEquals(0, hcatDriver.run("create database if not exists " + testDb2).getResponseCode()); + hcatDriver.run("create database if not exists " + testDb2); assertTrue(client.getAllDatabases().contains(testDb2)); - assertEquals(0, hcatDriver.run("drop database " + testDb1).getResponseCode()); - assertEquals(0, hcatDriver.run("drop database " + testDb2).getResponseCode()); + hcatDriver.run("drop database " + testDb1); + hcatDriver.run("drop database " + testDb2); assertFalse(client.getAllDatabases().contains(testDb1)); assertFalse(client.getAllDatabases().contains(testDb2)); } @@ -165,9 +166,7 @@ public void testCreateTableIfNotExists() throws Exception { assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat()); assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat()); - CommandProcessorResponse resp = hcatDriver.run("create table if not exists junit_sem_analysis (a int) stored as RCFILE"); - assertEquals(0, resp.getResponseCode()); - assertNull(resp.getErrorMessage()); + hcatDriver.run("create table if not exists junit_sem_analysis (a int) stored as RCFILE"); tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME); cols = tbl.getSd().getCols(); assertEquals(1, cols.size()); @@ -183,11 +182,9 @@ public void testAlterTblTouch() throws Exception { hcatDriver.run("drop table junit_sem_analysis"); hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); - CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis touch"); - assertEquals(0, response.getResponseCode()); + hcatDriver.run("alter table junit_sem_analysis touch"); hcatDriver.run("alter table junit_sem_analysis touch partition (b='12')"); - assertEquals(0, response.getResponseCode()); hcatDriver.run("drop table junit_sem_analysis"); } @@ -196,14 +193,11 @@ public void testAlterTblTouch() throws Exception { public void testChangeColumns() throws Exception { hcatDriver.run("drop table junit_sem_analysis"); hcatDriver.run("create table junit_sem_analysis (a int, c string) partitioned by (b string) stored as RCFILE"); - CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis change a a1 int"); - assertEquals(0, response.getResponseCode()); + hcatDriver.run("alter table junit_sem_analysis change a a1 int"); - response = hcatDriver.run("alter table junit_sem_analysis 
change a1 a string"); - assertEquals(0, response.getResponseCode()); + hcatDriver.run("alter table junit_sem_analysis change a1 a string"); - response = hcatDriver.run("alter table junit_sem_analysis change a a int after c"); - assertEquals(0, response.getResponseCode()); + hcatDriver.run("alter table junit_sem_analysis change a a int after c"); hcatDriver.run("drop table junit_sem_analysis"); } @@ -212,15 +206,11 @@ public void testAddReplaceCols() throws Exception { hcatDriver.run("drop table junit_sem_analysis"); hcatDriver.run("create table junit_sem_analysis (a int, c string) partitioned by (b string) stored as RCFILE"); - CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis replace columns (a1 tinyint)"); - assertEquals(0, response.getResponseCode()); + hcatDriver.run("alter table junit_sem_analysis replace columns (a1 tinyint)"); - response = hcatDriver.run("alter table junit_sem_analysis add columns (d tinyint)"); - assertEquals(0, response.getResponseCode()); - assertNull(response.getErrorMessage()); + hcatDriver.run("alter table junit_sem_analysis add columns (d tinyint)"); - response = hcatDriver.run("describe extended junit_sem_analysis"); - assertEquals(0, response.getResponseCode()); + hcatDriver.run("describe extended junit_sem_analysis"); Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME); List cols = tbl.getSd().getCols(); assertEquals(2, cols.size()); @@ -234,8 +224,7 @@ public void testAlterTblClusteredBy() throws Exception { hcatDriver.run("drop table junit_sem_analysis"); hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); - CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis clustered by (a) into 7 buckets"); - assertEquals(0, response.getResponseCode()); + hcatDriver.run("alter table junit_sem_analysis clustered by (a) into 7 buckets"); hcatDriver.run("drop table junit_sem_analysis"); } @@ -285,8 +274,7 @@ public void testAddPartFail() throws Exception { driver.run("drop table junit_sem_analysis"); driver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); - CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis add partition (b='2') location 'README.txt'"); - assertEquals(0, response.getResponseCode()); + hcatDriver.run("alter table junit_sem_analysis add partition (b='2') location 'README.txt'"); driver.run("drop table junit_sem_analysis"); } @@ -295,9 +283,7 @@ public void testAddPartPass() throws Exception { hcatDriver.run("drop table junit_sem_analysis"); hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); - CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis add partition (b='2') location '" + TEST_DATA_DIR + "'"); - assertEquals(0, response.getResponseCode()); - assertNull(response.getErrorMessage()); + hcatDriver.run("alter table junit_sem_analysis add partition (b='2') location '" + TEST_DATA_DIR + "'"); hcatDriver.run("drop table junit_sem_analysis"); } @@ -305,9 +291,14 @@ public void testAddPartPass() throws Exception { public void testCTAS() throws Exception { hcatDriver.run("drop table junit_sem_analysis"); query = "create table junit_sem_analysis (a int) as select * from tbl2"; - CommandProcessorResponse response = hcatDriver.run(query); - assertEquals(40000, response.getResponseCode()); - assertTrue(response.getErrorMessage().contains("FAILED: SemanticException Operation not 
supported. Create table as Select is not a valid operation.")); + try { + hcatDriver.run(query); + assert false; + } catch (CommandProcessorException e) { + assertEquals(40000, e.getResponseCode()); + assertTrue(e.getErrorMessage().contains( + "FAILED: SemanticException Operation not supported. Create table as Select is not a valid operation.")); + } hcatDriver.run("drop table junit_sem_analysis"); } @@ -315,8 +306,7 @@ public void testCTAS() throws Exception { public void testStoredAs() throws Exception { hcatDriver.run("drop table junit_sem_analysis"); query = "create table junit_sem_analysis (a int)"; - CommandProcessorResponse response = hcatDriver.run(query); - assertEquals(0, response.getResponseCode()); + hcatDriver.run(query); hcatDriver.run("drop table junit_sem_analysis"); } @@ -327,7 +317,7 @@ public void testAddDriverInfo() throws Exception { query = "create table junit_sem_analysis (a int) partitioned by (b string) stored as " + "INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " + "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver' "; - assertEquals(0, hcatDriver.run(query).getResponseCode()); + hcatDriver.run(query); Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME); assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat()); @@ -342,11 +332,13 @@ public void testInvalidateNonStringPartition() throws Exception { hcatDriver.run("drop table junit_sem_analysis"); query = "create table junit_sem_analysis (a int) partitioned by (b int) stored as RCFILE"; - CommandProcessorResponse response = hcatDriver.run(query); - assertEquals(40000, response.getResponseCode()); - assertEquals("FAILED: SemanticException Operation not supported. HCatalog only supports partition columns of type string. For column: b Found type: int", - response.getErrorMessage()); - + try { + hcatDriver.run(query); + } catch (CommandProcessorException e) { + assertEquals(40000, e.getResponseCode()); + assertEquals("FAILED: SemanticException Operation not supported. HCatalog only supports partition columns of " + + "type string. 
For column: b Found type: int", e.getErrorMessage()); + } } @Test @@ -354,10 +346,7 @@ public void testInvalidateSeqFileStoredAs() throws Exception { hcatDriver.run("drop table junit_sem_analysis"); query = "create table junit_sem_analysis (a int) partitioned by (b string) stored as SEQUENCEFILE"; - - CommandProcessorResponse response = hcatDriver.run(query); - assertEquals(0, response.getResponseCode()); - + hcatDriver.run(query); } @Test @@ -365,10 +354,7 @@ public void testInvalidateTextFileStoredAs() throws Exception { hcatDriver.run("drop table junit_sem_analysis"); query = "create table junit_sem_analysis (a int) partitioned by (b string) stored as TEXTFILE"; - - CommandProcessorResponse response = hcatDriver.run(query); - assertEquals(0, response.getResponseCode()); - + hcatDriver.run(query); } @Test @@ -376,9 +362,7 @@ public void testInvalidateClusteredBy() throws Exception { hcatDriver.run("drop table junit_sem_analysis"); query = "create table junit_sem_analysis (a int) partitioned by (b string) clustered by (a) into 10 buckets stored as TEXTFILE"; - - CommandProcessorResponse response = hcatDriver.run(query); - assertEquals(0, response.getResponseCode()); + hcatDriver.run(query); } @Test @@ -390,8 +374,7 @@ public void testCTLFail() throws Exception { driver.run(query); query = "create table like_table like junit_sem_analysis"; - CommandProcessorResponse response = hcatDriver.run(query); - assertEquals(0, response.getResponseCode()); + hcatDriver.run(query); } @Test @@ -408,8 +391,7 @@ public void testCTLPass() throws Exception { String likeTbl = "like_table"; hcatDriver.run("drop table " + likeTbl); query = "create table like_table like junit_sem_analysis"; - CommandProcessorResponse resp = hcatDriver.run(query); - assertEquals(0, resp.getResponseCode()); + hcatDriver.run(query); // Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, likeTbl); // assertEquals(likeTbl,tbl.getTableName()); // List cols = tbl.getSd().getCols(); diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java index c4896feb6d..ed7824fa98 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java @@ -28,11 +28,8 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; import org.junit.Before; import org.junit.Test; @@ -68,20 +65,14 @@ public void testAlterTablePass() throws Exception { hcatDriver.run("use " + dbName); hcatDriver.run("create table " + tblName + " (a int) partitioned by (b string) stored as RCFILE"); - CommandProcessorResponse response; - String tmpDir = System.getProperty("test.tmp.dir"); File dir = new File(tmpDir + "/hive-junit-" + System.nanoTime()); - response = hcatDriver.run("alter table " + tblName + " add partition (b='2') location '" + dir.toURI().getPath() + "'"); - assertEquals(0, response.getResponseCode()); - assertNull(response.getErrorMessage()); + hcatDriver.run("alter table " + tblName + " add partition (b='2') location '" + dir.toURI().getPath() + "'"); - response = 
hcatDriver.run("alter table " + tblName + " set fileformat " + hcatDriver.run("alter table " + tblName + " set fileformat " + "INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' " + "OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' " + "serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' inputdriver 'mydriver' outputdriver 'yourdriver'"); - assertEquals(0, response.getResponseCode()); - assertNull(response.getErrorMessage()); hcatDriver.run("drop table " + tblName); hcatDriver.run("drop database " + dbName); diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java index 1c6ad9b3cc..cf5ef56b52 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.hcatalog.MiniCluster; import org.slf4j.Logger; @@ -64,19 +65,18 @@ public static void generateDataFile(MiniCluster cluster, String fileName) throws MiniCluster.createInputFile(cluster, fileName, input); } - public static void createTable(IDriver driver, String tableName, String createTableArgs) throws IOException { + public static void createTable(IDriver driver, String tableName, String createTableArgs) + throws IOException, CommandProcessorException { String createTable = "create table " + tableName + createTableArgs; - int retCode = driver.run(createTable).getResponseCode(); - if (retCode != 0) { - throw new IOException("Failed to create table. 
[" + createTable + "], return code from hive driver : [" + retCode + "]"); - } + driver.run(createTable); } - public static void dropTable(IDriver driver, String tablename) throws IOException { + public static void dropTable(IDriver driver, String tablename) throws IOException, CommandProcessorException { driver.run("drop table if exists " + tablename); } - public static ArrayList formattedRun(IDriver driver, String name, String selectCmd) throws IOException { + public static ArrayList formattedRun(IDriver driver, String name, String selectCmd) + throws IOException, CommandProcessorException { driver.run(selectCmd); ArrayList src_values = new ArrayList(); driver.getResults(src_values); diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java index fbb11f2c88..424e428be5 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java @@ -131,11 +131,7 @@ protected void runHCatDynamicPartitionedTable(boolean asSingleMapTask, // read from hive to test String query = "select * from " + tableName; - int retCode = driver.run(query).getResponseCode(); - - if (retCode != 0) { - throw new Exception("Error " + retCode + " running query " + query); - } + driver.run(query); ArrayList res = new ArrayList(); driver.getResults(res); @@ -169,38 +165,26 @@ protected void runHCatDynamicPartitionedTable(boolean asSingleMapTask, } query = "show partitions " + tableName; - retCode = driver.run(query).getResponseCode(); - if (retCode != 0) { - throw new Exception("Error " + retCode + " running query " + query); - } + driver.run(query); res = new ArrayList(); driver.getResults(res); assertEquals(NUM_PARTITIONS, res.size()); query = "select * from " + tableName; - retCode = driver.run(query).getResponseCode(); - if (retCode != 0) { - throw new Exception("Error " + retCode + " running query " + query); - } + driver.run(query); res = new ArrayList(); driver.getResults(res); assertEquals(NUM_RECORDS, res.size()); query = "select count(*) from " + tableName; - retCode = driver.run(query).getResponseCode(); - if (retCode != 0) { - throw new Exception("Error " + retCode + " running query " + query); - } + driver.run(query); res = new ArrayList(); driver.getResults(res); assertEquals(1, res.size()); assertEquals("20", res.get(0)); query = "select count(*) from " + tableName + " where p1=1"; - retCode = driver.run(query).getResponseCode(); - if (retCode != 0) { - throw new Exception("Error " + retCode + " running query " + query); - } + driver.run(query); res = new ArrayList(); driver.getResults(res); assertEquals(1, res.size()); diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatInputFormat.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatInputFormat.java index 6f61dd3c10..b1ff965a38 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatInputFormat.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatInputFormat.java @@ -81,8 +81,8 @@ public void setUp() throws Exception { seqFileWriter.close(); // Now let's load this file into a new Hive table. 
- Assert.assertEquals(0, driver.run("drop table if exists test_bad_records").getResponseCode()); - Assert.assertEquals(0, driver.run( + driver.run("drop table if exists test_bad_records"); + driver.run( "create table test_bad_records " + "row format serde 'org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer' " + "with serdeproperties ( " + @@ -90,10 +90,8 @@ public void setUp() throws Exception { " 'serialization.format'='org.apache.thrift.protocol.TBinaryProtocol') " + "stored as" + " inputformat 'org.apache.hadoop.mapred.SequenceFileInputFormat'" + - " outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'") - .getResponseCode()); - Assert.assertEquals(0, driver.run("load data local inpath '" + intStringSeq.getParent() + - "' into table test_bad_records").getResponseCode()); + " outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'"); + driver.run("load data local inpath '" + intStringSeq.getParent() + "' into table test_bad_records"); setUpComplete = true; } diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatInputFormatMethods.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatInputFormatMethods.java index f43cc51f8e..2c5d8ca2ea 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatInputFormatMethods.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatInputFormatMethods.java @@ -40,10 +40,8 @@ public void setUp() throws Exception { return; } - Assert.assertEquals(0, driver.run("drop table if exists testHCIFMethods").getResponseCode()); - Assert.assertEquals(0, driver.run( - "create table testHCIFMethods (a string, b int) partitioned by (x string, y string)") - .getResponseCode()); + driver.run("drop table if exists testHCIFMethods"); + driver.run("create table testHCIFMethods (a string, b int) partitioned by (x string, y string)"); setUpComplete = true; } diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java index 9d6613d632..fe4dd436aa 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java @@ -34,7 +34,6 @@ import org.apache.hive.hcatalog.data.HCatRecord; import org.apache.hive.hcatalog.data.schema.HCatFieldSchema; import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils; -import org.junit.BeforeClass; import org.junit.Test; import static junit.framework.Assert.assertEquals; @@ -133,11 +132,7 @@ public void testHCatNonPartitionedTable() throws Exception { private void hiveReadTest() throws Exception { String query = "select * from " + tableName; - int retCode = driver.run(query).getResponseCode(); - - if (retCode != 0) { - throw new Exception("Error " + retCode + " running query " + query); - } + driver.run(query); ArrayList res = new ArrayList(); driver.getResults(res); @@ -148,11 +143,7 @@ private void hiveReadTest() throws Exception { } query = "select count(*) from " + tableName; - retCode = driver.run(query).getResponseCode(); - - if (retCode != 0) { - throw new Exception("Error " + retCode + " running query " + query); - } + driver.run(query); res = new ArrayList(); driver.getResults(res); diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java 
hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java index fbf39fc01b..3a2e1f9a89 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java @@ -358,11 +358,7 @@ private void columnOrderChangeTest() throws Exception { private void hiveReadTest() throws Exception { String query = "select * from " + tableName; - int retCode = driver.run(query).getResponseCode(); - - if (retCode != 0) { - throw new Exception("Error " + retCode + " running query " + query); - } + driver.run(query); ArrayList res = new ArrayList(); driver.getResults(res); diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java index 332c240e52..c955aa5021 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java @@ -85,8 +85,7 @@ public void testSequenceTableWriteReadMR() throws Exception { Initialize(); String createTable = "CREATE TABLE bad_props_table(a0 int, a1 String, a2 String) STORED AS SEQUENCEFILE"; driver.run("drop table bad_props_table"); - int retCode1 = driver.run(createTable).getResponseCode(); - assertTrue(retCode1 == 0); + driver.run(createTable); boolean caughtException = false; try { diff --git hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/AbstractHCatLoaderTest.java hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/AbstractHCatLoaderTest.java index 4c4551c13c..8110b18959 100644 --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/AbstractHCatLoaderTest.java +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/AbstractHCatLoaderTest.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hive.common.type.Date; import org.apache.hadoop.hive.common.type.Timestamp; import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; import org.apache.hadoop.mapreduce.Job; import org.apache.hive.hcatalog.HcatTestUtils; @@ -149,10 +148,7 @@ private void createTableDefaultDB(String tablename, String schema) throws Except */ static void executeStatementOnDriver(String cmd, IDriver driver) throws Exception { LOG.debug("Executing: " + cmd); - CommandProcessorResponse cpr = driver.run(cmd); - if(cpr.getResponseCode() != 0) { - throw new IOException("Failed to execute \"" + cmd + "\". 
Driver returned " + cpr.getResponseCode() + " Error: " + cpr.getErrorMessage()); - } + driver.run(cmd); } private static void checkProjection(FieldSchema fs, String expectedName, byte expectedPigType) { @@ -659,11 +655,11 @@ public void testConvertBooleanToInt() throws Exception { String[] lines = new String[]{"llama\ttrue", "alpaca\tfalse"}; HcatTestUtils.createTestDataFile(inputFileName, lines); - assertEquals(0, driver.run("drop table if exists " + tbl).getResponseCode()); - assertEquals(0, driver.run("create external table " + tbl + + driver.run("drop table if exists " + tbl); + driver.run("create external table " + tbl + " (a string, b boolean) row format delimited fields terminated by '\t'" + " stored as textfile location 'file:///" + - inputDataDir.getPath().replaceAll("\\\\", "/") + "'").getResponseCode()); + inputDataDir.getPath().replaceAll("\\\\", "/") + "'"); Properties properties = new Properties(); properties.setProperty(HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER, "true"); diff --git hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/AbstractHCatStorerTest.java hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/AbstractHCatStorerTest.java index 99e5da4f9e..f37b216020 100644 --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/AbstractHCatStorerTest.java +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/AbstractHCatStorerTest.java @@ -35,7 +35,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hive.hcatalog.HcatTestUtils; import org.apache.hive.hcatalog.mapreduce.HCatBaseTest; import org.apache.pig.EvalFunc; @@ -178,7 +178,6 @@ public void testWriteDate() throws Exception { @Test public void testWriteDate3() throws Exception { DateTime d = new DateTime(1991, 10, 11, 23, 10, DateTimeZone.forOffsetHours(-11)); - FrontendException fe = null; // expect to fail since the time component is not 0 pigValueRangeTestOverflow("junitTypeTest4", "date", "datetime", HCatBaseStorer.OOR_VALUE_OPT_VALUES.Throw, d.toString(), FORMAT_4_DATE); @@ -274,7 +273,6 @@ void pigValueRangeTest(String tblName, String hiveType, String pigType, int queryNumber = 1; logAndRegister(server, "A = load '" + INPUT_FILE_NAME + "' as (" + field + ":" + pigType + ");", queryNumber++); - Iterator firstLoad = server.openIterator("A"); if (goal == null) { logAndRegister(server, "store A into '" + tblName + "' using " + HCatStorer.class.getName() + "();", queryNumber++); @@ -301,9 +299,11 @@ void pigValueRangeTest(String tblName, String hiveType, String pigType, } logAndRegister(server, "B = load '" + tblName + "' using " + HCatLoader.class.getName() + "();", queryNumber); - CommandProcessorResponse cpr = driver.run("select * from " + tblName); - LOG.debug("cpr.respCode=" + cpr.getResponseCode() + " cpr.errMsg=" + cpr.getErrorMessage() - + " for table " + tblName); + try { + driver.run("select * from " + tblName); + } catch (CommandProcessorException e) { + LOG.debug("cpr.respCode=" + e.getResponseCode() + " cpr.errMsg=" + e.getErrorMessage() + " for table " + tblName); + } List l = new ArrayList(); driver.getResults(l); LOG.debug("Dumping rows via SQL from " + tblName); @@ -367,8 +367,11 @@ public void testDateCharTypes() throws Exception { + "();", queryNumber++); logAndRegister(server, "B = load '" + tblName + "' using " + 
HCatLoader.class.getName() + "();", queryNumber); - CommandProcessorResponse cpr = driver.run("select * from " + tblName); - LOG.debug("cpr.respCode=" + cpr.getResponseCode() + " cpr.errMsg=" + cpr.getErrorMessage()); + try { + driver.run("select * from " + tblName); + } catch (CommandProcessorException e) { + LOG.debug("cpr.respCode=" + e.getResponseCode() + " cpr.errMsg=" + e.getErrorMessage()); + } List l = new ArrayList(); driver.getResults(l); LOG.debug("Dumping rows via SQL from " + tblName); @@ -986,11 +989,7 @@ public void testPartitionPublish() throws Exception { server.executeBatch(); String query = "show partitions ptn_fail"; - int retCode = driver.run(query).getResponseCode(); - - if (retCode != 0) { - throw new IOException("Error " + retCode + " running query " + query); - } + driver.run(query); ArrayList res = new ArrayList(); driver.getResults(res); diff --git hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java index e373f195fc..ae292eb78c 100644 --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java @@ -118,11 +118,7 @@ private void createTable(String tablename, String schema, String partitionedBy, } private void driverRun(String cmd) throws Exception { - int retCode = driver.run(cmd).getResponseCode(); - if (retCode != 0) { - throw new IOException("Failed to run [" - + cmd + "], return code from hive driver : [" + retCode + "]"); - } + driver.run(cmd); } private void pigDump(String tableName) throws IOException { diff --git hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java index 5f5c9f761a..beb4fe9f4b 100644 --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java @@ -50,8 +50,8 @@ import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.io.StorageFormats; import org.apache.hadoop.hive.ql.processors.CommandProcessor; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.processors.HiveCommand; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.shims.HadoopShims; @@ -93,7 +93,6 @@ private static final String SECURITY_KEY_PROVIDER_URI_NAME = "dfs.encryption.key.provider.uri"; private HadoopShims.MiniDFSShim dfs = null; - private HadoopShims.HdfsEncryptionShim hes = null; private final String[] testOnlyCommands = new String[]{"crypto"}; private IDriver driver; private Map> basicInputData; @@ -146,10 +145,7 @@ private void createTable(String tablename, String schema) throws Exception { */ static void executeStatementOnDriver(String cmd, IDriver driver) throws Exception { LOG.debug("Executing: " + cmd); - CommandProcessorResponse cpr = driver.run(cmd); - if(cpr.getResponseCode() != 0) { - throw new IOException("Failed to execute \"" + cmd + "\". 
Driver returned " + cpr.getResponseCode() + " Error: " + cpr.getErrorMessage()); - } + driver.run(cmd); } @Before @@ -224,7 +220,7 @@ void initEncryptionShim(HiveConf conf) throws IOException { fs = dfs.getFileSystem(); // set up a java key provider for encrypted hdfs cluster - hes = shims.createHdfsEncryptionShim(fs, conf); + shims.createHdfsEncryptionShim(fs, conf); } public static String ensurePathEndsInSlash(String path) { @@ -246,16 +242,17 @@ private void associateEncryptionZoneWithPath(String path) throws Exception { if (crypto == null) { return; } - checkExecutionResponse(crypto.run("CREATE_KEY --keyName key_128 --bitLength 128")); - checkExecutionResponse(crypto.run("CREATE_ZONE --keyName key_128 --path " + path)); + checkExecutionResponse(crypto, "CREATE_KEY --keyName key_128 --bitLength 128"); + checkExecutionResponse(crypto, "CREATE_ZONE --keyName key_128 --path " + path); } - private void checkExecutionResponse(CommandProcessorResponse response) { - int rc = response.getResponseCode(); - if (rc != 0) { - SessionState.get().out.println(response); + private void checkExecutionResponse(CommandProcessor processor, String command) { + try { + processor.run(command); + } catch (CommandProcessorException e) { + SessionState.get().out.println(e); + assertTrue("Crypto command failed with the exit code " + e.getResponseCode(), false); } - assertEquals("Crypto command failed with the exit code" + rc, 0, rc); } private void removeEncryptionZone() throws Exception { @@ -265,7 +262,7 @@ private void removeEncryptionZone() throws Exception { if (crypto == null) { return; } - checkExecutionResponse(crypto.run("DELETE_KEY --keyName key_128")); + checkExecutionResponse(crypto, "DELETE_KEY --keyName key_128"); } private CommandProcessor getTestCommand(final String commandName) throws SQLException { diff --git hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderStorer.java hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderStorer.java index 281754be0d..8d383eb028 100644 --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderStorer.java +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderStorer.java @@ -119,12 +119,12 @@ public void testSmallTinyInt() throws Exception { }); // Create a table with smallint/tinyint columns, load data, and query from Hive. - Assert.assertEquals(0, driver.run("drop table if exists " + readTblName).getResponseCode()); - Assert.assertEquals(0, driver.run("create external table " + readTblName + - " (my_small_int smallint, my_tiny_int tinyint)" + - " row format delimited fields terminated by '\t' stored as textfile").getResponseCode()); - Assert.assertEquals(0, driver.run("load data local inpath '" + - dataDir.getPath().replaceAll("\\\\", "/") + "' into table " + readTblName).getResponseCode()); + driver.run("drop table if exists " + readTblName); + driver.run("create external table " + readTblName + + " (my_small_int smallint, my_tiny_int tinyint)" + + " row format delimited fields terminated by '\t' stored as textfile"); + driver.run("load data local inpath '" + + dataDir.getPath().replaceAll("\\\\", "/") + "' into table " + readTblName); PigServer server = HCatBaseTest.createPigServer(false); server.registerQuery( @@ -150,9 +150,9 @@ public void testSmallTinyInt() throws Exception { // Ensure Pig can write correctly to smallint/tinyint columns. 
This means values within the // bounds of the column type are written, and values outside throw an exception. - Assert.assertEquals(0, driver.run("drop table if exists " + writeTblName).getResponseCode()); - Assert.assertEquals(0, driver.run("create table " + writeTblName + - " (my_small_int smallint, my_tiny_int tinyint) stored as rcfile").getResponseCode()); + driver.run("drop table if exists " + writeTblName); + driver.run("create table " + writeTblName + + " (my_small_int smallint, my_tiny_int tinyint) stored as rcfile"); // Values within the column type bounds. HcatTestUtils.createTestDataFile(writeDataFile.getAbsolutePath(), new String[]{ @@ -181,9 +181,8 @@ public void testSmallTinyInt() throws Exception { private void smallTinyIntBoundsCheckHelper(String data, ExecJob.JOB_STATUS expectedStatus) throws Exception { - Assert.assertEquals(0, driver.run("drop table if exists test_tbl").getResponseCode()); - Assert.assertEquals(0, driver.run("create table test_tbl" + - " (my_small_int smallint, my_tiny_int tinyint) stored as rcfile").getResponseCode()); + driver.run("drop table if exists test_tbl"); + driver.run("create table test_tbl (my_small_int smallint, my_tiny_int tinyint) stored as rcfile"); PigServer server = HCatBaseTest.createPigServer(false); server.setBatchOn(); diff --git hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerWrapper.java hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerWrapper.java index 3edd020017..734c97182a 100644 --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerWrapper.java +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerWrapper.java @@ -19,7 +19,6 @@ package org.apache.hive.hcatalog.pig; import java.io.File; -import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.UUID; @@ -53,7 +52,7 @@ public void testStoreExternalTableWithExternalDir() throws Exception { driver.run("drop table junit_external"); String createTable = "create external table junit_external(a int, b string) partitioned by (c string) stored as RCFILE"; - Assert.assertEquals(0, driver.run(createTable).getResponseCode()); + driver.run(createTable); int LOOP_SIZE = 3; String[] inputData = new String[LOOP_SIZE*LOOP_SIZE]; diff --git hcatalog/server-extensions/src/test/java/org/apache/hive/hcatalog/listener/TestMsgBusConnection.java hcatalog/server-extensions/src/test/java/org/apache/hive/hcatalog/listener/TestMsgBusConnection.java index 729a5e7f62..d7937703d2 100644 --- hcatalog/server-extensions/src/test/java/org/apache/hive/hcatalog/listener/TestMsgBusConnection.java +++ hcatalog/server-extensions/src/test/java/org/apache/hive/hcatalog/listener/TestMsgBusConnection.java @@ -20,7 +20,6 @@ package org.apache.hive.hcatalog.listener; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import javax.jms.Connection; @@ -39,7 +38,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.hcatalog.common.HCatConstants; import org.apache.hive.hcatalog.messaging.HCatEventMessage; @@ -114,7 +112,6 @@ public void testConnection() throws Exception { } private void runQuery(String query) throws 
Exception { - CommandProcessorResponse cpr = driver.run(query); - assertFalse(cpr.getMessage(), cpr.failed()); + driver.run(query); } } diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java index bc67d03078..da677c7977 100644 --- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java +++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java @@ -83,7 +83,7 @@ import org.apache.hadoop.hive.ql.io.orc.OrcStruct; import org.apache.hadoop.hive.ql.io.orc.Reader; import org.apache.hadoop.hive.ql.io.orc.RecordReader; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.txn.compactor.Worker; import org.apache.hadoop.hive.serde.serdeConstants; @@ -2249,19 +2249,21 @@ private static boolean runDDL(IDriver driver, String sql) throws QueryFailedExce LOG.debug(sql); System.out.println(sql); //LOG.debug("Running Hive Query: "+ sql); - CommandProcessorResponse cpr = driver.run(sql); - if (cpr.getResponseCode() == 0) { + try { + driver.run(sql); return true; + } catch (CommandProcessorException e) { + LOG.error("Statement: " + sql + " failed: " + e); + return false; } - LOG.error("Statement: " + sql + " failed: " + cpr); - return false; } private static ArrayList queryTable(IDriver driver, String query) throws IOException { - CommandProcessorResponse cpr = driver.run(query); - if(cpr.getResponseCode() != 0) { - throw new RuntimeException(query + " failed: " + cpr); + try { + driver.run(query); + } catch (CommandProcessorException e) { + throw new RuntimeException(query + " failed: " + e); } ArrayList res = new ArrayList(); driver.getResults(res); diff --git hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/repl/commands/TestCommands.java hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/repl/commands/TestCommands.java index 04177500da..ec19c5e1f6 100644 --- hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/repl/commands/TestCommands.java +++ hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/repl/commands/TestCommands.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.hcatalog.HcatTestUtils; import org.apache.hive.hcatalog.api.HCatAddPartitionDesc; @@ -97,7 +97,7 @@ public static void tearDownAfterClass() throws Exception { } @Test - public void testDropDatabaseCommand() throws HCatException { + public void testDropDatabaseCommand() throws HCatException, CommandProcessorException { String dbName = "cmd_testdb"; int evid = 999; Command testCmd = new DropDatabaseCommand(dbName, evid); @@ -129,7 +129,7 @@ public void testDropDatabaseCommand() throws HCatException { } @Test - public void testDropTableCommand() throws HCatException { + public void testDropTableCommand() throws HCatException, CommandProcessorException { String dbName = "cmd_testdb"; String tableName = "cmd_testtable"; int evid = 789; @@ -209,7 +209,7 @@ public void 
testDropTableCommand() throws HCatException { } @Test - public void testDropPartitionCommand() throws HCatException, MetaException { + public void testDropPartitionCommand() throws HCatException, MetaException, CommandProcessorException { String dbName = "cmd_testdb"; String tableName = "cmd_testtable"; int evid = 789; @@ -301,7 +301,7 @@ public void testDropPartitionCommand() throws HCatException, MetaException { } @Test - public void testDropTableCommand2() throws HCatException, MetaException { + public void testDropTableCommand2() throws HCatException, MetaException, CommandProcessorException { // Secondary DropTableCommand test for testing repl-drop-tables' effect on partitions inside a partitioned table // when there exist partitions inside the table which are older than the drop event. // Our goal is this : Create a table t, with repl.last.id=157, say. @@ -372,7 +372,7 @@ public void testDropTableCommand2() throws HCatException, MetaException { @Test - public void testBasicReplEximCommands() throws IOException { + public void testBasicReplEximCommands() throws IOException, CommandProcessorException { // repl export, has repl.last.id and repl.scope=all in it // import repl dump, table has repl.last.id on it (will likely be 0) int evid = 111; @@ -398,14 +398,9 @@ public void testBasicReplEximCommands() throws IOException { HcatTestUtils.createTestDataFile(tempLocation,data); - CommandProcessorResponse ret = driver.run( - "LOAD DATA LOCAL INPATH '"+tempLocation+"' OVERWRITE INTO TABLE "+ dbName+ "." + tableName - ); - assertEquals(ret.getResponseCode() + ":" + ret.getErrorMessage(), null, ret.getException()); + driver.run("LOAD DATA LOCAL INPATH '"+tempLocation+"' OVERWRITE INTO TABLE "+ dbName+ "." + tableName); - CommandProcessorResponse selectRet = driver.run("SELECT * from " + dbName + "." + tableName); - assertEquals(selectRet.getResponseCode() + ":" + selectRet.getErrorMessage(), - null, selectRet.getException()); + driver.run("SELECT * from " + dbName + "." + tableName); List values = new ArrayList(); driver.getResults(values); @@ -418,8 +413,7 @@ public void testBasicReplEximCommands() throws IOException { exportLocation, false, evid); LOG.info("About to run :" + exportCmd.get().get(0)); - CommandProcessorResponse ret2 = driver.run(exportCmd.get().get(0)); - assertEquals(ret2.getResponseCode() + ":" + ret2.getErrorMessage(), null, ret2.getException()); + driver.run(exportCmd.get().get(0)); List exportPaths = exportCmd.cleanupLocationsAfterEvent(); assertEquals(1,exportPaths.size()); @@ -432,12 +426,9 @@ public void testBasicReplEximCommands() throws IOException { ImportCommand importCmd = new ImportCommand(dbName, importedTableName, null, exportLocation, false, evid); LOG.info("About to run :" + importCmd.get().get(0)); - CommandProcessorResponse ret3 = driver.run(importCmd.get().get(0)); - assertEquals(ret3.getResponseCode() + ":" + ret3.getErrorMessage(), null, ret3.getException()); + driver.run(importCmd.get().get(0)); - CommandProcessorResponse selectRet2 = driver.run("SELECT * from " + dbName + "." + importedTableName); - assertEquals(selectRet2.getResponseCode() + ":" + selectRet2.getErrorMessage(), - null, selectRet2.getException()); + driver.run("SELECT * from " + dbName + "." 
+ importedTableName); List values2 = new ArrayList(); driver.getResults(values2); @@ -453,7 +444,7 @@ public void testBasicReplEximCommands() throws IOException { } @Test - public void testMetadataReplEximCommands() throws IOException { + public void testMetadataReplEximCommands() throws IOException, CommandProcessorException { // repl metadata export, has repl.last.id and repl.scope=metadata // import repl metadata dump, table metadata changed, allows override, has repl.last.id int evid = 222; @@ -479,14 +470,9 @@ public void testMetadataReplEximCommands() throws IOException { HcatTestUtils.createTestDataFile(tempLocation,data); - CommandProcessorResponse ret = driver.run( - "LOAD DATA LOCAL INPATH '"+tempLocation+"' OVERWRITE INTO TABLE "+ dbName+ "." + tableName - ); - assertEquals(ret.getResponseCode() + ":" + ret.getErrorMessage(), null, ret.getException()); + driver.run("LOAD DATA LOCAL INPATH '"+tempLocation+"' OVERWRITE INTO TABLE "+ dbName+ "." + tableName); - CommandProcessorResponse selectRet = driver.run("SELECT * from " + dbName + "." + tableName); - assertEquals(selectRet.getResponseCode() + ":" + selectRet.getErrorMessage(), - null, selectRet.getException()); + driver.run("SELECT * from " + dbName + "." + tableName); List values = new ArrayList(); driver.getResults(values); @@ -499,8 +485,7 @@ public void testMetadataReplEximCommands() throws IOException { exportLocation, true, evid); LOG.info("About to run :" + exportMdCmd.get().get(0)); - CommandProcessorResponse ret2 = driver.run(exportMdCmd.get().get(0)); - assertEquals(ret2.getResponseCode() + ":" + ret2.getErrorMessage(), null, ret2.getException()); + driver.run(exportMdCmd.get().get(0)); List exportPaths = exportMdCmd.cleanupLocationsAfterEvent(); assertEquals(1,exportPaths.size()); @@ -513,12 +498,9 @@ public void testMetadataReplEximCommands() throws IOException { ImportCommand importMdCmd = new ImportCommand(dbName, importedTableName, null, exportLocation, true, evid); LOG.info("About to run :" + importMdCmd.get().get(0)); - CommandProcessorResponse ret3 = driver.run(importMdCmd.get().get(0)); - assertEquals(ret3.getResponseCode() + ":" + ret3.getErrorMessage(), null, ret3.getException()); + driver.run(importMdCmd.get().get(0)); - CommandProcessorResponse selectRet2 = driver.run("SELECT * from " + dbName + "." + importedTableName); - assertEquals(selectRet2.getResponseCode() + ":" + selectRet2.getErrorMessage(), - null, selectRet2.getException()); + driver.run("SELECT * from " + dbName + "." 
+ importedTableName); List values2 = new ArrayList(); driver.getResults(values2); @@ -531,7 +513,6 @@ public void testMetadataReplEximCommands() throws IOException { assertTrue(importedTable.getTblProps().containsKey("repl.last.id")); } - @Test public void testNoopReplEximCommands() throws Exception { // repl noop export on non-existant table, has repl.noop, does not error @@ -546,8 +527,7 @@ public void testNoopReplEximCommands() throws Exception { exportLocation, false, evid); LOG.info("About to run :" + noopExportCmd.get().get(0)); - CommandProcessorResponse ret = driver.run(noopExportCmd.get().get(0)); - assertEquals(ret.getResponseCode() + ":" + ret.getErrorMessage(), null, ret.getException()); + driver.run(noopExportCmd.get().get(0)); List exportPaths = noopExportCmd.cleanupLocationsAfterEvent(); assertEquals(1,exportPaths.size()); @@ -559,8 +539,7 @@ public void testNoopReplEximCommands() throws Exception { ImportCommand noopImportCmd = new ImportCommand(dbName, tableName, null, exportLocation, false, evid); LOG.info("About to run :" + noopImportCmd.get().get(0)); - CommandProcessorResponse ret2 = driver.run(noopImportCmd.get().get(0)); - assertEquals(ret2.getResponseCode() + ":" + ret2.getErrorMessage(), null, ret2.getException()); + driver.run(noopImportCmd.get().get(0)); Exception onfe = null; try { diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java index 4219757d7e..c5854f784c 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java @@ -27,18 +27,15 @@ import java.io.IOException; import java.net.URI; import java.util.ArrayList; -import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Map; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -50,7 +47,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.hcatalog.common.HCatUtil; import org.apache.hive.hcatalog.mapreduce.HCatBaseTest; @@ -65,7 +61,6 @@ private static HiveConf hcatConf; private static IDriver driver; - private static String mypath; private final byte[] FAMILY = Bytes.toBytes("testFamily"); private final byte[] QUALIFIER1 = Bytes.toBytes("testQualifier1"); @@ -113,8 +108,6 @@ private void populateHBaseTable(String tName, Connection connection) throws IOEx } private List generatePuts(String tableName) throws IOException { - - List columnFamilies = Arrays.asList("testFamily"); List myPuts; myPuts = new ArrayList(); for (int i = 1; i <=10; i++) { @@ -167,15 +160,9 @@ public void testPigHBaseSchema() throws Exception { + " WITH SERDEPROPERTIES ('hbase.columns.mapping'=':key,testFamily:testQualifier1,testFamily:testQualifier2')" + " 
TBLPROPERTIES ('hbase.table.name'='"+hbaseTableName+"')"; - CommandProcessorResponse responseOne = driver.run(deleteQuery); - assertEquals(0, responseOne.getResponseCode()); - - - CommandProcessorResponse responseTwo = driver.run(dbQuery); - assertEquals(0, responseTwo.getResponseCode()); - - - CommandProcessorResponse responseThree = driver.run(tableQuery); + driver.run(deleteQuery); + driver.run(dbQuery); + driver.run(tableQuery); Connection connection = null; Admin hAdmin = null; @@ -236,15 +223,9 @@ public void testPigFilterProjection() throws Exception { " WITH SERDEPROPERTIES ('hbase.columns.mapping'=':key,testFamily:testQualifier1,testFamily:testQualifier2')" + " TBLPROPERTIES ('hbase.table.default.storage.type'='binary')"; - CommandProcessorResponse responseOne = driver.run(deleteQuery); - assertEquals(0, responseOne.getResponseCode()); - - - CommandProcessorResponse responseTwo = driver.run(dbQuery); - assertEquals(0, responseTwo.getResponseCode()); - - - CommandProcessorResponse responseThree = driver.run(tableQuery); + driver.run(deleteQuery); + driver.run(dbQuery); + driver.run(tableQuery); Connection connection = null; Admin hAdmin = null; @@ -322,19 +303,11 @@ public void testPigPopulation() throws Exception { + " WITH SERDEPROPERTIES ('hbase.columns.mapping'=':key,testFamily:testQualifier1,testFamily:testQualifier2')" + " TBLPROPERTIES ('hbase.table.default.storage.type'='binary')"; - String selectQuery = "SELECT * from "+databaseName.toLowerCase()+"."+tableName.toLowerCase(); - - CommandProcessorResponse responseOne = driver.run(deleteQuery); - assertEquals(0, responseOne.getResponseCode()); - - - CommandProcessorResponse responseTwo = driver.run(dbQuery); - assertEquals(0, responseTwo.getResponseCode()); - - - CommandProcessorResponse responseThree = driver.run(tableQuery); + driver.run(deleteQuery); + driver.run(dbQuery); + driver.run(tableQuery); Connection connection = null; Admin hAdmin = null; @@ -426,9 +399,7 @@ public void testPigPopulation() throws Exception { } //delete the table from the database - CommandProcessorResponse responseFour = driver.run(deleteQuery); - assertEquals(0, responseFour.getResponseCode()); - + driver.run(deleteQuery); } } diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatHiveCompatibility.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatHiveCompatibility.java index 424c2f823a..891914676d 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatHiveCompatibility.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatHiveCompatibility.java @@ -53,7 +53,7 @@ public void testUnpartedReadWrite() throws Exception { driver.run("drop table if exists junit_unparted_noisd"); String createTable = "create table junit_unparted_noisd(a int) stored as RCFILE"; - Assert.assertEquals(0, driver.run(createTable).getResponseCode()); + driver.run(createTable); // assert that the table created has no hcat instrumentation, and that we're still able to read it. 
Table table = client.getTable("default", "junit_unparted_noisd"); @@ -89,7 +89,7 @@ public void testPartedRead() throws Exception { driver.run("drop table if exists junit_parted_noisd"); String createTable = "create table junit_parted_noisd(a int) partitioned by (b string) stored as RCFILE"; - Assert.assertEquals(0, driver.run(createTable).getResponseCode()); + driver.run(createTable); // assert that the table created has no hcat instrumentation, and that we're still able to read it. Table table = client.getTable("default", "junit_parted_noisd"); diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatHiveThriftCompatibility.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatHiveThriftCompatibility.java index 6ac24d014b..0eaea55706 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatHiveThriftCompatibility.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatHiveThriftCompatibility.java @@ -76,8 +76,8 @@ public void setUp() throws Exception { */ @Test public void testDynamicCols() throws Exception { - Assert.assertEquals(0, driver.run("drop table if exists test_thrift").getResponseCode()); - Assert.assertEquals(0, driver.run( + driver.run("drop table if exists test_thrift"); + driver.run( "create external table test_thrift " + "partitioned by (year string) " + "row format serde 'org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer' " + @@ -86,11 +86,9 @@ public void testDynamicCols() throws Exception { " 'serialization.format'='org.apache.thrift.protocol.TBinaryProtocol') " + "stored as" + " inputformat 'org.apache.hadoop.mapred.SequenceFileInputFormat'" + - " outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'") - .getResponseCode()); - Assert.assertEquals(0, - driver.run("alter table test_thrift add partition (year = '2012') location '" + - intStringSeq.getParent() + "'").getResponseCode()); + " outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'"); + + driver.run("alter table test_thrift add partition (year = '2012') location '" + intStringSeq.getParent() + "'"); PigServer pigServer = createPigServer(false); pigServer.registerQuery("A = load 'test_thrift' using org.apache.hive.hcatalog.pig.HCatLoader();"); diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java index ab25ffed8e..c093055ecf 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java @@ -49,7 +49,6 @@ import org.apache.hive.hcatalog.data.DefaultHCatRecord; import org.apache.hive.hcatalog.data.schema.HCatFieldSchema; import org.apache.hive.hcatalog.data.schema.HCatSchema; -import org.apache.pig.ExecType; import org.apache.pig.PigServer; import org.apache.pig.data.Tuple; import org.junit.After; @@ -109,8 +108,7 @@ public void teardown() throws IOException { public void testSequenceTableWriteRead() throws Exception { String createTable = "CREATE TABLE demo_table(a0 int, a1 String, a2 String) STORED AS SEQUENCEFILE"; driver.run("drop table demo_table"); - int retCode1 = driver.run(createTable).getResponseCode(); - assertTrue(retCode1 == 0); + driver.run(createTable); server.setBatchOn(); server.registerQuery("A = load '" @@ -137,8 +135,7 
@@ public void testSequenceTableWriteRead() throws Exception { public void testTextTableWriteRead() throws Exception { String createTable = "CREATE TABLE demo_table_1(a0 int, a1 String, a2 String) STORED AS TEXTFILE"; driver.run("drop table demo_table_1"); - int retCode1 = driver.run(createTable).getResponseCode(); - assertTrue(retCode1 == 0); + driver.run(createTable); server.setBatchOn(); server.registerQuery("A = load '" @@ -166,8 +163,7 @@ public void testTextTableWriteRead() throws Exception { public void testSequenceTableWriteReadMR() throws Exception { String createTable = "CREATE TABLE demo_table_2(a0 int, a1 String, a2 String) STORED AS SEQUENCEFILE"; driver.run("drop table demo_table_2"); - int retCode1 = driver.run(createTable).getResponseCode(); - assertTrue(retCode1 == 0); + driver.run(createTable); Configuration conf = new Configuration(); conf.set(HCatConstants.HCAT_KEY_HIVE_CONF, @@ -213,8 +209,7 @@ public void testSequenceTableWriteReadMR() throws Exception { public void testTextTableWriteReadMR() throws Exception { String createTable = "CREATE TABLE demo_table_3(a0 int, a1 String, a2 String) STORED AS TEXTFILE"; driver.run("drop table demo_table_3"); - int retCode1 = driver.run(createTable).getResponseCode(); - assertTrue(retCode1 == 0); + driver.run(createTable); Configuration conf = new Configuration(); conf.set(HCatConstants.HCAT_KEY_HIVE_CONF, diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java index 88c7efa724..1d1c496339 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java @@ -27,8 +27,8 @@ import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hive.common.util.HiveStringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,7 +49,6 @@ private static final Logger LOG = LoggerFactory.getLogger(TestMetastoreVersion.class); protected HiveConf hiveConf; private IDriver driver; - private String metaStoreRoot; private String testMetastoreDB; private IMetaStoreSchemaInfo metastoreSchemaInfo; @@ -73,7 +72,6 @@ public void setUp() throws Exception { File.separator + "test_metastore-" + System.currentTimeMillis(); System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:" + testMetastoreDB + ";create=true"); - metaStoreRoot = System.getProperty("test.tmp.dir"); metastoreSchemaInfo = MetaStoreSchemaInfoFactory.get(hiveConf, System.getProperty("test.tmp.dir", "target/tmp"), "derby"); } @@ -127,14 +125,19 @@ public void testVersionRestriction () throws Exception { * @throws Exception */ @Test - public void testMetastoreVersion () throws Exception { + public void testMetastoreVersion() throws Exception { // let the schema and version be auto created System.setProperty(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString(), "false"); System.setProperty(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION.toString(), "true"); hiveConf = new HiveConf(this.getClass()); SessionState.start(new CliSessionState(hiveConf)); driver = DriverFactory.newDriver(hiveConf); 
- driver.run("show tables"); + try { + driver.run("show tables"); + assert false; + } catch (CommandProcessorException e) { + // this is expected + } // correct version stored by Metastore during startup assertEquals(metastoreSchemaInfo.getHiveSchemaVersion(), getVersion(hiveConf)); @@ -152,15 +155,19 @@ public void testVersionMatching () throws Exception { hiveConf = new HiveConf(this.getClass()); SessionState.start(new CliSessionState(hiveConf)); driver = DriverFactory.newDriver(hiveConf); - driver.run("show tables"); + try { + driver.run("show tables"); + assert false; + } catch (CommandProcessorException e) { + // this is expected + } ObjectStore.setSchemaVerified(false); hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION, true); hiveConf = new HiveConf(this.getClass()); setVersion(hiveConf, metastoreSchemaInfo.getHiveSchemaVersion()); driver = DriverFactory.newDriver(hiveConf); - CommandProcessorResponse proc = driver.run("show tables"); - assertTrue(proc.getResponseCode() == 0); + driver.run("show tables"); } /** @@ -181,8 +188,12 @@ public void testVersionMisMatch () throws Exception { setVersion(hiveConf, "fooVersion"); SessionState.start(new CliSessionState(hiveConf)); driver = DriverFactory.newDriver(hiveConf); - CommandProcessorResponse proc = driver.run("show tables"); - assertTrue(proc.getResponseCode() != 0); + try { + driver.run("show tables"); + assert false; + } catch (CommandProcessorException e) { + // this is expected + } } /** @@ -203,8 +214,7 @@ public void testVersionCompatibility () throws Exception { setVersion(hiveConf, "3.9000.0"); SessionState.start(new CliSessionState(hiveConf)); driver = DriverFactory.newDriver(hiveConf); - CommandProcessorResponse proc = driver.run("show tables"); - assertEquals(0, proc.getResponseCode()); + driver.run("show tables"); } // write the given version to metastore diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java index ff8a84fb39..286842798d 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java @@ -56,7 +56,6 @@ import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; import org.apache.hadoop.hive.ql.io.orc.Reader; import org.apache.hadoop.hive.ql.lockmgr.TestDbTxnManager2; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.orc.OrcProto; import org.apache.tez.mapreduce.hadoop.MRJobConfig; @@ -651,7 +650,6 @@ public void testInsertWithRemoveUnion() throws Exception { public void testAcidInsertWithRemoveUnion() throws Exception { HiveConf confForTez = new HiveConf(hiveConf); // make a clone of existing hive conf setupTez(confForTez); - int[][] values = {{1,2},{3,4},{5,6},{7,8},{9,10}}; runStatementOnDriver("drop table if exists T", confForTez); runStatementOnDriver("create table T (a int, b int) stored as ORC TBLPROPERTIES ('transactional'='true')", confForTez); /*On Tez, below (T is transactional), we get the following layout @@ -1014,10 +1012,7 @@ private void setupMapJoin(HiveConf conf) { } private List runStatementOnDriver(String stmt) throws Exception { - CommandProcessorResponse cpr = d.run(stmt); - if(cpr.getResponseCode() != 0) { - throw new RuntimeException(stmt + " failed: " + cpr); - } + d.run(stmt); List rs = new ArrayList(); d.getResults(rs); return rs; @@ -1030,10 +1025,7 @@ private void 
setupMapJoin(HiveConf conf) { throws Exception { IDriver driver = DriverFactory.newDriver(conf); driver.setMaxRows(10000); - CommandProcessorResponse cpr = driver.run(stmt); - if(cpr.getResponseCode() != 0) { - throw new RuntimeException(stmt + " failed: " + cpr); - } + driver.run(stmt); List rs = new ArrayList(); driver.getResults(rs); return rs; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java index 5b1c903803..ed120286eb 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.ql.exec.mr.ExecDriver; import org.apache.hadoop.hive.ql.metadata.*; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import static org.junit.Assert.assertNotNull; @@ -157,12 +158,13 @@ private void cleanup() throws Exception { } private void executeQuery(String query) throws Exception { - CommandProcessorResponse result = driver.run(query); - assertNotNull("driver.run() was expected to return result for query: " + query, result); - assertEquals("Execution of (" + query + ") failed with exit status: " - + result.getResponseCode() + ", " + result.getErrorMessage() - + ", query: " + query, - result.getResponseCode(), 0); + try { + CommandProcessorResponse result = driver.run(query); + assertNotNull("driver.run() was expected to return result for query: " + query, result); + } catch (CommandProcessorException e) { + throw new RuntimeException("Execution of (" + query + ") failed with exit status: " + + e.getResponseCode() + ", " + e.getErrorMessage() + ", query: " + query); + } } private String buildLocationClause(String location) { diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSmallTableCacheEviction.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSmallTableCacheEviction.java index 51722a4c60..7bdb5e5bcf 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSmallTableCacheEviction.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSmallTableCacheEviction.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.logging.log4j.core.util.ReflectionUtil; import org.junit.After; @@ -72,7 +73,7 @@ public void setUp() throws Exception { } @After - public void tearDown() { + public void tearDown() throws CommandProcessorException { Driver driver = createDriver(); driver.run("drop table if exists " + smallTableName1); driver.run("drop table if exists " + smallTableName2); @@ -89,7 +90,7 @@ public void testSmallTableEvictionIfNewQueryIsExecuted() throws Exception { driver = createDriver(); String simpleJoinQuery = "select large.col, s1.col, s2.col from " + largeTableName + " large join " + smallTableName1 + " s1 on s1.col = large.col join " + smallTableName2 + " s2 on 
s2.col = large.col"; - Assert.assertEquals(0, driver.run(simpleJoinQuery).getResponseCode()); + driver.run(simpleJoinQuery); Assert.assertEquals(2, innerCache.size()); } finally { if (driver != null) { @@ -123,16 +124,15 @@ public MockDataBuilder numberOfRows(int numberOfRows) { return this; } - public void create(Driver driver) { - Assert.assertEquals(0, driver.run("create table " + tableName + " (col int)") - .getResponseCode()); + public void create(Driver driver) throws CommandProcessorException { + driver.run("create table " + tableName + " (col int)"); if (numberOfRows > 0) { StringJoiner query = new StringJoiner(",", "insert into " + tableName + " values ", ""); for (int i = 0; i < numberOfRows; i++) { query.add("(" + Integer.toString(i + 1) + ")"); } - Assert.assertEquals(0, driver.run(query.toString()).getResponseCode()); + driver.run(query.toString()); } } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkSessionTimeout.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkSessionTimeout.java index 7ede07d841..51bd8a79b0 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkSessionTimeout.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkSessionTimeout.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.ql.exec.spark.session.SparkSession; import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManagerImpl; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.Assert; @@ -42,7 +43,8 @@ public class TestSparkSessionTimeout { @Test - public void testSparkSessionTimeout() throws HiveException, InterruptedException, MalformedURLException { + public void testSparkSessionTimeout() + throws HiveException, InterruptedException, MalformedURLException, CommandProcessorException { String confDir = "../../data/conf/spark/standalone/hive-site.xml"; HiveConf.setHiveSiteLocation(new File(confDir).toURI().toURL()); @@ -82,7 +84,8 @@ public void testMultiSessionSparkSessionTimeout() throws InterruptedException, } @Test - public void testSparkSessionMultipleTimeout() throws HiveException, InterruptedException, MalformedURLException { + public void testSparkSessionMultipleTimeout() + throws HiveException, InterruptedException, MalformedURLException, CommandProcessorException { String confDir = "../../data/conf/spark/standalone/hive-site.xml"; HiveConf.setHiveSiteLocation(new File(confDir).toURI().toURL()); @@ -95,8 +98,8 @@ public void testSparkSessionMultipleTimeout() throws HiveException, InterruptedE runTestSparkSessionTimeout(conf, 2); } - private void runTestSparkSessionTimeout(HiveConf conf, int sleepRunIteration) throws HiveException, - InterruptedException { + private void runTestSparkSessionTimeout(HiveConf conf, int sleepRunIteration) + throws HiveException, InterruptedException, CommandProcessorException { conf.setVar(HiveConf.ConfVars.SPARK_SESSION_TIMEOUT, "5s"); conf.setVar(HiveConf.ConfVars.SPARK_SESSION_TIMEOUT_PERIOD, "1s"); @@ -113,22 +116,19 @@ private void runTestSparkSessionTimeout(HiveConf conf, int sleepRunIteration) th SparkSession sparkSession = SparkUtilities.getSparkSession(conf, SparkSessionManagerImpl .getInstance()); - Assert.assertEquals(0, - driver.run("create table " + tableName + " (col int)").getResponseCode()); - Assert.assertEquals(0, - driver.run("select * from " + tableName 
+ " order by col").getResponseCode()); + driver.run("create table " + tableName + " (col int)"); + driver.run("select * from " + tableName + " order by col"); for (int i = 0; i < sleepRunIteration; i++) { Thread.sleep(10000); Assert.assertFalse(sparkSession.isOpen()); - Assert.assertEquals(0, - driver.run("select * from " + tableName + " order by col").getResponseCode()); + driver.run("select * from " + tableName + " order by col"); } } finally { if (driver != null) { - Assert.assertEquals(0, driver.run("drop table if exists " + tableName).getResponseCode()); + driver.run("drop table if exists " + tableName); driver.destroy(); } } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java index 137bedd972..205b0fd0dd 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.spark.Statistic.SparkStatistic; import org.apache.hadoop.hive.ql.exec.spark.Statistic.SparkStatisticsNames; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.Assert; @@ -42,7 +43,7 @@ public class TestSparkStatistics { @Test - public void testSparkStatistics() throws MalformedURLException { + public void testSparkStatistics() throws MalformedURLException, CommandProcessorException { String confDir = "../../data/conf/spark/standalone/hive-site.xml"; HiveConf.setHiveSiteLocation(new File(confDir).toURI().toURL()); HiveConf conf = new HiveConf(); @@ -59,7 +60,7 @@ public void testSparkStatistics() throws MalformedURLException { .withHiveConf(conf).build(), null, null); - Assert.assertEquals(0, driver.run("create table test (col int)").getResponseCode()); + driver.run("create table test (col int)"); Assert.assertEquals(0, driver.compile("select * from test order by col", true)); List sparkTasks = Utilities.getSparkTasks(driver.getPlan().getRootTasks()); @@ -93,7 +94,7 @@ public void testSparkStatistics() throws MalformedURLException { Assert.assertTrue(Long.parseLong(statsMap.get(SparkStatisticsNames.EXECUTOR_RUN_TIME)) > 0); } finally { if (driver != null) { - Assert.assertEquals(0, driver.run("drop table if exists test").getResponseCode()); + driver.run("drop table if exists test"); driver.destroy(); } } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java index 3c40983c80..31d15fdef9 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java @@ -151,10 +151,7 @@ public void testSimpleQuery() { String cmd = "select a.key+1 from src a"; IDriver d = DriverFactory.newDriver(conf); - int ret = d.run(cmd).getResponseCode(); - if (ret != 0) { - fail("Failed"); - } + d.run(cmd); HiveHistoryViewer hv = new HiveHistoryViewer(SessionState.get() .getHiveHistory().getHistFileName()); Map jobInfoMap = hv.getJobInfoMap(); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestAlterTableMetadata.java 
itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestAlterTableMetadata.java index 940a1d370d..f6035fa01e 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestAlterTableMetadata.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestAlterTableMetadata.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.Test; @@ -29,7 +29,7 @@ public class TestAlterTableMetadata { @Test - public void testAlterTableOwner() throws HiveException { + public void testAlterTableOwner() throws HiveException, CommandProcessorException { /* * This test verifies that the ALTER TABLE ... SET OWNER command will change the * owner metadata of the table in HMS. @@ -39,31 +39,26 @@ public void testAlterTableOwner() throws HiveException { conf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); SessionState.start(conf); IDriver driver = DriverFactory.newDriver(conf); - CommandProcessorResponse resp; Table table; - resp = driver.run("create table t1(id int)"); - assertEquals(0, resp.getResponseCode()); + driver.run("create table t1(id int)"); // Changes the owner to a user and verify the change - resp = driver.run("alter table t1 set owner user u1"); - assertEquals(0, resp.getResponseCode()); + driver.run("alter table t1 set owner user u1"); table = Hive.get(conf).getTable("t1"); assertEquals(PrincipalType.USER, table.getOwnerType()); assertEquals("u1", table.getOwner()); // Changes the owner to a group and verify the change - resp = driver.run("alter table t1 set owner group g1"); - assertEquals(0, resp.getResponseCode()); + driver.run("alter table t1 set owner group g1"); table = Hive.get(conf).getTable("t1"); assertEquals(PrincipalType.GROUP, table.getOwnerType()); assertEquals("g1", table.getOwner()); // Changes the owner to a role and verify the change - resp = driver.run("alter table t1 set owner role r1"); - assertEquals(0, resp.getResponseCode()); + driver.run("alter table t1 set owner role r1"); table = Hive.get(conf).getTable("t1"); assertEquals(PrincipalType.ROLE, table.getOwnerType()); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java index 8cdd75b1fa..e8766f3861 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java @@ -20,17 +20,14 @@ import java.util.Map; - - import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; import org.junit.Test; /** @@ -48,12 +45,14 @@ public void testHookLoading() throws Exception{ 
IDriver driver = DriverFactory.newDriver(conf); driver.run("drop table testDL"); - CommandProcessorResponse resp = driver.run("create table testDL (a int) as select * from tbl2"); - assertEquals(40000, resp.getResponseCode()); + try { + driver.run("create table testDL (a int) as select * from tbl2"); + assert false; + } catch (CommandProcessorException e) { + assertEquals(40000, e.getResponseCode()); + } - resp = driver.run("create table testDL (a int)"); - assertEquals(0, resp.getResponseCode()); - assertNull(resp.getErrorMessage()); + driver.run("create table testDL (a int)"); Map params = Hive.get(conf).getTable(Warehouse.DEFAULT_DATABASE_NAME, "testDL").getParameters(); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java index 010855ce8f..8593480724 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.shims.Utils; @@ -225,10 +224,7 @@ private void verifyIncAcidLoad(String dbName) throws Throwable { } private void runUsingDriver(IDriver driver, String command) throws Throwable { - CommandProcessorResponse ret = driver.run(command); - if (ret.getException() != null) { - throw ret.getException(); - } + driver.run(command); } void prepareInc2AcidData(String dbName, HiveConf hiveConf) throws Throwable { diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index 36e0fcf6d7..43ea76b203 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -68,7 +68,7 @@ import org.apache.hadoop.hive.ql.exec.repl.ReplLoadWork; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.parse.repl.load.EventDumpDirComparator; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsUtils; import org.apache.hadoop.hive.shims.Utils; @@ -700,8 +700,6 @@ public void testBootstrapWithConcurrentRename() throws IOException { run("LOAD DATA LOCAL INPATH '" + ptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)", driver); BehaviourInjection ptnedTableRenamer = new BehaviourInjection(){ - boolean success = false; - @Nullable @Override public Table apply(@Nullable Table table) { @@ -716,14 +714,13 @@ public void run() { LOG.info("Entered new thread"); IDriver driver2 = DriverFactory.newDriver(hconf); SessionState.start(new CliSessionState(hconf)); - CommandProcessorResponse ret = - driver2.run("ALTER TABLE " + dbName + ".ptned PARTITION (b=1) RENAME TO PARTITION (b=10)"); - success = (ret.getException() == null); - assertFalse(success); - ret = driver2.run("ALTER TABLE " + 
dbName + ".ptned RENAME TO " + dbName + ".ptned_renamed"); - success = (ret.getException() == null); - assertFalse(success); - LOG.info("Exit new thread success - {}", success); + try { + driver2.run("ALTER TABLE " + dbName + ".ptned PARTITION (b=1) RENAME TO PARTITION (b=10)"); + driver2.run("ALTER TABLE " + dbName + ".ptned RENAME TO " + dbName + ".ptned_renamed"); + } catch (CommandProcessorException e) { + throw new RuntimeException(e); + } + LOG.info("Exit new thread success"); } }); t.start(); @@ -772,8 +769,6 @@ public void testBootstrapWithDropPartitionedTable() throws IOException { run("LOAD DATA LOCAL INPATH '" + ptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)", driver); BehaviourInjection ptnedTableRenamer = new BehaviourInjection(){ - boolean success = false; - @Nullable @Override public Table apply(@Nullable Table table) { @@ -788,10 +783,12 @@ public void run() { LOG.info("Entered new thread"); IDriver driver2 = DriverFactory.newDriver(hconf); SessionState.start(new CliSessionState(hconf)); - CommandProcessorResponse ret = driver2.run("DROP TABLE " + dbName + ".ptned"); - success = (ret.getException() == null); - assertTrue(success); - LOG.info("Exit new thread success - {}", success, ret.getException()); + try { + driver2.run("DROP TABLE " + dbName + ".ptned"); + } catch (CommandProcessorException e) { + throw new RuntimeException(e); + } + LOG.info("Exit new thread success"); } }); t.start(); @@ -988,8 +985,12 @@ public NotificationEventResponse apply(@Nullable NotificationEventResponse event InjectableBehaviourObjectStore.setGetNextNotificationBehaviour(eventIdSkipper); try { advanceDumpDir(); - CommandProcessorResponse ret = driver.run("REPL DUMP " + dbName + " FROM " + replDumpId); - assertTrue(ret.getResponseCode() == ErrorMsg.REPL_EVENTS_MISSING_IN_METASTORE.getErrorCode()); + try { + driver.run("REPL DUMP " + dbName + " FROM " + replDumpId); + assert false; + } catch (CommandProcessorException e) { + assertTrue(e.getResponseCode() == ErrorMsg.REPL_EVENTS_MISSING_IN_METASTORE.getErrorCode()); + } eventIdSkipper.assertInjectionsPerformed(true,false); } finally { InjectableBehaviourObjectStore.resetGetNextNotificationBehaviour(); // reset the behaviour @@ -3252,8 +3253,12 @@ public void testLoadCmPathMissing() throws IOException { assertTrue(fileCount != 0); fs.delete(path); - CommandProcessorResponse ret = driverMirror.run("REPL LOAD " + dbName + " FROM '" + dumpLocation + "'"); - assertTrue(ret.getResponseCode() == ErrorMsg.REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH.getErrorCode()); + try { + driverMirror.run("REPL LOAD " + dbName + " FROM '" + dumpLocation + "'"); + assert false; + } catch (CommandProcessorException e) { + assertTrue(e.getResponseCode() == ErrorMsg.REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH.getErrorCode()); + } run("drop database " + dbName, true, driver); fs.create(path, false); } @@ -3271,8 +3276,12 @@ public void testDumpWithTableDirMissing() throws IOException { fs.delete(path); advanceDumpDir(); - CommandProcessorResponse ret = driver.run("REPL DUMP " + dbName); - Assert.assertEquals(ret.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode()); + try { + driver.run("REPL DUMP " + dbName); + assert false; + } catch (CommandProcessorException e) { + Assert.assertEquals(e.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode()); + } run("DROP TABLE " + dbName + ".normal", driver); run("drop database " + dbName, true, driver); @@ -3292,8 +3301,12 @@ public void testDumpWithPartitionDirMissing() throws IOException { fs.delete(path); 
     advanceDumpDir();
-    CommandProcessorResponse ret = driver.run("REPL DUMP " + dbName);
-    Assert.assertEquals(ret.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode());
+    try {
+      driver.run("REPL DUMP " + dbName);
+      assert false;
+    } catch (CommandProcessorException e) {
+      Assert.assertEquals(e.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode());
+    }
     run("DROP TABLE " + dbName + ".normal", driver);
     run("drop database " + dbName, true, driver);
@@ -3575,7 +3588,6 @@ private void verifyIfPartitionNotExist(String dbName, String tableName,
     List partValues, HiveMetaStoreClient myClient){
-    Exception e = null;
     try {
       Partition ptn = myClient.getPartition(dbName, tableName, partValues);
       assertNotNull(ptn);
@@ -3640,10 +3652,14 @@ private static void run(String cmd, IDriver myDriver) throws RuntimeException {
   private static boolean run(String cmd, boolean errorOnFail, IDriver myDriver) throws RuntimeException {
     boolean success = false;
-    CommandProcessorResponse ret = myDriver.run(cmd);
-    success = ((ret.getException() == null) && (ret.getErrorMessage() == null));
-    if (!success) {
-      LOG.warn("Error {} : {} running [{}].", ret.getErrorCode(), ret.getErrorMessage(), cmd);
+    try {
+      myDriver.run(cmd);
+      success = true;
+    } catch (CommandProcessorException e) {
+      success = ((e.getException() == null) && (e.getErrorMessage() == null));
+      if (!success) {
+        LOG.warn("Error {} : {} running [{}].", e.getErrorCode(), e.getErrorMessage(), cmd);
+      }
     }
     return success;
   }
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
index 998159f57d..63b32c83db 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.IDriver;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.shims.Utils;
@@ -43,13 +43,13 @@ import org.junit.BeforeClass;
 import javax.annotation.Nullable;
+
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Collections;
 import java.util.Map;
-import static org.junit.Assert.assertTrue;
 import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
 /**
@@ -251,10 +251,12 @@ public void run() {
       LOG.info("Entered new thread");
       IDriver driver = DriverFactory.newDriver(primaryConf);
       SessionState.start(new CliSessionState(primaryConf));
-      CommandProcessorResponse ret = driver.run("insert into " +
primaryDbName + ".t1 values(2)"); - boolean success = (ret.getException() == null); - assertTrue(success); - ret = driver.run("drop table " + primaryDbName + ".t1"); - success = (ret.getException() == null); - assertTrue(success); - LOG.info("Exit new thread success - {}", success, ret.getException()); + try { + driver.run("insert into " + primaryDbName + ".t1 values(2)"); + driver.run("drop table " + primaryDbName + ".t1"); + } catch (CommandProcessorException e) { + throw new RuntimeException(e); + } + LOG.info("Exit new thread success"); } }); t.start(); @@ -412,7 +414,6 @@ public void testOpenTxnEvent() throws Throwable { @Test public void testAbortTxnEvent() throws Throwable { - String tableName = testName.getMethodName(); String tableNameFail = testName.getMethodName() + "Fail"; WarehouseInstance.Tuple bootStrapDump = primary.dump(primaryDbName, null); replica.load(replicatedDbName, bootStrapDump.dumpLocation) @@ -565,9 +566,12 @@ public void testDumpAcidTableWithPartitionDirMissing() throws Throwable { FileSystem fs = path.getFileSystem(conf); fs.delete(path); - CommandProcessorResponse ret = primary.runCommand("REPL DUMP " + dbName + - " with ('hive.repl.dump.include.acid.tables' = 'true')"); - Assert.assertEquals(ret.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode()); + try { + primary.runCommand("REPL DUMP " + dbName + " with ('hive.repl.dump.include.acid.tables' = 'true')"); + assert false; + } catch (CommandProcessorException e) { + Assert.assertEquals(e.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode()); + } primary.run("DROP TABLE " + dbName + ".normal"); primary.run("drop database " + dbName); @@ -587,9 +591,12 @@ public void testDumpAcidTableWithTableDirMissing() throws Throwable { FileSystem fs = path.getFileSystem(conf); fs.delete(path); - CommandProcessorResponse ret = primary.runCommand("REPL DUMP " + dbName + - " with ('hive.repl.dump.include.acid.tables' = 'true')"); - Assert.assertEquals(ret.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode()); + try { + primary.runCommand("REPL DUMP " + dbName + " with ('hive.repl.dump.include.acid.tables' = 'true')"); + assert false; + } catch (CommandProcessorException e) { + Assert.assertEquals(e.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode()); + } primary.run("DROP TABLE " + dbName + ".normal"); primary.run("drop database " + dbName); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java index 46a6627ffa..4f5cbbb019 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hive.ql.exec.repl.incremental.IncrementalLoadTasksBuilder; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.repl.PathBuilder; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.util.DependencyResolver; import org.apache.hadoop.security.UserGroupInformation; import org.junit.Assert; @@ -991,20 +991,21 @@ public void testIncrementalDumpEmptyDumpDirectory() throws Throwable { // Incremental load to non existing db should return database not exist error. 
     tuple = primary.dump("someJunkDB", tuple.lastReplicationId);
-    CommandProcessorResponse response =
-        replica.runCommand("REPL LOAD someJunkDB from '" + tuple.dumpLocation + "'");
-    assertTrue(response.getErrorMessage().toLowerCase()
-        .contains("org.apache.hadoop.hive.ql.ddl.DDLTask. Database does not exist: someJunkDB"
-            .toLowerCase()));
+    try {
+      replica.runCommand("REPL LOAD someJunkDB from '" + tuple.dumpLocation + "'");
+      assert false;
+    } catch (CommandProcessorException e) {
+      assertTrue(e.getErrorMessage().toLowerCase().contains(
+          "org.apache.hadoop.hive.ql.ddl.DDLTask. Database does not exist: someJunkDB".toLowerCase()));
+    }
 
     // Bootstrap load from an empty dump directory should return empty load directory error.
     tuple = primary.dump("someJunkDB", null);
-    response = replica.runCommand("REPL LOAD someJunkDB from '" + tuple.dumpLocation+"'");
-    assertTrue(response.getErrorMessage().toLowerCase()
-        .contains(
-            "semanticException no data to load in path"
-                .toLowerCase())
-        );
+    try {
+      replica.runCommand("REPL LOAD someJunkDB from '" + tuple.dumpLocation+"'");
+      assert false;
+    } catch (CommandProcessorException e) {
+      assertTrue(e.getErrorMessage().toLowerCase().contains("semanticException no data to load in path".toLowerCase()));
+    }
 
     primary.run(" drop database if exists " + testDbName + " cascade");
   }
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
index 91981e3413..c1414412a8 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
@@ -54,6 +54,7 @@
 import org.apache.hadoop.hive.ql.exec.repl.ReplDumpWork;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.parse.repl.PathBuilder;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hive.hcatalog.api.repl.ReplicationV1CompatRule;
@@ -227,11 +228,12 @@ private String row0Result(int colNum, boolean reuse) throws IOException {
   }
 
   public WarehouseInstance run(String command) throws Throwable {
-    CommandProcessorResponse ret = driver.run(command);
-    if (ret.getException() != null) {
-      throw ret.getException();
+    try {
+      driver.run(command);
+      return this;
+    } catch (CommandProcessorException e) {
+      throw e.getException();
     }
-    return this;
   }
 
   public CommandProcessorResponse runCommand(String command) throws Throwable {
@@ -239,23 +241,25 @@ public CommandProcessorResponse runCommand(String command) throws Throwable {
   }
 
   WarehouseInstance runFailure(String command) throws Throwable {
-    CommandProcessorResponse ret = driver.run(command);
-    if (ret.getException() == null) {
+    try {
+      driver.run(command);
       throw new RuntimeException("command execution passed for a invalid command" + command);
+    } catch (CommandProcessorException e) {
+      return this;
     }
-    return this;
   }
 
   WarehouseInstance runFailure(String command, int errorCode) throws Throwable {
-    CommandProcessorResponse ret = driver.run(command);
-    if (ret.getException() == null) {
+    try {
+      driver.run(command);
       throw new RuntimeException("command execution passed for a invalid command" + command);
+    } catch (CommandProcessorException e) {
+      if (e.getResponseCode() != errorCode) {
+        throw new RuntimeException("Command: " + command + " returned
incorrect error code: " + + e.getResponseCode() + " instead of " + errorCode); + } + return this; } - if (ret.getResponseCode() != errorCode) { - throw new RuntimeException("Command: " + command + " returned incorrect error code: " - + ret.getResponseCode() + " instead of " + errorCode); - } - return this; } Tuple dump(String dbName, String lastReplicationId, List withClauseOptions) diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java index b284777536..43e16246fa 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java @@ -29,10 +29,9 @@ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.shims.Utils; @@ -128,8 +127,7 @@ public void testSimplePrivileges() throws Exception { allowCreateDatabase(userName); - CommandProcessorResponse ret = driver.run("create database " + dbName); - assertEquals(0,ret.getResponseCode()); + driver.run("create database " + dbName); Database db = msc.getDatabase(dbName); String dbLocn = db.getLocationUri(); @@ -139,19 +137,18 @@ public void testSimplePrivileges() throws Exception { disallowCreateInDb(dbName, userName, dbLocn); driver.run("use " + dbName); - ret = driver.run( - String.format("create table %s (a string) partitioned by (b string)", tblName)); - - // failure from not having permissions to create table - assertNoPrivileges(ret); + try { + driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName)); + } catch (CommandProcessorException e) { + // failure from not having permissions to create table + assertNoPrivileges(e); + } allowCreateInDb(dbName, userName, dbLocn); driver.run("use " + dbName); - ret = driver.run( - String.format("create table %s (a string) partitioned by (b string)", tblName)); + driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName)); - assertEquals(0,ret.getResponseCode()); // now it succeeds. 
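Across these test hunks the pattern is the same: the response-code check disappears and failure arrives as a CommandProcessorException. Where a caller still needs the old integer status, one small adapter is enough. The sketch below is illustrative only; the helper and class names are assumed and are not part of this patch, while IDriver.run() and CommandProcessorException follow the signatures shown in the hunks above.

    import org.apache.hadoop.hive.ql.IDriver;
    import org.apache.hadoop.hive.ql.processors.CommandProcessorException;

    final class DriverStatusAdapter {
      private DriverStatusAdapter() {
      }

      // Bridges the exception-based contract back to an exit-code style result.
      static int runAndGetResponseCode(IDriver driver, String command) {
        try {
          driver.run(command);
          return 0; // success: there is no response code left to inspect
        } catch (CommandProcessorException e) {
          return e.getResponseCode(); // failure: the code now travels on the exception
        }
      }
    }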
Table tbl = msc.getTable(dbName, tblName); validateCreateTable(tbl,tblName, dbName); @@ -165,23 +162,26 @@ public void testSimplePrivileges() throws Exception { InjectableDummyAuthenticator.injectMode(true); allowSelectOnTable(tbl.getTableName(), fakeUser, tbl.getSd().getLocation()); - ret = driver.run(String.format("select * from %s limit 10", tblName)); - assertEquals(0,ret.getResponseCode()); - - ret = driver.run( - String.format("create table %s (a string) partitioned by (b string)", tblName+"mal")); + driver.run(String.format("select * from %s limit 10", tblName)); - assertNoPrivileges(ret); + try { + driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName+"mal")); + } catch (CommandProcessorException e) { + assertNoPrivileges(e); + } disallowCreateInTbl(tbl.getTableName(), userName, tbl.getSd().getLocation()); - ret = driver.run("alter table "+tblName+" add partition (b='2011')"); - assertNoPrivileges(ret); + + try { + driver.run("alter table "+tblName+" add partition (b='2011')"); + } catch (CommandProcessorException e) { + assertNoPrivileges(e); + } InjectableDummyAuthenticator.injectMode(false); allowCreateInTbl(tbl.getTableName(), userName, tbl.getSd().getLocation()); - ret = driver.run("alter table "+tblName+" add partition (b='2011')"); - assertEquals(0,ret.getResponseCode()); + driver.run("alter table "+tblName+" add partition (b='2011')"); allowDropOnTable(tblName, userName, tbl.getSd().getLocation()); allowDropOnDb(dbName,userName,db.getLocationUri()); @@ -234,7 +234,7 @@ protected void allowSelectOnTable(String tblName, String userName, String locati driver.run("grant select on table "+tblName+" to user "+userName); } - protected void assertNoPrivileges(CommandProcessorResponse ret){ + protected void assertNoPrivileges(CommandProcessorException ret){ assertNotNull(ret); assertFalse(0 == ret.getResponseCode()); assertTrue(ret.getErrorMessage().indexOf("No privilege") != -1); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java index 10a1ac2a21..519d358f0d 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java @@ -24,8 +24,6 @@ import java.util.List; import java.util.Collections; - - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.conf.HiveConf; @@ -38,12 +36,11 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener; import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider; import org.apache.hadoop.hive.ql.session.SessionState; @@ -51,8 +48,6 @@ import org.apache.hadoop.hive.shims.Utils; import 
org.apache.hadoop.security.UserGroupInformation; import org.junit.Assert; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -76,8 +71,6 @@ * authorization providers like StorageBasedAuthorizationProvider */ public class TestMetastoreAuthorizationProvider { - private static final Logger LOG = LoggerFactory.getLogger(TestMetastoreAuthorizationProvider.class); - protected HiveConf clientHiveConf; protected HiveMetaStoreClient msc; protected IDriver driver; @@ -182,26 +175,30 @@ public void testSimplePrivileges() throws Exception { String tblName = getTestTableName(); String userName = setupUser(); allowCreateDatabase(userName); - CommandProcessorResponse ret = driver.run("create database " + dbName); - assertEquals(0,ret.getResponseCode()); + driver.run("create database " + dbName); Database db = msc.getDatabase(dbName); String dbLocn = db.getLocationUri(); validateCreateDb(db,dbName); disallowCreateInDb(dbName, userName, dbLocn); disallowCreateDatabase(userName); driver.run("use " + dbName); - ret = driver.run( - String.format("create table %s (a string) partitioned by (b string)", tblName)); - assertEquals(1,ret.getResponseCode()); + try { + driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName)); + assert false; + } catch (CommandProcessorException e) { + assertEquals(1, e.getResponseCode()); + } // Even if table location is specified table creation should fail String tblNameLoc = tblName + "_loc"; String tblLocation = new Path(dbLocn).getParent().toUri() + "/" + tblNameLoc; driver.run("use " + dbName); - ret = driver.run( - String.format("create table %s (a string) partitioned by (b string) location '" + - tblLocation + "'", tblNameLoc)); - assertEquals(1, ret.getResponseCode()); + try { + driver.run(String.format( + "create table %s (a string) partitioned by (b string) location '" +tblLocation + "'", tblNameLoc)); + } catch (CommandProcessorException e) { + assertEquals(1, e.getResponseCode()); + } // failure from not having permissions to create table ArrayList fields = new ArrayList(2); @@ -235,10 +232,8 @@ public void testSimplePrivileges() throws Exception { allowCreateInDb(dbName, userName, dbLocn); driver.run("use " + dbName); - ret = driver.run( - String.format("create table %s (a string) partitioned by (b string)", tblName)); + driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName)); - assertEquals(0,ret.getResponseCode()); // now it succeeds. 
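The negative authorization cases in these hunks mix `assert false;` with bare try/catch blocks; in the bare form, a statement that unexpectedly succeeds lets the test pass silently. A stricter variant, shown only as an illustration and not taken from this patch (tblName and driver are the fixtures the surrounding test already uses), fails inside the try:

    try {
      driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName));
      Assert.fail("create table should have been rejected by the authorization provider");
    } catch (CommandProcessorException e) {
      assertEquals(1, e.getResponseCode());
    }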
Table tbl = msc.getTable(dbName, tblName); Assert.assertTrue(tbl.isSetId()); tbl.unsetId(); @@ -246,10 +241,8 @@ public void testSimplePrivileges() throws Exception { // Table creation should succeed even if location is specified driver.run("use " + dbName); - ret = driver.run( - String.format("create table %s (a string) partitioned by (b string) location '" + - tblLocation + "'", tblNameLoc)); - assertEquals(0, ret.getResponseCode()); + driver.run(String.format( + "create table %s (a string) partitioned by (b string) location '" + tblLocation + "'", tblNameLoc)); Table tblLoc = msc.getTable(dbName, tblNameLoc); validateCreateTable(tblLoc, tblNameLoc, dbName); @@ -261,10 +254,11 @@ public void testSimplePrivileges() throws Exception { InjectableDummyAuthenticator.injectGroupNames(fakeGroupNames); InjectableDummyAuthenticator.injectMode(true); - ret = driver.run( - String.format("create table %s (a string) partitioned by (b string)", tblName+"mal")); - - assertEquals(1,ret.getResponseCode()); + try { + driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName+"mal")); + } catch (CommandProcessorException e) { + assertEquals(1, e.getResponseCode()); + } ttbl.setTableName(tblName+"mal"); me = null; @@ -276,8 +270,11 @@ public void testSimplePrivileges() throws Exception { assertNoPrivileges(me); disallowCreateInTbl(tbl.getTableName(), userName, tbl.getSd().getLocation()); - ret = driver.run("alter table "+tblName+" add partition (b='2011')"); - assertEquals(1,ret.getResponseCode()); + try { + driver.run("alter table "+tblName+" add partition (b='2011')"); + } catch (CommandProcessorException e) { + assertEquals(1, e.getResponseCode()); + } List ptnVals = new ArrayList(); ptnVals.add("b=2011"); @@ -301,8 +298,7 @@ public void testSimplePrivileges() throws Exception { InjectableDummyAuthenticator.injectMode(false); allowCreateInTbl(tbl.getTableName(), userName, tbl.getSd().getLocation()); - ret = driver.run("alter table "+tblName+" add partition (b='2011')"); - assertEquals(0,ret.getResponseCode()); + driver.run("alter table "+tblName+" add partition (b='2011')"); String proxyUserName = getProxyUserName(); if (proxyUserName != null) { @@ -312,15 +308,13 @@ public void testSimplePrivileges() throws Exception { InjectableDummyAuthenticator.injectGroupNames(Collections.singletonList(proxyUserName)); InjectableDummyAuthenticator.injectMode(true); disallowCreateInTbl(tbl.getTableName(), proxyUserName, tbl.getSd().getLocation()); - ret = driver.run("alter table "+tblName+" add partition (b='2012')"); - assertEquals(0, ret.getResponseCode()); + driver.run("alter table "+tblName+" add partition (b='2012')"); InjectableDummyAuthenticator.injectMode(false); } allowDropOnTable(tblName, userName, tbl.getSd().getLocation()); allowDropOnDb(dbName,userName,db.getLocationUri()); - ret = driver.run("drop database if exists "+getTestDbName()+" cascade"); - assertEquals(0,ret.getResponseCode()); + driver.run("drop database if exists "+getTestDbName()+" cascade"); InjectableDummyAuthenticator.injectUserName(userName); InjectableDummyAuthenticator.injectGroupNames(Arrays.asList(ugi.getGroupNames())); @@ -331,8 +325,11 @@ public void testSimplePrivileges() throws Exception { tbl.setTableType("EXTERNAL_TABLE"); msc.createTable(tbl); disallowDropOnTable(tblName, userName, tbl.getSd().getLocation()); - ret = driver.run("drop table "+tbl.getTableName()); - assertEquals(1,ret.getResponseCode()); + try { + driver.run("drop table "+tbl.getTableName()); + } catch (CommandProcessorException e) 
{ + assertEquals(1, e.getResponseCode()); + } } protected void allowCreateDatabase(String userName) diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedClientSideAuthorizationProvider.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedClientSideAuthorizationProvider.java index d0645d74e2..5cce4a547c 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedClientSideAuthorizationProvider.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedClientSideAuthorizationProvider.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider; import static org.junit.Assert.assertNotNull; @@ -91,10 +91,10 @@ private void setPermissions(String locn, String permissions) throws Exception { } @Override - protected void assertNoPrivileges(CommandProcessorResponse ret){ - assertNotNull(ret); - assertFalse(0 == ret.getResponseCode()); - assertTrue(ret.getErrorMessage().indexOf("AccessControlException") != -1); + protected void assertNoPrivileges(CommandProcessorException e){ + assertNotNull(e); + assertFalse(0 == e.getResponseCode()); + assertTrue(e.getErrorMessage().indexOf("AccessControlException") != -1); } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java index a897dc6ab8..09d142a61a 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; import org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator; import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType; @@ -115,8 +114,7 @@ public static void beforeTest() throws Exception { } private static void runCmd(String cmd) throws Exception { - CommandProcessorResponse resp = driver.run(cmd); - assertEquals(0, resp.getResponseCode()); + driver.run(cmd); } @AfterClass @@ -287,8 +285,7 @@ public void testPermFunction() throws Exception { assertTrue("db name", dbName.equalsIgnoreCase(dbObj.getDbname())); // actually create the permanent function - CommandProcessorResponse cresponse = driver.run(null, true); - assertEquals(0, cresponse.getResponseCode()); + driver.run(null, true); // Verify privilege objects reset(mockedAuthorizer); @@ -315,9 +312,7 @@ public void testPermFunction() throws Exception { // create 2nd permanent function String funcName2 = "funcName2"; - cresponse = driver - .run("create function " + dbName + "." 
+ funcName2 + " as 'org.apache.hadoop.hive.ql.udf.UDFRand'"); - assertEquals(0, cresponse.getResponseCode()); + driver.run("create function " + dbName + "." + funcName2 + " as 'org.apache.hadoop.hive.ql.udf.UDFRand'"); // try using 2nd permanent function and verify its only 2nd one that shows up // for auth @@ -394,9 +389,7 @@ public void testTempTable() throws Exception { final String tableName = "testTempTable"; { // create temp table reset(mockedAuthorizer); - int status = driver.run("create temporary table " + tableName + "(i int) location '" + tmpTableDir + "'") - .getResponseCode(); - assertEquals(0, status); + driver.run("create temporary table " + tableName + "(i int) location '" + tmpTableDir + "'"); List inputs = getHivePrivilegeObjectInputs().getLeft(); List outputs = getHivePrivilegeObjectInputs().getRight(); @@ -409,7 +402,7 @@ public void testTempTable() throws Exception { assertEquals("output count", 1, outputs.size()); assertEquals("output type", HivePrivilegeObjectType.DATABASE, outputs.get(0).getType()); - status = driver.compile("select * from " + tableName, true); + int status = driver.compile("select * from " + tableName, true); assertEquals(0, status); } { // select from the temp table @@ -446,11 +439,10 @@ public void testTempTable() throws Exception { @Test public void testTempTableImplicit() throws Exception { final String tableName = "testTempTableImplicit"; - int status = driver.run("create table " + tableName + "(i int)").getResponseCode(); - assertEquals(0, status); + driver.run("create table " + tableName + "(i int)"); reset(mockedAuthorizer); - status = driver.compile("insert into " + tableName + " values (1)", true); + int status = driver.compile("insert into " + tableName + " values (1)", true); assertEquals(0, status); List inputs = getHivePrivilegeObjectInputs().getLeft(); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java index 76d1fb552f..cf120ea1f2 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; import org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator; import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType; @@ -256,8 +255,7 @@ private static void setFilteredResults(HivePrivilegeObjectType type, String... o } private static void runCmd(String cmd) throws Exception { - CommandProcessorResponse resp = driver.run(cmd); - assertEquals(0, resp.getResponseCode()); + driver.run(cmd); } private static List getSortedList(String... 
strings) { diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java index 96cc87a16f..5b8fb4bfb1 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java @@ -79,7 +79,6 @@ import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; import org.apache.hadoop.hive.ql.io.orc.OrcStruct; import org.apache.hadoop.hive.ql.io.orc.Reader; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.common.util.Retry; import org.apache.hive.hcatalog.common.HCatUtil; @@ -1750,10 +1749,7 @@ public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId) { */ static void executeStatementOnDriver(String cmd, IDriver driver) throws Exception { LOG.debug("Executing: " + cmd); - CommandProcessorResponse cpr = driver.run(cmd); - if (cpr.getResponseCode() != 0) { - throw new IOException("Failed to execute \"" + cmd + "\". Driver returned: " + cpr); - } + driver.run(cmd); } static void createTestDataFile(String filename, String[] lines) throws IOException { diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java index 0de7a54490..b7245e2c35 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java @@ -23,8 +23,6 @@ import java.util.Arrays; import java.util.List; import java.util.Random; -import java.util.SortedSet; -import java.util.TreeSet; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -38,7 +36,6 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; @@ -50,9 +47,8 @@ import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.HiveInputFormat; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.orc.OrcConf; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -111,7 +107,7 @@ private void setupTez(HiveConf conf) { conf.set("tez.staging-dir", TEST_DATA_DIR); conf.setBoolean("tez.ignore.lib.uris", true); conf.set("hive.tez.container.size", "128"); - conf.setBoolean("hive.merge.tezfiles", false); + conf.setBoolean("hive.merge.tezfiles", false); conf.setBoolean("hive.in.tez.test", true); } @@ -409,17 +405,19 @@ static void runCleaner(HiveConf hConf) throws Exception { */ static void executeStatementOnDriver(String cmd, IDriver driver) throws Exception { LOG.debug("Executing: " + cmd); - CommandProcessorResponse cpr = driver.run(cmd); - if (cpr.getResponseCode() 
!= 0) { - throw new IOException("Failed to execute \"" + cmd + "\". Driver returned: " + cpr); + try { + driver.run(cmd); + } catch (CommandProcessorException e) { + throw new IOException("Failed to execute \"" + cmd + "\". Driver returned: " + e); } } static List executeStatementOnDriverAndReturnResults(String cmd, IDriver driver) throws Exception { LOG.debug("Executing: " + cmd); - CommandProcessorResponse cpr = driver.run(cmd); - if (cpr.getResponseCode() != 0) { - throw new IOException("Failed to execute \"" + cmd + "\". Driver returned: " + cpr); + try { + driver.run(cmd); + } catch (CommandProcessorException e) { + throw new IOException("Failed to execute \"" + cmd + "\". Driver returned: " + e); } List rs = new ArrayList(); driver.getResults(rs); diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java index 4010535bda..92a0bbe806 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java @@ -38,9 +38,7 @@ import org.apache.hive.service.cli.operation.ClassicTableTypeMapping.ClassicTableTypes; import org.apache.hive.service.cli.operation.HiveTableTypeMapping; import org.apache.hive.service.cli.operation.TableTypeMappingFactory.TableTypeMappings; -import org.junit.After; import org.junit.AfterClass; -import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java index 2431859c3d..ea985d8892 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hive.ql.QTestProcessExecResult; import org.apache.hadoop.hive.ql.QTestUtil; import org.apache.hadoop.hive.ql.QTestMiniClusters.MiniClusterType; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hive.testutils.HiveTestEnvSetup; import org.junit.After; import org.junit.AfterClass; @@ -148,9 +148,10 @@ protected void runTestHelper(String tname, String fname, String fpath, boolean e qt.addFile(fpath); qt.cliInit(new File(fpath)); - CommandProcessorResponse response = qt.executeClient(fname); - if ((response.getResponseCode() == 0) ^ expectSuccess) { - qt.failedQuery(response.getException(), response.getResponseCode(), fname, debugHint); + try { + qt.executeClient(fname); + } catch (CommandProcessorException e) { + qt.failedQuery(e.getException(), e.getResponseCode(), fname, debugHint); } QTestProcessExecResult result = qt.checkCliDriverResults(fname); diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java index 432d6eacfe..9a23ef855e 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hive.accumulo.AccumuloTestSetup; import org.apache.hadoop.hive.ql.QTestProcessExecResult; import 
org.apache.hadoop.hive.ql.QTestMiniClusters.MiniClusterType; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -110,9 +110,10 @@ public void runTest(String tname, String fname, String fpath) { qt.addFile(fpath); qt.cliInit(new File(fpath)); - CommandProcessorResponse response = qt.executeClient(fname); - if (response.getResponseCode() != 0) { - qt.failedQuery(response.getException(), response.getResponseCode(), fname, null); + try { + qt.executeClient(fname); + } catch (CommandProcessorException e) { + qt.failedQuery(e.getException(), e.getResponseCode(), fname, null); } QTestProcessExecResult result = qt.checkCliDriverResults(fname); diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java index 1d453ec189..d06acfb978 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hive.ql.QTestProcessExecResult; import org.apache.hadoop.hive.ql.QTestUtil; import org.apache.hadoop.hive.ql.QTestMiniClusters.MiniClusterType; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.util.ElapsedTimeLoggingWrapper; import org.junit.After; import org.junit.AfterClass; @@ -179,11 +179,11 @@ public void runTest(String testName, String fname, String fpath) { qt.addFile(fpath); qt.cliInit(new File(fpath)); - CommandProcessorResponse response = qt.executeClient(fname); - int ecode = response.getResponseCode(); - if (ecode != 0) { + try { + qt.executeClient(fname); + } catch (CommandProcessorException e) { failed = true; - qt.failedQuery(response.getException(), response.getResponseCode(), fname, QTestUtil.DEBUG_HINT); + qt.failedQuery(e.getException(), e.getResponseCode(), fname, QTestUtil.DEBUG_HINT); } setupAdditionalPartialMasks(); diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java index d6bdb778fe..62ea96089a 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java @@ -28,13 +28,14 @@ import org.apache.hadoop.hive.ql.QTestProcessExecResult; import org.apache.hadoop.hive.ql.QTestUtil; import org.apache.hadoop.hive.ql.QTestMiniClusters.MiniClusterType; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import com.google.common.base.Strings; + public class CoreCompareCliDriver extends CliAdapter{ private static QTestUtil qt; @@ -144,9 +145,10 @@ public void runTest(String tname, String fname, String fpath) { String versionStr = versionFile.substring(tname.length() + 1, versionFile.length() - 3); outputs.add(qt.cliInit(new File(queryDirectory, versionFile))); // TODO: will this work? 
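Once executeClient() throws instead of returning a code, the qtest drivers in this patch split into two mirror-image flows: positive drivers report a failed query when the exception appears, negative drivers report a failure when it does not. A condensed sketch follows; the method names qt.executeClient, qt.failedQuery, qt.failed and QTestUtil.DEBUG_HINT come from the hunks in this patch, while the two wrapper methods are assumptions made for illustration.

    // Positive drivers: an exception from executeClient() fails the query under test.
    void runPositive(String fname) throws Exception {
      try {
        qt.executeClient(fname);
      } catch (CommandProcessorException e) {
        qt.failedQuery(e.getException(), e.getResponseCode(), fname, QTestUtil.DEBUG_HINT);
      }
    }

    // Negative drivers: reaching the line after executeClient() means the query
    // unexpectedly succeeded, so the test is marked failed instead.
    void runNegative(String fname) throws Exception {
      try {
        qt.executeClient(fname);
        qt.failed(fname, QTestUtil.DEBUG_HINT);
      } catch (CommandProcessorException e) {
        // expected: negative tests rely on the command being rejected
      }
    }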
- CommandProcessorResponse response = qt.executeClient(versionFile, fname); - if (response.getResponseCode() != 0) { - qt.failedQuery(response.getException(), response.getResponseCode(), fname, QTestUtil.DEBUG_HINT); + try { + qt.executeClient(versionFile, fname); + } catch (CommandProcessorException e) { + qt.failedQuery(e.getException(), e.getResponseCode(), fname, QTestUtil.DEBUG_HINT); } } diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java index 5c6597dc8b..40545d8d65 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hive.hbase.HBaseTestSetup; import org.apache.hadoop.hive.ql.QTestProcessExecResult; import org.apache.hadoop.hive.ql.QTestMiniClusters.MiniClusterType; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -112,9 +112,10 @@ public void runTest(String tname, String fname, String fpath) { qt.cliInit(new File(fpath)); - CommandProcessorResponse response = qt.executeClient(fname); - if (response.getResponseCode() != 0) { - qt.failedQuery(response.getException(), response.getResponseCode(), fname, null); + try { + qt.executeClient(fname); + } catch (CommandProcessorException e) { + qt.failedQuery(e.getException(), e.getResponseCode(), fname, null); } QTestProcessExecResult result = qt.checkCliDriverResults(fname); diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java index 772561842d..7bf597a595 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.hbase.HBaseQTestUtil; import org.apache.hadoop.hive.hbase.HBaseTestSetup; import org.apache.hadoop.hive.ql.QTestProcessExecResult; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.QTestMiniClusters.MiniClusterType; import org.junit.After; import org.junit.AfterClass; @@ -104,8 +105,9 @@ public void runTest(String tname, String fname, String fpath) { System.err.println("Begin query: " + fname); qt.addFile(fpath); qt.cliInit(new File(fpath)); - int ecode = qt.executeClient(fname).getResponseCode(); - if (ecode == 0) { + try { + qt.executeClient(fname); + } catch (CommandProcessorException e) { qt.failed(fname, null); } diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreKuduCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreKuduCliDriver.java index c54410c89d..71134e7b0a 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreKuduCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreKuduCliDriver.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hive.ql.QTestArguments; import org.apache.hadoop.hive.ql.QTestProcessExecResult; import org.apache.hadoop.hive.ql.QTestUtil; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import 
org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -115,9 +115,10 @@ public void runTest(String tname, String fname, String fpath) { qt.addFile(fpath); qt.cliInit(new File(fpath)); - CommandProcessorResponse response = qt.executeClient(fname); - if (response.getResponseCode() != 0) { - qt.failedQuery(response.getException(), response.getResponseCode(), fname, null); + try { + qt.executeClient(fname); + } catch (CommandProcessorException e) { + qt.failedQuery(e.getException(), e.getResponseCode(), fname, null); } QTestProcessExecResult result = qt.checkCliDriverResults(fname); diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreKuduNegativeCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreKuduNegativeCliDriver.java index 3a7905ed2d..4f6988c9f3 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreKuduNegativeCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreKuduNegativeCliDriver.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hive.ql.QTestArguments; import org.apache.hadoop.hive.ql.QTestProcessExecResult; import org.apache.hadoop.hive.ql.QTestUtil; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -113,9 +113,11 @@ public void runTest(String tname, String fname, String fpath) { System.err.println("Begin query: " + fname); qt.addFile(fpath); qt.cliInit(new File(fpath)); - int ecode = qt.executeClient(fname).getResponseCode(); - if (ecode == 0) { + try { + qt.executeClient(fname); qt.failed(fname, null); + } catch (CommandProcessorException e) { + // this is the expected behaviour } QTestProcessExecResult result = qt.checkCliDriverResults(fname); diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java index 16fdd6bb5f..bb9e65524d 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.ql.QTestArguments; import org.apache.hadoop.hive.ql.QTestProcessExecResult; import org.apache.hadoop.hive.ql.QTestUtil; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.QTestMiniClusters.MiniClusterType; import org.junit.After; import org.junit.AfterClass; @@ -122,9 +123,11 @@ public void runTest(String tname, String fname, String fpath) throws Exception { qt.addFile(fpath); qt.cliInit(new File(fpath)); - int ecode = qt.executeClient(fname).getResponseCode(); - if (ecode == 0) { + try { + qt.executeClient(fname); qt.failed(fname, QTestUtil.DEBUG_HINT); + } catch (CommandProcessorException e) { + // this is the expected outcome } QTestProcessExecResult result = qt.checkCliDriverResults(fname); diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java index 1613e96053..59c71f544c 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java @@ 
-29,7 +29,7 @@ import org.apache.hadoop.hive.ql.QTestSystemProperties; import org.apache.hadoop.hive.ql.QTestUtil; import org.apache.hadoop.hive.ql.QTestMiniClusters.MiniClusterType; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.junit.After; import org.junit.AfterClass; @@ -127,10 +127,10 @@ public void runTest(String name, String fname, String fpath) { qt.addFile(fpath); qt.cliInit(new File(fpath)); - CommandProcessorResponse response = qt.executeClient(fname); - int ecode = response.getResponseCode(); - if (ecode != 0) { - qt.failedQuery(response.getException(), response.getResponseCode(), fname, QTestUtil.DEBUG_HINT); + try { + qt.executeClient(fname); + } catch (CommandProcessorException e) { + qt.failedQuery(e.getException(), e.getResponseCode(), fname, QTestUtil.DEBUG_HINT); } QTestProcessExecResult result = qt.checkCliDriverResults(fname); diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index c58d9f6caf..7148d15293 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -72,6 +72,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.mapper.StatsSources; import org.apache.hadoop.hive.ql.processors.CommandProcessor; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.processors.HiveCommand; @@ -107,7 +108,6 @@ protected Hive db; protected HiveConf conf; protected HiveConf savedConf; - private IDriver drv; private BaseSemanticAnalyzer sem; private CliDriver cliDriver; private final QTestMiniClusters miniClusters = new QTestMiniClusters(); @@ -459,9 +459,10 @@ private void cleanupFromFile() throws IOException { String cleanupCommands = FileUtils.readFileToString(cleanupFile); LOG.info("Cleanup (" + cleanupScript + "):\n" + cleanupCommands); - int result = getCliDriver().processLine(cleanupCommands).getResponseCode(); - if (result != 0) { - LOG.error("Failed during cleanup processLine with code={}. Ignoring", result); + try { + getCliDriver().processLine(cleanupCommands); + } catch (CommandProcessorException e) { + LOG.error("Failed during cleanup processLine with code={}. 
Ignoring", e.getResponseCode()); // TODO Convert this to an Assert.fail once HIVE-14682 is fixed } } else { @@ -498,10 +499,11 @@ private void initFromScript() throws IOException { String initCommands = FileUtils.readFileToString(scriptFile); LOG.info("Initial setup (" + initScript + "):\n" + initCommands); - int result = cliDriver.processLine(initCommands).getResponseCode(); - LOG.info("Result from cliDrriver.processLine in createSources=" + result); - if (result != 0) { - Assert.fail("Failed during createSources processLine with code=" + result); + try { + cliDriver.processLine(initCommands); + LOG.info("Result from cliDrriver.processLine in createSources=0"); + } catch (CommandProcessorException e) { + Assert.fail("Failed during createSources processLine with code=" + e.getResponseCode()); } } @@ -511,7 +513,6 @@ private void postInit() throws Exception { testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE); db = Hive.get(conf); - drv = DriverFactory.newDriver(conf); pd = new ParseDriver(); initMaterializedViews(); // Create views registry @@ -626,7 +627,7 @@ private void closeSession(SessionState oldSs) throws IOException { } } - public int executeAdhocCommand(String q) { + public int executeAdhocCommand(String q) throws CommandProcessorException { if (!q.contains(";")) { return -1; } @@ -634,26 +635,23 @@ public int executeAdhocCommand(String q) { String q1 = q.split(";")[0] + ";"; LOG.debug("Executing " + q1); - return cliDriver.processLine(q1).getResponseCode(); - } - - public int execute(String tname) { - return drv.run(qMap.get(tname)).getResponseCode(); + cliDriver.processLine(q1); + return 0; } - public CommandProcessorResponse executeClient(String tname1, String tname2) { + public CommandProcessorResponse executeClient(String tname1, String tname2) throws CommandProcessorException { String commands = getCommand(tname1) + System.getProperty("line.separator") + getCommand(tname2); return executeClientInternal(commands); } - public CommandProcessorResponse executeClient(String fileName) { + public CommandProcessorResponse executeClient(String fileName) throws CommandProcessorException { return executeClientInternal(getCommand(fileName)); } - private CommandProcessorResponse executeClientInternal(String commands) { + private CommandProcessorResponse executeClientInternal(String commands) throws CommandProcessorException { List cmds = CliDriver.splitSemiColon(commands); - CommandProcessorResponse response = new CommandProcessorResponse(0); + CommandProcessorResponse response = new CommandProcessorResponse(); StringBuilder command = new StringBuilder(); QTestSyntaxUtil qtsu = new QTestSyntaxUtil(this, conf, pd); @@ -674,18 +672,20 @@ private CommandProcessorResponse executeClientInternal(String commands) { } String strCommand = command.toString(); - if (isCommandUsedForTesting(strCommand)) { - response = executeTestCommand(strCommand); - } else { - response = cliDriver.processLine(strCommand); - } - - if (response.getResponseCode() != 0 && !ignoreErrors()) { - break; + try { + if (isCommandUsedForTesting(strCommand)) { + response = executeTestCommand(strCommand); + } else { + response = cliDriver.processLine(strCommand); + } + } catch (CommandProcessorException e) { + if (!ignoreErrors()) { + throw e; + } } command.setLength(0); } - if (response.getResponseCode() == 0 && SessionState.get() != null) { + if (SessionState.get() != null) { SessionState.get().setLastCommand(null); // reset } return response; @@ -710,7 +710,7 @@ boolean isHiveCommand(String command) { } } - 
private CommandProcessorResponse executeTestCommand(final String command) { + private CommandProcessorResponse executeTestCommand(String command) throws CommandProcessorException { String commandName = command.trim().split("\\s+")[0]; String commandArgs = command.trim().substring(commandName.length()); @@ -720,8 +720,7 @@ private CommandProcessorResponse executeTestCommand(final String command) { //replace ${hiveconf:hive.metastore.warehouse.dir} with actual dir if existed. //we only want the absolute path, so remove the header, such as hdfs://localhost:57145 - String - wareHouseDir = + String wareHouseDir = SessionState.get().getConf().getVar(ConfVars.METASTOREWAREHOUSE).replaceAll("^[a-zA-Z]+://.*?:\\d+", ""); commandArgs = commandArgs.replaceAll("\\$\\{hiveconf:hive\\.metastore\\.warehouse\\.dir\\}", wareHouseDir); @@ -734,16 +733,14 @@ private CommandProcessorResponse executeTestCommand(final String command) { try { CommandProcessor proc = getTestCommand(commandName); if (proc != null) { - CommandProcessorResponse response = proc.run(commandArgs.trim()); - - int rc = response.getResponseCode(); - if (rc != 0) { - SessionState.getConsole() - .printError(response.toString(), - response.getException() != null ? Throwables.getStackTraceAsString(response.getException()) : ""); + try { + CommandProcessorResponse response = proc.run(commandArgs.trim()); + return response; + } catch (CommandProcessorException e) { + SessionState.getConsole().printError(e.toString(), + e.getException() != null ? Throwables.getStackTraceAsString(e.getException()) : ""); + throw e; } - - return response; } else { throw new RuntimeException("Could not get CommandProcessor for command: " + commandName); } diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/dataset/QTestDatasetHandler.java itests/util/src/main/java/org/apache/hadoop/hive/ql/dataset/QTestDatasetHandler.java index ca3c2772e7..d0e3a718b7 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/ql/dataset/QTestDatasetHandler.java +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/dataset/QTestDatasetHandler.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.QTestSystemProperties; import org.apache.hadoop.hive.ql.QTestUtil; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.qoption.QTestOptionHandler; import org.junit.Assert; @@ -68,10 +69,11 @@ public boolean initDataset(String table, CliDriver cliDriver) throws Exception { throw new RuntimeException(String.format("dataset file not found %s", tableFile), e); } - CommandProcessorResponse result = cliDriver.processLine(commands); - LOG.info("Result from cliDrriver.processLine in initFromDatasets=" + result); - if (result.getResponseCode() != 0) { - Assert.fail("Failed during initFromDatasets processLine with code=" + result); + try { + CommandProcessorResponse result = cliDriver.processLine(commands); + LOG.info("Result from cliDrriver.processLine in initFromDatasets=" + result); + } catch (CommandProcessorException e) { + Assert.fail("Failed during initFromDatasets processLine with code=" + e); } return true; diff --git ql/src/java/org/apache/hadoop/hive/ql/Driver.java ql/src/java/org/apache/hadoop/hive/ql/Driver.java index bcd4600683..91910d1c0c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -112,6 +112,7 @@ import 
org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.mapper.PlanMapper; import org.apache.hadoop.hive.ql.plan.mapper.StatsSource; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.security.authorization.command.CommandAuthorizer; import org.apache.hadoop.hive.ql.session.LineageState; @@ -199,12 +200,7 @@ private boolean retrial = false; private boolean checkConcurrency() { - boolean supportConcurrency = conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY); - if (!supportConcurrency) { - LOG.info("Concurrency mode is disabled, not creating a lock manager"); - return false; - } - return true; + return conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY); } @Override @@ -343,7 +339,7 @@ public int compile(String command, boolean resetTaskIds) { try { compile(command, resetTaskIds, false); return 0; - } catch (CommandProcessorResponse cpr) { + } catch (CommandProcessorException cpr) { return cpr.getErrorCode(); } } @@ -352,15 +348,12 @@ public int compile(String command, boolean resetTaskIds) { // interrupted, it should be set to true if the compile is called within another method like // runInternal, which defers the close to the called in that method. @VisibleForTesting - public void compile(String command, boolean resetTaskIds, boolean deferClose) throws CommandProcessorResponse { + public void compile(String command, boolean resetTaskIds, boolean deferClose) throws CommandProcessorException { + createTransactionManager(); + PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.COMPILE); - driverState.lock(); - try { - driverState.compiling(); - } finally { - driverState.unlock(); - } + driverState.compilingWithLocking(); command = new VariableSubstitution(new HiveVariableSource() { @Override @@ -420,35 +413,6 @@ public void compile(String command, boolean resetTaskIds, boolean deferClose) th boolean parseError = false; try { - - // Initialize the transaction manager. This must be done before analyze is called. - if (initTxnMgr != null) { - queryTxnMgr = initTxnMgr; - } else { - queryTxnMgr = SessionState.get().initTxnMgr(conf); - } - if (queryTxnMgr instanceof Configurable) { - ((Configurable) queryTxnMgr).setConf(conf); - } - queryState.setTxnManager(queryTxnMgr); - - // In case when user Ctrl-C twice to kill Hive CLI JVM, we want to release locks - // if compile is being called multiple times, clear the old shutdownhook - ShutdownHookManager.removeShutdownHook(shutdownRunner); - final HiveTxnManager txnMgr = queryTxnMgr; - shutdownRunner = new Runnable() { - @Override - public void run() { - try { - releaseLocksAndCommitOrRollback(false, txnMgr); - } catch (LockException e) { - LOG.warn("Exception when releasing locks in ShutdownHook for Driver: " + - e.getMessage()); - } - } - }; - ShutdownHookManager.addShutdownHook(shutdownRunner, SHUTDOWN_HOOK_PRIORITY); - checkInterrupted("before parsing and analysing the query", null, null); if (ctx == null) { @@ -565,7 +529,7 @@ public void run() { } } catch (AuthorizationException authExp) { console.printError("Authorization failed:" + authExp.getMessage() + ". 
Use SHOW GRANT to get more details."); - throw createProcessorResponse(403, authExp.getMessage(), "42000", null); + throw createProcessorException(403, authExp.getMessage(), "42000", null); } finally { perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DO_AUTHORIZATION); } @@ -584,8 +548,8 @@ public void run() { } } } - } catch (CommandProcessorResponse cpr) { - throw cpr; + } catch (CommandProcessorException cpe) { + throw cpe; } catch (Exception e) { checkInterrupted("during query compilation: " + e.getMessage(), null, null); @@ -608,7 +572,7 @@ public void run() { } console.printError(errorMessage, "\n" + StringUtils.stringifyException(e)); - throw createProcessorResponse(error.getErrorCode(), errorMessage, error.getSQLState(), e); + throw createProcessorException(error.getErrorCode(), errorMessage, error.getSQLState(), e); } finally { // Trigger post compilation hook. Note that if the compilation fails here then // before/after execution hook will never be executed. @@ -628,25 +592,54 @@ public void run() { if (isInterrupted && !deferClose) { closeInProcess(true); } - driverState.lock(); - try { - if (isInterrupted) { - driverState.compilationInterrupted(deferClose); - } else { - driverState.compilationFinished(compileError); - } - } finally { - driverState.unlock(); - } if (isInterrupted) { + driverState.compilationInterruptedWithLocking(deferClose); LOG.info("Compiling command(queryId=" + queryId + ") has been interrupted after " + duration + " seconds"); } else { + driverState.compilationFinishedWithLocking(compileError); LOG.info("Completed compiling command(queryId=" + queryId + "); Time taken: " + duration + " seconds"); } } } + private void createTransactionManager() throws CommandProcessorException { + try { + // Initialize the transaction manager. This must be done before analyze is called. + if (initTxnMgr != null) { + queryTxnMgr = initTxnMgr; + } else { + queryTxnMgr = SessionState.get().initTxnMgr(conf); + } + if (queryTxnMgr instanceof Configurable) { + ((Configurable) queryTxnMgr).setConf(conf); + } + queryState.setTxnManager(queryTxnMgr); + + // In case when user Ctrl-C twice to kill Hive CLI JVM, we want to release locks + // if compile is being called multiple times, clear the old shutdownhook + ShutdownHookManager.removeShutdownHook(shutdownRunner); + shutdownRunner = new Runnable() { + @Override + public void run() { + try { + releaseLocksAndCommitOrRollback(false, queryTxnMgr); + } catch (LockException e) { + LOG.warn("Exception when releasing locks in ShutdownHook for Driver: " + + e.getMessage()); + } + } + }; + ShutdownHookManager.addShutdownHook(shutdownRunner, SHUTDOWN_HOOK_PRIORITY); + } catch (LockException e) { + ErrorMsg error = ErrorMsg.getErrorMsg(e.getMessage()); + String errorMessage = "FAILED: " + e.getClass().getSimpleName() + " [Error " + error.getErrorCode() + "]:"; + + console.printError(errorMessage, "\n" + StringUtils.stringifyException(e)); + throw createProcessorException(error.getErrorCode(), errorMessage, error.getSQLState(), e); + } + } + // Checks whether txn list has been invalidated while planning the query. 
// This would happen if query requires exclusive/semi-shared lock, and there // has been a committed transaction on the table over which the lock is @@ -773,7 +766,7 @@ private void setLastReplIdForDump(HiveConf conf) throws HiveException, TExceptio LOG.debug("Setting " + ReplUtils.LAST_REPL_ID_KEY + " = " + lastReplId); } - private void openTransaction() throws LockException, CommandProcessorResponse { + private void openTransaction() throws LockException, CommandProcessorException { if (checkConcurrency() && startImplicitTxn(queryTxnMgr) && !queryTxnMgr.isTxnOpen()) { String userFromUGI = getUserFromUGI(); queryTxnMgr.openTxn(ctx, userFromUGI); @@ -832,7 +825,8 @@ private boolean startImplicitTxn(HiveTxnManager txnManager) throws LockException return shouldOpenImplicitTxn; } - private void checkInterrupted(String msg, HookContext hookContext, PerfLogger perfLogger) throws CommandProcessorResponse { + private void checkInterrupted(String msg, HookContext hookContext, PerfLogger perfLogger) + throws CommandProcessorException { if (driverState.isAborted()) { String errorMessage = "FAILED: command has been interrupted: " + msg; console.printError(errorMessage); @@ -843,7 +837,7 @@ private void checkInterrupted(String msg, HookContext hookContext, PerfLogger pe LOG.warn("Caught exception attempting to invoke Failure Hooks", e); } } - throw createProcessorResponse(1000, errorMessage, "HY008", null); + throw createProcessorException(1000, errorMessage, "HY008", null); } } @@ -1050,7 +1044,7 @@ private void addTableFromEntity(Entity entity, Collection tableList) { return result; } - private String getUserFromUGI() throws CommandProcessorResponse { + private String getUserFromUGI() throws CommandProcessorException { // Don't use the userName member, as it may or may not have been set. Get the value from // conf, which calls into getUGI to figure out who the process is running as. try { @@ -1058,7 +1052,7 @@ private String getUserFromUGI() throws CommandProcessorResponse { } catch (IOException e) { String errorMessage = "FAILED: Error in determining user while acquiring locks: " + e.getMessage(); console.printError(errorMessage, "\n" + StringUtils.stringifyException(e)); - throw createProcessorResponse(10, errorMessage, ErrorMsg.findSQLState(e.getMessage()), e); + throw createProcessorException(10, errorMessage, ErrorMsg.findSQLState(e.getMessage()), e); } } @@ -1069,9 +1063,9 @@ private String getUserFromUGI() throws CommandProcessorResponse { * * This method also records the list of valid transactions. This must be done after any * transactions have been opened. 
- * @throws CommandProcessorResponse + * @throws CommandProcessorException **/ - private void acquireLocks() throws CommandProcessorResponse { + private void acquireLocks() throws CommandProcessorException { PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS); @@ -1151,7 +1145,7 @@ private void acquireLocks() throws CommandProcessorResponse { } catch (Exception e) { String errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage(); console.printError(errorMessage, "\n" + StringUtils.stringifyException(e)); - throw createProcessorResponse(10, errorMessage, ErrorMsg.findSQLState(e.getMessage()), e); + throw createProcessorException(10, errorMessage, ErrorMsg.findSQLState(e.getMessage()), e); } finally { perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS); } @@ -1168,8 +1162,7 @@ public void releaseLocksAndCommitOrRollback(boolean commit) throws LockException * **/ @VisibleForTesting - public void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnManager) - throws LockException { + public void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnManager) throws LockException { PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RELEASE_LOCKS); HiveTxnManager txnMgr; @@ -1222,29 +1215,28 @@ public void releaseResources() { } @Override - - public CommandProcessorResponse run(String command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { return run(command, false); } @Override - public CommandProcessorResponse run() { + public CommandProcessorResponse run() throws CommandProcessorException { return run(null, true); } - public CommandProcessorResponse run(String command, boolean alreadyCompiled) { + public CommandProcessorResponse run(String command, boolean alreadyCompiled) throws CommandProcessorException { try { runInternal(command, alreadyCompiled); - return createProcessorResponse(0, null, null, null); - } catch (CommandProcessorResponse cpr) { + return new CommandProcessorResponse(getSchema(), null); + } catch (CommandProcessorException cpe) { SessionState ss = SessionState.get(); if (ss == null) { - return cpr; + throw cpe; } MetaDataFormatter mdf = MetaDataFormatUtils.getFormatter(ss.getConf()); if (!(mdf instanceof JsonMetaDataFormatter)) { - return cpr; + throw cpe; } /*Here we want to encode the error in machine readable way (e.g. JSON) * Ideally, errorCode would always be set to a canonical error defined in ErrorMsg. @@ -1253,47 +1245,48 @@ public CommandProcessorResponse run(String command, boolean alreadyCompiled) { * the error is a specific/expected one. * It's written to stdout for backward compatibility (WebHCat consumes it).*/ try { - if (cpr.getException() == null) { - mdf.error(ss.out, cpr.getErrorMessage(), cpr.getResponseCode(), cpr.getSQLState()); - return cpr; + if (cpe.getException() == null) { + mdf.error(ss.out, cpe.getErrorMessage(), cpe.getResponseCode(), cpe.getSqlState()); + throw cpe; } - ErrorMsg canonicalErr = ErrorMsg.getErrorMsg(cpr.getResponseCode()); + ErrorMsg canonicalErr = ErrorMsg.getErrorMsg(cpe.getResponseCode()); if (canonicalErr != null && canonicalErr != ErrorMsg.GENERIC_ERROR) { /*Some HiveExceptions (e.g. SemanticException) don't set canonical ErrorMsg explicitly, but there is logic (e.g. #compile()) to find an appropriate canonical error and return its code as error code. 
In this case we want to preserve it for downstream code to interpret*/ - mdf.error(ss.out, cpr.getErrorMessage(), cpr.getResponseCode(), cpr.getSQLState(), null); - return cpr; + mdf.error(ss.out, cpe.getErrorMessage(), cpe.getResponseCode(), cpe.getSqlState(), null); + throw cpe; } - if (cpr.getException() instanceof HiveException) { - HiveException rc = (HiveException)cpr.getException(); - mdf.error(ss.out, cpr.getErrorMessage(), rc.getCanonicalErrorMsg().getErrorCode(), cpr.getSQLState(), + if (cpe.getException() instanceof HiveException) { + HiveException rc = (HiveException)cpe.getException(); + mdf.error(ss.out, cpe.getErrorMessage(), rc.getCanonicalErrorMsg().getErrorCode(), cpe.getSqlState(), rc.getCanonicalErrorMsg() == ErrorMsg.GENERIC_ERROR ? StringUtils.stringifyException(rc) : null); } else { - ErrorMsg canonicalMsg = ErrorMsg.getErrorMsg(cpr.getException().getMessage()); - mdf.error(ss.out, cpr.getErrorMessage(), canonicalMsg.getErrorCode(), cpr.getSQLState(), - StringUtils.stringifyException(cpr.getException())); + ErrorMsg canonicalMsg = ErrorMsg.getErrorMsg(cpe.getException().getMessage()); + mdf.error(ss.out, cpe.getErrorMessage(), canonicalMsg.getErrorCode(), cpe.getSqlState(), + StringUtils.stringifyException(cpe.getException())); } } catch (HiveException ex) { console.printError("Unable to JSON-encode the error", StringUtils.stringifyException(ex)); } - return cpr; + throw cpe; } } @Override - public CommandProcessorResponse compileAndRespond(String command) { + public CommandProcessorResponse compileAndRespond(String command) throws CommandProcessorException { return compileAndRespond(command, false); } - public CommandProcessorResponse compileAndRespond(String command, boolean cleanupTxnList) { + public CommandProcessorResponse compileAndRespond(String command, boolean cleanupTxnList) + throws CommandProcessorException { try { compileInternal(command, false); - return createProcessorResponse(0, null, null, null); - } catch (CommandProcessorResponse e) { - return e; + return new CommandProcessorResponse(getSchema(), null); + } catch (CommandProcessorException cpe) { + throw cpe; } finally { if (cleanupTxnList) { // Valid txn list might be generated for a query compiled using this @@ -1303,7 +1296,7 @@ public CommandProcessorResponse compileAndRespond(String command, boolean cleanu } } - public void lockAndRespond() throws CommandProcessorResponse { + public void lockAndRespond() throws CommandProcessorException { // Assumes the query has already been compiled if (plan == null) { throw new IllegalStateException( @@ -1313,14 +1306,14 @@ public void lockAndRespond() throws CommandProcessorResponse { if (requiresLock()) { try { acquireLocks(); - } catch (CommandProcessorResponse cpr) { - rollback(cpr); - throw cpr; + } catch (CommandProcessorException cpe) { + rollback(cpe); + throw cpe; } } } - private void compileInternal(String command, boolean deferClose) throws CommandProcessorResponse { + private void compileInternal(String command, boolean deferClose) throws CommandProcessorException { Metrics metrics = MetricsFactory.getInstance(); if (metrics != null) { metrics.incrementCounter(MetricsConstant.WAITING_COMPILE_OPS, 1); @@ -1339,18 +1332,18 @@ private void compileInternal(String command, boolean deferClose) throws CommandP } if (!success) { String errorMessage = ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCodedMsg(); - throw createProcessorResponse(ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode(), errorMessage, null, null); + throw 
createProcessorException(ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode(), errorMessage, null, null); } try { compile(command, true, deferClose); - } catch (CommandProcessorResponse cpr) { + } catch (CommandProcessorException cpe) { try { releaseLocksAndCommitOrRollback(false); } catch (LockException e) { LOG.warn("Exception in releasing locks. " + StringUtils.stringifyException(e)); } - throw cpr; + throw cpe; } } //Save compile-time PerfLogging for WebUI. @@ -1360,7 +1353,7 @@ private void compileInternal(String command, boolean deferClose) throws CommandP queryDisplay.setPerfLogEnds(QueryDisplay.Phase.COMPILATION, perfLogger.getEndTimes()); } - private void runInternal(String command, boolean alreadyCompiled) throws CommandProcessorResponse { + private void runInternal(String command, boolean alreadyCompiled) throws CommandProcessorException { DriverState.setDriverState(driverState); driverState.lock(); @@ -1371,7 +1364,7 @@ private void runInternal(String command, boolean alreadyCompiled) throws Command } else { String errorMessage = "FAILED: Precompiled query has been cancelled or closed."; console.printError(errorMessage); - throw createProcessorResponse(12, errorMessage, null, null); + throw createProcessorException(12, errorMessage, null, null); } } else { driverState.compiling(); @@ -1392,7 +1385,7 @@ private void runInternal(String command, boolean alreadyCompiled) throws Command } catch (Exception e) { String errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e); console.printError(errorMessage + "\n" + StringUtils.stringifyException(e)); - throw createProcessorResponse(12, errorMessage, ErrorMsg.findSQLState(e.getMessage()), e); + throw createProcessorException(12, errorMessage, ErrorMsg.findSQLState(e.getMessage()), e); } if (!alreadyCompiled) { @@ -1461,9 +1454,9 @@ private void runInternal(String command, boolean alreadyCompiled) throws Command try { execute(); - } catch (CommandProcessorResponse cpr) { - rollback(cpr); - throw cpr; + } catch (CommandProcessorException cpe) { + rollback(cpe); + throw cpe; } //if needRequireLock is false, the release here will do nothing because there is no lock @@ -1494,7 +1487,7 @@ else if(plan.getOperation() == HiveOperation.ROLLBACK) { } catch (Exception e) { String errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e); console.printError(errorMessage + "\n" + StringUtils.stringifyException(e)); - throw createProcessorResponse(12, errorMessage, ErrorMsg.findSQLState(e.getMessage()), e); + throw createProcessorException(12, errorMessage, ErrorMsg.findSQLState(e.getMessage()), e); } isFinishedWithError = false; } finally { @@ -1512,26 +1505,27 @@ else if(plan.getOperation() == HiveOperation.ROLLBACK) { driverState.unlock(); } } + + SessionState.getPerfLogger().cleanupPerfLogMetrics(); } - private CommandProcessorResponse rollback(CommandProcessorResponse cpr) throws CommandProcessorResponse { + private void rollback(CommandProcessorException cpe) throws CommandProcessorException { //console.printError(cpr.toString()); try { releaseLocksAndCommitOrRollback(false); - } - catch (LockException e) { - LOG.error("rollback() FAILED: " + cpr);//make sure not to loose + } catch (LockException e) { + LOG.error("rollback() FAILED: " + cpe); //make sure not to loose handleHiveException(e, 12, "Additional info in hive.log at \"rollback() FAILED\""); } - return cpr; } - private CommandProcessorResponse handleHiveException(HiveException e, int ret) throws CommandProcessorResponse { + private 
CommandProcessorException handleHiveException(HiveException e, int ret) throws CommandProcessorException { return handleHiveException(e, ret, null); } - private CommandProcessorResponse handleHiveException(HiveException e, int ret, String rootMsg) throws CommandProcessorResponse { + private CommandProcessorException handleHiveException(HiveException e, int ret, String rootMsg) + throws CommandProcessorException { String errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e); if(rootMsg != null) { errorMessage += "\n" + rootMsg; @@ -1539,10 +1533,11 @@ private CommandProcessorResponse handleHiveException(HiveException e, int ret, S String sqlState = e.getCanonicalErrorMsg() != null ? e.getCanonicalErrorMsg().getSQLState() : ErrorMsg.findSQLState(e.getMessage()); console.printError(errorMessage + "\n" + StringUtils.stringifyException(e)); - throw createProcessorResponse(ret, errorMessage, sqlState, e); + throw createProcessorException(ret, errorMessage, sqlState, e); } private boolean requiresLock() { if (!checkConcurrency()) { + LOG.info("Concurrency mode is disabled, not creating a lock manager"); return false; } // Lock operations themselves don't require the lock. @@ -1587,18 +1582,17 @@ private boolean isExplicitLockOperation() { return false; } - private CommandProcessorResponse createProcessorResponse(int ret, String errorMessage, String sqlState, + private CommandProcessorException createProcessorException(int ret, String errorMessage, String sqlState, Throwable downstreamError) { SessionState.getPerfLogger().cleanupPerfLogMetrics(); queryDisplay.setErrorMessage(errorMessage); - if(downstreamError != null && downstreamError instanceof HiveException) { + if (downstreamError != null && downstreamError instanceof HiveException) { ErrorMsg em = ((HiveException)downstreamError).getCanonicalErrorMsg(); - if(em != null) { - return new CommandProcessorResponse(ret, errorMessage, sqlState, - schema, downstreamError, em.getErrorCode(), null); + if (em != null) { + return new CommandProcessorException(ret, em.getErrorCode(), errorMessage, sqlState, downstreamError); } } - return new CommandProcessorResponse(ret, errorMessage, sqlState, downstreamError); + return new CommandProcessorException(ret, -1, errorMessage, sqlState, downstreamError); } private void useFetchFromCache(CacheEntry cacheEntry) { @@ -1665,7 +1659,7 @@ private void postExecutionCacheActions() throws Exception { } } - private void execute() throws CommandProcessorResponse { + private void execute() throws CommandProcessorException { PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_EXECUTE); @@ -1691,7 +1685,7 @@ private void execute() throws CommandProcessorResponse { if (!driverState.isCompiled() && !driverState.isExecuting()) { String errorMessage = "FAILED: unexpected driverstate: " + driverState + ", for query " + queryStr; console.printError(errorMessage); - throw createProcessorResponse(1000, errorMessage, "HY008", null); + throw createProcessorException(1000, errorMessage, "HY008", null); } else { driverState.executing(); } @@ -1857,7 +1851,7 @@ private void execute() throws CommandProcessorResponse { // in case we decided to run everything in local mode, restore the // the jobtracker setting to its initial value ctx.restoreOriginalTracker(); - throw createProcessorResponse(exitVal, errorMessage, sqlState, result.getTaskError()); + throw createProcessorException(exitVal, errorMessage, sqlState, result.getTaskError()); } } @@ -1889,7 +1883,7 
@@ private void execute() throws CommandProcessorResponse { String errorMessage = "FAILED: Operation cancelled"; invokeFailureHooks(perfLogger, hookContext, errorMessage, null); console.printError(errorMessage); - throw createProcessorResponse(1000, errorMessage, "HY008", null); + throw createProcessorException(1000, errorMessage, "HY008", null); } // remove incomplete outputs. @@ -1917,9 +1911,9 @@ private void execute() throws CommandProcessorResponse { SessionState.get().getHiveHistory().printRowCount(queryId); } releasePlan(plan); - } catch (CommandProcessorResponse cpr) { + } catch (CommandProcessorException cpe) { executionError = true; - throw cpr; + throw cpe; } catch (Throwable e) { executionError = true; @@ -1940,7 +1934,7 @@ private void execute() throws CommandProcessorResponse { } } console.printError(errorMessage + "\n" + StringUtils.stringifyException(e)); - throw createProcessorResponse(12, errorMessage, "08S01", e); + throw createProcessorException(12, errorMessage, "08S01", e); } finally { // Trigger query hooks after query completes its execution. try { @@ -2442,11 +2436,8 @@ public boolean hasResultSet() { return true; } } - if (plan.getFetchTask() != null && schema != null && schema.isSetFieldSchemas()) { - return true; - } else { - return false; - } + + return plan.getFetchTask() != null && plan.getResultSchema() != null && plan.getResultSchema().isSetFieldSchemas(); } void setCompactionWriteIds(ValidWriteIdList val, long compactorTxnId) { diff --git ql/src/java/org/apache/hadoop/hive/ql/DriverState.java ql/src/java/org/apache/hadoop/hive/ql/DriverState.java index 26129c14eb..6ac071f9d9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/DriverState.java +++ ql/src/java/org/apache/hadoop/hive/ql/DriverState.java @@ -86,16 +86,35 @@ public void compiling() { driverState = State.COMPILING; } + public void compilingWithLocking() { + lock(); + try { + driverState = State.COMPILING; + } finally { + unlock(); + } + } + public boolean isCompiling() { return driverState == State.COMPILING; } - public void compilationInterrupted(boolean deferClose) { - driverState = deferClose ? State.EXECUTING : State.ERROR; + public void compilationInterruptedWithLocking(boolean deferClose) { + lock(); + try { + driverState = deferClose ? State.EXECUTING : State.ERROR; + } finally { + unlock(); + } } - public void compilationFinished(boolean wasError) { - driverState = wasError ? State.ERROR : State.COMPILED; + public void compilationFinishedWithLocking(boolean wasError) { + lock(); + try { + driverState = wasError ? 
State.ERROR : State.COMPILED; + } finally { + unlock(); + } } public boolean isCompiled() { diff --git ql/src/java/org/apache/hadoop/hive/ql/DriverUtils.java ql/src/java/org/apache/hadoop/hive/ql/DriverUtils.java index f31b15687d..596b1099f5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/DriverUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/DriverUtils.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,10 +51,11 @@ public static void runOnDriver(HiveConf conf, String user, Driver driver = new Driver(qs, user, null, null); driver.setCompactionWriteIds(writeIds, compactorTxnId); try { - CommandProcessorResponse cpr = driver.run(query); - if (cpr.getResponseCode() != 0) { - LOG.error("Failed to run " + query, cpr.getException()); - throw new HiveException("Failed to run " + query, cpr.getException()); + try { + driver.run(query); + } catch (CommandProcessorException e) { + LOG.error("Failed to run " + query, e.getException()); + throw new HiveException("Failed to run " + query, e.getException()); } } finally { driver.close(); diff --git ql/src/java/org/apache/hadoop/hive/ql/IDriver.java ql/src/java/org/apache/hadoop/hive/ql/IDriver.java index b61822417d..baad2694b3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/IDriver.java +++ ql/src/java/org/apache/hadoop/hive/ql/IDriver.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.api.Schema; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.processors.CommandProcessor; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; /** @@ -36,7 +37,7 @@ @InterfaceStability.Unstable public interface IDriver extends CommandProcessor { - CommandProcessorResponse compileAndRespond(String statement); + CommandProcessorResponse compileAndRespond(String statement) throws CommandProcessorException; QueryPlan getPlan(); @@ -46,10 +47,10 @@ void setOperationId(String guid64); - CommandProcessorResponse run(); - @Override - CommandProcessorResponse run(String command); + CommandProcessorResponse run() throws CommandProcessorException; + @Override + CommandProcessorResponse run(String command) throws CommandProcessorException; // create some "cover" to the result? 
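For reference, a minimal caller-side sketch of the reworked IDriver contract above, mirroring the DriverUtils.runOnDriver change earlier in this patch: run() now returns a CommandProcessorResponse only on success and signals failure by throwing CommandProcessorException. The class and method names below (DriverCallerSketch, runQuery) are illustrative and not part of this patch; the driver calls follow the signatures shown in the diff.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.ql.IDriver;
import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;

// Hypothetical helper illustrating the new contract; not part of this patch.
class DriverCallerSketch {
  static void runQuery(IDriver driver, String query) throws Exception {
    try {
      // Success path: the response now only carries an optional schema and message.
      CommandProcessorResponse response = driver.run(query);
      if (response.getMessage() != null) {
        System.out.println(response.getMessage());
      }
      List results = new ArrayList();
      while (driver.getResults(results)) {
        results.clear(); // consume fetched rows
      }
    } catch (CommandProcessorException e) {
      // Failure path: code, message, SQLState and cause are read off the exception.
      throw new Exception("Query failed (" + e.getResponseCode() + "): " + e.getErrorMessage(), e.getException());
    }
  }
}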
boolean getResults(List res) throws IOException; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java index 578f99437a..e9c10e4b0c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.VectorizationDetailLevel; import org.apache.hadoop.hive.ql.plan.ExplainWork; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.stats.StatsAggregator; import org.apache.hadoop.hive.ql.stats.StatsCollectionContext; import org.apache.hadoop.hive.ql.stats.fs.FSStatsAggregator; @@ -62,7 +62,6 @@ public ExplainSemanticAnalyzer(QueryState queryState) throws SemanticException { config = new ExplainConfiguration(); } - @SuppressWarnings("unchecked") @Override public void analyzeInternal(ASTNode ast) throws SemanticException { final int childCount = ast.getChildCount(); @@ -148,15 +147,12 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { runCtx = new Context(conf); // runCtx and ctx share the configuration, but not isExplainPlan() runCtx.setExplainConfig(config); - Driver driver = new Driver(conf, runCtx, queryState.getLineageState()); - CommandProcessorResponse ret = driver.run(query); - if(ret.getResponseCode() == 0) { - // Note that we need to call getResults for simple fetch optimization. - // However, we need to skip all the results. + try (Driver driver = new Driver(conf, runCtx, queryState.getLineageState())) { + driver.run(query); while (driver.getResults(new ArrayList())) { } - } else { - throw new SemanticException(ret.getErrorMessage(), ret.getException()); + } catch (CommandProcessorException e) { + throw new SemanticException(e.getErrorMessage(), e.getException()); } config.setOpIdToRuntimeNumRows(aggregateStats(config.getExplainRootPath())); } catch (IOException e1) { @@ -232,7 +228,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { ExplainTask explTask = (ExplainTask) TaskFactory.get(work); - fieldList = explTask.getResultSchema(); + fieldList = ExplainTask.getResultSchema(); rootTasks.add(explTask); } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/AddResourceProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/AddResourceProcessor.java index d2286821f4..ee57e73b3b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/AddResourceProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/AddResourceProcessor.java @@ -36,12 +36,11 @@ */ public class AddResourceProcessor implements CommandProcessor { - public static final Logger LOG = LoggerFactory.getLogger(AddResourceProcessor.class - .getName()); - public static final LogHelper console = new LogHelper(LOG); + private static final Logger LOG = LoggerFactory.getLogger(AddResourceProcessor.class.getName()); + private static final LogHelper console = new LogHelper(LOG); @Override - public CommandProcessorResponse run(String command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { SessionState ss = SessionState.get(); command = new VariableSubstitution(new HiveVariableSource() { @Override @@ -56,7 +55,7 @@ public CommandProcessorResponse run(String 
command) { console.printError("Usage: add [" + StringUtils.join(SessionState.ResourceType.values(), "|") + "] []*"); - return new CommandProcessorResponse(1); + throw new CommandProcessorException(1); } CommandProcessorResponse authErrResp = @@ -70,9 +69,9 @@ public CommandProcessorResponse run(String command) { ss.add_resources(t, Arrays.asList(Arrays.copyOfRange(tokens, 1, tokens.length))); } catch (Exception e) { - return CommandProcessorResponse.create(e); + throw new CommandProcessorException(e); } - return new CommandProcessorResponse(0); + return new CommandProcessorResponse(); } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessor.java index 4d73181659..7fd1093dfa 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessor.java @@ -19,5 +19,5 @@ package org.apache.hadoop.hive.ql.processors; public interface CommandProcessor extends AutoCloseable { - CommandProcessorResponse run(String command); + CommandProcessorResponse run(String command) throws CommandProcessorException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorException.java ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorException.java new file mode 100644 index 0000000000..36799961aa --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorException.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.processors; + +/** + * Exception thrown during command processing class. 
+ */ +public class CommandProcessorException extends Exception { + private static final long serialVersionUID = 1L; + + private final int responseCode; + private final int hiveErrorCode; + private final String errorMessage; + private final String sqlState; + private final Throwable exception; + + public CommandProcessorException(int responseCode) { + this(responseCode, -1, null, null, null); + } + + public CommandProcessorException(String errorMessage) { + this(errorMessage, null); + } + + public CommandProcessorException(Throwable exception) { + this(exception.getMessage(), exception); + } + + public CommandProcessorException(String errorMessage, Throwable exception) { + this(1, -1, errorMessage, null, exception); + } + + public CommandProcessorException(int responseCode, int hiveErrorCode, String errorMessage, String sqlState, + Throwable exception) { + this.responseCode = responseCode; + this.hiveErrorCode = hiveErrorCode; + this.errorMessage = errorMessage; + this.sqlState = sqlState; + this.exception = exception; + } + + public int getResponseCode() { + return responseCode; + } + + public int getErrorCode() { + return hiveErrorCode; + } + + public String getErrorMessage() { + return errorMessage; + } + + public String getSqlState() { + return sqlState; + } + + public Throwable getException() { + return exception; + } + + @Override + public String toString() { + return "(responseCode = " + responseCode + ", errorMessage = " + errorMessage + ", " + + (hiveErrorCode > 0 ? "hiveErrorCode = " + hiveErrorCode + ", " : "") + + "SQLState = " + sqlState + + (exception == null ? "" : ", exception = " + exception.getMessage()) + ")"; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java index f8b6a97c91..5302800f88 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java @@ -111,7 +111,7 @@ public static CommandProcessor getForHiveCommandInternal(String[] cmd, HiveConf } } - static Logger LOG = LoggerFactory.getLogger(CommandProcessorFactory.class); + private static Logger LOG = LoggerFactory.getLogger(CommandProcessorFactory.class); public static CommandProcessor get(String[] cmd, @Nonnull HiveConf conf) throws SQLException { CommandProcessor result = getForHiveCommand(cmd, conf); diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java index 94cfa5178c..ccb24a3dc1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java @@ -18,10 +18,7 @@ package org.apache.hadoop.hive.ql.processors; -import java.util.List; - import org.apache.hadoop.hive.metastore.api.Schema; -import org.apache.hadoop.hive.ql.ErrorMsg; /** * Encapsulates the basic response info returned by classes the implement the @@ -30,87 +27,29 @@ * is not 0. Note that often {@code responseCode} ends up the exit value of * command shell process so should keep it to < 127. 
*/ -public class CommandProcessorResponse extends Exception { - private final int responseCode; - private final String errorMessage; - private final int hiveErrorCode; - private final String SQLState; - private final Schema resSchema; - - private final Throwable exception; - private final List consoleMessages; - - public CommandProcessorResponse(int responseCode) { - this(responseCode, null, null, null, null); - } - - public CommandProcessorResponse(int responseCode, String errorMessage, String SQLState) { - this(responseCode, errorMessage, SQLState, null, null); - } - - public CommandProcessorResponse(int responseCode, List consoleMessages) { - this(responseCode, null, null, null, null, -1, consoleMessages); - } +public class CommandProcessorResponse { + private final Schema schema; + private final String message; - public CommandProcessorResponse(int responseCode, String errorMessage, String SQLState, Throwable exception) { - this(responseCode, errorMessage, SQLState, null, exception); + public CommandProcessorResponse() { + this(null, null); } - public CommandProcessorResponse(int responseCode, String errorMessage, String SQLState, Schema schema) { - this(responseCode, errorMessage, SQLState, schema, null); - } - public CommandProcessorResponse(int responseCode, ErrorMsg canonicalErrMsg, Throwable t, String ... msgArgs) { - this(responseCode, canonicalErrMsg.format(msgArgs), - canonicalErrMsg.getSQLState(), null, t, canonicalErrMsg.getErrorCode(), null); + public CommandProcessorResponse(Schema schema, String message) { + this.schema = schema; + this.message = message; } - /** - * Create CommandProcessorResponse object indicating an error. - * Creates new CommandProcessorResponse with responseCode=1, and sets message - * from exception argument - * - * @param e - * @return - */ - public static CommandProcessorResponse create(Exception e) { - return new CommandProcessorResponse(1, e.getMessage(), null); + public Schema getSchema() { + return schema; } - public CommandProcessorResponse(int responseCode, String errorMessage, String SQLState, - Schema schema, Throwable exception) { - this(responseCode, errorMessage, SQLState, schema, exception, -1, null); + public String getMessage() { + return message; } - public CommandProcessorResponse(int responseCode, String errorMessage, String SQLState, - Schema schema, Throwable exception, int hiveErrorCode, List consoleMessages) { - this.responseCode = responseCode; - this.errorMessage = errorMessage; - this.SQLState = SQLState; - this.resSchema = schema; - this.exception = exception; - this.hiveErrorCode = hiveErrorCode; - this.consoleMessages = consoleMessages; - } - - public int getResponseCode() { return responseCode; } - public String getErrorMessage() { return errorMessage; } - public String getSQLState() { return SQLState; } - public Schema getSchema() { return resSchema; } - public Throwable getException() { return exception; } - public List getConsoleMessages() { - return consoleMessages; - } - public int getErrorCode() { return hiveErrorCode; } @Override public String toString() { - return "(responseCode = " + responseCode + ", errorMessage = " + errorMessage + ", " + - (hiveErrorCode > 0 ? "hiveErrorCode = " + hiveErrorCode + ", " : "" ) + - "SQLState = " + SQLState + - (resSchema == null ? "" : ", resSchema = " + resSchema) + - (exception == null ? "" : ", exception = " + exception.getMessage()) + ")"; - } - - public boolean failed() { - return responseCode != 0; + return "(" + (schema == null ? 
"" : ", schema = " + schema + ", ") + "message = " + message + ")"; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java index e06ea5e9ca..b22e28b510 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java @@ -35,7 +35,7 @@ import com.google.common.base.Joiner; class CommandUtil { - public static final Logger LOG = LoggerFactory.getLogger(CommandUtil.class); + private static final Logger LOG = LoggerFactory.getLogger(CommandUtil.class); /** * Authorize command of given type and arguments @@ -47,7 +47,7 @@ * capturing the authorization error */ static CommandProcessorResponse authorizeCommand(SessionState ss, HiveOperationType type, - List command) { + List command) throws CommandProcessorException { if (ss == null) { // ss can be null in unit tests return null; @@ -60,12 +60,9 @@ static CommandProcessorResponse authorizeCommand(SessionState ss, HiveOperationT authorizeCommandThrowEx(ss, type, command); // authorized to perform action return null; - } catch (HiveAuthzPluginException e) { - LOG.error(errMsg, e); - return CommandProcessorResponse.create(e); - } catch (HiveAccessControlException e) { + } catch (HiveAuthzPluginException | HiveAccessControlException e) { LOG.error(errMsg, e); - return CommandProcessorResponse.create(e); + throw new CommandProcessorException(e); } } return null; @@ -78,7 +75,7 @@ static CommandProcessorResponse authorizeCommand(SessionState ss, HiveOperationT * @throws HiveAuthzPluginException * @throws HiveAccessControlException */ - static void authorizeCommandThrowEx(SessionState ss, HiveOperationType type, + private static void authorizeCommandThrowEx(SessionState ss, HiveOperationType type, List command) throws HiveAuthzPluginException, HiveAccessControlException { HivePrivilegeObject commandObj = HivePrivilegeObject.createHivePrivilegeObject(command); HiveAuthzContext.Builder ctxBuilder = new HiveAuthzContext.Builder(); @@ -99,7 +96,7 @@ static void authorizeCommandThrowEx(SessionState ss, HiveOperationType type, * capturing the authorization error */ static CommandProcessorResponse authorizeCommandAndServiceObject(SessionState ss, HiveOperationType type, - List command, String serviceObject) { + List command, String serviceObject) throws CommandProcessorException { if (ss == null) { // ss can be null in unit tests return null; @@ -114,7 +111,7 @@ static CommandProcessorResponse authorizeCommandAndServiceObject(SessionState ss return null; } catch (HiveAuthzPluginException | HiveAccessControlException e) { LOG.error(errMsg, e); - return CommandProcessorResponse.create(e); + throw new CommandProcessorException(e); } } return null; diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CompileProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/CompileProcessor.java index 7b96b33ba3..c5fb7725ba 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/CompileProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/CompileProcessor.java @@ -33,7 +33,6 @@ import org.apache.commons.io.IOUtils; import org.apache.hadoop.hive.conf.HiveVariableSource; import org.apache.hadoop.hive.conf.VariableSubstitution; -import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; import org.apache.hadoop.hive.ql.session.SessionState; import 
org.apache.hadoop.hive.ql.session.SessionState.LogHelper; @@ -100,10 +99,10 @@ * will be added to the session state via the session state's * ADD RESOURCE command. * @param command a String to be compiled - * @return CommandProcessorResponse with 0 for success and 1 for failure + * @return CommandProcessorResponse with some message */ @Override - public CommandProcessorResponse run(String command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { SessionState ss = SessionState.get(); this.command = command; @@ -116,17 +115,8 @@ public CommandProcessorResponse run(String command) { myId = runCount.getAndIncrement(); - try { - parse(ss); - } catch (CompileProcessorException e) { - return CommandProcessorResponse.create(e); - } - CommandProcessorResponse result = null; - try { - result = compile(ss); - } catch (CompileProcessorException e) { - return CommandProcessorResponse.create(e); - } + parse(ss); + CommandProcessorResponse result = compile(ss); return result; } @@ -136,7 +126,7 @@ public CommandProcessorResponse run(String command) { * @throws CompileProcessorException if the code can not be compiled or the jar can not be made */ @VisibleForTesting - void parse(SessionState ss) throws CompileProcessorException { + void parse(SessionState ss) throws CommandProcessorException { if (ss != null){ command = new VariableSubstitution(new HiveVariableSource() { @Override @@ -146,7 +136,7 @@ void parse(SessionState ss) throws CompileProcessorException { }).substitute(ss.getConf(), command); } if (command == null || command.length() == 0) { - throw new CompileProcessorException("Command was empty"); + throw new CommandProcessorException("Command was empty"); } StringBuilder toCompile = new StringBuilder(); int startPosition = 0; @@ -160,7 +150,7 @@ void parse(SessionState ss) throws CompileProcessorException { } if (startPosition == command.length()){ - throw new CompileProcessorException(SYNTAX); + throw new CommandProcessorException(SYNTAX); } for (int i = startPosition; i < command.length(); i++) { if (command.charAt(i) == '\\') { @@ -175,23 +165,23 @@ void parse(SessionState ss) throws CompileProcessorException { } } if (endPosition == -1){ - throw new CompileProcessorException(SYNTAX); + throw new CommandProcessorException(SYNTAX); } StringTokenizer st = new StringTokenizer(command.substring(endPosition+1), " "); if (st.countTokens() != 4){ - throw new CompileProcessorException(SYNTAX); + throw new CommandProcessorException(SYNTAX); } String shouldBeAs = st.nextToken(); if (!shouldBeAs.equalsIgnoreCase(AS)){ - throw new CompileProcessorException(SYNTAX); + throw new CommandProcessorException(SYNTAX); } setLang(st.nextToken()); if (!lang.equalsIgnoreCase(GROOVY)){ - throw new CompileProcessorException("Can not compile " + lang + ". Hive can only compile " + GROOVY); + throw new CommandProcessorException("Can not compile " + lang + ". 
Hive can only compile " + GROOVY); } String shouldBeNamed = st.nextToken(); if (!shouldBeNamed.equalsIgnoreCase(NAMED)){ - throw new CompileProcessorException(SYNTAX); + throw new CommandProcessorException(SYNTAX); } setNamed(st.nextToken()); setCode(toCompile.toString()); @@ -204,15 +194,15 @@ void parse(SessionState ss) throws CompileProcessorException { * @return Response code of 0 for success 1 for failure * @throws CompileProcessorException */ - CommandProcessorResponse compile(SessionState ss) throws CompileProcessorException { + CommandProcessorResponse compile(SessionState ss) throws CommandProcessorException { Project proj = new Project(); String ioTempDir = System.getProperty(IO_TMP_DIR); File ioTempFile = new File(ioTempDir); if (!ioTempFile.exists()){ - throw new CompileProcessorException(ioTempDir + " does not exists"); + throw new CommandProcessorException(ioTempDir + " does not exists"); } if (!ioTempFile.isDirectory() || !ioTempFile.canWrite()){ - throw new CompileProcessorException(ioTempDir + " is not a writable directory"); + throw new CommandProcessorException(ioTempDir + " is not a writable directory"); } Groovyc g = new Groovyc(); long runStamp = System.currentTimeMillis(); @@ -230,13 +220,13 @@ CommandProcessorResponse compile(SessionState ss) throws CompileProcessorExcepti try { Files.write(this.code, fileToWrite, Charset.forName("UTF-8")); } catch (IOException e1) { - throw new CompileProcessorException("writing file", e1); + throw new CommandProcessorException("writing file", e1); } destination.mkdir(); try { g.execute(); } catch (BuildException ex){ - throw new CompileProcessorException("Problem compiling", ex); + throw new CommandProcessorException("Problem compiling", ex); } File testArchive = new File(ioTempFile, jarId + ".jar"); JarArchiveOutputStream out = null; @@ -252,7 +242,7 @@ CommandProcessorResponse compile(SessionState ss) throws CompileProcessorExcepti } out.finish(); } catch (IOException e) { - throw new CompileProcessorException("Exception while writing jar", e); + throw new CommandProcessorException("Exception while writing jar", e); } finally { if (out!=null){ try { @@ -265,7 +255,7 @@ CommandProcessorResponse compile(SessionState ss) throws CompileProcessorExcepti if (ss != null){ ss.add_resource(ResourceType.JAR, testArchive.getAbsolutePath()); } - CommandProcessorResponse good = new CommandProcessorResponse(0, testArchive.getAbsolutePath(), null); + CommandProcessorResponse good = new CommandProcessorResponse(null, testArchive.getAbsolutePath()); return good; } @@ -297,19 +287,6 @@ public String getCommand() { return command; } - class CompileProcessorException extends HiveException { - - private static final long serialVersionUID = 1L; - - CompileProcessorException(String s, Throwable t) { - super(s, t); - } - - CompileProcessorException(String s) { - super(s); - } - } - @Override public void close() throws Exception { } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java index a8a97a0127..59e2559f45 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java @@ -49,11 +49,8 @@ private int DEFAULT_BIT_LENGTH = 128; - private HiveConf conf; - public CryptoProcessor(HadoopShims.HdfsEncryptionShim encryptionShim, HiveConf conf) { this.encryptionShim = encryptionShim; - this.conf = conf; CREATE_KEY_OPTIONS = new Options(); 
CREATE_KEY_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("keyName").isRequired().create()); @@ -72,24 +69,21 @@ private CommandLine parseCommandArgs(final Options opts, String[] args) throws P return parser.parse(opts, args); } - private CommandProcessorResponse returnErrorResponse(final String errmsg) { - return new CommandProcessorResponse(1, "Encryption Processor Helper Failed:" + errmsg, null); - } - private void writeTestOutput(final String msg) { SessionState.get().out.println(msg); } @Override - public CommandProcessorResponse run(String command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { String[] args = command.split("\\s+"); if (args.length < 1) { - return returnErrorResponse("Command arguments are empty."); + throw new CommandProcessorException("Encryption Processor Helper Failed: Command arguments are empty."); } if (encryptionShim == null) { - return returnErrorResponse("Hadoop encryption shim is not initialized."); + throw new CommandProcessorException( + "Encryption Processor Helper Failed: Hadoop encryption shim is not initialized."); } String action = args[0]; @@ -103,13 +97,13 @@ public CommandProcessorResponse run(String command) { } else if (action.equalsIgnoreCase("delete_key")) { deleteEncryptionKey(params); } else { - return returnErrorResponse("Unknown command action: " + action); + throw new CommandProcessorException("Encryption Processor Helper Failed: Unknown command action: " + action); } } catch (Exception e) { - return returnErrorResponse(e.getMessage()); + throw new CommandProcessorException("Encryption Processor Helper Failed: " + e.getMessage()); } - return new CommandProcessorResponse(0); + return new CommandProcessorResponse(); } /** diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/DeleteResourceProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/DeleteResourceProcessor.java index bac020d5a7..c42fb050d8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/DeleteResourceProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/DeleteResourceProcessor.java @@ -36,11 +36,11 @@ */ public class DeleteResourceProcessor implements CommandProcessor { - public static final Logger LOG = LoggerFactory.getLogger(DeleteResourceProcessor.class.getName()); - public static final LogHelper console = new LogHelper(LOG); + private static final Logger LOG = LoggerFactory.getLogger(DeleteResourceProcessor.class.getName()); + private static final LogHelper console = new LogHelper(LOG); @Override - public CommandProcessorResponse run(String command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { SessionState ss = SessionState.get(); command = new VariableSubstitution(new HiveVariableSource() { @Override @@ -56,7 +56,7 @@ public CommandProcessorResponse run(String command) { console.printError("Usage: delete [" + StringUtils.join(SessionState.ResourceType.values(), "|") + "] []*"); - return new CommandProcessorResponse(1); + throw new CommandProcessorException(1); } CommandProcessorResponse authErrResp = CommandUtil.authorizeCommand(ss, HiveOperationType.DELETE, Arrays.asList(tokens)); @@ -70,7 +70,7 @@ public CommandProcessorResponse run(String command) { ss.delete_resources(t); } - return new CommandProcessorResponse(0); + return new CommandProcessorResponse(); } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java index 
0b334e14c4..2a68da0699 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java @@ -42,8 +42,8 @@ */ public class DfsProcessor implements CommandProcessor { - public static final Logger LOG = LoggerFactory.getLogger(DfsProcessor.class.getName()); - public static final LogHelper console = new LogHelper(LOG); + private static final Logger LOG = LoggerFactory.getLogger(DfsProcessor.class.getName()); + private static final LogHelper console = new LogHelper(LOG); public static final String DFS_RESULT_HEADER = "DFS Output"; private final FsShell dfs; @@ -60,7 +60,7 @@ public DfsProcessor(Configuration conf, boolean addSchema) { } @Override - public CommandProcessorResponse run(String command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { try { @@ -87,18 +87,19 @@ public CommandProcessorResponse run(String command) { } int ret = dfs.run(tokens); + System.setOut(oldOut); if (ret != 0) { console.printError("Command " + command + " failed with exit code = " + ret); + throw new CommandProcessorException(ret); } - - System.setOut(oldOut); - return new CommandProcessorResponse(ret, null, null, dfsSchema); - + return new CommandProcessorResponse(dfsSchema, null); + } catch (CommandProcessorException e) { + throw e; } catch (Exception e) { console.printError("Exception raised from DFSShell.run " + e.getLocalizedMessage(), org.apache.hadoop.util.StringUtils .stringifyException(e)); - return new CommandProcessorResponse(1); + throw new CommandProcessorException(1); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/ErasureProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/ErasureProcessor.java index 04cc8b0e07..33bfe82dee 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/ErasureProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/ErasureProcessor.java @@ -87,24 +87,20 @@ private CommandLine parseCommandArgs(final Options opts, String[] args) throws P return parser.parse(opts, args); } - private CommandProcessorResponse returnErrorResponse(final String errmsg) { - return new CommandProcessorResponse(1, "Erasure Processor Helper Failed: " + errmsg, null); - } - private void writeTestOutput(final String msg) { SessionState.get().out.println(msg); } @Override - public CommandProcessorResponse run(String command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { String[] args = command.split("\\s+"); if (args.length < 1) { - return returnErrorResponse("Command arguments are empty."); + throw new CommandProcessorException("Erasure Processor Helper Failed: Command arguments are empty."); } if (erasureCodingShim == null) { - return returnErrorResponse("Hadoop erasure shim is not initialized."); + throw new CommandProcessorException("Erasure Processor Helper Failed: Hadoop erasure shim is not initialized."); } String action = args[0].toLowerCase(); @@ -138,13 +134,14 @@ public CommandProcessorResponse run(String command) { unsetPolicy(params); break; default: - return returnErrorResponse("Unknown erasure command action: " + action); + throw new CommandProcessorException( + "Erasure Processor Helper Failed: Unknown erasure command action: " + action); } } catch (Exception e) { - return returnErrorResponse(e.getMessage()); + throw new CommandProcessorException("Erasure Processor Helper Failed: " + e.getMessage()); } - return new CommandProcessorResponse(0); + return new 
CommandProcessorResponse(); } /** diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/ListResourceProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/ListResourceProcessor.java index afd604aa67..69439f14cb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/ListResourceProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/ListResourceProcessor.java @@ -43,14 +43,14 @@ } @Override - public CommandProcessorResponse run(String command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { SessionState ss = SessionState.get(); String[] tokens = command.split("\\s+"); SessionState.ResourceType t; if (tokens.length < 1 || (t = SessionState.find_resource_type(tokens[0])) == null) { String message = "Usage: list [" + StringUtils.join(SessionState.ResourceType.values(), "|") + "] [ []*]"; - return new CommandProcessorResponse(1, message, null); + throw new CommandProcessorException(message); } List filter = null; if (tokens.length > 1) { @@ -60,7 +60,7 @@ public CommandProcessorResponse run(String command) { if (s != null && !s.isEmpty()) { ss.out.println(StringUtils.join(s, "\n")); } - return new CommandProcessorResponse(0, null, null, SCHEMA); + return new CommandProcessorResponse(SCHEMA, null); } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/LlapCacheResourceProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/LlapCacheResourceProcessor.java index b11014cb7c..bb0c1cbd88 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/LlapCacheResourceProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/LlapCacheResourceProcessor.java @@ -61,7 +61,7 @@ import com.google.common.collect.Lists; public class LlapCacheResourceProcessor implements CommandProcessor { - public static final Logger LOG = LoggerFactory.getLogger(LlapCacheResourceProcessor.class); + private static final Logger LOG = LoggerFactory.getLogger(LlapCacheResourceProcessor.class); private Options CACHE_OPTIONS = new Options(); private HelpFormatter helpFormatter = new HelpFormatter(); @@ -69,28 +69,24 @@ CACHE_OPTIONS.addOption("purge", "purge", false, "Purge LLAP IO cache"); } - private CommandProcessorResponse returnErrorResponse(final String errmsg) { - return new CommandProcessorResponse(1, "LLAP Cache Processor Helper Failed:" + errmsg, null); - } - @Override - public CommandProcessorResponse run(String command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { SessionState ss = SessionState.get(); command = new VariableSubstitution(() -> SessionState.get().getHiveVariables()).substitute(ss.getConf(), command); String[] tokens = command.split("\\s+"); if (tokens.length < 1) { - return returnErrorResponse("Command arguments are empty."); + throw new CommandProcessorException("LLAP Cache Processor Helper Failed: Command arguments are empty."); } String params[] = Arrays.copyOfRange(tokens, 1, tokens.length); try { return llapCacheCommandHandler(ss, params); } catch (Exception e) { - return returnErrorResponse(e.getMessage()); + throw new CommandProcessorException("LLAP Cache Processor Helper Failed: " + e.getMessage()); } } - private CommandProcessorResponse llapCacheCommandHandler(final SessionState ss, - final String[] params) throws ParseException { + private CommandProcessorResponse llapCacheCommandHandler(SessionState ss, String[] params) + throws ParseException, CommandProcessorException { CommandLine args = parseCommandArgs(CACHE_OPTIONS, params); boolean 
purge = args.hasOption("purge"); String hs2Host = null; @@ -109,21 +105,19 @@ private CommandProcessorResponse llapCacheCommandHandler(final SessionState ss, try { LlapRegistryService llapRegistryService = LlapRegistryService.getClient(ss.getConf()); llapCachePurge(ss, llapRegistryService); - return createProcessorSuccessResponse(); + return new CommandProcessorResponse(getSchema(), null); } catch (Exception e) { LOG.error("Error while purging LLAP IO Cache. err: ", e); - return returnErrorResponse("Error while purging LLAP IO Cache. err: " + e.getMessage()); + throw new CommandProcessorException( + "LLAP Cache Processor Helper Failed: Error while purging LLAP IO Cache. err: " + e.getMessage()); } } else { String usage = getUsageAsString(); - return returnErrorResponse("Unsupported sub-command option. " + usage); + throw new CommandProcessorException( + "LLAP Cache Processor Helper Failed: Unsupported sub-command option. " + usage); } } - private CommandProcessorResponse createProcessorSuccessResponse() { - return new CommandProcessorResponse(0, null, null, getSchema()); - } - private Schema getSchema() { Schema sch = new Schema(); sch.addToFieldSchemas(new FieldSchema("hostName", "string", "")); diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/LlapClusterResourceProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/LlapClusterResourceProcessor.java index 64a5b10f88..c5dd688a46 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/LlapClusterResourceProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/LlapClusterResourceProcessor.java @@ -54,29 +54,25 @@ CLUSTER_OPTIONS.addOption("info", "info", false, "Information about LLAP cluster"); } - private CommandProcessorResponse returnErrorResponse(final String errmsg) { - return new CommandProcessorResponse(1, "LLAP Cluster Processor Helper Failed:" + errmsg, null); - } - @Override - public CommandProcessorResponse run(String command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { SessionState ss = SessionState.get(); command = new VariableSubstitution(() -> SessionState.get().getHiveVariables()).substitute(ss.getConf(), command); String[] tokens = command.split("\\s+"); if (tokens.length < 1) { - return returnErrorResponse("Command arguments are empty."); + throw new CommandProcessorException("LLAP Cluster Processor Helper Failed: Command arguments are empty."); } String params[] = Arrays.copyOfRange(tokens, 1, tokens.length); try { return llapClusterCommandHandler(ss, params); } catch (Exception e) { - return returnErrorResponse(e.getMessage()); + throw new CommandProcessorException("LLAP Cluster Processor Helper Failed: " + e.getMessage()); } } - private CommandProcessorResponse llapClusterCommandHandler(final SessionState ss, - final String[] params) throws ParseException { + private CommandProcessorResponse llapClusterCommandHandler(SessionState ss, String[] params) + throws ParseException, CommandProcessorException { CommandLine args = parseCommandArgs(CLUSTER_OPTIONS, params); String hs2Host = null; if (ss.isHiveServerQuery()) { @@ -102,21 +98,19 @@ private CommandProcessorResponse llapClusterCommandHandler(final SessionState ss instance.getRpcPort(), instance.getResource().getMemory() * 1024L * 1024L, instance.getResource().getVirtualCores())); } - return createProcessorSuccessResponse(); + return new CommandProcessorResponse(getSchema(), null); } catch (Exception e) { LOG.error("Unable to list LLAP instances. 
err: ", e); - return returnErrorResponse("Unable to list LLAP instances. err: " + e.getMessage()); + throw new CommandProcessorException( + "LLAP Cluster Processor Helper Failed: Unable to list LLAP instances. err: " + e.getMessage()); } } else { String usage = getUsageAsString(); - return returnErrorResponse("Unsupported sub-command option. " + usage); + throw new CommandProcessorException( + "LLAP Cluster Processor Helper Failed: Unsupported sub-command option. " + usage); } } - private CommandProcessorResponse createProcessorSuccessResponse() { - return new CommandProcessorResponse(0, null, null, getSchema()); - } - private Schema getSchema() { Schema sch = new Schema(); sch.addToFieldSchemas(new FieldSchema("applicationId", "string", "")); diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/ReloadProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/ReloadProcessor.java index bcbc03083e..e9b1296274 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/ReloadProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/ReloadProcessor.java @@ -31,15 +31,15 @@ private static final Logger LOG = LoggerFactory.getLogger(ReloadProcessor.class); @Override - public CommandProcessorResponse run(String command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { SessionState ss = SessionState.get(); try { ss.loadReloadableAuxJars(); } catch (IOException e) { LOG.error("fail to reload auxiliary jar files", e); - return CommandProcessorResponse.create(e); + throw new CommandProcessorException(e.getMessage(), e); } - return new CommandProcessorResponse(0); + return new CommandProcessorResponse(); } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java index ce65ccd4bc..150362417c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/ResetProcessor.java @@ -24,7 +24,6 @@ import java.util.Map; import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; @@ -36,18 +35,17 @@ import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; import org.apache.hadoop.hive.ql.session.SessionState; - public class ResetProcessor implements CommandProcessor { private final static String DEFAULT_ARG = "-d"; @Override - public CommandProcessorResponse run(String command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { return run(SessionState.get(), command); } @VisibleForTesting - CommandProcessorResponse run(SessionState ss, String command) { + CommandProcessorResponse run(SessionState ss, String command) throws CommandProcessorException { CommandProcessorResponse authErrResp = CommandUtil.authorizeCommand(ss, HiveOperationType.RESET, Arrays.asList(command)); if (authErrResp != null) { @@ -57,7 +55,7 @@ CommandProcessorResponse run(SessionState ss, String command) { command = command.trim(); if (StringUtils.isBlank(command)) { resetOverridesOnly(ss); - return new CommandProcessorResponse(0); + return new CommandProcessorResponse(); } String[] parts = command.split("\\s+"); boolean isDefault = false; @@ -73,22 +71,22 @@ CommandProcessorResponse run(SessionState ss, String command) { } } if (varnames.isEmpty()) { - return new CommandProcessorResponse(1, "No variable names specified", "42000"); + throw new 
CommandProcessorException(1, -1, "No variable names specified", "42000", null); } - String message = ""; + String variableNames = ""; for (String varname : varnames) { if (isDefault) { - if (!message.isEmpty()) { - message += ", "; + if (!variableNames.isEmpty()) { + variableNames += ", "; } - message += varname; + variableNames += varname; resetToDefault(ss, varname); } else { resetOverrideOnly(ss, varname); } } - return new CommandProcessorResponse(0, isDefault - ? Lists.newArrayList("Resetting " + message + " to default values") : null); + String message = isDefault ? "Resetting " + variableNames + " to default values" : null; + return new CommandProcessorResponse(null, message); } private static void resetOverridesOnly(SessionState ss) { @@ -117,7 +115,8 @@ private static void setSessionVariableFromConf(SessionState ss, String varname, } } - private static CommandProcessorResponse resetToDefault(SessionState ss, String varname) { + private static CommandProcessorResponse resetToDefault(SessionState ss, String varname) + throws CommandProcessorException { varname = varname.trim(); try { String nonErrorMessage = null; @@ -141,11 +140,10 @@ private static CommandProcessorResponse resetToDefault(SessionState ss, String v SessionState.get().updateHistory(Boolean.parseBoolean(defaultVal), ss); } } - return nonErrorMessage == null ? new CommandProcessorResponse(0) - : new CommandProcessorResponse(0, Lists.newArrayList(nonErrorMessage)); + return new CommandProcessorResponse(null, nonErrorMessage); } catch (Exception e) { - return new CommandProcessorResponse(1, e.getMessage(), "42000", - e instanceof IllegalArgumentException ? null : e); + Throwable exception = e instanceof IllegalArgumentException ? null : e; + throw new CommandProcessorException(1, -1, e.getMessage(), "42000", exception); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java index 4ede85364e..82d275ecdf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java @@ -44,7 +44,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Lists; import com.google.common.collect.Sets; /** @@ -159,17 +158,16 @@ private void dumpOption(String s) { } } - public CommandProcessorResponse executeSetVariable(String varname, String varvalue) { + public CommandProcessorResponse executeSetVariable(String varname, String varvalue) throws CommandProcessorException { try { return setVariable(varname, varvalue); } catch (Exception e) { - return new CommandProcessorResponse(1, e.getMessage(), "42000", - e instanceof IllegalArgumentException ? null : e); + Throwable exception = e instanceof IllegalArgumentException ? null : e; + throw new CommandProcessorException(1, -1, e.getMessage(), "42000", exception); } } - public static CommandProcessorResponse setVariable( - String varname, String varvalue) throws Exception { + public static CommandProcessorResponse setVariable(String varname, String varvalue) throws Exception { SessionState ss = SessionState.get(); if (varvalue.contains("\n")){ ss.err.println("Warning: Value had a \\n character in it."); @@ -178,7 +176,7 @@ public static CommandProcessorResponse setVariable( String nonErrorMessage = null; if (varname.startsWith(ENV_PREFIX)){ ss.err.println("env:* variables can not be set."); - return new CommandProcessorResponse(1); // Should we propagate the error message properly? 
+ throw new CommandProcessorException(1); // Should we propagate the error message properly? } else if (varname.startsWith(SYSTEM_PREFIX)){ String propName = varname.substring(SYSTEM_PREFIX.length()); System.getProperties() @@ -214,8 +212,7 @@ public static CommandProcessorResponse setVariable( SessionState.get().updateHistory(Boolean.parseBoolean(varvalue), ss); } } - return nonErrorMessage == null ? new CommandProcessorResponse(0) - : new CommandProcessorResponse(0, Lists.newArrayList(nonErrorMessage)); + return new CommandProcessorResponse(null, nonErrorMessage); } static String setConf(String varname, String key, String varvalue, boolean register) @@ -291,7 +288,7 @@ private CommandProcessorResponse getVariable(String varname) throws Exception { SessionState ss = SessionState.get(); if (varname.equals("silent")){ ss.out.println("silent" + "=" + ss.getIsSilent()); - return createProcessorSuccessResponse(); + return new CommandProcessorResponse(getSchema(), null); } if (varname.startsWith(SYSTEM_PREFIX)) { String propName = varname.substring(SYSTEM_PREFIX.length()); @@ -302,10 +299,10 @@ private CommandProcessorResponse getVariable(String varname) throws Exception { } else { ss.out.println(SYSTEM_PREFIX + propName + "=" + result); } - return createProcessorSuccessResponse(); + return new CommandProcessorResponse(getSchema(), null); } else { ss.out.println(propName + " is undefined as a system property"); - return new CommandProcessorResponse(1); + throw new CommandProcessorException(1); } } else if (varname.indexOf(ENV_PREFIX) == 0) { String var = varname.substring(ENV_PREFIX.length()); @@ -315,31 +312,31 @@ private CommandProcessorResponse getVariable(String varname) throws Exception { } else { ss.out.println(ENV_PREFIX + var + "=" + System.getenv(var)); } - return createProcessorSuccessResponse(); + return new CommandProcessorResponse(getSchema(), null); } else { ss.out.println(varname + " is undefined as an environmental variable"); - return new CommandProcessorResponse(1); + throw new CommandProcessorException(1); } } else if (varname.indexOf(HIVECONF_PREFIX) == 0) { String var = varname.substring(HIVECONF_PREFIX.length()); if (ss.getConf().isHiddenConfig(var)) { ss.out.println(HIVECONF_PREFIX + var + " is a hidden config"); - return createProcessorSuccessResponse(); + return new CommandProcessorResponse(getSchema(), null); } if (ss.getConf().get(var) != null) { ss.out.println(HIVECONF_PREFIX + var + "=" + ss.getConf().get(var)); - return createProcessorSuccessResponse(); + return new CommandProcessorResponse(getSchema(), null); } else { ss.out.println(varname + " is undefined as a hive configuration variable"); - return new CommandProcessorResponse(1); + throw new CommandProcessorException(1); } } else if (varname.indexOf(HIVEVAR_PREFIX) == 0) { String var = varname.substring(HIVEVAR_PREFIX.length()); if (ss.getHiveVariables().get(var) != null) { ss.out.println(HIVEVAR_PREFIX + var + "=" + ss.getHiveVariables().get(var)); - return createProcessorSuccessResponse(); + return new CommandProcessorResponse(getSchema(), null); } else { ss.out.println(varname + " is undefined as a hive variable"); - return new CommandProcessorResponse(1); + throw new CommandProcessorException(1); } } else if (varname.indexOf(METACONF_PREFIX) == 0) { String var = varname.substring(METACONF_PREFIX.length()); @@ -347,29 +344,25 @@ private CommandProcessorResponse getVariable(String varname) throws Exception { String value = hive.getMetaConf(var); if (value != null) { ss.out.println(METACONF_PREFIX + var + "=" 
+ value); - return createProcessorSuccessResponse(); + return new CommandProcessorResponse(getSchema(), null); } else { ss.out.println(varname + " is undefined as a hive meta variable"); - return new CommandProcessorResponse(1); + throw new CommandProcessorException(1); } } else { dumpOption(varname); - return createProcessorSuccessResponse(); + return new CommandProcessorResponse(getSchema(), null); } } - private CommandProcessorResponse createProcessorSuccessResponse() { - return new CommandProcessorResponse(0, null, null, getSchema()); - } - @Override - public CommandProcessorResponse run(String command) { + public CommandProcessorResponse run(String command) throws CommandProcessorException { SessionState ss = SessionState.get(); String nwcmd = command.trim(); if (nwcmd.equals("")) { dumpOptions(ss.getConf().getChangedProperties()); - return createProcessorSuccessResponse(); + return new CommandProcessorResponse(getSchema(), null); } if (nwcmd.equals("-v")) { @@ -383,13 +376,13 @@ public CommandProcessorResponse run(String command) { (Configuration) clazz.getConstructor(Configuration.class).newInstance(ss.getConf()); properties = HiveConf.getProperties(tezConf); } catch (Exception e) { - return new CommandProcessorResponse(1, e.getMessage(), "42000", e); + throw new CommandProcessorException(1, -1, e.getMessage(), "42000", e); } } else { properties = ss.getConf().getAllProperties(); } dumpOptions(properties); - return createProcessorSuccessResponse(); + return new CommandProcessorResponse(getSchema(), null); } // Special handling for time-zone @@ -411,14 +404,14 @@ public CommandProcessorResponse run(String command) { } if (part[0].equals("silent")) { ss.setIsSilent(getBoolean(part[1])); - return new CommandProcessorResponse(0); + return new CommandProcessorResponse(); } return executeSetVariable(part[0],part[1]); } try { return getVariable(nwcmd); } catch (Exception e) { - return new CommandProcessorResponse(1, e.getMessage(), "42000", e); + throw new CommandProcessorException(1, -1, e.getMessage(), "42000", e); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecDriver.java ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecDriver.java index e8bf9dcd15..dea46c022c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecDriver.java +++ ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecDriver.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.mapper.PlanMapper; import org.apache.hadoop.hive.ql.plan.mapper.StatsSource; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -119,7 +120,7 @@ public int compile(String command, boolean resetTaskIds) { } @Override - public CommandProcessorResponse compileAndRespond(String statement) { + public CommandProcessorResponse compileAndRespond(String statement) throws CommandProcessorException { currentQuery = statement; return coreDriver.compileAndRespond(statement); } @@ -145,7 +146,7 @@ public void setOperationId(String guid64) { } @Override - public CommandProcessorResponse run() { + public CommandProcessorResponse run() throws CommandProcessorException { executionIndex = 0; int maxExecutuions = 1 + coreDriver.getConf().getIntVar(ConfVars.HIVE_QUERY_MAX_REEXECUTION_COUNT); @@ -157,24 +158,35 @@ public CommandProcessorResponse run() { } coreDriver.getContext().setExecutionIndex(executionIndex); LOG.info("Execution 
#{} of query", executionIndex); - CommandProcessorResponse cpr = coreDriver.run(); + CommandProcessorResponse cpr = null; + CommandProcessorException cpe = null; + try { + cpr = coreDriver.run(); + } catch (CommandProcessorException e) { + cpe = e; + } PlanMapper oldPlanMapper = coreDriver.getPlanMapper(); - afterExecute(oldPlanMapper, cpr.getResponseCode() == 0); + afterExecute(oldPlanMapper, cpr != null); boolean shouldReExecute = explainReOptimization && executionIndex==1; - shouldReExecute |= cpr.getResponseCode() != 0 && shouldReExecute(); + shouldReExecute |= cpr == null && shouldReExecute(); if (executionIndex >= maxExecutuions || !shouldReExecute) { - return cpr; + if (cpr != null) { + return cpr; + } else { + throw cpe; + } } LOG.info("Preparing to re-execute query"); prepareToReExecute(); - CommandProcessorResponse compile_resp = coreDriver.compileAndRespond(currentQuery); - if (compile_resp.failed()) { + try { + coreDriver.compileAndRespond(currentQuery); + } catch (CommandProcessorException e) { LOG.error("Recompilation of the query failed; this is unexpected."); // FIXME: somehow place pointers that re-execution compilation have failed; the query have been successfully compiled before? - return compile_resp; + throw e; } PlanMapper newPlanMapper = coreDriver.getPlanMapper(); @@ -213,11 +225,8 @@ private boolean shouldReExecute() { } @Override - public CommandProcessorResponse run(String command) { - CommandProcessorResponse r0 = compileAndRespond(command); - if (r0.getResponseCode() != 0) { - return r0; - } + public CommandProcessorResponse run(String command) throws CommandProcessorException { + compileAndRespond(command); return run(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 4fccfff492..052f948be8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.MetaStoreThread; @@ -41,7 +40,7 @@ import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.QueryState; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -65,7 +64,6 @@ static final private String CLASS_NAME = Worker.class.getName(); static final private Logger LOG = LoggerFactory.getLogger(CLASS_NAME); static final private long SLEEP_TIME = 10000; - static final private int baseThreadNum = 10002; private String workerName; private JobConf mrJob; // the MR job for compaction @@ -342,16 +340,17 @@ void gatherStats() { conf.setVar(HiveConf.ConfVars.METASTOREURIS,""); //todo: use DriverUtils.runOnDriver() here - Driver d = new Driver(new QueryState.Builder().withGenerateNewQueryId(true).withHiveConf(conf).build(), userName); + QueryState queryState = new 
QueryState.Builder().withGenerateNewQueryId(true).withHiveConf(conf).build(); SessionState localSession = null; - try { + try (Driver d = new Driver(queryState, userName)) { if (SessionState.get() == null) { localSession = new SessionState(conf); SessionState.start(localSession); } - CommandProcessorResponse cpr = d.run(sb.toString()); - if (cpr.getResponseCode() != 0) { - LOG.warn(ci + ": " + sb.toString() + " failed due to: " + cpr); + try { + d.run(sb.toString()); + } catch (CommandProcessorException e) { + LOG.warn(ci + ": " + sb.toString() + " failed due to: " + e); } } finally { if (localSession != null) { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java index b6974fa420..48f476e152 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java @@ -80,7 +80,7 @@ import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.ql.plan.TezWork; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.udf.UDFType; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -94,7 +94,6 @@ import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.SplitLocationInfo; -import org.apache.hadoop.registry.client.binding.RegistryUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -311,9 +310,10 @@ private PlanFragment createPlanFragment(String query, ApplicationId splitsAppId) DriverCleanup driverCleanup = new DriverCleanup(driver, txnManager, splitsAppId.toString()); boolean needsCleanup = true; try { - CommandProcessorResponse cpr = driver.compileAndRespond(query, false); - if (cpr.getResponseCode() != 0) { - throw new HiveException("Failed to compile query: " + cpr.getException()); + try { + driver.compileAndRespond(query, false); + } catch (CommandProcessorException e) { + throw new HiveException("Failed to compile query: " + e.getException()); } QueryPlan plan = driver.getPlan(); @@ -345,17 +345,18 @@ private PlanFragment createPlanFragment(String query, ApplicationId splitsAppId) driver.releaseLocksAndCommitOrRollback(false); driver.releaseResources(); HiveConf.setVar(conf, ConfVars.HIVE_EXECUTION_MODE, originalMode); - cpr = driver.run(ctas, false); - - if(cpr.getResponseCode() != 0) { - throw new HiveException("Failed to create temp table: " + cpr.getException()); + try { + driver.run(ctas, false); + } catch (CommandProcessorException e) { + throw new HiveException("Failed to create temp table: " + e.getException()); } HiveConf.setVar(conf, ConfVars.HIVE_EXECUTION_MODE, "llap"); query = "select * from " + tableName; - cpr = driver.compileAndRespond(query, true); - if(cpr.getResponseCode() != 0) { - throw new HiveException("Failed to create temp table: "+cpr.getException()); + try { + driver.compileAndRespond(query, true); + } catch (CommandProcessorException e) { + throw new HiveException("Failed to create temp table: " + e.getException()); } plan = driver.getPlan(); @@ -373,7 +374,7 @@ private PlanFragment createPlanFragment(String query, ApplicationId 
splitsAppId)
// The read will have READ_COMMITTED level semantics.
try {
driver.lockAndRespond();
- } catch (CommandProcessorResponse cpr1) {
+ } catch (CommandProcessorException cpr1) {
throw new HiveException("Failed to acquire locks", cpr1);
}
diff --git ql/src/test/org/apache/hadoop/hive/ql/TestCompileLock.java ql/src/test/org/apache/hadoop/hive/ql/TestCompileLock.java
index a9917cf109..5921044fea 100644
--- ql/src/test/org/apache/hadoop/hive/ql/TestCompileLock.java
+++ ql/src/test/org/apache/hadoop/hive/ql/TestCompileLock.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
import org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
import org.apache.hadoop.hive.ql.session.SessionState;
@@ -106,7 +107,7 @@ public void testSerializableCompilation() throws Exception {
conf.setBoolVar(HIVE_SERVER2_PARALLEL_COMPILATION, false);
initDriver(conf, 10);
- List<CommandProcessorResponse> responseList = compileAndRespond(10);
+ List<Integer> responseList = compileAndRespond(10);
verifyThatWaitingCompileOpsCountIsEqualTo(0);
verifyThatTimedOutCompileOpsCountIsZero(responseList);
@@ -120,7 +121,7 @@ public void testParallelCompilationWithSingleQuota() throws Exception {
conf.setIntVar(HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT, 1);
initDriver(conf, 10);
- List<CommandProcessorResponse> responseList = compileAndRespond(10);
+ List<Integer> responseList = compileAndRespond(10);
verifyThatWaitingCompileOpsCountIsEqualTo(0);
verifyThatTimedOutCompileOpsCountIsZero(responseList);
@@ -134,7 +135,7 @@ public void testParallelCompilationWithUnboundedQuota() throws Exception {
conf.setIntVar(HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT, -1);
initDriver(conf, 10);
- List<CommandProcessorResponse> responseList = compileAndRespond(10);
+ List<Integer> responseList = compileAndRespond(10);
verifyThatWaitingCompileOpsCountIsEqualTo(0);
verifyThatTimedOutCompileOpsCountIsZero(responseList);
@@ -148,7 +149,7 @@ public void testParallelCompilationWithUnboundedQuotaAndSingleSession() throws E
conf.setIntVar(HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT, -1);
initDriver(conf, 10);
- List<CommandProcessorResponse> responseList = compileAndRespond(true, 10);
+ List<Integer> responseList = compileAndRespond(true, 10);
verifyThatWaitingCompileOpsCountIsEqualTo(0);
verifyThatTimedOutCompileOpsCountIsZero(responseList);
@@ -163,7 +164,7 @@ public void testParallelCompilationTimeoutWithSingleQuota() throws Exception {
conf.setTimeVar(HIVE_SERVER2_COMPILE_LOCK_TIMEOUT, 1, TimeUnit.SECONDS);
initDriver(conf, 10);
- List<CommandProcessorResponse> responseList = compileAndRespond(10);
+ List<Integer> responseList = compileAndRespond(10);
verifyThatWaitingCompileOpsCountIsEqualTo(0);
verifyThatTimedOutCompileOpsCountIsNotZero(responseList);
@@ -180,7 +181,7 @@ public void testParallelCompilationTimeoutWithMultipleQuota() throws Exception {
conf.setTimeVar(HIVE_SERVER2_COMPILE_LOCK_TIMEOUT, 1, TimeUnit.SECONDS);
initDriver(conf, 10);
- List<CommandProcessorResponse> responseList = compileAndRespond(LONG_QUERY, 10);
+ List<Integer> responseList = compileAndRespond(LONG_QUERY, 10);
verifyThatWaitingCompileOpsCountIsEqualTo(0);
verifyThatTimedOutCompileOpsCount(responseList, 6);
@@ -193,7 +194,7 @@ public void testParallelCompilationWithSingleQuotaAndZeroTimeout() throws Except
conf.setTimeVar(HIVE_SERVER2_COMPILE_LOCK_TIMEOUT, 0, TimeUnit.SECONDS);
initDriver(conf, 10);
- List<CommandProcessorResponse> responseList = compileAndRespond(10);
+ List<Integer> responseList = compileAndRespond(10);
verifyThatWaitingCompileOpsCountIsEqualTo(0);
verifyThatTimedOutCompileOpsCountIsZero(responseList);
@@ -207,7 +208,7 @@ public void testParallelCompilationWithMultipleQuotas() throws Exception {
conf.setIntVar(HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT, 2);
initDriver(conf, 10);
- List<CommandProcessorResponse> responseList = compileAndRespond(10);
+ List<Integer> responseList = compileAndRespond(10);
verifyThatWaitingCompileOpsCountIsEqualTo(0);
verifyThatTimedOutCompileOpsCountIsZero(responseList);
@@ -221,17 +222,17 @@ public void testParallelCompilationWithMultipleQuotasAndClientSessionConcurrency
conf.setIntVar(HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT, 2);
initDriver(conf, 10);
- List<CommandProcessorResponse> responseList = new ArrayList<>();
+ List<Integer> responseList = new ArrayList<>();
- List<Callable<List<CommandProcessorResponse>>> callables = new ArrayList<>();
+ List<Callable<List<Integer>>> callables = new ArrayList<>();
for (int i = 0; i < 5; i++) {
callables.add(() -> compileAndRespond(true, 2));
}
ExecutorService pool = Executors.newFixedThreadPool(callables.size());
try {
- List<Future<List<CommandProcessorResponse>>> futures = pool.invokeAll(callables);
- for (Future<List<CommandProcessorResponse>> future : futures) {
+ List<Future<List<Integer>>> futures = pool.invokeAll(callables);
+ for (Future<List<Integer>> future : futures) {
responseList.addAll(future.get());
}
} finally {
@@ -244,22 +245,22 @@ public void testParallelCompilationWithMultipleQuotasAndClientSessionConcurrency
verifyThatConcurrentCompilationWasIndeed(responseList);
}
- private List<CommandProcessorResponse> compileAndRespond(int threadCount) throws Exception {
+ private List<Integer> compileAndRespond(int threadCount) throws Exception {
return compileAndRespond(SHORT_QUERY, false, threadCount);
}
- private List<CommandProcessorResponse> compileAndRespond(boolean reuseSession, int threadCount) throws Exception {
+ private List<Integer> compileAndRespond(boolean reuseSession, int threadCount) throws Exception {
return compileAndRespond(SHORT_QUERY, reuseSession, threadCount);
}
- private List<CommandProcessorResponse> compileAndRespond(String query, int threadCount) throws Exception {
+ private List<Integer> compileAndRespond(String query, int threadCount) throws Exception {
return compileAndRespond(query, false, threadCount);
}
- private List<CommandProcessorResponse> compileAndRespond(String query, boolean reuseSession, int threadCount)
+ private List<Integer> compileAndRespond(String query, boolean reuseSession, int threadCount)
throws Exception {
- List<CommandProcessorResponse> responseList = new ArrayList<>();
+ List<Integer> responseList = new ArrayList<>();
SessionState sessionState = new SessionState(conf);
List<Callable<CommandProcessorResponse>> callables = new ArrayList<>();
@@ -285,13 +286,12 @@ public void testParallelCompilationWithMultipleQuotasAndClientSessionConcurrency
for (Future<CommandProcessorResponse> future : futures) {
try {
- responseList.add(future.get());
+ future.get();
+ responseList.add(0);
} catch (ExecutionException ex) {
- responseList.add(
- (ex.getCause() instanceof CommandProcessorResponse) ?
- new CommandProcessorResponse(ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode()) :
- new CommandProcessorResponse(CONCURRENT_COMPILATION));
+ responseList.add(ex.getCause() instanceof CommandProcessorException ?
+ ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode() : CONCURRENT_COMPILATION);
}
}
} finally {
@@ -317,35 +317,31 @@ private void resetParallelCompilationLimit(HiveConf conf) throws Exception {
return Enum.valueOf((Class) type, name);
}
- private void verifyThatTimedOutCompileOpsCountIsZero(List<CommandProcessorResponse> responseList) {
- verifyErrorCount(ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode(),
- is(equalTo(0)), responseList);
+ private void verifyThatTimedOutCompileOpsCountIsZero(List<Integer> responseList) {
+ verifyErrorCount(ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode(), is(equalTo(0)), responseList);
}
- private void verifyThatTimedOutCompileOpsCountIsNotZero(List<CommandProcessorResponse> responseList) {
- verifyErrorCount(ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode(),
- is(not(equalTo(0))), responseList);
+ private void verifyThatTimedOutCompileOpsCountIsNotZero(List<Integer> responseList) {
+ verifyErrorCount(ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode(), is(not(equalTo(0))), responseList);
}
- private void verifyThatTimedOutCompileOpsCount(List<CommandProcessorResponse> responseList, int count) {
- verifyErrorCount(ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode(),
- is(equalTo(count)), responseList);
+ private void verifyThatTimedOutCompileOpsCount(List<Integer> responseList, int count) {
+ verifyErrorCount(ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode(), is(equalTo(count)), responseList);
}
- private void verifyThatConcurrentCompilationWasIndeed(List<CommandProcessorResponse> responseList){
- verifyErrorCount(CONCURRENT_COMPILATION,
- is(not(equalTo(0))), responseList);
+ private void verifyThatConcurrentCompilationWasIndeed(List<Integer> responseList){
+ verifyErrorCount(CONCURRENT_COMPILATION, is(not(equalTo(0))), responseList);
}
- private void verifyThatNoConcurrentCompilationWasIndeed(List<CommandProcessorResponse> responseList){
- verifyErrorCount(CONCURRENT_COMPILATION,
- is(equalTo(0)), responseList);
+ private void verifyThatNoConcurrentCompilationWasIndeed(List<Integer> responseList){
+ verifyErrorCount(CONCURRENT_COMPILATION, is(equalTo(0)), responseList);
}
- private void verifyErrorCount(int code, Matcher matcher, List<CommandProcessorResponse> responseList) {
+
+ private void verifyErrorCount(int code, Matcher matcher, List<Integer> responseList) {
int count = 0;
- for(CommandProcessorResponse response : responseList){
- if(code == response.getResponseCode()){
+ for(Integer response : responseList){
+ if (code == response){
count++;
}
}
diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnAddPartition.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnAddPartition.java
index e6bc11e6aa..0edc9120c0 100644
--- ql/src/test/org/apache/hadoop/hive/ql/TestTxnAddPartition.java
+++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnAddPartition.java
@@ -18,13 +18,11 @@
package org.apache.hadoop.hive.ql;
-import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.lockmgr.TestDbTxnManager2;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Rule;
@@ -116,10 +114,10 @@ private void addPartition(boolean isVectorized) throws Exception {
runStatementOnDriver("export table Tstage to '" + getWarehouseDir() + "/3'");
//should be an error since p=3 exists
- CommandProcessorResponse cpr = runStatementOnDriverNegative(
- "ALTER TABLE T ADD PARTITION (p=0) location '" + getWarehouseDir() + "/3/data'");
- Assert.assertTrue("add existing partition", 
cpr.getErrorMessage() != null - && cpr.getErrorMessage().contains("Partition already exists")); + CommandProcessorException e = + runStatementOnDriverNegative("ALTER TABLE T ADD PARTITION (p=0) location '" + getWarehouseDir() + "/3/data'"); + Assert.assertTrue("add existing partition", + e.getErrorMessage() != null && e.getErrorMessage().contains("Partition already exists")); //should be no-op since p=3 exists String stmt = "ALTER TABLE T ADD IF NOT EXISTS " + @@ -190,10 +188,10 @@ private void addPartitionMM(boolean isVectorized) throws Exception { runStatementOnDriver("export table Tstage to '" + getWarehouseDir() + "/3'"); //should be an error since p=3 exists - CommandProcessorResponse cpr = runStatementOnDriverNegative( - "ALTER TABLE T ADD PARTITION (p=0) location '" + getWarehouseDir() + "/3/data'"); - Assert.assertTrue("add existing partition", cpr.getErrorMessage() != null - && cpr.getErrorMessage().contains("Partition already exists")); + CommandProcessorException e = + runStatementOnDriverNegative("ALTER TABLE T ADD PARTITION (p=0) location '" + getWarehouseDir() + "/3/data'"); + Assert.assertTrue("add existing partition", + e.getErrorMessage() != null && e.getErrorMessage().contains("Partition already exists")); //should be no-op since p=3 exists runStatementOnDriver("ALTER TABLE T ADD IF NOT EXISTS " + @@ -256,10 +254,8 @@ public void addPartitionRename() throws Exception { runStatementOnDriver("insert into Tstage values(0,2),(1,4)"); runStatementOnDriver("export table Tstage to '" + getWarehouseDir() + "/1'"); FileSystem fs = FileSystem.get(hiveConf); - FileStatus[] status = fs.listStatus(new Path(getWarehouseDir() + "/1/data"), - AcidUtils.originalBucketFilter); - boolean b = fs.rename(new Path(getWarehouseDir() + "/1/data/000000_0"), new Path(getWarehouseDir() + "/1/data/part-m000")); - b = fs.rename(new Path(getWarehouseDir() + "/1/data/000001_0"), new Path(getWarehouseDir() + "/1/data/part-m001")); + fs.rename(new Path(getWarehouseDir() + "/1/data/000000_0"), new Path(getWarehouseDir() + "/1/data/part-m000")); + fs.rename(new Path(getWarehouseDir() + "/1/data/000001_0"), new Path(getWarehouseDir() + "/1/data/part-m001")); runStatementOnDriver("ALTER TABLE T ADD PARTITION (p=0) location '" + getWarehouseDir() + "/1/data'"); diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java index 3a509dae5f..900290ef34 100644 --- ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java +++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java @@ -67,7 +67,7 @@ import org.apache.hadoop.hive.ql.lockmgr.TestDbTxnManager2; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.thrift.TException; import org.junit.Assert; @@ -89,8 +89,7 @@ static final private Logger LOG = LoggerFactory.getLogger(TestTxnCommands.class); private static final String TEST_DATA_DIR = new File(System.getProperty("java.io.tmpdir") + - File.separator + TestTxnCommands.class.getCanonicalName() - + "-" + System.currentTimeMillis() + File.separator + TestTxnCommands.class.getCanonicalName() + "-" + System.currentTimeMillis() ).getPath().replaceAll("\\\\", "/"); @Override protected String getTestDataDir() { @@ -150,7 +149,7 @@ private void dumpBucketData(Table table, long 
writeId, int stmtId, int bucketNum // FileDump.printJsonData(conf, bucket.toString(), delta); // } // catch(FileNotFoundException ex) { - ;//this happens if you change BUCKET_COUNT +// ; //this happens if you change BUCKET_COUNT // } delta.close(); } @@ -179,8 +178,9 @@ public void testSimpleAcidInsert() throws Exception { dumpTableData(Table.ACIDTBL, 1, 0); dumpTableData(Table.ACIDTBL, 2, 0); runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b"); - CommandProcessorResponse cpr = runStatementOnDriverNegative("COMMIT");//txn started implicitly by previous statement - Assert.assertEquals("Error didn't match: " + cpr, ErrorMsg.OP_NOT_ALLOWED_WITHOUT_TXN.getErrorCode(), cpr.getErrorCode()); + CommandProcessorException e = runStatementOnDriverNegative("COMMIT"); //txn started implicitly by previous statement + Assert.assertEquals("Error didn't match: " + e, + ErrorMsg.OP_NOT_ALLOWED_WITHOUT_TXN.getErrorCode(), e.getErrorCode()); List rs1 = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b"); Assert.assertEquals("Data didn't match inside tx (rs0)", allData, rs1); } @@ -271,21 +271,19 @@ public void run() { throw new RuntimeException(e); } QueryState qs = new QueryState.Builder().withHiveConf(hiveConf).nonIsolated().build(); - Driver d = new Driver(qs, null); - try { + try (Driver d = new Driver(qs, null)) { LOG.info("Ready to run the query: " + query); syncThreadStart(cdlIn, cdlOut); try { - CommandProcessorResponse cpr = d.run(query); - if(cpr.getResponseCode() != 0) { - throw new RuntimeException(query + " failed: " + cpr); + try { + d.run(query); + } catch (CommandProcessorException e) { + throw new RuntimeException(query + " failed: " + e); } d.getResults(new ArrayList()); } catch (Exception e) { throw new RuntimeException(e); } - } finally { - d.close(); } } } @@ -528,25 +526,26 @@ private void verifyMmExportPaths(List paths, int deltasOrBases) { @Test public void testErrors() throws Exception { runStatementOnDriver("start transaction"); - CommandProcessorResponse cpr2 = runStatementOnDriverNegative("create table foo(x int, y int)"); - Assert.assertEquals("Expected DDL to fail in an open txn", ErrorMsg.OP_NOT_ALLOWED_IN_TXN.getErrorCode(), cpr2.getErrorCode()); - CommandProcessorResponse cpr3 = runStatementOnDriverNegative("update " + Table.ACIDTBL + " set a = 1 where b != 1"); + CommandProcessorException e1 = runStatementOnDriverNegative("create table foo(x int, y int)"); + Assert.assertEquals("Expected DDL to fail in an open txn", + ErrorMsg.OP_NOT_ALLOWED_IN_TXN.getErrorCode(), e1.getErrorCode()); + CommandProcessorException e2 = runStatementOnDriverNegative("update " + Table.ACIDTBL + " set a = 1 where b != 1"); Assert.assertEquals("Expected update of bucket column to fail", - "FAILED: SemanticException [Error 10302]: Updating values of bucketing columns is not supported. Column a.", - cpr3.getErrorMessage()); + "FAILED: SemanticException [Error 10302]: Updating values of bucketing columns is not supported. 
Column a.", + e2.getErrorMessage()); Assert.assertEquals("Expected update of bucket column to fail", - ErrorMsg.UPDATE_CANNOT_UPDATE_BUCKET_VALUE.getErrorCode(), cpr3.getErrorCode()); - cpr3 = runStatementOnDriverNegative("commit");//not allowed in w/o tx - Assert.assertEquals("Error didn't match: " + cpr3, - ErrorMsg.OP_NOT_ALLOWED_WITHOUT_TXN.getErrorCode(), cpr3.getErrorCode()); - cpr3 = runStatementOnDriverNegative("rollback");//not allowed in w/o tx - Assert.assertEquals("Error didn't match: " + cpr3, - ErrorMsg.OP_NOT_ALLOWED_WITHOUT_TXN.getErrorCode(), cpr3.getErrorCode()); + ErrorMsg.UPDATE_CANNOT_UPDATE_BUCKET_VALUE.getErrorCode(), e2.getErrorCode()); + CommandProcessorException e3 = runStatementOnDriverNegative("commit"); //not allowed in w/o tx + Assert.assertEquals("Error didn't match: " + e3, + ErrorMsg.OP_NOT_ALLOWED_WITHOUT_TXN.getErrorCode(), e3.getErrorCode()); + CommandProcessorException e4 = runStatementOnDriverNegative("rollback"); //not allowed in w/o tx + Assert.assertEquals("Error didn't match: " + e4, + ErrorMsg.OP_NOT_ALLOWED_WITHOUT_TXN.getErrorCode(), e4.getErrorCode()); runStatementOnDriver("start transaction"); - cpr3 = runStatementOnDriverNegative("start transaction");//not allowed in a tx + CommandProcessorException e5 = runStatementOnDriverNegative("start transaction"); //not allowed in a tx Assert.assertEquals("Expected start transaction to fail", - ErrorMsg.OP_NOT_ALLOWED_IN_TXN.getErrorCode(), cpr3.getErrorCode()); - runStatementOnDriver("start transaction");//ok since previously opened txn was killed + ErrorMsg.OP_NOT_ALLOWED_IN_TXN.getErrorCode(), e5.getErrorCode()); + runStatementOnDriver("start transaction"); //ok since previously opened txn was killed runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(1,2)"); List rs0 = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b"); Assert.assertEquals("Can't see my own write", 1, rs0.size()); @@ -554,6 +553,7 @@ public void testErrors() throws Exception { rs0 = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b"); Assert.assertEquals("Can't see my own write", 1, rs0.size()); } + @Test public void testReadMyOwnInsert() throws Exception { runStatementOnDriver("START TRANSACTION"); @@ -575,10 +575,10 @@ public void testImplicitRollback() throws Exception { List rs0 = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b"); Assert.assertEquals("Can't see my own write", 1, rs0.size()); //next command should produce an error - CommandProcessorResponse cpr = runStatementOnDriverNegative("select * from no_such_table"); + CommandProcessorException e = runStatementOnDriverNegative("select * from no_such_table"); Assert.assertEquals("Txn didn't fail?", - "FAILED: SemanticException [Error 10001]: Line 1:14 Table not found 'no_such_table'", - cpr.getErrorMessage()); + "FAILED: SemanticException [Error 10001]: Line 1:14 Table not found 'no_such_table'", + e.getErrorMessage()); runStatementOnDriver("start transaction"); List rs1 = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b"); runStatementOnDriver("commit"); @@ -707,7 +707,7 @@ public void testMultipleDelete() throws Exception { @Test public void testDeleteIn() throws Exception { runStatementOnDriver("delete from " + Table.ACIDTBL + " where a IN (SELECT A.a from " + - Table.ACIDTBL + " A)"); + Table.ACIDTBL + " A)"); int[][] tableData = {{1,2},{3,2},{5,2},{1,3},{3,3},{5,3}}; runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + 
makeValuesClause(tableData)); runStatementOnDriver("insert into " + Table.ACIDTBL2 + "(a,b,c) values(1,7,17),(3,7,17)"); @@ -730,8 +730,9 @@ public void testTimeOutReaper() throws Exception { //this will abort the txn houseKeeperService.run(); //this should fail because txn aborted due to timeout - CommandProcessorResponse cpr = runStatementOnDriverNegative("delete from " + Table.ACIDTBL + " where a = 5"); - Assert.assertTrue("Actual: " + cpr.getErrorMessage(), cpr.getErrorMessage().contains("Transaction manager has aborted the transaction txnid:1")); + CommandProcessorException e = runStatementOnDriverNegative("delete from " + Table.ACIDTBL + " where a = 5"); + Assert.assertTrue("Actual: " + e.getErrorMessage(), + e.getErrorMessage().contains("Transaction manager has aborted the transaction txnid:1")); //now test that we don't timeout locks we should not //heartbeater should be running in the background every 1/2 second @@ -785,7 +786,7 @@ public void testTimeOutReaper() throws Exception { vals = s.split("\\s+"); Assert.assertEquals("Didn't get expected timestamps", 2, vals.length); Assert.assertTrue("Heartbeat didn't progress: (old,new) (" + lastHeartbeat + "," + vals[1]+ ")", - lastHeartbeat < Long.parseLong(vals[1])); + lastHeartbeat < Long.parseLong(vals[1])); runStatementOnDriver("rollback"); slr = txnHandler.showLocks(new ShowLocksRequest()); @@ -811,21 +812,22 @@ public void exchangePartition() throws Exception { } @Test public void testMergeNegative() throws Exception { - CommandProcessorResponse cpr = runStatementOnDriverNegative("MERGE INTO " + Table.ACIDTBL + - " target USING " + Table.NONACIDORCTBL + - " source\nON target.a = source.a " + - "\nWHEN MATCHED THEN UPDATE set b = 1 " + - "\nWHEN MATCHED THEN DELETE " + - "\nWHEN NOT MATCHED AND a < 1 THEN INSERT VALUES(1,2)"); - Assert.assertEquals(ErrorMsg.MERGE_PREDIACTE_REQUIRED, ((HiveException)cpr.getException()).getCanonicalErrorMsg()); + CommandProcessorException e = runStatementOnDriverNegative( + "MERGE INTO " + Table.ACIDTBL + " target\n" + + "USING " + Table.NONACIDORCTBL + " source ON target.a = source.a\n" + + "WHEN MATCHED THEN UPDATE set b = 1\n" + + "WHEN MATCHED THEN DELETE\n" + + "WHEN NOT MATCHED AND a < 1 THEN INSERT VALUES(1,2)"); + Assert.assertEquals(ErrorMsg.MERGE_PREDIACTE_REQUIRED, ((HiveException)e.getException()).getCanonicalErrorMsg()); } @Test public void testMergeNegative2() throws Exception { - CommandProcessorResponse cpr = runStatementOnDriverNegative("MERGE INTO "+ Table.ACIDTBL + - " target USING " + Table.NONACIDORCTBL + "\n source ON target.pk = source.pk " + - "\nWHEN MATCHED THEN UPDATE set b = 1 " + - "\nWHEN MATCHED THEN UPDATE set b=a"); - Assert.assertEquals(ErrorMsg.MERGE_TOO_MANY_UPDATE, ((HiveException)cpr.getException()).getCanonicalErrorMsg()); + CommandProcessorException e = runStatementOnDriverNegative( + "MERGE INTO "+ Table.ACIDTBL + + " target USING " + Table.NONACIDORCTBL + "\n source ON target.pk = source.pk " + + "\nWHEN MATCHED THEN UPDATE set b = 1 " + + "\nWHEN MATCHED THEN UPDATE set b=a"); + Assert.assertEquals(ErrorMsg.MERGE_TOO_MANY_UPDATE, ((HiveException)e.getException()).getCanonicalErrorMsg()); } /** @@ -841,25 +843,25 @@ public void testQuotedIdentifier() throws Exception { runStatementOnDriver("drop table if exists " + target); runStatementOnDriver("drop table if exists " + src); runStatementOnDriver("create table " + target + "(i int," + - "`d?*de e` decimal(5,2)," + - "vc varchar(128)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES 
('transactional'='true')"); + "`d?*de e` decimal(5,2)," + + "vc varchar(128)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); runStatementOnDriver("create table " + src + "(gh int, j decimal(5,2), k varchar(128))"); runStatementOnDriver("merge into " + target + " as `d/8` using " + src + " as `a/b` on i=gh " + - "\nwhen matched and i > 5 then delete " + - "\nwhen matched then update set vc='blah' " + - "\nwhen not matched then insert values(1,2.1,'baz')"); + "\nwhen matched and i > 5 then delete " + + "\nwhen matched then update set vc='blah' " + + "\nwhen not matched then insert values(1,2.1,'baz')"); runStatementOnDriver("merge into " + target + " as `d/8` using " + src + " as `a/b` on i=gh " + - "\nwhen matched and i > 5 then delete " + - "\nwhen matched then update set vc='blah', `d?*de e` = current_timestamp() " + - "\nwhen not matched then insert values(1,2.1, concat('baz', current_timestamp()))"); + "\nwhen matched and i > 5 then delete " + + "\nwhen matched then update set vc='blah', `d?*de e` = current_timestamp() " + + "\nwhen not matched then insert values(1,2.1, concat('baz', current_timestamp()))"); runStatementOnDriver("merge into " + target + " as `d/8` using " + src + " as `a/b` on i=gh " + - "\nwhen matched and i > 5 then delete " + - "\nwhen matched then update set vc='blah' " + - "\nwhen not matched then insert values(1,2.1,'a\\b')"); + "\nwhen matched and i > 5 then delete " + + "\nwhen matched then update set vc='blah' " + + "\nwhen not matched then insert values(1,2.1,'a\\b')"); runStatementOnDriver("merge into " + target + " as `d/8` using " + src + " as `a/b` on i=gh " + - "\nwhen matched and i > 5 then delete " + - "\nwhen matched then update set vc='∆∋'" + - "\nwhen not matched then insert values(`a/b`.gh,`a/b`.j,'c\\t')"); + "\nwhen matched and i > 5 then delete " + + "\nwhen matched then update set vc='∆∋'" + + "\nwhen not matched then insert values(`a/b`.gh,`a/b`.j,'c\\t')"); } @Test public void testQuotedIdentifier2() throws Exception { @@ -868,17 +870,17 @@ public void testQuotedIdentifier2() throws Exception { runStatementOnDriver("drop table if exists " + target); runStatementOnDriver("drop table if exists " + src); runStatementOnDriver("create table " + target + "(i int," + - "`d?*de e` decimal(5,2)," + - "vc varchar(128)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + "`d?*de e` decimal(5,2)," + + "vc varchar(128)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); runStatementOnDriver("create table " + src + "(`g/h` int, j decimal(5,2), k varchar(128))"); runStatementOnDriver("merge into " + target + " as `d/8` using " + src + " as `a/b` on i=`g/h`" + - "\nwhen matched and `g/h` > 5 then delete " + - "\nwhen matched and `g/h` < 0 then update set vc='∆∋', `d?*de e` = `d?*de e` * j + 1" + - "\nwhen not matched and `d?*de e` <> 0 then insert values(`a/b`.`g/h`,`a/b`.j,`a/b`.k)"); + "\nwhen matched and `g/h` > 5 then delete " + + "\nwhen matched and `g/h` < 0 then update set vc='∆∋', `d?*de e` = `d?*de e` * j + 1" + + "\nwhen not matched and `d?*de e` <> 0 then insert values(`a/b`.`g/h`,`a/b`.j,`a/b`.k)"); runStatementOnDriver("merge into " + target + " as `d/8` using " + src + " as `a/b` on i=`g/h`" + - "\nwhen matched and `g/h` > 5 then delete" + - "\n when matched and `g/h` < 0 then update set vc='∆∋' , `d?*de e` = `d?*de e` * j + 1 " + - "\n when not matched and `d?*de e` <> 0 then insert values(`a/b`.`g/h`,`a/b`.j,`a/b`.k)"); + "\nwhen 
matched and `g/h` > 5 then delete" + + "\n when matched and `g/h` < 0 then update set vc='∆∋' , `d?*de e` = `d?*de e` * j + 1 " + + "\n when not matched and `d?*de e` <> 0 then insert values(`a/b`.`g/h`,`a/b`.j,`a/b`.k)"); } /** * https://www.linkedin.com/pulse/how-load-slowly-changing-dimension-type-2-using-oracle-padhy @@ -910,8 +912,8 @@ public void testMergeType2SCD01() throws Exception { List r2 = runStatementOnDriver(teeCurMatch); } String stmt = "merge into target t using (" + teeCurMatch + ") s on t.key=s.key and t.cur=1 and s.`o/p\\n`=1 " + - "when matched then update set cur=0 " + - "when not matched then insert values(s.key,s.data,1)"; + "when matched then update set cur=0 " + + "when not matched then insert values(s.key,s.data,1)"; //to allow cross join from 'teeCurMatch' hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STRICT_CHECKS_CARTESIAN, false); runStatementOnDriver(stmt); @@ -935,10 +937,10 @@ public void testMergeType2SCD02() throws Exception { runStatementOnDriver("insert into source " + makeValuesClause(sourceVals)); String baseSrc = "select source.*, 0 c from source " + - "union all " + - "select source.*, 1 c from source " + - "inner join target " + - "on source.key=target.key where target.cur=1"; + "union all " + + "select source.*, 1 c from source " + + "inner join target " + + "on source.key=target.key where target.cur=1"; if(false) { //this is just for debug List r1 = runStatementOnDriver(baseSrc); @@ -947,10 +949,10 @@ public void testMergeType2SCD02() throws Exception { "\non t.key=s.key and t.cur=s.c and t.cur=1"); } String stmt = "merge into target t using " + - "(" + baseSrc + ") s " + - "on t.key=s.key and t.cur=s.c and t.cur=1 " + - "when matched then update set cur=0 " + - "when not matched then insert values(s.key,s.data,1)"; + "(" + baseSrc + ") s " + + "on t.key=s.key and t.cur=s.c and t.cur=1 " + + "when matched then update set cur=0 " + + "when not matched then insert values(s.key,s.data,1)"; runStatementOnDriver(stmt); int[][] resultVals = {{1,5,0},{1,7,1},{1,18,0},{2,6,1},{3,8,1}}; @@ -961,10 +963,10 @@ public void testMergeType2SCD02() throws Exception { @Test public void testMergeOnTezEdges() throws Exception { String query = "merge into " + Table.ACIDTBL + - " as t using " + Table.NONACIDORCTBL + " s ON t.a = s.a " + - "WHEN MATCHED AND s.a > 8 THEN DELETE " + - "WHEN MATCHED THEN UPDATE SET b = 7 " + - "WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b) "; + " as t using " + Table.NONACIDORCTBL + " s ON t.a = s.a " + + "WHEN MATCHED AND s.a > 8 THEN DELETE " + + "WHEN MATCHED THEN UPDATE SET b = 7 " + + "WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b) "; d.destroy(); HiveConf hc = new HiveConf(hiveConf); hc.setVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE, "tez"); @@ -1012,10 +1014,10 @@ public void testMergeUpdateDelete() throws Exception { int[][] vals = {{2,1},{4,3},{5,6},{7,8}}; runStatementOnDriver("insert into " + Table.ACIDTBL + " " + makeValuesClause(vals)); String query = "merge into " + Table.ACIDTBL + - " as t using " + Table.NONACIDORCTBL + " s ON t.a = s.a " + - "WHEN MATCHED AND s.a < 3 THEN update set b = 0 " + //updates (2,1) -> (2,0) - "WHEN MATCHED and t.a > 3 and t.a < 5 THEN DELETE " +//deletes (4,3) - "WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b) ";//inserts (11,11) + " as t using " + Table.NONACIDORCTBL + " s ON t.a = s.a " + + "WHEN MATCHED AND s.a < 3 THEN update set b = 0 " + //updates (2,1) -> (2,0) + "WHEN MATCHED and t.a > 3 and t.a < 5 THEN DELETE " +//deletes (4,3) + "WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b) "; //inserts 
(11,11) runStatementOnDriver(query); List r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b"); @@ -1035,9 +1037,9 @@ public void testMergeUpdateDeleteNoCardCheck() throws Exception { int[][] vals = {{2,1},{4,3},{5,6},{7,8}}; runStatementOnDriver("insert into " + Table.ACIDTBL + " " + makeValuesClause(vals)); String query = "merge into " + Table.ACIDTBL + - " as t using " + Table.NONACIDORCTBL + " s ON t.a = s.a " + - "WHEN MATCHED AND s.a < 3 THEN update set b = 0 " + - "WHEN MATCHED and t.a > 3 and t.a < 5 THEN DELETE "; + " as t using " + Table.NONACIDORCTBL + " s ON t.a = s.a " + + "WHEN MATCHED AND s.a < 3 THEN update set b = 0 " + + "WHEN MATCHED and t.a > 3 and t.a < 5 THEN DELETE "; runStatementOnDriver(query); List r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b"); @@ -1051,10 +1053,10 @@ public void testMergeDeleteUpdate() throws Exception { int[][] targetVals = {{2,1},{4,3},{5,6},{7,8}}; runStatementOnDriver("insert into " + Table.ACIDTBL + " " + makeValuesClause(targetVals)); String query = "merge into " + Table.ACIDTBL + - " as t using " + Table.NONACIDORCTBL + " s ON t.a = s.a " + - "WHEN MATCHED and s.a < 5 THEN DELETE " + - "WHEN MATCHED AND s.a < 3 THEN update set b = 0 " + - "WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b) "; + " as t using " + Table.NONACIDORCTBL + " s ON t.a = s.a " + + "WHEN MATCHED and s.a < 5 THEN DELETE " + + "WHEN MATCHED AND s.a < 3 THEN update set b = 0 " + + "WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b) "; runStatementOnDriver(query); List r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b"); @@ -1073,39 +1075,44 @@ public void testMergeCardinalityViolation() throws Exception { int[][] targetVals = {{2,1},{4,3},{5,6},{7,8}}; runStatementOnDriver("insert into " + Table.ACIDTBL + " " + makeValuesClause(targetVals)); String query = "merge into " + Table.ACIDTBL + - " as t using " + Table.NONACIDORCTBL + " s ON t.a = s.a " + - "WHEN MATCHED and s.a < 5 THEN DELETE " + - "WHEN MATCHED AND s.a < 3 THEN update set b = 0 " + - "WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b) "; + " as t using " + Table.NONACIDORCTBL + " s ON t.a = s.a " + + "WHEN MATCHED and s.a < 5 THEN DELETE " + + "WHEN MATCHED AND s.a < 3 THEN update set b = 0 " + + "WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b) "; runStatementOnDriverNegative(query); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p) values(1,1,'p1'),(2,2,'p1'),(3,3,'p1'),(4,4,'p2')"); query = "merge into " + Table.ACIDTBLPART + - " as t using " + Table.NONACIDORCTBL + " s ON t.a = s.a " + - "WHEN MATCHED and s.a < 5 THEN DELETE " + - "WHEN MATCHED AND s.a < 3 THEN update set b = 0 " + - "WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b, 'p1') "; + " as t using " + Table.NONACIDORCTBL + " s ON t.a = s.a " + + "WHEN MATCHED and s.a < 5 THEN DELETE " + + "WHEN MATCHED AND s.a < 3 THEN update set b = 0 " + + "WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b, 'p1') "; runStatementOnDriverNegative(query); } @Test public void testSetClauseFakeColumn() throws Exception { - CommandProcessorResponse cpr = runStatementOnDriverNegative("MERGE INTO "+ Table.ACIDTBL + - " target USING " + Table.NONACIDORCTBL + - "\n source ON target.a = source.a " + - "\nWHEN MATCHED THEN UPDATE set t = 1"); + CommandProcessorException e1 = runStatementOnDriverNegative( + "MERGE INTO "+ Table.ACIDTBL + " target\n" + + "USING " + Table.NONACIDORCTBL + "\n" + + " source ON target.a = source.a\n" + + "WHEN MATCHED THEN UPDATE set t = 1"); 
     Assert.assertEquals(ErrorMsg.INVALID_TARGET_COLUMN_IN_SET_CLAUSE,
-        ((HiveException)cpr.getException()).getCanonicalErrorMsg());
-    cpr = runStatementOnDriverNegative("update " + Table.ACIDTBL + " set t = 1");
+        ((HiveException)e1.getException()).getCanonicalErrorMsg());
+
+    CommandProcessorException e2 = runStatementOnDriverNegative("update " + Table.ACIDTBL + " set t = 1");
     Assert.assertEquals(ErrorMsg.INVALID_TARGET_COLUMN_IN_SET_CLAUSE,
-        ((HiveException)cpr.getException()).getCanonicalErrorMsg());
+        ((HiveException)e2.getException()).getCanonicalErrorMsg());
   }
+
   @Test
   public void testBadOnClause() throws Exception {
-    CommandProcessorResponse cpr = runStatementOnDriverNegative("merge into " + Table.ACIDTBL +
-      " trgt using (select * from " + Table.NONACIDORCTBL +
-      "src) sub on sub.a = target.a when not matched then insert values (sub.a,sub.b)");
-    Assert.assertTrue("Error didn't match: " + cpr, cpr.getErrorMessage().contains(
-      "No columns from target table 'trgt' found in ON clause '`sub`.`a` = `target`.`a`' of MERGE statement."));
-
+    CommandProcessorException e =
+        runStatementOnDriverNegative(
+            "merge into " + Table.ACIDTBL + " trgt\n" +
+            "using (select *\n" +
+            " from " + Table.NONACIDORCTBL + " src) sub on sub.a = target.a\n" +
+            "when not matched then insert values (sub.a,sub.b)");
+    Assert.assertTrue("Error didn't match: " + e, e.getErrorMessage().contains(
+        "No columns from target table 'trgt' found in ON clause '`sub`.`a` = `target`.`a`' of MERGE statement."));
   }
   /**
@@ -1196,8 +1203,8 @@ public void testMoreBucketsThanReducers() throws Exception {
     hc.setBoolVar(HiveConf.ConfVars.HIVE_EXPLAIN_USER, false);
     d = new Driver(hc);
     d.setMaxRows(10000);
-    runStatementOnDriver("insert into " + Table.ACIDTBL + " values(1,1)");//txn X write to bucket1
-    runStatementOnDriver("insert into " + Table.ACIDTBL + " values(0,0),(3,3)");// txn X + 1 write to bucket0 + bucket1
+    runStatementOnDriver("insert into " + Table.ACIDTBL + " values(1,1)"); //txn X write to bucket1
+    runStatementOnDriver("insert into " + Table.ACIDTBL + " values(0,0),(3,3)"); // txn X + 1 write to bucket0 + bucket1
     runStatementOnDriver("update " + Table.ACIDTBL + " set b = -1");
     List r = runStatementOnDriver("select * from " + Table.ACIDTBL + " order by a, b");
     int[][] expected = {{0, -1}, {1, -1}, {3, -1}};
@@ -1217,10 +1224,10 @@ public void testMoreBucketsThanReducers2() throws Exception {
     d.setMaxRows(10000);
     runStatementOnDriver("create table fourbuckets (a int, b int) clustered by (a) into 4 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
     //below value for a is bucket id, for b - txn id (logically)
-    runStatementOnDriver("insert into fourbuckets values(0,1),(1,1)");//txn X write to b0 + b1
-    runStatementOnDriver("insert into fourbuckets values(2,2),(3,2)");// txn X + 1 write to b2 + b3
-    runStatementOnDriver("insert into fourbuckets values(0,3),(1,3)");//txn X + 2 write to b0 + b1
-    runStatementOnDriver("insert into fourbuckets values(2,4),(3,4)");//txn X + 3 write to b2 + b3
+    runStatementOnDriver("insert into fourbuckets values(0,1),(1,1)"); //txn X write to b0 + b1
+    runStatementOnDriver("insert into fourbuckets values(2,2),(3,2)"); // txn X + 1 write to b2 + b3
+    runStatementOnDriver("insert into fourbuckets values(0,3),(1,3)"); //txn X + 2 write to b0 + b1
+    runStatementOnDriver("insert into fourbuckets values(2,4),(3,4)"); //txn X + 3 write to b2 + b3
     //so with 2 FileSinks and 4 buckets, FS1 should see (0,1),(2,2),(0,3)(2,4) since data is sorted by ROW__ID where tnxid is the first component
     //FS2 should see (1,1),(3,2),(1,3),(3,4)
diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index 6efec8de9d..cbc72b47ea 100644
--- ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -57,14 +57,10 @@
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService;
-import org.apache.hadoop.hive.ql.txn.compactor.Cleaner;
-import org.apache.hadoop.hive.ql.txn.compactor.CompactorThread;
 import org.apache.hadoop.hive.ql.txn.compactor.Initiator;
-import org.apache.hadoop.hive.ql.txn.compactor.Worker;
-import org.apache.hadoop.mapred.JobConf;
 import org.apache.orc.OrcFile;
 import org.apache.orc.Reader;
 import org.apache.orc.TypeDescription;
@@ -2303,9 +2299,10 @@ static String makeValuesClause(int[][] rows) {

   protected List runStatementOnDriver(String stmt) throws Exception {
     LOG.info("+runStatementOnDriver(" + stmt + ")");
-    CommandProcessorResponse cpr = d.run(stmt);
-    if(cpr.getResponseCode() != 0) {
-      throw new RuntimeException(stmt + " failed: " + cpr);
+    try {
+      d.run(stmt);
+    } catch (CommandProcessorException e) {
+      throw new RuntimeException(stmt + " failed: " + e);
     }
     List rs = new ArrayList();
     d.getResults(rs);
diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
index 3231a97009..13c739d746 100644
--- ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
+++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
@@ -20,7 +20,7 @@
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
@@ -489,9 +489,9 @@ public void testLoadAcidFile() throws Exception {
     Assert.assertTrue("Unexpcted file name", rs.get(0)
         .endsWith("t/delta_0000001_0000001_0000/bucket_00000"));
     //T2 is an acid table so this should fail
-    CommandProcessorResponse cpr = runStatementOnDriverNegative(
-        "load data local inpath '" + rs.get(0) + "' into table T2");
+    CommandProcessorException e =
+        runStatementOnDriverNegative("load data local inpath '" + rs.get(0) + "' into table T2");
     Assert.assertEquals("Unexpected error code",
-        ErrorMsg.LOAD_DATA_ACID_FILE.getErrorCode(), cpr.getErrorCode());
+        ErrorMsg.LOAD_DATA_ACID_FILE.getErrorCode(), e.getErrorCode());
   }
 }
diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
index 765962a499..4a8be40aab 100644
--- ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
+++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
@@ -28,10 +28,9 @@
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.BucketCodec;
 import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
@@ -351,13 +350,13 @@ public void testInsertOverwriteToAcidWithUnionRemove() throws Exception {
     runStatementOnDriver("drop table if exists T");
     runStatementOnDriver("create table T (a int, b int) stored as ORC TBLPROPERTIES ('transactional'='true')");
-    CommandProcessorResponse cpr = runStatementOnDriverNegative(
+    CommandProcessorException e = runStatementOnDriverNegative(
         "insert overwrite table T select a, b from " + TxnCommandsBaseForTests.Table.ACIDTBL +
         " where a between 1 and 3 group by a, b union all select a, b from " + TxnCommandsBaseForTests.Table.ACIDTBL +
         " where a between 5 and 7 union all select a, b from " + TxnCommandsBaseForTests.Table.ACIDTBL +
         " where a >= 9");
-    Assert.assertTrue("", cpr.getErrorMessage().contains("not supported due to OVERWRITE and UNION ALL"));
+    Assert.assertTrue("", e.getErrorMessage().contains("not supported due to OVERWRITE and UNION ALL"));
   }
   /**
    * The idea here is to create a non acid table that was written by multiple writers, i.e.
@@ -628,12 +627,12 @@ public void testToAcidConversion02() throws Exception {
   @Test
   public void testCtasBucketed() throws Exception {
     runStatementOnDriver("insert into " + Table.NONACIDNONBUCKET + "(a,b) values(1,2),(1,3)");
-    CommandProcessorResponse cpr = runStatementOnDriverNegative("create table myctas " +
+    CommandProcessorException e = runStatementOnDriverNegative("create table myctas " +
       "clustered by (a) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true') as " +
       "select a, b from " + Table.NONACIDORCTBL);
-    int j = ErrorMsg.CTAS_PARCOL_COEXISTENCE.getErrorCode(); //this code doesn't propagate
+    ErrorMsg.CTAS_PARCOL_COEXISTENCE.getErrorCode(); //this code doesn't propagate
     // Assert.assertEquals("Wrong msg", ErrorMsg.CTAS_PARCOL_COEXISTENCE.getErrorCode(), cpr.getErrorCode());
-    Assert.assertTrue(cpr.getErrorMessage().contains("CREATE-TABLE-AS-SELECT does not support"));
+    Assert.assertTrue(e.getErrorMessage().contains("CREATE-TABLE-AS-SELECT does not support"));
   }
   /**
    * Currently CTAS doesn't support partitioned tables. Correspondingly Acid only supports CTAS for
@@ -643,11 +642,11 @@ public void testCtasBucketed() throws Exception {
   @Test
   public void testCtasPartitioned() throws Exception {
     runStatementOnDriver("insert into " + Table.NONACIDNONBUCKET + "(a,b) values(1,2),(1,3)");
-    CommandProcessorResponse cpr = runStatementOnDriverNegative("create table myctas partitioned " +
+    CommandProcessorException e = runStatementOnDriverNegative("create table myctas partitioned " +
       "by (b int) stored as " +
       "ORC TBLPROPERTIES ('transactional'='true') as select a, b from " + Table.NONACIDORCTBL);
-    int j = ErrorMsg.CTAS_PARCOL_COEXISTENCE.getErrorCode();//this code doesn't propagate
-    Assert.assertTrue(cpr.getErrorMessage().contains("CREATE-TABLE-AS-SELECT does not support " +
+    ErrorMsg.CTAS_PARCOL_COEXISTENCE.getErrorCode(); //this code doesn't propagate
+    Assert.assertTrue(e.getErrorMessage().contains("CREATE-TABLE-AS-SELECT does not support " +
       "partitioning in the target table"));
   }
   /**
diff --git ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
index 7039b89089..f01a07e591 100644
--- ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
+++ ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
@@ -33,7 +33,7 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.txn.compactor.Cleaner;
 import org.apache.hadoop.hive.ql.txn.compactor.CompactorThread;
@@ -193,18 +193,21 @@ private static void runCompactorThread(HiveConf hiveConf, CompactorThreadType ty

   protected List runStatementOnDriver(String stmt) throws Exception {
     LOG.info("Running the query: " + stmt);
-    CommandProcessorResponse cpr = d.run(stmt);
-    if(cpr.getResponseCode() != 0) {
-      throw new RuntimeException(stmt + " failed: " + cpr);
+    try {
+      d.run(stmt);
+    } catch (CommandProcessorException e) {
+      throw new RuntimeException(stmt + " failed: " + e);
     }
     List rs = new ArrayList();
     d.getResults(rs);
     return rs;
   }
-  CommandProcessorResponse runStatementOnDriverNegative(String stmt) throws Exception {
-    CommandProcessorResponse cpr = d.run(stmt);
-    if(cpr.getResponseCode() != 0) {
-      return cpr;
+
+  CommandProcessorException runStatementOnDriverNegative(String stmt) throws Exception {
+    try {
+      d.run(stmt);
+    } catch (CommandProcessorException e) {
+      return e;
     }
     throw new RuntimeException("Didn't get expected failure!");
   }
diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
index 0f8331014a..dc00ceb398 100644
--- ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
+++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
@@ -426,23 +426,19 @@ public void testFetchOperatorContext() throws Exception {
       "tblproperties ('myprop1'='val1', 'myprop2' = 'val2')";
     Driver driver = new Driver(conf);
     CommandProcessorResponse response = driver.run(cmd);
-    assertEquals(0, response.getResponseCode());
     List result = new ArrayList();

     cmd = "load data local inpath '../data/files/employee.dat' " +
       "overwrite into table fetchOp partition (state='CA')";
     response = driver.run(cmd);
-    assertEquals(0, response.getResponseCode());

     cmd = "load data local inpath '../data/files/employee2.dat' " +
       "overwrite into table fetchOp partition (state='OR')";
     response = driver.run(cmd);
-    assertEquals(0, response.getResponseCode());

     cmd = "select * from fetchOp";
     driver.setMaxRows(500);
     response = driver.run(cmd);
-    assertEquals(0, response.getResponseCode());
     driver.getResults(result);
     assertEquals(20, result.size());
     driver.close();
diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestHiveSparkClient.java ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestHiveSparkClient.java
index 0b61a1a155..04b1db1cfa 100644
--- ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestHiveSparkClient.java
+++ ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestHiveSparkClient.java
@@ -68,7 +68,7 @@ public void testSetJobGroupAndDescription() throws Exception {
     try {
       driver = DriverFactory.newDriver(conf);
-      Assert.assertEquals(0, driver.run("create table test (col int)").getResponseCode());
+      driver.run("create table test (col int)");

       String query = "select * from test order by col";
       ((ReExecDriver)driver).compile(query, true);
@@ -102,7 +102,7 @@ public void testSetJobGroupAndDescription() throws Exception {
           .contains(sparkTask.getWork().getQueryId()));
     } finally {
       if (driver != null) {
-        Assert.assertEquals(0, driver.run("drop table if exists test").getResponseCode());
+        driver.run("drop table if exists test");
         driver.destroy();
       }
       if (sc != null) {
diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestLocalHiveSparkClient.java ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestLocalHiveSparkClient.java
index bbf3d9c05f..94991d3219 100644
--- ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestLocalHiveSparkClient.java
+++ ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestLocalHiveSparkClient.java
@@ -96,7 +96,7 @@ private void runSparkTestSession(HiveConf conf, int threadId) throws Exception {
     SparkSession sparkSession = SparkUtilities.getSparkSession(conf, SparkSessionManagerImpl.getInstance());
-    Assert.assertEquals(0, driver.run("show tables").getResponseCode());
+    driver.run("show tables");
     barrier.await();

     SparkContext sparkContext = getSparkContext(sparkSession);
diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkInvalidFileFormat.java ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkInvalidFileFormat.java
index bcc0924a08..3a662761f0 100644
--- ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkInvalidFileFormat.java
+++ ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkInvalidFileFormat.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -37,7 +38,7 @@ public class TestSparkInvalidFileFormat {
   @Test
-  public void readTextFileAsParquet() throws IOException {
+  public void readTextFileAsParquet() throws IOException, CommandProcessorException {
     HiveConf conf = new HiveConf();
     conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, SQLStdHiveAuthorizerFactory.class.getName());
@@ -56,21 +57,21 @@ public void readTextFileAsParquet() throws IOException {
     try {
       driver = DriverFactory.newDriver(conf);
-      Assert.assertEquals(0,
-          driver.run("CREATE TABLE test_table (key STRING, value STRING)").getResponseCode());
-      Assert.assertEquals(0, driver.run(
-          "LOAD DATA LOCAL INPATH '" + testFile + "' INTO TABLE test_table").getResponseCode());
-      Assert.assertEquals(0,
-          driver.run("ALTER TABLE test_table SET FILEFORMAT parquet").getResponseCode());
-      Throwable exception = driver.run(
-          "SELECT * FROM test_table ORDER BY key LIMIT 10").getException();
-      Assert.assertTrue(exception instanceof HiveException);
-      Assert.assertTrue(exception.getMessage().contains("Spark job failed due to task failures"));
-      Assert.assertTrue(exception.getMessage().contains("kv1.txt is not a Parquet file. expected " +
+      driver.run("CREATE TABLE test_table (key STRING, value STRING)");
+      driver.run("LOAD DATA LOCAL INPATH '" + testFile + "' INTO TABLE test_table");
+      driver.run("ALTER TABLE test_table SET FILEFORMAT parquet");
+      try {
+        driver.run("SELECT * FROM test_table ORDER BY key LIMIT 10");
+        assert false;
+      } catch (CommandProcessorException e) {
+        Assert.assertTrue(e.getException() instanceof HiveException);
+        Assert.assertTrue(e.getException().getMessage().contains("Spark job failed due to task failures"));
+        Assert.assertTrue(e.getException().getMessage().contains("kv1.txt is not a Parquet file. expected " +
           "magic number at tail [80, 65, 82, 49] but found [95, 57, 55, 10]"));
+      }
     } finally {
       if (driver != null) {
-        Assert.assertEquals(0, driver.run("DROP TABLE IF EXISTS test_table").getResponseCode());
+        driver.run("DROP TABLE IF EXISTS test_table");
         driver.destroy();
       }
       if (fs.exists(tmpDir)) {
diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkPlan.java ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkPlan.java
index 5badabfd5e..8e64c15162 100644
--- ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkPlan.java
+++ ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkPlan.java
@@ -26,7 +26,6 @@
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.HiveKey;
 import org.apache.hadoop.hive.ql.reexec.ReExecDriver;
-import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.io.BytesWritable;
@@ -76,7 +75,7 @@ public void testSetRDDCallSite() throws Exception {
     try {
       driver = DriverFactory.newDriver(conf);
-      Assert.assertEquals(0, driver.run("create table test (col int)").getResponseCode());
+      driver.run("create table test (col int)");
       ((ReExecDriver)driver).compile("select * from test order by col", true);

       List sparkTasks = Utilities.getSparkTasks(driver.getPlan().getRootTasks());
@@ -133,7 +132,7 @@ public void testSetRDDCallSite() throws Exception {
       Assert.assertTrue(hadoopRdd.creationSite().shortForm().contains("Map 1"));
     } finally {
       if (driver != null) {
-        Assert.assertEquals(0, driver.run("drop table if exists test").getResponseCode());
+        driver.run("drop table if exists test");
         driver.destroy();
       }
       if (sc != null) {
diff --git ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java
index b38b128061..f1a9a44e1f 100644
--- ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java
+++ ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java
@@ -36,8 +36,7 @@ public static void onetimeSetup() throws Exception {
         .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
     Driver driver = createDriver(conf);
-    int ret = driver.run("create table t1(i int)").getResponseCode();
-    assertEquals("Checking command
success", 0, ret); + driver.run("create table t1(i int)"); } @AfterClass diff --git ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java index 5b4b42b168..51e7215183 100644 --- ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java +++ ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java @@ -20,16 +20,13 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.Driver; -import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; - +import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.ArgumentMatcher; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; - import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.argThat; @@ -38,7 +35,6 @@ import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; - public class TestQueryHooks { private static HiveConf conf; @@ -58,8 +54,7 @@ public void testAllQueryLifeTimeWithParseHooks() throws Exception { QueryLifeTimeHookWithParseHooks mockHook = mock(QueryLifeTimeHookWithParseHooks.class); Driver driver = createDriver(); driver.getHookRunner().addLifeTimeHook(mockHook); - int ret = driver.run(query).getResponseCode(); - assertEquals("Expected query to succeed", 0, ret); + driver.run(query); verify(mockHook).beforeParse(argThat(argMatcher)); verify(mockHook).afterParse(argThat(argMatcher), eq(false)); @@ -76,8 +71,13 @@ public void testQueryLifeTimeWithParseHooksWithParseError() throws Exception { QueryLifeTimeHookWithParseHooks mockHook = mock(QueryLifeTimeHookWithParseHooks.class); Driver driver = createDriver(); driver.getHookRunner().addLifeTimeHook(mockHook); - int ret = driver.run(query).getResponseCode(); - assertNotEquals("Expected parsing to fail", 0, ret); + try { + driver.run(query); + Assert.fail("Expected parsing to fail"); + } catch (CommandProcessorException e) { + // we expect to get here + } + verify(mockHook).beforeParse(argThat(argMatcher)); verify(mockHook).afterParse(argThat(argMatcher), eq(true)); @@ -94,8 +94,13 @@ public void testQueryLifeTimeWithParseHooksWithCompileError() throws Exception { QueryLifeTimeHookWithParseHooks mockHook = mock(QueryLifeTimeHookWithParseHooks.class); Driver driver = createDriver(); driver.getHookRunner().addLifeTimeHook(mockHook); - int ret = driver.run(query).getResponseCode(); - assertNotEquals("Expected compilation to fail", 0, ret); + try { + driver.run(query); + Assert.fail("Expected compilation to fail"); + } catch (CommandProcessorException e) { + // we expect to get here + } + verify(mockHook).beforeParse(argThat(argMatcher)); verify(mockHook).afterParse(argThat(argMatcher), eq(false)); @@ -112,8 +117,7 @@ public void testAllQueryLifeTimeHooks() throws Exception { QueryLifeTimeHook mockHook = mock(QueryLifeTimeHook.class); Driver driver = createDriver(); driver.getHookRunner().addLifeTimeHook(mockHook); - int ret = driver.run(query).getResponseCode(); - assertEquals("Expected query to succeed", 0, ret); + driver.run(query); verify(mockHook).beforeCompile(argThat(argMatcher)); verify(mockHook).afterCompile(argThat(argMatcher), eq(false)); @@ -128,8 +132,12 @@ public void testQueryLifeTimeWithCompileError() throws Exception { QueryLifeTimeHook mockHook = mock(QueryLifeTimeHook.class); Driver driver 
= createDriver(); driver.getHookRunner().addLifeTimeHook(mockHook); - int ret = driver.run(query).getResponseCode(); - assertNotEquals("Expected compilation to fail", 0, ret); + try { + driver.run(query); + Assert.fail("Expected compilation to fail"); + } catch (CommandProcessorException e) { + // we expect to get here + } verify(mockHook).beforeCompile(argThat(argMatcher)); verify(mockHook).afterCompile(argThat(argMatcher), eq(true)); diff --git ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java index 1a1a7b14a6..afdc684410 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java @@ -149,21 +149,13 @@ public void testCombine() throws Exception { boolean tblCreated = false; try { int ecode = 0; - ecode = drv.run(createSymlinkTableCmd).getResponseCode(); - if (ecode != 0) { - throw new Exception("Create table command: " + createSymlinkTableCmd - + " failed with exit code= " + ecode); - } + drv.run(createSymlinkTableCmd); tblCreated = true; String loadFileCommand = "LOAD DATA LOCAL INPATH '" + new Path(symlinkDir, "symlink_file").toString() + "' INTO TABLE " + tblName; - ecode = drv.run(loadFileCommand).getResponseCode(); - if (ecode != 0) { - throw new Exception("Load data command: " + loadFileCommand - + " failed with exit code= " + ecode); - } + drv.run(loadFileCommand); String cmd = "select key*1 from " + tblName; ecode = drv.compile(cmd, true); @@ -196,7 +188,7 @@ public void testCombine() throws Exception { fail("Caught exception " + e); } finally { if (tblCreated) { - drv.run("drop table text_symlink_text").getResponseCode(); + drv.run("drop table text_symlink_text"); } } } diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java index 860b6e6024..8f111b9b34 100644 --- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java @@ -44,15 +44,13 @@ import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryState; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.Before; import org.junit.ComparisonFailure; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.util.ArrayList; import java.util.Arrays; @@ -85,7 +83,6 @@ * each thread. 
*/ public class TestDbTxnManager2 { - private static final Logger LOG = LoggerFactory.getLogger(TestDbTxnManager2.class); private static HiveConf conf = new HiveConf(Driver.class); private HiveTxnManager txnMgr; private Context ctx; @@ -132,8 +129,8 @@ public void testMetadataOperationLocks() throws Exception { //to make insert into non-acid take shared lock conf.setBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE, false); dropTable(new String[] {"T"}); - checkCmdOnDriver(driver.run("create table if not exists T (a int, b int)")); - checkCmdOnDriver(driver.compileAndRespond("insert into T values (1,2)", true)); + driver.run("create table if not exists T (a int, b int)"); + driver.compileAndRespond("insert into T values (1,2)", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -144,7 +141,7 @@ public void testMetadataOperationLocks() throws Exception { //simulate concurrent session HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.compileAndRespond("alter table T SET TBLPROPERTIES ('transactional'='true')", true)); + driver.compileAndRespond("alter table T SET TBLPROPERTIES ('transactional'='true')", true); ((DbTxnManager)txnMgr2).acquireLocks(driver.getPlan(), ctx, "Fiddler", false); locks = getLocks(); Assert.assertEquals("Unexpected lock count", 3, locks.size()); @@ -157,12 +154,12 @@ public void testMetadataOperationLocks() throws Exception { } @Test public void testLocksInSubquery() throws Exception { - dropTable(new String[] {"T","S", "R"}); - checkCmdOnDriver(driver.run("create table if not exists T (a int, b int)")); - checkCmdOnDriver(driver.run("create table if not exists S (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')")); - checkCmdOnDriver(driver.run("create table if not exists R (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')")); + dropTable(new String[] {"T", "S", "R"}); + driver.run("create table if not exists T (a int, b int)"); + driver.run("create table if not exists S (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table if not exists R (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(driver.compileAndRespond("delete from S where a in (select a from T where b = 1)", true)); + driver.compileAndRespond("delete from S where a in (select a from T where b = 1)", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "one"); List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -170,7 +167,7 @@ public void testLocksInSubquery() throws Exception { checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "S", null, locks); txnMgr.rollbackTxn(); - checkCmdOnDriver(driver.compileAndRespond("update S set a = 7 where a in (select a from T where b = 1)", true)); + driver.compileAndRespond("update S set a = 7 where a in (select a from T where b = 1)", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "one"); locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -178,7 +175,7 @@ public void testLocksInSubquery() throws Exception { checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "S", null, locks); txnMgr.rollbackTxn(); - 
checkCmdOnDriver(driver.compileAndRespond("insert into R select * from S where a in (select a from T where b = 1)", true)); + driver.compileAndRespond("insert into R select * from S where a in (select a from T where b = 1)", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "three"); locks = getLocks(); Assert.assertEquals("Unexpected lock count", 3, locks.size()); @@ -190,8 +187,7 @@ public void testLocksInSubquery() throws Exception { @Test public void createTable() throws Exception { dropTable(new String[] {"T"}); - CommandProcessorResponse cpr = driver.compileAndRespond("create table if not exists T (a int, b int)", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("create table if not exists T (a int, b int)", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 1, locks.size()); @@ -212,12 +208,9 @@ private void insertOverwriteCreate(boolean isTransactional) throws Exception { MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID, true); } dropTable(new String[] {"T2", "T3"}); - CommandProcessorResponse cpr = driver.run("create table if not exists T2(a int)"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table T3(a int) stored as ORC"); - checkCmdOnDriver(cpr); - cpr = driver.compileAndRespond("insert overwrite table T3 select a from T2", true); - checkCmdOnDriver(cpr); + driver.run("create table if not exists T2(a int)"); + driver.run("create table T3(a int) stored as ORC"); + driver.compileAndRespond("insert overwrite table T3 select a from T2", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -225,11 +218,10 @@ private void insertOverwriteCreate(boolean isTransactional) throws Exception { checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", "T3", null, locks); txnMgr.commitTxn(); Assert.assertEquals("Lock remained", 0, getLocks().size()); - cpr = driver.run("drop table if exists T1"); - checkCmdOnDriver(cpr); - cpr = driver.run("drop table if exists T2"); - checkCmdOnDriver(cpr); + driver.run("drop table if exists T1"); + driver.run("drop table if exists T2"); } + @Test public void insertOverwritePartitionedCreate() throws Exception { insertOverwritePartitionedCreate(true); @@ -243,14 +235,9 @@ private void insertOverwritePartitionedCreate(boolean isTransactional) throws Ex if(isTransactional) { MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID, true); } - CommandProcessorResponse cpr = driver.run("create table T4" + - "(name string, gpa double) partitioned by (age int) stored as ORC"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table T5(name string, age int, gpa double)"); - checkCmdOnDriver(cpr); - cpr = driver.compileAndRespond("INSERT OVERWRITE TABLE T4 PARTITION (age) SELECT " + - "name, age, gpa FROM T5", true); - checkCmdOnDriver(cpr); + driver.run("create table T4 (name string, gpa double) partitioned by (age int) stored as ORC"); + driver.run("create table T5(name string, age int, gpa double)"); + driver.compileAndRespond("INSERT OVERWRITE TABLE T4 PARTITION (age) SELECT name, age, gpa FROM T5", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -258,55 +245,47 @@ private void insertOverwritePartitionedCreate(boolean isTransactional) throws Ex checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", "T4", null, 
locks); txnMgr.commitTxn(); Assert.assertEquals("Lock remained", 0, getLocks().size()); - cpr = driver.run("drop table if exists T5"); - checkCmdOnDriver(cpr); - cpr = driver.run("drop table if exists T4"); - checkCmdOnDriver(cpr); + driver.run("drop table if exists T5"); + driver.run("drop table if exists T4"); } + @Test public void basicBlocking() throws Exception { dropTable(new String[] {"T6"}); - CommandProcessorResponse cpr = driver.run("create table if not exists T6(a int)"); - checkCmdOnDriver(cpr); - cpr = driver.compileAndRespond("select a from T6", true); - checkCmdOnDriver(cpr); - txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");//gets S lock on T6 + driver.run("create table if not exists T6(a int)"); + driver.compileAndRespond("select a from T6", true); + txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); //gets S lock on T6 List selectLocks = ctx.getHiveLocks(); HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - cpr = driver.compileAndRespond("drop table if exists T6", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("drop table if exists T6", true); //tries to get X lock on T1 and gets Waiting state LockState lockState = ((DbTxnManager) txnMgr2).acquireLocks(driver.getPlan(), ctx, "Fiddler", false); List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T6", null, locks); checkLock(LockType.EXCLUSIVE, LockState.WAITING, "default", "T6", null, locks); - txnMgr.rollbackTxn();//release S on T6 + txnMgr.rollbackTxn(); //release S on T6 //attempt to X on T6 again - succeed lockState = ((DbLockManager)txnMgr.getLockManager()).checkLock(locks.get(1).getLockid()); locks = getLocks(); Assert.assertEquals("Unexpected lock count", 1, locks.size()); checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", "T6", null, locks); txnMgr2.rollbackTxn(); - cpr = driver.run("drop table if exists T6"); + driver.run("drop table if exists T6"); locks = getLocks(); Assert.assertEquals("Unexpected number of locks found", 0, locks.size()); - checkCmdOnDriver(cpr); } @Test public void lockConflictDbTable() throws Exception { dropTable(new String[] {"temp.T7"}); - CommandProcessorResponse cpr = driver.run("create database if not exists temp"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table if not exists temp.T7(a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.compileAndRespond("update temp.T7 set a = 5 where b = 6", true);//gets SS lock on T7 - checkCmdOnDriver(cpr); + driver.run("create database if not exists temp"); + driver.run("create table if not exists temp.T7(a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.compileAndRespond("update temp.T7 set a = 5 where b = 6", true); //gets SS lock on T7 txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.compileAndRespond("drop database if exists temp", true)); + driver.compileAndRespond("drop database if exists temp", true); ((DbTxnManager)txnMgr2).acquireLocks(driver.getPlan(), ctx, "Fiddler", false); List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -322,19 +301,16 @@ public void lockConflictDbTable() throws Exception { @Test public void 
updateSelectUpdate() throws Exception { dropTable(new String[] {"T8"}); - CommandProcessorResponse cpr = driver.run("create table T8(a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.compileAndRespond("delete from T8 where b = 89", true); - checkCmdOnDriver(cpr); - txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");//gets SS lock on T8 + driver.run("create table T8(a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.compileAndRespond("delete from T8 where b = 89", true); + txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); //gets SS lock on T8 HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.run("start transaction")); - cpr = driver.compileAndRespond("select a from T8", true);//gets S lock on T8 - checkCmdOnDriver(cpr); + driver.run("start transaction"); + driver.compileAndRespond("select a from T8", true); //gets S lock on T8 txnMgr2.acquireLocks(driver.getPlan(), ctx, "Fiddler"); - checkCmdOnDriver(driver.compileAndRespond("update T8 set a = 1 where b = 1", true)); - ((DbTxnManager) txnMgr2).acquireLocks(driver.getPlan(), ctx, "Practical", false);//waits for SS lock on T8 from fifer + driver.compileAndRespond("update T8 set a = 1 where b = 1", true); + ((DbTxnManager) txnMgr2).acquireLocks(driver.getPlan(), ctx, "Practical", false); //waits for SS lock on T8 from fifer List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 3, locks.size()); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T8", null, locks); @@ -348,10 +324,9 @@ public void updateSelectUpdate() throws Exception { checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "T8", null, locks); driver.releaseLocksAndCommitOrRollback(true, txnMgr2); swapTxnManager(txnMgr); - cpr = driver.run("drop table if exists T6"); + driver.run("drop table if exists T6"); locks = getLocks(); Assert.assertEquals("Unexpected number of locks found", 0, locks.size()); - checkCmdOnDriver(cpr); } @Test @@ -359,18 +334,15 @@ public void testLockRetryLimit() throws Exception { dropTable(new String[] {"T9"}); conf.setIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES, 2); conf.setBoolVar(HiveConf.ConfVars.TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT, true); - CommandProcessorResponse cpr = driver.run("create table T9(a int)"); - checkCmdOnDriver(cpr); - cpr = driver.compileAndRespond("select * from T9", true); - checkCmdOnDriver(cpr); + driver.run("create table T9(a int)"); + driver.compileAndRespond("select * from T9", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "Vincent Vega"); List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 1, locks.size()); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T9", null, locks); HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - cpr = driver.compileAndRespond("drop table T9", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("drop table T9", true); try { txnMgr2.acquireLocks(driver.getPlan(), ctx, "Winston Winnfield"); } @@ -393,19 +365,16 @@ public void testLockRetryLimit() throws Exception { @Test public void testLockBlockedBy() throws Exception { dropTable(new String[] {"TAB_BLOCKED"}); - CommandProcessorResponse cpr = driver.run("create table TAB_BLOCKED (a int, b int) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES 
('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.compileAndRespond("select * from TAB_BLOCKED", true); - checkCmdOnDriver(cpr); + driver.run("create table TAB_BLOCKED (a int, b int) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.compileAndRespond("select * from TAB_BLOCKED", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "I AM SAM"); List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 1, locks.size()); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB_BLOCKED", null, locks); HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - cpr = driver.compileAndRespond("drop table TAB_BLOCKED", true); - checkCmdOnDriver(cpr); - ((DbTxnManager)txnMgr2).acquireLocks(driver.getPlan(), ctx, "SAM I AM", false);//make non-blocking + driver.compileAndRespond("drop table TAB_BLOCKED", true); + ((DbTxnManager)txnMgr2).acquireLocks(driver.getPlan(), ctx, "SAM I AM", false); //make non-blocking locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB_BLOCKED", null, locks); @@ -418,31 +387,42 @@ public void testLockBlockedBy() throws Exception { public void testDummyTxnManagerOnAcidTable() throws Exception { dropTable(new String[] {"T10", "T11"}); // Create an ACID table with DbTxnManager - CommandProcessorResponse cpr = driver.run("create table T10 (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table T11 (a int, b int) clustered by(b) into 2 buckets stored as orc"); - checkCmdOnDriver(cpr); + driver.run("create table T10 (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table T11 (a int, b int) clustered by(b) into 2 buckets stored as orc"); // All DML should fail with DummyTxnManager on ACID table useDummyTxnManagerTemporarily(conf); - cpr = driver.compileAndRespond("select * from T10", true); - Assert.assertEquals(ErrorMsg.TXNMGR_NOT_ACID.getErrorCode(), cpr.getResponseCode()); - Assert.assertTrue(cpr.getErrorMessage().contains("This command is not allowed on an ACID table")); + try { + driver.compileAndRespond("select * from T10", true); + assert false; + } catch (CommandProcessorException e) { + Assert.assertEquals(ErrorMsg.TXNMGR_NOT_ACID.getErrorCode(), e.getResponseCode()); + Assert.assertTrue(e.getErrorMessage().contains("This command is not allowed on an ACID table")); + } useDummyTxnManagerTemporarily(conf); - cpr = driver.compileAndRespond("insert into table T10 values (1, 2)", true); - Assert.assertEquals(ErrorMsg.TXNMGR_NOT_ACID.getErrorCode(), cpr.getResponseCode()); - Assert.assertTrue(cpr.getErrorMessage().contains("This command is not allowed on an ACID table")); + try { + driver.compileAndRespond("insert into table T10 values (1, 2)", true); + } catch (CommandProcessorException e) { + Assert.assertEquals(ErrorMsg.TXNMGR_NOT_ACID.getErrorCode(), e.getResponseCode()); + Assert.assertTrue(e.getErrorMessage().contains("This command is not allowed on an ACID table")); + } useDummyTxnManagerTemporarily(conf); - cpr = driver.compileAndRespond("update T10 set a=0 where b=1", true); - Assert.assertEquals(ErrorMsg.ACID_OP_ON_NONACID_TXNMGR.getErrorCode(), cpr.getResponseCode()); - Assert.assertTrue(cpr.getErrorMessage().contains("Attempt to do update or delete using 
transaction manager that does not support these operations.")); + try { + driver.compileAndRespond("update T10 set a=0 where b=1", true); + } catch (CommandProcessorException e) { + Assert.assertEquals(ErrorMsg.ACID_OP_ON_NONACID_TXNMGR.getErrorCode(), e.getResponseCode()); + Assert.assertTrue(e.getErrorMessage().contains("Attempt to do update or delete using transaction manager that does not support these operations.")); + } useDummyTxnManagerTemporarily(conf); - cpr = driver.compileAndRespond("delete from T10", true); - Assert.assertEquals(ErrorMsg.ACID_OP_ON_NONACID_TXNMGR.getErrorCode(), cpr.getResponseCode()); - Assert.assertTrue(cpr.getErrorMessage().contains("Attempt to do update or delete using transaction manager that does not support these operations.")); + try { + driver.compileAndRespond("delete from T10", true); + } catch (CommandProcessorException e) { + Assert.assertEquals(ErrorMsg.ACID_OP_ON_NONACID_TXNMGR.getErrorCode(), e.getResponseCode()); + Assert.assertTrue(e.getErrorMessage().contains("Attempt to do update or delete using transaction manager that does not support these operations.")); + } conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); } @@ -474,36 +454,23 @@ private void useDummyTxnManagerTemporarily(HiveConf hiveConf) throws Exception { public void testMetastoreTablesCleanup() throws Exception { dropTable(new String[] {"temp.T10", "temp.T11", "temp.T12p", "temp.T13p"}); - CommandProcessorResponse cpr = driver.run("create database if not exists temp"); - checkCmdOnDriver(cpr); + driver.run("create database if not exists temp"); // Create some ACID tables: T10, T11 - unpartitioned table, T12p, T13p - partitioned table - cpr = driver.run("create table temp.T10 (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table temp.T11 (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table temp.T12p (a int, b int) partitioned by (ds string, hour string) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table temp.T13p (a int, b int) partitioned by (ds string, hour string) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); + driver.run("create table temp.T10 (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table temp.T11 (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table temp.T12p (a int, b int) partitioned by (ds string, hour string) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table temp.T13p (a int, b int) partitioned by (ds string, hour string) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); // Successfully insert some data into ACID tables, so that we have records in COMPLETED_TXN_COMPONENTS - cpr = driver.run("insert into temp.T10 values (1, 1)"); - checkCmdOnDriver(cpr); - cpr = driver.run("insert into temp.T10 values (2, 2)"); - checkCmdOnDriver(cpr); - cpr = driver.run("insert into temp.T11 values (3, 3)"); - checkCmdOnDriver(cpr); - cpr = driver.run("insert into temp.T11 values (4, 4)"); - checkCmdOnDriver(cpr); - cpr = 
driver.run("insert into temp.T12p partition (ds='today', hour='1') values (5, 5)"); - checkCmdOnDriver(cpr); - cpr = driver.run("insert into temp.T12p partition (ds='tomorrow', hour='2') values (6, 6)"); - checkCmdOnDriver(cpr); - cpr = driver.run("insert into temp.T13p partition (ds='today', hour='1') values (7, 7)"); - checkCmdOnDriver(cpr); - cpr = driver.run("insert into temp.T13p partition (ds='tomorrow', hour='2') values (8, 8)"); - checkCmdOnDriver(cpr); + driver.run("insert into temp.T10 values (1, 1)"); + driver.run("insert into temp.T10 values (2, 2)"); + driver.run("insert into temp.T11 values (3, 3)"); + driver.run("insert into temp.T11 values (4, 4)"); + driver.run("insert into temp.T12p partition (ds='today', hour='1') values (5, 5)"); + driver.run("insert into temp.T12p partition (ds='tomorrow', hour='2') values (6, 6)"); + driver.run("insert into temp.T13p partition (ds='today', hour='1') values (7, 7)"); + driver.run("insert into temp.T13p partition (ds='tomorrow', hour='2') values (8, 8)"); int count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11')"); Assert.assertEquals(4, count); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t12p', 't13p')"); @@ -511,14 +478,10 @@ public void testMetastoreTablesCleanup() throws Exception { // Fail some inserts, so that we have records in TXN_COMPONENTS conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); - cpr = driver.run("insert into temp.T10 values (9, 9)"); - checkCmdOnDriver(cpr); - cpr = driver.run("insert into temp.T11 values (10, 10)"); - checkCmdOnDriver(cpr); - cpr = driver.run("insert into temp.T12p partition (ds='today', hour='1') values (11, 11)"); - checkCmdOnDriver(cpr); - cpr = driver.run("insert into temp.T13p partition (ds='today', hour='1') values (12, 12)"); - checkCmdOnDriver(cpr); + driver.run("insert into temp.T10 values (9, 9)"); + driver.run("insert into temp.T11 values (10, 10)"); + driver.run("insert into temp.T12p partition (ds='today', hour='1') values (11, 11)"); + driver.run("insert into temp.T13p partition (ds='today', hour='1') values (12, 12)"); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(4, count); conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); @@ -528,8 +491,7 @@ public void testMetastoreTablesCleanup() throws Exception { Assert.assertEquals(1, count); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t10'"); Assert.assertEquals(2, count); - cpr = driver.run("drop table temp.T10"); - checkCmdOnDriver(cpr); + driver.run("drop table temp.T10"); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t10'"); Assert.assertEquals(0, count); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t10'"); @@ -539,16 +501,14 @@ public void testMetastoreTablesCleanup() throws Exception { Assert.assertEquals(1, count); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t12p' and CTC_PARTITION='ds=today/hour=1'"); Assert.assertEquals(1, count); - cpr = driver.run("alter table temp.T12p drop partition 
(ds='today', hour='1')"); - checkCmdOnDriver(cpr); + driver.run("alter table temp.T12p drop partition (ds='today', hour='1')"); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t12p' and TC_PARTITION='ds=today/hour=1'"); Assert.assertEquals(0, count); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t12p' and CTC_PARTITION='ds=today/hour=1'"); Assert.assertEquals(0, count); // Successfully perform compaction on a table/partition, so that we have successful records in COMPLETED_COMPACTIONS - cpr = driver.run("alter table temp.T11 compact 'minor'"); - checkCmdOnDriver(cpr); + driver.run("alter table temp.T11 compact 'minor'"); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='i'"); Assert.assertEquals(1, count); TestTxnCommands2.runWorker(conf); @@ -560,8 +520,7 @@ public void testMetastoreTablesCleanup() throws Exception { count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11' and CC_STATE='s' and CC_TYPE='i'"); Assert.assertEquals(1, count); - cpr = driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'minor'"); - checkCmdOnDriver(cpr); + driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'minor'"); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='i'"); Assert.assertEquals(1, count); TestTxnCommands2.runWorker(conf); @@ -575,8 +534,7 @@ public void testMetastoreTablesCleanup() throws Exception { // Fail compaction, so that we have failed records in COMPLETED_COMPACTIONS conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true); - cpr = driver.run("alter table temp.T11 compact 'major'"); - checkCmdOnDriver(cpr); + driver.run("alter table temp.T11 compact 'major'"); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'"); Assert.assertEquals(1, count); TestTxnCommands2.runWorker(conf); // will fail @@ -585,8 +543,7 @@ public void testMetastoreTablesCleanup() throws Exception { count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11' and CC_STATE='f' and CC_TYPE='a'"); Assert.assertEquals(1, count); - cpr = driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'major'"); - checkCmdOnDriver(cpr); + driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'major'"); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'"); Assert.assertEquals(1, count); TestTxnCommands2.runWorker(conf); // will fail @@ -597,33 +554,28 @@ public void testMetastoreTablesCleanup() throws Exception { conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false); // Put 2 records into COMPACTION_QUEUE and do nothing - cpr = driver.run("alter table temp.T11 compact 'major'"); - checkCmdOnDriver(cpr); + driver.run("alter table temp.T11 compact 'major'"); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where 
CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'"); Assert.assertEquals(1, count); - cpr = driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'major'"); - checkCmdOnDriver(cpr); + driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'major'"); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'"); Assert.assertEquals(1, count); // Drop a table/partition, corresponding records in COMPACTION_QUEUE and COMPLETED_COMPACTIONS should disappear - cpr = driver.run("drop table temp.T11"); - checkCmdOnDriver(cpr); + driver.run("drop table temp.T11"); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11'"); Assert.assertEquals(0, count); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11'"); Assert.assertEquals(0, count); - cpr = driver.run("alter table temp.T12p drop partition (ds='tomorrow', hour='2')"); - checkCmdOnDriver(cpr); + driver.run("alter table temp.T12p drop partition (ds='tomorrow', hour='2')"); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p'"); Assert.assertEquals(0, count); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t12p'"); Assert.assertEquals(0, count); // Put 1 record into COMPACTION_QUEUE and do nothing - cpr = driver.run("alter table temp.T13p partition (ds='today', hour='1') compact 'major'"); - checkCmdOnDriver(cpr); + driver.run("alter table temp.T13p partition (ds='today', hour='1') compact 'major'"); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t13p' and CQ_STATE='i' and CQ_TYPE='a'"); Assert.assertEquals(1, count); @@ -636,8 +588,7 @@ public void testMetastoreTablesCleanup() throws Exception { Assert.assertEquals(1, count); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(0, count); - cpr = driver.run("drop database if exists temp cascade"); - checkCmdOnDriver(cpr); + driver.run("drop database if exists temp cascade"); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(0, count); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11', 't12p', 't13p')"); @@ -655,14 +606,10 @@ public void testMetastoreTablesCleanup() throws Exception { @Test public void checkExpectedLocks() throws Exception { dropTable(new String[] {"acidPart", "nonAcidPart"}); - CommandProcessorResponse cpr = null; - cpr = driver.run("create table acidPart(a int, b int) partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table nonAcidPart(a int, b int) partitioned by (p string) stored as orc TBLPROPERTIES ('transactional'='false')"); - checkCmdOnDriver(cpr); - - cpr = driver.compileAndRespond("insert into nonAcidPart partition(p) values(1,2,3)", true); - checkCmdOnDriver(cpr); + 
driver.run("create table acidPart(a int, b int) partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table nonAcidPart(a int, b int) partitioned by (p string) stored as orc TBLPROPERTIES ('transactional'='false')"); + + driver.compileAndRespond("insert into nonAcidPart partition(p) values(1,2,3)", true); LockState lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false); List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -670,8 +617,7 @@ public void checkExpectedLocks() throws Exception { checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "_dummy_database", "_dummy_table", null, locks); txnMgr.rollbackTxn();; - cpr = driver.compileAndRespond("insert into nonAcidPart partition(p=1) values(5,6)", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("insert into nonAcidPart partition(p=1) values(5,6)", true); lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false); locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -679,8 +625,7 @@ public void checkExpectedLocks() throws Exception { checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "_dummy_database", "_dummy_table", null, locks); txnMgr.rollbackTxn(); - cpr = driver.compileAndRespond("insert into acidPart partition(p) values(1,2,3)", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("insert into acidPart partition(p) values(1,2,3)", true); lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false); locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -688,8 +633,7 @@ public void checkExpectedLocks() throws Exception { checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "_dummy_database", "_dummy_table", null, locks); txnMgr.rollbackTxn(); - cpr = driver.compileAndRespond("insert into acidPart partition(p=1) values(5,6)", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("insert into acidPart partition(p=1) values(5,6)", true); lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false); locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -697,20 +641,18 @@ public void checkExpectedLocks() throws Exception { checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "_dummy_database", "_dummy_table", null, locks); txnMgr.rollbackTxn(); - cpr = driver.compileAndRespond("update acidPart set b = 17 where a = 1", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("update acidPart set b = 17 where a = 1", true); lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false); locks = getLocks(); Assert.assertEquals("Unexpected lock count", 1, locks.size()); checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "acidPart", null, locks); txnMgr.rollbackTxn(); - cpr = driver.compileAndRespond("update acidPart set b = 17 where p = 1", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("update acidPart set b = 17 where p = 1", true); lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false); locks = getLocks(); Assert.assertEquals("Unexpected lock count", 1, locks.size()); - checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "acidPart", null, locks);//https://issues.apache.org/jira/browse/HIVE-13212 + checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "acidPart", null, 
locks); //https://issues.apache.org/jira/browse/HIVE-13212 txnMgr.rollbackTxn(); } /** @@ -719,14 +661,14 @@ public void checkExpectedLocks() throws Exception { @Test public void checkExpectedLocks2() throws Exception { dropTable(new String[] {"tab_acid", "tab_not_acid"}); - checkCmdOnDriver(driver.run("create table if not exists tab_acid (a int, b int) partitioned by (p string) " + - "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')")); - checkCmdOnDriver(driver.run("create table if not exists tab_not_acid (na int, nb int) partitioned by (np string) " + - "clustered by (na) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='false')")); - checkCmdOnDriver(driver.run("insert into tab_acid partition(p) (a,b,p) values(1,2,'foo'),(3,4,'bar')")); - checkCmdOnDriver(driver.run("insert into tab_not_acid partition(np) (na,nb,np) values(1,2,'blah'),(3,4,'doh')")); + driver.run("create table if not exists tab_acid (a int, b int) partitioned by (p string) " + + "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table if not exists tab_not_acid (na int, nb int) partitioned by (np string) " + + "clustered by (na) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='false')"); + driver.run("insert into tab_acid partition(p) (a,b,p) values(1,2,'foo'),(3,4,'bar')"); + driver.run("insert into tab_not_acid partition(np) (na,nb,np) values(1,2,'blah'),(3,4,'doh')"); txnMgr.openTxn(ctx, "T1"); - checkCmdOnDriver(driver.compileAndRespond("select * from tab_acid inner join tab_not_acid on a = na", true)); + driver.compileAndRespond("select * from tab_acid inner join tab_not_acid on a = na", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "T1"); List locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 6, locks.size()); @@ -739,7 +681,7 @@ public void checkExpectedLocks2() throws Exception { HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); txnMgr2.openTxn(ctx, "T2"); - checkCmdOnDriver(driver.compileAndRespond("insert into tab_not_acid partition(np='doh') values(5,6)", true)); + driver.compileAndRespond("insert into tab_not_acid partition(np='doh') values(5,6)", true); LockState ls = ((DbTxnManager)txnMgr2).acquireLocks(driver.getPlan(), ctx, "T2", false); locks = getLocks(txnMgr2); Assert.assertEquals("Unexpected lock count", 8, locks.size()); @@ -757,7 +699,7 @@ public void checkExpectedLocks2() throws Exception { conf.setBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE, false); HiveTxnManager txnMgr3 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); txnMgr3.openTxn(ctx, "T3"); - checkCmdOnDriver(driver.compileAndRespond("insert into tab_not_acid partition(np='blah') values(7,8)", true)); + driver.compileAndRespond("insert into tab_not_acid partition(np='blah') values(7,8)", true); ((DbTxnManager)txnMgr3).acquireLocks(driver.getPlan(), ctx, "T3", false); locks = getLocks(txnMgr3); Assert.assertEquals("Unexpected lock count", 10, locks.size()); @@ -777,10 +719,10 @@ public void checkExpectedLocks2() throws Exception { @Test public void testLockingOnInsertIntoNonNativeTables() throws Exception { dropTable(new String[] {"tab_not_acid"}); - checkCmdOnDriver(driver.run("create table if not exists tab_not_acid (a int, b int) " + - " STORED BY 'org.apache.hadoop.hive.ql.metadata.StorageHandlerMock'")); + driver.run("create table if not exists tab_not_acid (a int, b int) " + + " STORED BY 
'org.apache.hadoop.hive.ql.metadata.StorageHandlerMock'"); txnMgr.openTxn(ctx, "T1"); - checkCmdOnDriver(driver.compileAndRespond("insert into tab_not_acid values(1,2)", true)); + driver.compileAndRespond("insert into tab_not_acid values(1,2)", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "T1"); List locks = getLocks(txnMgr); @@ -792,10 +734,10 @@ public void testLockingOnInsertIntoNonNativeTables() throws Exception { @Test public void testLockingOnInsertOverwriteNonNativeTables() throws Exception { dropTable(new String[] {"tab_not_acid"}); - checkCmdOnDriver(driver.run("create table if not exists tab_not_acid (a int, b int) " + - " STORED BY 'org.apache.hadoop.hive.ql.metadata.StorageHandlerMock'")); + driver.run("create table if not exists tab_not_acid (a int, b int) " + + " STORED BY 'org.apache.hadoop.hive.ql.metadata.StorageHandlerMock'"); txnMgr.openTxn(ctx, "T1"); - checkCmdOnDriver(driver.compileAndRespond("insert overwrite table tab_not_acid values(1,2)", true)); + driver.compileAndRespond("insert overwrite table tab_not_acid values(1,2)", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "T1"); List locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -832,62 +774,45 @@ public static HiveTxnManager swapTxnManager(HiveTxnManager txnMgr) { } @Test public void testShowLocksFilterOptions() throws Exception { - CommandProcessorResponse cpr = driver.run("drop table if exists db1.t14"); - checkCmdOnDriver(cpr); - cpr = driver.run("drop table if exists db2.t14"); // Note that db1 and db2 have a table with common name - checkCmdOnDriver(cpr); - cpr = driver.run("drop table if exists db2.t15"); - checkCmdOnDriver(cpr); - cpr = driver.run("drop table if exists db2.t16"); - checkCmdOnDriver(cpr); - cpr = driver.run("drop database if exists db1"); - checkCmdOnDriver(cpr); - cpr = driver.run("drop database if exists db2"); - checkCmdOnDriver(cpr); - - cpr = driver.run("create database if not exists db1"); - checkCmdOnDriver(cpr); - cpr = driver.run("create database if not exists db2"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table if not exists db1.t14 (a int, b int) partitioned by (ds string) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table if not exists db2.t14 (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table if not exists db2.t15 (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table if not exists db2.t16 (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); + driver.run("drop table if exists db1.t14"); + driver.run("drop table if exists db2.t14"); // Note that db1 and db2 have a table with common name + driver.run("drop table if exists db2.t15"); + driver.run("drop table if exists db2.t16"); + driver.run("drop database if exists db1"); + driver.run("drop database if exists db2"); + + driver.run("create database if not exists db1"); + driver.run("create database if not exists db2"); + driver.run("create table if not exists db1.t14 (a int, b int) partitioned by (ds string) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table if not exists db2.t14 (a int, b int) clustered by (b) into 2 buckets 
stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table if not exists db2.t15 (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table if not exists db2.t16 (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); // Acquire different locks at different levels HiveTxnManager txnMgr1 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr1); - cpr = driver.compileAndRespond("insert into table db1.t14 partition (ds='today') values (1, 2)", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("insert into table db1.t14 partition (ds='today') values (1, 2)", true); txnMgr1.acquireLocks(driver.getPlan(), ctx, "Tom"); HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - cpr = driver.compileAndRespond("insert into table db1.t14 partition (ds='tomorrow') values (3, 4)", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("insert into table db1.t14 partition (ds='tomorrow') values (3, 4)", true); txnMgr2.acquireLocks(driver.getPlan(), ctx, "Jerry"); HiveTxnManager txnMgr3 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr3); - cpr = driver.compileAndRespond("select * from db2.t15", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("select * from db2.t15", true); txnMgr3.acquireLocks(driver.getPlan(), ctx, "Donald"); HiveTxnManager txnMgr4 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr4); - cpr = driver.compileAndRespond("select * from db2.t16", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("select * from db2.t16", true); txnMgr4.acquireLocks(driver.getPlan(), ctx, "Hillary"); HiveTxnManager txnMgr5 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr5); - cpr = driver.compileAndRespond("select * from db2.t14", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("select * from db2.t14", true); txnMgr5.acquireLocks(driver.getPlan(), ctx, "Obama"); // Simulate SHOW LOCKS with different filter options @@ -911,8 +836,7 @@ public void testShowLocksFilterOptions() throws Exception { // SHOW LOCKS t14 swapTxnManager(txnMgr); - cpr = driver.run("use db1"); - checkCmdOnDriver(cpr); + driver.run("use db1"); locks = getLocksWithFilterOptions(txnMgr, null, "t14", null); Assert.assertEquals("Unexpected lock count", 2, locks.size()); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=today", locks); @@ -927,16 +851,11 @@ public void testShowLocksFilterOptions() throws Exception { checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=today", locks); // SHOW LOCKS t15 - cpr = driver.run("use db2"); - checkCmdOnDriver(cpr); + driver.run("use db2"); locks = getLocksWithFilterOptions(txnMgr3, null, "t15", null); Assert.assertEquals("Unexpected lock count", 1, locks.size()); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, locks); } - - private void checkCmdOnDriver(CommandProcessorResponse cpr) { - Assert.assertTrue(cpr.toString(), cpr.getResponseCode() == 0); - } private static String normalizeCase(String s) { return s == null ? 
null : s.toLowerCase(); } @@ -954,17 +873,16 @@ private static String normalizeCase(String s) { @Test public void testWriteSetTracking1() throws Exception { dropTable(new String[] {"TAB_PART"}); - CommandProcessorResponse cpr = driver.run("create table if not exists TAB_PART (a int, b int) " + - "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); + driver.run("create table if not exists TAB_PART (a int, b int) " + + "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(driver.compileAndRespond("select * from TAB_PART", true)); + driver.compileAndRespond("select * from TAB_PART", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "Nicholas"); txnMgr.commitTxn(); HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true)); - checkCmdOnDriver(driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true)); + driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true); + driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true); txnMgr2.acquireLocks(driver.getPlan(), ctx, "Alexandra"); txnMgr2.commitTxn(); } @@ -979,16 +897,14 @@ private void dropTable(String[] tabs) throws Exception { @Test public void testWriteSetTracking2() throws Exception { dropTable(new String[] {"TAB_PART", "TAB2"}); - CommandProcessorResponse cpr = driver.run("create table if not exists TAB_PART (a int, b int) " + - "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table if not exists TAB2 (a int, b int) partitioned by (p string) " + - "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); + driver.run("create table if not exists TAB_PART (a int, b int) " + + "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table if not exists TAB2 (a int, b int) partitioned by (p string) " + + "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); txnMgr.openTxn(ctx, "Peter"); - checkCmdOnDriver(driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true)); + driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "Peter"); txnMgr2.openTxn(ctx, "Catherine"); List locks = getLocks(txnMgr); @@ -996,22 +912,22 @@ public void testWriteSetTracking2() throws Exception { //note that "update" uses dynamic partitioning thus lock is on the table not partition checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB_PART", null, locks); txnMgr.commitTxn(); - checkCmdOnDriver(driver.compileAndRespond("update TAB2 set b = 9 where p = 'doh'", true)); + driver.compileAndRespond("update TAB2 set b = 9 where p = 'doh'", true); txnMgr2.acquireLocks(driver.getPlan(), ctx, "Catherine"); txnMgr2.commitTxn(); } + /** * txns overlap and update the same resource - can't commit 2nd txn */ @Test public void testWriteSetTracking3() throws Exception { dropTable(new String[] {"TAB_PART"}); - CommandProcessorResponse cpr = 
driver.run("create table if not exists TAB_PART (a int, b int) " + - "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.run("insert into TAB_PART partition(p='blah') values(1,2)")); + driver.run("create table if not exists TAB_PART (a int, b int) " + + "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("insert into TAB_PART partition(p='blah') values(1,2)"); - checkCmdOnDriver(driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true)); + driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true); long txnId = txnMgr.getCurrentTxnId(); txnMgr.acquireLocks(driver.getPlan(), ctx, "Known"); List locks = getLocks(txnMgr); @@ -1019,10 +935,10 @@ public void testWriteSetTracking3() throws Exception { checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB_PART", "p=blah", locks); HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true)); + driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true); long txnId2 = txnMgr2.getCurrentTxnId(); ((DbTxnManager)txnMgr2).acquireLocks(driver.getPlan(), ctx, "Unknown", false); - locks = getLocks(txnMgr2);//should not matter which txnMgr is used here + locks = getLocks(txnMgr2); //should not matter which txnMgr is used here Assert.assertEquals("Unexpected lock count", 2, locks.size()); checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB_PART", "p=blah", locks); checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "TAB_PART", "p=blah", locks); @@ -1059,15 +975,13 @@ public void testWriteSetTracking3() throws Exception { public void testWriteSetTracking4() throws Exception { dropTable(new String[] {"TAB_PART", "TAB2"}); Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET")); - CommandProcessorResponse cpr = driver.run("create table if not exists TAB_PART (a int, b int) " + - "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table if not exists TAB2 (a int, b int) partitioned by (p string) " + - "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); + driver.run("create table if not exists TAB_PART (a int, b int) " + + "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table if not exists TAB2 (a int, b int) partitioned by (p string) " + + "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); txnMgr.openTxn(ctx, "Long Running"); - checkCmdOnDriver(driver.compileAndRespond("select a from TAB_PART where p = 'blah'", true)); + driver.compileAndRespond("select a from TAB_PART where p = 'blah'", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "Long Running"); List locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 1, locks.size()); @@ -1078,7 +992,7 @@ public void testWriteSetTracking4() throws Exception { HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); txnMgr2.openTxn(ctx, "Short Running"); - checkCmdOnDriver(driver.compileAndRespond("update TAB2 set 
b = 7 where p = 'blah'", true));//no such partition + driver.compileAndRespond("update TAB2 set b = 7 where p = 'blah'", true); //no such partition txnMgr2.acquireLocks(driver.getPlan(), ctx, "Short Running"); locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -1093,7 +1007,7 @@ public void testWriteSetTracking4() throws Exception { Assert.assertEquals(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getTxnId()); AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getWriteId(), - "default", "tab2", Collections.EMPTY_LIST); + "default", "tab2", Collections.EMPTY_LIST); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); txnMgr2.commitTxn(); @@ -1101,12 +1015,12 @@ public void testWriteSetTracking4() throws Exception { Assert.assertEquals( 0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET")); txnMgr2.openTxn(ctx, "T3"); - checkCmdOnDriver(driver.compileAndRespond("update TAB2 set b = 7 where p = 'two'", true));//pretend this partition exists + driver.compileAndRespond("update TAB2 set b = 7 where p = 'two'", true); //pretend this partition exists txnMgr2.acquireLocks(driver.getPlan(), ctx, "T3"); locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 2, locks.size()); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB_PART", null, locks); - checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", null, locks);//since TAB2 is empty + checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", null, locks); //since TAB2 is empty //update stmt has p=blah, thus nothing is actually update and we generate empty dyn part list Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET")); @@ -1116,19 +1030,19 @@ public void testWriteSetTracking4() throws Exception { Assert.assertEquals(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getTxnId()); adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getWriteId(), - "default", "tab2", Collections.singletonList("p=two")); + "default", "tab2", Collections.singletonList("p=two")); adp.setOperationType(DataOperationType.UPDATE); - txnHandler.addDynamicPartitions(adp);//simulate partition update + txnHandler.addDynamicPartitions(adp); //simulate partition update txnMgr2.commitTxn(); Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET")); + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET")); AcidWriteSetService houseKeeper = new AcidWriteSetService(); houseKeeper.setConf(conf); houseKeeper.run(); //since T3 overlaps with Long Running (still open) GC does nothing Assert.assertEquals(1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET")); - checkCmdOnDriver(driver.compileAndRespond("update TAB2 set b = 17 where a = 1", true));//no rows match + driver.compileAndRespond("update TAB2 set b = 17 where a = 1", true); //no rows match txnMgr.acquireLocks(driver.getPlan(), ctx, "Long Running"); rqst = new AllocateTableWriteIdsRequest("default", "tab2"); @@ -1138,7 +1052,7 @@ public void testWriteSetTracking4() throws Exception { //so generate empty Dyn Part call adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getWriteId(), - "default", "tab2", Collections.EMPTY_LIST); + 
"default", "tab2", Collections.EMPTY_LIST); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); txnMgr.commitTxn(); @@ -1156,22 +1070,21 @@ public void testWriteSetTracking4() throws Exception { public void testWriteSetTracking5() throws Exception { dropTable(new String[] {"TAB_PART"}); Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET")); - CommandProcessorResponse cpr = driver.run("create table if not exists TAB_PART (a int, b int) " + + driver.run("create table if not exists TAB_PART (a int, b int) " + "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.run("insert into TAB_PART partition(p='blah') values(1,2)")); + driver.run("insert into TAB_PART partition(p='blah') values(1,2)"); HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); txnMgr.openTxn(ctx, "Known"); long txnId = txnMgr2.openTxn(ctx, "Unknown"); - checkCmdOnDriver(driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true)); + driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "Known"); List locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 1, locks.size()); checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB_PART", "p=blah", locks); - checkCmdOnDriver(driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true)); + driver.compileAndRespond("update TAB_PART set b = 7 where p = 'blah'", true); ((DbTxnManager)txnMgr2).acquireLocks(driver.getPlan(), ctx, "Unknown", false); - locks = getLocks(txnMgr2);//should not matter which txnMgr is used here + locks = getLocks(txnMgr2); //should not matter which txnMgr is used here Assert.assertEquals("Unexpected lock count", 2, locks.size()); checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB_PART", "p=blah", locks); checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "TAB_PART", "p=blah", locks); @@ -1188,7 +1101,7 @@ public void testWriteSetTracking5() throws Exception { adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET")); - txnMgr2.commitTxn();//since conflicting txn rolled back, commit succeeds + txnMgr2.commitTxn(); //since conflicting txn rolled back, commit succeeds Assert.assertEquals(1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET")); } /** @@ -1198,24 +1111,23 @@ public void testWriteSetTracking5() throws Exception { public void testWriteSetTracking6() throws Exception { dropTable(new String[] {"TAB2"}); Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET")); - CommandProcessorResponse cpr = driver.run("create table if not exists TAB2(a int, b int) clustered " + + driver.run("create table if not exists TAB2(a int, b int) clustered " + "by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.compileAndRespond("select * from TAB2 where a = 113", true)); + driver.compileAndRespond("select * from TAB2 where a = 113", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "Works"); List locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 1, locks.size()); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB2", null, locks); 
HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.compileAndRespond("update TAB2 set b = 17 where a = 101", true)); + driver.compileAndRespond("update TAB2 set b = 17 where a = 101", true); txnMgr2.acquireLocks(driver.getPlan(), ctx, "Horton"); Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET")); locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 2, locks.size()); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB2", null, locks); checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", null, locks); - txnMgr2.commitTxn();//no conflict + txnMgr2.commitTxn(); //no conflict Assert.assertEquals(1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET")); locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 1, locks.size()); @@ -1235,14 +1147,13 @@ public void testWriteSetTracking6() throws Exception { public void testWriteSetTracking7() throws Exception { dropTable(new String[] {"tab2", "TAB2"}); Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET")); - CommandProcessorResponse cpr = driver.run("create table if not exists tab2 (a int, b int) " + - "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.run("insert into tab2 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')"));//txnid:1 + driver.run("create table if not exists tab2 (a int, b int) " + + "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("insert into tab2 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')"); //txnid:1 HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); //test with predicates such that partition pruning works - checkCmdOnDriver(driver.compileAndRespond("update tab2 set b = 7 where p='two'", true)); + driver.compileAndRespond("update tab2 set b = 7 where p='two'", true); long idTxnUpdate1 = txnMgr2.getCurrentTxnId(); txnMgr2.acquireLocks(driver.getPlan(), ctx, "T2"); List locks = getLocks(txnMgr2); @@ -1251,7 +1162,7 @@ public void testWriteSetTracking7() throws Exception { //now start concurrent txn swapTxnManager(txnMgr); - checkCmdOnDriver(driver.compileAndRespond("update tab2 set b = 7 where p='one'", true)); + driver.compileAndRespond("update tab2 set b = 7 where p='one'", true); long idTxnUpdate2 = txnMgr.getCurrentTxnId(); ((DbTxnManager)txnMgr).acquireLocks(driver.getPlan(), ctx, "T3", false); locks = getLocks(txnMgr); @@ -1262,38 +1173,37 @@ public void testWriteSetTracking7() throws Exception { //this simulates the completion of txnid:idTxnUpdate1 long writeId = txnMgr2.getTableWriteId("default", "tab2"); AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab2", - Collections.singletonList("p=two")); + Collections.singletonList("p=two")); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); - txnMgr2.commitTxn();//txnid:idTxnUpdate1 + txnMgr2.commitTxn(); //txnid:idTxnUpdate1 locks = getLocks(txnMgr2); Assert.assertEquals("Unexpected lock count", 1, locks.size()); checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", "p=one", locks); //completion of txnid:idTxnUpdate2 writeId = txnMgr.getTableWriteId("default", "tab2"); adp = 
new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab2", - Collections.singletonList("p=one")); + Collections.singletonList("p=one")); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); - txnMgr.commitTxn();//txnid:idTxnUpdate2 + txnMgr.commitTxn(); //txnid:idTxnUpdate2 //now both txns concurrently updated TAB2 but different partitions. Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u'")); + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u'")); Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u'")); + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u'")); //2 from txnid:1, 1 from txnid:2, 1 from txnid:3 Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab2' and ctc_partition is not null")); + 4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab2' and ctc_partition is not null")); //================ //test with predicates such that partition pruning doesn't kick in - cpr = driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + + driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.run("insert into tab1 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')"));//txnid:4 + driver.run("insert into tab1 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')"); //txnid:4 swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.compileAndRespond("update tab1 set b = 7 where b=1", true)); + driver.compileAndRespond("update tab1 set b = 7 where b=1", true); long idTxnUpdate3 = txnMgr2.getCurrentTxnId(); txnMgr2.acquireLocks(driver.getPlan(), ctx, "T5"); locks = getLocks(txnMgr2); @@ -1303,7 +1213,7 @@ public void testWriteSetTracking7() throws Exception { //now start concurrent txn swapTxnManager(txnMgr); - checkCmdOnDriver(driver.compileAndRespond("update tab1 set b = 7 where b = 2", true)); + driver.compileAndRespond("update tab1 set b = 7 where b = 2", true); long idTxnUpdate4 = txnMgr.getCurrentTxnId(); ((DbTxnManager)txnMgr).acquireLocks(driver.getPlan(), ctx, "T6", false); locks = getLocks(txnMgr); @@ -1316,12 +1226,12 @@ public void testWriteSetTracking7() throws Exception { //this simulates the completion of txnid:idTxnUpdate3 writeId = txnMgr2.getTableWriteId("default", "tab1"); adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1", - Collections.singletonList("p=one")); + Collections.singletonList("p=one")); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); - txnMgr2.commitTxn();//txnid:idTxnUpdate3 + txnMgr2.commitTxn(); //txnid:idTxnUpdate3 - ((DbLockManager)txnMgr.getLockManager()).checkLock(locks.get(2).getLockid());//retest WAITING locks (both have same ext id) + 
((DbLockManager)txnMgr.getLockManager()).checkLock(locks.get(2).getLockid()); //retest WAITING locks (both have same ext id) locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 2, locks.size()); checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks); @@ -1329,18 +1239,18 @@ public void testWriteSetTracking7() throws Exception { //completion of txnid:idTxnUpdate4 writeId = txnMgr.getTableWriteId("default", "tab1"); adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab1", - Collections.singletonList("p=two")); + Collections.singletonList("p=two")); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); - txnMgr.commitTxn();//txnid:idTxnUpdate4 + txnMgr.commitTxn(); //txnid:idTxnUpdate4 Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'")); + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'")); Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u' and ws_table='tab1'")); + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u' and ws_table='tab1'")); //2 from insert + 1 for each update stmt Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null")); + 4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null")); } /** * Concurrent updates with partition pruning predicate and w/o one @@ -1348,13 +1258,12 @@ public void testWriteSetTracking7() throws Exception { @Test public void testWriteSetTracking8() throws Exception { dropTable(new String[] {"tab1", "TAB1"}); - CommandProcessorResponse cpr = driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + - "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.run("insert into tab1 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')")); + driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + + "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("insert into tab1 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')"); HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.compileAndRespond("update tab1 set b = 7 where b=1", true)); + driver.compileAndRespond("update tab1 set b = 7 where b=1", true); long idTxnUpdate1 = txnMgr2.getCurrentTxnId(); txnMgr2.acquireLocks(driver.getPlan(), ctx, "T2"); List locks = getLocks(txnMgr2); @@ -1364,7 +1273,7 @@ public void testWriteSetTracking8() throws Exception { //now start concurrent txn swapTxnManager(txnMgr); - checkCmdOnDriver(driver.compileAndRespond("update tab1 set b = 7 where p='two'", true)); + 
driver.compileAndRespond("update tab1 set b = 7 where p='two'", true); long idTxnUpdate2 = txnMgr.getCurrentTxnId(); ((DbTxnManager)txnMgr).acquireLocks(driver.getPlan(), ctx, "T3", false); locks = getLocks(txnMgr); @@ -1376,29 +1285,29 @@ public void testWriteSetTracking8() throws Exception { //this simulates the completion of txnid:idTxnUpdate1 long writeId = txnMgr2.getTableWriteId("default", "tab1"); AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1", - Collections.singletonList("p=one")); + Collections.singletonList("p=one")); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); - txnMgr2.commitTxn();//txnid:idTxnUpdate1 + txnMgr2.commitTxn(); //txnid:idTxnUpdate1 - ((DbLockManager)txnMgr.getLockManager()).checkLock(locks.get(2).getLockid());//retest WAITING locks (both have same ext id) + ((DbLockManager)txnMgr.getLockManager()).checkLock(locks.get(2).getLockid()); //retest WAITING locks (both have same ext id) locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 1, locks.size()); checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks); //completion of txnid:idTxnUpdate2 writeId = txnMgr.getTableWriteId("default", "tab1"); adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab1", - Collections.singletonList("p=two")); + Collections.singletonList("p=two")); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); - txnMgr.commitTxn();//txnid:idTxnUpdate2 + txnMgr.commitTxn(); //txnid:idTxnUpdate2 Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'")); + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'")); Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u' and ws_table='tab1'")); + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u' and ws_table='tab1'")); Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null")); + 4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null")); } /** * Concurrent update/delete of different partitions - should pass @@ -1406,13 +1315,12 @@ public void testWriteSetTracking8() throws Exception { @Test public void testWriteSetTracking9() throws Exception { dropTable(new String[] {"TAB1"}); - CommandProcessorResponse cpr = driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + - "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.run("insert into tab1 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')")); + driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + + "clustered by (a) into 2 buckets stored as orc 
TBLPROPERTIES ('transactional'='true')"); + driver.run("insert into tab1 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')"); HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.compileAndRespond("update tab1 set b = 7 where b=1", true)); + driver.compileAndRespond("update tab1 set b = 7 where b=1", true); long idTxnUpdate1 = txnMgr2.getCurrentTxnId(); txnMgr2.acquireLocks(driver.getPlan(), ctx, "T2"); List locks = getLocks(txnMgr2); @@ -1422,7 +1330,7 @@ public void testWriteSetTracking9() throws Exception { //now start concurrent txn swapTxnManager(txnMgr); - checkCmdOnDriver(driver.compileAndRespond("delete from tab1 where p='two' and b=2", true)); + driver.compileAndRespond("delete from tab1 where p='two' and b=2", true); long idTxnDelete1 = txnMgr.getCurrentTxnId(); ((DbTxnManager)txnMgr).acquireLocks(driver.getPlan(), ctx, "T3", false); locks = getLocks(txnMgr); @@ -1434,35 +1342,35 @@ public void testWriteSetTracking9() throws Exception { //this simulates the completion of txnid:idTxnUpdate1 long writeId = txnMgr2.getTableWriteId("default", "tab1"); AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1", - Collections.singletonList("p=one")); + Collections.singletonList("p=one")); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); - txnMgr2.commitTxn();//txnid:idTxnUpdate1 + txnMgr2.commitTxn(); //txnid:idTxnUpdate1 - ((DbLockManager)txnMgr.getLockManager()).checkLock(locks.get(2).getLockid());//retest WAITING locks (both have same ext id) + ((DbLockManager)txnMgr.getLockManager()).checkLock(locks.get(2).getLockid()); //retest WAITING locks (both have same ext id) locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 1, locks.size()); checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks); //completion of txnid:idTxnUpdate2 writeId = txnMgr.getTableWriteId("default", "tab1"); adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab1", - Collections.singletonList("p=two")); + Collections.singletonList("p=two")); adp.setOperationType(DataOperationType.DELETE); txnHandler.addDynamicPartitions(adp); - txnMgr.commitTxn();//txnid:idTxnUpdate2 + txnMgr.commitTxn(); //txnid:idTxnUpdate2 Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 2, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + (idTxnUpdate1 - 1) + " and ctc_table='tab1'")); + 2, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + (idTxnUpdate1 - 1) + " and ctc_table='tab1'")); Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + idTxnUpdate1 + " and ctc_table='tab1' and ctc_partition='p=one'")); + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + idTxnUpdate1 + " and ctc_table='tab1' and ctc_partition='p=one'")); Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + idTxnDelete1 + " and ctc_table='tab1' and ctc_partition='p=two'")); + 1, 
TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + idTxnDelete1 + " and ctc_table='tab1' and ctc_partition='p=two'")); Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'")); + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'")); Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1'")); + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1'")); Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null")); + 4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null")); } /** * Concurrent update/delete of same partition - should fail to commit @@ -1470,13 +1378,12 @@ public void testWriteSetTracking9() throws Exception { @Test public void testWriteSetTracking10() throws Exception { dropTable(new String[] {"TAB1"}); - CommandProcessorResponse cpr = driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + - "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.run("insert into tab1 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')"));//txnid:1 + driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + + "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("insert into tab1 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')"); //txnid:1 HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.compileAndRespond("update tab1 set b = 7 where b=2", true)); + driver.compileAndRespond("update tab1 set b = 7 where b=2", true); txnMgr2.acquireLocks(driver.getPlan(), ctx, "T2"); List locks = getLocks(txnMgr2); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -1485,7 +1392,7 @@ public void testWriteSetTracking10() throws Exception { //now start concurrent txn swapTxnManager(txnMgr); - checkCmdOnDriver(driver.compileAndRespond("delete from tab1 where p='two' and b=2", true)); + driver.compileAndRespond("delete from tab1 where p='two' and b=2", true); ((DbTxnManager)txnMgr).acquireLocks(driver.getPlan(), ctx, "T3", false); locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 3, locks.size()); @@ -1496,37 +1403,37 @@ public void testWriteSetTracking10() throws Exception { //this simulates the completion of "Update tab2" txn long writeId = txnMgr2.getTableWriteId("default", "tab1"); AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1", - Collections.singletonList("p=two")); + Collections.singletonList("p=two")); adp.setOperationType(DataOperationType.UPDATE); 
txnHandler.addDynamicPartitions(adp); - txnMgr2.commitTxn();//"Update tab2" + txnMgr2.commitTxn(); //"Update tab2" - ((DbLockManager)txnMgr.getLockManager()).checkLock(locks.get(2).getLockid());//retest WAITING locks (both have same ext id) + ((DbLockManager)txnMgr.getLockManager()).checkLock(locks.get(2).getLockid()); //retest WAITING locks (both have same ext id) locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 1, locks.size()); checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks); //completion of "delete from tab1" txn writeId = txnMgr.getTableWriteId("default", "tab1"); adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab1", - Collections.singletonList("p=two")); + Collections.singletonList("p=two")); adp.setOperationType(DataOperationType.DELETE); txnHandler.addDynamicPartitions(adp); LockException exception = null; try { - txnMgr.commitTxn();//"delete from tab1" + txnMgr.commitTxn(); //"delete from tab1" } catch(LockException e) { exception = e; } Assert.assertNotEquals("Expected exception", null, exception); Assert.assertEquals("Exception msg doesn't match", - "Aborting [txnid:5,5] due to a write conflict on default/tab1/p=two committed by [txnid:4,5] d/u", - exception.getCause().getMessage()); + "Aborting [txnid:5,5] due to a write conflict on default/tab1/p=two committed by [txnid:4,5] d/u", + exception.getCause().getMessage()); Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u' and ws_table='tab1'")); + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u' and ws_table='tab1'")); Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 3, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null")); + 3, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null")); } /** * Concurrent delete/delete of same partition - should NOT pass @@ -1534,13 +1441,12 @@ public void testWriteSetTracking10() throws Exception { @Test public void testWriteSetTracking11() throws Exception { dropTable(new String[] {"TAB1"}); - CommandProcessorResponse cpr = driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + - "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.run("insert into tab1 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')")); + driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + + "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("insert into tab1 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')"); HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.compileAndRespond("delete from tab1 where b=2", true));//start "delete from tab1" txn + driver.compileAndRespond("delete from tab1 where b=2", true); //start "delete from tab1" txn long txnIdDelete = txnMgr2.getCurrentTxnId(); txnMgr2.acquireLocks(driver.getPlan(), ctx, "T2"); List locks = getLocks(txnMgr2); 
@@ -1550,11 +1456,11 @@ public void testWriteSetTracking11() throws Exception { //now start concurrent "select * from tab1" txn swapTxnManager(txnMgr); - checkCmdOnDriver(driver.run("start transaction"));//start explicit txn so that txnMgr knows it - checkCmdOnDriver(driver.compileAndRespond("select * from tab1 where b=1 and p='one'", true)); + driver.run("start transaction"); //start explicit txn so that txnMgr knows it + driver.compileAndRespond("select * from tab1 where b=1 and p='one'", true); long txnIdSelect = txnMgr.getCurrentTxnId(); ((DbTxnManager)txnMgr).acquireLocks(driver.getPlan(), ctx, "T3", false); - checkCmdOnDriver(driver.compileAndRespond("delete from tab1 where p='two' and b=2", true)); + driver.compileAndRespond("delete from tab1 where p='two' and b=2", true); ((DbTxnManager)txnMgr).acquireLocks(driver.getPlan(), ctx, "T3", false); locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 5, locks.size()); @@ -1567,12 +1473,12 @@ public void testWriteSetTracking11() throws Exception { //this simulates the completion of "delete from tab1" txn long writeId = txnMgr2.getTableWriteId("default", "tab1"); AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1", - Collections.singletonList("p=two")); + Collections.singletonList("p=two")); adp.setOperationType(DataOperationType.DELETE); txnHandler.addDynamicPartitions(adp); - txnMgr2.commitTxn();//"delete from tab1" txn + txnMgr2.commitTxn(); //"delete from tab1" txn - ((DbLockManager)txnMgr.getLockManager()).checkLock(locks.get(4).getLockid());//retest WAITING locks (both have same ext id) + ((DbLockManager)txnMgr.getLockManager()).checkLock(locks.get(4).getLockid()); //retest WAITING locks (both have same ext id) locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 3, locks.size()); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB1", null, locks); @@ -1581,12 +1487,12 @@ public void testWriteSetTracking11() throws Exception { //completion of txnid:txnIdSelect writeId = txnMgr.getTableWriteId("default", "tab1"); adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab1", - Collections.singletonList("p=two")); + Collections.singletonList("p=two")); adp.setOperationType(DataOperationType.DELETE); txnHandler.addDynamicPartitions(adp); LockException expectedException = null; try { - txnMgr.commitTxn();//"select * from tab1" txn + txnMgr.commitTxn(); //"select * from tab1" txn } catch(LockException ex) { expectedException = ex; @@ -1596,32 +1502,30 @@ public void testWriteSetTracking11() throws Exception { "Reason: Aborting [txnid:5,5] due to a write conflict on default/tab1/p=two " + "committed by [txnid:4,5] d/d", expectedException.getMessage()); Assert.assertEquals("WRITE_SET mismatch: " + - TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1, TxnDbUtil.countQueryAgent(conf, + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1' and ws_txnid=" + txnIdDelete)); Assert.assertEquals("WRITE_SET mismatch: " + - TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 0, TxnDbUtil.countQueryAgent(conf, + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), + 0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1' and ws_txnid=" + txnIdSelect)); 
Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 3, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null")); + 3, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null")); } @Test public void testCompletedTxnComponents() throws Exception { dropTable(new String[] {"TAB1", "tab_not_acid2"}); - CommandProcessorResponse cpr = driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + - "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table if not exists tab_not_acid2 (a int, b int)"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.run("insert into tab_not_acid2 values(1,1),(2,2)")); + driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + + "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table if not exists tab_not_acid2 (a int, b int)"); + driver.run("insert into tab_not_acid2 values(1,1),(2,2)"); //writing both acid and non-acid resources in the same txn - checkCmdOnDriver(driver.run("from tab_not_acid2 insert into tab1 partition(p='two')(a,b) select a,b insert into tab_not_acid2(a,b) select a,b "));//txnid:1 + driver.run("from tab_not_acid2 insert into tab1 partition(p='two')(a,b) select a,b insert into tab_not_acid2(a,b) select a,b "); //txnid:1 Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS")); + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS")); //only expect transactional components to be in COMPLETED_TXN_COMPONENTS Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=6 and ctc_table='tab1'")); + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=6 and ctc_table='tab1'")); } /** @@ -1630,25 +1534,23 @@ public void testCompletedTxnComponents() throws Exception { @Test public void testMultiInsert() throws Exception { dropTable(new String[] {"TAB1", "tab_not_acid"}); - checkCmdOnDriver(driver.run("drop table if exists tab1")); - checkCmdOnDriver(driver.run("drop table if exists tab_not_acid")); - CommandProcessorResponse cpr = driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + - "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table if not exists tab_not_acid (a int, b int, p string)"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.run("insert into tab_not_acid values(1,1,'one'),(2,2,'two')")); - checkCmdOnDriver(driver.run("insert into tab1 partition(p) values(3,3,'one'),(4,4,'two')"));//txinid:8 + driver.run("drop table if exists tab1"); + driver.run("drop table if exists tab_not_acid"); + driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + + "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("create table if not exists tab_not_acid (a int, b int, p string)"); + 
driver.run("insert into tab_not_acid values(1,1,'one'),(2,2,'two')"); + driver.run("insert into tab1 partition(p) values(3,3,'one'),(4,4,'two')"); //txinid:8 //writing both acid and non-acid resources in the same txn //tab1 write is a dynamic partition insert - checkCmdOnDriver(driver.run("from tab_not_acid insert into tab1 partition(p)(a,b,p) select a,b,p insert into tab_not_acid(a,b) select a,b where p='two'"));//txnid:9 + driver.run("from tab_not_acid insert into tab1 partition(p)(a,b,p) select a,b,p insert into tab_not_acid(a,b) select a,b where p='two'"); //txnid:9 Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS")); + 4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS")); //only expect transactional components to be in COMPLETED_TXN_COMPONENTS Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 2, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=9")); + 2, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=9")); Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 2, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=9 and ctc_table='tab1'")); + 2, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=9 and ctc_table='tab1'")); } //todo: Concurrent insert/update of same partition - should pass @@ -1677,9 +1579,8 @@ public void testMultiInsert() throws Exception { @Test public void testShowLocksAgentInfo() throws Exception { - CommandProcessorResponse cpr = driver.run("create table if not exists XYZ (a int, b int)"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.compileAndRespond("select a from XYZ where b = 8", true)); + driver.run("create table if not exists XYZ (a int, b int)"); + driver.compileAndRespond("select a from XYZ where b = 8", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "XYZ"); List locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 1, locks.size()); @@ -1701,28 +1602,28 @@ public void testMerge3Way02() throws Exception { */ private void testMerge3Way(boolean cc) throws Exception { dropTable(new String[] {"target","source", "source2"}); - checkCmdOnDriver(driver.run("create table target (a int, b int) " + - "partitioned by (p int, q int) clustered by (a) into 2 buckets " + - "stored as orc TBLPROPERTIES ('transactional'='true')")); + driver.run("create table target (a int, b int) " + + "partitioned by (p int, q int) clustered by (a) into 2 buckets " + + "stored as orc TBLPROPERTIES ('transactional'='true')"); //in practice we don't really care about the data in any of these tables (except as far as //it creates partitions, the SQL being test is not actually executed and results of the //wrt ACID metadata is supplied manually via addDynamicPartitions(). 
But having data makes //it easier to follow the intent - checkCmdOnDriver(driver.run("insert into target partition(p,q) values (1,2,1,2), (3,4,1,2), (5,6,1,3), (7,8,2,2)")); - checkCmdOnDriver(driver.run("create table source (a int, b int, p int, q int)")); - checkCmdOnDriver(driver.run("insert into source values " + - // I-(1/2) D-(1/2) I-(1/3) U-(1/3) D-(2/2) I-(1/1) - new part - "(9,10,1,2), (3,4,1,2), (11,12,1,3), (5,13,1,3), (7,8,2,2), (14,15,1,1)")); - checkCmdOnDriver(driver.run("create table source2 (a int, b int, p int, q int)")); - checkCmdOnDriver(driver.run("insert into source2 values " + - //cc ? -:U-(1/2) D-(1/2) cc ? U-(1/3):- D-(2/2) I-(1/1) - new part 2 - "(9,100,1,2), (3,4,1,2), (5,13,1,3), (7,8,2,2), (14,15,2,1)")); - - - checkCmdOnDriver(driver.compileAndRespond("merge into target t using source s on t.a=s.b " + - "when matched and t.a=5 then update set b=s.b " + //updates p=1/q=3 - "when matched and t.a in (3,7) then delete " + //deletes from p=1/q=2, p=2/q=2 - "when not matched and t.a >= 8 then insert values(s.a, s.b, s.p, s.q)", true));//insert p=1/q=2, p=1/q=3 and new part 1/1 + driver.run("insert into target partition(p,q) values (1,2,1,2), (3,4,1,2), (5,6,1,3), (7,8,2,2)"); + driver.run("create table source (a int, b int, p int, q int)"); + driver.run("insert into source values " + + // I-(1/2) D-(1/2) I-(1/3) U-(1/3) D-(2/2) I-(1/1) - new part + "(9,10,1,2), (3,4,1,2), (11,12,1,3), (5,13,1,3), (7,8,2,2), (14,15,1,1)"); + driver.run("create table source2 (a int, b int, p int, q int)"); + driver.run("insert into source2 values " + + //cc ? -:U-(1/2) D-(1/2) cc ? U-(1/3):- D-(2/2) I-(1/1) - new part 2 + "(9,100,1,2), (3,4,1,2), (5,13,1,3), (7,8,2,2), (14,15,2,1)"); + + + driver.compileAndRespond("merge into target t using source s on t.a=s.b " + + "when matched and t.a=5 then update set b=s.b " + //updates p=1/q=3 + "when matched and t.a in (3,7) then delete " + //deletes from p=1/q=2, p=2/q=2 + "when not matched and t.a >= 8 then insert values(s.a, s.b, s.p, s.q)", true); //insert p=1/q=2, p=1/q=3 and new part 1/1 long txnId1 = txnMgr.getCurrentTxnId(); txnMgr.acquireLocks(driver.getPlan(), ctx, "T1"); List locks = getLocks(); @@ -1736,10 +1637,10 @@ private void testMerge3Way(boolean cc) throws Exception { //start concurrent txn DbTxnManager txnMgr2 = (DbTxnManager) TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.compileAndRespond("merge into target t using source2 s on t.a=s.b " + - "when matched and t.a=" + (cc ? 5 : 9) + " then update set b=s.b " + //if conflict updates p=1/q=3 else update p=1/q=2 - "when matched and t.a in (" + (cc ? "3,7" : "11, 13") + ") then delete " + //if cc deletes from p=1/q=2, p=2/q=2, else delete nothing - "when not matched and t.a >= 8 then insert values(s.a, s.b, s.p, s.q)", true));//insert p=1/q=2, p=1/q=3 and new part 1/1 + driver.compileAndRespond("merge into target t using source2 s on t.a=s.b " + + "when matched and t.a=" + (cc ? 5 : 9) + " then update set b=s.b " + //if conflict updates p=1/q=3 else update p=1/q=2 + "when matched and t.a in (" + (cc ? 
"3,7" : "11, 13") + ") then delete " + //if cc deletes from p=1/q=2, p=2/q=2, else delete nothing + "when not matched and t.a >= 8 then insert values(s.a, s.b, s.p, s.q)", true); //insert p=1/q=2, p=1/q=3 and new part 1/1 long txnId2 = txnMgr2.getCurrentTxnId(); txnMgr2.acquireLocks(driver.getPlan(), ctx, "T1", false); locks = getLocks(); @@ -1757,60 +1658,60 @@ private void testMerge3Way(boolean cc) throws Exception { checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "target", "p=2/q=2", locks); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 0, - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1)); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 0, + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1)); //complete 1st txn long writeId = txnMgr.getTableWriteId("default", "target"); AddDynamicPartitions adp = new AddDynamicPartitions(txnId1, writeId, "default", "target", - Collections.singletonList("p=1/q=3"));//update clause + Collections.singletonList("p=1/q=3")); //update clause adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); adp = new AddDynamicPartitions(txnId1, writeId, "default", "target", - Arrays.asList("p=1/q=2","p=2/q=2"));//delete clause + Arrays.asList("p=1/q=2", "p=2/q=2")); //delete clause adp.setOperationType(DataOperationType.DELETE); txnHandler.addDynamicPartitions(adp); adp = new AddDynamicPartitions(txnId1, writeId, "default", "target", - Arrays.asList("p=1/q=2","p=1/q=3","p=1/q=1"));//insert clause + Arrays.asList("p=1/q=2", "p=1/q=3", "p=1/q=1")); //insert clause adp.setOperationType(DataOperationType.INSERT); txnHandler.addDynamicPartitions(adp); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 1, - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + - " and tc_operation_type='u'")); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 1, + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + + " and tc_operation_type='u'")); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 2, - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + - " and tc_operation_type='d'")); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 2, + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + + " and tc_operation_type='d'")); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 3, - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + - " and tc_operation_type='i'")); - txnMgr.commitTxn();//commit T1 + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 
+ 3, + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + + " and tc_operation_type='i'")); + txnMgr.commitTxn(); //commit T1 Assert.assertEquals( - "COMPLETED_TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 6, - TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + txnId1)); + "COMPLETED_TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), + 6, + TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + txnId1)); Assert.assertEquals( - "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1, - TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId1 + - " and ws_operation_type='u'")); + "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), + 1, + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId1 + + " and ws_operation_type='u'")); Assert.assertEquals( - "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 2, - TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId1 + - " and ws_operation_type='d'")); + "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), + 2, + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId1 + + " and ws_operation_type='d'")); //re-check locks which were in Waiting state - should now be Acquired ((DbLockManager)txnMgr2.getLockManager()).checkLock(extLockId); @@ -1823,48 +1724,48 @@ private void testMerge3Way(boolean cc) throws Exception { checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "target", "p=2/q=2", locks); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 0, - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2)); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 0, + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2)); //complete 2nd txn writeId = txnMgr2.getTableWriteId("default", "target"); adp = new AddDynamicPartitions(txnId2, writeId, "default", "target", - Collections.singletonList(cc ? "p=1/q=3" : "p=1/p=2"));//update clause + Collections.singletonList(cc ? 
"p=1/q=3" : "p=1/p=2")); //update clause adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); if(cc) { adp = new AddDynamicPartitions(txnId2, writeId, "default", "target", - Arrays.asList("p=1/q=2", "p=2/q=2"));//delete clause + Arrays.asList("p=1/q=2", "p=2/q=2")); //delete clause adp.setOperationType(DataOperationType.DELETE); txnHandler.addDynamicPartitions(adp); } adp = new AddDynamicPartitions(txnId2, writeId, "default", "target", - Arrays.asList("p=1/q=2","p=1/q=3","p=1/q=1"));//insert clause + Arrays.asList("p=1/q=2", "p=1/q=3", "p=1/q=1")); //insert clause adp.setOperationType(DataOperationType.INSERT); txnHandler.addDynamicPartitions(adp); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 1, - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2 + - " and tc_operation_type='u'")); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 1, + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2 + + " and tc_operation_type='u'")); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - (cc ? 2 : 0), - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2 + - " and tc_operation_type='d'")); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + (cc ? 2 : 0), + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2 + + " and tc_operation_type='d'")); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 3, - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2 + - " and tc_operation_type='i'")); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 3, + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2 + + " and tc_operation_type='i'")); LockException expectedException = null; try { - txnMgr2.commitTxn();//commit T2 + txnMgr2.commitTxn(); //commit T2 } catch (LockException e) { expectedException = e; @@ -1884,35 +1785,35 @@ private void testMerge3Way(boolean cc) throws Exception { "committed by [txnid:10,11] d/d", expectedException.getMessage()); } Assert.assertEquals( - "COMPLETED_TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 0, - TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + txnId2)); + "COMPLETED_TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), + 0, + TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + txnId2)); Assert.assertEquals( - "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 0, - TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" 
+ txnId2)); + "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), + 0, + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId2)); } else { Assert.assertNull("Unexpected exception " + expectedException, expectedException); Assert.assertEquals( - "COMPLETED_TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 4, - TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + txnId2)); + "COMPLETED_TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), + 4, + TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + txnId2)); Assert.assertEquals( - "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1, - TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId2 + - " and ws_operation_type='u'")); + "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), + 1, + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId2 + + " and ws_operation_type='u'")); Assert.assertEquals( - "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 0, - TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId2 + - " and ws_operation_type='d'")); + "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), + 0, + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId2 + + " and ws_operation_type='d'")); } @@ -1935,24 +1836,24 @@ public void testMergeUnpartitioned02() throws Exception { */ private void testMergeUnpartitioned(boolean causeConflict) throws Exception { dropTable(new String[] {"target","source"}); - checkCmdOnDriver(driver.run("create table target (a int, b int) " + - "clustered by (a) into 2 buckets " + - "stored as orc TBLPROPERTIES ('transactional'='true')")); - checkCmdOnDriver(driver.run("insert into target values (1,2), (3,4), (5,6), (7,8)")); - checkCmdOnDriver(driver.run("create table source (a int, b int)")); + driver.run("create table target (a int, b int) " + + "clustered by (a) into 2 buckets " + + "stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("insert into target values (1,2), (3,4), (5,6), (7,8)"); + driver.run("create table source (a int, b int)"); if(causeConflict) { - checkCmdOnDriver(driver.compileAndRespond("update target set b = 2 where a=1", true)); + driver.compileAndRespond("update target set b = 2 where a=1", true); } else { - checkCmdOnDriver(driver.compileAndRespond("insert into target values(9,10),(11,12)", true)); + driver.compileAndRespond("insert into target values(9,10),(11,12)", true); } long txnid1 = txnMgr.getCurrentTxnId(); txnMgr.acquireLocks(driver.getPlan(), ctx, "T1"); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 1,//no DP, so it's populated from lock info - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid1)); 
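The conflict branches of testMerge3Way and testMergeUnpartitioned share one shape: the second transaction's commit is expected to fail with a write-conflict abort, so the test captures the LockException and asserts the canonical TXN_ABORTED error (plus the exact "Aborting [txnid:...]" message, omitted here). A condensed standalone sketch of that shape, assuming the conflicting transaction manager has already been set up as in the surrounding tests; the helper class name and import paths are my assumption, not taken from the patch.

import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.junit.Assert;

// Hypothetical helper (not in the patch) condensing the expected-conflict commit check.
final class WriteConflictAbortSketch {

  // txnMgr2 is assumed to hold an open transaction whose writes conflict with
  // data already committed by an overlapping transaction.
  static void expectWriteConflictAbort(HiveTxnManager txnMgr2) {
    LockException expectedException = null;
    try {
      txnMgr2.commitTxn(); // second writer commits after the first already committed a conflicting write
    } catch (LockException e) {
      expectedException = e;
    }
    Assert.assertNotNull("Didn't get exception", expectedException);
    Assert.assertEquals("Got wrong message code",
        ErrorMsg.TXN_ABORTED, expectedException.getCanonicalErrorMsg());
  }
}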
+ "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 1, //no DP, so it's populated from lock info + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid1)); List locks = getLocks(txnMgr); if (causeConflict) { @@ -1967,10 +1868,10 @@ private void testMergeUnpartitioned(boolean causeConflict) throws Exception { DbTxnManager txnMgr2 = (DbTxnManager) TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); //start a 2nd (overlapping) txn - checkCmdOnDriver(driver.compileAndRespond("merge into target t using source s " + - "on t.a=s.a " + - "when matched then delete " + - "when not matched then insert values(s.a,s.b)", true)); + driver.compileAndRespond("merge into target t using source s " + + "on t.a=s.a " + + "when matched then delete " + + "when not matched then insert values(s.a,s.b)", true); long txnid2 = txnMgr2.getCurrentTxnId(); txnMgr2.acquireLocks(driver.getPlan(), ctx, "T2", false); locks = getLocks(); @@ -1985,13 +1886,13 @@ private void testMergeUnpartitioned(boolean causeConflict) throws Exception { checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "_dummy_database", "_dummy_table", null, locks); } - txnMgr.commitTxn();//commit T1 + txnMgr.commitTxn(); //commit T1 Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - causeConflict ? 1 : 0,//Inserts are not tracked by WRITE_SET - TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnid1 + - " and ws_operation_type=" + (causeConflict ? "'u'" : "'i'"))); + causeConflict ? 1 : 0, //Inserts are not tracked by WRITE_SET + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnid1 + + " and ws_operation_type=" + (causeConflict ? 
"'u'" : "'i'"))); //re-check locks which were in Waiting state - should now be Acquired @@ -2002,16 +1903,16 @@ private void testMergeUnpartitioned(boolean causeConflict) throws Exception { checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "target", null, locks); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 1,// - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2)); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 1, // + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2)); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 1,// - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + - "and tc_operation_type='d'")); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 1, // + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + + "and tc_operation_type='d'")); //complete T2 txn LockException expectedException = null; @@ -2025,15 +1926,15 @@ private void testMergeUnpartitioned(boolean causeConflict) throws Exception { Assert.assertTrue("Didn't get exception", expectedException != null); Assert.assertEquals("Got wrong message code", ErrorMsg.TXN_ABORTED, expectedException.getCanonicalErrorMsg()); Assert.assertEquals("Exception msg didn't match", - "Aborting [txnid:7,7] due to a write conflict on default/target committed by [txnid:6,7] d/u", - expectedException.getCause().getMessage()); + "Aborting [txnid:7,7] due to a write conflict on default/target committed by [txnid:6,7] d/u", + expectedException.getCause().getMessage()); } else { Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1,//Unpartitioned table: 1 row for Delete; Inserts are not tracked in WRITE_SET - TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnid2 + - " and ws_operation_type='d'")); + 1, //Unpartitioned table: 1 row for Delete; Inserts are not tracked in WRITE_SET + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnid2 + + " and ws_operation_type='d'")); } } /** @@ -2043,11 +1944,11 @@ private void testMergeUnpartitioned(boolean causeConflict) throws Exception { @Test public void testDynamicPartitionInsert() throws Exception { dropTable(new String[] {"target"}); - checkCmdOnDriver(driver.run("create table target (a int, b int) " + - "partitioned by (p int, q int) clustered by (a) into 2 buckets " + - "stored as orc TBLPROPERTIES ('transactional'='true')")); + driver.run("create table target (a int, b int) " + + "partitioned by (p int, q int) clustered by (a) into 2 buckets " + + "stored as orc TBLPROPERTIES ('transactional'='true')"); long txnid1 = txnMgr.openTxn(ctx, "T1"); - checkCmdOnDriver(driver.compileAndRespond("insert into target partition(p=1,q) values (1,2,2), (3,4,2), (5,6,3), (7,8,2)", true)); + driver.compileAndRespond("insert into target partition(p=1,q) values (1,2,2), (3,4,2), (5,6,3), (7,8,2)", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "T1"); List 
locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -2055,32 +1956,32 @@ public void testDynamicPartitionInsert() throws Exception { checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "target", null, locks); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "_dummy_database", "_dummy_table", null, locks); Assert.assertEquals( - "HIVE_LOCKS mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + - TxnDbUtil.queryToString(conf, "select * from HIVE_LOCKS"), - 2, - TxnDbUtil.countQueryAgent(conf, "select count(*) from HIVE_LOCKS where hl_txnid=" + txnid1)); + "HIVE_LOCKS mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + + TxnDbUtil.queryToString(conf, "select * from HIVE_LOCKS"), + 2, + TxnDbUtil.countQueryAgent(conf, "select count(*) from HIVE_LOCKS where hl_txnid=" + txnid1)); txnMgr.rollbackTxn(); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 0, - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid1)); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 0, + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid1)); //now actually write to table to generate some partitions - checkCmdOnDriver(driver.run("insert into target partition(p=1,q) values (1,2,2), (3,4,2), (5,6,3), (7,8,2)")); + driver.run("insert into target partition(p=1,q) values (1,2,2), (3,4,2), (5,6,3), (7,8,2)"); driver.run("select count(*) from target"); List r = new ArrayList<>(); driver.getResults(r); Assert.assertEquals("", "4", r.get(0)); Assert.assertEquals(//look in COMPLETED_TXN_COMPONENTS because driver.run() committed!!!! - "COMPLETED_TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid1 + 1) + "): " + + "COMPLETED_TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid1 + 1) + "): " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), - 2,//2 distinct partitions created - //txnid+1 because we want txn used by previous driver.run("insert....) - TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + (txnid1 + 1))); + 2, //2 distinct partitions created + //txnid+1 because we want txn used by previous driver.run("insert....) 
+ TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + (txnid1 + 1))); long txnid2 = txnMgr.openTxn(ctx, "T1"); - checkCmdOnDriver(driver.compileAndRespond("insert into target partition(p=1,q) values (10,2,2), (30,4,2), (50,6,3), (70,8,2)", true)); + driver.compileAndRespond("insert into target partition(p=1,q) values (10,2,2), (30,4,2), (50,6,3), (70,8,2)", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "T1"); locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -2092,10 +1993,10 @@ public void testDynamicPartitionInsert() throws Exception { adp.setOperationType(DataOperationType.INSERT); txnHandler.addDynamicPartitions(adp); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 2,//2 distinct partitions modified - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2)); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 2, //2 distinct partitions modified + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2)); txnMgr.commitTxn(); } @Test @@ -2114,13 +2015,13 @@ public void testMergePartitioned02() throws Exception { */ private void testMergePartitioned(boolean causeConflict) throws Exception { dropTable(new String[] {"target","source"}); - checkCmdOnDriver(driver.run("create table target (a int, b int) " + - "partitioned by (p int, q int) clustered by (a) into 2 buckets " + - "stored as orc TBLPROPERTIES ('transactional'='true')")); - checkCmdOnDriver(driver.run("insert into target partition(p,q) values (1,2,1,2), (3,4,1,2), (5,6,1,3), (7,8,2,2)")); - checkCmdOnDriver(driver.run("create table source (a1 int, b1 int, p1 int, q1 int)")); + driver.run("create table target (a int, b int) " + + "partitioned by (p int, q int) clustered by (a) into 2 buckets " + + "stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("insert into target partition(p,q) values (1,2,1,2), (3,4,1,2), (5,6,1,3), (7,8,2,2)"); + driver.run("create table source (a1 int, b1 int, p1 int, q1 int)"); - checkCmdOnDriver(driver.compileAndRespond("update target set b = 2 where p=1", true)); + driver.compileAndRespond("update target set b = 2 where p=1", true); long txnId1 = txnMgr.getCurrentTxnId(); txnMgr.acquireLocks(driver.getPlan(), ctx, "T1"); List locks = getLocks(); @@ -2131,10 +2032,10 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { DbTxnManager txnMgr2 = (DbTxnManager) TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); //start a 2nd (overlapping) txn - checkCmdOnDriver(driver.compileAndRespond("merge into target using source " + - "on target.p=source.p1 and target.a=source.a1 " + - "when matched then update set b = 11 " + - "when not matched then insert values(a1,b1,p1,q1)", true)); + driver.compileAndRespond("merge into target using source " + + "on target.p=source.p1 and target.a=source.a1 " + + "when matched then update set b = 11 " + + "when not matched then insert values(a1,b1,p1,q1)", true); long txnid2 = txnMgr2.getCurrentTxnId(); txnMgr2.acquireLocks(driver.getPlan(), ctx, "T2", false); locks = getLocks(txnMgr); @@ -2156,28 +2057,28 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { checkLock(LockType.SHARED_WRITE, LockState.WAITING, 
"default", "target", "p=2/q=2", locks); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 0,//because it's using a DP write - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1)); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 0, //because it's using a DP write + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1)); //complete T1 transaction (simulate writing to 2 partitions) long writeId = txnMgr.getTableWriteId("default", "target"); AddDynamicPartitions adp = new AddDynamicPartitions(txnId1, writeId, "default", "target", - Arrays.asList("p=1/q=2","p=1/q=3")); + Arrays.asList("p=1/q=2", "p=1/q=3")); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 2, - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + - " and tc_operation_type='u'")); - txnMgr.commitTxn();//commit T1 + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 2, + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + + " and tc_operation_type='u'")); + txnMgr.commitTxn(); //commit T1 Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 2,//2 partitions updated - TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId1 + - " and ws_operation_type='u'")); + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), + 2, //2 partitions updated + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId1 + + " and ws_operation_type='u'")); //re-check locks which were in Waiting state - should now be Acquired @@ -2192,10 +2093,10 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 0,//because it's using a DP write - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2)); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 0, //because it's using a DP write + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2)); //complete T2 txn //simulate Insert into 2 partitions writeId = txnMgr2.getTableWriteId("default", "target"); @@ -2204,10 +2105,10 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { adp.setOperationType(DataOperationType.INSERT); txnHandler.addDynamicPartitions(adp); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 2, - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + " and tc_operation_type='i'")); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + + 
TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 2, + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + " and tc_operation_type='i'")); //simulate Update of 1 partitions; depending on causeConflict, choose one of the partitions //which was modified by the T1 update stmt or choose a non-conflicting one adp = new AddDynamicPartitions(txnid2, writeId, "default", "target", @@ -2215,10 +2116,10 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); Assert.assertEquals( - "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 1, - TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + " and tc_operation_type='u'")); + "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), + 1, + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + " and tc_operation_type='u'")); LockException expectedException = null; @@ -2232,19 +2133,19 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { Assert.assertTrue("Didn't get exception", expectedException != null); Assert.assertEquals("Got wrong message code", ErrorMsg.TXN_ABORTED, expectedException.getCanonicalErrorMsg()); Assert.assertEquals("Exception msg didn't match", - "Aborting [txnid:7,7] due to a write conflict on default/target/p=1/q=2 committed by [txnid:6,7] u/u", - expectedException.getCause().getMessage()); + "Aborting [txnid:7,7] due to a write conflict on default/target/p=1/q=2 committed by [txnid:6,7] u/u", + expectedException.getCause().getMessage()); } else { Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1,//1 partitions updated - TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnid2 + - " and ws_operation_type='u'")); + 1, //1 partitions updated + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnid2 + + " and ws_operation_type='u'")); Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), - 1,//1 partitions updated (and no other entries) - TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnid2)); + 1, //1 partitions updated (and no other entries) + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnid2)); } } @@ -2256,12 +2157,10 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { @Test public void testShowTablesLock() throws Exception { dropTable(new String[] {"T", "T2"}); - CommandProcessorResponse cpr = driver.run( - "create table T (a int, b int)"); - checkCmdOnDriver(cpr); + driver.run("create table T (a int, b int)"); long txnid1 = txnMgr.openTxn(ctx, "Fifer"); - checkCmdOnDriver(driver.compileAndRespond("insert into T values(1,3)", true)); + driver.compileAndRespond("insert into T values(1,3)", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -2271,7 +2170,7 @@ public void testShowTablesLock() throws Exception { DbTxnManager txnMgr2 = (DbTxnManager) 
TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); txnMgr2.openTxn(ctx, "Fidler"); swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.compileAndRespond("show tables", true)); + driver.compileAndRespond("show tables", true); txnMgr2.acquireLocks(driver.getPlan(), ctx, "Fidler"); locks = getLocks(); Assert.assertEquals("Unexpected lock count", 3, locks.size()); @@ -2285,12 +2184,11 @@ public void testShowTablesLock() throws Exception { swapTxnManager(txnMgr); - cpr = driver.run( - "create table T2 (a int, b int) partitioned by (p int) clustered by (a) " + + driver.run( + "create table T2 (a int, b int) partitioned by (p int) clustered by (a) " + "into 2 buckets stored as orc TBLPROPERTIES ('transactional'='false')"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.compileAndRespond("insert into T2 partition(p=1) values(1,3)", true)); + driver.compileAndRespond("insert into T2 partition(p=1) values(1,3)", true); txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); locks = getLocks(); Assert.assertEquals("Unexpected lock count", 2, locks.size()); @@ -2300,7 +2198,7 @@ public void testShowTablesLock() throws Exception { txnMgr2 = (DbTxnManager) TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); txnMgr2.openTxn(ctx, "Fidler"); swapTxnManager(txnMgr2); - checkCmdOnDriver(driver.compileAndRespond("show tables", true)); + driver.compileAndRespond("show tables", true); txnMgr2.acquireLocks(driver.getPlan(), ctx, "Fidler", false); locks = getLocks(); Assert.assertEquals("Unexpected lock count", 3, locks.size()); @@ -2315,15 +2213,12 @@ public void testShowTablesLock() throws Exception { @Test public void testFairness() throws Exception { dropTable(new String[] {"T6"}); - CommandProcessorResponse cpr = driver.run("create table if not exists T6(a int)"); - checkCmdOnDriver(cpr); - cpr = driver.compileAndRespond("select a from T6", true); - checkCmdOnDriver(cpr); - txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");//gets S lock on T6 + driver.run("create table if not exists T6(a int)"); + driver.compileAndRespond("select a from T6", true); + txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); //gets S lock on T6 HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - cpr = driver.compileAndRespond("drop table if exists T6", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("drop table if exists T6", true); //tries to get X lock on T6 and gets Waiting state LockState lockState = ((DbTxnManager) txnMgr2).acquireLocks(driver.getPlan(), ctx, "Fiddler", false); List locks = getLocks(); @@ -2335,9 +2230,8 @@ public void testFairness() throws Exception { swapTxnManager(txnMgr3); //this should block behind the X lock on T6 //this is a contrived example, in practice this query would of course fail after drop table - cpr = driver.compileAndRespond("select a from T6", true); - checkCmdOnDriver(cpr); - ((DbTxnManager)txnMgr3).acquireLocks(driver.getPlan(), ctx, "Fifer", false);//gets S lock on T6 + driver.compileAndRespond("select a from T6", true); + ((DbTxnManager)txnMgr3).acquireLocks(driver.getPlan(), ctx, "Fifer", false); //gets S lock on T6 locks = getLocks(); Assert.assertEquals("Unexpected lock count", 3, locks.size()); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T6", null, locks); @@ -2358,18 +2252,14 @@ public void testFairness() throws Exception { @Test public void testFairness2() throws Exception { dropTable(new String[]{"T7"}); - CommandProcessorResponse cpr = driver.run("create table if 
not exists T7 (a int) " - + "partitioned by (p int) stored as orc TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.run( - "insert into T7 partition(p) values(1,1),(1,2)"));//create 2 partitions - cpr = driver.compileAndRespond("select a from T7 ", true); - checkCmdOnDriver(cpr); - txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");//gets S lock on T7 + driver.run("create table if not exists T7 (a int) " + + "partitioned by (p int) stored as orc TBLPROPERTIES ('transactional'='true')"); + driver.run("insert into T7 partition(p) values(1,1),(1,2)"); //create 2 partitions + driver.compileAndRespond("select a from T7 ", true); + txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); //gets S lock on T7 HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr2); - cpr = driver.compileAndRespond("alter table T7 drop partition (p=1)", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("alter table T7 drop partition (p=1)", true); //tries to get X lock on T7.p=1 and gets Waiting state LockState lockState = ((DbTxnManager) txnMgr2).acquireLocks(driver.getPlan(), ctx, "Fiddler", false); @@ -2383,8 +2273,7 @@ public void testFairness2() throws Exception { HiveTxnManager txnMgr3 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); swapTxnManager(txnMgr3); //this should block behind the X lock on T7.p=1 - cpr = driver.compileAndRespond("select a from T7", true); - checkCmdOnDriver(cpr); + driver.compileAndRespond("select a from T7", true); //tries to get S lock on T7, S on T7.p=1 and S on T7.p=2 ((DbTxnManager)txnMgr3).acquireLocks(driver.getPlan(), ctx, "Fifer", false); locks = getLocks(); @@ -2397,7 +2286,7 @@ public void testFairness2() throws Exception { checkLock(LockType.SHARED_READ, LockState.WAITING, "default", "T7", "p=2", locks); checkLock(LockType.EXCLUSIVE, LockState.WAITING, "default", "T7", "p=1", locks); - txnMgr.commitTxn();//release locks from "select a from T7" - to unblock hte drop partition + txnMgr.commitTxn(); //release locks from "select a from T7" - to unblock hte drop partition //retest the the "drop partiton" X lock lockState = ((DbLockManager)txnMgr2.getLockManager()).checkLock(locks.get(6).getLockid()); locks = getLocks(); @@ -2407,9 +2296,9 @@ public void testFairness2() throws Exception { checkLock(LockType.SHARED_READ, LockState.WAITING, "default", "T7", "p=1", locks); checkLock(LockType.SHARED_READ, LockState.WAITING, "default", "T7", "p=2", locks); - txnMgr2.rollbackTxn();//release the X lock on T7.p=1 + txnMgr2.rollbackTxn(); //release the X lock on T7.p=1 //re-test the locks - lockState = ((DbLockManager)txnMgr2.getLockManager()).checkLock(locks.get(1).getLockid());//S lock on T7 + lockState = ((DbLockManager)txnMgr2.getLockManager()).checkLock(locks.get(1).getLockid()); //S lock on T7 locks = getLocks(); Assert.assertEquals("Unexpected lock count", 3, locks.size()); checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T7", null, locks); @@ -2422,11 +2311,9 @@ public void testFairness2() throws Exception { public void testValidWriteIdListSnapshot() throws Exception { // Create a transactional table dropTable(new String[] {"temp.T7"}); - CommandProcessorResponse cpr = driver.run("create database if not exists temp"); - checkCmdOnDriver(cpr); - cpr = driver.run("create table if not exists temp.T7(a int, b int) clustered by(b) into 2 buckets stored as orc " - + "TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); + driver.run("create 
database if not exists temp"); + driver.run("create table if not exists temp.T7(a int, b int) clustered by(b) into 2 buckets stored as orc " + + "TBLPROPERTIES ('transactional'='true')"); // Open a base txn which allocates write ID and then committed. long baseTxnId = txnMgr.openTxn(ctx, "u0"); @@ -2464,7 +2351,7 @@ public void testValidWriteIdListSnapshot() throws Exception { // Verify the ValidWriteIdList with one open txn on this table. Write ID of open txn should be invalid. testValidWriteIds = txnMgr2.getValidWriteIds(Collections.singletonList("temp.t7"), testValidTxns) - .getTableValidWriteIdList("temp.t7"); + .getTableValidWriteIdList("temp.t7"); Assert.assertEquals(underHwmOpenWriteId, testValidWriteIds.getHighWatermark()); Assert.assertTrue("Invalid write ID list", testValidWriteIds.isWriteIdValid(baseWriteId)); Assert.assertFalse("Invalid write ID list", testValidWriteIds.isWriteIdValid(underHwmOpenWriteId)); @@ -2474,7 +2361,7 @@ public void testValidWriteIdListSnapshot() throws Exception { // Verify the writeId of this committed txn should be invalid for test txn. txnMgr1.commitTxn(); testValidWriteIds = txnMgr2.getValidWriteIds(Collections.singletonList("temp.t7"), testValidTxns) - .getTableValidWriteIdList("temp.t7"); + .getTableValidWriteIdList("temp.t7"); Assert.assertEquals(underHwmOpenWriteId, testValidWriteIds.getHighWatermark()); Assert.assertTrue("Invalid write ID list", testValidWriteIds.isWriteIdValid(baseWriteId)); Assert.assertFalse("Invalid write ID list", testValidWriteIds.isWriteIdValid(underHwmOpenWriteId)); @@ -2487,7 +2374,7 @@ public void testValidWriteIdListSnapshot() throws Exception { Assert.assertEquals(4, testWriteId); testValidWriteIds = txnMgr2.getValidWriteIds(Collections.singletonList("temp.t7"), testValidTxns) - .getTableValidWriteIdList("temp.t7"); + .getTableValidWriteIdList("temp.t7"); Assert.assertEquals(testWriteId, testValidWriteIds.getHighWatermark()); Assert.assertTrue("Invalid write ID list", testValidWriteIds.isWriteIdValid(baseWriteId)); Assert.assertTrue("Invalid write ID list", testValidWriteIds.isWriteIdValid(testWriteId)); @@ -2496,8 +2383,7 @@ public void testValidWriteIdListSnapshot() throws Exception { txnMgr2.commitTxn(); txnMgr3.commitTxn(); - cpr = driver.run("drop database if exists temp cascade"); - checkCmdOnDriver(cpr); + driver.run("drop database if exists temp cascade"); } @Rule public TemporaryFolder exportFolder = new TemporaryFolder(); @@ -2507,23 +2393,17 @@ public void testValidWriteIdListSnapshot() throws Exception { @Test public void testAddPartitionLocks() throws Exception { dropTable(new String[] {"T", "Tstage"}); - CommandProcessorResponse cpr = driver.run("create table T (a int, b int) partitioned by (p int) " + + driver.run("create table T (a int, b int) partitioned by (p int) " + "stored as orc tblproperties('transactional'='true')"); - checkCmdOnDriver(cpr); //bucketed just so that we get 2 files - cpr = driver.run("create table Tstage (a int, b int) clustered by (a) into 2 " + + driver.run("create table Tstage (a int, b int) clustered by (a) into 2 " + "buckets stored as orc tblproperties('transactional'='false')"); - checkCmdOnDriver(cpr); - cpr = driver.run("insert into Tstage values(0,2),(1,4)"); - checkCmdOnDriver(cpr); + driver.run("insert into Tstage values(0,2),(1,4)"); String exportLoc = exportFolder.newFolder("1").toString(); - cpr = driver.run("export table Tstage to '" + exportLoc + "'"); - checkCmdOnDriver(cpr); + driver.run("export table Tstage to '" + exportLoc + "'"); - cpr = 
driver.compileAndRespond("ALTER TABLE T ADD if not exists PARTITION (p=0)" + - " location '" + exportLoc + "/data'", true); - checkCmdOnDriver(cpr); - txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");//gets X lock on T + driver.compileAndRespond("ALTER TABLE T ADD if not exists PARTITION (p=0) location '" + exportLoc + "/data'", true); + txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); //gets X lock on T List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 1, locks.size()); @@ -2532,15 +2412,11 @@ public void testAddPartitionLocks() throws Exception { @Test public void testLoadData() throws Exception { dropTable(new String[] {"T2"}); - CommandProcessorResponse cpr = driver.run("create table T2(a int) " + - "stored as ORC TBLPROPERTIES ('transactional'='true')"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.run("insert into T2 values(1)")); + driver.run("create table T2(a int) stored as ORC TBLPROPERTIES ('transactional'='true')"); + driver.run("insert into T2 values(1)"); String exportLoc = exportFolder.newFolder("1").toString(); - checkCmdOnDriver(driver.run("export table T2 to '" + exportLoc + "/2'")); - cpr = driver.compileAndRespond( - "load data inpath '" + exportLoc + "/2/data' overwrite into table T2"); - checkCmdOnDriver(cpr); + driver.run("export table T2 to '" + exportLoc + "/2'"); + driver.compileAndRespond("load data inpath '" + exportLoc + "/2/data' overwrite into table T2"); txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 1, locks.size()); @@ -2550,15 +2426,12 @@ public void testLoadData() throws Exception { @Test public void testMmConversionLocks() throws Exception { dropTable(new String[] {"T"}); - CommandProcessorResponse cpr = driver.run("create table T (a int, b int) tblproperties('transactional'='false')"); - checkCmdOnDriver(cpr); - cpr = driver.run("insert into T values(0,2),(1,4)"); - checkCmdOnDriver(cpr); + driver.run("create table T (a int, b int) tblproperties('transactional'='false')"); + driver.run("insert into T values(0,2),(1,4)"); - cpr = driver.compileAndRespond("ALTER TABLE T set tblproperties" + driver.compileAndRespond("ALTER TABLE T set tblproperties" + "('transactional'='true', 'transactional_properties'='insert_only')", true); - checkCmdOnDriver(cpr); - txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");//gets X lock on T + txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); //gets X lock on T List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 1, locks.size()); @@ -2567,14 +2440,11 @@ public void testMmConversionLocks() throws Exception { @Test public void testTruncate() throws Exception { dropTable(new String[] {"T"}); - CommandProcessorResponse cpr = driver.run("create table T (a int, b int) stored as" + - " orc tblproperties('transactional'='true')"); - checkCmdOnDriver(cpr); - checkCmdOnDriver(driver.run("insert into T values(0,2),(1,4)")); - checkCmdOnDriver(driver.run("truncate table T")); - cpr = driver.compileAndRespond("truncate table T"); - checkCmdOnDriver(cpr); - txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");//gets X lock on T + driver.run("create table T (a int, b int) stored as orc tblproperties('transactional'='true')"); + driver.run("insert into T values(0,2),(1,4)"); + driver.run("truncate table T"); + driver.compileAndRespond("truncate table T"); + txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); //gets X lock on T List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 1, 
locks.size()); checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", "T", null, locks); diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestColumnAccess.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestColumnAccess.java index 98121e98d4..c4e336a871 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/TestColumnAccess.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestColumnAccess.java @@ -39,12 +39,9 @@ @BeforeClass public static void Setup() throws Exception { Driver driver = createDriver(); - int ret = driver.run("create table t1(id1 int, name1 string)").getResponseCode(); - Assert.assertEquals("Checking command success", 0, ret); - ret = driver.run("create table t2(id2 int, id1 int, name2 string)").getResponseCode(); - Assert.assertEquals("Checking command success", 0, ret); - ret = driver.run("create view v1 as select * from t1").getResponseCode(); - Assert.assertEquals("Checking command success", 0, ret); + driver.run("create table t1(id1 int, name1 string)"); + driver.run("create table t2(id2 int, id1 int, name2 string)"); + driver.run("create view v1 as select * from t1"); } @AfterClass diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java index e4535ca6e1..c81ab2d6a7 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestHiveDecimalParse.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hive.ql.ddl.DDLTask; import org.apache.hadoop.hive.ql.ddl.DDLWork; import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.Assert; import org.junit.Test; @@ -60,7 +60,7 @@ public void testDecimalType3() throws ParseException { Driver driver = createDriver(); try { driver.compile(query, true, false); - } catch (CommandProcessorResponse cpe) { + } catch (CommandProcessorException cpe) { Assert.assertTrue("Got " + cpe.getResponseCode() + ", expected not zero", cpe.getResponseCode() != 0); Assert.assertTrue(cpe.getErrorMessage(), cpe.getErrorMessage().contains("Decimal precision out of allowed range [1,38]")); @@ -76,7 +76,7 @@ public void testDecimalType4() throws ParseException { Driver driver = createDriver(); try { driver.compile(query, true, false); - } catch (CommandProcessorResponse cpe) { + } catch (CommandProcessorException cpe) { Assert.assertTrue("Got " + cpe.getResponseCode() + ", expected not zero", cpe.getResponseCode() != 0); Assert.assertTrue(cpe.getErrorMessage(), cpe.getErrorMessage().contains("Decimal precision out of allowed range [1,38]")); @@ -92,7 +92,7 @@ public void testDecimalType5() throws ParseException { Driver driver = createDriver(); try { driver.compile(query, true, false); - } catch (CommandProcessorResponse cpe) { + } catch (CommandProcessorException cpe) { Assert.assertTrue("Got " + cpe.getResponseCode() + ", expected not zero", cpe.getResponseCode() != 0); Assert.assertTrue(cpe.getErrorMessage(), cpe.getErrorMessage().contains("Decimal scale must be less than or equal to precision")); @@ -108,7 +108,7 @@ public void testDecimalType6() throws ParseException { Driver driver = createDriver(); try { driver.compile(query, true, false); - } catch (CommandProcessorResponse cpe) { + } catch (CommandProcessorException cpe) { Assert.assertTrue("Got " + 
cpe.getResponseCode() + ", expected not zero", cpe.getResponseCode() != 0); Assert.assertTrue(cpe.getErrorMessage(), cpe.getErrorMessage().contains("extraneous input '-' expecting Number")); @@ -124,7 +124,7 @@ public void testDecimalType7() throws ParseException { Driver driver = createDriver(); try { driver.compile(query, true, false); - } catch (CommandProcessorResponse cpe) { + } catch (CommandProcessorException cpe) { Assert.assertTrue("Got " + cpe.getResponseCode() + ", expected not zero", cpe.getResponseCode() != 0); Assert.assertTrue(cpe.getErrorMessage(), cpe.getErrorMessage().contains("missing ) at ',' near ',' in column name or constraint")); @@ -140,7 +140,7 @@ public void testDecimalType8() throws ParseException { Driver driver = createDriver(); try { driver.compile(query, true, false); - } catch (CommandProcessorResponse cpe) { + } catch (CommandProcessorException cpe) { Assert.assertTrue("Got " + cpe.getResponseCode() + ", expected not zero", cpe.getResponseCode() != 0); Assert.assertTrue(cpe.getErrorMessage(), cpe.getErrorMessage().contains("mismatched input '7a' expecting Number near '('")); @@ -156,7 +156,7 @@ public void testDecimalType9() throws ParseException { Driver driver = createDriver(); try { driver.compile(query, true, false); - } catch (CommandProcessorResponse cpe) { + } catch (CommandProcessorException cpe) { Assert.assertTrue("Got " + cpe.getResponseCode() + ", expected not zero", cpe.getResponseCode() != 0); Assert.assertTrue(cpe.getErrorMessage(), cpe.getErrorMessage().contains("Decimal scale must be less than or equal to precision")); diff --git ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java index bfa0efef77..57508b3188 100644 --- ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java +++ ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java @@ -50,10 +50,8 @@ @BeforeClass public static void onetimeSetup() throws Exception { Driver driver = createDriver(); - int ret = driver.run("create table t1(i int)").getResponseCode(); - assertEquals("Checking command success", 0, ret); - ret = driver.run("create view v1 as select * from t1").getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("create table t1(i int)"); + driver.run("create view v1 as select * from t1"); } @AfterClass diff --git ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java index 78366259b2..cbf1c83c8a 100644 --- ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java +++ ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook; import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -84,14 +85,10 @@ public void testUnionView() throws Exception { final String tab1 = prefix + "t1"; final String tab2 = prefix + "t2"; final String view1 = prefix + "v1"; - int ret = driver.run("create table " + tab1 + "(id int)").getResponseCode(); - assertEquals("Checking command success", 0, ret); - ret = driver.run("create table " + tab2 + "(id int)").getResponseCode(); - assertEquals("Checking command success", 0, ret); - ret = 
driver.run("create view " + view1 + " as select t.id from " - + "(select " + tab1 + ".id from " + tab1 + " union all select " + tab2 + ".id from " + tab2 + ") as t") - .getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("create table " + tab1 + "(id int)"); + driver.run("create table " + tab2 + "(id int)"); + driver.run("create view " + view1 + " as select t.id from " + + "(select " + tab1 + ".id from " + tab1 + " union all select " + tab2 + ".id from " + tab2 + ") as t"); driver.compile("select * from " + view1, true); // view entity @@ -123,10 +120,8 @@ public void testViewInSubQuery() throws Exception { final String tab1 = prefix + "t"; final String view1 = prefix + "v"; - int ret = driver.run("create table " + tab1 + "(id int)").getResponseCode(); - assertEquals("Checking command success", 0, ret); - ret = driver.run("create view " + view1 + " as select * from " + tab1).getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("create table " + tab1 + "(id int)"); + driver.run("create view " + view1 + " as select * from " + tab1); driver.compile("select * from " + view1, true); // view entity @@ -162,7 +157,7 @@ public void testViewInSubQueryWithWhereClauseRbo() throws Exception { testViewInSubQueryWithWhereClause(); } - private void testViewInSubQueryWithWhereClause() { + private void testViewInSubQueryWithWhereClause() throws CommandProcessorException { String prefix = "tvsubquerywithwhereclause" + NAME_PREFIX; final String tab1 = prefix + "t"; final String view1 = prefix + "v"; @@ -171,37 +166,28 @@ private void testViewInSubQueryWithWhereClause() { final String tab1row2 = "'a','b','c'"; //drop all if exists - int ret = driver.run("drop table if exists " + tab1).getResponseCode(); - assertEquals("Checking command success", 0, ret); - ret = driver.run("drop view if exists " + view1).getResponseCode(); - assertEquals("Checking command success", 0, ret); - ret = driver.run("drop view if exists " + view2).getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("drop table if exists " + tab1); + driver.run("drop view if exists " + view1); + driver.run("drop view if exists " + view2); //create tab1 - ret = driver.run("create table " + tab1 + "(col1 string, col2 string, col3 string)") - .getResponseCode(); - assertEquals("Checking command success", 0, ret); - ret = driver.run("insert into " + tab1 + " values (" + tab1row1 + ")").getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("create table " + tab1 + "(col1 string, col2 string, col3 string)"); + driver.run("insert into " + tab1 + " values (" + tab1row1 + ")"); //create view1 - ret = driver.run("create view " + view1 + " as select " + + driver.run("create view " + view1 + " as select " + tab1 + ".col1, " + tab1 + ".col2, " + tab1 + ".col3 " + - " from " + tab1).getResponseCode(); - assertEquals("Checking command success", 0, ret); + " from " + tab1); - ret = driver.run("insert into " + tab1 + " values (" + tab1row2 + ")").getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("insert into " + tab1 + " values (" + tab1row2 + ")"); //create view2 - ret = driver.run( + driver.run( "create view " + view2 + " as select " + tab1 + ".col1, " + tab1 + ".col2, " + tab1 + ".col3 " + " from " + tab1 + " where " + tab1 + ".col1 NOT IN (" + - "SELECT " + view1 + ".col1 FROM " + view1 + ")").getResponseCode(); - assertEquals("Checking command success", 0, ret); + "SELECT " + view1 + ".col1 FROM " + view1 
+ ")"); //select from view2 driver.compile("select * from " + view2, true); @@ -235,13 +221,10 @@ public void testSubQueryInSubView() throws Exception { final String view1 = prefix + "v"; final String view2 = prefix + "v2"; - int ret = driver.run("create table " + tab1 + "(id int)").getResponseCode(); - assertEquals("Checking command success", 0, ret); - ret = driver.run("create view " + view1 + " as select * from " + tab1).getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("create table " + tab1 + "(id int)"); + driver.run("create view " + view1 + " as select * from " + tab1); - ret = driver.run("create view " + view2 + " as select * from (select * from " + view1 + ") x").getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("create view " + view2 + " as select * from (select * from " + view1 + ") x"); driver.compile("select * from " + view2, true); // view entity @@ -269,13 +252,10 @@ public void testUnionAllInSubView() throws Exception { final String view1 = prefix + "v"; final String view2 = prefix + "v2"; - int ret = driver.run("create table " + tab1 + "(id int)").getResponseCode(); - assertEquals("Checking command success", 0, ret); - ret = driver.run("create view " + view1 + " as select * from " + tab1).getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("create table " + tab1 + "(id int)"); + driver.run("create view " + view1 + " as select * from " + tab1); - ret = driver.run("create view " + view2 + " as select * from (select * from " + view1 + " union all select * from " + view1 + ") x").getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("create view " + view2 + " as select * from (select * from " + view1 + " union all select * from " + view1 + ") x"); driver.compile("select * from " + view2, true); // view entity diff --git ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java index b705fd7f88..7de7065823 100644 --- ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java +++ ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java @@ -23,7 +23,6 @@ import java.util.Comparator; import java.util.Iterator; import java.util.List; -import java.util.Objects; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; @@ -38,6 +37,7 @@ import org.apache.hadoop.hive.ql.plan.mapper.PlanMapper; import org.apache.hadoop.hive.ql.plan.mapper.PlanMapper.EquivGroup; import org.apache.hadoop.hive.ql.plan.mapper.StatsSources; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.reexec.ReExecDriver; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.OperatorStats; @@ -65,10 +65,6 @@ public int compare(Operator o1, Operator o2) { Long id1 = Long.valueOf(o1.getIdentifier()); Long id2 = Long.valueOf(o2.getIdentifier()); - int c0 = Objects.compare(o1.getOperatorName(), o2.getOperatorName(), Comparator.naturalOrder()); - if (c0 != 0) { - return c0; - } return Long.compare(id1, id2); } }; @@ -99,8 +95,7 @@ public static void beforeClass() throws Exception { // @formatter:on }; for (String cmd : cmds) { - int ret = driver.run(cmd).getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run(cmd); } } @@ -113,21 +108,18 @@ public static void afterClass() throws Exception { public static void 
dropTables(IDriver driver) throws Exception { String tables[] = { "s", "tu", "tv", "tw" }; for (String t : tables) { - int ret = driver.run("drop table if exists " + t).getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("drop table if exists " + t); } } - private PlanMapper getMapperForQuery(IDriver driver, String query) { - int ret; - ret = driver.run(query).getResponseCode(); - assertEquals("Checking command success", 0, ret); + private PlanMapper getMapperForQuery(IDriver driver, String query) throws CommandProcessorException { + driver.run(query); PlanMapper pm0 = driver.getContext().getPlanMapper(); return pm0; } @Test - public void testUsageOfRuntimeInfo() throws ParseException { + public void testUsageOfRuntimeInfo() throws ParseException, CommandProcessorException { IDriver driver = createDriver(); String query = "select sum(u) from tu where u>1"; PlanMapper pm1 = getMapperForQuery(driver, query); @@ -151,7 +143,7 @@ public void testUsageOfRuntimeInfo() throws ParseException { } @Test - public void testInConversion() throws ParseException { + public void testInConversion() throws ParseException, CommandProcessorException { String query = "explain select sum(id_uv) from tu where u in (1,2) group by u"; @@ -169,7 +161,7 @@ public void testInConversion() throws ParseException { } @Test - public void testBreakupAnd() throws ParseException { + public void testBreakupAnd() throws ParseException, CommandProcessorException { String query = "explain select sum(id_uv) from tu where u=1 and (u=2 or u=1) group by u"; @@ -182,7 +174,7 @@ public void testBreakupAnd() throws ParseException { } @Test - public void testBreakupAnd2() throws ParseException { + public void testBreakupAnd2() throws ParseException, CommandProcessorException { String query = "explain select sum(id_uv) from tu where u in (1,2,3) and u=2 and u=2 and 2=u group by u"; @@ -198,10 +190,13 @@ public void testBreakupAnd2() throws ParseException { @Test @Ignore("needs HiveFilter mapping") - public void testMappingJoinLookup() throws ParseException { + public void testMappingJoinLookup() throws ParseException, CommandProcessorException { IDriver driver = createDriver(); - PlanMapper pm0 = getMapperForQuery(driver, "select sum(tu.id_uv),sum(u) from tu join tv on (tu.id_uv = tv.id_uv) where u>1 and v>1"); + PlanMapper pm0 = getMapperForQuery(driver, + "select sum(tu.id_uv), sum(u)\n" + + "from tu join tv on (tu.id_uv = tv.id_uv)\n" + + "where u > 1 and v > 1"); Iterator itG = pm0.iterateGroups(); int checkedOperators = 0; diff --git ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java index b1b035f9f1..60241a15ff 100644 --- ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java +++ ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hive.ql.optimizer.signature.TestOperatorSignature; import org.apache.hadoop.hive.ql.parse.ParseException; import org.apache.hadoop.hive.ql.plan.mapper.PlanMapper; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.OperatorStatsReaderHook; import org.apache.hive.testutils.HiveTestEnvSetup; @@ -68,8 +69,7 @@ public static void beforeClass() throws Exception { // @formatter:on }; for (String cmd : cmds) { - int ret = driver.run(cmd).getResponseCode(); - assertEquals("Checking command success", 
0, ret); + driver.run(cmd); } } @@ -82,20 +82,18 @@ public static void afterClass() throws Exception { public static void dropTables(IDriver driver) throws Exception { String tables[] = { "tu", "tv", "tw" }; for (String t : tables) { - int ret = driver.run("drop table if exists " + t).getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("drop table if exists " + t); } } - private PlanMapper getMapperForQuery(IDriver driver, String query) { - int ret = driver.run(query).getResponseCode(); - assertEquals("Checking command success", 0, ret); + private PlanMapper getMapperForQuery(IDriver driver, String query) throws CommandProcessorException { + driver.run(query); PlanMapper pm0 = driver.getContext().getPlanMapper(); return pm0; } @Test - public void testUnrelatedFiltersAreNotMatched0() throws ParseException { + public void testUnrelatedFiltersAreNotMatched0() throws ParseException, CommandProcessorException { IDriver driver = createDriver(); String query = "select u from tu where id_uv = 1 union all select v from tv where id_uv = 1"; @@ -117,7 +115,7 @@ public void testUnrelatedFiltersAreNotMatched0() throws ParseException { } @Test - public void testUnrelatedFiltersAreNotMatched1() throws ParseException { + public void testUnrelatedFiltersAreNotMatched1() throws ParseException, CommandProcessorException { IDriver driver = createDriver(); PlanMapper pm0 = getMapperForQuery(driver, "select u from tu where id_uv = 1 group by u"); PlanMapper pm1 = getMapperForQuery(driver, "select v from tv where id_uv = 1 group by v"); @@ -130,7 +128,7 @@ public void testUnrelatedFiltersAreNotMatched1() throws ParseException { } @Test - public void testDifferentFiltersAreNotMatched() throws ParseException { + public void testDifferentFiltersAreNotMatched() throws ParseException, CommandProcessorException { IDriver driver = createDriver(); PlanMapper pm0 = getMapperForQuery(driver, "select u from tu where id_uv = 1 group by u"); PlanMapper pm1 = getMapperForQuery(driver, "select u from tu where id_uv = 2 group by u"); diff --git ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java index 4283c2275b..e283ddda81 100644 --- ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java +++ ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import java.util.Optional; import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.hadoop.hive.conf.HiveConf; @@ -33,12 +32,11 @@ import org.apache.hadoop.hive.ql.exec.CommonJoinOperator; import org.apache.hadoop.hive.ql.exec.FilterOperator; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter; -import org.apache.hadoop.hive.ql.optimizer.signature.RelTreeSignature; import org.apache.hadoop.hive.ql.plan.Statistics; import org.apache.hadoop.hive.ql.plan.mapper.PlanMapper; import org.apache.hadoop.hive.ql.plan.mapper.StatsSources; import org.apache.hadoop.hive.ql.plan.mapper.PlanMapper.EquivGroup; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.OperatorStats; import org.apache.hadoop.hive.ql.stats.OperatorStatsReaderHook; @@ -49,6 +47,7 @@ import org.junit.ClassRule; import org.junit.Rule; import 
org.junit.Test; +import org.junit.rules.ExpectedException; import org.junit.rules.TestRule; public class TestReOptimization { @@ -75,8 +74,7 @@ public static void beforeClass() throws Exception { // @formatter:on }; for (String cmd : cmds) { - int ret = driver.run(cmd).getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run(cmd); } } @@ -94,15 +92,15 @@ public void after() { public static void dropTables(IDriver driver) throws Exception { String[] tables = new String[] {"tu", "tv", "tw" }; for (String t : tables) { - int ret = driver.run("drop table if exists " + t).getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("drop table if exists " + t); } } - private PlanMapper getMapperForQuery(IDriver driver, String query) throws CommandProcessorResponse { - CommandProcessorResponse res = driver.run(query); - if (res.getResponseCode() != 0) { - throw res; + private PlanMapper getMapperForQuery(IDriver driver, String query) { + try { + driver.run(query); + } catch (CommandProcessorException e) { + throw new RuntimeException("running the query " + query + " was not successful"); } PlanMapper pm0 = driver.getContext().getPlanMapper(); return pm0; @@ -152,14 +150,19 @@ public void testReExecutedIfMapJoinError() throws Exception { } - @Test(expected = CommandProcessorResponse.class) + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + @Test public void testNotReExecutedIfAssertionError() throws Exception { IDriver driver = createDriver("reoptimize"); String query = "select assert_true(${hiveconf:zzz}>sum(1)) from tu join tv on (tu.id_uv=tv.id_uv) where u<10 and v>1"; + exceptionRule.expect(RuntimeException.class); + exceptionRule.expectMessage("running the query " + query + " was not successful"); + getMapperForQuery(driver, query); - assertEquals(1, driver.getContext().getExecutionIndex()); } @Test @@ -192,7 +195,7 @@ public void testStatCachingMetaStore() throws Exception { private void checkRuntimeStatsReuse( boolean expectInSameSession, boolean expectNewHs2Session, - boolean expectHs2Instance) throws CommandProcessorResponse { + boolean expectHs2Instance) throws CommandProcessorException { { // same session IDriver driver = createDriver("reoptimize"); @@ -216,7 +219,7 @@ private void checkRuntimeStatsReuse( } @SuppressWarnings("rawtypes") - private void checkUsageOfRuntimeStats(IDriver driver, boolean expected) throws CommandProcessorResponse { + private void checkUsageOfRuntimeStats(IDriver driver, boolean expected) throws CommandProcessorException { String query = "select sum(u) from tu join tv on (tu.id_uv=tv.id_uv) where u<10 and v>1"; PlanMapper pm = getMapperForQuery(driver, query); assertEquals(1, driver.getContext().getExecutionIndex()); @@ -272,8 +275,6 @@ public void testReOptimizationCanSendBackStatsToCBO() throws Exception { FilterOperator fo = fos.get(0); OperatorStats os = oss.get(0); - Optional prevOs = driver.getContext().getStatsSource().lookup(RelTreeSignature.of(hf)); - long cntFilter = RelMetadataQuery.instance().getRowCount(hf).longValue(); if (fo.getStatistics() != null) { // in case the join order is changed the subTree-s are not matching anymore because an RS is present in the condition diff --git ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestStatEstimations.java ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestStatEstimations.java index 0acace60a6..4a2d568dd9 100644 --- ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestStatEstimations.java +++ 
ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestStatEstimations.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hive.ql.exec.FilterOperator; import org.apache.hadoop.hive.ql.parse.ParseException; import org.apache.hadoop.hive.ql.plan.mapper.PlanMapper; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.testutils.HiveTestEnvSetup; import org.hamcrest.Matchers; @@ -59,8 +60,7 @@ public static void beforeClass() throws Exception { // @formatter:on }; for (String cmd : cmds) { - int ret = driver.run(cmd).getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run(cmd); } } @@ -73,20 +73,18 @@ public static void afterClass() throws Exception { public static void dropTables(IDriver driver) throws Exception { String tables[] = {"t2" }; for (String t : tables) { - int ret = driver.run("drop table if exists " + t).getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("drop table if exists " + t); } } - private PlanMapper getMapperForQuery(IDriver driver, String query) { - int ret = driver.run(query).getResponseCode(); - assertEquals("Checking command success", 0, ret); + private PlanMapper getMapperForQuery(IDriver driver, String query) throws CommandProcessorException { + driver.run(query); PlanMapper pm0 = driver.getContext().getPlanMapper(); return pm0; } @Test - public void testFilterIntIn() throws ParseException { + public void testFilterIntIn() throws ParseException, CommandProcessorException { IDriver driver = createDriver(); String query = "explain select a from t2 where a IN (-1,0,1,2,10,20,30,40) order by a"; diff --git ql/src/test/org/apache/hadoop/hive/ql/processors/TestCompileProcessor.java ql/src/test/org/apache/hadoop/hive/ql/processors/TestCompileProcessor.java index d1ffc1bfba..bf7965885c 100644 --- ql/src/test/org/apache/hadoop/hive/ql/processors/TestCompileProcessor.java +++ ql/src/test/org/apache/hadoop/hive/ql/processors/TestCompileProcessor.java @@ -29,17 +29,25 @@ @Test public void testSyntax() throws Exception { CompileProcessor cp = new CompileProcessor(); - Assert.assertEquals(0, cp.run("` public class x { \n }` AS GROOVY NAMED x.groovy").getResponseCode()); + cp.run("` public class x { \n }` AS GROOVY NAMED x.groovy"); Assert.assertEquals("GROOVY", cp.getLang()); Assert.assertEquals(" public class x { \n }", cp.getCode()); Assert.assertEquals("x.groovy", cp.getNamed()); - Assert.assertEquals(1, cp.run("").getResponseCode()); - Assert.assertEquals(1, cp.run("bla bla ").getResponseCode()); + try { + cp.run(""); + } catch (CommandProcessorException e) { + Assert.assertEquals(1, e.getResponseCode()); + } + try { + cp.run("bla bla "); + } catch (CommandProcessorException e) { + Assert.assertEquals(1, e.getResponseCode()); + } + CompileProcessor cp2 = new CompileProcessor(); CommandProcessorResponse response = cp2.run( "` import org.apache.hadoop.hive.ql.exec.UDF \n public class x { \n }` AS GROOVY NAMED x.groovy"); - Assert.assertEquals(0, response.getResponseCode()); - File f = new File(response.getErrorMessage()); + File f = new File(response.getMessage()); Assert.assertTrue(f.exists()); f.delete(); } diff --git ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java index b8d79bd475..ddbbef0b01 100644 --- ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java +++ 
ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java @@ -70,7 +70,7 @@ public void testHiddenConfig() throws Exception { } @Test - public void testHiddenConfigSetVarName() { + public void testHiddenConfigSetVarName() throws CommandProcessorException { runSetProcessor(HiveConf.ConfVars.METASTOREPWD.varname); String output = baos.toString(); Assert.assertTrue(output.contains("hidden")); @@ -109,7 +109,7 @@ public void testSystemPropertyIndividual() throws Exception { /* * Simulates the set ; */ - private void runSetProcessor(String command) { + private void runSetProcessor(String command) throws CommandProcessorException { processor.run(command); state.out.flush(); } diff --git ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java index 0f45c81497..e6576d18cf 100644 --- ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java +++ ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.security.UserGroupInformation; @@ -37,7 +36,6 @@ import java.util.List; -import static org.junit.Assert.assertEquals; import static org.mockito.Matchers.any; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.verify; @@ -85,8 +83,7 @@ public static void beforeTest() throws Exception { } private static void runCmd(String cmd) throws Exception { - CommandProcessorResponse resp = driver.run(cmd); - assertEquals(0, resp.getResponseCode()); + driver.run(cmd); } @AfterClass diff --git ql/src/test/org/apache/hive/testutils/TestHiveTestEnvSetup.java ql/src/test/org/apache/hive/testutils/TestHiveTestEnvSetup.java index c34affd2c4..66fec030ee 100644 --- ql/src/test/org/apache/hive/testutils/TestHiveTestEnvSetup.java +++ ql/src/test/org/apache/hive/testutils/TestHiveTestEnvSetup.java @@ -24,8 +24,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.parse.ParseException; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.testutils.HiveTestEnvSetup; import org.junit.AfterClass; @@ -54,8 +52,7 @@ public static void beforeClass() throws Exception { // @formatter:on }; for (String cmd : cmds) { - int ret = driver.run(cmd).getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run(cmd); } } @@ -68,17 +65,15 @@ public static void afterClass() throws Exception { public static void dropTables(IDriver driver) throws Exception { String tables[] = { "s", "tu", "tv", "tw" }; for (String t : tables) { - int ret = driver.run("drop table if exists " + t).getResponseCode(); - assertEquals("Checking command success", 0, ret); + driver.run("drop table if exists " + t); } } @Test - public void testMappingSameQuery() throws ParseException, Exception { + public void testMappingSameQuery() throws 
Exception { IDriver driver = createDriver(); String query = "select sum(u*u),sum(u) from tu where u>1"; - CommandProcessorResponse ret = driver.run(query); - assertEquals(0, ret.getResponseCode()); + driver.run(query); List res = new ArrayList(); driver.getFetchTask().fetch(res); diff --git service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java index eafe821583..8f04ec3868 100644 --- service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java +++ service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.common.io.SessionStream; import org.apache.hadoop.hive.metastore.api.Schema; import org.apache.hadoop.hive.ql.processors.CommandProcessor; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.service.ServiceUtils; @@ -111,10 +112,6 @@ public void runInternal() throws HiveSQLException { String commandArgs = command.substring(tokens[0].length()).trim(); CommandProcessorResponse response = commandProcessor.run(commandArgs); - int returnCode = response.getResponseCode(); - if (returnCode != 0) { - throw toSQLException("Error while processing statement", response); - } Schema schema = response.getSchema(); if (schema != null) { setHasResultSet(true); @@ -123,14 +120,12 @@ public void runInternal() throws HiveSQLException { setHasResultSet(false); resultSchema = new TableSchema(); } - if (response.getConsoleMessages() != null) { - for (String consoleMsg : response.getConsoleMessages()) { - LOG.info(consoleMsg); - } + if (response.getMessage() != null) { + LOG.info(response.getMessage()); } - } catch (HiveSQLException e) { + } catch (CommandProcessorException e) { setState(OperationState.ERROR); - throw e; + throw toSQLException("Error while processing statement", e); } catch (Exception e) { setState(OperationState.ERROR); throw new HiveSQLException("Error running query: " + e.toString(), e); diff --git service/src/java/org/apache/hive/service/cli/operation/Operation.java service/src/java/org/apache/hive/service/cli/operation/Operation.java index 5109981261..5036d5907c 100644 --- service/src/java/org/apache/hive/service/cli/operation/Operation.java +++ service/src/java/org/apache/hive/service/cli/operation/Operation.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.log.LogDivertAppender; import org.apache.hadoop.hive.ql.log.LogDivertAppenderForTest; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.OperationLog; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hive.service.cli.FetchOrientation; @@ -351,11 +351,11 @@ protected void validateFetchOrientation(FetchOrientation orientation, } } - protected HiveSQLException toSQLException(String prefix, CommandProcessorResponse response) { - HiveSQLException ex = new HiveSQLException(prefix + ": " + response.getErrorMessage(), - response.getSQLState(), response.getResponseCode()); - if (response.getException() != null) { - ex.initCause(response.getException()); + protected HiveSQLException toSQLException(String prefix, CommandProcessorException e) { + HiveSQLException ex = + new 
HiveSQLException(prefix + ": " + e.getErrorMessage(), e.getSqlState(), e.getResponseCode()); + if (e.getException() != null) { + ex.initCause(e.getException()); } return ex; } diff --git service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java index 747db58e68..cfb0e643e0 100644 --- service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java +++ service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java @@ -55,7 +55,7 @@ import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.log.PerfLogger; import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.AbstractSerDe; @@ -87,7 +87,6 @@ */ public class SQLOperation extends ExecuteStatementOperation { private IDriver driver = null; - private CommandProcessorResponse response; private TableSchema resultSchema; private AbstractSerDe serde = null; private boolean fetchStarted = false; @@ -201,17 +200,14 @@ public void run() { // In Hive server mode, we are not able to retry in the FetchTask // case, when calling fetch queries since execute() has returned. // For now, we disable the test attempts. - response = driver.compileAndRespond(statement); - if (0 != response.getResponseCode()) { - throw toSQLException("Error while compiling statement", response); - } + driver.compileAndRespond(statement); if (queryState.getQueryTag() != null && queryState.getQueryId() != null) { parentSession.updateQueryTag(queryState.getQueryId(), queryState.getQueryTag()); } setHasResultSet(driver.hasResultSet()); - } catch (HiveSQLException e) { + } catch (CommandProcessorException e) { setState(OperationState.ERROR); - throw e; + throw toSQLException("Error while compiling statement", e); } catch (Throwable e) { setState(OperationState.ERROR); throw new HiveSQLException("Error running query: " + e.toString(), e); @@ -230,10 +226,9 @@ private void runQuery() throws HiveSQLException { // In Hive server mode, we are not able to retry in the FetchTask // case, when calling fetch queries since execute() has returned. // For now, we disable the test attempts. 
- response = driver.run(); - if (0 != response.getResponseCode()) { - throw toSQLException("Error while processing statement", response); - } + driver.run(); + } catch (CommandProcessorException e) { + throw toSQLException("Error while processing statement", e); } catch (Throwable e) { /** * If the operation was cancelled by another thread, or the execution timed out, Driver#run diff --git streaming/src/test/org/apache/hive/streaming/TestStreaming.java streaming/src/test/org/apache/hive/streaming/TestStreaming.java index dbff263aed..055672f910 100644 --- streaming/src/test/org/apache/hive/streaming/TestStreaming.java +++ streaming/src/test/org/apache/hive/streaming/TestStreaming.java @@ -88,7 +88,7 @@ import org.apache.hadoop.hive.ql.io.orc.Reader; import org.apache.hadoop.hive.ql.io.orc.RecordReader; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.txn.compactor.Worker; import org.apache.hadoop.hive.serde.serdeConstants; @@ -3147,19 +3147,21 @@ private static boolean runDDL(IDriver driver, String sql) { LOG.debug(sql); System.out.println(sql); //LOG.debug("Running Hive Query: "+ sql); - CommandProcessorResponse cpr = driver.run(sql); - if (cpr.getResponseCode() == 0) { + try { + driver.run(sql); return true; + } catch (CommandProcessorException e) { + LOG.error("Statement: " + sql + " failed: " + e); + return false; } - LOG.error("Statement: " + sql + " failed: " + cpr); - return false; } private static ArrayList queryTable(IDriver driver, String query) throws IOException { - CommandProcessorResponse cpr = driver.run(query); - if (cpr.getResponseCode() != 0) { - throw new RuntimeException(query + " failed: " + cpr); + try { + driver.run(query); + } catch (CommandProcessorException e) { + throw new RuntimeException(query + " failed: " + e); } ArrayList res = new ArrayList(); driver.getResults(res); diff --git streaming/src/test/org/apache/hive/streaming/TestStreamingDynamicPartitioning.java streaming/src/test/org/apache/hive/streaming/TestStreamingDynamicPartitioning.java index 4bc4780bfa..3ef62f1326 100644 --- streaming/src/test/org/apache/hive/streaming/TestStreamingDynamicPartitioning.java +++ streaming/src/test/org/apache/hive/streaming/TestStreamingDynamicPartitioning.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.thrift.TException; @@ -825,19 +825,21 @@ public void testTableValidation() throws Exception { private static boolean runDDL(IDriver driver, String sql) { LOG.debug(sql); System.out.println(sql); - CommandProcessorResponse cpr = driver.run(sql); - if (cpr.getResponseCode() == 0) { + try { + driver.run(sql); return true; + } catch (CommandProcessorException e) { + LOG.error("Statement: " + sql + " failed: " + e); + return false; } - LOG.error("Statement: " + sql + " failed: " + cpr); - return false; } private static ArrayList queryTable(IDriver driver, String query) throws IOException { - CommandProcessorResponse cpr = driver.run(query); - if (cpr.getResponseCode() != 
0) { - throw new RuntimeException(query + " failed: " + cpr); + try { + driver.run(query); + } catch (CommandProcessorException e) { + throw new RuntimeException(query + " failed: " + e); } ArrayList res = new ArrayList(); driver.getResults(res);
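The hunks above all migrate the same calling convention: a failure that was previously reported through CommandProcessorResponse.getResponseCode() is now raised as a CommandProcessorException. A minimal sketch of the resulting caller pattern, for reference only (it is not part of the patch; the helper name tryRun and its error output are hypothetical, while IDriver, CommandProcessorException and the accessors used are the ones appearing in the hunks above):

import org.apache.hadoop.hive.ql.IDriver;
import org.apache.hadoop.hive.ql.processors.CommandProcessorException;

public class DriverRunSketch {
  // Before the patch: int ret = driver.run(sql).getResponseCode(); assertEquals(0, ret);
  // After the patch: a non-zero result surfaces as a thrown CommandProcessorException.
  static boolean tryRun(IDriver driver, String sql) {
    try {
      driver.run(sql); // throws CommandProcessorException on failure
      return true;
    } catch (CommandProcessorException e) {
      // The exception carries what the failed CommandProcessorResponse used to expose.
      System.err.println("Statement failed, code=" + e.getResponseCode()
          + ", state=" + e.getSqlState() + ": " + e.getErrorMessage());
      return false;
    }
  }
}

The same try/catch shape is what runDDL and queryTable in the streaming tests above were rewritten to use.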