diff --git a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 92406cda65..132d8a32a8 100644
--- a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -624,7 +624,9 @@
   REPL_INVALID_INTERNAL_CONFIG_FOR_SERVICE(40009, "Invalid internal config error : {0} for {1} service.", true),
   REPL_RETRY_EXHAUSTED(40010, "Retry exhausted for retryable error code {0}.", true),
   REPL_FAILED_WITH_NON_RECOVERABLE_ERROR(40011, "Replication failed with non recoverable error. Needs manual intervention"),
-  REPL_INVALID_ARGUMENTS(40012, "Invalid arguments error : {0}.", true)
+  REPL_INVALID_ARGUMENTS(40012, "Invalid arguments error : {0}.", true),
+  REPL_INVALID_ALTER_TABLE(40013, "{0}Unable to alter table{1}", true),
+  REPL_PERMISSION_DENIED(40014, "{0}org.apache.hadoop.security.AccessControlException{1}", true)
   ;
 
   private int errorCode;
@@ -644,7 +646,7 @@
     for (ErrorMsg errorMsg : values()) {
       if (errorMsg.format != null) {
         String pattern = errorMsg.mesg.replaceAll("\\{[0-9]+\\}", ".*");
-        formatToErrorMsgMap.put(Pattern.compile("^" + pattern + "$"), errorMsg);
+        formatToErrorMsgMap.put(Pattern.compile("^" + pattern + "$", Pattern.DOTALL), errorMsg);
       } else {
         mesgToErrorMsgMap.put(errorMsg.getMsg().trim(), errorMsg);
         int length = errorMsg.getMsg().trim().length();
diff --git a/contrib/src/test/results/clientnegative/serde_regex.q.out b/contrib/src/test/results/clientnegative/serde_regex.q.out
index 69a5403ccd..e7d3aa55b5 100644
--- a/contrib/src/test/results/clientnegative/serde_regex.q.out
+++ b/contrib/src/test/results/clientnegative/serde_regex.q.out
@@ -78,4 +78,4 @@ STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@serde_regex
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.contrib.serde2.RegexSerDe only accepts string columns, but column[5] named status has type int)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.contrib.serde2.RegexSerDe only accepts string columns, but column[5] named status has type int)
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index ca0639665e..e6430d90f5 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -1139,7 +1139,7 @@ public NotificationEventResponse apply(@Nullable NotificationEventResponse event
         driver.run("REPL DUMP " + dbName);
         assert false;
       } catch (CommandProcessorException e) {
-        assertTrue(e.getResponseCode() == ErrorMsg.REPL_EVENTS_MISSING_IN_METASTORE.getErrorCode());
+        assertTrue(e.getCauseMessage().equals(ErrorMsg.REPL_EVENTS_MISSING_IN_METASTORE.getMsg()));
       }
       eventIdSkipper.assertInjectionsPerformed(true,false);
     } finally {
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java
index f52975aff5..504338e203 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java
@@ -26,8 +26,11 @@
 import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore;
 import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.CallerArguments;
 import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection;
-import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 
 import org.junit.Assert;
 import org.junit.Test;
@@ -35,13 +38,18 @@
 import javax.annotation.Nullable;
 
 import java.io.File;
+import java.nio.charset.StandardCharsets;
+import java.io.IOException;
 
 import java.util.Arrays;
+import java.util.Base64;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.Collections;
 import java.util.Map;
+import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.NON_RECOVERABLE_MARKER;
+
 
 /**
  * TestReplicationScenariosAcidTables - test bootstrap of ACID tables during an incremental.
*/ @@ -138,6 +146,11 @@ public Boolean apply(@Nullable CallerArguments args) { } finally { InjectableBehaviourObjectStore.resetAlterTableModifier(); } + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); + if(nonRecoverablePath != null){ + baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); + } //Load again should succeed as checkpointing is in place replica.load(replicatedDbName, primaryDbName); verifyIncLoad(replicatedDbName, incDump.lastReplicationId); @@ -307,4 +320,18 @@ public void run() { replica.load(replicatedDbName, primaryDbName); verifyInc2Load(replicatedDbName, inc2Dump.lastReplicationId); } + + private Path getNonRecoverablePath(Path dumpDir, String dbName) throws IOException { + Path dumpPath = new Path(dumpDir, + Base64.getEncoder().encodeToString(dbName.toLowerCase() + .getBytes(StandardCharsets.UTF_8.name()))); + FileSystem fs = dumpPath.getFileSystem(conf); + if (fs.exists(dumpPath)) { + FileStatus[] statuses = fs.listStatus(dumpPath); + if (statuses.length > 0) { + return new Path(statuses[0].getPath(), NON_RECOVERABLE_MARKER.toString()); + } + } + return null; + } } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java index 0cc72821f0..dbdee9d334 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java @@ -1571,6 +1571,14 @@ public Boolean apply(@Nullable CallerArguments args) { // Retry with same dump with which it was already loaded should resume the bootstrap load. Make sure that table t1, // is loaded before t2. So that scope is set to table in first iteration for table t1. In the next iteration, it // loads only remaining partitions of t2, so that the table tracker has no tasks. 
+ + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); + if(nonRecoverablePath != null){ + baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); + } + + List withConfigs = Arrays.asList("'hive.in.repl.test.files.sorted'='true'"); replica.load(replicatedDbName, primaryDbName, withConfigs); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java index f4ef7166a3..5cb9bbc90f 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java @@ -73,6 +73,7 @@ import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION; import static org.apache.hadoop.hive.ql.exec.repl.ReplExternalTables.FILE_NAME; import static org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils.INC_BOOTSTRAP_ROOT_DIR_NAME; +import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.NON_RECOVERABLE_MARKER; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -909,6 +910,12 @@ public Boolean apply(@Nullable CallerArguments args) { InjectableBehaviourObjectStore.resetAlterTableModifier(); } + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); + if(nonRecoverablePath != null){ + baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); + } + replica.load(replicatedDbName, primaryDbName) .run("use " + replicatedDbName) .run("show tables like 't1'") @@ -1324,5 +1331,18 @@ private String relativeExtInfoPath(String dbName) { return File.separator + dbName.toLowerCase() + File.separator + FILE_NAME; } } - + + private Path getNonRecoverablePath(Path dumpDir, String dbName) throws IOException { + Path dumpPath = new Path(dumpDir, + Base64.getEncoder().encodeToString(dbName.toLowerCase() + .getBytes(StandardCharsets.UTF_8.name()))); + FileSystem fs = dumpPath.getFileSystem(conf); + if (fs.exists(dumpPath)) { + FileStatus[] statuses = fs.listStatus(dumpPath); + if (statuses.length > 0) { + return new Path(statuses[0].getPath(), NON_RECOVERABLE_MARKER.toString()); + } + } + return null; + } } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java index 7e76215166..e5074bf1c8 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java @@ -33,6 +33,10 @@ import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection; import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.CallerArguments; import org.apache.hadoop.hive.shims.Utils; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -44,6 +48,8 @@ import org.slf4j.LoggerFactory; import org.junit.Assert; 
+import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.io.IOException; import java.util.Arrays; import java.util.ArrayList; @@ -54,6 +60,7 @@ import javax.annotation.Nullable; import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION; +import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.NON_RECOVERABLE_MARKER; /** * Tests for statistics replication. @@ -342,7 +349,13 @@ private String dumpLoadVerify(List tableNames, String lastReplicationId, failIncrementalLoad(); } } - + + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); + if(nonRecoverablePath != null){ + baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); + } + // Load, possibly a retry replica.load(replicatedDbName, primaryDbName); @@ -665,6 +678,20 @@ private void testStatsReplicationCommon(boolean parallelBootstrap, boolean metad lastReplicationId = dumpLoadVerify(tableNames, lastReplicationId, parallelBootstrap, metadataOnly, false); } + + private Path getNonRecoverablePath(Path dumpDir, String dbName) throws IOException { + Path dumpPath = new Path(dumpDir, + Base64.getEncoder().encodeToString(dbName.toLowerCase() + .getBytes(StandardCharsets.UTF_8.name()))); + FileSystem fs = dumpPath.getFileSystem(conf); + if (fs.exists(dumpPath)) { + FileStatus[] statuses = fs.listStatus(dumpPath); + if (statuses.length > 0) { + return new Path(statuses[0].getPath(), NON_RECOVERABLE_MARKER.toString()); + } + } + return null; + } @Test public void testNonParallelBootstrapLoad() throws Throwable { diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java index 213048624f..ef9ea73592 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java @@ -189,7 +189,7 @@ public void testSimplePrivileges() throws Exception { driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName)); assert false; } catch (CommandProcessorException e) { - assertEquals(1, e.getResponseCode()); + assertEquals(40000, e.getResponseCode()); } // Even if table location is specified table creation should fail @@ -202,7 +202,7 @@ public void testSimplePrivileges() throws Exception { driver.run(String.format( "create table %s (a string) partitioned by (b string) location '" +tblLocation + "'", tblNameLoc)); } catch (CommandProcessorException e) { - assertEquals(1, e.getResponseCode()); + assertEquals(40000, e.getResponseCode()); } } @@ -265,7 +265,7 @@ public void testSimplePrivileges() throws Exception { try { driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName+"mal")); } catch (CommandProcessorException e) { - assertEquals(1, e.getResponseCode()); + assertEquals(40000, e.getResponseCode()); } ttbl.setTableName(tblName+"mal"); @@ -282,7 +282,7 @@ public void testSimplePrivileges() throws Exception { try { driver.run("alter table "+tblName+" add partition (b='2011')"); } catch (CommandProcessorException e) { - assertEquals(1, e.getResponseCode()); + assertEquals(40000, e.getResponseCode()); } List ptnVals = new ArrayList(); @@ -341,7 +341,7 @@ public void testSimplePrivileges() throws 
Exception { try { driver.run("drop table "+tbl.getTableName()); } catch (CommandProcessorException e) { - assertEquals(1, e.getResponseCode()); + assertEquals(40000, e.getResponseCode()); } } diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java index f4e7c4f57a..14eaf60238 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java @@ -1117,8 +1117,8 @@ public void testErrorMessages() throws SQLException { // codes and messages. This should be fixed. doTestErrorCase( "create table " + tableName + " (key int, value string)", - "FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask", - "08S01", 1); + "FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask", + "08S01", 40000); } private void doTestErrorCase(String sql, String expectedMessage, diff --git a/kudu-handler/src/test/results/negative/kudu_config.q.out b/kudu-handler/src/test/results/negative/kudu_config.q.out index b0071e7cd6..c7525e96fb 100644 --- a/kudu-handler/src/test/results/negative/kudu_config.q.out +++ b/kudu-handler/src/test/results/negative/kudu_config.q.out @@ -4,4 +4,4 @@ TBLPROPERTIES ("kudu.table_name" = "default.kudu_kv") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@kv_table -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException java.io.IOException: Kudu master addresses are not specified in the table property (kudu.master_addresses), or default configuration (hive.kudu.master.addresses.default).) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException java.io.IOException: Kudu master addresses are not specified in the table property (kudu.master_addresses), or default configuration (hive.kudu.master.addresses.default).) 
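The expected return codes in the .q.out files and in the JDBC/authorization tests above change from 1 to 40000 because task failures are now mapped through ErrorMsg.getErrorMsg(...) (40000 is Hive's generic execution error) instead of returning a hard-coded 1. The Pattern.DOTALL flag added to ErrorMsg.java at the top of this patch is what lets that lookup match multi-line failure messages. Below is a minimal, self-contained sketch of that behaviour, using the new REPL_INVALID_ALTER_TABLE message format as the example; the class name is illustrative and not part of the patch.

    import java.util.regex.Pattern;

    public class DotAllLookupSketch {
      public static void main(String[] args) {
        // ErrorMsg builds its lookup patterns by replacing the {0}/{1} placeholders with ".*".
        String pattern = "{0}Unable to alter table{1}".replaceAll("\\{[0-9]+\\}", ".*");

        // A typical replication failure message spans several lines (cause plus stack trace).
        String msg = "InvalidOperationException(message:got exception)\n"
            + "Unable to alter table t1\n"
            + "\tat org.apache.hadoop.hive.metastore...";

        // Without DOTALL, '.' does not cross line terminators, so the multi-line message is not recognised.
        System.out.println(Pattern.compile("^" + pattern + "$").matcher(msg).matches());                 // false
        // With DOTALL (as in the patched ErrorMsg), the same pattern matches and maps to error code 40013.
        System.out.println(Pattern.compile("^" + pattern + "$", Pattern.DOTALL).matcher(msg).matches()); // true
      }
    }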
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask.java index 3e4496471f..04b9451f3e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask.java @@ -27,6 +27,7 @@ import java.util.Set; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.reflections.Reflections; @@ -67,6 +68,7 @@ public int execute() { return 0; } + DDLOperation ddlOperation = null; try { DDLDesc ddlDesc = work.getDDLDesc(); @@ -76,14 +78,18 @@ public int execute() { Class ddlOpertaionClass = DESC_TO_OPARATION.get(ddlDesc.getClass()); Constructor constructor = ddlOpertaionClass.getConstructor(DDLOperationContext.class, ddlDesc.getClass()); - DDLOperation ddlOperation = constructor.newInstance(ddlOperationContext, ddlDesc); + ddlOperation = constructor.newInstance(ddlOperationContext, ddlDesc); return ddlOperation.execute(); } else { throw new IllegalArgumentException("Unknown DDL request: " + ddlDesc.getClass()); } } catch (Throwable e) { failed(e); - return 1; + if(ddlOperation != null) { + LOG.error("DDLTask failed, DDL Operation: " + ddlOperation.getClass().toString(), e); + } + return ReplUtils.handleException(work.isReplication(), e, work.getDumpDirectory(), + work.getMetricCollector(), getName(), conf); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork.java index 6eea86b8bf..d590e8d5ce 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -39,6 +40,19 @@ private Set inputs; /** List of WriteEntities that are passed to the hooks. 
*/ private Set outputs; + private boolean isReplication; + private String dumpDirectory; + private transient ReplicationMetricCollector metricCollector; + + public DDLWork(Set inputs, Set outputs, DDLDesc ddlDesc, boolean isReplication, + String dumpDirectory, ReplicationMetricCollector metricCollector) { + this.inputs = inputs; + this.outputs = outputs; + this.ddlDesc = ddlDesc; + this.isReplication = isReplication; + this.dumpDirectory = dumpDirectory; + this.metricCollector = metricCollector; + } public DDLWork(Set inputs, Set outputs, DDLDesc ddlDesc) { this.inputs = inputs; @@ -54,6 +68,18 @@ public DDLWork(Set inputs, Set outputs, DDLDesc ddlDesc return outputs; } + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + + public String getDumpDirectory() { + return dumpDirectory; + } + + public boolean isReplication() { + return isReplication; + } + public boolean getNeedLock() { return needLock; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java index 7844dd6b37..d8492a16f1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java @@ -355,8 +355,9 @@ public int execute() { } catch (Exception e) { setException(e); LOG.info("Failed to persist stats in metastore", e); + return ReplUtils.handleException(work.isReplication(), e, work.getDumpDirectory(), work.getMetricCollector(), + getName(), conf); } - return 1; } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java index 5ffc110c42..5b101df4ef 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.plan.CopyWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.util.StringUtils; @@ -103,7 +104,10 @@ protected int copyOnePath(Path fromPath, Path toPath) { } catch (Exception e) { console.printError("Failed with exception " + e.getMessage(), "\n" + StringUtils.stringifyException(e)); - return (1); + LOG.error("CopyTask failed", e); + setException(e); + return ReplUtils.handleException(work.isReplication(), e, work.getDumpDirectory(), work.getMetricCollector(), + getName(), conf); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index 529050123f..31f634e4ad 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.ql.ddl.DDLUtils; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; import org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -464,14 +465,17 @@ public int execute() { console.printInfo("\n", StringUtils.stringifyException(he),false); } } - setException(he); + errorCode = ReplUtils.handleException(work.isReplication(), he, work.getDumpDirectory(), 
+ work.getMetricCollector(), getName(), conf); return errorCode; } catch (Exception e) { console.printError("Failed with exception " + e.getMessage(), "\n" + StringUtils.stringifyException(e)); setException(e); - return (1); + LOG.error("MoveTask failed", e); + return ReplUtils.handleException(work.isReplication(), e, work.getDumpDirectory(), work.getMetricCollector(), + getName(), conf); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java index 1f40dd02e1..3b07b739a2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java @@ -20,11 +20,11 @@ import org.apache.hadoop.hive.metastore.ReplChangeManager; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.parse.EximUtil; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.CopyWork; import org.apache.hadoop.hive.ql.plan.ReplCopyWork; import org.apache.hadoop.hive.ql.parse.repl.CopyUtils; @@ -166,7 +166,8 @@ public int execute() { } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); setException(e); - return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); + return ReplUtils.handleException(true, e, work.getDumpDirectory(), work.getMetricCollector(), + getName(), conf); } } @@ -230,6 +231,14 @@ public String getName() { readSourceAsFileList, false); } + public static Task getLoadCopyTask(ReplicationSpec replicationSpec, Path srcPath, Path dstPath, + HiveConf conf, boolean isAutoPurge, boolean needRecycle, + boolean readSourceAsFileList, String dumpDirectory, + ReplicationMetricCollector metricCollector) { + return getLoadCopyTask(replicationSpec, srcPath, dstPath, conf, isAutoPurge, needRecycle, + readSourceAsFileList, false, dumpDirectory, metricCollector); + } + private static Task getLoadCopyTask(ReplicationSpec replicationSpec, Path srcPath, Path dstPath, HiveConf conf, boolean isAutoPurge, boolean needRecycle, boolean readSourceAsFileList, @@ -259,12 +268,52 @@ public String getName() { return copyTask; } + private static Task getLoadCopyTask(ReplicationSpec replicationSpec, Path srcPath, Path dstPath, + HiveConf conf, boolean isAutoPurge, boolean needRecycle, + boolean readSourceAsFileList, + boolean overWrite, + String dumpDirectory, + ReplicationMetricCollector metricCollector) { + Task copyTask = null; + LOG.debug("ReplCopyTask:getLoadCopyTask: {}=>{}", srcPath, dstPath); + if ((replicationSpec != null) && replicationSpec.isInReplicationScope()){ + ReplCopyWork rcwork = new ReplCopyWork(srcPath, dstPath, false, overWrite, dumpDirectory, + metricCollector); + rcwork.setReadSrcAsFilesList(readSourceAsFileList); + if (replicationSpec.isReplace() && (conf.getBoolVar(REPL_ENABLE_MOVE_OPTIMIZATION))) { + rcwork.setDeleteDestIfExist(true); + rcwork.setAutoPurge(isAutoPurge); + rcwork.setNeedRecycle(needRecycle); + } + // For replace case, duplicate check should not be done. The new base directory will automatically make the older + // data invisible. Doing duplicate check and ignoring copy will cause consistency issue if there are multiple + // replace events getting replayed in the first incremental load. 
+ rcwork.setCheckDuplicateCopy(replicationSpec.needDupCopyCheck() && !replicationSpec.isReplace()); + LOG.debug("ReplCopyTask:\trcwork"); + String distCpDoAsUser = conf.getVar(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER); + rcwork.setDistCpDoAsUser(distCpDoAsUser); + copyTask = TaskFactory.get(rcwork, conf); + } else { + LOG.debug("ReplCopyTask:\tcwork"); + copyTask = TaskFactory.get(new CopyWork(srcPath, dstPath, false, dumpDirectory, metricCollector, true), conf); + } + return copyTask; + } + + public static Task getLoadCopyTask(ReplicationSpec replicationSpec, Path srcPath, Path dstPath, HiveConf conf) { return getLoadCopyTask(replicationSpec, srcPath, dstPath, conf, false, false, true, false); } + public static Task getLoadCopyTask(ReplicationSpec replicationSpec, Path srcPath, Path dstPath, + HiveConf conf, String dumpDirectory, ReplicationMetricCollector metricCollector) { + return getLoadCopyTask(replicationSpec, srcPath, dstPath, conf, false, false, + true, false, dumpDirectory, metricCollector); + } + + /* * Invoked in the bootstrap path. * Overwrite set to true @@ -274,4 +323,12 @@ public String getName() { return getLoadCopyTask(replicationSpec, srcPath, dstPath, conf, false, false, readSourceAsFileList, overWrite); } + + public static Task getLoadCopyTask(ReplicationSpec replicationSpec, Path srcPath, Path dstPath, + HiveConf conf, boolean readSourceAsFileList, boolean overWrite, + String dumpDirectory, ReplicationMetricCollector metricCollector) { + return getLoadCopyTask(replicationSpec, srcPath, dstPath, conf, false, false, + readSourceAsFileList, overWrite, dumpDirectory, metricCollector); + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java index 48721d394f..c9ad302984 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java @@ -132,7 +132,9 @@ public int execute() { console.printError("Failed with exception " + e.getMessage(), "\n" + StringUtils.stringifyException(e)); setException(e); - return 1; + LOG.error("ReplTxnTask failed", e); + return ReplUtils.handleException(true, e, work.getDumpDirectory(), work.getMetricCollector(), + getName(), conf); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckTask.java index 4dba12ccd2..fdc44a3a98 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckTask.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; import org.apache.hadoop.hive.ql.plan.api.StageType; @@ -45,9 +46,10 @@ public int execute() { Path ackPath = work.getAckFilePath(); Utils.create(ackPath, conf); LOG.info("Created ack file : {} ", ackPath); - } catch (SemanticException e) { + } catch (Exception e) { setException(e); - return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); + return ReplUtils.handleException(true, e, work.getAckFilePath().getParent().getParent().toString(), + work.getMetricCollector(), getName(), conf); } return 0; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckWork.java index 
0fa0a951c3..8f9a23701e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckWork.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.exec.repl; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -33,13 +34,22 @@ public class AckWork implements Serializable { private static final long serialVersionUID = 1L; private Path ackFilePath; + private transient ReplicationMetricCollector metricCollector; public Path getAckFilePath() { return ackFilePath; } + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + public AckWork(Path ackFilePath) { this.ackFilePath = ackFilePath; } + public AckWork(Path ackFilePath, ReplicationMetricCollector metricCollector) { + this.ackFilePath = ackFilePath; + this.metricCollector = metricCollector; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java index 45d67c61ba..030cf6082f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.log.AtlasDumpLogger; import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.api.StageType; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -104,24 +105,27 @@ public int execute() { replLogger.endLog(0L); work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS); return 0; + } catch (RuntimeException e) { + LOG.error("RuntimeException while dumping atlas metadata", e); + setException(e); + try{ + ReplUtils.handleException(true, e, work.getStagingDir().getParent().toString(), work.getMetricCollector(), + getName(), conf); + } catch (Exception ex){ + LOG.error("Failed to collect replication metrics: ", ex); + } + throw e; } catch (Exception e) { LOG.error("Exception while dumping atlas metadata", e); setException(e); int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); - try { - if (errorCode > 40000) { - //Create non recoverable marker at top level - Path nonRecoverableMarker = new Path(work.getStagingDir().getParent(), - ReplAck.NON_RECOVERABLE_MARKER.toString()); - Utils.writeStackTrace(e, nonRecoverableMarker, conf); - work.getMetricCollector().reportStageEnd(getName(), Status.FAILED_ADMIN, nonRecoverableMarker.toString()); - } else { - work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); - } + try{ + return ReplUtils.handleException(true, e, work.getStagingDir().getParent().toString(), work.getMetricCollector(), + getName(), conf); } catch (Exception ex) { - LOG.error("Failed to collect Metrics ", ex); + LOG.error("Failed to collect replication metrics: ", ex); + return errorCode; } - return errorCode; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java index 3a658473a8..a44aa435aa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java @@ -88,24 +88,28 @@ public int execute() { LOG.info("Atlas entities import count {}", importCount); 
work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS); return 0; + } catch (RuntimeException e) { + LOG.error("RuntimeException while loading atlas metadata", e); + setException(e); + try{ + ReplUtils.handleException(true, e, work.getStagingDir().getParent().toString(), work.getMetricCollector(), + getName(), conf); + } catch (Exception ex){ + LOG.error("Failed to collect replication metrics: ", ex); + } + throw e; } catch (Exception e) { LOG.error("Exception while loading atlas metadata", e); setException(e); int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); - try { - if (errorCode > 40000) { - //Create non recoverable marker at top level - Path nonRecoverableMarker = new Path(work.getStagingDir().getParent(), - ReplAck.NON_RECOVERABLE_MARKER.toString()); - Utils.writeStackTrace(e, nonRecoverableMarker, conf); - work.getMetricCollector().reportStageEnd(getName(), Status.FAILED_ADMIN, nonRecoverableMarker.toString()); - } else { - work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); - } - } catch (Exception ex) { - LOG.error("Failed to collect Metrics ", ex); + try{ + return ReplUtils.handleException(true, e, work.getStagingDir().getParent().toString(), work.getMetricCollector(), + getName(), conf); + } + catch (Exception ex){ + LOG.error("Failed to collect replication metrics: ", ex); + return errorCode; } - return errorCode; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyTask.java index 5ed09f8781..43d45a95b2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyTask.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.exec.util.Retryable; import org.apache.hadoop.hive.ql.parse.repl.CopyUtils; import org.apache.hadoop.hive.ql.plan.api.StageType; @@ -140,7 +141,10 @@ public int execute() { } }); } catch (Exception e) { - throw new SecurityException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()), e); + LOG.error("Replication failed ", e); + Exception ex = new SecurityException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()), e); + setException(ex); + return ReplUtils.handleException(true, ex, work.getDumpDirectory(), work.getMetricCollector(), getName(), conf); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyWork.java index 04bbd564be..c232dfa0b0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyWork.java @@ -19,6 +19,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.exec.repl.util.StringConvertibleObject; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import java.io.Serializable; @@ -33,8 +34,12 @@ private static final long serialVersionUID = 1L; private Path fullyQualifiedSourcePath; private Path fullyQualifiedTargetPath; + private String dumpDirectory; + private transient ReplicationMetricCollector metricCollector; - public DirCopyWork() { + public DirCopyWork(ReplicationMetricCollector metricCollector, String dumpDirectory) { + this.metricCollector = metricCollector; + 
this.dumpDirectory = dumpDirectory; } public DirCopyWork(Path fullyQualifiedSourcePath, Path fullyQualifiedTargetPath) { @@ -57,6 +62,14 @@ public Path getFullyQualifiedTargetPath() { return fullyQualifiedTargetPath; } + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + + public String getDumpDirectory() { + return dumpDirectory; + } + @Override public String convertToString() { StringBuilder objInStr = new StringBuilder(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java index eb2af1d268..e7b403b8db 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java @@ -134,24 +134,27 @@ public int execute() { LOG.debug("Ranger policy export filePath:" + filePath); LOG.info("Number of ranger policies exported {}", exportCount); return 0; + } catch (RuntimeException e) { + LOG.error("RuntimeException during Ranger dump", e); + setException(e); + try{ + ReplUtils.handleException(true, e, work.getCurrentDumpPath().getParent().toString(), + work.getMetricCollector(), getName(), conf); + } catch (Exception ex){ + LOG.error("Failed to collect replication metrics: ", ex); + } + throw e; } catch (Exception e) { - LOG.error("failed", e); + LOG.error("Ranger Dump Failed: ", e); setException(e); int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); - try { - if (errorCode > 40000) { - //Create non recoverable marker at top level - Path nonRecoverableMarker = new Path(work.getCurrentDumpPath().getParent(), - ReplAck.NON_RECOVERABLE_MARKER.toString()); - Utils.writeStackTrace(e, nonRecoverableMarker, conf); - work.getMetricCollector().reportStageEnd(getName(), Status.FAILED_ADMIN, nonRecoverableMarker.toString()); - } else { - work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); - } - } catch (Exception ex) { - LOG.error("Failed to collect Metrics ", ex); + try{ + return ReplUtils.handleException(true, e, work.getCurrentDumpPath().getParent().toString(), + work.getMetricCollector(), getName(), conf); + } catch (Exception ex){ + LOG.error("Failed to collect replication metrics: ", ex); + return errorCode; } - return errorCode; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java index 0049f76847..63fad4b94d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java @@ -152,24 +152,27 @@ public int execute() { } work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS); return 0; - } catch (Exception e) { - LOG.error("Failed", e); + } catch (RuntimeException e) { + LOG.error("Runtime Excepton during RangerLoad", e); setException(e); + try{ + ReplUtils.handleException(true, e, work.getCurrentDumpPath().getParent().toString(), work.getMetricCollector(), + getName(), conf); + } catch (Exception ex){ + LOG.error("Failed to collect replication metrics: ", ex); + } + throw e; + } catch (Exception e) { + LOG.error("RangerLoad Failed", e); int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); - try { - if (errorCode > 40000) { - //Create non recoverable marker at top level - Path nonRecoverableMarker = new Path(work.getCurrentDumpPath().getParent(), - ReplAck.NON_RECOVERABLE_MARKER.toString()); - Utils.writeStackTrace(e, 
nonRecoverableMarker, conf); - work.getMetricCollector().reportStageEnd(getName(), Status.FAILED_ADMIN, nonRecoverableMarker.toString()); - } else { - work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); - } - } catch (Exception ex) { - LOG.error("Failed to collect Metrics ", ex); + setException(e); + try{ + return ReplUtils.handleException(true, e, work.getCurrentDumpPath().getParent().toString(), work.getMetricCollector(), + getName(), conf); + } catch (Exception ex){ + LOG.error("Failed to collect replication metrics: ", ex); + return errorCode; } - return errorCode; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java index 4630f95fb9..ea9bf9af78 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hive.ql.exec.util.DAGTraversal; import org.apache.hadoop.hive.ql.exec.util.Retryable; import org.apache.hadoop.hive.ql.io.AcidUtils; +import org.apache.hadoop.hive.ql.io.orc.ExternalCache; import org.apache.hadoop.hive.ql.lockmgr.DbLockManager; import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager; import org.apache.hadoop.hive.ql.lockmgr.LockException; @@ -193,23 +194,27 @@ public int execute() { LOG.info("Previous Dump is not yet loaded"); } } + } catch (RuntimeException e) { + LOG.error("replication failed with run time exception", e); + setException(e); + try{ + ReplUtils.handleException(true, e, work.getCurrentDumpPath().toString(), + work.getMetricCollector(), getName(), conf); + } catch (Exception ex){ + LOG.error("Failed to collect replication metrics: ", ex); + } + throw e; } catch (Exception e) { - LOG.error("failed", e); setException(e); int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); - try { - if (errorCode > 40000) { - Path nonRecoverableMarker = new Path(work.getCurrentDumpPath(), - ReplAck.NON_RECOVERABLE_MARKER.toString()); - Utils.writeStackTrace(e, nonRecoverableMarker, conf); - work.getMetricCollector().reportStageEnd(getName(), Status.FAILED_ADMIN, nonRecoverableMarker.toString()); - } else { - work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); - } - } catch (Exception ex) { - LOG.error("Failed to collect Metrics", ex); + try{ + return ReplUtils.handleException(true, e, work.getCurrentDumpPath().toString(), + work.getMetricCollector(), getName(), conf); + } + catch (Exception ex){ + LOG.error("Failed to collect replication metrics: ", ex); + return errorCode; } - return errorCode; } return 0; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java index 64b9dd3d3a..4da5bacab3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java @@ -183,7 +183,7 @@ public void setResultValues(List resultValues) { } List> tasks = new ArrayList<>(); while (externalTblCopyPathIterator.hasNext() && tracker.canAddMoreTasks()) { - DirCopyWork dirCopyWork = new DirCopyWork(); + DirCopyWork dirCopyWork = new DirCopyWork(metricCollector, currentDumpPath.toString()); dirCopyWork.loadFromString(externalTblCopyPathIterator.next()); Task task = TaskFactory.get(dirCopyWork, conf); tasks.add(task); @@ -206,7 +206,8 @@ public void setResultValues(List resultValues) { 
managedTableCopyPath.loadFromString(managedTblCopyPathIterator.next()); Task copyTask = ReplCopyTask.getLoadCopyTask( managedTableCopyPath.getReplicationSpec(), managedTableCopyPath.getSrcPath(), - managedTableCopyPath.getTargetPath(), conf, false, shouldOverwrite); + managedTableCopyPath.getTargetPath(), conf, false, shouldOverwrite, + getCurrentDumpPath().toString(), getMetricCollector()); tasks.add(copyTask); tracker.addTask(copyTask); LOG.debug("added task for {}", managedTableCopyPath); @@ -220,7 +221,8 @@ public void setResultValues(List resultValues) { while (functionCopyPathIterator.hasNext() && tracker.canAddMoreTasks()) { EximUtil.DataCopyPath binaryCopyPath = functionCopyPathIterator.next(); Task copyTask = ReplCopyTask.getLoadCopyTask( - binaryCopyPath.getReplicationSpec(), binaryCopyPath.getSrcPath(), binaryCopyPath.getTargetPath(), conf + binaryCopyPath.getReplicationSpec(), binaryCopyPath.getSrcPath(), binaryCopyPath.getTargetPath(), conf, + getCurrentDumpPath().toString(), getMetricCollector() ); tasks.add(copyTask); tracker.addTask(copyTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java index 61b3652829..57a77b56fa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; import org.apache.hadoop.hive.ql.parse.repl.load.MetaData; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.api.StageType; @@ -84,6 +85,7 @@ public class ReplLoadTask extends Task implements Serializable { private static final long serialVersionUID = 1L; private final static int ZERO_TASKS = 0; + private final String STAGE_NAME = "REPL_LOAD"; @Override public String getName() { @@ -128,29 +130,25 @@ public int execute() { } } catch (RuntimeException e) { LOG.error("replication failed with run time exception", e); + setException(e); try { - work.getMetricCollector().reportEnd(Status.FAILED); - } catch (SemanticException ex) { - LOG.error("Failed to collect Metrics ", ex); + ReplUtils.handleException(true, e, new Path(work.getDumpDirectory()).getParent().toString(), + work.getMetricCollector(), STAGE_NAME, conf); + } catch (Exception ex){ + LOG.error("Failed to collect replication metrics: ", ex); } throw e; } catch (Exception e) { - LOG.error("replication failed", e); setException(e); int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); try { - if (errorCode > 40000) { - Path nonRecoverableMarker = new Path(new Path(work.dumpDirectory).getParent(), - ReplAck.NON_RECOVERABLE_MARKER.toString()); - Utils.writeStackTrace(e, nonRecoverableMarker, conf); - work.getMetricCollector().reportStageEnd(getName(), Status.FAILED_ADMIN, nonRecoverableMarker.toString()); - } else { - work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); - } - } catch (Exception ex) { - LOG.error("Failed to collect Metrics ", ex); + return ReplUtils.handleException(true, e, new Path(work.getDumpDirectory()).getParent().toString(), + work.getMetricCollector(), STAGE_NAME, conf); + } + catch (Exception ex) { + LOG.error("Failed to collect replication metrics: ", ex); + return errorCode; } - return errorCode; } } @@ -229,7 +227,8 @@ a database ( directory ) 
switch (next.eventType()) { case Database: DatabaseEvent dbEvent = (DatabaseEvent) next; - dbTracker = new LoadDatabase(loadContext, dbEvent, work.dbNameToLoadIn, loadTaskTracker).tasks(); + dbTracker = new LoadDatabase(loadContext, dbEvent, work.dbNameToLoadIn, loadTaskTracker, + work.getMetricCollector()).tasks(); loadTaskTracker.update(dbTracker); if (work.hasDbState()) { loadTaskTracker.update(updateDatabaseLastReplID(maxTasks, loadContext, scope)); @@ -256,7 +255,8 @@ a database ( directory ) FSTableEvent tableEvent = (FSTableEvent) next; if (TableType.VIRTUAL_VIEW.name().equals(tableEvent.getMetaData().getTable().getTableType())) { tableTracker = new TaskTracker(1); - tableTracker.addTask(createViewTask(tableEvent.getMetaData(), work.dbNameToLoadIn, conf)); + tableTracker.addTask(createViewTask(tableEvent.getMetaData(), work.dbNameToLoadIn, conf, + (new Path(work.dumpDirectory).getParent()).toString(), work.getMetricCollector())); } else { LoadTable loadTable = new LoadTable(tableEvent, loadContext, iterator.replLogger(), tableContext, loadTaskTracker, work.getMetricCollector()); @@ -387,7 +387,8 @@ private TaskTracker addLoadConstraintsTasks(Context loadContext, TaskTracker dbTracker, Scope scope) throws IOException, SemanticException { LoadConstraint loadConstraint = - new LoadConstraint(loadContext, (ConstraintEvent) next, work.dbNameToLoadIn, dbTracker); + new LoadConstraint(loadContext, (ConstraintEvent) next, work.dbNameToLoadIn, dbTracker, + (new Path(work.dumpDirectory)).getParent().toString(), work.getMetricCollector()); TaskTracker constraintTracker = loadConstraint.tasks(); scope.rootTasks.addAll(constraintTracker.tasks()); constraintTracker.debugLog("constraints"); @@ -397,7 +398,8 @@ private TaskTracker addLoadConstraintsTasks(Context loadContext, private TaskTracker addLoadFunctionTasks(Context loadContext, BootstrapEventsIterator iterator, BootstrapEvent next, TaskTracker dbTracker, Scope scope) throws IOException, SemanticException { LoadFunction loadFunction = new LoadFunction(loadContext, iterator.replLogger(), - (FunctionEvent) next, work.dbNameToLoadIn, dbTracker, work.getMetricCollector()); + (FunctionEvent) next, work.dbNameToLoadIn, dbTracker, (new Path(work.dumpDirectory)).getParent().toString(), + work.getMetricCollector()); TaskTracker functionsTracker = loadFunction.tasks(); if (!scope.database) { scope.rootTasks.addAll(functionsTracker.tasks()); @@ -431,6 +433,31 @@ private TaskTracker addLoadFunctionTasks(Context loadContext, BootstrapEventsIte return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), desc), conf); } + public static Task createViewTask(MetaData metaData, String dbNameToLoadIn, HiveConf conf, + String dumpDirectory, ReplicationMetricCollector metricCollector) + throws SemanticException { + Table table = new Table(metaData.getTable()); + String dbName = dbNameToLoadIn == null ? table.getDbName() : dbNameToLoadIn; + TableName tableName = HiveTableName.ofNullable(table.getTableName(), dbName); + String dbDotView = tableName.getNotEmptyDbTable(); + + String viewOriginalText = table.getViewOriginalText(); + String viewExpandedText = table.getViewExpandedText(); + if (!dbName.equals(table.getDbName())) { + // TODO: If the DB name doesn't match with the metadata from dump, then need to rewrite the original and expanded + // texts using new DB name. Currently it refers to the source database name. 
+ } + + CreateViewDesc desc = new CreateViewDesc(dbDotView, table.getAllCols(), null, table.getParameters(), + table.getPartColNames(), false, false, viewOriginalText, viewExpandedText, table.getPartCols()); + + desc.setReplicationSpec(metaData.getReplicationSpec()); + desc.setOwnerName(table.getOwner()); + + return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), desc, true, + dumpDirectory, metricCollector), conf); + } + /** * If replication policy is changed between previous and current load, then the excluded tables in * the new replication policy will be dropped. @@ -466,7 +493,7 @@ private void createReplLoadCompleteAckTask() { || (!work.isIncrementalLoad() && !work.hasBootstrapLoadTasks())) { //All repl load tasks are executed and status is 0, create the task to add the acknowledgement AckWork replLoadAckWork = new AckWork( - new Path(work.dumpDirectory, LOAD_ACKNOWLEDGEMENT.toString())); + new Path(work.dumpDirectory, LOAD_ACKNOWLEDGEMENT.toString()), work.getMetricCollector()); Task loadAckWorkTask = TaskFactory.get(replLoadAckWork, conf); if (childTasks.isEmpty()) { childTasks.add(loadAckWorkTask); @@ -487,7 +514,9 @@ private void createEndReplLogTask(Context context, Scope scope, Database dbInMetadata = work.databaseEvent(context.hiveConf).dbInMetadata(work.dbNameToLoadIn); dbProps = dbInMetadata.getParameters(); } - ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, dbProps, work.getMetricCollector()); + ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, dbProps, + (new Path(work.dumpDirectory).getParent()).toString(), + work.getMetricCollector()); Task replLogTask = TaskFactory.get(replLogWork, conf); if (scope.rootTasks.isEmpty()) { scope.rootTasks.add(replLogTask); @@ -513,7 +542,7 @@ private TaskTracker updateDatabaseLastReplID(int maxTasks, Context context, Scop */ TaskTracker taskTracker = new AlterDatabase(context, work.databaseEvent(context.hiveConf), work.dbNameToLoadIn, - new TaskTracker(maxTasks)).tasks(); + new TaskTracker(maxTasks), work.getMetricCollector()).tasks(); AddDependencyToLeaves function = new AddDependencyToLeaves(taskTracker.tasks()); DAGTraversal.traverse(scope.rootTasks, function); @@ -603,7 +632,8 @@ private int executeIncrementalLoad() throws Exception { new AlterDatabaseSetPropertiesDesc(dbName, mapProp, new ReplicationSpec(lastEventid, lastEventid)); Task updateReplIdTask = - TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc), conf); + TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc, true, + (new Path(work.dumpDirectory).getParent()).toString(), work.getMetricCollector()), conf); DAGTraversal.traverse(childTasks, new AddDependencyToLeaves(updateReplIdTask)); work.setLastReplIDUpdated(true); LOG.debug("Added task to set last repl id of db " + dbName + " to " + lastEventid); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java index 4050235233..376fd7caa1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java @@ -163,6 +163,8 @@ IncrementalLoadTasksBuilder incrementalLoadTasksBuilder() { return rootTask; } + public String getDumpDirectory() {return dumpDirectory;} + public void setRootTask(Task rootTask) { this.rootTask = rootTask; } @@ -193,7 +195,7 @@ public Long getDumpExecutionId() { } List> tasks = new ArrayList<>(); while (externalTableDataCopyItr.hasNext() && 
tracker.canAddMoreTasks()) { - DirCopyWork dirCopyWork = new DirCopyWork(); + DirCopyWork dirCopyWork = new DirCopyWork(metricCollector, (new Path(dumpDirectory).getParent()).toString()); dirCopyWork.loadFromString(externalTableDataCopyItr.next()); Task task = TaskFactory.get(dirCopyWork, conf); tasks.add(task); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java index 240f5a7db6..230f056ff0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.api.StageType; @@ -38,10 +39,11 @@ public int execute() { try { work.replStateLog(); - } catch (SemanticException e) { + } catch (Exception e) { LOG.error("Exception while logging metrics ", e); setException(e); - return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); + return ReplUtils.handleException(true, e, work.getDumpDirectory(), work.getMetricCollector(), + getName(), conf); } return 0; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java index 5bd7bdaa5f..ce9b545a6a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java @@ -48,6 +48,7 @@ private TableType tableType; private String functionName; private String lastReplId; + String dumpDirectory; private final transient ReplicationMetricCollector metricCollector; private enum LOG_TYPE { @@ -66,6 +67,16 @@ public ReplStateLogWork(ReplLogger replLogger, ReplicationMetricCollector metric this.metricCollector = metricCollector; } + public ReplStateLogWork(ReplLogger replLogger, ReplicationMetricCollector metricCollector, + String eventId, String eventType, String dumpDirectory) { + this.logType = LOG_TYPE.EVENT; + this.replLogger = replLogger; + this.eventId = eventId; + this.eventType = eventType; + this.metricCollector = metricCollector; + this.dumpDirectory = dumpDirectory; + } + public ReplStateLogWork(ReplLogger replLogger, ReplicationMetricCollector metricCollector, String tableName, TableType tableType) { this.logType = LOG_TYPE.TABLE; @@ -75,6 +86,16 @@ public ReplStateLogWork(ReplLogger replLogger, ReplicationMetricCollector metric this.metricCollector = metricCollector; } + public ReplStateLogWork(ReplLogger replLogger, ReplicationMetricCollector metricCollector, + String tableName, TableType tableType, String dumpDirectory) { + this.logType = LOG_TYPE.TABLE; + this.replLogger = replLogger; + this.tableName = tableName; + this.tableType = tableType; + this.metricCollector = metricCollector; + this.dumpDirectory = dumpDirectory; + } + public ReplStateLogWork(ReplLogger replLogger, String functionName, ReplicationMetricCollector metricCollector) { this.logType = LOG_TYPE.FUNCTION; this.replLogger = replLogger; @@ -82,6 +103,14 @@ public ReplStateLogWork(ReplLogger replLogger, String functionName, ReplicationM this.metricCollector = metricCollector; } + public ReplStateLogWork(ReplLogger replLogger, String functionName, String dumpDirectory, ReplicationMetricCollector metricCollector) { + this.logType = LOG_TYPE.FUNCTION; 
+ this.replLogger = replLogger; + this.functionName = functionName; + this.dumpDirectory = dumpDirectory; + this.metricCollector = metricCollector; + } + public ReplStateLogWork(ReplLogger replLogger, Map dbProps, ReplicationMetricCollector collector) { this.logType = LOG_TYPE.END; this.replLogger = replLogger; @@ -89,6 +118,19 @@ public ReplStateLogWork(ReplLogger replLogger, Map dbProps, Repl this.metricCollector = collector; } + public ReplStateLogWork(ReplLogger replLogger, Map dbProps, String dumpDirectory, ReplicationMetricCollector collector) { + this.logType = LOG_TYPE.END; + this.replLogger = replLogger; + this.lastReplId = ReplicationSpec.getLastReplicatedStateFromParameters(dbProps); + this.dumpDirectory = dumpDirectory; + this.metricCollector = collector; + } + + + public ReplicationMetricCollector getMetricCollector() { return metricCollector; } + + public String getDumpDirectory() { return dumpDirectory; } + public void replStateLog() throws SemanticException { switch (logType) { case TABLE: diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java index e95dbb7598..b131b8ef0b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hive.ql.parse.repl.load.message.AddPrimaryKeyHandler; import org.apache.hadoop.hive.ql.parse.repl.load.message.AddUniqueConstraintHandler; import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandler; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.json.JSONObject; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -71,6 +72,8 @@ private final String dbNameToLoadIn; private final TaskTracker tracker; private final MessageDeserializer deserializer = JSONMessageEncoder.getInstance().getDeserializer(); + String dumpDirectory; + private transient ReplicationMetricCollector metricCollector; public LoadConstraint(Context context, ConstraintEvent event, String dbNameToLoadIn, TaskTracker existingTracker) { @@ -80,6 +83,17 @@ public LoadConstraint(Context context, ConstraintEvent event, String dbNameToLoa this.tracker = new TaskTracker(existingTracker); } + public LoadConstraint(Context context, ConstraintEvent event, String dbNameToLoadIn, + TaskTracker existingTracker, String dumpDirectory, + ReplicationMetricCollector metricCollector) { + this.context = context; + this.event = event; + this.dbNameToLoadIn = dbNameToLoadIn; + this.tracker = new TaskTracker(existingTracker); + this.dumpDirectory = dumpDirectory; + this.metricCollector = metricCollector; + } + public TaskTracker tasks() throws IOException, SemanticException { URI fromURI = EximUtil .getValidatedURI(context.hiveConf, stripQuotes(event.rootDir().toUri().toString())); @@ -104,7 +118,7 @@ public TaskTracker tasks() throws IOException, SemanticException { tasks.addAll(pkHandler.handle( new MessageHandler.Context( dbNameToLoadIn, fromPath.toString(), null, pkDumpMetaData, context.hiveConf, - context.hiveDb, context.nestedContext, LOG))); + context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector))); } if (StringUtils.isNotEmpty(StringUtils.trim(uksString)) && !isUniqueConstraintsAlreadyLoaded(uksString)) { @@ -115,7 +129,7 @@ public TaskTracker tasks() throws IOException, SemanticException { tasks.addAll(ukHandler.handle( new 
MessageHandler.Context( dbNameToLoadIn, fromPath.toString(), null, ukDumpMetaData, context.hiveConf, - context.hiveDb, context.nestedContext, LOG))); + context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector))); } if (StringUtils.isNotEmpty(StringUtils.trim(nnsString)) && !isNotNullConstraintsAlreadyLoaded(nnsString)) { @@ -126,7 +140,7 @@ public TaskTracker tasks() throws IOException, SemanticException { tasks.addAll(nnHandler.handle( new MessageHandler.Context( dbNameToLoadIn, fromPath.toString(), null, nnDumpMetaData, context.hiveConf, - context.hiveDb, context.nestedContext, LOG))); + context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector))); } if (StringUtils.isNotEmpty(StringUtils.trim(dksString)) && !isDefaultConstraintsAlreadyLoaded(dksString)) { @@ -137,7 +151,7 @@ public TaskTracker tasks() throws IOException, SemanticException { tasks.addAll(dkHandler.handle( new MessageHandler.Context( dbNameToLoadIn, fromPath.toString(), null, dkDumpMetaData, context.hiveConf, - context.hiveDb, context.nestedContext, LOG))); + context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector))); } if (StringUtils.isNotEmpty(StringUtils.trim(cksString)) && !isCheckConstraintsAlreadyLoaded(cksString)) { @@ -148,7 +162,7 @@ public TaskTracker tasks() throws IOException, SemanticException { tasks.addAll(ckHandler.handle( new MessageHandler.Context( dbNameToLoadIn, fromPath.toString(), null, dkDumpMetaData, context.hiveConf, - context.hiveDb, context.nestedContext, LOG))); + context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector))); } if (StringUtils.isNotEmpty(StringUtils.trim(fksString)) && !isForeignKeysAlreadyLoaded(fksString)) { @@ -159,7 +173,7 @@ public TaskTracker tasks() throws IOException, SemanticException { tasks.addAll(fkHandler.handle( new MessageHandler.Context( dbNameToLoadIn, fromPath.toString(), null, fkDumpMetaData, context.hiveConf, - context.hiveDb, context.nestedContext, LOG))); + context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector))); } tasks.forEach(tracker::addTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java index 41e09e1d0e..343347a48b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.exec.repl.bootstrap.load; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; @@ -36,6 +37,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils.ReplLoadOpType; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import java.io.Serializable; import java.util.HashMap; @@ -50,6 +52,7 @@ private final DatabaseEvent event; private final String dbNameToLoadIn; + transient ReplicationMetricCollector metricCollector; public LoadDatabase(Context context, DatabaseEvent event, String dbNameToLoadIn, TaskTracker loadTaskTracker) { this.context = context; @@ -58,6 +61,15 @@ public LoadDatabase(Context context, DatabaseEvent event, String dbNameToLoadIn, this.tracker = new 
TaskTracker(loadTaskTracker); } + public LoadDatabase(Context context, DatabaseEvent event, String dbNameToLoadIn, + TaskTracker loadTaskTracker, ReplicationMetricCollector metricCollector) { + this.context = context; + this.event = event; + this.dbNameToLoadIn = dbNameToLoadIn; + this.tracker = new TaskTracker(loadTaskTracker); + this.metricCollector = metricCollector; + } + public TaskTracker tasks() throws Exception { Database dbInMetadata = readDbMetadata(); String dbName = dbInMetadata.getName(); @@ -123,19 +135,21 @@ private boolean isDbEmpty(String dbName) throws HiveException { // If it exists, we want this to be an error condition. Repl Load is not intended to replace a // db. // TODO: we might revisit this in create-drop-recreate cases, needs some thinking on. - DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), createDbDesc); + DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), createDbDesc, true, + (new Path(context.dumpDirectory)).getParent().toString(), this.metricCollector); return TaskFactory.get(work, context.hiveConf); } private Task alterDbTask(Database dbObj) { return alterDbTask(dbObj.getName(), updateDbProps(dbObj, context.dumpDirectory), - context.hiveConf); + context.hiveConf, context.dumpDirectory, this.metricCollector); } private Task setOwnerInfoTask(Database dbObj) { AlterDatabaseSetOwnerDesc alterDbDesc = new AlterDatabaseSetOwnerDesc(dbObj.getName(), new PrincipalDesc(dbObj.getOwnerName(), dbObj.getOwnerType()), null); - DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc); + DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc, true, + (new Path(context.dumpDirectory)).getParent().toString(), this.metricCollector); return TaskFactory.get(work, context.hiveConf); } @@ -165,9 +179,11 @@ private boolean isDbEmpty(String dbName) throws HiveException { } private static Task alterDbTask(String dbName, Map props, - HiveConf hiveConf) { + HiveConf hiveConf, String dumpDirectory, + ReplicationMetricCollector metricCollector) { AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(dbName, props, null); - DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc); + DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc, true, + (new Path(dumpDirectory)).getParent().toString(), metricCollector); return TaskFactory.get(work, hiveConf); } @@ -178,10 +194,16 @@ public AlterDatabase(Context context, DatabaseEvent event, String dbNameToLoadIn super(context, event, dbNameToLoadIn, loadTaskTracker); } + public AlterDatabase(Context context, DatabaseEvent event, String dbNameToLoadIn, + TaskTracker loadTaskTracker, ReplicationMetricCollector metricCollector) { + super(context, event, dbNameToLoadIn, loadTaskTracker, metricCollector); + } + @Override public TaskTracker tasks() throws SemanticException { Database dbObj = readDbMetadata(); - tracker.addTask(alterDbTask(dbObj.getName(), dbObj.getParameters(), context.hiveConf)); + tracker.addTask(alterDbTask(dbObj.getName(), dbObj.getParameters(), context.hiveConf, + context.dumpDirectory, this.metricCollector )); return tracker; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java index 667ec7ff31..7350267c06 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java 
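// Editorial sketch, not part of the patch: the pattern LoadDatabase follows above. DDL tasks on the
// replication load path are now created through the DDLWork constructor variant that also takes the
// replication flag, the parent of the dump directory and the metric collector, so a failing task can
// report its stage and, when non-recoverable, leave a marker under the dump root. The names used here
// (alterDbDesc, dumpDirectory, metricCollector, hiveConf) are assumed to be in scope as in that class.
DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc, true,
    new Path(dumpDirectory).getParent().toString(), metricCollector);
Task alterDbTask = TaskFactory.get(work, hiveConf);  // type parameters omitted for brevity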
@@ -56,6 +56,7 @@ private final FunctionEvent event; private final String dbNameToLoadIn; private final TaskTracker tracker; + String dumpDirectory; private final ReplicationMetricCollector metricCollector; public LoadFunction(Context context, ReplLogger replLogger, FunctionEvent event, @@ -68,9 +69,21 @@ public LoadFunction(Context context, ReplLogger replLogger, FunctionEvent event, this.metricCollector = metricCollector; } + public LoadFunction(Context context, ReplLogger replLogger, FunctionEvent event, + String dbNameToLoadIn, TaskTracker existingTracker, + String dumpDirectory, ReplicationMetricCollector metricCollector) { + this.context = context; + this.replLogger = replLogger; + this.event = event; + this.dbNameToLoadIn = dbNameToLoadIn; + this.tracker = new TaskTracker(existingTracker); + this.dumpDirectory = dumpDirectory; + this.metricCollector = metricCollector; + } + private void createFunctionReplLogTask(List> functionTasks, String functionName) { - ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, functionName, metricCollector); + ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, functionName, dumpDirectory, metricCollector); Task replLogTask = TaskFactory.get(replLogWork, context.hiveConf); DAGTraversal.traverse(functionTasks, new AddDependencyToLeaves(replLogTask)); } @@ -88,7 +101,7 @@ public TaskTracker tasks() throws IOException, SemanticException { List> tasks = handler.handle( new MessageHandler.Context( dbNameToLoadIn, fromPath.toString(), null, null, context.hiveConf, - context.hiveDb, context.nestedContext, LOG) + context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector) ); createFunctionReplLogTask(tasks, handler.getFunctionName()); tasks.forEach(tracker::addTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java index 48c5e737ba..e0c9b96a75 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java @@ -128,7 +128,8 @@ public TaskTracker tasks() throws Exception { if (!forNewTable().hasReplicationState()) { // Add ReplStateLogTask only if no pending table load tasks left for next cycle Task replLogTask - = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector); + = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector, + (new Path(context.dumpDirectory)).getParent().toString()); tracker.addDependentTask(replLogTask); } return tracker; @@ -142,7 +143,8 @@ public TaskTracker tasks() throws Exception { if (!forExistingTable(lastReplicatedPartition).hasReplicationState()) { // Add ReplStateLogTask only if no pending table load tasks left for next cycle Task replLogTask - = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector); + = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector, + (new Path(context.dumpDirectory)).getParent().toString()); tracker.addDependentTask(replLogTask); } return tracker; @@ -248,7 +250,8 @@ private void addPartition(boolean hasMorePartitions, AlterTableAddPartitionDesc private Task tasksForAddPartition(Table table, AlterTableAddPartitionDesc addPartitionDesc, Task ptnRootTask) throws MetaException, HiveException { Task addPartTask = TaskFactory.get( - new DDLWork(new HashSet<>(), new 
HashSet<>(), addPartitionDesc), + new DDLWork(new HashSet<>(), new HashSet<>(), addPartitionDesc, + true, (new Path(context.dumpDirectory)).getParent().toString(), this.metricCollector), context.hiveConf ); //checkpointing task already added as part of add batch of partition in case for metadata only and external tables @@ -272,6 +275,7 @@ private void addPartition(boolean hasMorePartitions, AlterTableAddPartitionDesc tableDesc, (HashMap)partSpec.getPartSpec(), context.dumpDirectory, + this.metricCollector, context.hiveConf ); @@ -290,7 +294,8 @@ private void addPartition(boolean hasMorePartitions, AlterTableAddPartitionDesc event.replicationSpec(), new Path(event.dataPath() + Path.SEPARATOR + getPartitionName(sourceWarehousePartitionLocation)), stagingDir, - context.hiveConf, copyAtLoad, false + context.hiveConf, copyAtLoad, false, (new Path(context.dumpDirectory)).getParent().toString(), + this.metricCollector ); Task movePartitionTask = null; @@ -328,7 +333,9 @@ private String getPartitionName(Path partitionMetadataFullPath) { */ private Task movePartitionTask(Table table, AlterTableAddPartitionDesc.PartitionDesc partSpec, Path tmpPath, LoadFileType loadFileType) { - MoveWork moveWork = new MoveWork(new HashSet<>(), new HashSet<>(), null, null, false); + MoveWork moveWork = new MoveWork(new HashSet<>(), new HashSet<>(), null, null, false, + (new Path(context.dumpDirectory)).getParent().toString(), this.metricCollector, + true); if (AcidUtils.isTransactionalTable(table)) { LoadMultiFilesDesc loadFilesWork = new LoadMultiFilesDesc( Collections.singletonList(tmpPath), @@ -386,7 +393,8 @@ private Path locationOnReplicaWarehouse(Table table, AlterTableAddPartitionDesc. AlterTableDropPartitionDesc dropPtnDesc = new AlterTableDropPartitionDesc(HiveTableName.of(table), partSpecsExpr, true, event.replicationSpec()); dropPtnTask = TaskFactory.get( - new DDLWork(new HashSet<>(), new HashSet<>(), dropPtnDesc), context.hiveConf + new DDLWork(new HashSet<>(), new HashSet<>(), dropPtnDesc, true, + (new Path(context.dumpDirectory)).getParent().toString(), this.metricCollector), context.hiveConf ); } return dropPtnTask; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java index 45fca07ae0..11a1036123 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java @@ -147,11 +147,13 @@ public TaskTracker tasks(boolean isBootstrapDuringInc) throws Exception { tableDesc, null, context.dumpDirectory, + this.metricCollector, context.hiveConf ); if (!isPartitioned(tableDesc)) { Task replLogTask - = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector); + = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector, + (new Path(context.dumpDirectory)).getParent().toString()); ckptTask.addDependentTask(replLogTask); } tracker.addDependentTask(ckptTask); @@ -187,7 +189,8 @@ private void newTableTasks(ImportTableDesc tblDesc, Task tblRootTask, TableLo tblDesc.setLocation(null); } Task createTableTask = - tblDesc.getCreateTableTask(new HashSet<>(), new HashSet<>(), context.hiveConf); + tblDesc.getCreateTableTask(new HashSet<>(), new HashSet<>(), context.hiveConf, true, + (new Path(context.dumpDirectory)).getParent().toString(), metricCollector); if (tblRootTask == null) { tblRootTask = createTableTask; } 
else { @@ -202,7 +205,8 @@ private void newTableTasks(ImportTableDesc tblDesc, Task tblRootTask, TableLo if (replicationSpec.isTransactionalTableDump()) { List partNames = isPartitioned(tblDesc) ? event.partitions(tblDesc) : null; ReplTxnWork replTxnWork = new ReplTxnWork(tblDesc.getDatabaseName(), tblDesc.getTableName(), partNames, - replicationSpec.getValidWriteIdList(), ReplTxnWork.OperationType.REPL_WRITEID_STATE); + replicationSpec.getValidWriteIdList(), ReplTxnWork.OperationType.REPL_WRITEID_STATE, + (new Path(context.dumpDirectory)).getParent().toString(), metricCollector); Task replTxnTask = TaskFactory.get(replTxnWork, context.hiveConf); parentTask.addDependentTask(replTxnTask); parentTask = replTxnTask; @@ -283,9 +287,11 @@ static TableLocationTuple tableLocation(ImportTableDesc tblDesc, Database parent boolean copyAtLoad = context.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET); Task copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, dataPath, tmpPath, context.hiveConf, - copyAtLoad, false); + copyAtLoad, false, (new Path(context.dumpDirectory)).getParent().toString(), metricCollector); - MoveWork moveWork = new MoveWork(new HashSet<>(), new HashSet<>(), null, null, false); + MoveWork moveWork = new MoveWork(new HashSet<>(), new HashSet<>(), null, null, false, + (new Path(context.dumpDirectory)).getParent().toString(), metricCollector, + true); if (AcidUtils.isTransactionalTable(table)) { LoadMultiFilesDesc loadFilesWork = new LoadMultiFilesDesc( Collections.singletonList(tmpPath), @@ -308,6 +314,8 @@ static TableLocationTuple tableLocation(ImportTableDesc tblDesc, Database parent private Task dropTableTask(Table table) { assert(table != null); DropTableDesc dropTblDesc = new DropTableDesc(table.getFullyQualifiedName(), true, false, event.replicationSpec()); - return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc), context.hiveConf); + return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc, + true, (new Path(context.dumpDirectory)).getParent().toString(), + this.metricCollector), context.hiveConf); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java index b00341a4f2..15dc451a1d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java @@ -71,6 +71,7 @@ private final ReplLogger replLogger; private static long numIteration; private final Long eventTo; + private String dumpDirectory; private final ReplicationMetricCollector metricCollector; public IncrementalLoadTasksBuilder(String dbName, String loadPath, @@ -78,6 +79,7 @@ public IncrementalLoadTasksBuilder(String dbName, String loadPath, Long eventTo, ReplicationMetricCollector metricCollector) throws SemanticException { this.dbName = dbName; + dumpDirectory = (new Path(loadPath).getParent()).toString(); this.iterator = iterator; inputs = new HashSet<>(); outputs = new HashSet<>(); @@ -135,13 +137,14 @@ public IncrementalLoadTasksBuilder(String dbName, String loadPath, // entire chain MessageHandler.Context mhContext = new MessageHandler.Context(dbName, location, - taskChainTail, eventDmd, conf, hive, context, this.log); + taskChainTail, eventDmd, conf, hive, context, this.log, + dumpDirectory, metricCollector); List> evTasks = 
analyzeEventLoad(mhContext); if ((evTasks != null) && (!evTasks.isEmpty())) { ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, metricCollector, dir.getPath().getName(), - eventDmd.getDumpType().toString()); + eventDmd.getDumpType().toString(), dumpDirectory); Task barrierTask = TaskFactory.get(replStateLogWork, conf); AddDependencyToLeaves function = new AddDependencyToLeaves(barrierTask); DAGTraversal.traverse(evTasks, function); @@ -155,13 +158,14 @@ public IncrementalLoadTasksBuilder(String dbName, String loadPath, if (!hasMoreWork()) { ReplRemoveFirstIncLoadPendFlagDesc desc = new ReplRemoveFirstIncLoadPendFlagDesc(dbName); - Task updateIncPendTask = TaskFactory.get(new DDLWork(inputs, outputs, desc), conf); + Task updateIncPendTask = TaskFactory.get(new DDLWork(inputs, outputs, desc, + true, dumpDirectory, this.metricCollector), conf); taskChainTail.addDependentTask(updateIncPendTask); taskChainTail = updateIncPendTask; Map dbProps = new HashMap<>(); dbProps.put(ReplicationSpec.KEY.CURR_STATE_ID.toString(), String.valueOf(lastReplayedEvent)); - ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, dbProps, metricCollector); + ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, dbProps, dumpDirectory, metricCollector); Task barrierTask = TaskFactory.get(replStateLogWork, conf); taskChainTail.addDependentTask(barrierTask); this.log.debug("Added {}:{} as a precursor of barrier task {}:{}", @@ -230,7 +234,8 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(tName, partSpec, new ReplicationSpec(replState, replState), false, mapProp, false, false, null); - Task updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterTblDesc), conf); + Task updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterTblDesc, + true, dumpDirectory, metricCollector), conf); // Link the update repl state task with dependency collection task if (preCursor != null) { @@ -248,7 +253,9 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(dbName, mapProp, new ReplicationSpec(replState, replState)); - Task updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterDbDesc), conf); + Task updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterDbDesc, + true, dumpDirectory, + metricCollector), conf); // Link the update repl state task with dependency collection task if (preCursor != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java index 5d71ce03ad..75f6f0afc8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java @@ -37,13 +37,23 @@ import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.exec.repl.ReplAck; import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork; import org.apache.hadoop.hive.ql.exec.util.Retryable; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.EximUtil; import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.repl.DumpType; import 
org.apache.hadoop.hive.ql.parse.repl.ReplLogger; +import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; +import org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; +import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandler; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector; import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; @@ -192,6 +202,17 @@ return TaskFactory.get(replLogWork, conf); } + public static Task getTableReplLogTask(ImportTableDesc tableDesc, ReplLogger replLogger, HiveConf conf, + ReplicationMetricCollector metricCollector, + String dumpRoot) + throws SemanticException { + TableType tableType = tableDesc.isExternal() ? TableType.EXTERNAL_TABLE : tableDesc.tableType(); + ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, metricCollector, + tableDesc.getTableName(), tableType, dumpRoot); + return TaskFactory.get(replLogWork, conf); + } + + public static Task getTableCheckpointTask(ImportTableDesc tableDesc, HashMap partSpec, String dumpRoot, HiveConf conf) throws SemanticException { HashMap mapProp = new HashMap<>(); @@ -203,6 +224,19 @@ return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterTblDesc), conf); } + public static Task getTableCheckpointTask(ImportTableDesc tableDesc, HashMap partSpec, + String dumpRoot, ReplicationMetricCollector metricCollector, + HiveConf conf) throws SemanticException { + HashMap mapProp = new HashMap<>(); + mapProp.put(REPL_CHECKPOINT_KEY, dumpRoot); + + final TableName tName = TableName.fromString(tableDesc.getTableName(), null, tableDesc.getDatabaseName()); + AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(tName, partSpec, null, false, + mapProp, false, false, null); + return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterTblDesc, + true, (new Path(dumpRoot)).getParent().toString(), metricCollector), conf); + } + public static boolean replCkptStatus(String dbName, Map props, String dumpRoot) throws InvalidOperationException { // If ckpt property not set or empty means, bootstrap is not run on this object. 
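// Editorial sketch, not part of the patch: how ReplUtils.handleException(...) added in this file is
// invoked from a replication task's execute() method (ReplStateLogTask earlier in this patch has this
// exact shape). The helper derives an error code from the exception; when a dump directory is
// available, codes above ErrorMsg.GENERIC_ERROR are treated as non-recoverable, a marker file is
// written under the dump root and the stage is reported as FAILED_ADMIN, otherwise FAILED.
// work.getDumpDirectory(), work.getMetricCollector(), getName() and conf are assumed to be in scope.
try {
  work.replStateLog();
} catch (Exception e) {
  LOG.error("Exception while logging metrics ", e);
  setException(e);
  return ReplUtils.handleException(true, e, work.getDumpDirectory(), work.getMetricCollector(),
      getName(), conf);
}
return 0;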
@@ -256,6 +290,24 @@ public static String getNonEmpty(String configParam, HiveConf hiveConf, String e return taskList; } + + public static List> addTasksForLoadingColStats(ColumnStatistics colStats, + HiveConf conf, + UpdatedMetaDataTracker updatedMetadata, + org.apache.hadoop.hive.metastore.api.Table tableObj, + long writeId, + String nonRecoverableMarkPath, + ReplicationMetricCollector metricCollector) + throws IOException, TException { + List> taskList = new ArrayList<>(); + ColumnStatsUpdateWork work = new ColumnStatsUpdateWork(colStats, nonRecoverableMarkPath, metricCollector, true); + work.setWriteId(writeId); + Task task = TaskFactory.get(work, conf); + taskList.add(task); + return taskList; + + } + // Path filters to filter only events (directories) excluding "_bootstrap" public static PathFilter getEventsDirectoryFilter(final FileSystem fs) { return p -> { @@ -280,6 +332,52 @@ public static PathFilter getBootstrapDirectoryFilter(final FileSystem fs) { }; } + public static int handleException(boolean isReplication, Throwable e, String nonRecoverablePath, + ReplicationMetricCollector metricCollector, String stageName, HiveConf conf){ + int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); + if(isReplication){ + try { + if (nonRecoverablePath != null) { + final int recoverableLimit = ErrorMsg.GENERIC_ERROR.getErrorCode(); + String metricStage = getMetricStageName(stageName, metricCollector); + if(errorCode > recoverableLimit){ + Path nonRecoverableMarker = new Path(new Path(nonRecoverablePath), ReplAck.NON_RECOVERABLE_MARKER.toString()); + Utils.writeStackTrace(e, nonRecoverableMarker, conf); + metricCollector.reportStageEnd(metricStage, Status.FAILED_ADMIN, nonRecoverableMarker.toString()); + } + else { + metricCollector.reportStageEnd(metricStage, Status.FAILED); + } + } + } catch (Exception ex) { + LOG.error("Failed to collect Metrics ", ex); + } + } + return errorCode; + } + + private static String getMetricStageName(String stageName, ReplicationMetricCollector metricCollector) { + if( stageName == "REPL_DUMP" || stageName == "REPL_LOAD" || stageName == "ATLAS_DUMP" || stageName == "ATLAS_LOAD" + || stageName == "RANGER_DUMP" || stageName == "RANGER_LOAD"){ + return stageName; + } + if(isDumpMetricCollector(metricCollector)){ + return "REPL_DUMP"; + } else { + return "REPL_LOAD"; + } + } + + private static boolean isDumpMetricCollector(ReplicationMetricCollector metricCollector) { + return metricCollector instanceof BootstrapDumpMetricCollector || + metricCollector instanceof IncrementalDumpMetricCollector; + } + + private static boolean isLoadMetricCollector(ReplicationMetricCollector metricCollector) { + return metricCollector instanceof BootstrapLoadMetricCollector || + metricCollector instanceof IncrementalLoadMetricCollector; + } + public static boolean isFirstIncPending(Map parameters) { if (parameters == null) { return false; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index 3b9bc6f16e..5e05c73ca8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.MetaData; import org.apache.hadoop.hive.ql.parse.repl.load.UpdatedMetaDataTracker; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import 
org.apache.hadoop.hive.ql.plan.CopyWork; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; @@ -222,6 +223,23 @@ public static boolean prepareImport(boolean isImportCmd, long writeId, // Initialize with 0 for non-ACID and non-MM tables. MetaData rv ) throws IOException, MetaException, HiveException, URISyntaxException { + return prepareImport(isImportCmd, isLocationSet, isExternalSet, isPartSpecSet, waitOnPrecursor, + parsedLocation, parsedTableName, overrideDBName, parsedPartSpec, fromLocn, + x, updatedMetadata, txnMgr, writeId, rv, null, null); + } + + public static boolean prepareImport(boolean isImportCmd, + boolean isLocationSet, boolean isExternalSet, boolean isPartSpecSet, + boolean waitOnPrecursor, + String parsedLocation, String parsedTableName, String overrideDBName, + LinkedHashMap parsedPartSpec, + String fromLocn, EximUtil.SemanticAnalyzerWrapperContext x, + UpdatedMetaDataTracker updatedMetadata, HiveTxnManager txnMgr, + long writeId, // Initialize with 0 for non-ACID and non-MM tables. + MetaData rv, + String dumpRoot, + ReplicationMetricCollector metricCollector + ) throws IOException, MetaException, HiveException, URISyntaxException { // initialize load path URI fromURI = EximUtil.getValidatedURI(x.getConf(), stripQuotes(fromLocn)); @@ -358,7 +376,7 @@ public static boolean prepareImport(boolean isImportCmd, createReplImportTasks( tblDesc, partitionDescs, replicationSpec, waitOnPrecursor, table, - fromURI, wh, x, writeId, stmtId, updatedMetadata); + fromURI, wh, x, writeId, stmtId, updatedMetadata, dumpRoot, metricCollector); } else { createRegularImportTasks( tblDesc, partitionDescs, @@ -405,6 +423,12 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName, private static Task loadTable(URI fromURI, Table table, boolean replace, Path tgtPath, ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId) throws HiveException { + return loadTable(fromURI, table, replace, tgtPath, replicationSpec, x, writeId,stmtId, null, null); + } + private static Task loadTable(URI fromURI, Table table, boolean replace, Path tgtPath, + ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x, + Long writeId, int stmtId, + String dumpRoot, ReplicationMetricCollector metricCollector) throws HiveException { assert table != null; assert table.getParameters() != null; Path dataPath = new Path(fromURI.toString(), EximUtil.DATA_PATH_NAME); @@ -462,12 +486,13 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName, if (replicationSpec.isInReplicationScope()) { boolean copyAtLoad = x.getConf().getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET); copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, dataPath, destPath, x.getConf(), - isSkipTrash, needRecycle, copyAtLoad); + isSkipTrash, needRecycle, copyAtLoad, dumpRoot, metricCollector); } else { - copyTask = TaskFactory.get(new CopyWork(dataPath, destPath, false)); + copyTask = TaskFactory.get(new CopyWork(dataPath, destPath, false, dumpRoot, metricCollector, true)); } - MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(), null, null, false); + MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(), null, null, false, + dumpRoot, metricCollector, true); if (replicationSpec.isInReplicationScope() && AcidUtils.isTransactionalTable(table)) { @@ -496,12 +521,26 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName, return 
tableDesc.getCreateTableTask(x.getInputs(), x.getOutputs(), x.getConf()); } + private static Task createTableTask(ImportTableDesc tableDesc, EximUtil.SemanticAnalyzerWrapperContext x, + String dumpRoot, ReplicationMetricCollector metricCollector) { + return tableDesc.getCreateTableTask(x.getInputs(), x.getOutputs(), x.getConf(), true, + dumpRoot, metricCollector); + } + private static Task dropTableTask(Table table, EximUtil.SemanticAnalyzerWrapperContext x, ReplicationSpec replicationSpec) { DropTableDesc dropTblDesc = new DropTableDesc(table.getTableName(), true, false, replicationSpec); return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), dropTblDesc), x.getConf()); } + private static Task dropTableTask(Table table, EximUtil.SemanticAnalyzerWrapperContext x, + ReplicationSpec replicationSpec, String dumpRoot, + ReplicationMetricCollector metricCollector) { + DropTableDesc dropTblDesc = new DropTableDesc(table.getTableName(), true, false, replicationSpec); + return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), dropTblDesc, + true, dumpRoot, metricCollector), x.getConf()); + } + private static Task alterTableTask(ImportTableDesc tableDesc, EximUtil.SemanticAnalyzerWrapperContext x, ReplicationSpec replicationSpec) { @@ -512,6 +551,18 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName, return tableDesc.getCreateTableTask(x.getInputs(), x.getOutputs(), x.getConf()); } + private static Task alterTableTask(ImportTableDesc tableDesc, + EximUtil.SemanticAnalyzerWrapperContext x, + ReplicationSpec replicationSpec, boolean isReplication, + String dumpRoot, ReplicationMetricCollector metricCollector) { + tableDesc.setReplaceMode(true); + if ((replicationSpec != null) && (replicationSpec.isInReplicationScope())) { + tableDesc.setReplicationSpec(replicationSpec); + } + return tableDesc.getCreateTableTask(x.getInputs(), x.getOutputs(), x.getConf(), isReplication, + dumpRoot, metricCollector); + } + private static Task alterSinglePartition( ImportTableDesc tblDesc, Table table, Warehouse wh, AlterTableAddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec, org.apache.hadoop.hive.ql.metadata.Partition ptn, @@ -528,11 +579,40 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName, return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf()); } + private static Task alterSinglePartition( + ImportTableDesc tblDesc, Table table, Warehouse wh, AlterTableAddPartitionDesc addPartitionDesc, + ReplicationSpec replicationSpec, org.apache.hadoop.hive.ql.metadata.Partition ptn, + EximUtil.SemanticAnalyzerWrapperContext x, boolean isReplication, + String dumpRoot, ReplicationMetricCollector metricCollector) throws MetaException, IOException, HiveException { + if ((replicationSpec != null) && (replicationSpec.isInReplicationScope())) { + addPartitionDesc.setReplicationSpec(replicationSpec); + } + AlterTableAddPartitionDesc.PartitionDesc partSpec = addPartitionDesc.getPartitions().get(0); + if (ptn == null) { + fixLocationInPartSpec(tblDesc, table, wh, replicationSpec, partSpec, x); + } else if (!externalTablePartition(tblDesc, replicationSpec)) { + partSpec.setLocation(ptn.getLocation()); // use existing location + } + return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc, + isReplication, dumpRoot, metricCollector), x.getConf()); + } + private static Task addSinglePartition(ImportTableDesc tblDesc, Table table, Warehouse wh, AlterTableAddPartitionDesc 
addPartitionDesc, ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId) throws MetaException, IOException, HiveException { + return addSinglePartition(tblDesc, table, wh, addPartitionDesc, replicationSpec, + x, writeId, stmtId, false, null, null); + } + + private static Task addSinglePartition(ImportTableDesc tblDesc, + Table table, Warehouse wh, AlterTableAddPartitionDesc addPartitionDesc, + ReplicationSpec replicationSpec, + EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId, + boolean isReplication, String dumpRoot, + ReplicationMetricCollector metricCollector) + throws MetaException, IOException, HiveException { AlterTableAddPartitionDesc.PartitionDesc partSpec = addPartitionDesc.getPartitions().get(0); boolean isSkipTrash = false; boolean needRecycle = false; @@ -544,7 +624,8 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName, // addPartitionDesc already has the right partition location @SuppressWarnings("unchecked") Task addPartTask = TaskFactory.get( - new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf()); + new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc, isReplication, + dumpRoot, metricCollector), x.getConf()); return addPartTask; } else { String srcLocation = partSpec.getLocation(); @@ -605,9 +686,10 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName, if (replicationSpec.isInReplicationScope()) { boolean copyAtLoad = x.getConf().getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET); copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, new Path(srcLocation), destPath, - x.getConf(), isSkipTrash, needRecycle, copyAtLoad); + x.getConf(), isSkipTrash, needRecycle, copyAtLoad, dumpRoot, metricCollector); } else { - copyTask = TaskFactory.get(new CopyWork(new Path(srcLocation), destPath, false)); + copyTask = TaskFactory.get(new CopyWork(new Path(srcLocation), destPath, false, + dumpRoot, metricCollector, isReplication)); } Task addPartTask = null; @@ -615,12 +697,12 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName, // During replication, by the time we are applying commit transaction event, we expect // the partition/s to be already added or altered by previous events. So no need to // create add partition event again. - addPartTask = TaskFactory.get( - new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf()); + addPartTask = TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc, + isReplication, dumpRoot, metricCollector), x.getConf()); } MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(), - null, null, false); + null, null, false, dumpRoot, metricCollector, isReplication); // Note: this sets LoadFileType incorrectly for ACID; is that relevant for import? // See setLoadFileType and setIsAcidIow calls elsewhere for an example. 
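// Editorial sketch, not part of the patch: the copy/move pair built in addSinglePartition above now
// carries the dump root and metric collector as well. Under replication the copy goes through
// ReplCopyTask, otherwise through plain CopyWork, and the follow-up MoveWork is created with the same
// replication context. srcLocation, destPath, replicationSpec, isSkipTrash, needRecycle, copyAtLoad,
// dumpRoot, metricCollector, isReplication and x are assumed to be in scope as in that method.
Task copyTask;  // type parameters omitted for brevity
if (replicationSpec.isInReplicationScope()) {
  copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, new Path(srcLocation), destPath,
      x.getConf(), isSkipTrash, needRecycle, copyAtLoad, dumpRoot, metricCollector);
} else {
  copyTask = TaskFactory.get(new CopyWork(new Path(srcLocation), destPath, false,
      dumpRoot, metricCollector, isReplication));
}
MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(), null, null, false,
    dumpRoot, metricCollector, isReplication);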
@@ -990,7 +1072,8 @@ private static void createRegularImportTasks( org.apache.hadoop.hive.ql.metadata.Partition ptn = null; if ((ptn = x.getHive().getPartition(table, partSpec, false)) == null) { x.getTasks().add(addSinglePartition( - tblDesc, table, wh, addPartitionDesc, replicationSpec, x, writeId, stmtId)); + tblDesc, table, wh, addPartitionDesc, replicationSpec, x, writeId, stmtId, + false, null, null)); } else { throw new SemanticException( ErrorMsg.PARTITION_EXISTS.getMsg(partSpecToString(partSpec))); @@ -1021,7 +1104,7 @@ private static void createRegularImportTasks( if (isPartitioned(tblDesc)) { for (AlterTableAddPartitionDesc addPartitionDesc : partitionDescs) { t.addDependentTask(addSinglePartition(tblDesc, table, wh, addPartitionDesc, - replicationSpec, x, writeId, stmtId)); + replicationSpec, x, writeId, stmtId, false, null, null)); } } else { x.getLOG().debug("adding dependent CopyWork/MoveWork for table"); @@ -1072,7 +1155,8 @@ private static void createReplImportTasks( ReplicationSpec replicationSpec, boolean waitOnPrecursor, Table table, URI fromURI, Warehouse wh, EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId, - UpdatedMetaDataTracker updatedMetadata) + UpdatedMetaDataTracker updatedMetadata, String dumpRoot, + ReplicationMetricCollector metricCollector) throws HiveException, IOException, MetaException { Task dropTblTask = null; @@ -1110,7 +1194,7 @@ private static void createReplImportTasks( // We need to drop here to handle the case where the previous incremental load created the table but // didn't set the last repl ID due to some failure. if (x.getEventType() == DumpType.EVENT_CREATE_TABLE) { - dropTblTask = dropTableTask(table, x, replicationSpec); + dropTblTask = dropTableTask(table, x, replicationSpec, dumpRoot, metricCollector); table = null; } else if (!firstIncPending) { //If in db pending flag is not set then check in table parameter for table level load. 
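// Editorial sketch, not part of the patch: the two call sites in this area show the split between the
// plain IMPORT path, which keeps the old behaviour by passing false/null/null, and the REPL LOAD path,
// which threads the dump root and metric collector through so downstream DDL and copy tasks take part
// in failure handling. All arguments are assumed to be in scope as in the surrounding methods.
// Regular IMPORT (no replication context):
x.getTasks().add(addSinglePartition(tblDesc, table, wh, addPartitionDesc, replicationSpec,
    x, writeId, stmtId, false, null, null));
// REPL LOAD (replication context present):
x.getTasks().add(addSinglePartition(tblDesc, table, wh, addPartitionDesc, replicationSpec,
    x, writeId, stmtId, true, dumpRoot, metricCollector));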
@@ -1171,10 +1255,12 @@ private static void createReplImportTasks( addPartitionDesc.setReplicationSpec(replicationSpec); if (!replicationSpec.isMetadataOnly()) { dependentTasks.add(addSinglePartition(tblDesc, table, wh, addPartitionDesc, - replicationSpec, x, writeId, stmtId)); + replicationSpec, x, writeId, stmtId, + true, dumpRoot, metricCollector)); } else { dependentTasks.add(alterSinglePartition(tblDesc, table, wh, addPartitionDesc, - replicationSpec, null, x)); + replicationSpec, null, x, true, + dumpRoot, metricCollector)); } if (updatedMetadata != null) { updatedMetadata.addPartition(table.getDbName(), table.getTableName(), @@ -1185,7 +1271,7 @@ private static void createReplImportTasks( && !shouldSkipDataCopyInReplScope(tblDesc, replicationSpec)) { x.getLOG().debug("adding dependent CopyWork/MoveWork for table"); dependentTasks = Collections.singletonList(loadTable(fromURI, table, replicationSpec.isReplace(), - new Path(tblDesc.getLocation()), replicationSpec, x, writeId, stmtId)); + new Path(tblDesc.getLocation()), replicationSpec, x, writeId, stmtId, dumpRoot, metricCollector)); } // During replication, by the time we replay a commit transaction event, the table should @@ -1196,7 +1282,7 @@ private static void createReplImportTasks( if (x.getEventType() == DumpType.EVENT_CREATE_TABLE && !tblDesc.isExternal()) { tblDesc.setLocation(null); } - Task t = createTableTask(tblDesc, x); + Task t = createTableTask(tblDesc, x, dumpRoot, metricCollector); if (dependentTasks != null) { dependentTasks.forEach(task -> t.addDependentTask(task)); } @@ -1250,13 +1336,15 @@ private static void createReplImportTasks( if (ptn == null) { if (!replicationSpec.isMetadataOnly()){ x.getTasks().add(addSinglePartition( - tblDesc, table, wh, addPartitionDesc, replicationSpec, x, writeId, stmtId)); + tblDesc, table, wh, addPartitionDesc, replicationSpec, x, writeId, stmtId, + true, dumpRoot, metricCollector)); if (updatedMetadata != null) { updatedMetadata.addPartition(table.getDbName(), table.getTableName(), partSpec); } } else { x.getTasks().add(alterSinglePartition( - tblDesc, table, wh, addPartitionDesc, replicationSpec, null, x)); + tblDesc, table, wh, addPartitionDesc, replicationSpec, null, x, + true, dumpRoot, metricCollector)); if (updatedMetadata != null) { updatedMetadata.addPartition(table.getDbName(), table.getTableName(), partSpec); } @@ -1266,11 +1354,11 @@ private static void createReplImportTasks( // the destination ptn's repl.last.id is older than the replacement's. 
if (replicationSpec.allowReplacementInto(ptn.getParameters())){ if (!replicationSpec.isMetadataOnly()){ - x.getTasks().add(addSinglePartition( - tblDesc, table, wh, addPartitionDesc, replicationSpec, x, writeId, stmtId)); + x.getTasks().add(addSinglePartition(tblDesc, table, wh, addPartitionDesc, replicationSpec, x, + writeId, stmtId, true, dumpRoot, metricCollector)); } else { x.getTasks().add(alterSinglePartition( - tblDesc, table, wh, addPartitionDesc, replicationSpec, ptn, x)); + tblDesc, table, wh, addPartitionDesc, replicationSpec, ptn, x, true, dumpRoot, metricCollector)); } if (updatedMetadata != null) { updatedMetadata.addPartition(table.getDbName(), table.getTableName(), partSpec); @@ -1283,7 +1371,7 @@ private static void createReplImportTasks( } if (replicationSpec.isMetadataOnly() && partitionDescs.isEmpty()){ // MD-ONLY table alter - x.getTasks().add(alterTableTask(tblDesc, x,replicationSpec)); + x.getTasks().add(alterTableTask(tblDesc, x,replicationSpec, true, dumpRoot, metricCollector)); if (lockType == WriteEntity.WriteType.DDL_NO_LOCK){ lockType = WriteEntity.WriteType.DDL_SHARED; } @@ -1296,9 +1384,9 @@ private static void createReplImportTasks( if (!replicationSpec.isMetadataOnly()) { // repl-imports are replace-into unless the event is insert-into loadTable(fromURI, table, replicationSpec.isReplace(), new Path(tblDesc.getLocation()), - replicationSpec, x, writeId, stmtId); + replicationSpec, x, writeId, stmtId, dumpRoot, metricCollector); } else { - x.getTasks().add(alterTableTask(tblDesc, x, replicationSpec)); + x.getTasks().add(alterTableTask(tblDesc, x, replicationSpec, true, dumpRoot, metricCollector)); } if (lockType == WriteEntity.WriteType.DDL_NO_LOCK){ lockType = WriteEntity.WriteType.DDL_SHARED; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java index c7656bc64d..5805a9c9d0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java @@ -168,7 +168,7 @@ public static long writeFile(FileSystem fs, Path exportFilePath, InputStream is, } } - public static void writeStackTrace(Exception e, Path outputFile, HiveConf conf) throws SemanticException { + public static void writeStackTrace(Throwable e, Path outputFile, HiveConf conf) throws SemanticException { Retryable retryable = Retryable.builder() .withHiveConf(conf) .withRetryOnException(IOException.class).withFailOnException(FileNotFoundException.class).build(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java index dc40e1df9a..b6d43f7946 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java @@ -173,6 +173,10 @@ public Path getDumpFilePath() { return dumpFile; } + public static String getDmdFileName() { + return DUMP_METADATA; + } + public boolean isIncrementalDump() throws SemanticException { initializeIfNot(); return (this.dumpType == DumpType.INCREMENTAL); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java index b1c2709b33..c92ef253de 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java @@ -45,7 +45,8 @@ Task abortTxnTask = TaskFactory.get( new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.dbName, null, - msg.getTxnId(), ReplTxnWork.OperationType.REPL_ABORT_TXN, context.eventOnlyReplicationSpec()), + msg.getTxnId(), ReplTxnWork.OperationType.REPL_ABORT_TXN, context.eventOnlyReplicationSpec(), + context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf ); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddCheckConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddCheckConstraintHandler.java index ce3316917c..82f93fa280 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddCheckConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddCheckConstraintHandler.java @@ -70,7 +70,8 @@ AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints); Task addConstraintsTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc, true, + context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf); tasks.add(addConstraintsTask); context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddDefaultConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddDefaultConstraintHandler.java index b17126e9ed..6022105c6d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddDefaultConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddDefaultConstraintHandler.java @@ -69,7 +69,8 @@ AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints); Task addConstraintsTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc, true, + context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf); tasks.add(addConstraintsTask); context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java index 6f98373c9e..5f723baa7e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java @@ -72,7 +72,10 @@ AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints); Task addConstraintsTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc, + true, context.getDumpDirectory(), context.getMetricCollector()), + context.hiveConf + ); 
tasks.add(addConstraintsTask); context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java index 995c5d2f84..c6eaed7fe5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java @@ -66,8 +66,8 @@ Constraints constraints = new Constraints(null, null, nns, null, null, null); AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints); - Task addConstraintsTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + Task addConstraintsTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc, + true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf); tasks.add(addConstraintsTask); context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java index f6decc27fc..bddaf37287 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java @@ -66,8 +66,9 @@ Constraints constraints = new Constraints(pks, null, null, null, null, null); AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints); - Task addConstraintsTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + Task addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, + addConstraintsDesc, true, context.getDumpDirectory(), + context.getMetricCollector()), context.hiveConf); tasks.add(addConstraintsTask); context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java index e1c1d3a180..adbe13a5ae 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java @@ -67,7 +67,8 @@ AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints); Task addConstraintsTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc, + true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf); tasks.add(addConstraintsTask); 
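// Editorial sketch, not part of the patch: inside an event handler the two context getters feed every
// piece of work that gets created, whether it is DDL (constraints, alter database) or a transactional
// replication operation. Both forms below appear in the handlers in this patch; addConstraintsDesc,
// msg, readEntitySet and writeEntitySet are assumed to be in scope as in those handlers.
Task addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet,
    addConstraintsDesc, true, context.getDumpDirectory(), context.getMetricCollector()),
    context.hiveConf);
ReplTxnWork abortTxnWork = new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.dbName,
    null, msg.getTxnId(), ReplTxnWork.OperationType.REPL_ABORT_TXN,
    context.eventOnlyReplicationSpec(), context.getDumpDirectory(), context.getMetricCollector());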
context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java index f9a075076a..d62a6692bf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java @@ -51,7 +51,8 @@ // Repl policy should be created based on the table name in context. ReplTxnWork work = new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), dbName, tableName, - ReplTxnWork.OperationType.REPL_ALLOC_WRITE_ID, msg.getTxnToWriteIdList(), context.eventOnlyReplicationSpec()); + ReplTxnWork.OperationType.REPL_ALLOC_WRITE_ID, msg.getTxnToWriteIdList(), context.eventOnlyReplicationSpec(), + context.getDumpDirectory(), context.getMetricCollector()); Task allocWriteIdTask = TaskFactory.get(work, context.hiveConf); context.log.info("Added alloc write id task : {}", allocWriteIdTask.getId()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java index 76c7dd5194..041a4c9120 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java @@ -78,8 +78,8 @@ newDb.getOwnerType()), context.eventOnlyReplicationSpec()); } - Task alterDbTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, alterDbDesc), context.hiveConf); + Task alterDbTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, + alterDbDesc, true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf); context.log.debug("Added alter database task : {}:{}", alterDbTask.getId(), actualDbName); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java index 86f1cb9d53..2224793a05 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java @@ -55,7 +55,9 @@ String tblName = null; ReplTxnWork work = new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.dbName, - null, msg.getTxnId(), ReplTxnWork.OperationType.REPL_COMMIT_TXN, context.eventOnlyReplicationSpec()); + null, msg.getTxnId(), ReplTxnWork.OperationType.REPL_COMMIT_TXN, + context.eventOnlyReplicationSpec(), context.getDumpDirectory(), + context.getMetricCollector()); if (numEntry > 0) { context.log.debug("Commit txn handler for txnid " + msg.getTxnId() + " databases : " + msg.getDatabases() + @@ -76,7 +78,8 @@ tblName = actualTblName; // for warehouse level dump, use db name from write event dbName = (context.isDbNameEmpty() ? 
actualDBName : context.dbName); - Context currentContext = new Context(context, dbName); + Context currentContext = new Context(context, dbName, + context.getDumpDirectory(), context.getMetricCollector()); currentContext.setLocation(location.toUri().toString()); // Piggybacking in Import logic for now diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java index c2e652a1b1..cf7879875a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java @@ -44,7 +44,7 @@ @Override public List> handle(Context context) - throws SemanticException { + throws SemanticException { MetaData metaData; try { FileSystem fs = FileSystem.get(new Path(context.location).toUri(), context.hiveConf); @@ -60,20 +60,23 @@ CreateDatabaseDesc createDatabaseDesc = new CreateDatabaseDesc(destinationDBName, db.getDescription(), null, null, true, db.getParameters()); Task createDBTask = TaskFactory.get( - new DDLWork(new HashSet<>(), new HashSet<>(), createDatabaseDesc), context.hiveConf); + new DDLWork(new HashSet<>(), new HashSet<>(), createDatabaseDesc, true, + context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf); if (!db.getParameters().isEmpty()) { AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(destinationDBName, db.getParameters(), context.eventOnlyReplicationSpec()); - Task alterDbProperties = TaskFactory - .get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc), context.hiveConf); + Task alterDbProperties = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), + alterDbDesc, true, context.getDumpDirectory(), + context.getMetricCollector()), context.hiveConf); createDBTask.addDependentTask(alterDbProperties); } if (StringUtils.isNotEmpty(db.getOwnerName())) { AlterDatabaseSetOwnerDesc alterDbOwner = new AlterDatabaseSetOwnerDesc(destinationDBName, new PrincipalDesc(db.getOwnerName(), db.getOwnerType()), context.eventOnlyReplicationSpec()); - Task alterDbTask = TaskFactory - .get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbOwner), context.hiveConf); + Task alterDbTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), + alterDbOwner, true, context.getDumpDirectory(), context.getMetricCollector()), + context.hiveConf); createDBTask.addDependentTask(alterDbTask); } updatedMetadata diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateFunctionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateFunctionHandler.java index e65769aa04..b934ca4af4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateFunctionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateFunctionHandler.java @@ -62,10 +62,10 @@ public String getFunctionName() { FunctionDescBuilder builder = new FunctionDescBuilder(context); CreateFunctionDesc descToLoad = builder.build(); this.functionName = builder.metadata.function.getFunctionName(); - context.log.debug("Loading function desc : {}", descToLoad.toString()); Task createTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, descToLoad), context.hiveConf); + new DDLWork(readEntitySet, writeEntitySet, descToLoad, + true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf); 
context.log.debug("Added create function task : {}:{},{}", createTask.getId(), descToLoad.getName(), descToLoad.getClassName()); // This null check is specifically done as the same class is used to handle both incremental and @@ -203,9 +203,11 @@ ResourceUri destinationResourceUri(ResourceUri resourceUri) private Task getCopyTask(String sourceUri, Path dest) { boolean copyAtLoad = context.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET); if (copyAtLoad ) { - return ReplCopyTask.getLoadCopyTask(metadata.getReplicationSpec(), new Path(sourceUri), dest, context.hiveConf); + return ReplCopyTask.getLoadCopyTask(metadata.getReplicationSpec(), new Path(sourceUri), dest, context.hiveConf, + context.getDumpDirectory(), context.getMetricCollector()); } else { - return TaskFactory.get(new CopyWork(new Path(sourceUri), dest, true, false), context.hiveConf); + return TaskFactory.get(new CopyWork(new Path(sourceUri), dest, true, false, + context.getDumpDirectory(), context.getMetricCollector(), true), context.hiveConf); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java index 34d3b00500..70299f1b5d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java @@ -43,7 +43,8 @@ AlterTableDropConstraintDesc dropConstraintsDesc = new AlterTableDropConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraintName); Task dropConstraintsTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, dropConstraintsDesc), context.hiveConf); + new DDLWork(readEntitySet, writeEntitySet, dropConstraintsDesc, true, + context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf); context.log.debug("Added drop constrain task : {}:{}", dropConstraintsTask.getId(), actualTblName); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); return Collections.singletonList(dropConstraintsTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java index c10174abd0..8ac874a58f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java @@ -37,8 +37,8 @@ deserializer.getDropDatabaseMessage(context.dmd.getPayload()); String actualDbName = context.isDbNameEmpty() ? 
msg.getDB() : context.dbName; DropDatabaseDesc desc = new DropDatabaseDesc(actualDbName, true, context.eventOnlyReplicationSpec()); - Task dropDBTask = - TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), desc), context.hiveConf); + Task dropDBTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), desc, + true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf); context.log.info( "Added drop database task : {}:{}", dropDBTask.getId(), desc.getDatabaseName()); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, null, null); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropFunctionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropFunctionHandler.java index a3d5fd0c0d..55d9232276 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropFunctionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropFunctionHandler.java @@ -47,7 +47,8 @@ DropFunctionDesc desc = new DropFunctionDesc( qualifiedFunctionName, false, context.eventOnlyReplicationSpec()); Task dropFunctionTask = - TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, desc), context.hiveConf); + TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, desc, true, + context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf); context.log.debug( "Added drop function task : {}:{}", dropFunctionTask.getId(), desc.getName() ); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java index 066549d9cd..448cb2f471 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java @@ -47,8 +47,8 @@ AlterTableDropPartitionDesc dropPtnDesc = new AlterTableDropPartitionDesc(HiveTableName.ofNullable(actualTblName, actualDbName), partSpecs, true, context.eventOnlyReplicationSpec()); - Task dropPtnTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc), context.hiveConf + Task dropPtnTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc, + true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf ); context.log.debug("Added drop ptn task : {}:{},{}", dropPtnTask.getId(), dropPtnDesc.getTableName(), msg.getPartitions()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java index ec4cb82583..31649b3b27 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java @@ -49,7 +49,8 @@ DropTableDesc dropTableDesc = new DropTableDesc(actualDbName + "." 
+ actualTblName, true, true, context.eventOnlyReplicationSpec(), false); Task dropTableTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, dropTableDesc), context.hiveConf + new DDLWork(readEntitySet, writeEntitySet, dropTableDesc, true, + context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf ); context.log.debug( "Added drop tbl task : {}:{}", dropTableTask.getId(), dropTableDesc.getTableName() diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java index 4b8274d5e6..611c4863b8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java @@ -55,7 +55,8 @@ InsertMessage insertMessage = deserializer.getInsertMessage(withinContext.dmd.getPayload()); String actualDbName = withinContext.isDbNameEmpty() ? insertMessage.getDB() : withinContext.dbName; - Context currentContext = new Context(withinContext, actualDbName); + Context currentContext = new Context(withinContext, actualDbName, + withinContext.getDumpDirectory(), withinContext.getMetricCollector()); // Piggybacking in Import logic for now TableHandler tableHandler = new TableHandler(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java index 2851880c38..57d62b3a66 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.load.UpdatedMetaDataTracker; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.slf4j.Logger; import java.io.Serializable; @@ -54,6 +55,8 @@ final Hive db; final org.apache.hadoop.hive.ql.Context nestedContext; final Logger log; + String dumpDirectory; + private transient ReplicationMetricCollector metricCollector; public Context(String dbName, String location, Task precursor, DumpMetaData dmd, HiveConf hiveConf, @@ -68,6 +71,22 @@ public Context(String dbName, String location, this.log = log; } + public Context(String dbName, String location, + Task precursor, DumpMetaData dmd, HiveConf hiveConf, + Hive db, org.apache.hadoop.hive.ql.Context nestedContext, Logger log, + String dumpDirectory, ReplicationMetricCollector metricCollector) { + this.dbName = dbName; + this.location = location; + this.precursor = precursor; + this.dmd = dmd; + this.hiveConf = hiveConf; + this.db = db; + this.nestedContext = nestedContext; + this.log = log; + this.dumpDirectory = dumpDirectory; + this.metricCollector = metricCollector; + } + public Context(Context other, String dbName) { this.dbName = dbName; this.location = other.location; @@ -79,6 +98,19 @@ public Context(Context other, String dbName) { this.log = other.log; } + public Context(Context other, String dbName, String dumpDirectory, ReplicationMetricCollector metricCollector) { + this.dbName = dbName; + this.location = other.location; + this.precursor = other.precursor; + this.dmd = other.dmd; + this.hiveConf = other.hiveConf; + this.db = other.db; + this.nestedContext = other.nestedContext; + this.log = other.log; + this.dumpDirectory = 
dumpDirectory; + this.metricCollector = metricCollector; + } + public boolean isDbNameEmpty() { return StringUtils.isEmpty(dbName); } @@ -96,6 +128,14 @@ ReplicationSpec eventOnlyReplicationSpec() throws SemanticException { return nestedContext; } + public String getDumpDirectory() { + return dumpDirectory; + } + + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + public HiveTxnManager getTxnMgr() { return nestedContext.getHiveTxnManager(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java index cd7274de60..dc61814b5b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java @@ -44,7 +44,8 @@ Task openTxnTask = TaskFactory.get( new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.dbName, null, - msg.getTxnIds(), ReplTxnWork.OperationType.REPL_OPEN_TXN, context.eventOnlyReplicationSpec()), + msg.getTxnIds(), ReplTxnWork.OperationType.REPL_OPEN_TXN, context.eventOnlyReplicationSpec(), + context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf ); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java index ed7aa8d604..1627337880 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java @@ -61,7 +61,8 @@ tableName, oldPartSpec, newPartSpec, replicationSpec, null); renamePtnDesc.setWriteId(msg.getWriteId()); Task renamePtnTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, renamePtnDesc), context.hiveConf); + new DDLWork(readEntitySet, writeEntitySet, renamePtnDesc, true, + context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf); context.log.debug("Added rename ptn task : {}:{}->{}", renamePtnTask.getId(), oldPartSpec, newPartSpec); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, newPartSpec); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java index 05e094bbb1..2e673b3f94 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java @@ -61,8 +61,9 @@ AlterTableRenameDesc renameTableDesc = new AlterTableRenameDesc(oldName, replicationSpec, false, newName.getNotEmptyDbTable()); renameTableDesc.setWriteId(msg.getWriteId()); - Task renameTableTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, renameTableDesc), context.hiveConf); + Task renameTableTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, + renameTableDesc, true, context.getDumpDirectory(), + context.getMetricCollector()), context.hiveConf); context.log.debug("Added rename table task : {}:{}->{}", renameTableTask.getId(), oldName.getNotEmptyDbTable(), newName.getNotEmptyDbTable()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java index 
fda6c6c7fe..a8d2bffa83 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java @@ -67,11 +67,13 @@ // REPL LOAD is not partition level. It is always DB or table level. So, passing null for partition specs. if (TableType.VIRTUAL_VIEW.name().equals(rv.getTable().getTableType())) { - importTasks.add(ReplLoadTask.createViewTask(rv, context.dbName, context.hiveConf)); + importTasks.add(ReplLoadTask.createViewTask(rv, context.dbName, context.hiveConf, + context.getDumpDirectory(), context.getMetricCollector())); } else { ImportSemanticAnalyzer.prepareImport(false, isLocationSet, isExternal, false, (context.precursor != null), parsedLocation, null, context.dbName, - null, context.location, x, updatedMetadata, context.getTxnMgr(), tuple.writeId, rv); + null, context.location, x, updatedMetadata, context.getTxnMgr(), tuple.writeId, rv, + context.getDumpDirectory(), context.getMetricCollector()); } Task openTxnTask = x.getOpenTxnTask(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java index 85e9f9245e..2af9f1354a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java @@ -60,7 +60,8 @@ context.eventOnlyReplicationSpec()); truncateTableDesc.setWriteId(msg.getWriteId()); Task truncatePtnTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf); + new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc, true, + context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf); context.log.debug("Added truncate ptn task : {}:{}:{}", truncatePtnTask.getId(), truncateTableDesc.getTableName(), truncateTableDesc.getWriteId()); updatedMetadata.set(context.dmd.getEventTo().toString(), tName.getDb(), tName.getTable(), partSpec); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java index 6a50c8a054..552cbed788 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java @@ -37,8 +37,9 @@ TruncateTableDesc truncateTableDesc = new TruncateTableDesc(tName, null, context.eventOnlyReplicationSpec()); truncateTableDesc.setWriteId(msg.getWriteId()); - Task truncateTableTask = TaskFactory.get( - new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf); + Task truncateTableTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, + truncateTableDesc, true, context.getDumpDirectory(), + context.getMetricCollector()), context.hiveConf); context.log.debug("Added truncate tbl task : {}:{}:{}", truncateTableTask.getId(), truncateTableDesc.getTableName(), truncateTableDesc.getWriteId()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java index 8b1741e2ae..9438c99073 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java @@ -53,7 +53,9 @@ try { return ReplUtils.addTasksForLoadingColStats(colStats, context.hiveConf, updatedMetadata, - upcsm.getTableObject(), upcsm.getWriteId()); + upcsm.getTableObject(), upcsm.getWriteId(), + context.getDumpDirectory(), + context.getMetricCollector()); } catch(Exception e) { throw new SemanticException(e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java index 6c54f9753a..a7f189e9e1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java @@ -53,7 +53,8 @@ try { return ReplUtils.addTasksForLoadingColStats(colStats, context.hiveConf, updatedMetadata, - utcsm.getTableObject(), utcsm.getWriteId()); + utcsm.getTableObject(), utcsm.getWriteId(), context.getDumpDirectory(), + context.getMetricCollector()); } catch(Exception e) { throw new SemanticException(e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java index 59bc626084..0b4b3cf544 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java @@ -75,8 +75,12 @@ public void reportStageEnd(String stageName, Status status, long lastReplId) thr LOG.debug("Stage ended {}, {}, {}", stageName, status, lastReplId ); Progress progress = replicationMetric.getProgress(); Stage stage = progress.getStageByName(stageName); + if(stage == null){ + stage = new Stage(stageName, status, -1L); + } stage.setStatus(status); stage.setEndTime(System.currentTimeMillis()); + progress.addStage(stage); replicationMetric.setProgress(progress); Metadata metadata = replicationMetric.getMetadata(); metadata.setLastReplId(lastReplId); @@ -93,9 +97,13 @@ public void reportStageEnd(String stageName, Status status, String errorLogPath) LOG.debug("Stage Ended {}, {}", stageName, status ); Progress progress = replicationMetric.getProgress(); Stage stage = progress.getStageByName(stageName); + if(stage == null){ + stage = new Stage(stageName, status, -1L); + } stage.setStatus(status); stage.setEndTime(System.currentTimeMillis()); stage.setErrorLogPath(errorLogPath); + progress.addStage(stage); replicationMetric.setProgress(progress); metricCollector.addMetric(replicationMetric); if (Status.FAILED == status || Status.FAILED_ADMIN == status) { @@ -109,8 +117,12 @@ public void reportStageEnd(String stageName, Status status) throws SemanticExcep LOG.debug("Stage Ended {}, {}", stageName, status ); Progress progress = replicationMetric.getProgress(); Stage stage = progress.getStageByName(stageName); + if(stage == null){ + stage = new Stage(stageName, status, -1L); + } stage.setStatus(status); stage.setEndTime(System.currentTimeMillis()); + progress.addStage(stage); replicationMetric.setProgress(progress); metricCollector.addMetric(replicationMetric); if (Status.FAILED == status || Status.FAILED_ADMIN == status) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java index 14900255fd..ac27eb9c85 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -45,6 +46,9 @@ private final String colType; private final ColumnStatistics colStats; private long writeId; + private boolean isReplication; + private String dumpDirectory; + private transient ReplicationMetricCollector metricCollector; public ColumnStatsUpdateWork(String partName, Map mapProp, @@ -71,11 +75,33 @@ public ColumnStatsUpdateWork(ColumnStatistics colStats) { this.colType = null; } + public ColumnStatsUpdateWork(ColumnStatistics colStats, String dumpRoot, ReplicationMetricCollector metricCollector, + boolean isReplication) { + this.colStats = colStats; + this.partName = null; + this.mapProp = null; + this.dbName = null; + this.tableName = null; + this.colName = null; + this.colType = null; + this.dumpDirectory = dumpRoot; + this.metricCollector = metricCollector; + this.isReplication = true; + } + @Override public String toString() { return null; } + public String getDumpDirectory() { + return dumpDirectory; + } + + public boolean isReplication() { + return isReplication; + } + public String getPartName() { return partName; } @@ -102,6 +128,11 @@ public String getColType() { public ColumnStatistics getColStats() { return colStats; } + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + + @Override public void setWriteId(long writeId) { this.writeId = writeId; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java index f69776ad7b..2439cde446 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java @@ -21,6 +21,7 @@ import java.io.Serializable; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain.Level; /** @@ -34,6 +35,9 @@ private Path[] toPath; private boolean errorOnSrcEmpty; private boolean overwrite = true; + private boolean isReplication; + private String dumpDirectory; + private transient ReplicationMetricCollector metricCollector; public CopyWork() { } @@ -43,12 +47,32 @@ public CopyWork(final Path fromPath, final Path toPath, boolean errorOnSrcEmpty) this.setErrorOnSrcEmpty(errorOnSrcEmpty); } + public CopyWork(final Path fromPath, final Path toPath, boolean errorOnSrcEmpty, + String dumpDirectory, ReplicationMetricCollector metricCollector, + boolean isReplication) { + this(new Path[] { fromPath }, new Path[] { toPath }); + this.dumpDirectory = dumpDirectory; + this.metricCollector = metricCollector; + this.setErrorOnSrcEmpty(errorOnSrcEmpty); + this.isReplication = isReplication; + } + public CopyWork(final Path fromPath, final Path toPath, boolean errorOnSrcEmpty, boolean overwrite) { this(new Path[] { fromPath }, new Path[] { toPath }); this.setErrorOnSrcEmpty(errorOnSrcEmpty); this.setOverwrite(overwrite); } + public CopyWork(final Path fromPath, final Path toPath, boolean errorOnSrcEmpty, boolean overwrite, + String dumpDirectory, ReplicationMetricCollector metricCollector, boolean isReplication) { + this(new Path[] { fromPath }, new Path[] { toPath }); 
+ this.setErrorOnSrcEmpty(errorOnSrcEmpty); + this.setOverwrite(overwrite); + this.dumpDirectory = dumpDirectory; + this.metricCollector = metricCollector; + this.isReplication = isReplication; + } + public CopyWork(final Path[] fromPath, final Path[] toPath) { if (fromPath.length != toPath.length) { throw new RuntimeException( @@ -87,6 +111,16 @@ public Path getToPathExplain() { return toPath; } + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + + public String getDumpDirectory() { + return dumpDirectory; + } + + public boolean isReplication() { return isReplication; } + public void setErrorOnSrcEmpty(boolean errorOnSrcEmpty) { this.errorOnSrcEmpty = errorOnSrcEmpty; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java index 41fbe2eabf..5dd7fc5f11 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hive.ql.parse.HiveTableName; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; /** * ImportTableDesc. @@ -163,6 +164,13 @@ public String getDatabaseName() { return TaskFactory.get(new DDLWork(inputs, outputs, createTblDesc), conf); } + public Task getCreateTableTask(Set inputs, Set outputs, HiveConf conf, + boolean isReplication, + String dumpRoot, ReplicationMetricCollector metricCollector) { + return TaskFactory.get(new DDLWork(inputs, outputs, createTblDesc, isReplication, + dumpRoot, metricCollector), conf); + } + public TableType tableType() { return TableType.MANAGED_TABLE; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java index d7253a4955..4fd3768882 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain.Level; /** @@ -41,6 +42,9 @@ private boolean checkFileFormat; private boolean srcLocal; private boolean needCleanTarget; + private boolean isReplication; + private String dumpDirectory; + private transient ReplicationMetricCollector metricCollector; /** * ReadEntitites that are passed to the hooks. 
@@ -87,6 +91,16 @@ public MoveWork(Set inputs, Set outputs, this(inputs, outputs, loadTableWork, loadFileWork, checkFileFormat, false); } + public MoveWork(Set inputs, Set outputs, + final LoadTableDesc loadTableWork, final LoadFileDesc loadFileWork, + boolean checkFileFormat, String dumpRoot, ReplicationMetricCollector metricCollector, + boolean isReplication) { + this(inputs, outputs, loadTableWork, loadFileWork, checkFileFormat, false); + this.dumpDirectory = dumpRoot; + this.metricCollector = metricCollector; + this.isReplication = isReplication; + } + public MoveWork(final MoveWork o) { loadTableWork = o.getLoadTableWork(); loadFileWork = o.getLoadFileWork(); @@ -169,6 +183,18 @@ public void setIsInReplicationScope(boolean isInReplicationScope) { this.isInReplicationScope = isInReplicationScope; } + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + + public String getDumpDirectory() { + return dumpDirectory; + } + + public boolean isReplication() { + return isReplication; + } + public boolean getIsInReplicationScope() { return this.isInReplicationScope; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplCopyWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplCopyWork.java index 21da20fc06..3faed74bc6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplCopyWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplCopyWork.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.plan; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain.Level; /** @@ -61,6 +62,10 @@ private boolean overWrite = false; + private String dumpDirectory; + + private transient ReplicationMetricCollector metricCollector; + public ReplCopyWork(final Path srcPath, final Path destPath, boolean errorOnSrcEmpty) { super(srcPath, destPath, errorOnSrcEmpty); } @@ -70,6 +75,13 @@ public ReplCopyWork(final Path srcPath, final Path destPath, boolean errorOnSrcE this.overWrite = overWrite; } + public ReplCopyWork(final Path srcPath, final Path destPath, boolean errorOnSrcEmpty, boolean overWrite, + String dumpDirectory, ReplicationMetricCollector metricCollector) { + this(srcPath, destPath, errorOnSrcEmpty); + this.overWrite = overWrite; + this.dumpDirectory = dumpDirectory; + this.metricCollector = metricCollector; + } public void setReadSrcAsFilesList(boolean readSrcAsFilesList) { this.readSrcAsFilesList = readSrcAsFilesList; } @@ -118,6 +130,10 @@ public void setCheckDuplicateCopy(boolean flag) { checkDuplicateCopy = flag; } + public ReplicationMetricCollector getMetricCollector() { return metricCollector; } + + public String getDumpDirectory() { return dumpDirectory; } + public boolean isOverWrite() { return overWrite; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java index 7e16a7c49e..2d2dc3f7cf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java @@ -18,10 +18,11 @@ package org.apache.hadoop.hive.ql.plan; import java.io.Serializable; import org.apache.hadoop.hive.metastore.api.ReplLastIdInfo; import org.apache.hadoop.hive.metastore.api.TxnToWriteId; import org.apache.hadoop.hive.metastore.api.WriteEventInfo; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import
org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain.Level; import java.util.ArrayList; import java.util.Collections; @@ -44,6 +47,9 @@ private ReplicationSpec replicationSpec; private List writeEventInfos; private ReplLastIdInfo replLastIdInfo; + private String dumpDirectory; + private transient ReplicationMetricCollector metricCollector; + /** * OperationType. @@ -73,16 +79,38 @@ public ReplTxnWork(String replPolicy, String dbName, String tableName, List txnIds, OperationType type, + ReplicationSpec replicationSpec, String dumpDirectory, ReplicationMetricCollector metricCollector) { + this(replPolicy, dbName, tableName, txnIds, type, null, replicationSpec); + this.dumpDirectory = dumpDirectory; + this.metricCollector = metricCollector; + } + public ReplTxnWork(String replPolicy, String dbName, String tableName, Long txnId, OperationType type, ReplicationSpec replicationSpec) { this(replPolicy, dbName, tableName, Collections.singletonList(txnId), type, null, replicationSpec); } + public ReplTxnWork(String replPolicy, String dbName, String tableName, Long txnId, + OperationType type, ReplicationSpec replicationSpec, + String dumpDirectory, ReplicationMetricCollector metricCollector) { + this(replPolicy, dbName, tableName, Collections.singletonList(txnId), type, null, replicationSpec); + this.dumpDirectory = dumpDirectory; + this.metricCollector = metricCollector; + } + public ReplTxnWork(String replPolicy, String dbName, String tableName, OperationType type, List txnToWriteIdList, ReplicationSpec replicationSpec) { this(replPolicy, dbName, tableName, null, type, txnToWriteIdList, replicationSpec); } + public ReplTxnWork(String replPolicy, String dbName, String tableName, OperationType type, + List txnToWriteIdList, ReplicationSpec replicationSpec, + String dumpDirectory, ReplicationMetricCollector metricCollector) { + this(replPolicy, dbName, tableName, null, type, txnToWriteIdList, replicationSpec); + this.dumpDirectory = dumpDirectory; + this.metricCollector = metricCollector; + } public ReplTxnWork(String dbName, String tableName, List partNames, String validWriteIdList, OperationType type) { this.dbName = dbName; @@ -92,6 +120,18 @@ public ReplTxnWork(String dbName, String tableName, List partNames, this.operation = type; } + public ReplTxnWork(String dbName, String tableName, List partNames, + String validWriteIdList, OperationType type, String dumpDirectory, + ReplicationMetricCollector metricCollector) { + this.dbName = dbName; + this.tableName = tableName; + this.partNames = partNames; + this.validWriteIdList = validWriteIdList; + this.operation = type; + this.dumpDirectory = dumpDirectory; + this.metricCollector = metricCollector; + } + public void addWriteEventInfo(WriteEventInfo writeEventInfo) { if (this.writeEventInfos == null) { this.writeEventInfos = new ArrayList<>(); @@ -142,4 +182,12 @@ public ReplicationSpec getReplicationSpec() { public ReplLastIdInfo getReplLastIdInfo() { return replLastIdInfo; } + + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + + public String getDumpDirectory() { + return dumpDirectory; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorException.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorException.java index 37fbc2042b..7c4d2c1ed6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorException.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorException.java @@ -55,6 +55,10 @@ public CommandProcessorException(int responseCode, int hiveErrorCode, String err public int getResponseCode() { return responseCode; } + + public String getCauseMessage() { + return getCause() == null ? "" : getCause().getMessage(); + } public int getErrorCode() { return hiveErrorCode; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java index d2222e7cae..0af6b00ce5 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java @@ -21,6 +21,7 @@ import com.google.gson.Gson; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.repl.ranger.RangerExportPolicyList; import org.apache.hadoop.hive.ql.exec.repl.ranger.RangerRestClientImpl; import org.apache.hadoop.hive.ql.exec.repl.ranger.RangerPolicy; @@ -78,14 +79,19 @@ public void setup() throws Exception { @Test public void testFailureInvalidAuthProviderEndpoint() throws Exception { Mockito.when(conf.get(RANGER_REST_URL)).thenReturn(null); + Mockito.when(work.getDbName()).thenReturn("testdb"); + Mockito.when(work.getCurrentDumpPath()).thenReturn(new Path("/tmp")); + Mockito.when(work.getRangerConfigResource()).thenReturn(new URL("file://ranger.xml")); int status = task.execute(); - Assert.assertEquals(40000, status); + Assert.assertEquals(ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE.getErrorCode(), status); } @Test public void testFailureInvalidRangerConfig() throws Exception { + Mockito.when(work.getDbName()).thenReturn("testdb"); + Mockito.when(work.getCurrentDumpPath()).thenReturn(new Path("/tmp")); int status = task.execute(); - Assert.assertEquals(40000, status); + Assert.assertEquals(ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE.getErrorCode(), status); } @Test diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricUpdateOnFailure.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricUpdateOnFailure.java new file mode 100644 index 0000000000..db951fe5e9 --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricUpdateOnFailure.java @@ -0,0 +1,329 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.parse.repl.metric; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.Constants; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.ddl.DDLWork; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.exec.repl.ReplAck; +import org.apache.hadoop.hive.ql.exec.repl.ReplDumpWork; +import org.apache.hadoop.hive.ql.exec.repl.ReplLoadWork; +import org.apache.hadoop.hive.ql.parse.ExplainConfiguration; +import org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Progress; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +@RunWith(MockitoJUnitRunner.class) +public class TestReplicationMetricUpdateOnFailure { + + FileSystem fs; + HiveConf conf; + String TEST_PATH; + + @Rule + public final TestName testName = new TestName(); + + RuntimeException recoverableException = new RuntimeException(); + RuntimeException nonRecoverableException = new RuntimeException(ErrorMsg.REPL_FAILED_WITH_NON_RECOVERABLE_ERROR.getMsg()); + + @Before + public void setup() throws Exception { + + conf = new HiveConf(); + conf.set(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true"); + conf.set(Constants.SCHEDULED_QUERY_SCHEDULENAME, "repl"); + conf.set(Constants.SCHEDULED_QUERY_EXECUTIONID, "1"); + + final String tid = + TestReplicationMetricUpdateOnFailure.class.getCanonicalName().toLowerCase().replace('.','_') + + "_" + System.currentTimeMillis(); + TEST_PATH = System.getProperty("test.warehouse.dir", "/tmp") + Path.SEPARATOR + tid; + Path testPath = new Path(TEST_PATH); + fs = FileSystem.get(testPath.toUri(), conf); + fs.mkdirs(testPath); + } + + @Test + public void testReplDumpFailure() throws Exception { + String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName(); + IncrementalDumpMetricCollector metricCollector = + new IncrementalDumpMetricCollector(null, TEST_PATH, conf); + ReplDumpWork replDumpWork = Mockito.mock(ReplDumpWork.class); + Mockito.when(replDumpWork.getCurrentDumpPath()).thenReturn(new Path(dumpDir)); + Mockito.when(replDumpWork.getMetricCollector()).thenReturn(metricCollector); + Mockito.when(replDumpWork.dataCopyIteratorsInitialized()).thenThrow(recoverableException, nonRecoverableException); + Task replDumpTask = TaskFactory.get(replDumpWork, conf); + + String stageName = "REPL_DUMP"; + metricCollector.reportStageStart(stageName, new HashMap<>()); + Assert.assertThrows(RuntimeException.class, () -> replDumpTask.execute()); + performRecoverableChecks(stageName); + + metricCollector.reportStageStart(stageName, new 
HashMap<>()); + Assert.assertThrows(RuntimeException.class, () -> replDumpTask.execute()); + performNonRecoverableChecks(dumpDir, stageName); + } + + @Test + public void testReplDumpRecoverableMissingStage() throws Exception { + String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName(); + MetricCollector.getInstance().deinit(); + BootstrapDumpMetricCollector metricCollector = + new BootstrapDumpMetricCollector(null, TEST_PATH, conf); + ReplDumpWork replDumpWork = Mockito.mock(ReplDumpWork.class); + Mockito.when(replDumpWork.getMetricCollector()).thenReturn(metricCollector); + Mockito.when(replDumpWork.getCurrentDumpPath()).thenReturn(new Path(dumpDir)); + Mockito.when(replDumpWork.dataCopyIteratorsInitialized()).thenThrow(recoverableException); + Task replDumpTask = TaskFactory.get(replDumpWork, conf); + + //ensure stages are missing initially and execute without reporting start metrics + Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size()); + + Assert.assertThrows(RuntimeException.class, () -> replDumpTask.execute()); + performRecoverableChecks("REPL_DUMP"); + } + + @Test + public void testReplDumpNonRecoverableMissingStage() throws Exception { + String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName(); + MetricCollector.getInstance().deinit(); + IncrementalDumpMetricCollector metricCollector = + new IncrementalDumpMetricCollector(null, TEST_PATH, conf); + ReplDumpWork replDumpWork = Mockito.mock(ReplDumpWork.class); + Mockito.when(replDumpWork.getCurrentDumpPath()).thenReturn(new Path(dumpDir)); + Mockito.when(replDumpWork.getMetricCollector()).thenReturn(metricCollector); + Mockito.when(replDumpWork.dataCopyIteratorsInitialized()).thenThrow(nonRecoverableException); + Task replDumpTask = TaskFactory.get(replDumpWork, conf); + + //ensure stages are missing initially and execute without reporting start metrics + Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size()); + + Assert.assertThrows(RuntimeException.class, () -> replDumpTask.execute()); + performNonRecoverableChecks(dumpDir, "REPL_DUMP"); + } + + @Test + public void testReplLoadFailure() throws Exception { + String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName(); + MetricCollector.getInstance().deinit(); + IncrementalLoadMetricCollector metricCollector = + new IncrementalLoadMetricCollector(null, TEST_PATH, 0, conf); + ReplLoadWork replLoadWork = Mockito.mock(ReplLoadWork.class); + Mockito.when(replLoadWork.getDumpDirectory()).thenReturn( + new Path(dumpDir + Path.SEPARATOR + "test").toString()); + Mockito.when(replLoadWork.getMetricCollector()).thenReturn(metricCollector); + Mockito.when(replLoadWork.getRootTask()).thenThrow(recoverableException, nonRecoverableException); + Task replLoadTask = TaskFactory.get(replLoadWork, conf); + + String stageName = "REPL_LOAD"; + metricCollector.reportStageStart(stageName, new HashMap<>()); + Assert.assertThrows(RuntimeException.class, () -> replLoadTask.execute()); + performRecoverableChecks(stageName); + + metricCollector.reportStageStart(stageName, new HashMap<>()); + Assert.assertThrows(RuntimeException.class, () -> replLoadTask.execute()); + performNonRecoverableChecks(dumpDir, stageName); + } + + @Test + public void testReplLoadRecoverableMissingStage() throws Exception { + String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName(); + MetricCollector.getInstance().deinit(); + BootstrapLoadMetricCollector metricCollector = + new BootstrapLoadMetricCollector(null, TEST_PATH, 0, conf); + 
ReplLoadWork replLoadWork = Mockito.mock(ReplLoadWork.class); + Mockito.when(replLoadWork.getDumpDirectory()).thenReturn( + new Path(dumpDir + Path.SEPARATOR + "test").toString()); + Mockito.when(replLoadWork.getMetricCollector()).thenReturn(metricCollector); + Mockito.when(replLoadWork.getRootTask()).thenThrow(recoverableException); + Task replLoadTask = TaskFactory.get(replLoadWork, conf); + + //ensure stages are missing initially and execute without reporting start metrics + Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size()); + + Assert.assertThrows(RuntimeException.class, () -> replLoadTask.execute()); + performRecoverableChecks("REPL_LOAD"); + } + + @Test + public void testReplLoadNonRecoverableMissingStage() throws Exception { + String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName(); + MetricCollector.getInstance().deinit(); + IncrementalLoadMetricCollector metricCollector = + new IncrementalLoadMetricCollector(null, TEST_PATH, 0, conf); + ReplLoadWork replLoadWork = Mockito.mock(ReplLoadWork.class); + Mockito.when(replLoadWork.getDumpDirectory()).thenReturn( + new Path(dumpDir + Path.SEPARATOR + "test").toString()); + Mockito.when(replLoadWork.getMetricCollector()).thenReturn(metricCollector); + Mockito.when(replLoadWork.getRootTask()).thenThrow(nonRecoverableException); + Task replLoadTask = TaskFactory.get(replLoadWork, conf); + + //ensure stages are missing initially and execute without reporting start metrics + Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size()); + + Assert.assertThrows(RuntimeException.class, () -> replLoadTask.execute()); + performNonRecoverableChecks(dumpDir, "REPL_LOAD"); + } + + /* + * Check update on metrics upon intermediate task failures(not repl-dump / repl-load). + * Here, DDLTask is used as the intermediate task, other task failures should behave in similar fashion. 
+ */ + @Test + public void testDDLTaskFailure() throws Exception { + + //task-setup for DDL-Task + DDLWork ddlWork = Mockito.mock(DDLWork.class); + Context context = Mockito.mock(Context.class); + Mockito.when(context.getExplainAnalyze()).thenReturn(ExplainConfiguration.AnalyzeState.ANALYZING); + Mockito.when(ddlWork.isReplication()).thenReturn(true); + String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName(); + Mockito.when(ddlWork.getDumpDirectory()).thenReturn(dumpDir); + Task ddlTask = TaskFactory.get(ddlWork, conf); + ddlTask.initialize(null, null, null, context); + + + IncrementalLoadMetricCollector metricCollector = new + IncrementalLoadMetricCollector(null, TEST_PATH, 1, conf); + Mockito.when(ddlWork.getMetricCollector()).thenReturn(metricCollector); + + //setup for 2 runs - first recoverable and second non-recoverable + Mockito.when(ddlWork.getDDLDesc()).thenThrow(recoverableException, nonRecoverableException); + + String stageName = "REPL_LOAD"; + + //test recoverable error during DDL-Task + metricCollector.reportStageStart(stageName, new HashMap<>()); + ddlTask.execute(); + performRecoverableChecks(stageName); + + //test non-recoverable error during DDL-Task + metricCollector.reportStageStart(stageName, new HashMap<>()); + ddlTask.execute(); + performNonRecoverableChecks(dumpDir, stageName); + } + + @Test + public void testRecoverableDDLFailureWithStageMissing() throws Exception { + + //task-setup for DDL-Task + DDLWork ddlWork = Mockito.mock(DDLWork.class); + Context context = Mockito.mock(Context.class); + Mockito.when(context.getExplainAnalyze()).thenReturn(ExplainConfiguration.AnalyzeState.ANALYZING); + Mockito.when(ddlWork.isReplication()).thenReturn(true); + String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName(); + Mockito.when(ddlWork.getDumpDirectory()).thenReturn(dumpDir); + Task ddlTask = TaskFactory.get(ddlWork, conf); + ddlTask.initialize(null, null, null, context); + + MetricCollector.getInstance().deinit(); + IncrementalLoadMetricCollector metricCollector = new + IncrementalLoadMetricCollector(null, TEST_PATH, 1, conf); + //ensure stages are missing initially and execute without reporting start metrics + Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size()); + + + Map metricMap = new HashMap<>(); + Mockito.when(ddlWork.getMetricCollector()).thenReturn(metricCollector); + Mockito.when(ddlWork.getDDLDesc()).thenThrow(recoverableException); + + //test recoverable error during DDL-Task + ddlTask.execute(); + performRecoverableChecks( "REPL_LOAD"); + } + + @Test + public void testNonRecoverableDDLFailureWithStageMissing() throws Exception { + + //task-setup for DDL-Task + DDLWork ddlWork = Mockito.mock(DDLWork.class); + Context context = Mockito.mock(Context.class); + Mockito.when(context.getExplainAnalyze()).thenReturn(ExplainConfiguration.AnalyzeState.ANALYZING); + Mockito.when(ddlWork.isReplication()).thenReturn(true); + String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName(); + Mockito.when(ddlWork.getDumpDirectory()).thenReturn(dumpDir); + Task ddlTask = TaskFactory.get(ddlWork, conf); + ddlTask.initialize(null, null, null, context); + + MetricCollector.getInstance().deinit(); + IncrementalLoadMetricCollector metricCollector = new + IncrementalLoadMetricCollector(null, TEST_PATH, 1, conf); + //ensure stages are missing initially and execute without reporting start metrics + Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size()); + + Map metricMap = new HashMap<>(); + 
Mockito.when(ddlWork.getMetricCollector()).thenReturn(metricCollector); + Mockito.when(ddlWork.getDDLDesc()).thenThrow(nonRecoverableException); + + //test non-recoverable error during DDL-Task, without initializing stage + ddlTask.execute(); + performNonRecoverableChecks(dumpDir, "REPL_LOAD"); + } + + + void performRecoverableChecks(String stageName){ + List metricList = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(1, metricList.size()); + ReplicationMetric updatedMetric = metricList.get(0); + Progress updatedProgress = updatedMetric.getProgress(); + Assert.assertEquals(Status.FAILED, updatedProgress.getStatus()); + Assert.assertEquals(1, updatedProgress.getStages().size()); + Assert.assertEquals(Status.FAILED, updatedProgress.getStageByName(stageName).getStatus()); + Assert.assertNotEquals(0, updatedProgress.getStageByName(stageName).getEndTime()); + } + + void performNonRecoverableChecks(String dumpDir, String stageName) throws IOException { + List metricList = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(1, metricList.size()); + ReplicationMetric updatedMetric = metricList.get(0); + Progress updatedProgress = updatedMetric.getProgress(); + Assert.assertEquals(Status.FAILED_ADMIN, updatedProgress.getStatus()); + Assert.assertEquals(1, updatedProgress.getStages().size()); + Assert.assertEquals(Status.FAILED_ADMIN, updatedProgress.getStageByName(stageName).getStatus()); + Assert.assertNotEquals(0, updatedProgress.getStageByName(stageName).getEndTime()); + Path expectedNonRecoverablePath = new Path(new Path(dumpDir), ReplAck.NON_RECOVERABLE_MARKER.toString()); + Assert.assertTrue(fs.exists(expectedNonRecoverablePath)); + fs.delete(expectedNonRecoverablePath, true); + MetricCollector.getInstance().deinit(); + } +} diff --git a/ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out b/ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out index ec7cf22004..6aa789b6f7 100644 --- a/ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out +++ b/ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out @@ -15,4 +15,4 @@ POSTHOOK: Input: default@part_whitelist_test PREHOOK: query: ALTER TABLE part_whitelist_test ADD PARTITION (ds='1,2,3,4') PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: default@part_whitelist_test -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Partition value '1,2,3,4' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'. (configure with metastore.partition.name.whitelist.pattern)) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Partition value '1,2,3,4' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'. (configure with metastore.partition.name.whitelist.pattern)) diff --git a/ql/src/test/results/clientnegative/addpart1.q.out b/ql/src/test/results/clientnegative/addpart1.q.out index 864709fd1a..a1c6549d3b 100644 --- a/ql/src/test/results/clientnegative/addpart1.q.out +++ b/ql/src/test/results/clientnegative/addpart1.q.out @@ -23,4 +23,4 @@ b=f/c=s PREHOOK: query: alter table addpart1 add partition (b='f', c='') PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: default@addpart1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. partition spec is invalid; field c does not exist or is empty +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
partition spec is invalid; field c does not exist or is empty diff --git a/ql/src/test/results/clientnegative/allow_change_col_type_par_neg.q.out b/ql/src/test/results/clientnegative/allow_change_col_type_par_neg.q.out index 98d99a9087..4263945041 100644 --- a/ql/src/test/results/clientnegative/allow_change_col_type_par_neg.q.out +++ b/ql/src/test/results/clientnegative/allow_change_col_type_par_neg.q.out @@ -14,5 +14,5 @@ PREHOOK: query: alter table t1 change column c1 c1 smallint PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : c1 diff --git a/ql/src/test/results/clientnegative/alter_external_acid.q.out b/ql/src/test/results/clientnegative/alter_external_acid.q.out index 8005676400..84a7bc2a91 100644 --- a/ql/src/test/results/clientnegative/alter_external_acid.q.out +++ b/ql/src/test/results/clientnegative/alter_external_acid.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table acid_external set TBLPROPERTIES ('transactional'='tr PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: default@acid_external PREHOOK: Output: default@acid_external -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. default.acid_external cannot be declared transactional because it's an external table +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. default.acid_external cannot be declared transactional because it's an external table diff --git a/ql/src/test/results/clientnegative/alter_partition_change_col_dup_col.q.out b/ql/src/test/results/clientnegative/alter_partition_change_col_dup_col.q.out index b226f8e242..845540d92b 100644 --- a/ql/src/test/results/clientnegative/alter_partition_change_col_dup_col.q.out +++ b/ql/src/test/results/clientnegative/alter_partition_change_col_dup_col.q.out @@ -17,4 +17,4 @@ PREHOOK: query: alter table alter_partition_change_col_dup_col change c2 c1 deci PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@alter_partition_change_col_dup_col PREHOOK: Output: default@alter_partition_change_col_dup_col -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Duplicate column name: c1 +FAILED: Execution Error, return code 10036 from org.apache.hadoop.hive.ql.ddl.DDLTask. Duplicate column name: c1 diff --git a/ql/src/test/results/clientnegative/alter_partition_change_col_nonexist.q.out b/ql/src/test/results/clientnegative/alter_partition_change_col_nonexist.q.out index f3a8069727..9f04f8869b 100644 --- a/ql/src/test/results/clientnegative/alter_partition_change_col_nonexist.q.out +++ b/ql/src/test/results/clientnegative/alter_partition_change_col_nonexist.q.out @@ -17,4 +17,4 @@ PREHOOK: query: alter table alter_partition_change_col_nonexist change c3 c4 dec PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@alter_partition_change_col_nonexist PREHOOK: Output: default@alter_partition_change_col_nonexist -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
Invalid column reference c3 +FAILED: Execution Error, return code 10002 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid column reference c3 diff --git a/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out b/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out index b5213b1c2f..8a7857ee82 100644 --- a/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out +++ b/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out @@ -23,4 +23,4 @@ PREHOOK: query: ALTER TABLE part_whitelist_test PARTITION (ds='1') rename to par PREHOOK: type: ALTERTABLE_RENAMEPART PREHOOK: Input: default@part_whitelist_test PREHOOK: Output: default@part_whitelist_test@ds=1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to rename partition. Partition value '1,2,3' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'. (configure with metastore.partition.name.whitelist.pattern) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to rename partition. Partition value '1,2,3' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'. (configure with metastore.partition.name.whitelist.pattern) diff --git a/ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out b/ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out index be6fc9b54e..0314fa1d6d 100644 --- a/ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out +++ b/ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out @@ -34,4 +34,4 @@ POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1,pcol2=old_pa PREHOOK: query: alter table alter_rename_partition partition (pCol1='nonexist_part1', pcol2='nonexist_part2') rename to partition (pCol1='new_part1', pcol2='new_part2') PREHOOK: type: ALTERTABLE_RENAMEPART PREHOOK: Input: default@alter_rename_partition -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Rename partition: source partition [pcol1=nonexist_part1/pcol2=nonexist_part2] does not exist. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Rename partition: source partition [pcol1=nonexist_part1/pcol2=nonexist_part2] does not exist. diff --git a/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out b/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out index f6ccedd836..e0eb35d342 100644 --- a/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out +++ b/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out @@ -35,4 +35,4 @@ PREHOOK: query: alter table alter_rename_partition partition (pCol1='old_part1:' PREHOOK: type: ALTERTABLE_RENAMEPART PREHOOK: Input: default@alter_rename_partition PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to rename partition. Partition already exists:default.alter_rename_partition.[old_part1:, old_part2:] +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to rename partition. 
Partition already exists:default.alter_rename_partition.[old_part1:, old_part2:] diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_duplicate_pk.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_duplicate_pk.q.out index ce62f1fa90..2417cdfbdb 100644 --- a/ql/src/test/results/clientnegative/alter_table_constraint_duplicate_pk.q.out +++ b/ql/src/test/results/clientnegative/alter_table_constraint_duplicate_pk.q.out @@ -8,4 +8,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table1 PREHOOK: query: alter table table1 add constraint pk4 primary key (b) disable novalidate rely PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message: Primary key already exists for: hive.default.table1) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message: Primary key already exists for: hive.default.table1) diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col1.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col1.q.out index d36cf77082..2c319c9aae 100644 --- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col1.q.out +++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col1.q.out @@ -16,4 +16,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table2 PREHOOK: query: alter table table2 add constraint fk1 foreign key (c) references table1(a) disable novalidate PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Child column not found: c) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Child column not found: c) diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col2.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col2.q.out index d77a76bfbd..b0476811bc 100644 --- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col2.q.out +++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col2.q.out @@ -16,4 +16,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table2 PREHOOK: query: alter table table2 add constraint fk1 foreign key (b) references table1(c) disable novalidate PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent column not found: c) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent column not found: c) diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl1.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl1.q.out index 869f1edc02..c73371f883 100644 --- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl1.q.out +++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl1.q.out @@ -16,4 +16,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table2 PREHOOK: query: alter table table3 add constraint fk1 foreign key (c) references table1(a) disable novalidate PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
InvalidObjectException(message:Child table not found: table3) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Child table not found: table3) diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl2.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl2.q.out index 80c0f30321..2604954e3f 100644 --- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl2.q.out +++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl2.q.out @@ -16,4 +16,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table2 PREHOOK: query: alter table table2 add constraint fk1 foreign key (b) references table3(a) disable novalidate PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent table not found: table3) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent table not found: table3) diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_col.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_col.q.out index f9532de06c..090fc654b4 100644 --- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_col.q.out +++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_col.q.out @@ -8,4 +8,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table1 PREHOOK: query: alter table table1 add constraint pk1 primary key (c) disable novalidate PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent column not found: c) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent column not found: c) diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_tbl.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_tbl.q.out index 56d87d5c48..e37850a0a2 100644 --- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_tbl.q.out +++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_tbl.q.out @@ -16,4 +16,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table2 PREHOOK: query: alter table table3 add constraint pk3 primary key (a) disable novalidate rely PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent table not found: table3) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
InvalidObjectException(message:Parent table not found: table3) diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_ref.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_ref.q.out index f66641f86d..0cda4547af 100644 --- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_ref.q.out +++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_ref.q.out @@ -16,4 +16,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table2 PREHOOK: query: alter table table2 add constraint fk1 foreign key (a) references table1(b) disable novalidate PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Foreign key references b:string; but no corresponding primary key or unique key exists. Possible keys: [a:string;]) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Foreign key references b:string; but no corresponding primary key or unique key exists. Possible keys: [a:string;]) diff --git a/ql/src/test/results/clientnegative/alter_table_wrong_db.q.out b/ql/src/test/results/clientnegative/alter_table_wrong_db.q.out index 07641b4a0c..cf35c4289c 100644 --- a/ql/src/test/results/clientnegative/alter_table_wrong_db.q.out +++ b/ql/src/test/results/clientnegative/alter_table_wrong_db.q.out @@ -22,4 +22,4 @@ PREHOOK: query: alter table bad_rename1.rename1 rename to bad_db_notexists.renam PREHOOK: type: ALTERTABLE_RENAME PREHOOK: Input: bad_rename1@rename1 PREHOOK: Output: bad_rename1@rename1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Unable to change partition or table. Object database hive.bad_db_notexists does not exist. Check metastore logs for detailed stack. +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Unable to change partition or table. Object database hive.bad_db_notexists does not exist. Check metastore logs for detailed stack. diff --git a/ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out b/ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out index 54b8a3fe68..702bb32b8a 100644 --- a/ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out +++ b/ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out @@ -11,4 +11,4 @@ PREHOOK: type: ALTERTABLE_LOCATION PREHOOK: Input: default@testwrongloc PREHOOK: Output: default@testwrongloc #### A masked pattern was here #### -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. relative/testwrongloc is not absolute. Please specify a complete absolute uri. +FAILED: Execution Error, return code 10244 from org.apache.hadoop.hive.ql.ddl.DDLTask. relative/testwrongloc is not absolute. Please specify a complete absolute uri. diff --git a/ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out b/ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out index 7aa238d136..cdf9e58c2a 100644 --- a/ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out +++ b/ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out @@ -18,4 +18,4 @@ PREHOOK: query: alter table aa set serdeproperties ("input.regex" = "[^\\](.*)", PREHOOK: type: ALTERTABLE_SERDEPROPERTIES PREHOOK: Input: default@aa PREHOOK: Output: default@aa -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
at least one column must be specified for the table +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. at least one column must be specified for the table diff --git a/ql/src/test/results/clientnegative/altern1.q.out b/ql/src/test/results/clientnegative/altern1.q.out index 310b4bfeef..a2de13f399 100644 --- a/ql/src/test/results/clientnegative/altern1.q.out +++ b/ql/src/test/results/clientnegative/altern1.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table altern1 replace columns(a int, b int, ds string) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@altern1 PREHOOK: Output: default@altern1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition column name ds conflicts with table columns. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition column name ds conflicts with table columns. diff --git a/ql/src/test/results/clientnegative/archive1.q.out b/ql/src/test/results/clientnegative/archive1.q.out index 8b87e8a07a..dece98d853 100644 --- a/ql/src/test/results/clientnegative/archive1.q.out +++ b/ql/src/test/results/clientnegative/archive1.q.out @@ -32,4 +32,4 @@ PREHOOK: query: ALTER TABLE srcpart_archived ARCHIVE PARTITION (ds='2008-04-08', PREHOOK: type: ALTERTABLE_ARCHIVE PREHOOK: Input: default@srcpart_archived PREHOOK: Output: default@srcpart_archived@ds=2008-04-08/hr=12 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition(s) already archived +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition(s) already archived diff --git a/ql/src/test/results/clientnegative/archive2.q.out b/ql/src/test/results/clientnegative/archive2.q.out index e2ca7d391f..41e66f4daa 100644 --- a/ql/src/test/results/clientnegative/archive2.q.out +++ b/ql/src/test/results/clientnegative/archive2.q.out @@ -28,4 +28,4 @@ PREHOOK: query: ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr= PREHOOK: type: ALTERTABLE_UNARCHIVE PREHOOK: Input: default@tstsrcpart PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=12 is not archived. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=12 is not archived. diff --git a/ql/src/test/results/clientnegative/archive_multi1.q.out b/ql/src/test/results/clientnegative/archive_multi1.q.out index 342b77f77e..541ad4a641 100644 --- a/ql/src/test/results/clientnegative/archive_multi1.q.out +++ b/ql/src/test/results/clientnegative/archive_multi1.q.out @@ -49,4 +49,4 @@ PREHOOK: type: ALTERTABLE_ARCHIVE PREHOOK: Input: default@tstsrcpart PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition(s) already archived +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
Partition(s) already archived diff --git a/ql/src/test/results/clientnegative/archive_multi2.q.out b/ql/src/test/results/clientnegative/archive_multi2.q.out index a4680d6432..f226cc26d0 100644 --- a/ql/src/test/results/clientnegative/archive_multi2.q.out +++ b/ql/src/test/results/clientnegative/archive_multi2.q.out @@ -42,4 +42,4 @@ PREHOOK: query: ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr= PREHOOK: type: ALTERTABLE_UNARCHIVE PREHOOK: Input: default@tstsrcpart PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=12 is not archived. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=12 is not archived. diff --git a/ql/src/test/results/clientnegative/archive_multi3.q.out b/ql/src/test/results/clientnegative/archive_multi3.q.out index 0ad82dd553..8bd3488d3c 100644 --- a/ql/src/test/results/clientnegative/archive_multi3.q.out +++ b/ql/src/test/results/clientnegative/archive_multi3.q.out @@ -47,4 +47,4 @@ PREHOOK: type: ALTERTABLE_ARCHIVE PREHOOK: Input: default@tstsrcpart PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Conflict with existing archive ds=2008-04-08/hr=12 +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Conflict with existing archive ds=2008-04-08/hr=12 diff --git a/ql/src/test/results/clientnegative/archive_multi4.q.out b/ql/src/test/results/clientnegative/archive_multi4.q.out index 24f3094db8..18a48e7552 100644 --- a/ql/src/test/results/clientnegative/archive_multi4.q.out +++ b/ql/src/test/results/clientnegative/archive_multi4.q.out @@ -48,4 +48,4 @@ PREHOOK: query: ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='1 PREHOOK: type: ALTERTABLE_ARCHIVE PREHOOK: Input: default@tstsrcpart PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Conflict with existing archive ds=2008-04-08 +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Conflict with existing archive ds=2008-04-08 diff --git a/ql/src/test/results/clientnegative/archive_multi5.q.out b/ql/src/test/results/clientnegative/archive_multi5.q.out index 7c0cc905fa..d8c54fadd8 100644 --- a/ql/src/test/results/clientnegative/archive_multi5.q.out +++ b/ql/src/test/results/clientnegative/archive_multi5.q.out @@ -47,4 +47,4 @@ PREHOOK: type: ALTERTABLE_UNARCHIVE PREHOOK: Input: default@tstsrcpart PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=11 is not archived. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=11 is not archived. 
diff --git a/ql/src/test/results/clientnegative/archive_multi6.q.out b/ql/src/test/results/clientnegative/archive_multi6.q.out index 3ab6e01726..88fb3a97bc 100644 --- a/ql/src/test/results/clientnegative/archive_multi6.q.out +++ b/ql/src/test/results/clientnegative/archive_multi6.q.out @@ -48,4 +48,4 @@ PREHOOK: query: ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr= PREHOOK: type: ALTERTABLE_UNARCHIVE PREHOOK: Input: default@tstsrcpart PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=12 is archived at level 1, and given partspec only has 2 specs. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=12 is archived at level 1, and given partspec only has 2 specs. diff --git a/ql/src/test/results/clientnegative/authorization_cannot_create_default_role.q.out b/ql/src/test/results/clientnegative/authorization_cannot_create_default_role.q.out index 502088f310..78543d27eb 100644 --- a/ql/src/test/results/clientnegative/authorization_cannot_create_default_role.q.out +++ b/ql/src/test/results/clientnegative/authorization_cannot_create_default_role.q.out @@ -4,4 +4,4 @@ POSTHOOK: query: set role ADMIN POSTHOOK: type: SHOW_ROLES PREHOOK: query: create role default PREHOOK: type: CREATEROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Role name cannot be one of the reserved roles: [ALL, DEFAULT, NONE] +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Role name cannot be one of the reserved roles: [ALL, DEFAULT, NONE] diff --git a/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out b/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out index 4efc7d412c..e807c8536d 100644 --- a/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out +++ b/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out @@ -55,4 +55,4 @@ public testrole PREHOOK: query: create role TESTRoLE PREHOOK: type: CREATEROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error create role: Got exception: org.apache.hadoop.hive.metastore.api.InvalidObjectException Role testrole already exists. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error create role: Got exception: org.apache.hadoop.hive.metastore.api.InvalidObjectException Role testrole already exists. diff --git a/ql/src/test/results/clientnegative/authorization_create_role_no_admin.q.out b/ql/src/test/results/clientnegative/authorization_create_role_no_admin.q.out index dedb273f22..1818d076c1 100644 --- a/ql/src/test/results/clientnegative/authorization_create_role_no_admin.q.out +++ b/ql/src/test/results/clientnegative/authorization_create_role_no_admin.q.out @@ -1,3 +1,3 @@ PREHOOK: query: create role r1 PREHOOK: type: CREATEROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_test_user is not allowed to add roles. User has to belong to ADMIN role and have it as current role, for this action. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_test_user is not allowed to add roles. User has to belong to ADMIN role and have it as current role, for this action. 
diff --git a/ql/src/test/results/clientnegative/authorization_drop_admin_role.q.out b/ql/src/test/results/clientnegative/authorization_drop_admin_role.q.out index 027221c109..6837887585 100644 --- a/ql/src/test/results/clientnegative/authorization_drop_admin_role.q.out +++ b/ql/src/test/results/clientnegative/authorization_drop_admin_role.q.out @@ -4,4 +4,4 @@ POSTHOOK: query: set role admin POSTHOOK: type: SHOW_ROLES PREHOOK: query: drop role admin PREHOOK: type: DROPROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error dropping role: public,admin roles can't be dropped. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error dropping role: public,admin roles can't be dropped. diff --git a/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out b/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out index 2850c778e5..b086ec452a 100644 --- a/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out +++ b/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out @@ -22,4 +22,4 @@ POSTHOOK: type: SHOW_ROLES public PREHOOK: query: drop role r1 PREHOOK: type: DROPROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_admin_user is not allowed to drop role. User has to belong to ADMIN role and have it as current role, for this action. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_admin_user is not allowed to drop role. User has to belong to ADMIN role and have it as current role, for this action. diff --git a/ql/src/test/results/clientnegative/authorization_fail_1.q.out b/ql/src/test/results/clientnegative/authorization_fail_1.q.out index 16cf00c9d3..0943a43765 100644 --- a/ql/src/test/results/clientnegative/authorization_fail_1.q.out +++ b/ql/src/test/results/clientnegative/authorization_fail_1.q.out @@ -15,4 +15,4 @@ POSTHOOK: Output: default@authorization_fail_1 PREHOOK: query: grant Create on table authorization_fail_1 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@authorization_fail_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:Got exception: org.apache.hadoop.hive.metastore.api.InvalidObjectException CREATE is already granted on table [default,authorization_fail_1] by hive_test_user) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:Got exception: org.apache.hadoop.hive.metastore.api.InvalidObjectException CREATE is already granted on table [default,authorization_fail_1] by hive_test_user) diff --git a/ql/src/test/results/clientnegative/authorization_fail_8.q.out b/ql/src/test/results/clientnegative/authorization_fail_8.q.out index b8bd912d76..df3c5ba073 100644 --- a/ql/src/test/results/clientnegative/authorization_fail_8.q.out +++ b/ql/src/test/results/clientnegative/authorization_fail_8.q.out @@ -43,4 +43,4 @@ default authorization_fail user2 USER SELECT false -1 user1 PREHOOK: query: GRANT SELECT ON authorization_fail TO USER user3 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@authorization_fail -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[SELECT with grant] on Object [type=TABLE_OR_VIEW, name=default.authorization_fail]] +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[SELECT with grant] on Object [type=TABLE_OR_VIEW, name=default.authorization_fail]] diff --git a/ql/src/test/results/clientnegative/authorization_grant_group.q.out b/ql/src/test/results/clientnegative/authorization_grant_group.q.out index eb638f2b01..ce5344dc6e 100644 --- a/ql/src/test/results/clientnegative/authorization_grant_group.q.out +++ b/ql/src/test/results/clientnegative/authorization_grant_group.q.out @@ -9,4 +9,4 @@ POSTHOOK: Output: default@table_gg PREHOOK: query: GRANT INSERT ON table_gg TO group g1 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_gg -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid principal type in principal Principal [name=g1, type=GROUP] +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid principal type in principal Principal [name=g1, type=GROUP] diff --git a/ql/src/test/results/clientnegative/authorization_grant_table_allpriv.q.out b/ql/src/test/results/clientnegative/authorization_grant_table_allpriv.q.out index fe075e80fd..39c2945c8a 100644 --- a/ql/src/test/results/clientnegative/authorization_grant_table_allpriv.q.out +++ b/ql/src/test/results/clientnegative/authorization_grant_table_allpriv.q.out @@ -15,4 +15,4 @@ POSTHOOK: Output: default@table_priv_allf PREHOOK: query: GRANT ALL ON table_priv_allf TO USER user3 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv_allf -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[SELECT with grant, UPDATE with grant, DELETE with grant] on Object [type=TABLE_OR_VIEW, name=default.table_priv_allf]] +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[SELECT with grant, UPDATE with grant, DELETE with grant] on Object [type=TABLE_OR_VIEW, name=default.table_priv_allf]] diff --git a/ql/src/test/results/clientnegative/authorization_grant_table_dup.q.out b/ql/src/test/results/clientnegative/authorization_grant_table_dup.q.out index c9f82955e5..c2b2d7e3e7 100644 --- a/ql/src/test/results/clientnegative/authorization_grant_table_dup.q.out +++ b/ql/src/test/results/clientnegative/authorization_grant_table_dup.q.out @@ -22,4 +22,4 @@ default tauth_gdup user1 USER UPDATE true -1 user1 PREHOOK: query: GRANT INSERT ON tauth_gdup TO USER user1 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@tauth_gdup -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting privileges: Got exception: org.apache.hadoop.hive.metastore.api.InvalidObjectException INSERT is already granted on table [default,tauth_gdup] by user1 +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
Error granting privileges: Got exception: org.apache.hadoop.hive.metastore.api.InvalidObjectException INSERT is already granted on table [default,tauth_gdup] by user1 diff --git a/ql/src/test/results/clientnegative/authorization_grant_table_fail1.q.out b/ql/src/test/results/clientnegative/authorization_grant_table_fail1.q.out index 071e6e3faf..567a4bf682 100644 --- a/ql/src/test/results/clientnegative/authorization_grant_table_fail1.q.out +++ b/ql/src/test/results/clientnegative/authorization_grant_table_fail1.q.out @@ -9,4 +9,4 @@ POSTHOOK: Output: default@table_priv_gfail1 PREHOOK: query: GRANT INSERT ON table_priv_gfail1 TO USER user3 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv_gfail1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[INSERT with grant] on Object [type=TABLE_OR_VIEW, name=default.table_priv_gfail1]] +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[INSERT with grant] on Object [type=TABLE_OR_VIEW, name=default.table_priv_gfail1]] diff --git a/ql/src/test/results/clientnegative/authorization_grant_table_fail_nogrant.q.out b/ql/src/test/results/clientnegative/authorization_grant_table_fail_nogrant.q.out index 3f37585a64..37f06ac5d9 100644 --- a/ql/src/test/results/clientnegative/authorization_grant_table_fail_nogrant.q.out +++ b/ql/src/test/results/clientnegative/authorization_grant_table_fail_nogrant.q.out @@ -15,4 +15,4 @@ POSTHOOK: Output: default@table_priv_gfail1 PREHOOK: query: GRANT INSERT ON table_priv_gfail1 TO USER user3 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv_gfail1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[INSERT with grant] on Object [type=TABLE_OR_VIEW, name=default.table_priv_gfail1]] +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[INSERT with grant] on Object [type=TABLE_OR_VIEW, name=default.table_priv_gfail1]] diff --git a/ql/src/test/results/clientnegative/authorization_invalid_priv_v2.q.out b/ql/src/test/results/clientnegative/authorization_invalid_priv_v2.q.out index 996fa8f750..a56fac9b89 100644 --- a/ql/src/test/results/clientnegative/authorization_invalid_priv_v2.q.out +++ b/ql/src/test/results/clientnegative/authorization_invalid_priv_v2.q.out @@ -9,4 +9,4 @@ POSTHOOK: Output: default@authorization_invalid_v2 PREHOOK: query: grant lock on table authorization_invalid_v2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@authorization_invalid_v2 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unsupported privilege type LOCK +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
Unsupported privilege type LOCK diff --git a/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out b/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out index 2ec51e65c4..419987f6a0 100644 --- a/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out +++ b/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out @@ -61,4 +61,4 @@ POSTHOOK: type: SHOW_ROLES PREHOOK: query: grant all on table tpriv_current_role to user user5 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@tpriv_current_role -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[SELECT with grant, INSERT with grant, UPDATE with grant, DELETE with grant] on Object [type=TABLE_OR_VIEW, name=default.tpriv_current_role]] +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[SELECT with grant, INSERT with grant, UPDATE with grant, DELETE with grant] on Object [type=TABLE_OR_VIEW, name=default.tpriv_current_role]] diff --git a/ql/src/test/results/clientnegative/authorization_public_create.q.out b/ql/src/test/results/clientnegative/authorization_public_create.q.out index 669cffe7b4..16e3cb36f6 100644 --- a/ql/src/test/results/clientnegative/authorization_public_create.q.out +++ b/ql/src/test/results/clientnegative/authorization_public_create.q.out @@ -1,3 +1,3 @@ PREHOOK: query: create role public PREHOOK: type: CREATEROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public role implicitly exists. It can't be created.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public role implicitly exists. It can't be created.) diff --git a/ql/src/test/results/clientnegative/authorization_public_drop.q.out b/ql/src/test/results/clientnegative/authorization_public_drop.q.out index e1b538dfee..a218682c5b 100644 --- a/ql/src/test/results/clientnegative/authorization_public_drop.q.out +++ b/ql/src/test/results/clientnegative/authorization_public_drop.q.out @@ -1,3 +1,3 @@ PREHOOK: query: drop role public PREHOOK: type: DROPROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public,admin roles can't be dropped.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public,admin roles can't be dropped.) 
diff --git a/ql/src/test/results/clientnegative/authorization_revoke_table_fail1.q.out b/ql/src/test/results/clientnegative/authorization_revoke_table_fail1.q.out index d7454f7541..590fb8f1d8 100644 --- a/ql/src/test/results/clientnegative/authorization_revoke_table_fail1.q.out +++ b/ql/src/test/results/clientnegative/authorization_revoke_table_fail1.q.out @@ -15,5 +15,5 @@ POSTHOOK: Output: default@table_priv_rfail1 PREHOOK: query: REVOKE INSERT ON TABLE table_priv_rfail1 FROM USER user2 PREHOOK: type: REVOKE_PRIVILEGE PREHOOK: Output: default@table_priv_rfail1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot find privilege Privilege [name=INSERT, columns=null] for Principal [name=user2, type=USER] on Object [type=TABLE_OR_VIEW, name=default.table_priv_rfail1] granted by user3 +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot find privilege Privilege [name=INSERT, columns=null] for Principal [name=user2, type=USER] on Object [type=TABLE_OR_VIEW, name=default.table_priv_rfail1] granted by user3 diff --git a/ql/src/test/results/clientnegative/authorization_revoke_table_fail2.q.out b/ql/src/test/results/clientnegative/authorization_revoke_table_fail2.q.out index c7030cabd0..2c24fb64ef 100644 --- a/ql/src/test/results/clientnegative/authorization_revoke_table_fail2.q.out +++ b/ql/src/test/results/clientnegative/authorization_revoke_table_fail2.q.out @@ -27,5 +27,5 @@ POSTHOOK: Output: default@table_priv_rfai2 PREHOOK: query: REVOKE INSERT ON TABLE table_priv_rfai2 FROM USER user2 PREHOOK: type: REVOKE_PRIVILEGE PREHOOK: Output: default@table_priv_rfai2 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot find privilege Privilege [name=INSERT, columns=null] for Principal [name=user2, type=USER] on Object [type=TABLE_OR_VIEW, name=default.table_priv_rfai2] granted by user3 +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot find privilege Privilege [name=INSERT, columns=null] for Principal [name=user2, type=USER] on Object [type=TABLE_OR_VIEW, name=default.table_priv_rfai2] granted by user3 diff --git a/ql/src/test/results/clientnegative/authorization_role_case.q.out b/ql/src/test/results/clientnegative/authorization_role_case.q.out index adb6d3c384..fb200cf9cc 100644 --- a/ql/src/test/results/clientnegative/authorization_role_case.q.out +++ b/ql/src/test/results/clientnegative/authorization_role_case.q.out @@ -31,4 +31,4 @@ POSTHOOK: Output: default@t1 PREHOOK: query: grant UPDATE on table t1 to role mixcaserole2 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@t1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:Got exception: org.apache.hadoop.hive.metastore.api.NoSuchObjectException Role mixcaserole2 does not exist) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:Got exception: org.apache.hadoop.hive.metastore.api.NoSuchObjectException Role mixcaserole2 does not exist) diff --git a/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out b/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out index e7f3a313ea..5d7fe3e117 100644 --- a/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out +++ b/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out @@ -16,4 +16,4 @@ POSTHOOK: query: grant role role1 to role role2 POSTHOOK: type: GRANT_ROLE PREHOOK: query: grant role role2 to role role1 PREHOOK: type: GRANT_ROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting role: Cannot grant role role1 to role2 as role2 already belongs to the role role1. (no cycles allowed) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting role: Cannot grant role role1 to role2 as role2 already belongs to the role role1. (no cycles allowed) diff --git a/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out b/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out index 4f20b84d43..eedabe8031 100644 --- a/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out +++ b/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out @@ -40,4 +40,4 @@ POSTHOOK: query: grant role role5 to role role4 POSTHOOK: type: GRANT_ROLE PREHOOK: query: grant role role2 to role role4 PREHOOK: type: GRANT_ROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting role: Cannot grant role role4 to role2 as role2 already belongs to the role role4. (no cycles allowed) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting role: Cannot grant role role4 to role2 as role2 already belongs to the role role4. (no cycles allowed) diff --git a/ql/src/test/results/clientnegative/authorization_role_grant.q.out b/ql/src/test/results/clientnegative/authorization_role_grant.q.out index daef930e3c..0cabd84947 100644 --- a/ql/src/test/results/clientnegative/authorization_role_grant.q.out +++ b/ql/src/test/results/clientnegative/authorization_role_grant.q.out @@ -31,4 +31,4 @@ POSTHOOK: query: set role role_noadmin POSTHOOK: type: SHOW_ROLES PREHOOK: query: grant src_role_wadmin to user user3 PREHOOK: type: GRANT_ROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : user2 is not allowed to grant role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : user2 is not allowed to grant role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action. 
diff --git a/ql/src/test/results/clientnegative/authorization_role_grant2.q.out b/ql/src/test/results/clientnegative/authorization_role_grant2.q.out index e5495805e7..896dbe9589 100644 --- a/ql/src/test/results/clientnegative/authorization_role_grant2.q.out +++ b/ql/src/test/results/clientnegative/authorization_role_grant2.q.out @@ -48,4 +48,4 @@ POSTHOOK: query: set role src_role_wadmin POSTHOOK: type: SHOW_ROLES PREHOOK: query: grant src_role_wadmin to user user3 PREHOOK: type: GRANT_ROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : user2 is not allowed to grant role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : user2 is not allowed to grant role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action. diff --git a/ql/src/test/results/clientnegative/authorization_role_grant_nosuchrole.q.out b/ql/src/test/results/clientnegative/authorization_role_grant_nosuchrole.q.out index fcb2ec94ed..277bff5cdd 100644 --- a/ql/src/test/results/clientnegative/authorization_role_grant_nosuchrole.q.out +++ b/ql/src/test/results/clientnegative/authorization_role_grant_nosuchrole.q.out @@ -8,4 +8,4 @@ POSTHOOK: query: create role role1 POSTHOOK: type: CREATEROLE PREHOOK: query: grant role1 to role nosuchrole PREHOOK: type: GRANT_ROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting role: Got exception: org.apache.hadoop.hive.metastore.api.NoSuchObjectException Role nosuchrole does not exist +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting role: Got exception: org.apache.hadoop.hive.metastore.api.NoSuchObjectException Role nosuchrole does not exist diff --git a/ql/src/test/results/clientnegative/authorization_role_grant_otherrole.q.out b/ql/src/test/results/clientnegative/authorization_role_grant_otherrole.q.out index bb4e23ec9b..79c0c57243 100644 --- a/ql/src/test/results/clientnegative/authorization_role_grant_otherrole.q.out +++ b/ql/src/test/results/clientnegative/authorization_role_grant_otherrole.q.out @@ -8,4 +8,4 @@ POSTHOOK: query: create role accounting POSTHOOK: type: CREATEROLE PREHOOK: query: show role grant role accounting PREHOOK: type: SHOW_ROLE_GRANT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error getting role grant information for user accounting: User : user1 is not allowed check privileges of a role it does not belong to : accounting. User has to belong to ADMIN role and have it as current role, for this action. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error getting role grant information for user accounting: User : user1 is not allowed check privileges of a role it does not belong to : accounting. User has to belong to ADMIN role and have it as current role, for this action. 
diff --git a/ql/src/test/results/clientnegative/authorization_role_grant_otheruser.q.out b/ql/src/test/results/clientnegative/authorization_role_grant_otheruser.q.out index 5422b191d7..72cd42b956 100644 --- a/ql/src/test/results/clientnegative/authorization_role_grant_otheruser.q.out +++ b/ql/src/test/results/clientnegative/authorization_role_grant_otheruser.q.out @@ -19,4 +19,4 @@ POSTHOOK: type: SHOW_ROLE_GRANT public false -1 PREHOOK: query: show role grant user ruser2 PREHOOK: type: SHOW_ROLE_GRANT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error getting role grant information for user ruser2: User : ruser1 is not allowed check privileges of another user : ruser2. User has to belong to ADMIN role and have it as current role, for this action. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error getting role grant information for user ruser2: User : ruser1 is not allowed check privileges of another user : ruser2. User has to belong to ADMIN role and have it as current role, for this action. diff --git a/ql/src/test/results/clientnegative/authorization_set_role_neg1.q.out b/ql/src/test/results/clientnegative/authorization_set_role_neg1.q.out index fb46d432ab..fd60dcaa6d 100644 --- a/ql/src/test/results/clientnegative/authorization_set_role_neg1.q.out +++ b/ql/src/test/results/clientnegative/authorization_set_role_neg1.q.out @@ -1,3 +1,3 @@ PREHOOK: query: set role nosuchroleexists PREHOOK: type: SHOW_ROLES -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. hive_test_user doesn't belong to role nosuchroleexists +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. hive_test_user doesn't belong to role nosuchroleexists diff --git a/ql/src/test/results/clientnegative/authorization_set_role_neg2.q.out b/ql/src/test/results/clientnegative/authorization_set_role_neg2.q.out index dad672ddd0..1dbcc35b70 100644 --- a/ql/src/test/results/clientnegative/authorization_set_role_neg2.q.out +++ b/ql/src/test/results/clientnegative/authorization_set_role_neg2.q.out @@ -20,4 +20,4 @@ POSTHOOK: query: set role public POSTHOOK: type: SHOW_ROLES PREHOOK: query: set role nosuchroleexists PREHOOK: type: SHOW_ROLES -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. user2 doesn't belong to role nosuchroleexists +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. user2 doesn't belong to role nosuchroleexists diff --git a/ql/src/test/results/clientnegative/authorization_show_grant_otherrole.q.out b/ql/src/test/results/clientnegative/authorization_show_grant_otherrole.q.out index 198986a301..47d24de6a4 100644 --- a/ql/src/test/results/clientnegative/authorization_show_grant_otherrole.q.out +++ b/ql/src/test/results/clientnegative/authorization_show_grant_otherrole.q.out @@ -8,4 +8,4 @@ POSTHOOK: query: create role role1 POSTHOOK: type: CREATEROLE PREHOOK: query: show grant role role1 PREHOOK: type: SHOW_GRANT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 is not allowed check privileges of a role it does not belong to : role1. User has to belong to ADMIN role and have it as current role, for this action. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 is not allowed check privileges of a role it does not belong to : role1. 
User has to belong to ADMIN role and have it as current role, for this action. diff --git a/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_all.q.out b/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_all.q.out index bd510aa949..8ecf33ebd6 100644 --- a/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_all.q.out +++ b/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_all.q.out @@ -1,3 +1,3 @@ PREHOOK: query: show grant PREHOOK: type: SHOW_GRANT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 has to specify a user name or role in the show grant. User has to belong to ADMIN role and have it as current role, for this action. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 has to specify a user name or role in the show grant. User has to belong to ADMIN role and have it as current role, for this action. diff --git a/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_alltabs.q.out b/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_alltabs.q.out index e5479fd12b..1135a10ae4 100644 --- a/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_alltabs.q.out +++ b/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_alltabs.q.out @@ -1,3 +1,3 @@ PREHOOK: query: show grant user user2 PREHOOK: type: SHOW_GRANT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 is not allowed check privileges of another user : user2. User has to belong to ADMIN role and have it as current role, for this action. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 is not allowed check privileges of another user : user2. User has to belong to ADMIN role and have it as current role, for this action. diff --git a/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_wtab.q.out b/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_wtab.q.out index 1b137f17a0..dd7eee0aef 100644 --- a/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_wtab.q.out +++ b/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_wtab.q.out @@ -8,4 +8,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 PREHOOK: query: show grant user user2 on table t1 PREHOOK: type: SHOW_GRANT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 is not allowed check privileges of another user : user2. User has to belong to ADMIN role and have it as current role, for this action. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 is not allowed check privileges of another user : user2. User has to belong to ADMIN role and have it as current role, for this action. 
diff --git a/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out b/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out index f5ce765a7a..9100689f12 100644 --- a/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out +++ b/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out @@ -1,3 +1,3 @@ PREHOOK: query: show principals role1 PREHOOK: type: SHOW_ROLE_PRINCIPALS -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_test_user is not allowed get principals in a role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_test_user is not allowed get principals in a role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action. diff --git a/ql/src/test/results/clientnegative/authorization_show_roles_no_admin.q.out b/ql/src/test/results/clientnegative/authorization_show_roles_no_admin.q.out index ea46d10008..1600a2e37b 100644 --- a/ql/src/test/results/clientnegative/authorization_show_roles_no_admin.q.out +++ b/ql/src/test/results/clientnegative/authorization_show_roles_no_admin.q.out @@ -1,3 +1,3 @@ PREHOOK: query: show roles PREHOOK: type: SHOW_ROLES -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_test_user is not allowed to list roles. User has to belong to ADMIN role and have it as current role, for this action. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_test_user is not allowed to list roles. User has to belong to ADMIN role and have it as current role, for this action. diff --git a/ql/src/test/results/clientnegative/authorization_table_grant_nosuchrole.q.out b/ql/src/test/results/clientnegative/authorization_table_grant_nosuchrole.q.out index 21af3b13d9..2c341bd597 100644 --- a/ql/src/test/results/clientnegative/authorization_table_grant_nosuchrole.q.out +++ b/ql/src/test/results/clientnegative/authorization_table_grant_nosuchrole.q.out @@ -9,4 +9,4 @@ POSTHOOK: Output: default@t1 PREHOOK: query: grant ALL on t1 to role nosuchrole PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@t1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting privileges: Got exception: org.apache.hadoop.hive.metastore.api.NoSuchObjectException Role nosuchrole does not exist +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting privileges: Got exception: org.apache.hadoop.hive.metastore.api.NoSuchObjectException Role nosuchrole does not exist diff --git a/ql/src/test/results/clientnegative/authorize_grant_public.q.out b/ql/src/test/results/clientnegative/authorize_grant_public.q.out index a2e8083c6c..c1fe19550f 100644 --- a/ql/src/test/results/clientnegative/authorize_grant_public.q.out +++ b/ql/src/test/results/clientnegative/authorize_grant_public.q.out @@ -1,3 +1,3 @@ PREHOOK: query: grant role public to user hive_test_user PREHOOK: type: GRANT_ROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:No user can be added to public. Since all users implicitly belong to public role.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:No user can be added to public. Since all users implicitly belong to public role.) diff --git a/ql/src/test/results/clientnegative/authorize_revoke_public.q.out b/ql/src/test/results/clientnegative/authorize_revoke_public.q.out index 8579c54a6a..fb3448a5de 100644 --- a/ql/src/test/results/clientnegative/authorize_revoke_public.q.out +++ b/ql/src/test/results/clientnegative/authorize_revoke_public.q.out @@ -1,3 +1,3 @@ PREHOOK: query: revoke role public from user hive_test_user PREHOOK: type: REVOKE_ROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public role can't be revoked.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public role can't be revoked.) diff --git a/ql/src/test/results/clientnegative/avro_add_column_extschema.q.out b/ql/src/test/results/clientnegative/avro_add_column_extschema.q.out index 8040fe34db..9493f976c5 100644 --- a/ql/src/test/results/clientnegative/avro_add_column_extschema.q.out +++ b/ql/src/test/results/clientnegative/avro_add_column_extschema.q.out @@ -40,4 +40,4 @@ CHANGE COLUMN number number bigint PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@avro_extschema PREHOOK: Output: default@avro_extschema -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Not allowed to alter schema of Avro stored table having external schema. Consider removing avro.schema.literal or avro.schema.url from table properties. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Not allowed to alter schema of Avro stored table having external schema. Consider removing avro.schema.literal or avro.schema.url from table properties. diff --git a/ql/src/test/results/clientnegative/avro_decimal.q.out b/ql/src/test/results/clientnegative/avro_decimal.q.out index c2bc4f4948..3b46c7b63a 100644 --- a/ql/src/test/results/clientnegative/avro_decimal.q.out +++ b/ql/src/test/results/clientnegative/avro_decimal.q.out @@ -19,4 +19,4 @@ TBLPROPERTIES ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@avro_dec -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.avro.AvroSerdeException Invalid precision or scale for decimal type) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.avro.AvroSerdeException Invalid precision or scale for decimal type) diff --git a/ql/src/test/results/clientnegative/column_rename1.q.out b/ql/src/test/results/clientnegative/column_rename1.q.out index a280774895..1f0c5cf2d2 100644 --- a/ql/src/test/results/clientnegative/column_rename1.q.out +++ b/ql/src/test/results/clientnegative/column_rename1.q.out @@ -26,4 +26,4 @@ PREHOOK: query: alter table tstsrc change src_not_exist key_value string PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@tstsrc PREHOOK: Output: default@tstsrc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid column reference src_not_exist +FAILED: Execution Error, return code 10002 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid column reference src_not_exist diff --git a/ql/src/test/results/clientnegative/column_rename2.q.out b/ql/src/test/results/clientnegative/column_rename2.q.out index 3eeda3ab48..2fef073924 100644 --- a/ql/src/test/results/clientnegative/column_rename2.q.out +++ b/ql/src/test/results/clientnegative/column_rename2.q.out @@ -26,4 +26,4 @@ PREHOOK: query: alter table tstsrc change key value string PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@tstsrc PREHOOK: Output: default@tstsrc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Duplicate column name: value +FAILED: Execution Error, return code 10036 from org.apache.hadoop.hive.ql.ddl.DDLTask. Duplicate column name: value diff --git a/ql/src/test/results/clientnegative/column_rename4.q.out b/ql/src/test/results/clientnegative/column_rename4.q.out index 92c886ccec..afed2146eb 100644 --- a/ql/src/test/results/clientnegative/column_rename4.q.out +++ b/ql/src/test/results/clientnegative/column_rename4.q.out @@ -26,4 +26,4 @@ PREHOOK: query: alter table tstsrc change key key2 string after key_value PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@tstsrc PREHOOK: Output: default@tstsrc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid column reference key_value +FAILED: Execution Error, return code 10002 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid column reference key_value diff --git a/ql/src/test/results/clientnegative/compact_non_acid_table.q.out b/ql/src/test/results/clientnegative/compact_non_acid_table.q.out index 34b9e91119..dc8b01eccc 100644 --- a/ql/src/test/results/clientnegative/compact_non_acid_table.q.out +++ b/ql/src/test/results/clientnegative/compact_non_acid_table.q.out @@ -8,4 +8,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@not_an_acid_table PREHOOK: query: alter table not_an_acid_table compact 'major' PREHOOK: type: ALTERTABLE_COMPACT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Compaction is not allowed on non-ACID table default.not_an_acid_table +FAILED: Execution Error, return code 10286 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
Compaction is not allowed on non-ACID table default.not_an_acid_table diff --git a/ql/src/test/results/clientnegative/create_external_acid.q.out b/ql/src/test/results/clientnegative/create_external_acid.q.out index 11fa05db2a..84c4d7bc81 100644 --- a/ql/src/test/results/clientnegative/create_external_acid.q.out +++ b/ql/src/test/results/clientnegative/create_external_acid.q.out @@ -2,4 +2,4 @@ PREHOOK: query: create external table acid_external (a int, b varchar(128)) clus PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_external -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:default.acid_external cannot be declared transactional because it's an external table) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:default.acid_external cannot be declared transactional because it's an external table) diff --git a/ql/src/test/results/clientnegative/create_not_acid.q.out b/ql/src/test/results/clientnegative/create_not_acid.q.out index e8a45e503f..74973ac5a2 100644 --- a/ql/src/test/results/clientnegative/create_not_acid.q.out +++ b/ql/src/test/results/clientnegative/create_not_acid.q.out @@ -2,4 +2,4 @@ PREHOOK: query: create table acid_notbucketed(a int, b varchar(128)) TBLPROPERTI PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_notbucketed -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:The table must be stored using an ACID compliant format (such as ORC): default.acid_notbucketed) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:The table must be stored using an ACID compliant format (such as ORC): default.acid_notbucketed) diff --git a/ql/src/test/results/clientnegative/create_table_wrong_regex.q.out b/ql/src/test/results/clientnegative/create_table_wrong_regex.q.out index b6517ffa4c..7de34c5c48 100644 --- a/ql/src/test/results/clientnegative/create_table_wrong_regex.q.out +++ b/ql/src/test/results/clientnegative/create_table_wrong_regex.q.out @@ -8,6 +8,6 @@ PREHOOK: query: create table aa ( test STRING ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@aa -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.util.regex.PatternSyntaxException: Unclosed character class near index 7 +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.util.regex.PatternSyntaxException: Unclosed character class near index 7 [^\](.*) ^ diff --git a/ql/src/test/results/clientnegative/create_view_failure1.q.out b/ql/src/test/results/clientnegative/create_view_failure1.q.out index b960a5f5a2..03ec5ae6ca 100644 --- a/ql/src/test/results/clientnegative/create_view_failure1.q.out +++ b/ql/src/test/results/clientnegative/create_view_failure1.q.out @@ -15,4 +15,4 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@xxx12 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Table already exists: default.xxx12 +FAILED: Execution Error, return code 10073 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
Table already exists: default.xxx12 diff --git a/ql/src/test/results/clientnegative/create_view_failure2.q.out b/ql/src/test/results/clientnegative/create_view_failure2.q.out index 52d22735b2..9688687d6c 100644 --- a/ql/src/test/results/clientnegative/create_view_failure2.q.out +++ b/ql/src/test/results/clientnegative/create_view_failure2.q.out @@ -17,4 +17,4 @@ PREHOOK: query: CREATE TABLE xxx4(key int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@xxx4 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Table hive.default.xxx4 already exists) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Table hive.default.xxx4 already exists) diff --git a/ql/src/test/results/clientnegative/create_view_failure4.q.out b/ql/src/test/results/clientnegative/create_view_failure4.q.out index 19cf005be1..0d5d6c5626 100644 --- a/ql/src/test/results/clientnegative/create_view_failure4.q.out +++ b/ql/src/test/results/clientnegative/create_view_failure4.q.out @@ -8,4 +8,4 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@xxx5 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Duplicate column name x in the table definition. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Duplicate column name x in the table definition. diff --git a/ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out b/ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out index 41ed7146a1..38fbbc28bc 100644 --- a/ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out +++ b/ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out @@ -48,4 +48,4 @@ POSTHOOK: Output: database:db2 POSTHOOK: Output: db2@t2 PREHOOK: query: alter table t1 add constraint constraint_name foreign key (x) references t2(x) disable novalidate rely PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Constraint name already exists: db2.t2.constraint_name) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Constraint name already exists: db2.t2.constraint_name) diff --git a/ql/src/test/results/clientnegative/create_with_fk_constraint.q.out b/ql/src/test/results/clientnegative/create_with_fk_constraint.q.out index d2a45bdec6..a8e0153ed7 100644 --- a/ql/src/test/results/clientnegative/create_with_fk_constraint.q.out +++ b/ql/src/test/results/clientnegative/create_with_fk_constraint.q.out @@ -10,4 +10,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, CONSTRAINT fk1 FOREIGN PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Child column not found: x) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
InvalidObjectException(message:Child column not found: x) diff --git a/ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out b/ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out index 4cfcb8bdd6..43831a6e37 100644 --- a/ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out +++ b/ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out @@ -2,4 +2,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING PRIMARY KEY DISABLE, b STRING, CON PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;) diff --git a/ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out b/ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out index 5cf4dd95c2..31fe0f0c65 100644 --- a/ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out +++ b/ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out @@ -2,4 +2,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING UNIQUE DISABLE, b STRING, CONSTRAI PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;) diff --git a/ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out b/ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out index 1477a96734..8d51055cdf 100644 --- a/ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out +++ b/ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out @@ -10,4 +10,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, CONSTRAINT fk1 FOREIGN PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Foreign key references a:string; but no corresponding primary key or unique key exists. Possible keys: [a:int;]) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Foreign key references a:string; but no corresponding primary key or unique key exists. Possible keys: [a:int;]) diff --git a/ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out b/ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out index 843a434d6b..cb4f333d23 100644 --- a/ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out +++ b/ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out @@ -10,4 +10,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, CONSTRAINT fk1 FOREIGN PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Foreign key references a:string;b:string; but no corresponding primary key or unique key exists. 
Possible keys: [b:int;a:string;, a:string;b:int;]) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Foreign key references a:string;b:string; but no corresponding primary key or unique key exists. Possible keys: [b:int;a:string;, a:string;b:int;]) diff --git a/ql/src/test/results/clientnegative/database_create_already_exists.q.out b/ql/src/test/results/clientnegative/database_create_already_exists.q.out index 14746a6bba..a486507fab 100644 --- a/ql/src/test/results/clientnegative/database_create_already_exists.q.out +++ b/ql/src/test/results/clientnegative/database_create_already_exists.q.out @@ -12,4 +12,4 @@ POSTHOOK: Output: database:test_db PREHOOK: query: CREATE DATABASE test_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test_db -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Database test_db already exists +FAILED: Execution Error, return code 10242 from org.apache.hadoop.hive.ql.ddl.DDLTask. Database test_db already exists diff --git a/ql/src/test/results/clientnegative/database_create_invalid_name.q.out b/ql/src/test/results/clientnegative/database_create_invalid_name.q.out index 9dba025d9c..3d7d4d8b4f 100644 --- a/ql/src/test/results/clientnegative/database_create_invalid_name.q.out +++ b/ql/src/test/results/clientnegative/database_create_invalid_name.q.out @@ -6,4 +6,4 @@ default PREHOOK: query: CREATE DATABASE `test§db` PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test§db -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:test§db is not a valid database name) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:test§db is not a valid database name) diff --git a/ql/src/test/results/clientnegative/database_drop_not_empty.q.out b/ql/src/test/results/clientnegative/database_drop_not_empty.q.out index d1604641fc..20463ce178 100644 --- a/ql/src/test/results/clientnegative/database_drop_not_empty.q.out +++ b/ql/src/test/results/clientnegative/database_drop_not_empty.q.out @@ -33,4 +33,4 @@ PREHOOK: query: DROP DATABASE test_db PREHOOK: type: DROPDATABASE PREHOOK: Input: database:test_db PREHOOK: Output: database:test_db -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Database test_db is not empty. One or more tables exist.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Database test_db is not empty. One or more tables exist.) diff --git a/ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out b/ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out index a8c009e3c1..a01e472b76 100644 --- a/ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out +++ b/ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out @@ -33,4 +33,4 @@ PREHOOK: query: DROP DATABASE db_drop_non_empty_restrict PREHOOK: type: DROPDATABASE PREHOOK: Input: database:db_drop_non_empty_restrict PREHOOK: Output: database:db_drop_non_empty_restrict -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Database db_drop_non_empty_restrict is not empty. One or more tables exist.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
InvalidOperationException(message:Database db_drop_non_empty_restrict is not empty. One or more tables exist.) diff --git a/ql/src/test/results/clientnegative/database_location_conflict.q.out b/ql/src/test/results/clientnegative/database_location_conflict.q.out index 8034185af8..f0d2a4e878 100644 --- a/ql/src/test/results/clientnegative/database_location_conflict.q.out +++ b/ql/src/test/results/clientnegative/database_location_conflict.q.out @@ -3,4 +3,4 @@ PREHOOK: query: CREATE DATABASE db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:db #### A masked pattern was here #### -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Managed and external locations for database cannot be the same +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Managed and external locations for database cannot be the same diff --git a/ql/src/test/results/clientnegative/database_location_conflict2.q.out b/ql/src/test/results/clientnegative/database_location_conflict2.q.out index 80786bf86d..082bee2458 100644 --- a/ql/src/test/results/clientnegative/database_location_conflict2.q.out +++ b/ql/src/test/results/clientnegative/database_location_conflict2.q.out @@ -11,4 +11,4 @@ POSTHOOK: Output: database:db PREHOOK: type: ALTERDATABASE_LOCATION PREHOOK: Output: database:db #### A masked pattern was here #### -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Managed and external locations for database cannot be the same +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Managed and external locations for database cannot be the same diff --git a/ql/src/test/results/clientnegative/database_location_conflict3.q.out b/ql/src/test/results/clientnegative/database_location_conflict3.q.out index 80786bf86d..082bee2458 100644 --- a/ql/src/test/results/clientnegative/database_location_conflict3.q.out +++ b/ql/src/test/results/clientnegative/database_location_conflict3.q.out @@ -11,4 +11,4 @@ POSTHOOK: Output: database:db PREHOOK: type: ALTERDATABASE_LOCATION PREHOOK: Output: database:db #### A masked pattern was here #### -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Managed and external locations for database cannot be the same +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Managed and external locations for database cannot be the same diff --git a/ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out b/ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out index b2b19cef0a..de0332ab53 100644 --- a/ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out +++ b/ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out @@ -12,4 +12,4 @@ PREHOOK: query: lock database drop_nodblock shared PREHOOK: type: LOCKDATABASE PREHOOK: Input: database:drop_nodblock PREHOOK: Output: database:drop_nodblock -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager +FAILED: Execution Error, return code 10271 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests. 
Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager diff --git a/ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out b/ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out index 4455e0c05f..1ed0dcfb0f 100644 --- a/ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out +++ b/ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out @@ -12,4 +12,4 @@ PREHOOK: query: unlock database drop_nodbunlock PREHOOK: type: UNLOCKDATABASE PREHOOK: Input: database:drop_nodbunlock PREHOOK: Output: database:drop_nodbunlock -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager +FAILED: Execution Error, return code 10271 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager diff --git a/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out b/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out index c91d3e0ef2..098aae6b4d 100644 --- a/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out +++ b/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out @@ -12,4 +12,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@drop_notablelock PREHOOK: query: lock table drop_notablelock shared PREHOOK: type: LOCKTABLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager +FAILED: Execution Error, return code 10271 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager diff --git a/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out b/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out index 80066d7251..8211dd470e 100644 --- a/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out +++ b/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out @@ -12,4 +12,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@drop_notableunlock PREHOOK: query: unlock table drop_notableunlock PREHOOK: type: UNLOCKTABLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager +FAILED: Execution Error, return code 10271 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests. Transaction manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager diff --git a/ql/src/test/results/clientnegative/deletejar.q.out b/ql/src/test/results/clientnegative/deletejar.q.out index 2827196501..65dbc71ab8 100644 --- a/ql/src/test/results/clientnegative/deletejar.q.out +++ b/ql/src/test/results/clientnegative/deletejar.q.out @@ -2,4 +2,4 @@ PREHOOK: query: CREATE TABLE DELETEJAR(KEY STRING, VALUE STRING) ROW FORMAT SERD PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DELETEJAR -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
Cannot validate serde: org.apache.hadoop.hive.serde2.TestSerDe +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot validate serde: org.apache.hadoop.hive.serde2.TestSerDe diff --git a/ql/src/test/results/clientnegative/describe_xpath1.q.out b/ql/src/test/results/clientnegative/describe_xpath1.q.out index 61cb539e3d..af51e38205 100644 --- a/ql/src/test/results/clientnegative/describe_xpath1.q.out +++ b/ql/src/test/results/clientnegative/describe_xpath1.q.out @@ -1,4 +1,4 @@ PREHOOK: query: describe src_thrift $elem$ PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. cannot find field $elem$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.attributes, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField1, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField2, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField3] +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. cannot find field $elem$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.attributes, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField1, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField2, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField3] diff --git a/ql/src/test/results/clientnegative/describe_xpath2.q.out b/ql/src/test/results/clientnegative/describe_xpath2.q.out index 5f3f84960d..3defc6ff66 100644 --- a/ql/src/test/results/clientnegative/describe_xpath2.q.out +++ b/ql/src/test/results/clientnegative/describe_xpath2.q.out @@ -1,4 +1,4 @@ PREHOOK: query: describe src_thrift $key$ PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
cannot find field $key$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.attributes, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField1, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField2, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField3] +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. cannot find field $key$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString, private java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.attributes, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField1, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField2, private org.apache.hadoop.hive.serde2.thrift.test.PropValueUnion org.apache.hadoop.hive.serde2.thrift.test.Complex.unionField3] diff --git a/ql/src/test/results/clientnegative/describe_xpath3.q.out b/ql/src/test/results/clientnegative/describe_xpath3.q.out index c73abb30ac..f243d118ad 100644 --- a/ql/src/test/results/clientnegative/describe_xpath3.q.out +++ b/ql/src/test/results/clientnegative/describe_xpath3.q.out @@ -1,4 +1,4 @@ PREHOOK: query: describe src_thrift lint.abc PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error in getting fields from serde.Unknown type for abc +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error in getting fields from serde.Unknown type for abc diff --git a/ql/src/test/results/clientnegative/describe_xpath4.q.out b/ql/src/test/results/clientnegative/describe_xpath4.q.out index d81b62985a..8282039f95 100644 --- a/ql/src/test/results/clientnegative/describe_xpath4.q.out +++ b/ql/src/test/results/clientnegative/describe_xpath4.q.out @@ -1,4 +1,4 @@ PREHOOK: query: describe src_thrift mStringString.abc PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error in getting fields from serde.Unknown type for abc +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
Error in getting fields from serde.Unknown type for abc diff --git a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out index 2eeef04dd0..c93a8f9a41 100644 --- a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out +++ b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out @@ -104,5 +104,5 @@ PREHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@test_table123 PREHOOK: Output: default@test_table123 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : a,b diff --git a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out index 16c40ed3df..b98e689d89 100644 --- a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out +++ b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out @@ -40,5 +40,5 @@ PREHOOK: query: ALTER TABLE test_table123 CHANGE COLUMN b b MAP PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@test_table123 PREHOOK: Output: default@test_table123 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : b diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out index 5bec46e9c4..c43be9486e 100644 --- a/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out +++ b/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out @@ -12,4 +12,4 @@ POSTHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1 POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk1 does not exist for the associated table: default.table1) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
InvalidObjectException(message:The constraint: pk1 does not exist for the associated table: default.table1) diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out index 7f42c4cb99..8c1be49d8c 100644 --- a/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out +++ b/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out @@ -8,4 +8,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table2 PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk1 does not exist for the associated table: default.table1) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk1 does not exist for the associated table: default.table1) diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out index e8081d6246..181153fcb2 100644 --- a/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out +++ b/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out @@ -8,4 +8,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table2 PREHOOK: query: ALTER TABLE table2 DROP CONSTRAINT pk2 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk2 does not exist for the associated table: default.table2) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk2 does not exist for the associated table: default.table2) diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out index a930844246..ed39a44053 100644 --- a/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out +++ b/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out @@ -16,4 +16,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table2 PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk2 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk2 does not exist for the associated table: default.table1) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk2 does not exist for the associated table: default.table1) diff --git a/ql/src/test/results/clientnegative/drop_table_failure2.q.out b/ql/src/test/results/clientnegative/drop_table_failure2.q.out index b31c18bc55..6abfc4b06b 100644 --- a/ql/src/test/results/clientnegative/drop_table_failure2.q.out +++ b/ql/src/test/results/clientnegative/drop_table_failure2.q.out @@ -13,4 +13,4 @@ PREHOOK: query: DROP TABLE xxx6 PREHOOK: type: DROPTABLE PREHOOK: Input: default@xxx6 PREHOOK: Output: default@xxx6 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a view with DROP TABLE +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
Cannot drop a view with DROP TABLE diff --git a/ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out b/ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out index 5d980c13b1..77ec4cf2d5 100644 --- a/ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out +++ b/ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out @@ -32,5 +32,5 @@ PREHOOK: query: drop table mytable PREHOOK: type: DROPTABLE PREHOOK: Input: default@mytable PREHOOK: Output: default@mytable -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot drop table as it is used in the following materialized views [default.mv1] +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot drop table as it is used in the following materialized views [default.mv1] ) diff --git a/ql/src/test/results/clientnegative/drop_table_used_by_mv2.q.out b/ql/src/test/results/clientnegative/drop_table_used_by_mv2.q.out index 53ae0f86a4..54da2eaeee 100644 --- a/ql/src/test/results/clientnegative/drop_table_used_by_mv2.q.out +++ b/ql/src/test/results/clientnegative/drop_table_used_by_mv2.q.out @@ -68,5 +68,5 @@ PREHOOK: query: drop table mytable PREHOOK: type: DROPTABLE PREHOOK: Input: default@mytable PREHOOK: Output: default@mytable -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot drop table as it is used in the following materialized views [default.mv1, default.mv2] +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot drop table as it is used in the following materialized views [default.mv1, default.mv2] ) diff --git a/ql/src/test/results/clientnegative/drop_view_failure1.q.out b/ql/src/test/results/clientnegative/drop_view_failure1.q.out index 55ca79245d..e0a55bdb47 100644 --- a/ql/src/test/results/clientnegative/drop_view_failure1.q.out +++ b/ql/src/test/results/clientnegative/drop_view_failure1.q.out @@ -10,4 +10,4 @@ PREHOOK: query: DROP VIEW xxx1 PREHOOK: type: DROPVIEW PREHOOK: Input: default@xxx1 PREHOOK: Output: default@xxx1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a base table with DROP VIEW +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a base table with DROP VIEW diff --git a/ql/src/test/results/clientnegative/druid_address.q.out b/ql/src/test/results/clientnegative/druid_address.q.out index dcd729022c..cb9318044d 100644 --- a/ql/src/test/results/clientnegative/druid_address.q.out +++ b/ql/src/test/results/clientnegative/druid_address.q.out @@ -4,4 +4,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid broker address not specified in configuration) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid broker address not specified in configuration) diff --git a/ql/src/test/results/clientnegative/druid_buckets.q.out b/ql/src/test/results/clientnegative/druid_buckets.q.out index a229f5f7ab..0f926ae90f 100644 --- a/ql/src/test/results/clientnegative/druid_buckets.q.out +++ b/ql/src/test/results/clientnegative/druid_buckets.q.out @@ -5,4 +5,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:CLUSTERED BY may not be specified for Druid) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:CLUSTERED BY may not be specified for Druid) diff --git a/ql/src/test/results/clientnegative/druid_case.q.out b/ql/src/test/results/clientnegative/druid_case.q.out index b9bf9eb7fa..78ff4184f3 100644 --- a/ql/src/test/results/clientnegative/druid_case.q.out +++ b/ql/src/test/results/clientnegative/druid_case.q.out @@ -4,4 +4,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Duplicate column name anonymous in the table definition. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Duplicate column name anonymous in the table definition. diff --git a/ql/src/test/results/clientnegative/druid_datasource.q.out b/ql/src/test/results/clientnegative/druid_datasource.q.out index 40c8e9c18f..426bb18031 100644 --- a/ql/src/test/results/clientnegative/druid_datasource.q.out +++ b/ql/src/test/results/clientnegative/druid_datasource.q.out @@ -4,4 +4,4 @@ TBLPROPERTIES ("property" = "localhost") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid data source not specified; use druid.datasource in table properties) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid data source not specified; use druid.datasource in table properties) diff --git a/ql/src/test/results/clientnegative/druid_datasource2.q.out b/ql/src/test/results/clientnegative/druid_datasource2.q.out index 7781884097..4bbba4fa86 100644 --- a/ql/src/test/results/clientnegative/druid_datasource2.q.out +++ b/ql/src/test/results/clientnegative/druid_datasource2.q.out @@ -4,4 +4,4 @@ TBLPROPERTIES ("property" = "localhost", "druid.datasource" = "mydatasource") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.druid_table_1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
MetaException(message:Table default.druid_table_1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) diff --git a/ql/src/test/results/clientnegative/druid_location.q.out b/ql/src/test/results/clientnegative/druid_location.q.out index 176ac764c8..dc47a18859 100644 --- a/ql/src/test/results/clientnegative/druid_location.q.out +++ b/ql/src/test/results/clientnegative/druid_location.q.out @@ -6,4 +6,4 @@ PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:LOCATION may not be specified for Druid) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:LOCATION may not be specified for Druid) diff --git a/ql/src/test/results/clientnegative/druid_partitions.q.out b/ql/src/test/results/clientnegative/druid_partitions.q.out index 52627cc736..f977d6cda5 100644 --- a/ql/src/test/results/clientnegative/druid_partitions.q.out +++ b/ql/src/test/results/clientnegative/druid_partitions.q.out @@ -5,4 +5,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:PARTITIONED BY may not be specified for Druid) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:PARTITIONED BY may not be specified for Druid) diff --git a/ql/src/test/results/clientnegative/dyn_part_max.q.out b/ql/src/test/results/clientnegative/dyn_part_max.q.out index 7de4998c2c..2ae09eac95 100644 --- a/ql/src/test/results/clientnegative/dyn_part_max.q.out +++ b/ql/src/test/results/clientnegative/dyn_part_max.q.out @@ -19,4 +19,4 @@ LIMIT 50 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@max_parts -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.MoveTask. Number of dynamic partitions created is 49, which is more than 10. To solve this try to set hive.exec.max.dynamic.partitions to at least 49. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.exec.MoveTask. Number of dynamic partitions created is 49, which is more than 10. To solve this try to set hive.exec.max.dynamic.partitions to at least 49. diff --git a/ql/src/test/results/clientnegative/exchange_partition.q.out b/ql/src/test/results/clientnegative/exchange_partition.q.out index 76d626074e..925a02d359 100644 --- a/ql/src/test/results/clientnegative/exchange_partition.q.out +++ b/ql/src/test/results/clientnegative/exchange_partition.q.out @@ -53,4 +53,4 @@ PREHOOK: query: ALTER TABLE ex_table1 EXCHANGE PARTITION (part='part1') WITH TAB PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION PREHOOK: Input: default@ex_table2 PREHOOK: Output: default@ex_table1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Got exception: java.io.IOException Cannot rename the source path. The destination path already exists.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Got exception: java.io.IOException Cannot rename the source path. The destination path already exists.) 
diff --git a/ql/src/test/results/clientnegative/external1.q.out b/ql/src/test/results/clientnegative/external1.q.out
index c556ca2b5c..258f593136 100644
--- a/ql/src/test/results/clientnegative/external1.q.out
+++ b/ql/src/test/results/clientnegative/external1.q.out
@@ -3,4 +3,4 @@ PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
 PREHOOK: Output: default@external1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme"
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme"
diff --git a/ql/src/test/results/clientnegative/external2.q.out b/ql/src/test/results/clientnegative/external2.q.out
index 8fb7924962..5ca73cbea3 100644
--- a/ql/src/test/results/clientnegative/external2.q.out
+++ b/ql/src/test/results/clientnegative/external2.q.out
@@ -10,4 +10,4 @@ POSTHOOK: Output: default@external2
 PREHOOK: type: ALTERTABLE_ADDPARTS
 #### A masked pattern was here ####
 PREHOOK: Output: default@external2
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme"
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme"
diff --git a/ql/src/test/results/clientnegative/hms_using_serde_alter_table_update_columns.q.out b/ql/src/test/results/clientnegative/hms_using_serde_alter_table_update_columns.q.out
index 22b917002d..bad06d6b07 100644
--- a/ql/src/test/results/clientnegative/hms_using_serde_alter_table_update_columns.q.out
+++ b/ql/src/test/results/clientnegative/hms_using_serde_alter_table_update_columns.q.out
@@ -15,4 +15,4 @@ POSTHOOK: Input: default@hmsserdetable
 name string
 PREHOOK: query: ALTER TABLE hmsserdetable UPDATE COLUMNS
 PREHOOK: type: ALTERTABLE_UPDATECOLUMNS
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. hmsserdetable has serde org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe for which schema is already handled by HMS.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. hmsserdetable has serde org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe for which schema is already handled by HMS.
diff --git a/ql/src/test/results/clientnegative/insert_sorted.q.out b/ql/src/test/results/clientnegative/insert_sorted.q.out
index a28fa1e6c6..762d586255 100644
--- a/ql/src/test/results/clientnegative/insert_sorted.q.out
+++ b/ql/src/test/results/clientnegative/insert_sorted.q.out
@@ -20,4 +20,4 @@ PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_insertsort
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.acid_insertsort cannot support full ACID functionality since it is sorted.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.acid_insertsort cannot support full ACID functionality since it is sorted.)
diff --git a/ql/src/test/results/clientnegative/lockneg2.q.out b/ql/src/test/results/clientnegative/lockneg2.q.out
index 2443341995..d5cff03f45 100644
--- a/ql/src/test/results/clientnegative/lockneg2.q.out
+++ b/ql/src/test/results/clientnegative/lockneg2.q.out
@@ -22,4 +22,4 @@ POSTHOOK: Lineage: tstsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string
 POSTHOOK: Lineage: tstsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: UNLOCK TABLE tstsrc
 PREHOOK: type: UNLOCKTABLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Table tstsrc is not locked
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Table tstsrc is not locked
diff --git a/ql/src/test/results/clientnegative/lockneg3.q.out b/ql/src/test/results/clientnegative/lockneg3.q.out
index 7c1983d8be..ad548cb6dc 100644
--- a/ql/src/test/results/clientnegative/lockneg3.q.out
+++ b/ql/src/test/results/clientnegative/lockneg3.q.out
@@ -26,4 +26,4 @@ POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpar
 POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: UNLOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11')
 PREHOOK: type: UNLOCKTABLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Table tstsrcpart is not locked
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Table tstsrcpart is not locked
diff --git a/ql/src/test/results/clientnegative/lockneg4.q.out b/ql/src/test/results/clientnegative/lockneg4.q.out
index 5655415b04..59741dbf96 100644
--- a/ql/src/test/results/clientnegative/lockneg4.q.out
+++ b/ql/src/test/results/clientnegative/lockneg4.q.out
@@ -30,4 +30,4 @@ POSTHOOK: query: LOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11') EXCLU
 POSTHOOK: type: LOCKTABLE
 PREHOOK: query: SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='12')
 PREHOOK: type: SHOWLOCKS
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Partition {ds=2008-04-08, hr=12} for table tstsrcpart does not exist
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Partition {ds=2008-04-08, hr=12} for table tstsrcpart does not exist
diff --git a/ql/src/test/results/clientnegative/lockneg5.q.out b/ql/src/test/results/clientnegative/lockneg5.q.out
index c02312b254..08487efc0c 100644
--- a/ql/src/test/results/clientnegative/lockneg5.q.out
+++ b/ql/src/test/results/clientnegative/lockneg5.q.out
@@ -4,4 +4,4 @@ POSTHOOK: query: drop table tstsrcpart
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: show locks tstsrcpart extended
 PREHOOK: type: SHOWLOCKS
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.InvalidTableException: Table not found tstsrcpart
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.InvalidTableException: Table not found tstsrcpart
diff --git a/ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out b/ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out
index 5df04539d0..3e62801756 100644
--- a/ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out
+++ b/ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out
@@ -59,4 +59,4 @@ PREHOOK: query: unlock database lockneg1
 PREHOOK: type: UNLOCKDATABASE
 PREHOOK: Input: database:lockneg1
 PREHOOK: Output: database:lockneg1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Database lockneg1 is not locked
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Database lockneg1 is not locked
diff --git a/ql/src/test/results/clientnegative/materialized_view_drop.q.out b/ql/src/test/results/clientnegative/materialized_view_drop.q.out
index e860283622..35d3c5be20 100644
--- a/ql/src/test/results/clientnegative/materialized_view_drop.q.out
+++ b/ql/src/test/results/clientnegative/materialized_view_drop.q.out
@@ -39,4 +39,4 @@ PREHOOK: query: drop materialized view cmv_basetable
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@cmv_basetable
 PREHOOK: Output: default@cmv_basetable
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a base table with DROP MATERIALIZED VIEW
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a base table with DROP MATERIALIZED VIEW
diff --git a/ql/src/test/results/clientnegative/materialized_view_drop2.q.out b/ql/src/test/results/clientnegative/materialized_view_drop2.q.out
index 4671de1328..803b5b42da 100644
--- a/ql/src/test/results/clientnegative/materialized_view_drop2.q.out
+++ b/ql/src/test/results/clientnegative/materialized_view_drop2.q.out
@@ -31,4 +31,4 @@ PREHOOK: query: drop view cmv_mat_view
 PREHOOK: type: DROPVIEW
 PREHOOK: Input: default@cmv_mat_view
 PREHOOK: Output: default@cmv_mat_view
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a materialized view with DROP VIEW
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a materialized view with DROP VIEW
diff --git a/ql/src/test/results/clientnegative/materialized_view_no_cbo_rewrite_2.q.out b/ql/src/test/results/clientnegative/materialized_view_no_cbo_rewrite_2.q.out
index 7e22225233..b2cb1db6c1 100644
--- a/ql/src/test/results/clientnegative/materialized_view_no_cbo_rewrite_2.q.out
+++ b/ql/src/test/results/clientnegative/materialized_view_no_cbo_rewrite_2.q.out
@@ -33,4 +33,4 @@ PREHOOK: query: alter materialized view cmv_mat_view enable rewrite
 PREHOOK: type: ALTER_MATERIALIZED_VIEW_REWRITE
 PREHOOK: Input: default@cmv_mat_view
 PREHOOK: Output: default@cmv_mat_view
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Cannot enable rewriting for materialized view. Statement has unsupported clause: sort by.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Cannot enable rewriting for materialized view. Statement has unsupported clause: sort by.
diff --git a/ql/src/test/results/clientnegative/materialized_view_no_supported_op_rewrite_2.q.out b/ql/src/test/results/clientnegative/materialized_view_no_supported_op_rewrite_2.q.out
index cd8f5fa660..135d89b13c 100644
--- a/ql/src/test/results/clientnegative/materialized_view_no_supported_op_rewrite_2.q.out
+++ b/ql/src/test/results/clientnegative/materialized_view_no_supported_op_rewrite_2.q.out
@@ -34,4 +34,4 @@ PREHOOK: query: alter materialized view cmv_mat_view enable rewrite
 PREHOOK: type: ALTER_MATERIALIZED_VIEW_REWRITE
 PREHOOK: Input: default@cmv_mat_view
 PREHOOK: Output: default@cmv_mat_view
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Cannot enable rewriting for materialized view. LEFT join type is not supported by rewriting algorithm.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Cannot enable rewriting for materialized view. LEFT join type is not supported by rewriting algorithm.
diff --git a/ql/src/test/results/clientnegative/mm_convert.q.out b/ql/src/test/results/clientnegative/mm_convert.q.out
index ee52c1583e..e0e49e4717 100644
--- a/ql/src/test/results/clientnegative/mm_convert.q.out
+++ b/ql/src/test/results/clientnegative/mm_convert.q.out
@@ -14,4 +14,4 @@ PREHOOK: query: alter table convert_mm unset tblproperties('transactional_proper
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@convert_mm
 PREHOOK: Output: default@convert_mm
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot convert an ACID table to non-ACID
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot convert an ACID table to non-ACID
diff --git a/ql/src/test/results/clientnegative/nested_complex_neg.q.out b/ql/src/test/results/clientnegative/nested_complex_neg.q.out
index d3ecca1f13..39b4461bb6 100644
--- a/ql/src/test/results/clientnegative/nested_complex_neg.q.out
+++ b/ql/src/test/results/clientnegative/nested_complex_neg.q.out
@@ -7,4 +7,4 @@ simple_string string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@nestedcomplex
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Number of levels of nesting supported for LazySimpleSerde is 7 Unable to work with level 23. Use hive.serialization.extend.nesting.levels serde property for tables using LazySimpleSerde.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Number of levels of nesting supported for LazySimpleSerde is 7 Unable to work with level 23. Use hive.serialization.extend.nesting.levels serde property for tables using LazySimpleSerde.)
diff --git a/ql/src/test/results/clientnegative/orc_change_fileformat.q.out b/ql/src/test/results/clientnegative/orc_change_fileformat.q.out index e5429619aa..9652c0daf6 100644 --- a/ql/src/test/results/clientnegative/orc_change_fileformat.q.out +++ b/ql/src/test/results/clientnegative/orc_change_fileformat.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set fileformat textfile PREHOOK: type: ALTERTABLE_FILEFORMAT PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing file format (from ORC) is not supported for table default.src_orc +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing file format (from ORC) is not supported for table default.src_orc diff --git a/ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out b/ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out index 375795a4c1..ec094dfdc0 100644 --- a/ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set fileformat textfile PREHOOK: type: ALTERTABLE_FILEFORMAT PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing file format (from ORC) is not supported for table default.src_orc +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing file format (from ORC) is not supported for table default.src_orc diff --git a/ql/src/test/results/clientnegative/orc_change_serde.q.out b/ql/src/test/results/clientnegative/orc_change_serde.q.out index aea47137b5..be73373255 100644 --- a/ql/src/test/results/clientnegative/orc_change_serde.q.out +++ b/ql/src/test/results/clientnegative/orc_change_serde.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set serde 'org.apache.hadoop.hive.serde2.col PREHOOK: type: ALTERTABLE_SERIALIZER PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing SerDe (from OrcSerde) is not supported for table default.src_orc. File format may be incompatible +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing SerDe (from OrcSerde) is not supported for table default.src_orc. File format may be incompatible diff --git a/ql/src/test/results/clientnegative/orc_change_serde_acid.q.out b/ql/src/test/results/clientnegative/orc_change_serde_acid.q.out index 60938e4892..746f9bff10 100644 --- a/ql/src/test/results/clientnegative/orc_change_serde_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_change_serde_acid.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set serde 'org.apache.hadoop.hive.serde2.col PREHOOK: type: ALTERTABLE_SERIALIZER PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing SerDe (from OrcSerde) is not supported for table default.src_orc. File format may be incompatible +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing SerDe (from OrcSerde) is not supported for table default.src_orc. 
File format may be incompatible diff --git a/ql/src/test/results/clientnegative/orc_reorder_columns1.q.out b/ql/src/test/results/clientnegative/orc_reorder_columns1.q.out index f415350c7c..f64329ba07 100644 --- a/ql/src/test/results/clientnegative/orc_reorder_columns1.q.out +++ b/ql/src/test/results/clientnegative/orc_reorder_columns1.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc change key k tinyint first PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible +FAILED: Execution Error, return code 10311 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible diff --git a/ql/src/test/results/clientnegative/orc_reorder_columns1_acid.q.out b/ql/src/test/results/clientnegative/orc_reorder_columns1_acid.q.out index 611fbb064d..a1a790e79a 100644 --- a/ql/src/test/results/clientnegative/orc_reorder_columns1_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_reorder_columns1_acid.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc change key k tinyint first PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible +FAILED: Execution Error, return code 10311 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible diff --git a/ql/src/test/results/clientnegative/orc_reorder_columns2.q.out b/ql/src/test/results/clientnegative/orc_reorder_columns2.q.out index 2fb288c619..63a1833a02 100644 --- a/ql/src/test/results/clientnegative/orc_reorder_columns2.q.out +++ b/ql/src/test/results/clientnegative/orc_reorder_columns2.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc change key k tinyint after val PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible +FAILED: Execution Error, return code 10311 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible diff --git a/ql/src/test/results/clientnegative/orc_reorder_columns2_acid.q.out b/ql/src/test/results/clientnegative/orc_reorder_columns2_acid.q.out index 2deb291b85..37f53c6f37 100644 --- a/ql/src/test/results/clientnegative/orc_reorder_columns2_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_reorder_columns2_acid.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc change key k tinyint after val PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible +FAILED: Execution Error, return code 10311 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. 
SerDe may be incompatible diff --git a/ql/src/test/results/clientnegative/orc_replace_columns1.q.out b/ql/src/test/results/clientnegative/orc_replace_columns1.q.out index f3fcae5878..716d3c2bef 100644 --- a/ql/src/test/results/clientnegative/orc_replace_columns1.q.out +++ b/ql/src/test/results/clientnegative/orc_replace_columns1.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc replace columns (k int) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Replacing columns cannot drop columns for table default.src_orc. SerDe may be incompatible +FAILED: Execution Error, return code 10313 from org.apache.hadoop.hive.ql.ddl.DDLTask. Replacing columns cannot drop columns for table default.src_orc. SerDe may be incompatible diff --git a/ql/src/test/results/clientnegative/orc_replace_columns1_acid.q.out b/ql/src/test/results/clientnegative/orc_replace_columns1_acid.q.out index 6c66155a86..d14551e4f4 100644 --- a/ql/src/test/results/clientnegative/orc_replace_columns1_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_replace_columns1_acid.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc replace columns (k int) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Replacing columns cannot drop columns for table default.src_orc. SerDe may be incompatible +FAILED: Execution Error, return code 10313 from org.apache.hadoop.hive.ql.ddl.DDLTask. Replacing columns cannot drop columns for table default.src_orc. SerDe may be incompatible diff --git a/ql/src/test/results/clientnegative/orc_replace_columns2.q.out b/ql/src/test/results/clientnegative/orc_replace_columns2.q.out index 5c0b45ab96..8e19445cd3 100644 --- a/ql/src/test/results/clientnegative/orc_replace_columns2.q.out +++ b/ql/src/test/results/clientnegative/orc_replace_columns2.q.out @@ -10,5 +10,5 @@ PREHOOK: query: alter table src_orc replace columns (k smallint, val int) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : val diff --git a/ql/src/test/results/clientnegative/orc_replace_columns2_acid.q.out b/ql/src/test/results/clientnegative/orc_replace_columns2_acid.q.out index 8ebb960fc9..13f33566bf 100644 --- a/ql/src/test/results/clientnegative/orc_replace_columns2_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_replace_columns2_acid.q.out @@ -10,5 +10,5 @@ PREHOOK: query: alter table src_orc replace columns (k smallint, val int) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. 
The following columns have types incompatible with the existing columns in their respective positions : val diff --git a/ql/src/test/results/clientnegative/orc_replace_columns3.q.out b/ql/src/test/results/clientnegative/orc_replace_columns3.q.out index 3af387f2e5..ca3d67957f 100644 --- a/ql/src/test/results/clientnegative/orc_replace_columns3.q.out +++ b/ql/src/test/results/clientnegative/orc_replace_columns3.q.out @@ -18,5 +18,5 @@ PREHOOK: query: alter table src_orc replace columns (k int, val string, z tinyin PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : z diff --git a/ql/src/test/results/clientnegative/orc_replace_columns3_acid.q.out b/ql/src/test/results/clientnegative/orc_replace_columns3_acid.q.out index 6c0fd97d52..99b5a3cb39 100644 --- a/ql/src/test/results/clientnegative/orc_replace_columns3_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_replace_columns3_acid.q.out @@ -18,5 +18,5 @@ PREHOOK: query: alter table src_orc replace columns (k int, val string, z tinyin PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : z diff --git a/ql/src/test/results/clientnegative/orc_type_promotion1.q.out b/ql/src/test/results/clientnegative/orc_type_promotion1.q.out index 2152df83a2..7c3b35be32 100644 --- a/ql/src/test/results/clientnegative/orc_type_promotion1.q.out +++ b/ql/src/test/results/clientnegative/orc_type_promotion1.q.out @@ -10,5 +10,5 @@ PREHOOK: query: alter table src_orc change key key int PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : key diff --git a/ql/src/test/results/clientnegative/orc_type_promotion1_acid.q.out b/ql/src/test/results/clientnegative/orc_type_promotion1_acid.q.out index 71119d0145..d82cd81bb1 100644 --- a/ql/src/test/results/clientnegative/orc_type_promotion1_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_type_promotion1_acid.q.out @@ -10,5 +10,5 @@ PREHOOK: query: alter table src_orc change key key int PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. 
The following columns have types incompatible with the existing columns in their respective positions : +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : key diff --git a/ql/src/test/results/clientnegative/orc_type_promotion2.q.out b/ql/src/test/results/clientnegative/orc_type_promotion2.q.out index 0b60b975da..46c0229a90 100644 --- a/ql/src/test/results/clientnegative/orc_type_promotion2.q.out +++ b/ql/src/test/results/clientnegative/orc_type_promotion2.q.out @@ -66,5 +66,5 @@ PREHOOK: query: alter table src_orc change val val int PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : val diff --git a/ql/src/test/results/clientnegative/orc_type_promotion2_acid.q.out b/ql/src/test/results/clientnegative/orc_type_promotion2_acid.q.out index 3bcf7ae619..b0c04a43ae 100644 --- a/ql/src/test/results/clientnegative/orc_type_promotion2_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_type_promotion2_acid.q.out @@ -66,5 +66,5 @@ PREHOOK: query: alter table src_orc change val val int PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : val diff --git a/ql/src/test/results/clientnegative/orc_type_promotion3.q.out b/ql/src/test/results/clientnegative/orc_type_promotion3.q.out index 05dbcb605e..b7f4c3dbec 100644 --- a/ql/src/test/results/clientnegative/orc_type_promotion3.q.out +++ b/ql/src/test/results/clientnegative/orc_type_promotion3.q.out @@ -10,5 +10,5 @@ PREHOOK: query: alter table src_orc change key key smallint PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. 
The following columns have types incompatible with the existing columns in their respective positions : key diff --git a/ql/src/test/results/clientnegative/orc_type_promotion3_acid.q.out b/ql/src/test/results/clientnegative/orc_type_promotion3_acid.q.out index bfcb6ee641..e05f5b6edd 100644 --- a/ql/src/test/results/clientnegative/orc_type_promotion3_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_type_promotion3_acid.q.out @@ -10,5 +10,5 @@ PREHOOK: query: alter table src_orc change key key smallint PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@src_orc PREHOOK: Output: default@src_orc -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : key diff --git a/ql/src/test/results/clientnegative/parquet_alter_part_table_drop_columns.q.out b/ql/src/test/results/clientnegative/parquet_alter_part_table_drop_columns.q.out index 5b3936dee5..7556aefd39 100644 --- a/ql/src/test/results/clientnegative/parquet_alter_part_table_drop_columns.q.out +++ b/ql/src/test/results/clientnegative/parquet_alter_part_table_drop_columns.q.out @@ -50,4 +50,4 @@ favnumber int PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@myparquettable_parted PREHOOK: Output: default@myparquettable_parted -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Replacing columns cannot drop columns for table default.myparquettable_parted. SerDe may be incompatible +FAILED: Execution Error, return code 10313 from org.apache.hadoop.hive.ql.ddl.DDLTask. Replacing columns cannot drop columns for table default.myparquettable_parted. SerDe may be incompatible diff --git a/ql/src/test/results/clientnegative/serde_regex.q.out b/ql/src/test/results/clientnegative/serde_regex.q.out index 231bc57efb..b316bc7f14 100644 --- a/ql/src/test/results/clientnegative/serde_regex.q.out +++ b/ql/src/test/results/clientnegative/serde_regex.q.out @@ -22,4 +22,4 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@serde_regex -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.serde2.RegexSerDe doesn't allow column [9] named strct with type struct) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.serde2.RegexSerDe doesn't allow column [9] named strct with type struct) diff --git a/ql/src/test/results/clientnegative/serde_regex3.q.out b/ql/src/test/results/clientnegative/serde_regex3.q.out index 5348afdbd3..65cdcb3381 100644 --- a/ql/src/test/results/clientnegative/serde_regex3.q.out +++ b/ql/src/test/results/clientnegative/serde_regex3.q.out @@ -19,4 +19,4 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@serde_regex -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException This table does not have serde property "input.regex"!) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException This table does not have serde property "input.regex"!) diff --git a/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out b/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out index 7793afcb61..0d69543a0b 100644 --- a/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out +++ b/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out @@ -2,4 +2,4 @@ PREHOOK: query: create table `c/b/o_t1`(key string, value string, c_int int, c_f PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@c/b/o_t1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: [c/b/o_t1]: is not a valid table name +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: [c/b/o_t1]: is not a valid table name diff --git a/ql/src/test/results/clientnegative/strict_managed_tables1.q.out b/ql/src/test/results/clientnegative/strict_managed_tables1.q.out index e11460e12a..1ece98997f 100644 --- a/ql/src/test/results/clientnegative/strict_managed_tables1.q.out +++ b/ql/src/test/results/clientnegative/strict_managed_tables1.q.out @@ -26,4 +26,4 @@ PREHOOK: query: create table strict_managed_tables1_tab4 (c1 string, c2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@strict_managed_tables1_tab4 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.strict_managed_tables1_tab4 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.strict_managed_tables1_tab4 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) diff --git a/ql/src/test/results/clientnegative/strict_managed_tables2.q.out b/ql/src/test/results/clientnegative/strict_managed_tables2.q.out index 04b878dd3d..edc1788607 100644 --- a/ql/src/test/results/clientnegative/strict_managed_tables2.q.out +++ b/ql/src/test/results/clientnegative/strict_managed_tables2.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table strict_managed_tables2_tab1 set tblproperties ('EXTE PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: default@strict_managed_tables2_tab1 PREHOOK: Output: default@strict_managed_tables2_tab1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Table default.strict_managed_tables2_tab1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional. +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Table default.strict_managed_tables2_tab1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional. 
diff --git a/ql/src/test/results/clientnegative/strict_managed_tables3.q.out b/ql/src/test/results/clientnegative/strict_managed_tables3.q.out index ed92c03816..e938c8c3b6 100644 --- a/ql/src/test/results/clientnegative/strict_managed_tables3.q.out +++ b/ql/src/test/results/clientnegative/strict_managed_tables3.q.out @@ -10,4 +10,4 @@ PREHOOK: query: alter table strict_managed_tables3_tab1 unset tblproperties ('EX PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: default@strict_managed_tables3_tab1 PREHOOK: Output: default@strict_managed_tables3_tab1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Table default.strict_managed_tables3_tab1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional. +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Table default.strict_managed_tables3_tab1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional. diff --git a/ql/src/test/results/clientnegative/strict_managed_tables4.q.out b/ql/src/test/results/clientnegative/strict_managed_tables4.q.out index 924f03bade..2a120e036c 100644 --- a/ql/src/test/results/clientnegative/strict_managed_tables4.q.out +++ b/ql/src/test/results/clientnegative/strict_managed_tables4.q.out @@ -28,4 +28,4 @@ STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@strict_managed_tables6_tab2 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.strict_managed_tables6_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.strict_managed_tables6_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) diff --git a/ql/src/test/results/clientnegative/strict_managed_tables5.q.out b/ql/src/test/results/clientnegative/strict_managed_tables5.q.out index a233b16000..f1294fd796 100644 --- a/ql/src/test/results/clientnegative/strict_managed_tables5.q.out +++ b/ql/src/test/results/clientnegative/strict_managed_tables5.q.out @@ -16,4 +16,4 @@ STORED BY 'org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@strict_managed_tables5_tab2 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.strict_managed_tables5_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.strict_managed_tables5_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.) 
diff --git a/ql/src/test/results/clientnegative/strict_managed_tables6.q.out b/ql/src/test/results/clientnegative/strict_managed_tables6.q.out index 09154e2f8e..077bd7d615 100644 --- a/ql/src/test/results/clientnegative/strict_managed_tables6.q.out +++ b/ql/src/test/results/clientnegative/strict_managed_tables6.q.out @@ -27,4 +27,4 @@ PREHOOK: type: ALTERTABLE_LOCATION PREHOOK: Input: smt6@strict_managed_tables1_tab1 #### A masked pattern was here #### PREHOOK: Output: smt6@strict_managed_tables1_tab1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Cannot change location of a managed table hive.smt6.strict_managed_tables1_tab1 as it is enabled for replication. +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Cannot change location of a managed table hive.smt6.strict_managed_tables1_tab1 as it is enabled for replication. diff --git a/ql/src/test/results/clientnegative/temp_table_addpart1.q.out b/ql/src/test/results/clientnegative/temp_table_addpart1.q.out index 524a8e6de9..daa2436d85 100644 --- a/ql/src/test/results/clientnegative/temp_table_addpart1.q.out +++ b/ql/src/test/results/clientnegative/temp_table_addpart1.q.out @@ -23,4 +23,4 @@ b=f/c=s PREHOOK: query: alter table addpart1_temp add partition (b='f', c='') PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: default@addpart1_temp -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. partition spec is invalid; field c does not exist or is empty +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. partition spec is invalid; field c does not exist or is empty diff --git a/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure.q.out b/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure.q.out index 66e4abb345..c3bbc2b82e 100644 --- a/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure.q.out +++ b/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure.q.out @@ -34,4 +34,4 @@ POSTHOOK: Lineage: alter_rename_partition_temp PARTITION(pcol1=old_part1,pcol2=o PREHOOK: query: alter table alter_rename_partition_temp partition (pCol1='nonexist_part1', pcol2='nonexist_part2') rename to partition (pCol1='new_part1', pcol2='new_part2') PREHOOK: type: ALTERTABLE_RENAMEPART PREHOOK: Input: default@alter_rename_partition_temp -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Rename partition: source partition [pcol1=nonexist_part1/pcol2=nonexist_part2] does not exist. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Rename partition: source partition [pcol1=nonexist_part1/pcol2=nonexist_part2] does not exist. 
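Note on the output shape asserted by the golden files above: every updated failure line follows the pattern "FAILED: Execution Error, return code <code> from <task>. <message>", so the numeric code is straightforward to recover from a captured line. A minimal, hypothetical Java sketch of such a parser (the helper and its names are illustrative only, not part of this patch or of Hive's own code):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative helper: extracts the return code from a
// "FAILED: Execution Error, return code <code> from <task>. <message>" line.
public final class ReturnCodeParser {
    private static final Pattern FAILED_LINE =
        Pattern.compile("^FAILED: Execution Error, return code (\\d+) from (\\S+)\\.");

    private ReturnCodeParser() {}

    // Returns the numeric return code, or -1 if the line is not an execution-error line.
    public static int parse(String line) {
        Matcher m = FAILED_LINE.matcher(line);
        return m.find() ? Integer.parseInt(m.group(1)) : -1;
    }

    public static void main(String[] args) {
        String sample = "FAILED: Execution Error, return code 40013 from "
            + "org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table.";
        System.out.println(parse(sample));  // prints 40013
    }
}
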
diff --git a/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure2.q.out b/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure2.q.out index 2e3b2c170b..a8c7bb3e26 100644 --- a/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure2.q.out +++ b/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure2.q.out @@ -43,4 +43,4 @@ PREHOOK: type: ALTERTABLE_RENAMEPART PREHOOK: Input: default@alter_rename_partition_temp PREHOOK: Output: default@alter_rename_partition_temp@pcol1=new_part1%3A/pcol2=new_part2%3A PREHOOK: Output: default@alter_rename_partition_temp@pcol1=old_part1%3A/pcol2=old_part2%3A -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to rename partition. Partition pcol1=new_part1%3A/pcol2=new_part2%3A already exists +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to rename partition. Partition pcol1=new_part1%3A/pcol2=new_part2%3A already exists diff --git a/ql/src/test/results/clientnegative/temp_table_exchange_partitions.q.out b/ql/src/test/results/clientnegative/temp_table_exchange_partitions.q.out index 5d94a7b087..77856609a4 100644 --- a/ql/src/test/results/clientnegative/temp_table_exchange_partitions.q.out +++ b/ql/src/test/results/clientnegative/temp_table_exchange_partitions.q.out @@ -62,4 +62,4 @@ PREHOOK: query: ALTER TABLE ex1.exchange_part_test1 EXCHANGE PARTITION (ds='2013 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION PREHOOK: Input: ex2@exchange_part_test2 PREHOOK: Output: ex1@exchange_part_test1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Exchanging partitions between temporary and non-temporary tables is not supported.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Exchanging partitions between temporary and non-temporary tables is not supported.) diff --git a/ql/src/test/results/clientnegative/temp_table_rename.q.out b/ql/src/test/results/clientnegative/temp_table_rename.q.out index e868e95249..819184921a 100644 --- a/ql/src/test/results/clientnegative/temp_table_rename.q.out +++ b/ql/src/test/results/clientnegative/temp_table_rename.q.out @@ -18,4 +18,4 @@ PREHOOK: query: alter table tmp2 rename to tmp1 PREHOOK: type: ALTERTABLE_RENAME PREHOOK: Input: default@tmp2 PREHOOK: Output: default@tmp2 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Cannot rename temporary table to tmp1 - temporary table already exists with the same name +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Cannot rename temporary table to tmp1 - temporary table already exists with the same name diff --git a/ql/src/test/results/clientnegative/touch1.q.out b/ql/src/test/results/clientnegative/touch1.q.out index 9a623397e4..fc22130fb0 100644 --- a/ql/src/test/results/clientnegative/touch1.q.out +++ b/ql/src/test/results/clientnegative/touch1.q.out @@ -1,4 +1,4 @@ PREHOOK: query: ALTER TABLE srcpart TOUCH PARTITION (ds='2008-04-08', hr='13') PREHOOK: type: ALTERTABLE_TOUCH PREHOOK: Input: default@srcpart -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Specified partition does not exist +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
Specified partition does not exist diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out index 354d048dcc..8e651f1b54 100644 --- a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out +++ b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out @@ -202,7 +202,7 @@ PREHOOK: query: ALTER TABLE encrypted_db_outloc.renamed_encrypted_table_n1 RENAM PREHOOK: type: ALTERTABLE_RENAME PREHOOK: Input: encrypted_db_outloc@renamed_encrypted_table_n1 PREHOOK: Output: encrypted_db_outloc@renamed_encrypted_table_n1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Alter Table operation for encrypted_db_outloc.renamed_encrypted_table_n1 failed to move data due to: 'Got exception: org.apache.hadoop.ipc.RemoteException /build/ql/test/data/specified_db_location/renamed_encrypted_table_n1 can't be moved from an encryption zone.' See hive log file for details. +FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Alter Table operation for encrypted_db_outloc.renamed_encrypted_table_n1 failed to move data due to: 'Got exception: org.apache.hadoop.ipc.RemoteException /build/ql/test/data/specified_db_location/renamed_encrypted_table_n1 can't be moved from an encryption zone.' See hive log file for details. PREHOOK: query: SHOW TABLES PREHOOK: type: SHOWTABLES PREHOOK: Input: database:encrypted_db_outloc @@ -230,7 +230,7 @@ PREHOOK: query: DROP DATABASE encrypted_db PREHOOK: type: DROPDATABASE PREHOOK: Input: database:encrypted_db PREHOOK: Output: database:encrypted_db -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Database encrypted_db is not empty. One or more tables exist.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Database encrypted_db is not empty. One or more tables exist.) PREHOOK: query: DROP TABLE encrypted_db_outloc.renamed_encrypted_table_n1 PURGE PREHOOK: type: DROPTABLE PREHOOK: Input: encrypted_db_outloc@renamed_encrypted_table_n1 diff --git a/ql/src/test/results/clientpositive/llap/resourceplan.q.out b/ql/src/test/results/clientpositive/llap/resourceplan.q.out index dddf14739c..b4b48d780d 100644 --- a/ql/src/test/results/clientpositive/llap/resourceplan.q.out +++ b/ql/src/test/results/clientpositive/llap/resourceplan.q.out @@ -485,7 +485,7 @@ plan_2 default DISABLED 10 default PREHOOK: query: CREATE RESOURCE PLAN plan_2 PREHOOK: type: CREATE RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Resource plan plan_2 already exists +FAILED: Execution Error, return code 10417 from org.apache.hadoop.hive.ql.ddl.DDLTask. Resource plan plan_2 already exists PREHOOK: query: CREATE RESOURCE PLAN IF NOT EXISTS plan_2 PREHOOK: type: CREATE RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -495,7 +495,7 @@ FAILED: SemanticException Invalid create arguments (tok_create_rp plan_3 (tok_qu PREHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_2 PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
AlreadyExistsException(message:Resource plan name should be unique: ) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Resource plan name should be unique: ) PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_resourceplans @@ -569,7 +569,7 @@ STAGE PLANS: PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1 PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find pool: default1) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find pool: default1) PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_resourceplans @@ -616,11 +616,11 @@ STAGE PLANS: PREHOOK: query: ALTER RESOURCE PLAN plan_3 RENAME TO plan_4 PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30 PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -639,7 +639,7 @@ plan_3 default DISABLED NULL default PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is disabled and should be enabled before activation (or in the same command)) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is disabled and should be enabled before activation (or in the same command)) PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_resourceplans @@ -713,7 +713,7 @@ plan_3 default ACTIVE NULL default PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.) 
PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_resourceplans @@ -727,7 +727,7 @@ plan_3 default ACTIVE NULL default PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.) PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_resourceplans @@ -861,7 +861,7 @@ STAGE PLANS: PREHOOK: query: DROP RESOURCE PLAN plan_2 PREHOOK: type: DROP RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot drop an active resource plan) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot drop an active resource plan) PREHOOK: query: DROP RESOURCE PLAN plan_3 PREHOOK: type: DROP RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -879,7 +879,7 @@ plan_2 default ACTIVE 10 default PREHOOK: query: DROP RESOURCE PLAN plan_99999 PREHOOK: type: DROP RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Resource plan plan_99999 does not exist +FAILED: Execution Error, return code 10418 from org.apache.hadoop.hive.ql.ddl.DDLTask. Resource plan plan_99999 does not exist PREHOOK: query: DROP RESOURCE PLAN IF EXISTS plan_99999 PREHOOK: type: DROP RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -975,7 +975,7 @@ plan_1 default trigger_1 BYTES_READ > '10kb' KILL PREHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN ELAPSED_TIME > 300 DO KILL PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Trigger already exists, use alter: ) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Trigger already exists, use alter: ) FAILED: ParseException line 4:60 mismatched input 'AND' expecting DO near ''30sec'' in create trigger statement FAILED: ParseException line 2:63 mismatched input 'OR' expecting DO near ''30second'' in create trigger statement FAILED: ParseException line 2:50 mismatched input '>=' expecting > near 'ELAPSED_TIME' in comparisionOperator @@ -985,15 +985,15 @@ FAILED: ParseException line 2:50 mismatched input '=' expecting > near 'ELAPSED_ PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > '10k' DO KILL PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Invalid size unit k +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
java.lang.IllegalArgumentException: Invalid size unit k PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '10 millis' DO KILL PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Invalid time unit millis +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Invalid time unit millis PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > '-1000' DO KILL PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Illegal value for counter limit. Expected a positive long value. +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Illegal value for counter limit. Expected a positive long value. PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '30hour' DO MOVE TO slow_pool PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest @@ -1071,7 +1071,7 @@ plan_1 default trigger_2 ELAPSED_TIME > '30hour' MOVE TO slow_pool PREHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > '100mb' DO MOVE TO null_pool PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) PREHOOK: query: CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > '100KB' DO MOVE TO `default` PREHOOK: type: CREATE TRIGGER PREHOOK: Output: dummyHostnameForTest @@ -1148,11 +1148,11 @@ table default DISABLED 1 default PREHOOK: query: DROP TRIGGER plan_1.trigger_2 PREHOOK: type: DROP TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > "1000gb" DO KILL PREHOOK: type: ALTER TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) PREHOOK: query: ALTER RESOURCE PLAN plan_1 ACTIVATE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -1172,11 +1172,11 @@ table default DISABLED 1 default PREHOOK: query: DROP TRIGGER plan_1.trigger_2 PREHOOK: type: DROP TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) 
PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > "1000KB" DO KILL PREHOOK: type: ALTER TRIGGER PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) PREHOOK: query: ALTER RESOURCE PLAN plan_2 DISABLE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest @@ -1224,13 +1224,13 @@ PREHOOK: query: CREATE POOL plan_1.default WITH ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default' PREHOOK: type: CREATE POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) FAILED: SemanticException alloc_fraction should be specified for a pool FAILED: SemanticException query_parallelism should be specified for a pool PREHOOK: query: CREATE POOL plan_2.default WITH ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5 PREHOOK: type: CREATE POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Pool already exists: ) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Pool already exists: ) PREHOOK: query: SELECT * FROM SYS.WM_POOLS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_pools @@ -1246,7 +1246,7 @@ PREHOOK: query: CREATE POOL plan_2.default.c1 WITH ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='invalid' PREHOOK: type: CREATE POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid scheduling policy invalid +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid scheduling policy invalid PREHOOK: query: CREATE POOL plan_2.default.c1 WITH ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair' PREHOOK: type: CREATE POOL @@ -1290,7 +1290,7 @@ Sum of children pools' alloc fraction should be less than 1 got: 1.05 for pool: PREHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE PREHOOK: type: ALTER RESOURCEPLAN PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:ResourcePlan: plan_2 is invalid: [Sum of children pools' alloc fraction should be less than 1 got: 1.05 for pool: default]) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:ResourcePlan: plan_2 is invalid: [Sum of children pools' alloc fraction should be less than 1 got: 1.05 for pool: default]) PREHOOK: query: EXPLAIN ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1 PREHOOK: type: ALTER POOL PREHOOK: Output: dummyHostnameForTest @@ -1404,7 +1404,7 @@ STAGE PLANS: PREHOOK: query: DROP POOL plan_2.default PREHOOK: type: DROP POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
NoSuchObjectException(message:Cannot delete pool: default) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot delete pool: default) PREHOOK: query: SELECT * FROM SYS.WM_POOLS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_pools @@ -1422,7 +1422,7 @@ PREHOOK: query: CREATE POOL plan_2.child1.child2 WITH QUERY_PARALLELISM=2, SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.8 PREHOOK: type: CREATE POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Pool path is invalid, the parent does not exist) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Pool path is invalid, the parent does not exist) PREHOOK: query: CREATE POOL `table`.`table` WITH SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1 PREHOOK: type: CREATE POOL @@ -1498,7 +1498,7 @@ table default table.pool.child2 0.7 3 fair PREHOOK: query: DROP POOL `table`.`table` PREHOOK: type: DROP POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Cannot drop a pool that has child pools) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Cannot drop a pool that has child pools) PREHOOK: query: SELECT * FROM SYS.WM_POOLS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_pools @@ -1519,7 +1519,7 @@ table default table.pool.child2 0.7 3 fair PREHOOK: query: DROP POOL `table`.default PREHOOK: type: DROP POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Cannot drop default pool of a resource plan) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Cannot drop default pool of a resource plan) PREHOOK: query: SELECT * FROM SYS.WM_POOLS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_pools @@ -1769,11 +1769,11 @@ table default table.pool.child2 trigger2 PREHOOK: query: ALTER POOL plan_2.default ADD TRIGGER trigger_1 PREHOOK: type: ALTER POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find pool: default) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find pool: default) PREHOOK: query: ALTER POOL plan_2.def ADD TRIGGER trigger_2 PREHOOK: type: ALTER POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find trigger with name: trigger_2) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find trigger with name: trigger_2) PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS PREHOOK: type: QUERY PREHOOK: Input: sys@wm_pools_to_triggers @@ -1811,7 +1811,7 @@ POSTHOOK: type: ALTER POOL PREHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_2 PREHOOK: type: ALTER POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
NoSuchObjectException(message:Cannot find trigger with name: trigger_2) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find trigger with name: trigger_2) PREHOOK: query: DROP POOL `table`.`table`.pool.child1 PREHOOK: type: DROP POOL PREHOOK: Output: dummyHostnameForTest @@ -1949,7 +1949,7 @@ plan_2 default USER user2 def 1 PREHOOK: query: DROP POOL plan_2.def.c1 PREHOOK: type: DROP POOL PREHOOK: Output: dummyHostnameForTest -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Please remove all mappings for this pool.) +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Please remove all mappings for this pool.) PREHOOK: query: EXPLAIN DROP USER MAPPING "user2" in plan_2 PREHOOK: type: DROP MAPPING PREHOOK: Output: dummyHostnameForTest diff --git a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java index c9a57c53ed..120de13299 100644 --- a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java +++ b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java @@ -245,7 +245,7 @@ public void testExecuteStatementAsync() throws Exception { opStatus = runAsyncAndWait(sessionHandle, queryString, confOverlay, OperationState.ERROR, longPollingTimeout); // sqlState, errorCode should be set assertEquals(opStatus.getOperationException().getSQLState(), "08S01"); - assertEquals(opStatus.getOperationException().getErrorCode(), 1); + assertEquals(opStatus.getOperationException().getErrorCode(), 40000); /** * Execute an async query with default config */ diff --git a/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java b/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java index 18eff56133..97ba39f1b8 100644 --- a/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java +++ b/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java @@ -290,7 +290,7 @@ public void testExecuteStatementAsync() throws Exception { OperationState.ERROR, state); // sqlState, errorCode should be set to appropriate values assertEquals(opStatus.getOperationException().getSQLState(), "08S01"); - assertEquals(opStatus.getOperationException().getErrorCode(), 1); + assertEquals(opStatus.getOperationException().getErrorCode(), 40000); // Cleanup queryString = "DROP TABLE TEST_EXEC_ASYNC_THRIFT";
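
The CLIServiceTest and ThriftCLIServiceTest hunks above show the effect on clients: a failed DDL statement now surfaces SQLState "08S01" together with a specific Hive error code (40000 in these tests) on the operation's exception, instead of the former catch-all code 1. A hypothetical sketch of branching on the codes that appear in the updated outputs; the mapping is assembled only from messages visible in this patch, and the class and method names are invented for illustration:

import java.sql.SQLException;

// Illustrative only: coarse descriptions for the error codes seen in the updated
// golden files; real callers would consult Hive's ErrorMsg class for the authoritative list.
public final class HiveErrorCodeDescriptions {
    private HiveErrorCodeDescriptions() {}

    public static String describe(SQLException e) {
        switch (e.getErrorCode()) {
            case 40000: return "generic execution failure from a DDL task";
            case 40013: return "unable to alter table (e.g. incompatible column types)";
            case 10311: return "reordering columns is not supported for this table";
            case 10313: return "replacing columns cannot drop columns for this table";
            case 10417: return "resource plan already exists";
            case 10418: return "resource plan does not exist";
            default:    return "other Hive error code: " + e.getErrorCode();
        }
    }
}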