commit f6200fcb64d62af5aef2ad55a45544bfdf706403
Author: Janaki Lahorani
Date:   Fri Jun 9 11:16:26 2017 -0700

    Fix for HIVE-16960: Error from DFS results in ugly stack at the client.

    The fix returns Hive errors for AccessControlException, *QuotaExceededException,
    UnresolvedPathException, FileNotFoundException and invalid file formats. The first
    line of the DFS error message is included in the error returned to the client. The
    DFS error message also includes the stack from DFS; that stack is not returned to
    the client but is written to the log file. SQL state values are defined for these
    error messages and propagated to the client. Internal error 29999 is introduced to
    handle any error that requires the complete stack to be returned to the client.

diff --git itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java
new file mode 100644
index 0000000000000000000000000000000000000000..c58767fc75be5d2d2a8813ef64700585c7e8c2b4
--- /dev/null
+++ itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim;
+import org.apache.hive.jdbc.miniHS2.MiniHS2;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.HashMap;
+
+/**
+ * If the operation fails because of a DFS error, it used to result in an ugly stack at the client.
+ * HIVE-16960 fixes that issue. This test case checks one DFS error related to the sticky bit. When
+ * the sticky bit is set, a user error indicating access denied will be thrown.
+ *
+ * Setup: HIVE_SERVER2_ENABLE_DOAS set to true: HS2 performs the operation as the connected user.
+ * Connect to HS2 as "hive".
+ * Create a file and set the sticky bit on the directory. This will not allow the file to move
+ * out of the directory.
+ * Perform "LOAD" operation.
+ * This operation will attempt to move the file, resulting in an error from DFS. The DFS error
+ * will translate to a Hive error with number 20009, which corresponds to "ACCESS DENIED".
+ * The test checks that 20009 is thrown.
+ *
+ * Additional tests can be added to cover quota-related exceptions.
+ */
+public class TestDFSErrorHandling
+{
+
+  private static MiniHS2 miniHS2 = null;
+  private static HiveConf hiveConf = null;
+
+  @BeforeClass
+  public static void startServices() throws Exception {
+    hiveConf = new HiveConf();
+    hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS, 1);
+    hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS, 1);
+    hiveConf.setBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI, true);
+    hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+
+    // Setting hive.server2.enable.doAs to true ensures that HS2 performs the query operation as
+    // the connected user instead of the user running HS2.
+    hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, true);
+
+    miniHS2 = new MiniHS2.Builder()
+        .withMiniMR()
+        .withRemoteMetastore()
+        .withConf(hiveConf).build();
+
+    miniHS2.start(new HashMap<String, String>());
+  }
+
+  @AfterClass
+  public static void stopServices() throws Exception {
+    if (miniHS2 != null && miniHS2.isStarted()) {
+      miniHS2.stop();
+    }
+  }
+
+  @Test
+  public void testAccessDenied() throws Exception {
+    assertTrue("Test setup failed. MiniHS2 is not initialized",
+        miniHS2 != null && miniHS2.isStarted());
+
+    Class.forName(MiniHS2.getJdbcDriverName());
+    Path scratchDir = new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.SCRATCHDIR));
+
+    MiniDFSShim dfs = miniHS2.getDfs();
+    FileSystem fs = dfs.getFileSystem();
+
+    Path stickyBitDir = new Path(scratchDir, "stickyBitDir");
+
+    fs.mkdirs(stickyBitDir);
+
+    String dataFileDir = hiveConf.get("test.data.files").replace('\\', '/')
+        .replace("c:", "").replace("C:", "").replace("D:", "").replace("d:", "");
+    Path dataFilePath = new Path(dataFileDir, "kv1.txt");
+
+    fs.copyFromLocalFile(dataFilePath, stickyBitDir);
+
+    FsPermission fsPermission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true);
+
+    // Sets the sticky bit on stickyBitDir - now removing file kv1.txt from stickyBitDir by
+    // unprivileged user will result in a DFS error.
+    fs.setPermission(stickyBitDir, fsPermission);
+
+    FileStatus[] files = fs.listStatus(stickyBitDir);
+
+    // Connecting to HS2 as foo.
+    Connection hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(), "foo", "bar");
+    Statement stmt = hs2Conn.createStatement();
+
+    String tableName = "stickyBitTable";
+
+    stmt.execute("drop table if exists " + tableName);
+    stmt.execute("create table " + tableName + " (foo int, bar string)");
+
+    try {
+      // This statement will attempt to move kv1.txt out of stickyBitDir as user foo. HS2 is
+      // expected to return 20009.
+      stmt.execute("LOAD DATA INPATH '" + stickyBitDir.toUri().getPath() + "/kv1.txt' "
+          + "OVERWRITE INTO TABLE " + tableName);
+    } catch (Exception e) {
+      if (e instanceof SQLException) {
+        SQLException se = (SQLException) e;
+        Assert.assertEquals("Unexpected error code", 20009, se.getErrorCode());
+        System.out.println(String.format("Error Message: %s", se.getMessage()));
+      } else
+        throw e;
+    }
+
+    stmt.execute("drop table if exists " + tableName);
+
+    stmt.close();
+    hs2Conn.close();
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/Driver.java ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 9aced9fc02c816bc5e529cbd9124ce84a3454e04..1618e5fbf8445fe30441316ba2361ba999f7c572 100644
--- ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -1877,6 +1877,17 @@ public int execute(boolean deferClose) throws CommandNeedRetryException {
           invokeFailureHooks(perfLogger, hookContext,
               errorMessage + Strings.nullToEmpty(tsk.getDiagnosticsMessage()), result.getTaskError());
           SQLState = "08S01";
+
+          // 08S01 (Communication error) is the default sql state. Override the sqlstate
+          // based on the ErrorMsg set in HiveException.
+          if (result.getTaskError() instanceof HiveException) {
+            ErrorMsg errorMsg = ((HiveException) result.getTaskError()).
+                getCanonicalErrorMsg();
+            if (errorMsg != ErrorMsg.GENERIC_ERROR) {
+              SQLState = errorMsg.getSQLState();
+            }
+          }
+
           console.printError(errorMessage);
           driverCxt.shutdown();
           // in case we decided to run everything in local mode, restore the
diff --git ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 6651900e79a5c3d4ad8329afbe3894544ce9f46e..76451ec7a3ef303ad5a1e43e2e91b56d467cfac9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql;
 
+import java.io.FileNotFoundException;
 import java.text.MessageFormat;
 import java.util.HashMap;
 import java.util.Map;
@@ -25,9 +26,13 @@
 import java.util.regex.Pattern;
 
 import org.antlr.runtime.tree.Tree;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.ASTNodeOrigin;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
+import org.apache.hadoop.security.AccessControlException;
 
 /**
  * List of all error messages.
@@ -495,6 +500,15 @@
    */
   OP_NOT_ALLOWED_IN_TXN(20007, "Operation {0} is not allowed in a transaction ({1},queryId={2}).", true),
   OP_NOT_ALLOWED_WITHOUT_TXN(20008, "Operation {0} is not allowed without an active transaction", true),
+  ACCESS_DENIED(20009, "Access denied: {0}", "42000", true),
+  QUOTA_EXCEEDED(20010, "Quota exceeded: {0}", "64000", true),
+  UNRESOLVED_PATH(20011, "Unresolved path: {0}", "64000", true),
+  FILE_NOT_FOUND(20012, "File not found: {0}", "64000", true),
+  WRONG_FILE_FORMAT(20013, "Wrong file format. Please check the file's format.", "64000", true),
+
+  // An exception from runtime that will show the full stack to client
+  UNRESOLVED_RT_EXCEPTION(29999, "Runtime Error: {0}", "58004", true),
+
   //========================== 30000 range starts here ========================//
   STATSPUBLISHER_NOT_OBTAINED(30000, "StatsPublisher cannot be obtained. " +
       "There was a error to retrieve the StatsPublisher, and retrying " +
@@ -580,6 +594,20 @@
   }
 
   /**
+   * Given a remote runtime exception, returns the ErrorMsg object associated with it.
+   * @param e An exception
+   * @return ErrorMsg
+   */
+  public static ErrorMsg getErrorMsg(Exception e) {
+    if (e instanceof AccessControlException) return ACCESS_DENIED;
+    if (e instanceof NSQuotaExceededException) return QUOTA_EXCEEDED;
+    if (e instanceof DSQuotaExceededException) return QUOTA_EXCEEDED;
+    if (e instanceof UnresolvedPathException) return UNRESOLVED_PATH;
+    if (e instanceof FileNotFoundException) return FILE_NOT_FOUND;
+    return UNRESOLVED_RT_EXCEPTION;
+  }
+
+  /**
    * Given an error message string, returns the ErrorMsg object associated with it.
    * @param mesg An error message string
    * @return ErrorMsg
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index f329b5111b214cba8fd800eb4fcdb2d2dcc9b8d6..5e4196d6e21a29e2a1329bc6ee2f7201b1cacc42 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.DriverContext;
+import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
 import org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer;
@@ -341,8 +342,7 @@ public int execute(DriverContext driverContext) {
             }
           }
           if (!flag) {
-            throw new HiveException(
-                "Wrong file format. Please check the file's format.");
+            throw new HiveException(ErrorMsg.WRONG_FILE_FORMAT);
           }
         } else {
           LOG.warn("Skipping file format check as dpCtx is not null");
@@ -540,6 +540,23 @@ public int execute(DriverContext driverContext) {
       }
 
       return 0;
+    } catch (HiveException he) {
+      int errorCode = 1;
+
+      if (he.getCanonicalErrorMsg() != ErrorMsg.GENERIC_ERROR) {
+        errorCode = he.getCanonicalErrorMsg().getErrorCode();
+        if (he.getCanonicalErrorMsg() == ErrorMsg.UNRESOLVED_RT_EXCEPTION) {
+          console.printError("Failed with exception " + he.getMessage(), "\n"
+              + StringUtils.stringifyException(he));
+        } else {
+          console.printInfo("Failed with exception " + he.getMessage() + "\n"
+              + he.getRemoteErrorMsg(),"\n" + StringUtils.stringifyException(he),
+              false);
+        }
+      }
+
+      setException(he);
+      return errorCode;
     } catch (Exception e) {
       console.printError("Failed with exception " + e.getMessage(), "\n"
           + StringUtils.stringifyException(e));
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index ceb8079a7fc50944abbd8b2aee2233c4347eb743..0044c7a1720bedf557f8ebc139a0fc73b9683679 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -62,6 +62,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hive.common.BlobStorageUtils;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.ObjectPair;
@@ -147,6 +148,7 @@
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.thrift.TException;
@@ -158,6 +160,7 @@
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.common.base.Splitter;
 
 /**
  * This class has functions that implement meta data/DDL operations using calls
@@ -2978,6 +2981,9 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs,
           final boolean needToCopy = needToCopy(srcP, destf, srcFs, destFs);
           final boolean isRenameAllowed = !needToCopy && !isSrcLocal;
+
+          final String msg = "Unable to move source " + srcP + " to destination " + destf;
+
           // If we do a rename for a non-local file, we will be transfering the original
           // file permissions from source to the destination. Else, in case of mvFile() where we
           // copy from source to destination, we will inherit the destination's parent group ownership.
@@ -2988,22 +2994,26 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs,
             if (null != newFiles) {
               newFiles.add(destPath);
             }
-          } catch (IOException ioe) {
-            LOG.error("Failed to move: {}", ioe.getMessage());
-            throw new HiveException(ioe.getCause());
+          } catch (Exception e) {
+            throw getHiveException(e, msg, "Failed to move: {}");
+          }
         } else {
           futures.add(pool.submit(new Callable<ObjectPair<Path, Path>>() {
             @Override
-            public ObjectPair<Path, Path> call() throws Exception {
+            public ObjectPair<Path, Path> call() throws HiveException {
               SessionState.setCurrentSessionState(parentSession);
-              Path destPath = mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isRenameAllowed);
+              try {
+                Path destPath =
+                    mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isRenameAllowed);
 
-              if (null != newFiles) {
-                newFiles.add(destPath);
+                if (null != newFiles) {
+                  newFiles.add(destPath);
+                }
+                return ObjectPair.create(srcP, destPath);
+              } catch (Exception e) {
+                throw getHiveException(e, msg);
               }
-              return ObjectPair.create(srcP, destPath);
             }
           }));
         }
@@ -3016,9 +3026,7 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs,
           ObjectPair<Path, Path> pair = future.get();
           LOG.debug("Moved src: {}", pair.getFirst().toString(), ", to dest: {}", pair.getSecond().toString());
         } catch (Exception e) {
-          LOG.error("Failed to move: {}", e.getMessage());
-          pool.shutdownNow();
-          throw new HiveException(e.getCause());
+          throw handlePoolException(pool, e);
         }
       }
     }
@@ -3200,6 +3208,8 @@ public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf,
     // (2) It is assumed that subdir and dir are in same encryption zone.
     // (3) Move individual files from scr dir to dest dir.
     boolean destIsSubDir = isSubDir(srcf, destf, srcFs, destFs, isSrcLocal);
+    final String msg = "Unable to move source " + srcf + " to destination " + destf;
+
     try {
       if (replace) {
         try{
@@ -3242,6 +3252,10 @@ public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf,
         for (final FileStatus srcStatus : srcs) {
           final Path destFile = new Path(destf, srcStatus.getPath().getName());
+
+          final String poolMsg =
+              "Unable to move source " + srcStatus.getPath() + " to destination " + destFile;
+
           if (null == pool) {
             if(!destFs.rename(srcStatus.getPath(), destFile)) {
               throw new IOException("rename for src path: " + srcStatus.getPath() + " to dest:"
@@ -3250,12 +3264,17 @@ public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf,
           } else {
             futures.add(pool.submit(new Callable<Void>() {
               @Override
-              public Void call() throws Exception {
+              public Void call() throws HiveException {
                 SessionState.setCurrentSessionState(parentSession);
                 final String group = srcStatus.getGroup();
-                if(!destFs.rename(srcStatus.getPath(), destFile)) {
-                  throw new IOException("rename for src path: " + srcStatus.getPath() + " to dest path:"
-                      + destFile + " returned false");
+                try {
+                  if (!destFs.rename(srcStatus.getPath(), destFile)) {
+                    throw new IOException(
+                        "rename for src path: " + srcStatus.getPath() + " to dest path:"
+                            + destFile + " returned false");
+                  }
+                } catch (Exception e) {
+                  throw getHiveException(e, poolMsg);
                 }
                 return null;
               }
@@ -3268,9 +3287,7 @@ public Void call() throws Exception {
         try {
           future.get();
         } catch (Exception e) {
-          LOG.debug(e.getMessage());
-          pool.shutdownNow();
-          throw new HiveException(e.getCause());
+          throw handlePoolException(pool, e);
         }
       }
     }
@@ -3283,9 +3300,57 @@ public Void call() throws Exception {
         }
       }
     }
-    } catch (IOException ioe) {
-      throw new HiveException("Unable to move source " + srcf + " to destination " + destf, ioe);
+    } catch (Exception e) {
+      throw getHiveException(e, msg);
+    }
+  }
+
+  static private HiveException getHiveException(Exception e, String msg) {
+    return getHiveException(e, msg, null);
+  }
+
+  static private HiveException handlePoolException(ExecutorService pool, Exception e) {
+    HiveException he = null;
+
+    if (e instanceof HiveException) {
+      he = (HiveException) e;
+      if (he.getCanonicalErrorMsg() != ErrorMsg.GENERIC_ERROR) {
+        if (he.getCanonicalErrorMsg() == ErrorMsg.UNRESOLVED_RT_EXCEPTION) {
+          LOG.error(String.format("Failed to move: {}", he.getMessage()));
+        } else {
+          LOG.info(String.format("Failed to move: {}", he.getRemoteErrorMsg()));
+        }
+      }
+    } else {
+      LOG.error(String.format("Failed to move: {}", e.getMessage()));
+      he = new HiveException(e.getCause());
+    }
+    pool.shutdownNow();
+    return he;
+  }
+
+  static private HiveException getHiveException(Exception e, String msg, String logMsg) {
+    // The message from remote exception includes the entire stack. The error thrown from
+    // hive based on the remote exception needs only the first line.
+    String remErrMsg = null;
+
+    if (e.getMessage() != null) {
+      remErrMsg = String.format("%s%s%s", msg, ": ",
+          Splitter.on(System.getProperty("line.separator")).split(e.getMessage()).iterator()
+              .next());
+    } else {
+      remErrMsg = msg;
+    }
+
+    ErrorMsg errorMsg = ErrorMsg.getErrorMsg(e);
+
+    if (logMsg != null)
+      LOG.info(String.format(logMsg, e.getMessage()));
+
+    if (errorMsg != ErrorMsg.UNRESOLVED_RT_EXCEPTION)
+      return new HiveException(e, e.getMessage(), errorMsg, remErrMsg);
+    else
+      return new HiveException(msg, e);
   }
 
   /**
@@ -3436,10 +3501,16 @@ private static void moveAcidDeltaFiles(String deltaFileType, PathFilter pathFilt
       for (FileStatus bucketStat : bucketStats) {
         Path bucketSrc = bucketStat.getPath();
         Path bucketDest = new Path(deltaDest, bucketSrc.getName());
+        final String msg = "Unable to move source " + bucketSrc + " to destination "
+            + bucketDest;
         LOG.info("Moving bucket " + bucketSrc.toUri().toString() + " to " +
             bucketDest.toUri().toString());
-        fs.rename(bucketSrc, bucketDest);
-        if (newFiles != null) newFiles.add(bucketDest);
+        try {
+          fs.rename(bucketSrc, bucketDest);
+          if (newFiles != null) newFiles.add(bucketDest);
+        } catch (Exception e) {
+          throw getHiveException(e, msg);
+        }
       }
     } catch (IOException e) {
       throw new HiveException("Error moving acid files " + e.getMessage(), e);
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
index d0177059ed5b73ea028109ec40b0eb9ed142069b..a23d8c008a7f2fed2bd0a36189cc01fdcf8d7734 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
@@ -29,6 +29,12 @@
    * Standard predefined message with error code and possibly SQL State, etc.
    */
   private ErrorMsg canonicalErrorMsg = ErrorMsg.GENERIC_ERROR;
+
+  /**
+   * Error messages returned from a remote exception (e.g. a hadoop error)
+   */
+  private String remoteErrorMsg;
+
   public HiveException() {
     super();
   }
@@ -46,27 +52,40 @@ public HiveException(String message, Throwable cause) {
   }
 
   public HiveException(ErrorMsg message, String... msgArgs) {
-    this(null, message, msgArgs);
+    this(null, null, message, msgArgs);
+  }
+
+  public HiveException(Throwable cause, ErrorMsg errorMsg, String... msgArgs) {
+    this(cause, null, errorMsg, msgArgs);
+  }
+
+  public HiveException(Throwable cause, ErrorMsg errorMsg) {
+    this(cause, null, errorMsg, new String[0]);
+  }
+
+  public HiveException(ErrorMsg errorMsg) {
+    this(null, null, errorMsg, new String[0]);
   }
 
   /**
    * This is the recommended constructor to use since it helps use
-   * canonical messages throughout.
+   * canonical messages throughout and propagate remote errors.
+   *
    * @param errorMsg Canonical error message
    * @param msgArgs message arguments if message is parametrized; must be {@code null} is message takes no arguments
    */
-  public HiveException(Throwable cause, ErrorMsg errorMsg, String... msgArgs) {
+  public HiveException(Throwable cause, String remErrMsg, ErrorMsg errorMsg, String... msgArgs) {
     super(errorMsg.format(msgArgs), cause);
     canonicalErrorMsg = errorMsg;
-
-  }
-  public HiveException(Throwable cause, ErrorMsg errorMsg) {
-    this(cause, errorMsg, new String[0]);
+    remoteErrorMsg = remErrMsg;
   }
+
   /**
    * @return {@link ErrorMsg#GENERIC_ERROR} by default
    */
   public ErrorMsg getCanonicalErrorMsg() { return canonicalErrorMsg; }
+
+  public String getRemoteErrorMsg() { return remoteErrorMsg; }
 }
diff --git ql/src/test/results/clientnegative/dyn_part_max.q.out ql/src/test/results/clientnegative/dyn_part_max.q.out
index 736bfac8edfc0111c634b7bb6403b396dae7578b..7de4998c2c05d84055ad3a3e3d3e811aa021513c 100644
--- ql/src/test/results/clientnegative/dyn_part_max.q.out
+++ ql/src/test/results/clientnegative/dyn_part_max.q.out
@@ -19,5 +19,4 @@ LIMIT 50
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@max_parts
-Failed with exception Number of dynamic partitions created is 49, which is more than 10. To solve this try to set hive.exec.max.dynamic.partitions to at least 49.
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.MoveTask. Number of dynamic partitions created is 49, which is more than 10. To solve this try to set hive.exec.max.dynamic.partitions to at least 49.
diff --git service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
index 2b252d2d806112b55405822af6a0641000e3e756..9880fc608210115698c7f0107cfed06797a35119 100644
--- service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
+++ service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
@@ -683,8 +683,10 @@ public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) th
       if (opException != null) {
         resp.setSqlState(opException.getSQLState());
         resp.setErrorCode(opException.getErrorCode());
-        resp.setErrorMessage(org.apache.hadoop.util.StringUtils.
-            stringifyException(opException));
+        if (opException.getErrorCode() == 29999)
+          resp.setErrorMessage(org.apache.hadoop.util.StringUtils.stringifyException(opException));
+        else
+          resp.setErrorMessage(opException.getMessage());
       } else if (executionStatus == TJobExecutionStatus.NOT_AVAILABLE
           && OperationType.EXECUTE_STATEMENT.equals(operationHandle.getOperationType())) {
         resp.getProgressUpdateResponse().setProgressedPercentage(
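
Editor's note: for readers of the patch above, the client-visible effect is that a DFS-level failure now surfaces through JDBC as a mapped Hive error code and SQLState (for example 20009 / 42000 for access denied) with a one-line message, while only error 29999 still carries the full remote stack (which otherwise stays in the HS2 log). The sketch below is not part of the patch; it is a minimal, hypothetical client showing how that would be observed. The JDBC URL, credentials, table name, and input path are placeholder values.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class DfsErrorClientSketch {
  public static void main(String[] args) throws Exception {
    // Register the Hive JDBC driver; endpoint and credentials below are placeholders.
    Class.forName("org.apache.hive.jdbc.HiveDriver");
    try (Connection conn = DriverManager.getConnection(
             "jdbc:hive2://localhost:10000/default", "foo", "bar");
         Statement stmt = conn.createStatement()) {
      // Hypothetical LOAD that fails at the DFS layer (e.g. sticky bit on the source directory).
      stmt.execute("LOAD DATA INPATH '/tmp/stickyBitDir/kv1.txt' "
          + "OVERWRITE INTO TABLE stickyBitTable");
    } catch (SQLException se) {
      // With HIVE-16960, the vendor code and SQLState come from ErrorMsg (e.g. 20009 / 42000
      // for ACCESS_DENIED), and getMessage() carries only the first line of the DFS error.
      System.err.printf("Error code: %d, SQLState: %s%n", se.getErrorCode(), se.getSQLState());
      System.err.println(se.getMessage());
    }
  }
}
```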