Index: ant/src/org/apache/hadoop/hive/ant/GetVersionPref.java
===================================================================
--- ant/src/org/apache/hadoop/hive/ant/GetVersionPref.java	(revision 1160102)
+++ ant/src/org/apache/hadoop/hive/ant/GetVersionPref.java	(working copy)
@@ -88,7 +88,7 @@
 
       getProject().setProperty(property, m.matches() ? m.group(1) : "");
     } catch (Exception e) {
-      throw new BuildException("Failed with: " + e.getMessage());
+      throw new BuildException("Failed with: " + e.getMessage(), e);
     }
   }
 }
Index: jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java
===================================================================
--- jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java	(revision 1160102)
+++ jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java	(working copy)
@@ -76,7 +76,7 @@
         client = new HiveServer.HiveServerHandler();
       } catch (MetaException e) {
         throw new SQLException("Error accessing Hive metastore: "
-            + e.getMessage(), "08S01");
+            + e.getMessage(), "08S01", e);
       }
     } else {
       // parse uri
Index: jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java
===================================================================
--- jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java	(revision 1160102)
+++ jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java	(working copy)
@@ -58,7 +58,7 @@
     try {
       return new HiveConnection("", null);
     } catch (Exception ex) {
-      throw new SQLException();
+      throw new SQLException("Error getting HiveConnection", ex);
     }
   }
 
Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java
===================================================================
--- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java	(revision 1160102)
+++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java	(working copy)
@@ -121,6 +121,10 @@
     }
   }
 
+  public InvalidObjectException(String message, Throwable throwable) {
+    super(message, throwable);
+  }
+
   public InvalidObjectException deepCopy() {
     return new InvalidObjectException(this);
   }
Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
===================================================================
--- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java	(revision 1160102)
+++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java	(working copy)
@@ -121,6 +121,10 @@
     }
   }
 
+  public MetaException(String message, Throwable throwable) {
+    super(message, throwable);
+  }
+
   public MetaException deepCopy() {
     return new MetaException(this);
   }
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(revision 1160102)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(working copy)
@@ -512,7 +512,7 @@
     try {
       return Class.forName(rawStoreClassName, true, classLoader);
     } catch (ClassNotFoundException e) {
-      throw new MetaException(rawStoreClassName + " class not found");
+      throw new MetaException(rawStoreClassName + " class not found", e);
     }
   }
 
@@ -2013,7 +2013,7 @@
         return MetaStoreUtils.getFieldsFromDeserializer(tableName, s);
       } catch (SerDeException e) {
         StringUtils.stringifyException(e);
-        throw new MetaException(e.getMessage());
+        throw new MetaException("Error occurred while getting Deserializer", e);
       }
     }
   } finally {
Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(revision 1160102)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(working copy)
@@ -878,7 +878,7 @@
     } catch (NoSuchObjectException e) {
       LOG.error(StringUtils.stringifyException(e));
       throw new InvalidObjectException("Database " + tbl.getDbName()
-          + " doesn't exist.");
+          + " doesn't exist.", e);
     }
 
     // If the table has property EXTERNAL set, update table type
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(revision 1160102)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(working copy)
@@ -1645,12 +1645,12 @@
       outStream = null;
     } catch (FileNotFoundException e) {
       LOG.info("show partitions: " + stringifyException(e));
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in showPartitions", e);
     } catch (IOException e) {
       LOG.info("show partitions: " + stringifyException(e));
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in showPartitions", e);
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in showPartitions", e);
     } finally {
       IOUtils.closeStream((FSDataOutputStream) outStream);
     }
@@ -1865,7 +1865,7 @@
       LOG.warn("show function: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in showFunctions", e);
     } finally {
       IOUtils.closeStream((FSDataOutputStream) outStream);
     }
@@ -2121,7 +2121,7 @@
       LOG.warn("describe function: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in describeFunction", e);
     } finally {
       IOUtils.closeStream((FSDataOutputStream) outStream);
     }
@@ -2439,7 +2439,7 @@
       LOG.info("describe table: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e);
+      throw new HiveException("Error in describeTable", e);
     } finally {
       IOUtils.closeStream((FSDataOutputStream) outStream);
     }
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java	(revision 1160102)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java	(working copy)
@@ -151,7 +151,7 @@
       sd.read(prot);
     } catch (TException e) {
       LOG.error("Could not create a copy of StorageDescription");
-      throw new HiveException("Could not create a copy of StorageDescription");
+      throw new HiveException("Could not create a copy of StorageDescription", e);
     }
 
     tpart.setSd(sd);
Index: serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/SimpleCharStream.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/SimpleCharStream.java	(revision 1160102)
+++ serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/SimpleCharStream.java	(working copy)
@@ -76,7 +76,7 @@
         maxNextCharInd = (bufpos -= tokenBegin);
       }
     } catch (Throwable t) {
-      throw new Error(t.getMessage());
+      throw new Error("Error in ExpandBuff", t);
     }
 
     bufsize += 2048;
Index: serde/src/java/org/apache/hadoop/hive/serde2/thrift/TBinarySortableProtocol.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/thrift/TBinarySortableProtocol.java	(revision 1160102)
+++ serde/src/java/org/apache/hadoop/hive/serde2/thrift/TBinarySortableProtocol.java	(working copy)
@@ -41,7 +41,7 @@
 
 /**
  * An implementation of the Thrift Protocol for binary sortable records.
- * 
+ *
  * The data format: NULL: a single byte \0 NON-NULL Primitives: ALWAYS prepend a
  * single byte \1, and then: Boolean: FALSE = \1, TRUE = \2 Byte: flip the
  * sign-bit to make sure negative comes before positive Short: flip the sign-bit
@@ -55,16 +55,16 @@
  * as Int (see above), then one key by one value, and then the next pair and so
  * on. Binary: size stored as Int (see above), then the binary data in its
  * original form
- * 
+ *
  * Note that the relative order of list/map/binary will be based on the size
 * first (and elements one by one if the sizes are equal).
- * 
+ *
  * This protocol takes an additional parameter SERIALIZATION_SORT_ORDER which is
  * a string containing only "+" and "-". The length of the string should equal
  * to the number of fields in the top-level struct for serialization. "+" means
  * the field should be sorted ascendingly, and "-" means descendingly. The sub
  * fields in the same top-level field will have the same sort order.
- * 
+ *
  * This is not thrift compliant in that it doesn't write out field ids so things
  * cannot actually be versioned.
  */
@@ -340,7 +340,7 @@
     try {
       dat = str.getBytes("UTF-8");
     } catch (UnsupportedEncodingException uex) {
-      throw new TException("JVM DOES NOT SUPPORT UTF-8: " + uex.getMessage());
+      throw new TException("JVM DOES NOT SUPPORT UTF-8", uex);
     }
     writeTextBytes(dat, 0, dat.length);
   }
@@ -635,7 +635,7 @@
       String r = new String(stringBytes, 0, i, "UTF-8");
       return r;
     } catch (UnsupportedEncodingException uex) {
-      throw new TException("JVM DOES NOT SUPPORT UTF-8: " + uex.getMessage());
+      throw new TException("JVM DOES NOT SUPPORT UTF-8", uex);
     }
   }
 
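
All of the hunks above apply a single pattern: instead of rethrowing only the flattened text of a caught exception (e.getMessage() or e.toString()), they pass the original exception along as the cause, so the full stack trace survives the wrapping. The two (String, Throwable) constructors added to MetaException and InvalidObjectException exist only to make that possible for those types; since both files live under src/gen/thrift, they are Thrift generator output, and the hand-added constructors would need to be reapplied after any regeneration. The sketch below shows the pattern in isolation; it is a minimal illustration, and ChainingExample, AppException, and load are hypothetical names, not taken from the Hive code base.

// Minimal sketch of the exception-chaining pattern used throughout the
// patch. ChainingExample, AppException, and load are hypothetical names.
public class ChainingExample {

  // Plays the role of the (String, Throwable) constructors added to the
  // generated MetaException/InvalidObjectException classes.
  static class AppException extends Exception {
    public AppException(String message, Throwable cause) {
      super(message, cause);
    }
  }

  static void load(String className) throws AppException {
    try {
      Class.forName(className);
    } catch (ClassNotFoundException e) {
      // Before the patch: new AppException(className + " class not found")
      // would discard e's stack trace. Passing e as the cause keeps it.
      throw new AppException(className + " class not found", e);
    }
  }

  public static void main(String[] args) {
    try {
      load("no.such.Clazz");
    } catch (AppException e) {
      // Prints the wrapper's message, then a "Caused by:" section with the
      // original ClassNotFoundException and its stack trace.
      e.printStackTrace();
    }
  }
}

Running main prints the wrapper message followed by the chained ClassNotFoundException, which is exactly the diagnostic information the pre-patch code discarded.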