Index: jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java
===================================================================
--- jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java	(revision 1197152)
+++ jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java	(working copy)
@@ -76,7 +76,7 @@
         client = new HiveServer.HiveServerHandler();
       } catch (MetaException e) {
         throw new SQLException("Error accessing Hive metastore: "
-            + e.getMessage(), "08S01");
+            + e.getMessage(), "08S01", e);
       }
     } else {
       // parse uri
Index: jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java
===================================================================
--- jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java	(revision 1197152)
+++ jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java	(working copy)
@@ -58,7 +58,7 @@
     try {
       return new HiveConnection("", null);
     } catch (Exception ex) {
-      throw new SQLException();
+      throw new SQLException("Error creating HiveConnection", ex);
     }
   }
 
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(revision 1197152)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(working copy)
@@ -34,14 +34,14 @@
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.Date;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.HashMap;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
-import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -97,6 +97,7 @@
 import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
+import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
 import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
 import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
@@ -130,7 +131,6 @@
 import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
 import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.security.authorization.Privilege;
 import org.apache.hadoop.hive.serde.Constants;
@@ -836,7 +836,7 @@
     Index idx = db.getIndex(dbName, baseTableName, indexName);
 
     switch(alterIndex.getOp()) {
-    case ADDPROPS: 
+    case ADDPROPS:
       idx.getParameters().putAll(alterIndex.getProps());
       break;
     case UPDATETIMESTAMP:
@@ -1722,12 +1722,12 @@
       outStream = null;
     } catch (FileNotFoundException e) {
       LOG.info("show partitions: " + stringifyException(e));
-      throw new HiveException(e.toString());
+      throw new HiveException(e);
     } catch (IOException e) {
       LOG.info("show partitions: " + stringifyException(e));
-      throw new HiveException(e.toString());
+      throw new HiveException(e);
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException(e);
     } finally {
       IOUtils.closeStream((FSDataOutputStream) outStream);
     }
@@ -1942,7 +1942,7 @@
       LOG.warn("show function: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException(e);
     } finally {
       IOUtils.closeStream((FSDataOutputStream) outStream);
     }
@@ -2028,13 +2028,13 @@
       ((FSDataOutputStream) outStream).close();
       outStream = null;
     } catch (FileNotFoundException e) {
-      LOG.warn("show function: " + stringifyException(e));
+      LOG.warn("show locks: " + stringifyException(e));
       return 1;
     } catch (IOException e) {
-      LOG.warn("show function: " + stringifyException(e));
+      LOG.warn("show locks: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException(e);
     } finally {
       IOUtils.closeStream((FSDataOutputStream) outStream);
     }
@@ -2203,7 +2203,7 @@
       LOG.warn("describe function: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException(e);
     } finally {
       IOUtils.closeStream((FSDataOutputStream) outStream);
     }
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java	(revision 1197152)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java	(working copy)
@@ -151,7 +151,7 @@
         sd.read(prot);
       } catch (TException e) {
         LOG.error("Could not create a copy of StorageDescription");
-        throw new HiveException("Could not create a copy of StorageDescription");
+        throw new HiveException("Could not create a copy of StorageDescription", e);
       }
 
       tpart.setSd(sd);
Index: serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/SimpleCharStream.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/SimpleCharStream.java	(revision 1197152)
+++ serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/SimpleCharStream.java	(working copy)
@@ -76,7 +76,7 @@
         maxNextCharInd = (bufpos -= tokenBegin);
       }
     } catch (Throwable t) {
-      throw new Error(t.getMessage());
+      throw new Error("Error in ExpandBuff", t);
     }
 
     bufsize += 2048;
Index: serde/src/java/org/apache/hadoop/hive/serde2/thrift/TBinarySortableProtocol.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/thrift/TBinarySortableProtocol.java	(revision 1197152)
+++ serde/src/java/org/apache/hadoop/hive/serde2/thrift/TBinarySortableProtocol.java	(working copy)
@@ -41,7 +41,7 @@
 
 /**
  * An implementation of the Thrift Protocol for binary sortable records.
- * 
+ *
  * The data format: NULL: a single byte \0 NON-NULL Primitives: ALWAYS prepend a
  * single byte \1, and then: Boolean: FALSE = \1, TRUE = \2 Byte: flip the
  * sign-bit to make sure negative comes before positive Short: flip the sign-bit
@@ -55,16 +55,16 @@
  * as Int (see above), then one key by one value, and then the next pair and so
  * on. Binary: size stored as Int (see above), then the binary data in its
  * original form
- * 
+ *
  * Note that the relative order of list/map/binary will be based on the size
 * first (and elements one by one if the sizes are equal).
- * 
+ *
 * This protocol takes an additional parameter SERIALIZATION_SORT_ORDER which is
 * a string containing only "+" and "-". The length of the string should equal
 * to the number of fields in the top-level struct for serialization. "+" means
 * the field should be sorted ascendingly, and "-" means descendingly. The sub
 * fields in the same top-level field will have the same sort order.
- * 
+ *
 * This is not thrift compliant in that it doesn't write out field ids so things
 * cannot actually be versioned.
 */
@@ -340,7 +340,7 @@
     try {
       dat = str.getBytes("UTF-8");
     } catch (UnsupportedEncodingException uex) {
-      throw new TException("JVM DOES NOT SUPPORT UTF-8: " + uex.getMessage());
+      throw new TException("JVM DOES NOT SUPPORT UTF-8", uex);
     }
     writeTextBytes(dat, 0, dat.length);
   }
@@ -635,7 +635,7 @@
       String r = new String(stringBytes, 0, i, "UTF-8");
       return r;
     } catch (UnsupportedEncodingException uex) {
-      throw new TException("JVM DOES NOT SUPPORT UTF-8: " + uex.getMessage());
+      throw new TException("JVM DOES NOT SUPPORT UTF-8", uex);
     }
   }
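
Note (illustration, not part of the patch): every hunk above follows the same pattern of passing the caught exception into the new exception's constructor so the root cause survives the rethrow. A minimal, self-contained sketch of what that buys, assuming nothing beyond the JDK (ChainingDemo and its messages are invented for this example; only the three-argument SQLException constructor mirrors the patched code):

    import java.sql.SQLException;

    public class ChainingDemo {
        // Simulates a low-level failure, e.g. the metastore being unreachable.
        static void lowLevel() {
            throw new IllegalStateException("metastore unreachable");
        }

        static void connect() throws SQLException {
            try {
                lowLevel();
            } catch (IllegalStateException e) {
                // Pre-patch style: the cause is flattened into a string and its
                // stack trace is discarded.
                //   throw new SQLException("Error accessing Hive metastore: "
                //       + e.getMessage(), "08S01");

                // Post-patch style: the cause rides along with the new exception.
                throw new SQLException("Error accessing Hive metastore: "
                    + e.getMessage(), "08S01", e);
            }
        }

        public static void main(String[] args) {
            try {
                connect();
            } catch (SQLException e) {
                // With the chained cause, this prints a
                // "Caused by: java.lang.IllegalStateException: metastore unreachable"
                // section pointing at the original failure site; without it,
                // only the rethrow site is visible.
                e.printStackTrace();
            }
        }
    }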