Index: ant/src/org/apache/hadoop/hive/ant/GetVersionPref.java
===================================================================
--- ant/src/org/apache/hadoop/hive/ant/GetVersionPref.java (revision 1132521)
+++ ant/src/org/apache/hadoop/hive/ant/GetVersionPref.java (working copy)
@@ -88,7 +88,7 @@
       getProject().setProperty(property, m.matches() ? m.group(1) : "");
     } catch (Exception e) {
-      throw new BuildException("Failed with: " + e.getMessage());
+      throw new BuildException("Failed with: " + e.getMessage(), e);
     }
   }
 }
Index: contrib/src/java/org/apache/hadoop/hive/contrib/serde2/TypedBytesSerDe.java
===================================================================
--- contrib/src/java/org/apache/hadoop/hive/contrib/serde2/TypedBytesSerDe.java (revision 1132521)
+++ contrib/src/java/org/apache/hadoop/hive/contrib/serde2/TypedBytesSerDe.java (working copy)
@@ -123,10 +123,15 @@
     // All columns have to be primitive.
     for (int c = 0; c < numColumns; c++) {
       if (columnTypes.get(c).getCategory() != Category.PRIMITIVE) {
-        throw new SerDeException(getClass().getName()
-            + " only accepts primitive columns, but column[" + c + "] named "
-            + columnNames.get(c) + " has category "
-            + columnTypes.get(c).getCategory());
+        StringBuilder sb = new StringBuilder();
+        sb.append(getClass().getName());
+        sb.append(" only accepts primitive columns, but column[");
+        sb.append(c);
+        sb.append("] named ");
+        sb.append(columnNames.get(c));
+        sb.append(" has category ");
+        sb.append(columnTypes.get(c).getCategory());
+        throw new SerDeException(sb.toString());
       }
     }
 
@@ -284,7 +289,7 @@
       serializeBytesWritable.set(barrStr.getData(), 0, barrStr.getLength());
 
     } catch (IOException e) {
-      throw new SerDeException(e.getMessage());
+      throw new SerDeException("Unable to serialize object", e);
     }
     return serializeBytesWritable;
   }
Index: jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java
===================================================================
--- jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java (revision 1132521)
+++ jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java (working copy)
@@ -75,7 +75,7 @@
         client = new HiveServer.HiveServerHandler();
       } catch (MetaException e) {
         throw new SQLException("Error accessing Hive metastore: "
-            + e.getMessage(), "08S01");
+            + e.getMessage(), "08S01", e);
       }
     } else {
       // parse uri
Index: jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java
===================================================================
--- jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java (revision 1132521)
+++ jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java (working copy)
@@ -58,7 +58,7 @@
     try {
       return new HiveConnection("", null);
     } catch (Exception ex) {
-      throw new SQLException();
+      throw new SQLException("Error getting HiveConnection", ex);
     }
   }
Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java
===================================================================
--- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java (revision 1132521)
+++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java (working copy)
@@ -121,6 +121,10 @@
     }
   }
 
+  public InvalidObjectException(String message, Throwable throwable) {
+    super(message, throwable);
+  }
+
   public InvalidObjectException deepCopy() {
     return new InvalidObjectException(this);
   }
Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
===================================================================
--- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java (revision 1132521)
+++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java (working copy)
@@ -121,6 +121,10 @@
     }
   }
 
+  public MetaException(String message, Throwable throwable) {
+    super(message, throwable);
+  }
+
   public MetaException deepCopy() {
     return new MetaException(this);
   }
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (revision 1132521)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (working copy)
@@ -486,7 +486,7 @@
       try {
         return Class.forName(rawStoreClassName, true, classLoader);
       } catch (ClassNotFoundException e) {
-        throw new MetaException(rawStoreClassName + " class not found");
+        throw new MetaException(rawStoreClassName + " class not found", e);
       }
     }
 
@@ -1822,8 +1822,7 @@
           Deserializer s = MetaStoreUtils.getDeserializer(hiveConf, tbl);
           return MetaStoreUtils.getFieldsFromDeserializer(tableName, s);
         } catch (SerDeException e) {
-          StringUtils.stringifyException(e);
-          throw new MetaException(e.getMessage());
+          throw new MetaException("Error occurred while getting Deserializer", e);
         }
       }
     } finally {
Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (revision 1132521)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (working copy)
@@ -91,7 +91,6 @@
 import org.apache.hadoop.hive.metastore.parser.FilterLexer;
 import org.apache.hadoop.hive.metastore.parser.FilterParser;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
-import org.apache.hadoop.util.StringUtils;
 
 /**
  * This class is the interface between the application logic and the database
@@ -819,9 +818,9 @@
     try {
       mdb = getMDatabase(tbl.getDbName());
     } catch (NoSuchObjectException e) {
-      LOG.error(StringUtils.stringifyException(e));
-      throw new InvalidObjectException("Database " + tbl.getDbName()
-          + " doesn't exist.");
+      String errorMessage = "Database " + tbl.getDbName() + " doesn't exist.";
+      LOG.error(errorMessage, e);
+      throw new InvalidObjectException(errorMessage, e);
     }
 
     // If the table has property EXTERNAL set, update table type
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 1132521)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy)
@@ -162,6 +162,7 @@
   private static String INTERMEDIATE_ORIGINAL_DIR_SUFFIX;
   private static String INTERMEDIATE_EXTRACTED_DIR_SUFFIX;
 
+  @Override
   public boolean requireLock() {
     return this.work != null && this.work.getNeedLock();
   }
@@ -1628,12 +1629,12 @@
       ((FSDataOutputStream) outStream).close();
     } catch (FileNotFoundException e) {
       LOG.info("show partitions: " + stringifyException(e));
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in showPartitions", e);
     } catch (IOException e) {
       LOG.info("show partitions: " + stringifyException(e));
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in showPartitions", e);
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in showPartitions", e);
     }
 
     return 0;
@@ -1832,7 +1833,7 @@
       LOG.warn("show function: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in showFunctions", e);
     }
     return 0;
   }
@@ -2080,7 +2081,7 @@
       LOG.warn("describe function: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in describeFunction", e);
     }
     return 0;
   }
@@ -2382,7 +2383,7 @@
       LOG.info("describe table: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e);
+      throw new HiveException("Error in describeTable", e);
     }
 
     return 0;
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (revision 1132521)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (working copy)
@@ -151,7 +151,7 @@
         sd.read(prot);
       } catch (TException e) {
         LOG.error("Could not create a copy of StorageDescription");
-        throw new HiveException("Could not create a copy of StorageDescription");
+        throw new HiveException("Could not create a copy of StorageDescription", e);
       }
       tpart.setSd(sd);
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (revision 1132521)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (working copy)
@@ -27,9 +27,9 @@
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
+import java.util.Map.Entry;
 import java.util.regex.Pattern;
 import java.util.regex.PatternSyntaxException;
 
@@ -92,7 +92,6 @@
 import org.apache.hadoop.hive.ql.optimizer.GenMRFileSink1;
 import org.apache.hadoop.hive.ql.optimizer.GenMROperator;
 import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext;
-import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
 import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink1;
 import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink2;
 import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink3;
@@ -102,6 +101,7 @@
 import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
 import org.apache.hadoop.hive.ql.optimizer.MapJoinFactory;
 import org.apache.hadoop.hive.ql.optimizer.Optimizer;
+import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
 import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext;
 import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalOptimizer;
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
@@ -122,7 +122,6 @@
 import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc;
-import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc;
 import org.apache.hadoop.hive.ql.plan.ForwardDesc;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
@@ -145,12 +144,13 @@
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.ql.plan.UDTFDesc;
 import org.apache.hadoop.hive.ql.plan.UnionDesc;
+import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.ResourceType;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFHash;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
 import org.apache.hadoop.hive.serde.Constants;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
@@ -158,9 +158,9 @@
 import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
@@ -1475,7 +1475,7 @@
       regex = Pattern.compile(colRegex, Pattern.CASE_INSENSITIVE);
     } catch (PatternSyntaxException e) {
       throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(sel, e
-          .getMessage()));
+          .getMessage()), e);
     }
 
     StringBuilder replacementText = new StringBuilder();
@@ -7844,7 +7844,7 @@
             .getMsg());
       }
     } catch (ClassNotFoundException e) {
-      throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE.getMsg());
+      throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE.getMsg(), e);
     }
   }
Index: serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/SimpleCharStream.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/SimpleCharStream.java (revision 1132521)
+++ serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/SimpleCharStream.java (working copy)
@@ -76,7 +76,7 @@
         maxNextCharInd = (bufpos -= tokenBegin);
       }
     } catch (Throwable t) {
-      throw new Error(t.getMessage());
+      throw new Error("Error in ExpandBuff", t);
     }
 
     bufsize += 2048;
Index: serde/src/java/org/apache/hadoop/hive/serde2/thrift/TBinarySortableProtocol.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/thrift/TBinarySortableProtocol.java (revision 1132521)
+++ serde/src/java/org/apache/hadoop/hive/serde2/thrift/TBinarySortableProtocol.java (working copy)
@@ -41,7 +41,7 @@
 /**
  * An implementation of the Thrift Protocol for binary sortable records.
- * 
+ *
  * The data format: NULL: a single byte \0 NON-NULL Primitives: ALWAYS prepend a
  * single byte \1, and then: Boolean: FALSE = \1, TRUE = \2 Byte: flip the
  * sign-bit to make sure negative comes before positive Short: flip the sign-bit
@@ -55,16 +55,16 @@
 * as Int (see above), then one key by one value, and then the next pair and so
 * on. Binary: size stored as Int (see above), then the binary data in its
 * original form
- * 
+ *
 * Note that the relative order of list/map/binary will be based on the size
 * first (and elements one by one if the sizes are equal).
- * 
+ *
 * This protocol takes an additional parameter SERIALIZATION_SORT_ORDER which is
 * a string containing only "+" and "-". The length of the string should equal
 * to the number of fields in the top-level struct for serialization. "+" means
 * the field should be sorted ascendingly, and "-" means descendingly. The sub
 * fields in the same top-level field will have the same sort order.
- * 
+ *
 * This is not thrift compliant in that it doesn't write out field ids so things
 * cannot actually be versioned.
 */
@@ -340,7 +340,7 @@
     try {
       dat = str.getBytes("UTF-8");
     } catch (UnsupportedEncodingException uex) {
-      throw new TException("JVM DOES NOT SUPPORT UTF-8: " + uex.getMessage());
+      throw new TException("JVM DOES NOT SUPPORT UTF-8", uex);
     }
     writeTextBytes(dat, 0, dat.length);
   }
@@ -635,7 +635,7 @@
       String r = new String(stringBytes, 0, i, "UTF-8");
       return r;
     } catch (UnsupportedEncodingException uex) {
-      throw new TException("JVM DOES NOT SUPPORT UTF-8: " + uex.getMessage());
+      throw new TException("JVM DOES NOT SUPPORT UTF-8", uex);
     }
   }