Index: ant/src/org/apache/hadoop/hive/ant/GetVersionPref.java =================================================================== --- ant/src/org/apache/hadoop/hive/ant/GetVersionPref.java (revision 1125883) +++ ant/src/org/apache/hadoop/hive/ant/GetVersionPref.java (working copy) @@ -88,7 +88,7 @@ getProject().setProperty(property, m.matches() ? m.group(1) : ""); } catch (Exception e) { - throw new BuildException("Failed with: " + e.getMessage()); + throw new BuildException("Failed with: ", e); } } } Index: contrib/src/java/org/apache/hadoop/hive/contrib/serde2/TypedBytesSerDe.java =================================================================== --- contrib/src/java/org/apache/hadoop/hive/contrib/serde2/TypedBytesSerDe.java (revision 1125883) +++ contrib/src/java/org/apache/hadoop/hive/contrib/serde2/TypedBytesSerDe.java (working copy) @@ -122,10 +122,15 @@ // All columns have to be primitive. for (int c = 0; c < numColumns; c++) { if (columnTypes.get(c).getCategory() != Category.PRIMITIVE) { - throw new SerDeException(getClass().getName() - + " only accepts primitive columns, but column[" + c + "] named " - + columnNames.get(c) + " has category " - + columnTypes.get(c).getCategory()); + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getName()); + sb.append(" only accepts primitive columns, but column["); + sb.append(c); + sb.append("] named "); + sb.append(columnNames.get(c)); + sb.append(" has category "); + sb.append(columnTypes.get(c).getCategory()); + throw new SerDeException(sb.toString()); } } @@ -283,7 +288,7 @@ serializeBytesWritable.set(barrStr.getData(), 0, barrStr.getLength()); } catch (IOException e) { - throw new SerDeException(e.getMessage()); + throw new SerDeException("Unable to serialize object ",e); } return serializeBytesWritable; } Index: contrib/src/java/org/apache/hadoop/hive/contrib/util/typedbytes/TypedBytesWritableInput.java =================================================================== --- contrib/src/java/org/apache/hadoop/hive/contrib/util/typedbytes/TypedBytesWritableInput.java (revision 1125883) +++ contrib/src/java/org/apache/hadoop/hive/contrib/util/typedbytes/TypedBytesWritableInput.java (working copy) @@ -349,9 +349,11 @@ mw = new MapWritable(); } int length = in.readMapHeader(); + Writable key = null; + Writable value = null; for (int i = 0; i < length; i++) { - Writable key = read(); - Writable value = read(); + key = read(); + value = read(); mw.put(key, value); } return mw; @@ -366,10 +368,12 @@ if (mw == null) { mw = new SortedMapWritable(); } + WritableComparable key = null; + Writable value = null; int length = in.readMapHeader(); for (int i = 0; i < length; i++) { - WritableComparable key = (WritableComparable) read(); - Writable value = read(); + key = (WritableComparable) read(); + value = read(); mw.put(key, value); } return mw; Index: hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java =================================================================== --- hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java (revision 1125883) +++ hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java (working copy) @@ -134,10 +134,10 @@ l4j.debug("HWISessionItem itemInit start " + getSessionName()); OptionsProcessor oproc = new OptionsProcessor(); - if (System.getProperty("hwi-args") != null) { - String[] parts = System.getProperty("hwi-args").split("\\s+"); - if (!oproc.process_stage1(parts)) { - } + String hwi_prop=System.getProperty("hwi-args"); + if (hwi_prop != null) { + String[] parts = 
hwi_prop.split("\\s+"); + oproc.process_stage1(parts); } SessionState.initHiveLog4j(); Index: jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java =================================================================== --- jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java (revision 1125883) +++ jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java (working copy) @@ -74,8 +74,7 @@ try { client = new HiveServer.HiveServerHandler(); } catch (MetaException e) { - throw new SQLException("Error accessing Hive metastore: " - + e.getMessage(), "08S01"); + throw new SQLException("Error accessing Hive metastore: ", "08S01", e); } } else { // parse uri Index: jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java =================================================================== --- jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java (revision 1125883) +++ jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java (working copy) @@ -58,7 +58,7 @@ try { return new HiveConnection("", null); } catch (Exception ex) { - throw new SQLException(); + throw new SQLException("Error in getting HiveConnection", ex); } } Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java =================================================================== --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java (revision 1125883) +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java (working copy) @@ -331,7 +331,11 @@ @Override public int hashCode() { - return 0; + int hashValue = 0; + hashValue = hashValue + (this.isSetName() ? this.getName().hashCode() : 0); + hashValue = hashValue + (this.isSetType() ? this.getType().hashCode() : 0); + hashValue = hashValue + (this.isSetComment() ? this.getComment().hashCode() : 0); + return hashValue; } public int compareTo(FieldSchema other) { Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (revision 1125883) +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (working copy) @@ -83,8 +83,13 @@ if (!newt.getTableName().equalsIgnoreCase(name) || !newt.getDbName().equalsIgnoreCase(dbname)) { if (msdb.getTable(newt.getDbName(), newt.getTableName()) != null) { - throw new InvalidOperationException("new table " + newt.getDbName() - + "." + newt.getTableName() + " already exists"); + StringBuilder sb = new StringBuilder(); + sb.append("New table:"); + sb.append(newt.getDbName()); + sb.append("."); + sb.append(newt.getTableName()); + sb.append(" already exists"); + throw new InvalidObjectException(sb.toString()); } rename = true; } @@ -92,8 +97,13 @@ // get old table oldt = msdb.getTable(dbname, name); if (oldt == null) { - throw new InvalidOperationException("table " + newt.getDbName() + "." 
- + newt.getTableName() + " doesn't exist"); + StringBuilder sb1 = new StringBuilder(); + sb1.append("Table:"); + sb1.append(newt.getDbName()); + sb1.append("."); + sb1.append(newt.getTableName()); + sb1.append(" does not exists"); + throw new InvalidObjectException(sb1.toString()); } // check that partition keys have not changed @@ -126,24 +136,43 @@ destFs = wh.getFs(destPath); // check that src and dest are on the same file system if (srcFs != destFs) { - throw new InvalidOperationException("table new location " + destPath - + " is on a different file system than the old location " - + srcPath + ". This operation is not supported"); + StringBuilder sb2 = new StringBuilder(); + sb2.append("New Location of Table:"); + sb2.append(newt.getDbName()); + sb2.append("."); + sb2.append(newt.getTableName()); + sb2.append(" is on a different file system than the old location "); + sb2.append("\nDestination File system Location:"); + sb2.append(destPath); + sb2.append("\nSource File system Location:"); + sb2.append(srcPath); + sb2.append(". This operation is not supported"); + throw new InvalidOperationException(sb2.toString()); } try { srcFs.exists(srcPath); // check that src exists and also checks // permissions necessary if (destFs.exists(destPath)) { - throw new InvalidOperationException("New location for this table " - + newt.getDbName() + "." + newt.getTableName() - + " already exists : " + destPath); + StringBuilder sb3 = new StringBuilder(); + sb3.append("New location for the Table:"); + sb3.append(newt.getDbName()); + sb3.append("."); + sb3.append( newt.getTableName()); + sb3.append(" already exists in the location: "); + sb3.append(destPath); + throw new InvalidOperationException(sb3.toString()); } } catch (IOException e) { Warehouse.closeFs(srcFs); Warehouse.closeFs(destFs); - throw new InvalidOperationException("Unable to access new location " - + destPath + " for table " + newt.getDbName() + "." - + newt.getTableName()); + StringBuilder sb4 = new StringBuilder(); + sb4.append("Unable to access new location:"); + sb4.append(destPath); + sb4.append(" for table:"); + sb4.append(newt.getDbName()); + sb4.append("."); + sb4.append(newt.getTableName()); + throw new InvalidOperationException(sb4.toString()+" "+org.apache.hadoop.util.StringUtils.stringifyException(e)); } // also the location field in partition List parts = msdb.getPartitions(dbname, name, 0); @@ -199,8 +228,14 @@ msdb.rollbackTransaction(); } } - throw new InvalidOperationException("Unable to access old location " - + srcPath + " for table " + dbname + "." 
+ name); + StringBuilder sb5 = new StringBuilder(); + sb5.append("Unable to access old location:"); + sb5.append(srcPath); + sb5.append(" for table:"); + sb5.append(dbname); + sb5.append("."); + sb5.append(newt.getTableName()); + throw new InvalidOperationException(sb5.toString()+" "+org.apache.hadoop.util.StringUtils.stringifyException(e)); } } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (working copy) @@ -22,13 +22,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Properties; import java.util.Set; -import java.util.HashSet; import java.util.Map.Entry; -import java.util.Properties; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -36,10 +36,10 @@ import org.apache.hadoop.hive.ql.io.IOContext; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; -import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.ql.plan.TableScanDesc; +import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.SerDeUtils; @@ -81,7 +81,7 @@ private transient boolean isPartitioned; private transient boolean hasVC; private Map opCtxMap; - private Set listInputPaths = new HashSet(); + private final Set listInputPaths = new HashSet(); private Map, java.util.ArrayList> operatorToPaths; @@ -250,8 +250,9 @@ Object[] partValues = new Object[partKeys.length]; List partObjectInspectors = new ArrayList( partKeys.length); + String key = null; for (int i = 0; i < partKeys.length; i++) { - String key = partKeys[i]; + key = partKeys[i]; partNames.add(key); // Partitions do not exist for this table if (partSpec == null) { @@ -460,6 +461,7 @@ // Change the serializer etc. since it is a new file, and split can span // multiple files/partitions. 
+ @Override public void cleanUpInputFileChangedOp() throws HiveException { Path fpath = new Path((new Path(this.getExecContext().getCurrentInputFile())) .toUri().getPath()); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java (working copy) @@ -91,8 +91,9 @@ if (sz > 0) { int numCols = in.readInt(); if (numCols > 0) { + Writable val = null; for (int pos = 0; pos < sz; pos++) { - Writable val = ctx.getSerDe().getSerializedClass().newInstance(); + val = ctx.getSerDe().getSerializedClass().newInstance(); val.readFields(in); ArrayList memObj = (ArrayList) ObjectInspectorUtils Index: ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java (working copy) @@ -135,7 +135,7 @@ this.standardOI = null; this.jc = jc; } - + private JobConf getLocalFSJobConfClone(Configuration jc) { if (this.jobCloneUsingLocalFs == null) { this.jobCloneUsingLocalFs = new JobConf(jc); @@ -340,7 +340,9 @@ this.numFlushedBlocks++; } catch (Exception e) { clear(); - LOG.error(e.toString(), e); + if (LOG.isDebugEnabled()) { + LOG.debug("Error in appending the row block ", e); + } throw new HiveException(e); } } @@ -440,8 +442,8 @@ rr.close(); } } catch (Exception e) { - LOG.error(e.toString()); - throw new HiveException(e); + LOG.error("Error in closing/deleting of file "+tmpFile.getAbsolutePath(), e); + throw new HiveException("Error in closing/deleting of file ",e); } finally { rw = null; rr = null; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Throttle.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/Throttle.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Throttle.java (working copy) @@ -98,8 +98,10 @@ // The JobTracker has exceeded its threshold and is doing a GC. // The client has to wait and retry. - LOG.warn("Job is being throttled because of resource crunch on the " - + "JobTracker. Will retry in " + retry + " seconds.."); + StringBuilder sb1 = new StringBuilder("Job is being throttled because of resource crunch on the JobTracker. 
Will retry in "); + sb1.append(retry); + sb1.append(" seconds.."); + LOG.warn(sb1.toString()); Thread.sleep(retry * 1000L); } } catch (Exception e) { Index: ql/src/java/org/apache/hadoop/hive/ql/io/CodecPool.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/CodecPool.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/io/CodecPool.java (working copy) @@ -103,7 +103,9 @@ compressor = codec.createCompressor(); LOG.info("Got brand-new compressor"); } else { - LOG.debug("Got recycled compressor"); + if (LOG.isDebugEnabled()) { + LOG.debug("Got recycled compressor"); + } } return compressor; } @@ -125,7 +127,9 @@ decompressor = codec.createDecompressor(); LOG.info("Got brand-new decompressor"); } else { - LOG.debug("Got recycled decompressor"); + if (LOG.isDebugEnabled()) { + LOG.debug("Got recycled decompressor"); + } } return decompressor; } Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (working copy) @@ -151,7 +151,7 @@ sd.read(prot); } catch (TException e) { LOG.error("Could not create a copy of StorageDescription"); - throw new HiveException("Could not create a copy of StorageDescription"); + throw new HiveException("Could not create a copy of StorageDescription",e); } tpart.setSd(sd); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (working copy) @@ -691,7 +691,13 @@ } String path = p.toString(); if (LOG.isDebugEnabled()) { - LOG.debug("Adding " + path + " of table" + alias_id); + + StringBuilder sb=new StringBuilder(); + sb.append("Adding "); + sb.append(path); + sb.append(" of table"); + sb.append(alias_id); + LOG.debug(sb.toString ()); } partDir.add(p); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java (working copy) @@ -182,15 +182,11 @@ // create a walker which walks the tree in a DFS manner while maintaining // the operator stack. 
The dispatcher // generates the plan from the operator tree + String PERCENTAGE_SYMBOL="%"; Map exprRules = new LinkedHashMap(); - exprRules.put( - new RuleRegExp("R1", ExprNodeColumnDesc.class.getName() + "%"), - getColumnProcessor()); - exprRules.put( - new RuleRegExp("R2", ExprNodeFieldDesc.class.getName() + "%"), - getFieldProcessor()); - exprRules.put(new RuleRegExp("R3", ExprNodeGenericFuncDesc.class.getName() - + "%"), getGenericFuncProcessor()); + exprRules.put(new RuleRegExp("R1", ExprNodeColumnDesc.class.getName() + PERCENTAGE_SYMBOL), getColumnProcessor()); + exprRules.put(new RuleRegExp("R2", ExprNodeFieldDesc.class.getName() + PERCENTAGE_SYMBOL), getFieldProcessor()); + exprRules.put(new RuleRegExp("R5", ExprNodeGenericFuncDesc.class.getName() + PERCENTAGE_SYMBOL), getGenericFuncProcessor()); // The dispatcher fires the processor corresponding to the closest matching // rule and passes the context along Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java (working copy) @@ -64,6 +64,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { + if(null==procCtx){ + throw new SemanticException("NodeProcessorCtx instance should be Non-Null value"); + } ExprNodeDesc newcd = null; ExprNodeColumnDesc cd = (ExprNodeColumnDesc) nd; ExprProcCtx epc = (ExprProcCtx) procCtx; @@ -211,7 +214,7 @@ /** * Generates the partition pruner for the expression tree. - * + * * @param tabAlias * The table alias of the partition table that is being considered * for pruning Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java (working copy) @@ -49,6 +49,10 @@ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... 
nodeOutputs) throws SemanticException { + + if(null==procCtx){ + throw new SemanticException("NodeProcessorCtx instance should be Non-Null value"); + } OpWalkerCtx owc = (OpWalkerCtx) procCtx; FilterOperator fop = (FilterOperator) nd; FilterOperator fop2 = null; Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java (working copy) @@ -71,9 +71,7 @@ public class PartitionPruner implements Transform { // The log - private static final Log LOG = LogFactory - .getLog("hive.ql.optimizer.ppr.PartitionPruner"); - + private static final Log LOG = LogFactory.getLog(PartitionPruner.class); /* * (non-Javadoc) * Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java (working copy) @@ -184,9 +184,14 @@ // check if input pruning is possible if (sampleDescr.getInputPruning()) { - LOG.trace("numerator = " + num); - LOG.trace("denominator = " + den); - LOG.trace("bucket count = " + bucketCount); + StringBuilder sb=new StringBuilder(); + sb.append ("numerator = "); + sb.append (num); + sb.append ("\ndenominator = " ); + sb.append (den); + sb.append ("\nbucket count = "); + sb.append (bucketCount); + LOG.trace(sb.toString ()); if (bucketCount == den) { Path[] ret = new Path[1]; ret[0] = part.getBucketPath(num - 1); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java (working copy) @@ -59,6 +59,9 @@ @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... 
nodeOutputs) throws SemanticException { + if(null == procCtx){ + throw new SemanticException("NodeProcessorCtx instance should be Non-Null value"); + } UnionOperator union = (UnionOperator) nd; UnionProcContext ctx = (UnionProcContext) procCtx; Index: ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (working copy) @@ -121,7 +121,8 @@ * */ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { - private static final Log LOG = LogFactory.getLog("hive.ql.parse.DDLSemanticAnalyzer"); + private static final Log LOG = LogFactory + .getLog(DDLSemanticAnalyzer.class); private static final Map TokenToTypeName = new HashMap(); private final Set reservedPartitionValues; Index: ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java (working copy) @@ -35,8 +35,8 @@ * */ public class FunctionSemanticAnalyzer extends BaseSemanticAnalyzer { - private static final Log LOG = LogFactory - .getLog("hive.ql.parse.FunctionSemanticAnalyzer"); + private static final Log LOG = + LogFactory.getLog(FunctionSemanticAnalyzer.class); public FunctionSemanticAnalyzer(HiveConf conf) throws SemanticException { super(conf); Index: ql/src/java/org/apache/hadoop/hive/ql/parse/InputSignature.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/InputSignature.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/InputSignature.java (working copy) @@ -87,6 +87,9 @@ try { other = (InputSignature) obj; } catch (ClassCastException cce) { + if (LOG.isDebugEnabled()) { + LOG.debug("Problem in casting to InputSignature class", cce); + } return false; } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java (working copy) @@ -106,7 +106,13 @@ fromAuthority = defaultURI.getAuthority(); } - LOG.debug(fromScheme + "@" + fromAuthority + "@" + path); + StringBuilder sb=new StringBuilder(); + sb.append(fromScheme); + sb.append("@"); + sb.append(fromAuthority); + sb.append("@"); + sb.append(path); + LOG.debug(sb.toString ()); return new URI(fromScheme, fromAuthority, path, null, null); } @@ -152,11 +158,15 @@ if (!isLocal && (!StringUtils.equals(fromURI.getScheme(), toURI.getScheme()) || !StringUtils .equals(fromURI.getAuthority(), toURI.getAuthority()))) { - String reason = "Move from: " + fromURI.toString() + " to: " - + toURI.toString() + " is not valid. " - + "Please check that values for params \"default.fs.name\" and " - + "\"hive.metastore.warehouse.dir\" do not conflict."; - throw new SemanticException(ErrorMsg.ILLEGAL_PATH.getMsg(ast, reason)); + StringBuilder reason=new StringBuilder(); + reason.append ("Move from: "); + reason.append (fromURI.toString()); + reason.append (" to: "); + reason.append ( toURI.toString()); + reason.append (" is not valid. 
"); + reason.append ("Please check that values for params \"default.fs.name\" and "); + reason.append ("\"hive.metastore.warehouse.dir\" do not conflict."); + throw new SemanticException(ErrorMsg.ILLEGAL_PATH.getMsg(ast, reason.toString ())) ; } } @@ -235,7 +245,7 @@ } // create final load/move work - + String loadTmpPath = ctx.getExternalTmpFileURI(toURI); Map partSpec = ts.getPartSpec(); if (partSpec == null) { Index: ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java (working copy) @@ -34,7 +34,7 @@ public class QB { - private static final Log LOG = LogFactory.getLog("hive.ql.parse.QB"); + private static final Log LOG = LogFactory.getLog(QB.class); private final int numJoins = 0; private final int numGbys = 0; Index: ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java (working copy) @@ -28,7 +28,7 @@ public class QBExpr { - private static final Log LOG = LogFactory.getLog("hive.ql.parse.QBExpr"); + private static final Log LOG = LogFactory.getLog(QBExpr.class); /** * Opcode. Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (working copy) @@ -1474,8 +1474,7 @@ try { regex = Pattern.compile(colRegex, Pattern.CASE_INSENSITIVE); } catch (PatternSyntaxException e) { - throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(sel, e - .getMessage())); + throw new SemanticException ( ErrorMsg.INVALID_COLUMN.getMsg ( sel, e.getMessage () ),e ); } StringBuilder replacementText = new StringBuilder(); @@ -7833,7 +7832,7 @@ .getMsg()); } } catch (ClassNotFoundException e) { - throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE.getMsg()); + throw new SemanticException ( ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE.getMsg (),e); } } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java (working copy) @@ -401,7 +401,8 @@ List cols, List outputColumnNames, int start, String fieldPrefix) { List schemas = new ArrayList(cols.size()); - for (int i = 0; i < cols.size(); i++) { + int cols_size=cols.size (); + for (int i=0; i getFieldSchemasFromColumnList( List cols, String fieldPrefix) { List schemas = new ArrayList(cols.size()); - for (int i = 0; i < cols.size(); i++) { + int cols_size=cols.size (); + for (int i=0; iCONCAT_WS(sep,str1,str2,str3,...). This mimics the function from * MySQL http://dev.mysql.com/doc/refman/5.0/en/string-functions.html# * function_concat-ws - * + * * @see org.apache.hadoop.hive.ql.udf.generic.GenericUDF */ @Description(name = "concat_ws", value = "_FUNC_(separator, str1, str2, ...) 
- " @@ -55,9 +55,15 @@ for (int i = 0; i < arguments.length; i++) { if (arguments[i].getTypeName() != Constants.STRING_TYPE_NAME && arguments[i].getTypeName() != Constants.VOID_TYPE_NAME) { - throw new UDFArgumentTypeException(i, "Argument " + (i + 1) - + " of function CONCAT_WS must be \"" + Constants.STRING_TYPE_NAME - + "\", but \"" + arguments[i].getTypeName() + "\" was found."); + StringBuilder sb=new StringBuilder(); + sb.append ( "Argument " ); + sb.append ((i +1 )); + sb.append (" of function CONCAT_WS must be \""); + sb.append (Constants.STRING_TYPE_NAME); + sb.append ( "\", but \"" ); + sb.append (arguments[i].getTypeName() ); + sb.append ("\" was found."); + throw new UDFArgumentTypeException(i,sb.toString ()); } } Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFElt.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFElt.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFElt.java (working copy) @@ -33,7 +33,7 @@ * Generic UDF for string function ELT(N,str1,str2,str3,...). This * mimics the function from MySQL * http://dev.mysql.com/doc/refman/5.1/en/string-functions.html#function_elt - * + * * @see org.apache.hadoop.hive.ql.udf.generic.GenericUDF */ @Description(name = "elt", @@ -53,11 +53,15 @@ for (int i = 0; i < arguments.length; i++) { Category category = arguments[i].getCategory(); if (category != Category.PRIMITIVE) { - throw new UDFArgumentTypeException(i, "The " - + GenericUDFUtils.getOrdinal(i + 1) - + " argument of function ELT is expected to a " - + Category.PRIMITIVE.toString().toLowerCase() + " type, but " - + category.toString().toLowerCase() + " is found"); + StringBuilder sb=new StringBuilder(); + sb.append("The "); + sb.append(GenericUDFUtils.getOrdinal(i + 1)); + sb.append(" argument of function ELT is expected to a "); + sb.append(Category.PRIMITIVE.toString().toLowerCase()); + sb.append(" type, but "); + sb.append (category.toString().toLowerCase() ); + sb.append (" is found"); + throw new UDFArgumentTypeException(i,sb.toString ()); } } Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java (working copy) @@ -54,18 +54,22 @@ || poi.getPrimitiveCategory() == PrimitiveObjectInspector.PrimitiveCategory.VOID); } if (!conditionTypeIsOk) { - throw new UDFArgumentTypeException(0, - "The first argument of function IF should be \"" - + Constants.BOOLEAN_TYPE_NAME + "\", but \"" - + arguments[0].getTypeName() + "\" is found"); + StringBuilder sb= new StringBuilder("The first argument of function IF should be \""); + sb.append(Constants.BOOLEAN_TYPE_NAME); + sb.append("\", but \""); + sb.append(arguments[0].getTypeName()); + sb.append("\" is found"); + throw new UDFArgumentTypeException(0,sb.toString()); } if (!(returnOIResolver.update(arguments[1]) && returnOIResolver .update(arguments[2]))) { - throw new UDFArgumentTypeException(2, - "The second and the third arguments of function IF should have the same type, " - + "but they are different: \"" + arguments[1].getTypeName() - + "\" and \"" + arguments[2].getTypeName() + "\""); + StringBuilder sb1= new StringBuilder("The second and the third arguments of function IF should have the same type, but they are different: \"" ); + 
sb1.append(arguments[1].getTypeName()); + sb1.append("\" and \""); + sb1.append(arguments[2].getTypeName()); + sb1.append("\""); + throw new UDFArgumentTypeException(2,sb1.toString()); } return returnOIResolver.get(); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIndex.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIndex.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIndex.java (working copy) @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; @@ -41,7 +43,7 @@ private ListObjectInspector listOI; private PrimitiveObjectInspector indexOI; private ObjectInspector returnOI; - + private static Log LOG = LogFactory.getLog(GenericUDFIndex.class); @Override public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { if (arguments.length != 2) { @@ -58,19 +60,26 @@ listOI = (ListObjectInspector) arguments[0]; mapOI = null; } else { - throw new UDFArgumentTypeException(0, "\"" - + Category.MAP.toString().toLowerCase() + "\" or \"" - + Category.LIST.toString().toLowerCase() - + "\" is expected at function INDEX, but \"" - + arguments[0].getTypeName() + "\" is found"); + StringBuilder sb=new StringBuilder(); + sb.append ( "\"" ); + sb.append ( Category.MAP.toString().toLowerCase()); + sb.append ("\" or \""); + sb.append ( Category.LIST.toString().toLowerCase() ); + sb.append ( "\" is expected at function INDEX, but \"" ); + sb.append ( arguments[0].getTypeName()); + sb.append ( "\" is found"); + throw new UDFArgumentTypeException(0,sb.toString ()); } // index has to be a primitive if (arguments[1] instanceof PrimitiveObjectInspector) { indexOI = (PrimitiveObjectInspector) arguments[1]; } else { - throw new UDFArgumentTypeException(1, "Primitive Type is expected but " - + arguments[1].getTypeName() + "\" is found"); + StringBuilder sb=new StringBuilder(); + sb.append ( "Primitive Type is expected but " ); + sb.append (arguments[1].getTypeName()); + sb.append ("\" is found"); + throw new UDFArgumentTypeException(1,sb.toString ()); } if (mapOI != null) { @@ -108,9 +117,15 @@ try { intIndex = PrimitiveObjectInspectorUtils.getInt(index, indexOI); } catch (NullPointerException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Index is null", e); + } // If index is null, we should return null. return null; } catch (NumberFormatException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Index is not a number", e); + } // If index is not a number, we should return null. return null; } Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInstr.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInstr.java (revision 1125883) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInstr.java (working copy) @@ -34,7 +34,7 @@ * Generic UDF for string function INSTR(str,substr). This mimcs * the function from MySQL * http://dev.mysql.com/doc/refman/5.1/en/string-functions.html#function_instr - * + * *
  * usage:
  * INSTR(str, substr)
@@ -59,11 +59,14 @@
     for (int i = 0; i < arguments.length; i++) {
       Category category = arguments[i].getCategory();
       if (category != Category.PRIMITIVE) {
-        throw new UDFArgumentTypeException(i, "The "
-            + GenericUDFUtils.getOrdinal(i + 1)
-            + " argument of function INSTR is expected to a "
-            + Category.PRIMITIVE.toString().toLowerCase() + " type, but "
-            + category.toString().toLowerCase() + " is found");
+        StringBuilder sb=new StringBuilder("The ");
+        sb.append(GenericUDFUtils.getOrdinal(i + 1));
+        sb.append(" argument of function INSTR is expected to a ");
+        sb.append(Category.PRIMITIVE.toString().toLowerCase());
+        sb.append(" type, but ");
+        sb.append(category.toString().toLowerCase());
+        sb.append(" is found");
+          throw new UDFArgumentTypeException(i,sb.toString());
       }
     }
 
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFMap.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFMap.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFMap.java	(working copy)
@@ -20,6 +20,8 @@
 
 import java.util.HashMap;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
@@ -41,11 +43,14 @@
 public class GenericUDFMap extends GenericUDF {
   Converter[] converters;
   HashMap ret = new HashMap();
-
+  private static final Log LOG = LogFactory.getLog(GenericUDFMap.class);
   @Override
   public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
 
     if (arguments.length % 2 != 0) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Arguments are not in key/value pairs");
+      }
       throw new UDFArgumentLengthException(
           "Arguments must be in key/value pairs");
     }
@@ -59,25 +64,33 @@
       if (i % 2 == 0) {
         // Keys
         if (!(arguments[i] instanceof PrimitiveObjectInspector)) {
-          throw new UDFArgumentTypeException(1,
-              "Primitive Type is expected but " + arguments[i].getTypeName()
-              + "\" is found");
+          StringBuilder sb = new StringBuilder();
+          sb.append("Primitive Type is expected but ");
+          sb.append(arguments[i].getTypeName());
+          sb.append("\" is found");
+          throw new UDFArgumentTypeException(1, sb.toString());
         }
         if (!keyOIResolver.update(arguments[i])) {
-          throw new UDFArgumentTypeException(i, "Key type \""
-              + arguments[i].getTypeName()
-              + "\" is different from preceding key types. "
-              + "Previous key type was \"" + arguments[i - 2].getTypeName()
-              + "\"");
+          StringBuilder sb = new StringBuilder();
+          sb.append("Key type \"");
+          sb.append(arguments[i].getTypeName());
+          sb.append("\" is different from preceding key types. ");
+          sb.append("Previous key type was \"");
+          sb.append(arguments[i - 2].getTypeName());
+          sb.append("\"");
+          throw new UDFArgumentTypeException(i, sb.toString());
         }
       } else {
         // Values
         if (!valueOIResolver.update(arguments[i])) {
-          throw new UDFArgumentTypeException(i, "Value type \""
-              + arguments[i].getTypeName()
-              + "\" is different from preceding value types. "
-              + "Previous value type was \"" + arguments[i - 2].getTypeName()
-              + "\"");
+          StringBuilder sb = new StringBuilder();
+          sb.append("Value type \"");
+          sb.append(arguments[i].getTypeName());
+          sb.append("\" is different from preceding value types. ");
+          sb.append("Previous value type was \"");
+          sb.append(arguments[i - 2].getTypeName());
+          sb.append("\"");
+          throw new UDFArgumentTypeException(i, sb.toString());
         }
       }
     }
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWhen.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWhen.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWhen.java	(working copy)
@@ -26,7 +26,7 @@
 
 /**
  * GenericUDF Class for SQL construct "CASE a WHEN b THEN c [ELSE f] END".
- * 
+ *
  * NOTES: 1. a and b should have the same TypeInfo, or an exception will be
  * thrown. 2. c and f should have the same TypeInfo, or an exception will be
  * thrown.
@@ -43,26 +43,35 @@
 
     for (int i = 0; i + 1 < arguments.length; i += 2) {
       if (!arguments[i].getTypeName().equals(Constants.BOOLEAN_TYPE_NAME)) {
-        throw new UDFArgumentTypeException(i, "\""
-            + Constants.BOOLEAN_TYPE_NAME + "\" is expected after WHEN, "
-            + "but \"" + arguments[i].getTypeName() + "\" is found");
+        StringBuilder sb = new StringBuilder();
+        sb.append("\"");
+        sb.append(Constants.BOOLEAN_TYPE_NAME);
+        sb.append("\" is expected after WHEN, ");
+        sb.append("but \"");
+        sb.append(arguments[i].getTypeName());
+        sb.append("\" is found");
+        throw new UDFArgumentTypeException(i, sb.toString());
       }
       if (!returnOIResolver.update(arguments[i + 1])) {
-        throw new UDFArgumentTypeException(i + 1,
-            "The expressions after THEN should have the same type: \""
-            + returnOIResolver.get().getTypeName()
-            + "\" is expected but \"" + arguments[i + 1].getTypeName()
-            + "\" is found");
+        StringBuilder sb = new StringBuilder();
+        sb.append("The expressions after THEN should have the same type: \"");
+        sb.append(returnOIResolver.get().getTypeName());
+        sb.append("\" is expected but \"");
+        sb.append(arguments[i + 1].getTypeName());
+        sb.append("\" is found");
+        throw new UDFArgumentTypeException(i + 1, sb.toString());
       }
     }
     if (arguments.length % 2 == 1) {
       int i = arguments.length - 2;
       if (!returnOIResolver.update(arguments[i + 1])) {
-        throw new UDFArgumentTypeException(i + 1,
-            "The expression after ELSE should have the same type as those after THEN: \""
-            + returnOIResolver.get().getTypeName()
-            + "\" is expected but \"" + arguments[i + 1].getTypeName()
-            + "\" is found");
+        StringBuilder sb = new StringBuilder();
+        sb.append("The expression after ELSE should have the same type as those after THEN: \"");
+        sb.append(returnOIResolver.get().getTypeName());
+        sb.append("\" is expected but \"");
+        sb.append(arguments[i + 1].getTypeName());
+        sb.append("\" is found");
+        throw new UDFArgumentTypeException(i + 1, sb.toString());
       }
     }
 
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateDiff.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateDiff.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateDiff.java	(working copy)
@@ -22,6 +22,8 @@
 import java.text.SimpleDateFormat;
 import java.util.TimeZone;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.io.IntWritable;
@@ -41,7 +43,7 @@
     + "  1")
 public class UDFDateDiff extends UDF {
   private final SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd");
-
+  private static final Log LOG = LogFactory.getLog(UDFDateDiff.class);
   private IntWritable result = new IntWritable();
 
   public UDFDateDiff() {
@@ -76,6 +78,10 @@
       result.set((int) (diffInMilliSeconds / (86400 * 1000)));
       return result;
     } catch (ParseException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Error while getting difference of dates :" + dateString1.toString() + " and :"
+            + dateString2.toString(), e);
+      }
       return null;
     }
   }
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateSub.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateSub.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateSub.java	(working copy)
@@ -23,6 +23,8 @@
 import java.util.Calendar;
 import java.util.Date;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.io.IntWritable;
@@ -43,7 +45,7 @@
 public class UDFDateSub extends UDF {
   private final SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd");
   private final Calendar calendar = Calendar.getInstance();
-
+  private static final Log LOG = LogFactory.getLog(UDFDateSub.class);
   private Text result = new Text();
 
   public UDFDateSub() {
@@ -77,6 +79,9 @@
       result.set(formatter.format(newDate));
       return result;
     } catch (ParseException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Error in UDFDateSub :" + days.get() + " from :" + dateString1.toString(), e);
+      }
       return null;
     }
   }
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDayOfMonth.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDayOfMonth.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDayOfMonth.java	(working copy)
@@ -23,6 +23,8 @@
 import java.util.Calendar;
 import java.util.Date;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.io.IntWritable;
@@ -41,7 +43,7 @@
 public class UDFDayOfMonth extends UDF {
   private final SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd");
   private final Calendar calendar = Calendar.getInstance();
-
+  private static final Log LOG = LogFactory.getLog(UDFDayOfMonth.class);
   private IntWritable result = new IntWritable();
 
   public UDFDayOfMonth() {
@@ -68,6 +70,9 @@
       result.set(calendar.get(Calendar.DAY_OF_MONTH));
       return result;
     } catch (ParseException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Error while getting day of the month :" + dateString.toString(), e);
+      }
       return null;
     }
   }
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFFindInSet.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFFindInSet.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFFindInSet.java	(working copy)
@@ -46,8 +46,8 @@
     }
 
     byte[] search_bytes = s.getBytes();
-
-    for (int i = 0; i < s.getLength(); i++) {
+    int search_length = s.getLength();
+    for(int i = 0; i < search_length; i++) {
       if (search_bytes[i] == ',') {
         result.set(0);
         return result;
@@ -56,13 +56,13 @@
     }
 
     byte[] data = txtarray.getBytes();
-    int search_length = s.getLength();
 
     int cur_pos_in_array = 0;
     int cur_length = 0;
     boolean matching = true;
+    int txtarray_length = txtarray.getLength();
 
-    for (int i = 0; i < txtarray.getLength(); i++) {
+    for(int i = 0; i < txtarray_length ; i++) {
       if (data[i] == ',') {
         cur_pos_in_array++;
         if (matching && cur_length == search_length) {
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFHex.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFHex.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFHex.java	(working copy)
@@ -44,7 +44,7 @@
 
   /**
    * Convert num to hex.
-   * 
+   *
    */
   private Text evaluate(long num) {
     // Extract the hex digits of num into value[] from right to left
@@ -76,7 +76,7 @@
 
   /**
    * Convert every character in s to two hex digits.
-   * 
+   *
    */
   public Text evaluate(Text s) {
     if (s == null) {
@@ -88,7 +88,8 @@
     }
 
     byte[] str = s.getBytes();
-    for (int i = 0; i < s.getLength(); i++) {
+    int length = s.getLength();
+    for (int i = 0; i < length; i++) {
       value[i * 2] = (byte) Character.toUpperCase(Character.forDigit(
           (str[i] & 0xF0) >>> 4, 16));
       value[i * 2 + 1] = (byte) Character.toUpperCase(Character.forDigit(
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java	(working copy)
@@ -25,6 +25,8 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.io.Text;
@@ -57,7 +59,7 @@
 public class UDFJson extends UDF {
   private final Pattern patternKey = Pattern.compile("^([a-zA-Z0-9_\\-]+).*");
   private final Pattern patternIndex = Pattern.compile("\\[([0-9]+|\\*)\\]");
-
+  private static final Log LOG = LogFactory.getLog(UDFJson.class);
   // An LRU cache using a linked hash map
   static class HashCache extends LinkedHashMap {
 
@@ -138,6 +140,9 @@
       result.set(extractObject.toString());
       return result;
     } catch (Exception e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Error in UDFJson. Inputs are " + jsonString + ", " + pathString);
+      }
       return null;
     }
   }
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLength.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLength.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLength.java	(working copy)
@@ -41,7 +41,8 @@
 
     byte[] data = s.getBytes();
     int len = 0;
-    for (int i = 0; i < s.getLength(); i++) {
+    int length = s.getLength();
+    for (int i = 0; i < length; i++) {
       if (GenericUDFUtils.isUtfStartByte(data[i])) {
         len++;
       }
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLpad.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLpad.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLpad.java	(working copy)
@@ -58,7 +58,8 @@
     int pos = Math.max(len - s.getLength(), 0);
 
     // Copy the padding
-    for (int i = 0; i < pos; i += pad.getLength()) {
+    int length = pad.getLength();
+    for (int i = 0; i < pos; i += length) {
       for (int j = 0; j < pad.getLength() && j < pos - i; j++) {
         data[i + j] = padTxt[j];
       }
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFReverse.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFReverse.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFReverse.java	(working copy)
@@ -34,7 +34,7 @@
 
   /**
    * Reverse a portion of an array in-place.
-   * 
+   *
    * @param arr
    *          The array where the data will be reversed.
    * @param first
@@ -63,14 +63,15 @@
     // character, then reverse the whole string.
     byte[] data = result.getBytes();
     int prev = 0; // The index where the current char starts
-    for (int i = 1; i < result.getLength(); i++) {
+    int length = result.getLength();
+    for (int i = 1; i < length; i++) {
       if (GenericUDFUtils.isUtfStartByte(data[i])) {
         reverse(data, prev, i - 1);
         prev = i;
       }
     }
-    reverse(data, prev, result.getLength() - 1);
-    reverse(data, 0, result.getLength() - 1);
+    reverse(data, prev, length - 1);
+    reverse(data, 0, length - 1);
 
     return result;
   }
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToLong.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToLong.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToLong.java	(working copy)
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hive.ql.udf;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
@@ -36,6 +38,7 @@
  */
 public class UDFToLong extends UDF {
   private LongWritable longWritable = new LongWritable();
+  private static final Log LOG = LogFactory.getLog(UDFToLong.class);
 
   public UDFToLong() {
   }
@@ -174,6 +177,9 @@
             .set(LazyLong.parseLong(i.getBytes(), 0, i.getLength(), 10));
         return longWritable;
       } catch (NumberFormatException e) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Error in UDFToLong.Input is " + i.toString(), e);
+        }
         // MySQL returns 0 if the string is not a well-formed numeric value.
         // return LongWritable.valueOf(0);
         // But we decided to return NULL instead, which is more conservative.
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFUnixTimeStamp.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFUnixTimeStamp.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFUnixTimeStamp.java	(working copy)
@@ -22,6 +22,8 @@
 import java.text.SimpleDateFormat;
 import java.util.Date;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.io.LongWritable;
@@ -40,7 +42,7 @@
   // For now, we just use the default time zone.
   private final SimpleDateFormat formatter = new SimpleDateFormat(
       "yyyy-MM-dd HH:mm:ss");
-
+  private static final Log LOG = LogFactory.getLog(UDFUnixTimeStamp.class);
   LongWritable result = new LongWritable();
 
   public UDFUnixTimeStamp() {
@@ -74,6 +76,9 @@
       result.set(date.getTime() / 1000);
       return result;
     } catch (ParseException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Error in UDFUnixTimeStamp.Input is " + dateText.toString(), e);
+      }
       return null;
     }
   }
@@ -99,6 +104,9 @@
         lastPatternText.set(patternText);
       }
     } catch (Exception e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Error in UDFUnixTimeStamp.Input is " + dateText.toString(), e);
+      }
       return null;
     }
 
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFWeekOfYear.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFWeekOfYear.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFWeekOfYear.java	(working copy)
@@ -23,6 +23,8 @@
 import java.util.Calendar;
 import java.util.Date;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.io.IntWritable;
@@ -42,7 +44,7 @@
 public class UDFWeekOfYear extends UDF {
   private final SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd");
   private final Calendar calendar = Calendar.getInstance();
-
+  private static final Log LOG = LogFactory.getLog(UDFWeekOfYear.class);
   private IntWritable result = new IntWritable();
 
   public UDFWeekOfYear() {
@@ -69,6 +71,9 @@
       result.set(calendar.get(Calendar.WEEK_OF_YEAR));
       return result;
     } catch (ParseException e) {
+      LOG.error("Error in UDFWeekOfYear.Input is " + dateString.toString(), e);
       return null;
     }
   }
Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java	(revision 1125883)
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java	(working copy)
@@ -23,6 +23,8 @@
 import java.util.Calendar;
 import java.util.Date;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.io.IntWritable;
@@ -43,6 +45,7 @@
   private final Calendar calendar = Calendar.getInstance();
 
   private IntWritable result = new IntWritable();
+  private static final Log LOG = LogFactory.getLog(UDFYear.class);
 
   public UDFYear() {
   }
@@ -68,6 +71,9 @@
       result.set(calendar.get(Calendar.YEAR));
       return result;
     } catch (ParseException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Error in UDFYear.Input is " + dateString.toString(), e);
+      }
       return null;
     }
   }
Index: ql/src/test/results/clientnegative/describe_xpath1.q.out
===================================================================
--- ql/src/test/results/clientnegative/describe_xpath1.q.out	(revision 1125883)
+++ ql/src/test/results/clientnegative/describe_xpath1.q.out	(working copy)
@@ -1,4 +1,4 @@
 PREHOOK: query: describe src_thrift.$elem$
 PREHOOK: type: DESCTABLE
-FAILED: Error in metadata: java.lang.RuntimeException: cannot find field $elem$ from [public int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, public java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, public java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, public java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, public java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, public java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString]
+FAILED: Error in metadata: Error in describeTable :
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask
Index: ql/src/test/results/clientnegative/describe_xpath2.q.out
===================================================================
--- ql/src/test/results/clientnegative/describe_xpath2.q.out	(revision 1125883)
+++ ql/src/test/results/clientnegative/describe_xpath2.q.out	(working copy)
@@ -1,4 +1,4 @@
 PREHOOK: query: describe src_thrift.$key$
 PREHOOK: type: DESCTABLE
-FAILED: Error in metadata: java.lang.RuntimeException: cannot find field $key$ from [public int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, public java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, public java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, public java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, public java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lintString, public java.util.Map org.apache.hadoop.hive.serde2.thrift.test.Complex.mStringString]
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask
+FAILED: Error in metadata: Error in describeTable :
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask
\ No newline at end of file
Index: ql/src/test/results/clientnegative/describe_xpath3.q.out
===================================================================
--- ql/src/test/results/clientnegative/describe_xpath3.q.out	(revision 1125883)
+++ ql/src/test/results/clientnegative/describe_xpath3.q.out	(working copy)
@@ -1,4 +1,4 @@
 PREHOOK: query: describe src_thrift.lint.abc
 PREHOOK: type: DESCTABLE
-FAILED: Error in metadata: org.apache.hadoop.hive.ql.metadata.HiveException: Error in getting fields from serde.Unknown type for abc
+FAILED: Error in metadata: Error in describeTable :
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask
Index: ql/src/test/results/clientnegative/describe_xpath4.q.out
===================================================================
--- ql/src/test/results/clientnegative/describe_xpath4.q.out	(revision 1125883)
+++ ql/src/test/results/clientnegative/describe_xpath4.q.out	(working copy)
@@ -1,4 +1,4 @@
 PREHOOK: query: describe src_thrift.mStringString.abc
 PREHOOK: type: DESCTABLE
-FAILED: Error in metadata: org.apache.hadoop.hive.ql.metadata.HiveException: Error in getting fields from serde.Unknown type for abc
+FAILED: Error in metadata: Error in describeTable :
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask
Index: serde/src/java/org/apache/hadoop/hive/serde2/columnar/BytesRefArrayWritable.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/columnar/BytesRefArrayWritable.java	(revision 1125883)
+++ serde/src/java/org/apache/hadoop/hive/serde2/columnar/BytesRefArrayWritable.java	(working copy)
@@ -55,7 +55,7 @@
    */
   public BytesRefArrayWritable(int capacity) {
     if (capacity < 0) {
-      throw new IllegalArgumentException("Capacity can not be negative.");
+      throw new IllegalArgumentException("Capacity can not be negative." + capacity);
     }
     bytesRefWritables = new BytesRefWritable[0];
     ensureCapacity(capacity);
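
The BytesRefArrayWritable change above appends the rejected capacity to the message; including the offending value is what makes the precondition failure diagnosable. A small, hypothetical helper sketching the same idea (not part of the patch):

    public final class ArgChecks {
      private ArgChecks() {
      }

      // Rejects negative values, reporting both the parameter name and the bad value.
      public static int checkNonNegative(String name, int value) {
        if (value < 0) {
          throw new IllegalArgumentException(name + " can not be negative: " + value);
        }
        return value;
      }
    }

A constructor could then call, for example, ensureCapacity(ArgChecks.checkNonNegative("capacity", capacity)).
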
Index: serde/src/java/org/apache/hadoop/hive/serde2/columnar/BytesRefWritable.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/columnar/BytesRefWritable.java	(revision 1125883)
+++ serde/src/java/org/apache/hadoop/hive/serde2/columnar/BytesRefWritable.java	(working copy)
@@ -100,7 +100,7 @@
 
   /**
    * Returns a copy of the underlying bytes referenced by this instance.
-   * 
+   *
    * @return a new copied byte array
    * @throws IOException
    */
@@ -113,7 +113,7 @@
 
   /**
    * Returns the underlying bytes.
-   * 
+   *
    * @throws IOException
    */
   public byte[] getData() throws IOException {
@@ -124,7 +124,7 @@
   /**
    * readFields() will corrupt the array. So use the set method whenever
    * possible.
-   * 
+   *
    * @see #readFields(DataInput)
    */
   public void set(byte[] newData, int offset, int len) {
@@ -137,7 +137,7 @@
   /**
    * readFields() will corrupt the array. So use the set method whenever
    * possible.
-   * 
+   *
    * @see #readFields(DataInput)
    */
   public void set(LazyDecompressionCallback newData, int offset, int len) {
@@ -156,7 +156,7 @@
    * Always reuse the bytes array if length of bytes array is equal or greater
    * to the current record, otherwise create a new one. readFields will corrupt
    * the array. Please use set() whenever possible.
-   * 
+   *
    * @see #set(byte[], int, int)
    */
   public void readFields(DataInput in) throws IOException {
@@ -186,12 +186,13 @@
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder(3 * length);
+    String num;
     for (int idx = start; idx < length; idx++) {
       // if not the first, put a blank separator in
       if (idx != 0) {
         sb.append(' ');
       }
-      String num = Integer.toHexString(0xff & bytes[idx]);
+      num = Integer.toHexString(0xff & bytes[idx]);
       // if it is only one digit, add a leading 0.
       if (num.length() < 2) {
         sb.append('0');
Index: serde/src/java/org/apache/hadoop/hive/serde2/columnar/ColumnarSerDe.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/columnar/ColumnarSerDe.java	(revision 1125883)
+++ serde/src/java/org/apache/hadoop/hive/serde2/columnar/ColumnarSerDe.java	(working copy)
@@ -78,7 +78,7 @@
 
   /**
    * Initialize the SerDe given the parameters.
-   * 
+   *
    * @see SerDe#initialize(Configuration, Properties)
    */
   public void initialize(Configuration job, Properties tbl) throws SerDeException {
@@ -137,7 +137,7 @@
 
   /**
    * Returns the Writable Class after serialization.
-   * 
+   *
    * @see SerDe#getSerializedClass()
    */
   public Class<? extends Writable> getSerializedClass() {
@@ -150,7 +150,7 @@
 
   /**
    * Serialize a row of data.
-   * 
+   *
    * @param obj
    *          The row object
    * @param objInspector
@@ -161,9 +161,11 @@
   public Writable serialize(Object obj, ObjectInspector objInspector) throws SerDeException {
 
     if (objInspector.getCategory() != Category.STRUCT) {
-      throw new SerDeException(getClass().toString()
-          + " can only serialize struct types, but we got: "
-          + objInspector.getTypeName());
+      StringBuilder sb = new StringBuilder();
+      sb.append(getClass().toString());
+      sb.append(" can only serialize struct types, but we got: ");
+      sb.append(objInspector.getTypeName());
+      throw new SerDeException(sb.toString());
     }
 
     // Prepare the field ObjectInspectors
@@ -180,17 +182,27 @@
       serializeStream.reset();
       int count = 0;
       // Serialize each field
-      for (int i = 0; i < fields.size(); i++) {
+      int fields_size = fields.size();
+      for (int i = 0; i < fields_size; i++) {
         // Get the field objectInspector and the field object.
         ObjectInspector foi = fields.get(i).getFieldObjectInspector();
         Object f = (list == null ? null : list.get(i));
 
         if (declaredFields != null && i >= declaredFields.size()) {
-          throw new SerDeException("Error: expecting " + declaredFields.size()
-              + " but asking for field " + i + "\n" + "data=" + obj + "\n"
-              + "tableType=" + serdeParams.getRowTypeInfo().toString() + "\n"
-              + "dataType="
-              + TypeInfoUtils.getTypeInfoFromObjectInspector(objInspector));
+          StringBuilder sb = new StringBuilder();
+          sb.append("Error: expecting ");
+          sb.append(declaredFields.size());
+          sb.append(" but asking for field ");
+          sb.append(i);
+          sb.append("\ndata=");
+          sb.append(obj);
+          sb.append("\n");
+          sb.append("tableType=");
+          sb.append(serdeParams.getRowTypeInfo().toString());
+          sb.append("\n");
+          sb.append("dataType=");
+          sb.append(TypeInfoUtils.getTypeInfoFromObjectInspector(objInspector));
+          throw new SerDeException(sb.toString());
         }
 
         // If the field that is passed in is NOT a primitive, and either the
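
Several hunks in this patch (ColumnarSerDe above, DynamicSerDeFieldList and LazyStruct below) replace long + concatenations in throw and log statements with an explicit StringBuilder. javac already compiles a single concatenation expression down to a StringBuilder, so the gain here is mostly readability for long, multi-part messages. A hedged sketch of the pattern, with a hypothetical exception type so it compiles on its own:

    // Hypothetical stand-in for SerDeException so the sketch is self-contained.
    class RowFormatException extends Exception {
      RowFormatException(String message) {
        super(message);
      }
    }

    public final class StructCheckExample {
      // Throws when the inspected category is not a struct, mirroring the
      // message layout used in the patch above.
      public static void requireStruct(Class<?> serde, String category, String typeName)
          throws RowFormatException {
        if (!"STRUCT".equals(category)) {
          StringBuilder sb = new StringBuilder();
          sb.append(serde.getName());
          sb.append(" can only serialize struct types, but we got: ");
          sb.append(typeName);
          throw new RowFormatException(sb.toString());
        }
      }
    }
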
Index: serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeFieldList.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeFieldList.java	(revision 1125883)
+++ serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeFieldList.java	(working copy)
@@ -244,9 +244,15 @@
     // For every field
     List<? extends StructField> fields = soi.getAllStructFieldRefs();
     if (fields.size() != ordered_types.length) {
-      throw new SerDeException("Trying to serialize " + fields.size()
-          + " fields into a struct with " + ordered_types.length + " object="
-          + o + " objectinspector=" + oi.getTypeName());
+      StringBuilder sb = new StringBuilder("Trying to serialize ");
+      sb.append(fields.size());
+      sb.append(" fields into a struct with ");
+      sb.append(ordered_types.length);
+      sb.append(" object=");
+      sb.append(o);
+      sb.append(" objectinspector=");
+      sb.append(oi.getTypeName());
+      throw new SerDeException(sb.toString());
     }
     for (int i = 0; i < fields.size(); i++) {
       Object f = soi.getStructFieldData(o, fields.get(i));
Index: serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeTypeSet.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeTypeSet.java	(revision 1125883)
+++ serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeTypeSet.java	(working copy)
@@ -68,7 +68,7 @@
 
   @Override
   public String toString() {
-    return "set<" + getElementType().toString() + ">";
+    return "set<" + this.getElementType()+ ">";
   }
 
   @Override
@@ -93,8 +93,9 @@
     } else {
       result = new HashSet();
     }
+    Object elem;
     for (int i = 0; i < theset.size; i++) {
-      Object elem = getElementType().deserialize(null, iprot);
+      elem = getElementType().deserialize(null, iprot);
       result.add(elem);
     }
     // in theory, the below call isn't needed in non thrift_mode, but let's not
Index: serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/SimpleCharStream.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/SimpleCharStream.java	(revision 1125883)
+++ serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/SimpleCharStream.java	(working copy)
@@ -76,7 +76,7 @@
         maxNextCharInd = (bufpos -= tokenBegin);
       }
     } catch (Throwable t) {
-      throw new Error(t.getMessage());
+      throw new Error("Error in ExpandBuff",t);
     }
 
     bufsize += 2048;
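
The SimpleCharStream change above passes the caught Throwable as the cause instead of only copying its message, so the original stack trace survives the re-throw. A minimal sketch of the idea; the buffer handling below is made up for illustration:

    public final class ExpandBuffExample {
      // Grows a buffer, wrapping any low-level failure with its cause attached.
      public static char[] expand(char[] buffer, int extra) {
        try {
          char[] newBuffer = new char[buffer.length + extra];
          System.arraycopy(buffer, 0, newBuffer, 0, buffer.length);
          return newBuffer;
        } catch (Throwable t) {
          // Error(String, Throwable) keeps t reachable via getCause(),
          // unlike new Error(t.getMessage()).
          throw new Error("Error in ExpandBuff", t);
        }
      }
    }
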
Index: serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySimpleSerDe.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySimpleSerDe.java	(revision 1125883)
+++ serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySimpleSerDe.java	(working copy)
@@ -97,6 +97,9 @@
       try {
         return Byte.valueOf(altValue).byteValue();
       } catch (NumberFormatException e) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Error in getting the Byte from " + altValue);
+        }
         return (byte) altValue.charAt(0);
       }
     }
Index: serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyStruct.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyStruct.java	(revision 1125883)
+++ serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyStruct.java	(working copy)
@@ -31,7 +31,7 @@
 /**
  * LazyObject for storing a struct. The field of a struct can be primitive or
  * non-primitive.
- * 
+ *
  * LazyStruct does not deal with the case of a NULL struct. That is handled by
  * the parent LazyObject.
  */
@@ -71,7 +71,7 @@
 
   /**
    * Set the row data for this LazyStruct.
-   * 
+   *
    * @see LazyObject#init(ByteArrayRef, int, int)
    */
   @Override
@@ -157,8 +157,13 @@
     // Missing fields?
     if (!missingFieldWarned && fieldId < fields.length) {
       missingFieldWarned = true;
-      LOG.warn("Missing fields! Expected " + fields.length + " fields but "
-          + "only got " + fieldId + "! Ignoring similar problems.");
+      StringBuilder sb = new StringBuilder();
+      sb.append("Missing fields! Expected ");
+      sb.append(fields.length);
+      sb.append(" fields but only got ");
+      sb.append(fieldId);
+      sb.append("! Ignoring similar problems.");
+      LOG.warn(sb.toString());
     }
 
     Arrays.fill(fieldInited, false);
@@ -167,13 +172,13 @@
 
   /**
    * Get one field out of the struct.
-   * 
+   *
    * If the field is a primitive field, return the actual object. Otherwise
    * return the LazyObject. This is because PrimitiveObjectInspector does not
    * have control over the object used by the user - the user simply directly
    * use the Object instead of going through Object
    * PrimitiveObjectInspector.get(Object).
-   * 
+   *
    * @param fieldID
    *          The field ID
    * @return The field as a LazyObject
@@ -188,7 +193,7 @@
   /**
    * Get the field out of the row without checking parsed. This is called by
    * both getField and getFieldsAsList.
-   * 
+   *
    * @param fieldID
    *          The id of the field starting from 0.
    * @param nullSequence
@@ -218,7 +223,7 @@
 
   /**
    * Get the values of the fields as an ArrayList.
-   * 
+   *
    * @return The values of the fields as an ArrayList.
    */
   public ArrayList<Object> getFieldsAsList() {
@@ -244,7 +249,7 @@
   protected boolean getParsed() {
     return parsed;
   }
-  
+
   protected void setParsed(boolean parsed) {
     this.parsed = parsed;
   }
@@ -256,11 +261,11 @@
   protected void setFields(LazyObject[] fields) {
     this.fields = fields;
   }
-  
+
   protected boolean[] getFieldInited() {
     return fieldInited;
   }
-  
+
   protected void setFieldInited(boolean[] fieldInited) {
     this.fieldInited = fieldInited;
   }
Index: serde/src/java/org/apache/hadoop/hive/serde2/MetadataTypedColumnsetSerDe.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/MetadataTypedColumnsetSerDe.java	(revision 1125883)
+++ serde/src/java/org/apache/hadoop/hive/serde2/MetadataTypedColumnsetSerDe.java	(working copy)
@@ -131,15 +131,22 @@
     splitLimit = (lastColumnTakesRest && columnNames != null) ? columnNames
         .size() : -1;
 
-    LOG.debug(getClass().getName() + ": initialized with columnNames: "
-        + columnNames + " and separator code=" + (int) separator.charAt(0)
-        + " lastColumnTakesRest=" + lastColumnTakesRest + " splitLimit="
-        + splitLimit);
+    StringBuilder message = new StringBuilder();
+    message.append(getClass().getName());
+    message.append(": initialized with columnNames: ");
+    message.append(columnNames);
+    message.append(" and separator code=");
+    message.append((int) separator.charAt(0));
+    message.append(" lastColumnTakesRest=");
+    message.append(lastColumnTakesRest);
+    message.append(" splitLimit=");
+    message.append(splitLimit);
+    LOG.debug(message.toString());
   }
 
   /**
    * Split the row into columns.
-   * 
+   *
    * @param limit
    *          up to limit columns will be produced (the last column takes all
    *          the rest), -1 for unlimited.
@@ -214,11 +221,12 @@
     List<? extends StructField> fields = soi.getAllStructFieldRefs();
 
     StringBuilder sb = new StringBuilder();
+    Object column = null;
     for (int i = 0; i < fields.size(); i++) {
       if (i > 0) {
         sb.append(separator);
       }
-      Object column = soi.getStructFieldData(obj, fields.get(i));
+      column = soi.getStructFieldData(obj, fields.get(i));
       if (fields.get(i).getFieldObjectInspector().getCategory() == Category.PRIMITIVE) {
         // For primitive object, serialize to plain string
         sb.append(column == null ? nullString : column.toString());
Index: serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
===================================================================
--- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java	(revision 1125883)
+++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java	(working copy)
@@ -304,7 +304,8 @@
   public static StructField getStandardStructFieldRef(String fieldName,
       List<? extends StructField> fields) {
     fieldName = fieldName.toLowerCase();
-    for (int i = 0; i < fields.size(); i++) {
+    int fields_size = fields.size();
+    for (int i = 0; i < fields_size; i++) {
       ArrayList<Object> result = new ArrayList<Object>(fields.size());
-      for (int i = 0; i < fields.size(); i++) {
+      int fields_size = fields.size();
+      for (int i = 0; i < fields_size; i++) {
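
The ObjectInspectorUtils hunks above, like the fields_size variable introduced in ColumnarSerDe, hoist the size() call out of the loop condition, and several other hunks in this patch move per-iteration variable declarations outside the loop. For ArrayList-backed lists the measurable effect is usually small, but the shape of the change is as sketched below; the names are illustrative only:

    import java.util.List;

    public final class LoopHoistExample {
      // Sums string lengths, calling size() once instead of on every iteration.
      public static int totalLength(List<String> fields) {
        int total = 0;
        int fields_size = fields.size();
        String current;  // declared once, reused on each iteration
        for (int i = 0; i < fields_size; i++) {
          current = fields.get(i);
          total += current.length();
        }
        return total;
      }
    }
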