Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(revision 1100891)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(working copy)
@@ -162,6 +162,7 @@
   private static String INTERMEDIATE_ORIGINAL_DIR_SUFFIX;
   private static String INTERMEDIATE_EXTRACTED_DIR_SUFFIX;
 
+  @Override
   public boolean requireLock() {
     return this.work != null && this.work.getNeedLock();
   }
@@ -1627,13 +1628,11 @@
       }
       ((FSDataOutputStream) outStream).close();
     } catch (FileNotFoundException e) {
-      LOG.info("show partitions: " + stringifyException(e));
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in show partitions: ", e);
     } catch (IOException e) {
-      LOG.info("show partitions: " + stringifyException(e));
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in show partitions: ", e);
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in show partitions: ", e);
     }
 
     return 0;
@@ -1786,7 +1785,7 @@
       LOG.warn("show table: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in show table: ", e);
     }
     return 0;
   }
@@ -1826,13 +1825,13 @@
       }
       ((FSDataOutputStream) outStream).close();
     } catch (FileNotFoundException e) {
-      LOG.warn("show function: " + stringifyException(e));
+      LOG.warn("Error in show function: " + stringifyException(e));
       return 1;
     } catch (IOException e) {
-      LOG.warn("show function: " + stringifyException(e));
+      LOG.warn("Error in show function: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in show function: ", e);
     }
     return 0;
   }
@@ -2074,13 +2073,13 @@
 
       ((FSDataOutputStream) outStream).close();
     } catch (FileNotFoundException e) {
-      LOG.warn("describe function: " + stringifyException(e));
+      LOG.warn("Error in describe function: " + stringifyException(e));
       return 1;
     } catch (IOException e) {
-      LOG.warn("describe function: " + stringifyException(e));
+      LOG.warn("Error in describe function: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e.toString());
+      throw new HiveException("Error in describe function: ", e);
     }
     return 0;
   }
@@ -2251,13 +2250,13 @@
       }
       ((FSDataOutputStream) outStream).close();
     } catch (FileNotFoundException e) {
-      LOG.info("show table status: " + stringifyException(e));
+      LOG.info("Error in show table status: " + stringifyException(e));
       return 1;
     } catch (IOException e) {
-      LOG.info("show table status: " + stringifyException(e));
+      LOG.info("Error in show table status: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e);
+      throw new HiveException("Error in show table status: ", e);
     }
     return 0;
   }
@@ -2376,13 +2375,13 @@
 
       ((FSDataOutputStream) outStream).close();
     } catch (FileNotFoundException e) {
-      LOG.info("describe table: " + stringifyException(e));
+      LOG.info("Error in describe table: " + stringifyException(e));
       return 1;
     } catch (IOException e) {
-      LOG.info("describe table: " + stringifyException(e));
+      LOG.info("Error in describe table: " + stringifyException(e));
       return 1;
     } catch (Exception e) {
-      throw new HiveException(e);
+      throw new HiveException("Error in describe table: ", e);
     }
 
     return 0;
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExecReducer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ExecReducer.java	(revision 1100891)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecReducer.java	(working copy)
@@ -221,13 +221,15 @@
           valueObject[tag.get()] = inputValueDeserializer[tag.get()]
               .deserialize(valueWritable);
         } catch (SerDeException e) {
-          throw new HiveException(
-              "Hive Runtime Error: Unable to deserialize reduce input value (tag="
-                  + tag.get()
-                  + ") from "
-                  + Utilities.formatBinaryString(valueWritable.get(), 0,
-                      valueWritable.getSize()) + " with properties "
-                  + valueTableDesc[tag.get()].getProperties(), e);
+          StringBuilder errorMsg = new StringBuilder("Hive Runtime Error: "
+              + "Unable to deserialize reduce input value (tag=");
+          errorMsg.append(tag.get());
+          errorMsg.append(") from ");
+          errorMsg.append(Utilities.formatBinaryString(valueWritable.get(), 0,
+              valueWritable.getSize()));
+          errorMsg.append(" with properties ");
+          errorMsg.append(valueTableDesc[tag.get()].getProperties());
+          throw new HiveException(errorMsg.toString(), e);
         }
         row.clear();
         row.add(keyObject);
@@ -238,8 +240,9 @@
         cntr++;
         if (cntr == nextCntr) {
           long used_memory = memoryMXBean.getHeapMemoryUsage().getUsed();
-          l4j.info("ExecReducer: processing " + cntr
-              + " rows: used memory = " + used_memory);
+          StringBuilder errorMsg = new StringBuilder("ExecReducer: processing ").append(cntr)
+              .append(" rows: used memory = ").append(used_memory);
+          l4j.info(errorMsg.toString());
           nextCntr = getNextCntr(cntr);
         }
       }
@@ -253,8 +256,9 @@
        rowString = "[Error getting row data with exception "
            + StringUtils.stringifyException(e2) + " ]";
      }
-      throw new HiveException("Hive Runtime Error while processing row (tag="
-          + tag.get() + ") " + rowString, e);
+      StringBuilder errorMsg = new StringBuilder("Hive Runtime Error while processing row (tag=")
+          .append(tag.get()).append(") ").append(rowString);
+      throw new HiveException(errorMsg.toString(), e);
     }
   }
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java	(revision 1100891)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java	(working copy)
@@ -284,9 +284,11 @@
       serde.initialize(job, tmp.getProperties());
 
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Creating fetchTask with deserializer typeinfo: "
-            + serde.getObjectInspector().getTypeName());
-        LOG.debug("deserializer properties: " + tmp.getProperties());
+        StringBuilder sb = new StringBuilder("Creating fetchTask with deserializer typeinfo: ");
+        sb.append(serde.getObjectInspector().getTypeName());
+        sb.append("\ndeserializer properties: ");
+        sb.append(tmp.getProperties());
+        LOG.debug(sb.toString());
       }
 
       if (currPart != null) {
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java	(revision 1100891)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java	(working copy)
@@ -27,7 +27,6 @@
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -42,7 +41,6 @@
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
 import org.apache.hadoop.hive.ql.stats.StatsSetupConst;
@@ -50,10 +48,10 @@
 import org.apache.hadoop.hive.serde2.Serializer;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.SubStructObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.LongWritable;
@@ -573,9 +571,9 @@
         rowOutWriters[idx].write(recordValue);
       }
     } catch (IOException e) {
-      throw new HiveException(e);
+      throw new HiveException("Error in processOp: ", e);
     } catch (SerDeException e) {
-      throw new HiveException(e);
+      throw new HiveException("Error in processOp: ", e);
     }
   }
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java	(revision 1100891)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java	(working copy)
@@ -122,7 +122,6 @@
 import org.apache.hadoop.hive.ql.udf.UDFWeekOfYear;
 import org.apache.hadoop.hive.ql.udf.UDFYear;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEWAHBitmap;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFBridge;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCollectSet;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFContextNGrams;
@@ -130,6 +129,7 @@
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCount;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCovariance;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCovarianceSample;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEWAHBitmap;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFHistogramNumeric;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMax;
@@ -147,13 +147,13 @@
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFArray;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFArrayContains;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFEWAHBitmapAnd;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFEWAHBitmapOr;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFEWAHBitmapEmpty;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFCase;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFCoalesce;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFConcatWS;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFEWAHBitmapAnd;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFEWAHBitmapEmpty;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFEWAHBitmapOr;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFElt;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFField;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFHash;
@@ -451,8 +451,9 @@
           new GenericUDFBridge(displayName, isOperator, UDFClass));
       mFunctions.put(functionName.toLowerCase(), fI);
     } else {
-      throw new RuntimeException("Registering UDF Class " + UDFClass
-          + " which does not extend " + UDF.class);
+      StringBuilder errorMsg = new StringBuilder("Registering UDF Class ").append(UDFClass)
+          .append(" which does not extend ").append(UDF.class);
+      throw new RuntimeException(errorMsg.toString());
     }
   }
@@ -473,8 +474,9 @@
           (GenericUDF) ReflectionUtils.newInstance(genericUDFClass, null));
       mFunctions.put(functionName.toLowerCase(), fI);
     } else {
-      throw new RuntimeException("Registering GenericUDF Class "
-          + genericUDFClass + " which does not extend " + GenericUDF.class);
+      StringBuilder errorMsg = new StringBuilder("Registering GenericUDF Class ")
+          .append(genericUDFClass).append(" which does not extend ").append(GenericUDF.class);
+      throw new RuntimeException(errorMsg.toString());
     }
   }
@@ -495,8 +497,9 @@
           (GenericUDTF) ReflectionUtils.newInstance(genericUDTFClass, null));
       mFunctions.put(functionName.toLowerCase(), fI);
     } else {
-      throw new RuntimeException("Registering GenericUDTF Class "
-          + genericUDTFClass + " which does not extend " + GenericUDTF.class);
+      StringBuilder errorMsg = new StringBuilder("Registering GenericUDTF Class ")
+          .append(genericUDTFClass).append(" which does not extend ").append(GenericUDTF.class);
+      throw new RuntimeException(errorMsg.toString());
     }
   }
@@ -800,9 +803,10 @@
       argumentString.append("} of size " + arguments.length);
     }
 
-      throw new HiveException("Unable to execute method " + m + " "
-          + " on object " + thisObjectString + " with arguments "
-          + argumentString.toString(), e);
+      StringBuilder errorMsg = new StringBuilder("Unable to execute method ").append(m)
+          .append(" on object ").append(thisObjectString).append(" with arguments ")
+          .append(argumentString.toString());
+      throw new HiveException(errorMsg.toString(), e);
     }
     return o;
   }
@@ -902,9 +906,10 @@
         }
       }
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Method " + (match ? "did" : "didn't") + " match: passed = "
-            + argumentsPassed + " accepted = " + argumentsAccepted +
-            " method = " + m);
+        StringBuilder errorMsg = new StringBuilder("Method ").append((match ? "did" : "didn't"))
+            .append(" match: passed = ").append(argumentsPassed).append(" accepted = ")
+            .append(argumentsAccepted).append(" method = ").append(m);
+        LOG.debug(errorMsg.toString());
       }
       if (match) {
         // Always choose the function with least implicit conversions.
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java	(revision 1100891)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java	(working copy)
@@ -698,17 +698,19 @@
       numRowsCompareHashAggr += groupbyMapAggrInterval;
       // map-side aggregation should reduce the entries by at-least half
       if (numRowsHashTbl > numRowsInput * minReductionHashAggr) {
-        LOG.warn("Disable Hash Aggr: #hash table = " + numRowsHashTbl
-            + " #total = " + numRowsInput + " reduction = " + 1.0
-            * (numRowsHashTbl / numRowsInput) + " minReduction = "
-            + minReductionHashAggr);
+        StringBuilder errorMsg = new StringBuilder("Disable Hash Aggr: #hash table = ")
+            .append(numRowsHashTbl).append(" #total = ").append(numRowsInput)
+            .append(" reduction = ").append(1.0 * numRowsHashTbl / numRowsInput)
+ append(" minReduction = ").append(minReductionHashAggr); + LOG.warn(errorMsg.toString()); flush(true); hashAggr = false; } else { - LOG.trace("Hash Aggr Enabled: #hash table = " + numRowsHashTbl - + " #total = " + numRowsInput + " reduction = " + 1.0 - * (numRowsHashTbl / numRowsInput) + " minReduction = " - + minReductionHashAggr); + StringBuilder errorMsg = new StringBuilder("Hash Aggr Enabled: #hash table = "). + append(numRowsHashTbl).append(" #total = ").append(numRowsInput). + append(" reduction = ").append(1.0 * (numRowsHashTbl / numRowsInput)). + append(" minReduction = ").append(minReductionHashAggr); + LOG.trace(errorMsg.toString()); } } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java (revision 1100891) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java (working copy) @@ -45,10 +45,10 @@ import org.apache.hadoop.mapred.TaskReport; public class HadoopJobExecHelper { - + protected transient JobConf job; protected Task task; - + protected transient int mapProgress = 0; protected transient int reduceProgress = 0; public transient String jobId; @@ -69,10 +69,10 @@ return; } if(callBackObj != null) { - callBackObj.updateCounters(ctrs, rj); + callBackObj.updateCounters(ctrs, rj); } } - + /** * This msg pattern is used to track when a job is started. * @@ -113,7 +113,7 @@ return reduceProgress == 100; } - + public String getJobId() { return jobId; } @@ -122,10 +122,10 @@ this.jobId = jobId; } - + public HadoopJobExecHelper() { } - + public HadoopJobExecHelper(JobConf job, LogHelper console, Task task, HadoopJobExecHook hookCallBack) { this.job = job; @@ -134,7 +134,7 @@ this.callBackObj = hookCallBack; } - + /** * A list of the currently running jobs spawned in this Hive instance that is used to kill all * running jobs in the event of an unexpected shutdown - i.e., the JVM shuts down while there are @@ -143,7 +143,7 @@ public static Map runningJobKillURIs = Collections .synchronizedMap(new HashMap()); - + /** * In Hive, when the user control-c's the command line, any running jobs spawned from that command * line are best-effort killed. @@ -180,7 +180,7 @@ }); } } - + public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) { if (ctrs == null) { // hadoop might return null if it cannot locate the job. @@ -196,7 +196,7 @@ } return this.callBackObj.checkFatalErrors(ctrs, errMsg); } - + private boolean progress(ExecDriverTaskHandle th) throws IOException { JobClient jc = th.getJobClient(); RunningJob rj = th.getRunningJob(); @@ -229,7 +229,7 @@ String logMapper; String logReducer; - + TaskReport[] mappers = jc.getMapTaskReports(rj.getJobID()); if (mappers == null) { logMapper = "no information for number of mappers; "; @@ -241,7 +241,7 @@ } logMapper = "number of mappers: " + numMap + "; "; } - + TaskReport[] reducers = jc.getReduceTaskReports(rj.getJobID()); if (reducers == null) { logReducer = "no information for number of reducers. 
"; @@ -338,7 +338,7 @@ // LOG.info(queryPlan); return (success); } - + private String getId() { return this.task.getId(); } @@ -357,8 +357,13 @@ } console.printInfo(getJobStartMsg(rj.getJobID()) + ", Tracking URL = " + rj.getTrackingURL()); - console.printInfo("Kill Command = " + HiveConf.getVar(job, HiveConf.ConfVars.HADOOPBIN) - + " job -Dmapred.job.tracker=" + hp + " -kill " + rj.getJobID()); + StringBuilder logMsg = new StringBuilder("Kill Command = "); + logMsg.append(HiveConf.getVar(job, HiveConf.ConfVars.HADOOPBIN)); + logMsg.append(" job -Dmapred.job.tracker="); + logMsg.append(hp); + logMsg.append(" -kill "); + logMsg.append(rj.getJobID()); + console.printInfo(logMsg.toString()); } } @@ -392,7 +397,7 @@ return rj.getCounters(); } } - + // Used for showJobFailDebugInfo private static class TaskInfo { String jobId; @@ -415,7 +420,7 @@ return jobId; } } - + @SuppressWarnings("deprecation") private void showJobFailDebugInfo(JobConf conf, RunningJob rj) throws IOException { // Mapping from task ID to the number of failures @@ -545,9 +550,9 @@ public int progress(RunningJob rj, JobClient jc) throws IOException { jobId = rj.getJobID(); - + int returnVal = 0; - + // remove the pwd from conf file so that job tracker doesn't show this // logs String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD); @@ -579,7 +584,7 @@ } else { console.printInfo(statusMesg); } - + return returnVal; } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java (revision 1100891) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java (working copy) @@ -113,8 +113,9 @@ // operand // We won't output a warning for the last join operand since the size // will never goes to joinEmitInterval. - LOG.warn("table " + alias + " has " + sz + " rows for join key " - + keyObject); + StringBuilder errorMsg = new StringBuilder("table ").append(alias).append(" has ").append(sz). + append(" rows for join key ").append(keyObject); + LOG.warn(errorMsg.toString()); nextSz = getNextSize(nextSz); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java (revision 1100891) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java (working copy) @@ -383,6 +383,9 @@ break; } } + StringBuilder logMsg = new StringBuilder(" BytesPerReducer = " ).append(bytesPerReducer) + .append(" maxReducers=").append(maxReducers).append(" estimated totalInputFileSize = "); + if (allSample) { // This is a little bit dangerous if inputs turns out not to be able to be sampled. // In that case, we significantly underestimate number of reducers. @@ -390,11 +393,9 @@ // guess and there is no guarantee. 
       totalInputFileSize = Math.min((long) (totalInputFileSize * highestSamplePercentage / 100D)
           , totalInputFileSize);
-      LOG.info("BytesPerReducer=" + bytesPerReducer + " maxReducers="
-          + maxReducers + " estimated totalInputFileSize=" + totalInputFileSize);
+      LOG.info(logMsg.append(totalInputFileSize).toString());
     } else {
-      LOG.info("BytesPerReducer=" + bytesPerReducer + " maxReducers="
-          + maxReducers + " totalInputFileSize=" + totalInputFileSize);
+      LOG.info(logMsg.append(totalInputFileSize).toString());
     }
 
     int reducers = (int) ((totalInputFileSize + bytesPerReducer - 1) / bytesPerReducer);
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java	(revision 1100891)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java	(working copy)
@@ -256,12 +256,15 @@
       }
 
       String[] wrappedCmdArgs = addWrapper(cmdArgs);
-      LOG.info("Executing " + Arrays.asList(wrappedCmdArgs));
-      LOG.info("tablename="
-          + hconf.get(HiveConf.ConfVars.HIVETABLENAME.varname));
-      LOG.info("partname="
-          + hconf.get(HiveConf.ConfVars.HIVEPARTITIONNAME.varname));
-      LOG.info("alias=" + alias);
+      StringBuilder sb = new StringBuilder("Executing ");
+      sb.append(Arrays.asList(wrappedCmdArgs));
+      sb.append("\ntablename=");
+      sb.append(hconf.get(HiveConf.ConfVars.HIVETABLENAME.varname));
+      sb.append("\npartname=");
+      sb.append(hconf.get(HiveConf.ConfVars.HIVEPARTITIONNAME.varname));
+      sb.append("\nalias=");
+      sb.append(alias);
+      LOG.info(sb.toString());
 
       ProcessBuilder pb = new ProcessBuilder(wrappedCmdArgs);
       Map<String, String> env = pb.environment();
@@ -567,8 +570,7 @@
       } catch (Throwable th) {
         scriptError = th;
         LOG.warn("Exception in StreamThread.run(): " + th.getMessage() +
-            "\nCause: " + th.getCause());
-        LOG.warn(StringUtils.stringifyException(th));
+            "\nCause: " + th.getCause(), scriptError);
       } finally {
         try {
           if (in != null) {
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java	(revision 1100891)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java	(working copy)
@@ -338,13 +338,23 @@
       }
     }
 
-    if ((spec == null) || (spec.size() != partCols.size())) {
-      throw new HiveException(
-          "table is partitioned but partition spec is not specified or"
-          + " does not fully match table partitioning: "
-          + spec);
+    if (spec == null) {
+      throw new HiveException("table is partitioned but no partition spec was specified");
     }
+
+    if (spec.size() != partCols.size()) {
+      StringBuilder sb = new StringBuilder();
+      sb.append("table is partitioned but the specified partition spec does not match ");
+      sb.append("the table's partition columns. The table has ");
+      sb.append(partCols.size());
+      sb.append(" partition columns and the specified partition spec has ");
+      sb.append(spec.size());
+      sb.append(" entries: ");
+      sb.append(spec);
+      throw new HiveException(sb.toString());
+    }
+
 
     for (FieldSchema field : partCols) {
       if (spec.get(field.getName()) == null) {
         throw new HiveException(field.getName()
@@ -709,7 +719,7 @@
   public boolean isView() {
     return TableType.VIRTUAL_VIEW.equals(getTableType());
   }
-  
+
   /**
    * @return whether this table is actually an index table
    */
Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java	(revision 1100891)
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java	(working copy)
@@ -165,10 +165,14 @@
   public static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr,
       HiveConf conf, String alias,
      Map<String, PrunedPartitionList> prunedPartitionsMap) throws HiveException {
-    LOG.trace("Started pruning partiton");
-    LOG.trace("dbname = " + tab.getDbName());
-    LOG.trace("tabname = " + tab.getTableName());
-    LOG.trace("prune Expression = " + prunerExpr);
+    StringBuilder sb = new StringBuilder("Started pruning partition");
+    sb.append("\ndbname = ");
+    sb.append(tab.getDbName());
+    sb.append("\ntabname = ");
+    sb.append(tab.getTableName());
+    sb.append("\nprune Expression = ");
+    sb.append(prunerExpr);
+    LOG.trace(sb.toString());
 
     String key = tab.getDbName() + "." + tab.getTableName() + ";";
 
@@ -195,9 +199,10 @@
     if ("strict".equalsIgnoreCase(HiveConf.getVar(conf,
         HiveConf.ConfVars.HIVEMAPREDMODE))) {
       if (!hasColumnExpr(prunerExpr)) {
+        StringBuilder errorMsg = new StringBuilder("for Alias \"").append(alias)
+            .append("\" Table \"").append(tab.getTableName()).append("\"");
         throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE
-            .getMsg("for Alias \"" + alias + "\" Table \""
-                + tab.getTableName() + "\""));
+            .getMsg(errorMsg.toString()));
       }
     }
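
Note for reviewers: the sketch below is not part of the patch. It is a small, self-contained illustration of the two conventions the patch applies across the files above: wrapping a low-level exception in a higher-level exception that carries both a contextual message and the original cause (rather than e.toString()), and assembling multi-part log messages with a StringBuilder, ideally behind an is-enabled check. The ReportFormatter and FormatterException names are invented for the example and do not exist in Hive; this is a minimal sketch of the pattern, not project code.

import java.io.IOException;

// Stand-in for a checked, high-level exception such as HiveException.
class FormatterException extends Exception {
  private static final long serialVersionUID = 1L;

  FormatterException(String message, Throwable cause) {
    super(message, cause); // keep the cause so the original stack trace is preserved
  }
}

public class ReportFormatter {
  // Stand-in for LOG.isDebugEnabled(); toggled via -Dformatter.debug=true.
  private static final boolean DEBUG_ENABLED = Boolean.getBoolean("formatter.debug");

  public static String format(String name, int rows) throws FormatterException {
    try {
      if (rows < 0) {
        throw new IOException("negative row count: " + rows);
      }
      return name + ": " + rows + " rows";
    } catch (IOException e) {
      // Pattern 1: add context to the message and chain the cause.
      StringBuilder errorMsg = new StringBuilder("Error formatting report for table ")
          .append(name).append(" (rows=").append(rows).append(")");
      throw new FormatterException(errorMsg.toString(), e);
    }
  }

  public static void main(String[] args) throws FormatterException {
    // Pattern 2: build the log line once, and only when the level is enabled.
    if (DEBUG_ENABLED) {
      StringBuilder sb = new StringBuilder("formatting report")
          .append("\ntable = ").append("sample_table")
          .append("\nrows = ").append(42);
      System.out.println(sb.toString());
    }
    System.out.println(format("sample_table", 42));
  }
}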