Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java	(revision 1005291)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java	(working copy)
@@ -186,13 +202,26 @@
     String[] env;
     Map variables = new HashMap(System.getenv());
     // The user can specify the hadoop memory
-    int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM);
-    if (hadoopMem == 0) {
-      variables.remove(HADOOP_MEM_KEY);
+
+    if ("local".equals(conf.getVar(HiveConf.ConfVars.HADOOPJT))) {
+      // if we are running in local mode - then the amount of memory used
+      // by the child jvm can no longer default to the memory used by the
+      // parent jvm
+      int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM);
+      if (hadoopMem == 0) {
+        // remove env var that would default child jvm to use parent's memory
+        // as default. child jvm would use default memory for a hadoop client
+        variables.remove(HADOOP_MEM_KEY);
+      } else {
+        // user specified the memory for local mode hadoop run
+        variables.put(HADOOP_MEM_KEY, String.valueOf(hadoopMem));
+      }
     } else {
-      // user specified the memory - only applicable for local mode
-      variables.put(HADOOP_MEM_KEY, String.valueOf(hadoopMem));
+      // nothing to do - we are not running in local mode - only submitting
+      // the job via a child process. in this case it's appropriate that the
+      // child jvm use the same memory as the parent jvm
     }
+
     if (variables.containsKey(HADOOP_OPTS_KEY)) {
       variables.put(HADOOP_OPTS_KEY, variables.get(HADOOP_OPTS_KEY) + hadoopOpts);
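
For reference, below is a minimal standalone sketch of the decision the hunk ends up encoding: in local mode the child JVM's heap is driven by HIVEHADOOPMAXMEM (unset means fall back to the hadoop client default), while in non-local mode the environment is left alone because the child only submits the job. The class, the helper parameters (jobTrackerSetting, configuredHadoopMem) and the literal "HADOOP_HEAPSIZE" value are assumptions made for illustration; the real code reads conf.getVar(HiveConf.ConfVars.HADOOPJT), conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM) and the class's HADOOP_MEM_KEY constant.

import java.util.HashMap;
import java.util.Map;

// Hypothetical, self-contained sketch of the post-patch logic, outside Hive.
public class LocalModeMemSketch {
  // assumption: the env var consumed by the hadoop launcher script
  private static final String HADOOP_MEM_KEY = "HADOOP_HEAPSIZE";

  static Map<String, String> childEnv(String jobTrackerSetting, int configuredHadoopMem) {
    Map<String, String> variables = new HashMap<>(System.getenv());
    if ("local".equals(jobTrackerSetting)) {
      // local mode: the child jvm should not silently inherit the parent's heap size
      if (configuredHadoopMem == 0) {
        // unset -> child jvm uses the default memory of a hadoop client
        variables.remove(HADOOP_MEM_KEY);
      } else {
        // user explicitly sized the local-mode hadoop run
        variables.put(HADOOP_MEM_KEY, String.valueOf(configuredHadoopMem));
      }
    }
    // non-local mode: nothing to do - the child only submits the job, so
    // sharing the parent's memory setting is appropriate
    return variables;
  }

  public static void main(String[] args) {
    System.out.println(childEnv("local", 0).containsKey(HADOOP_MEM_KEY));   // false (unless inherited elsewhere)
    System.out.println(childEnv("local", 1024).get(HADOOP_MEM_KEY));        // 1024
  }
}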