Index: src/ql/src/java/org/apache/hadoop/hive/ql/io/OneNullRowInputFormat.java
===================================================================
--- src/ql/src/java/org/apache/hadoop/hive/ql/io/OneNullRowInputFormat.java (revision 21679)
+++ src/ql/src/java/org/apache/hadoop/hive/ql/io/OneNullRowInputFormat.java (revision )
@@ -25,14 +25,17 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.mapred.FileSplit;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobConfigurable;
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * OneNullRowInputFormat outputs one null row. Used in implementation of
@@ -47,12 +50,18 @@
   List partitions;
   long len;
 
-  static public class DummyInputSplit implements InputSplit {
+  static public class DummyInputSplit extends FileSplit implements InputSplit {
+    public DummyInputSplit() {
+      super((Path) null, 0, 0, (String[]) null);
+    }
 
+    public DummyInputSplit(Path path) {
+      super(path, 0, 0, (String[]) null);
+    }
+
     @Override
-    public long getLength() throws IOException {
+    public long getLength() {
       return 1;
     }
@@ -61,16 +70,8 @@
       return new String[0];
     }
 
-    @Override
-    public void readFields(DataInput arg0) throws IOException {
-    }
+  }
 
-    @Override
-    public void write(DataOutput arg0) throws IOException {
-    }
-
-  }
-
   static public class OneNullRowRecordReader implements RecordReader {
     private boolean processed = false;
     public OneNullRowRecordReader() {
@@ -120,7 +121,9 @@
   @Override
   public InputSplit[] getSplits(JobConf arg0, int arg1) throws IOException {
     InputSplit[] ret = new InputSplit[1];
-    ret[0] = new DummyInputSplit();
+    String paths = arg0.get("mapred.input.dir");
+    String[] pathArray = StringUtils.split(paths);
+    ret[0] = new DummyInputSplit(new Path(pathArray[0]));
     LOG.info("Calculating splits");
     return ret;
   }
\ No newline at end of file
Index: src/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
===================================================================
--- src/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java (revision 21679)
+++ src/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java (revision )
@@ -104,7 +104,7 @@
     if (inputSplit instanceof FileSplit) {
       return ((FileSplit) inputSplit).getPath();
     }
-    return new Path("");
+    return null;
   }
 
   /** The position of the first byte in the file to process. */
Index: src/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MetadataOnlyOptimizer.java
===================================================================
--- src/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MetadataOnlyOptimizer.java (revision 21679)
+++ src/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MetadataOnlyOptimizer.java (revision )
@@ -237,9 +237,7 @@
           + newPartition.getTableName() + encode(newPartition.getPartSpec()));
       work.getPathToPartitionInfo().remove(path);
-      work.getPathToPartitionInfo().put(fakePath.getName(), newPartition);
-      ArrayList aliases = work.getPathToAliases().remove(path);
-      work.getPathToAliases().put(fakePath.getName(), aliases);
+      work.getPathToPartitionInfo().put(path, newPartition);
     }
   }
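
Note: the sketch below is not part of the patch; it only illustrates the intent of the getSplits() change, namely that the single dummy split now extends FileSplit and carries a real input path taken from "mapred.input.dir", so callers such as HiveInputFormat.getPath() see a non-null path instead of a fake one. Class name and the example path are hypothetical.

// DummySplitPathSketch.java -- standalone illustration, assumes only the
// standard Hadoop mapred API used by the patch above.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.util.StringUtils;

public class DummySplitPathSketch {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // Hypothetical input directory; in a real job Hive sets this property.
    conf.set("mapred.input.dir", "/user/hive/warehouse/t/part=1");

    // Mirrors the patched getSplits(): split the configured input dirs
    // (Hadoop's StringUtils.split uses ',' as the separator) and wrap the
    // first one in a FileSplit-backed split of length 0.
    String[] dirs = StringUtils.split(conf.get("mapred.input.dir"));
    FileSplit split = new FileSplit(new Path(dirs[0]), 0, 0, (String[]) null);

    // Downstream code can now recover the path the way the patched
    // HiveInputFormat does, instead of falling back to new Path(""):
    InputSplit generic = split;
    if (generic instanceof FileSplit) {
      System.out.println(((FileSplit) generic).getPath());
    }
  }
}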