Index: lucene/src/test/org/apache/lucene/codecs/lucene40/TestDocValues.java
===================================================================
--- lucene/src/test/org/apache/lucene/codecs/lucene40/TestDocValues.java	(revision 1227169)
+++ lucene/src/test/org/apache/lucene/codecs/lucene40/TestDocValues.java	(working copy)
@@ -20,10 +20,10 @@
 import java.io.IOException;
 import java.util.Comparator;
 
+import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.lucene40.values.Bytes;
 import org.apache.lucene.codecs.lucene40.values.Floats;
 import org.apache.lucene.codecs.lucene40.values.Ints;
-import org.apache.lucene.codecs.lucene40.values.Writer;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValues.SortedSource;
 import org.apache.lucene.index.DocValues.Source;
@@ -63,7 +63,7 @@
 
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    Writer w = Bytes.getWriter(dir, "test", mode, fixedSize, COMP, trackBytes, newIOContext(random),
+    DocValuesConsumer w = Bytes.getWriter(dir, "test", mode, fixedSize, COMP, trackBytes, newIOContext(random),
         random.nextBoolean());
     int maxDoc = 220;
     final String[] values = new String[maxDoc];
@@ -170,7 +170,7 @@
     for (int i = 0; i < minMax.length; i++) {
       Directory dir = newDirectory();
       final Counter trackBytes = Counter.newCounter();
-      Writer w = Ints.getWriter(dir, "test", trackBytes, Type.VAR_INTS, newIOContext(random));
+      DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.VAR_INTS, newIOContext(random));
       w.add(0, minMax[i][0]);
       w.add(1, minMax[i][1]);
       w.finish(2);
@@ -203,7 +203,7 @@
     byte[] sourceArray = new byte[] {1,2,3};
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    Writer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_8, newIOContext(random));
+    DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_8, newIOContext(random));
     for (int i = 0; i < sourceArray.length; i++) {
       w.add(i, (long) sourceArray[i]);
     }
@@ -224,7 +224,7 @@
     short[] sourceArray = new short[] {1,2,3};
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    Writer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_16, newIOContext(random));
+    DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_16, newIOContext(random));
     for (int i = 0; i < sourceArray.length; i++) {
       w.add(i, (long) sourceArray[i]);
     }
@@ -245,7 +245,7 @@
     long[] sourceArray = new long[] {1,2,3};
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    Writer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_64, newIOContext(random));
+    DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_64, newIOContext(random));
     for (int i = 0; i < sourceArray.length; i++) {
       w.add(i, sourceArray[i]);
     }
@@ -266,7 +266,7 @@
     int[] sourceArray = new int[] {1,2,3};
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    Writer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_32, newIOContext(random));
+    DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_32, newIOContext(random));
     for (int i = 0; i < sourceArray.length; i++) {
       w.add(i, (long) sourceArray[i]);
     }
@@ -287,7 +287,7 @@
     float[] sourceArray = new float[] {1,2,3};
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    Writer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), Type.FLOAT_32);
+    DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), Type.FLOAT_32);
     for (int i = 0; i < sourceArray.length; i++) {
       w.add(i, sourceArray[i]);
     }
@@ -308,7 +308,7 @@
     double[] sourceArray = new double[] {1,2,3};
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    Writer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), Type.FLOAT_64);
+    DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), Type.FLOAT_64);
     for (int i = 0; i < sourceArray.length; i++) {
       w.add(i, sourceArray[i]);
     }
@@ -332,7 +332,7 @@
     for (int rx = 1; rx < maxBit; rx++, maxV *= 2) {
       Directory dir = newDirectory();
       final Counter trackBytes = Counter.newCounter();
-      Writer w = Ints.getWriter(dir, "test", trackBytes, type, newIOContext(random));
+      DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, type, newIOContext(random));
       for (int i = 0; i < NUM_VALUES; i++) {
         final long v = random.nextLong() % (1 + maxV);
         values[i] = v;
@@ -364,7 +364,7 @@
   private void runTestFloats(Type type, double delta) throws IOException {
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    Writer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), type);
+    DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), type);
     final int NUM_VALUES = 777 + random.nextInt(777);;
     final double[] values = new double[NUM_VALUES];
     for (int i = 0; i < NUM_VALUES; i++) {
Index: lucene/src/test/org/apache/lucene/search/TestDocValuesScoring.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestDocValuesScoring.java	(revision 1227169)
+++ lucene/src/test/org/apache/lucene/search/TestDocValuesScoring.java	(working copy)
@@ -30,13 +30,11 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.search.similarities.SimilarityProvider;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.TermContext;
 
 /**
  * Tests the use of indexdocvalues in scoring.
Index: lucene/src/java/org/apache/lucene/store/CompoundFileWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/store/CompoundFileWriter.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/store/CompoundFileWriter.java	(working copy)
@@ -91,7 +91,7 @@
   // all entries that are written to a sep. file but not yet moved into CFS
   private final Queue<FileEntry> pendingEntries = new LinkedList<FileEntry>();
   private boolean closed = false;
-  private volatile IndexOutput dataOut;
+  private IndexOutput dataOut;
   private final AtomicBoolean outputTaken = new AtomicBoolean(false);
   final String entryTableName;
   final String dataFileName;
@@ -113,16 +113,25 @@
         IndexFileNames.stripExtension(name), "",
         IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION);
     dataFileName = name;
-    boolean success = false;
-    try {
-      dataOut = directory.createOutput(dataFileName, IOContext.DEFAULT);
-      dataOut.writeVInt(FORMAT_CURRENT);
-      success = true;
-    } finally {
-      if (!success) {
-        IOUtils.closeWhileHandlingException(dataOut);
+    
+  }
+  
+  private synchronized IndexOutput getOutput() throws IOException {
+    if (dataOut == null) {
+      IndexOutput dataOutput = null;
+      boolean success = false;
+      try {
+        dataOutput = directory.createOutput(dataFileName, IOContext.DEFAULT);
+        dataOutput.writeVInt(FORMAT_CURRENT);
+        dataOut = dataOutput;
+        success = true;
+      } finally {
+        if (!success) {
+          IOUtils.closeWhileHandlingException(dataOutput);
+        }
       }
-    }
+    }
+    return dataOut;
   }
 
   /** Returns the directory of the compound file. */
@@ -148,11 +157,13 @@
     }
     IOException priorException = null;
     IndexOutput entryTableOut = null;
+    IndexOutput dataOut = null;
     try {
       if (!pendingEntries.isEmpty() || outputTaken.get()) {
         throw new IllegalStateException("CFS has pending open files");
       }
       closed = true;
+      dataOut = getOutput();
       // open the compound stream
       assert dataOut != null;
       long finalLength = dataOut.getFilePointer();
@@ -246,7 +257,7 @@
       seenIDs.add(id);
       final DirectCFSIndexOutput out;
       if (outputTaken.compareAndSet(false, true)) {
-        out = new DirectCFSIndexOutput(dataOut, entry, false);
+        out = new DirectCFSIndexOutput(getOutput(), entry, false);
         outputLocked = true;
         success = true;
       } else {
@@ -280,7 +291,7 @@
       try {
         while (!pendingEntries.isEmpty()) {
           FileEntry entry = pendingEntries.poll();
-          copyFileEntry(dataOut, entry);
+          copyFileEntry(getOutput(), entry);
           entries.put(entry.file, entry);
         }
       } finally {
Index: lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40NormsFormat.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40NormsFormat.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40NormsFormat.java	(working copy)
@@ -1,53 +1,129 @@
 package org.apache.lucene.codecs.lucene40;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
  * The ASF licenses this file to You under the Apache License, Version 2.0
  * (the "License"); you may not use this file except in compliance with
  * the License.  You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 import java.io.IOException;
 import java.util.Set;
 
 import org.apache.lucene.codecs.NormsFormat;
-import org.apache.lucene.codecs.NormsReader;
-import org.apache.lucene.codecs.NormsWriter;
+import org.apache.lucene.codecs.PerDocConsumer;
+import org.apache.lucene.codecs.PerDocProducer;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocValues.Type;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.PerDocWriteState;
 import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
 
 public class Lucene40NormsFormat extends NormsFormat {
-
+  private final static String NORMS_SEGMENT_SUFFIX = "nrm";
+  
   @Override
-  public NormsReader normsReader(Directory dir, SegmentInfo info, FieldInfos fields, IOContext context, Directory separateNormsDir) throws IOException {
-    return new Lucene40NormsReader(dir, info, fields, context, separateNormsDir);
+  public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
+    return new Lucene40NormsDocValuesConsumer(state, NORMS_SEGMENT_SUFFIX);
   }
 
   @Override
-  public NormsWriter normsWriter(SegmentWriteState state) throws IOException {
-    return new Lucene40NormsWriter(state.directory, state.segmentName, state.context);
+  public PerDocProducer docsProducer(SegmentReadState state) throws IOException {
+    return new Lucene40NormsDocValuesProducer(state, NORMS_SEGMENT_SUFFIX);
   }
 
   @Override
-  public void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
-    Lucene40NormsReader.files(dir, info, files);
+  public void files(Directory dir, SegmentInfo info, Set<String> files)
+      throws IOException {
+    Lucene40NormsDocValuesConsumer.files(dir, info, files);
+
   }
 
   @Override
-  public void separateFiles(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
-    Lucene40NormsReader.separateFiles(dir, info, files);
+  public PerDocProducer docsProducer(SegmentReadState state,
+      Directory separateNormsDir) throws IOException {
+    return docsProducer(state);
   }
+  
+ 
+  public static class Lucene40NormsDocValuesProducer extends Lucene40DocValuesProducer {
+
+    public Lucene40NormsDocValuesProducer(SegmentReadState state,
+        String segmentSuffix) throws IOException {
+      super(state, segmentSuffix);
+    }
+
+    @Override
+    protected boolean canLoad(FieldInfo info) {
+      return !info.omitNorms && info.isIndexed;
+    }
+
+    @Override
+    protected Type getDocValuesType(FieldInfo info) {
+      return Type.BYTES_FIXED_STRAIGHT;
+    }
+
+    @Override
+    protected boolean anyDocValuesFields(FieldInfos infos) {
+      return infos.hasNorms();
+    }
+    
+  }
+  
+  public static class Lucene40NormsDocValuesConsumer extends Lucene40DocValuesConsumer {
+
+    public Lucene40NormsDocValuesConsumer(PerDocWriteState state,
+        String segmentSuffix) throws IOException {
+      super(state, segmentSuffix);
+    }
+
+    @Override
+    protected DocValues pull(IndexReader reader, FieldInfo info)
+        throws IOException {
+      return reader.normValues(info.name);
+    }
+
+    @Override
+    protected boolean canMerge(FieldInfo info) {
+      return !info.omitNorms && info.isIndexed;
+    }
+
+    @Override
+    protected Type getDocValuesType(FieldInfo info) {
+      return Type.BYTES_FIXED_STRAIGHT;
+    }
+    
+    public static void files(Directory dir, SegmentInfo segmentInfo, Set<String> files) throws IOException {
+      FieldInfos fieldInfos = segmentInfo.getFieldInfos();
+      for (FieldInfo fieldInfo : fieldInfos) {
+        if (!fieldInfo.omitNorms && fieldInfo.isIndexed) {
+          files.add(IndexFileNames.segmentFileName(segmentInfo.name, NORMS_SEGMENT_SUFFIX, IndexFileNames.COMPOUND_FILE_EXTENSION));
+          files.add(IndexFileNames.segmentFileName(segmentInfo.name, NORMS_SEGMENT_SUFFIX, IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION));
+          assert dir.fileExists(IndexFileNames.segmentFileName(segmentInfo.name, NORMS_SEGMENT_SUFFIX, IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION));
+          assert dir.fileExists(IndexFileNames.segmentFileName(segmentInfo.name, NORMS_SEGMENT_SUFFIX, IndexFileNames.COMPOUND_FILE_EXTENSION));
+          break;
+        }
+      }
+    }
+    
+  }
+
+
+
+
 }
Index: lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsWriter.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsWriter.java	(working copy)
@@ -202,18 +202,9 @@
     try {
       close();
     } catch (IOException ignored) {}
-    
-    try {
-      directory.deleteFile(IndexFileNames.segmentFileName(segment, "", Lucene40TermVectorsReader.VECTORS_INDEX_EXTENSION));
-    } catch (IOException ignored) {}
-    
-    try {
-      directory.deleteFile(IndexFileNames.segmentFileName(segment, "", Lucene40TermVectorsReader.VECTORS_DOCUMENTS_EXTENSION));
-    } catch (IOException ignored) {}
-    
-    try {
-      directory.deleteFile(IndexFileNames.segmentFileName(segment, "", Lucene40TermVectorsReader.VECTORS_FIELDS_EXTENSION));
-    } catch (IOException ignored) {}
+    IOUtils.deleteFilesIgnoringExceptions(directory, IndexFileNames.segmentFileName(segment, "", Lucene40TermVectorsReader.VECTORS_INDEX_EXTENSION),
+        IndexFileNames.segmentFileName(segment, "", Lucene40TermVectorsReader.VECTORS_DOCUMENTS_EXTENSION),
+        IndexFileNames.segmentFileName(segment, "", Lucene40TermVectorsReader.VECTORS_FIELDS_EXTENSION));
   }
 
   /**
Index: lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40NormsReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40NormsReader.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40NormsReader.java	(working copy)
@@ -1,196 +0,0 @@
-package org.apache.lucene.codecs.lucene40;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.IdentityHashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.Map.Entry;
-
-import org.apache.lucene.codecs.NormsReader;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.MapBackedSet;
-import org.apache.lucene.util.StringHelper;
-
-public class Lucene40NormsReader extends NormsReader {
-  // this would be replaced by Source/SourceCache in a dv impl.
-  // for now we have our own mini-version
-  final Map<String,Norm> norms = new HashMap<String,Norm>();
-  // any .nrm or .sNN files we have open at any time.
-  // TODO: just a list, and double-close() separate norms files?
-  final Set<IndexInput> openFiles = new MapBackedSet<IndexInput>(new IdentityHashMap<IndexInput,Boolean>());
-  // points to a singleNormFile
-  IndexInput singleNormStream;
-  final int maxdoc;
-  
-  // note: just like segmentreader in 3.x, we open up all the files here (including separate norms) up front.
-  // but we just don't do any seeks or reading yet.
-  public Lucene40NormsReader(Directory dir, SegmentInfo info, FieldInfos fields, IOContext context, Directory separateNormsDir) throws IOException {
-    maxdoc = info.docCount;
-    String segmentName = info.name;
-    Map<Integer,Long> normGen = info.getNormGen();
-    boolean success = false;
-    try {
-      long nextNormSeek = Lucene40NormsWriter.NORMS_HEADER.length; //skip header (header unused for now)
-      for (FieldInfo fi : fields) {
-        if (fi.isIndexed && !fi.omitNorms) {
-          String fileName = getNormFilename(segmentName, normGen, fi.number);
-          Directory d = hasSeparateNorms(normGen, fi.number) ? separateNormsDir : dir;
-        
-          // singleNormFile means multiple norms share this file
-          boolean singleNormFile = IndexFileNames.matchesExtension(fileName, Lucene40NormsWriter.NORMS_EXTENSION);
-          IndexInput normInput = null;
-          long normSeek;
-
-          if (singleNormFile) {
-            normSeek = nextNormSeek;
-            if (singleNormStream == null) {
-              singleNormStream = d.openInput(fileName, context);
-              openFiles.add(singleNormStream);
-            }
-            // All norms in the .nrm file can share a single IndexInput since
-            // they are only used in a synchronized context.
-            // If this were to change in the future, a clone could be done here.
-            normInput = singleNormStream;
-          } else {
-            normInput = d.openInput(fileName, context);
-            openFiles.add(normInput);
-            // if the segment was created in 3.2 or after, we wrote the header for sure,
-            // and don't need to do the sketchy file size check. otherwise, we check 
-            // if the size is exactly equal to maxDoc to detect a headerless file.
-            // NOTE: remove this check in Lucene 5.0!
-            String version = info.getVersion();
-            final boolean isUnversioned = 
-                (version == null || StringHelper.getVersionComparator().compare(version, "3.2") < 0)
-                && normInput.length() == maxdoc;
-            if (isUnversioned) {
-              normSeek = 0;
-            } else {
-              normSeek = Lucene40NormsWriter.NORMS_HEADER.length;
-            }
-          }
-
-          Norm norm = new Norm();
-          norm.file = normInput;
-          norm.offset = normSeek;
-          norms.put(fi.name, norm);
-          nextNormSeek += maxdoc; // increment also if some norms are separate
-        }
-      }
-      // TODO: change to a real check? see LUCENE-3619
-      assert singleNormStream == null || nextNormSeek == singleNormStream.length();
-      success = true;
-    } finally {
-      if (!success) {
-        IOUtils.closeWhileHandlingException(openFiles);
-      }
-    }
-  }
-  
-  @Override
-  public byte[] norms(String name) throws IOException {
-    Norm norm = norms.get(name);
-    return norm == null ? null : norm.bytes();
-  }
-  
-
-  @Override
-  public void close() throws IOException {
-    try {
-      IOUtils.close(openFiles);
-    } finally {
-      norms.clear();
-      openFiles.clear();
-    }
-  }
-  
-  private static String getNormFilename(String segmentName, Map<Integer,Long> normGen, int number) {
-    if (hasSeparateNorms(normGen, number)) {
-      return IndexFileNames.fileNameFromGeneration(segmentName, Lucene40NormsWriter.SEPARATE_NORMS_EXTENSION + number, normGen.get(number));
-    } else {
-      // single file for all norms
-      return IndexFileNames.fileNameFromGeneration(segmentName, Lucene40NormsWriter.NORMS_EXTENSION, SegmentInfo.WITHOUT_GEN);
-    }
-  }
-  
-  private static boolean hasSeparateNorms(Map<Integer,Long> normGen, int number) {
-    if (normGen == null) {
-      return false;
-    }
-
-    Long gen = normGen.get(number);
-    return gen != null && gen.longValue() != SegmentInfo.NO;
-  }
-  
-  class Norm {
-    IndexInput file;
-    long offset;
-    byte bytes[];
-    
-    synchronized byte[] bytes() throws IOException {
-      if (bytes == null) {
-        bytes = new byte[maxdoc];
-        // some norms share fds
-        synchronized(file) {
-          file.seek(offset);
-          file.readBytes(bytes, 0, bytes.length, false);
-        }
-        // we are done with this file
-        if (file != singleNormStream) {
-          openFiles.remove(file);
-          file.close();
-          file = null;
-        }
-      }
-      return bytes;
-    }
-  }
-  
-  static void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
-    // TODO: This is what SI always did... but we can do this cleaner?
-    // like first FI that has norms but doesn't have separate norms?
-    final String normsFileName = IndexFileNames.segmentFileName(info.name, "", Lucene40NormsWriter.NORMS_EXTENSION);
-    if (dir.fileExists(normsFileName)) {
-      files.add(normsFileName);
-    }
-  }
-  
-  /** @deprecated */
-  @Deprecated
-  static void separateFiles(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
-    Map<Integer,Long> normGen = info.getNormGen();
-    if (normGen != null) {
-      for (Entry<Integer,Long> entry : normGen.entrySet()) {
-        long gen = entry.getValue();
-        if (gen >= SegmentInfo.YES) {
-          // Definitely a separate norm file, with generation:
-          files.add(IndexFileNames.fileNameFromGeneration(info.name, Lucene40NormsWriter.SEPARATE_NORMS_EXTENSION + entry.getKey(), gen));
-        }
-      }
-    }
-  }
-}
Index: lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesProducer.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesProducer.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesProducer.java	(working copy)
@@ -39,16 +39,15 @@
 public class Lucene40DocValuesProducer extends DocValuesReaderBase {
   protected final TreeMap<String,DocValues> docValues;
   private final Directory cfs;
-
   /**
    * Creates a new {@link Lucene40DocValuesProducer} instance and loads all
    * {@link DocValues} instances for this segment and codec.
    */
-  public Lucene40DocValuesProducer(SegmentReadState state) throws IOException {
-    if (state.fieldInfos.anyDocValuesFields()) {
+  public Lucene40DocValuesProducer(SegmentReadState state, String segmentSuffix) throws IOException {
+    if (anyDocValuesFields(state.fieldInfos)) {
       cfs = new CompoundFileDirectory(state.dir, 
                                       IndexFileNames.segmentFileName(state.segmentInfo.name,
-                                                                     Lucene40DocValuesConsumer.DOC_VALUES_SEGMENT_SUFFIX, IndexFileNames.COMPOUND_FILE_EXTENSION), 
+                                                                     segmentSuffix, IndexFileNames.COMPOUND_FILE_EXTENSION), 
                                       state.context, false);
       docValues = load(state.fieldInfos, state.segmentInfo.name, state.segmentInfo.docCount, cfs, state.context);
     } else {
Index: lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40NormsWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40NormsWriter.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40NormsWriter.java	(working copy)
@@ -1,130 +0,0 @@
-package org.apache.lucene.codecs.lucene40;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.Arrays;
-
-import org.apache.lucene.codecs.NormsWriter;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.MergeState;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.IOUtils;
-
-public class Lucene40NormsWriter extends NormsWriter {
-  private IndexOutput out;
-  private int normCount = 0;
-  
-  /** norms header placeholder */
-  static final byte[] NORMS_HEADER = new byte[]{'N','R','M',-1};
-  
-  /** Extension of norms file */
-  static final String NORMS_EXTENSION = "nrm";
-  
-  /** Extension of separate norms file
-   * @deprecated */
-  @Deprecated
-  static final String SEPARATE_NORMS_EXTENSION = "s";
-  
-  public Lucene40NormsWriter(Directory directory, String segment, IOContext context) throws IOException {
-    final String normsFileName = IndexFileNames.segmentFileName(segment, "", NORMS_EXTENSION);
-    boolean success = false;
-    try {
-      out = directory.createOutput(normsFileName, context);
-      out.writeBytes(NORMS_HEADER, 0, NORMS_HEADER.length);
-      success = true;
-    } finally {
-      if (!success) {
-        IOUtils.closeWhileHandlingException(out);
-      }
-    }
-  }
-
-  @Override
-  public void startField(FieldInfo info) throws IOException {
-    assert info.omitNorms == false;
-    normCount++;
-  }
-  
-  @Override
-  public void writeNorm(byte norm) throws IOException {
-    out.writeByte(norm);
-  }
-  
-  @Override
-  public void finish(int numDocs) throws IOException {
-    if (4+normCount*(long)numDocs != out.getFilePointer()) {
-      throw new RuntimeException(".nrm file size mismatch: expected=" + (4+normCount*(long)numDocs) + " actual=" + out.getFilePointer());
-    }
-  }
-  
-  /** we override merge and bulk-merge norms when there are no deletions */
-  @Override
-  public int merge(MergeState mergeState) throws IOException {
-    int numMergedDocs = 0;
-    for (FieldInfo fi : mergeState.fieldInfos) {
-      if (fi.isIndexed && !fi.omitNorms) {
-        startField(fi);
-        int numMergedDocsForField = 0;
-        for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
-          final int maxDoc = reader.reader.maxDoc();
-          byte normBuffer[] = reader.reader.norms(fi.name);
-          if (normBuffer == null) {
-            // Can be null if this segment doesn't have
-            // any docs with this field
-            normBuffer = new byte[maxDoc];
-            Arrays.fill(normBuffer, (byte)0);
-          }
-          if (reader.liveDocs == null) {
-            //optimized case for segments without deleted docs
-            out.writeBytes(normBuffer, maxDoc);
-            numMergedDocsForField += maxDoc;
-          } else {
-            // this segment has deleted docs, so we have to
-            // check for every doc if it is deleted or not
-            final Bits liveDocs = reader.liveDocs;
-            for (int k = 0; k < maxDoc; k++) {
-              if (liveDocs.get(k)) {
-                numMergedDocsForField++;
-                out.writeByte(normBuffer[k]);
-              }
-            }
-          }
-          mergeState.checkAbort.work(maxDoc);
-        }
-        assert numMergedDocs == 0 || numMergedDocs == numMergedDocsForField;
-        numMergedDocs = numMergedDocsForField;
-      }
-    }
-    finish(numMergedDocs);
-    return numMergedDocs;
-  }
-
-  @Override
-  public void close() throws IOException {
-    try {
-      IOUtils.close(out);
-    } finally {
-      out = null;
-    }
-  }
-}
Index: lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesFormat.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesFormat.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesFormat.java	(working copy)
@@ -32,12 +32,12 @@
 
   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new Lucene40DocValuesConsumer(state);
+    return new Lucene40DocValuesConsumer(state, Lucene40DocValuesConsumer.DOC_VALUES_SEGMENT_SUFFIX);
   }
 
   @Override
   public PerDocProducer docsProducer(SegmentReadState state) throws IOException {
-    return new Lucene40DocValuesProducer(state);
+    return new Lucene40DocValuesProducer(state, Lucene40DocValuesConsumer.DOC_VALUES_SEGMENT_SUFFIX);
   }
 
   @Override
Index: lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesConsumer.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesConsumer.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesConsumer.java	(working copy)
@@ -28,6 +28,7 @@
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.store.CompoundFileDirectory;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
 
 /**
  * Default PerDocConsumer implementation that uses compound file.
@@ -36,11 +37,13 @@
 public class Lucene40DocValuesConsumer extends DocValuesWriterBase {
   private final Directory mainDirectory;
   private Directory directory;
+  private final String segmentSuffix;
+  public final static String DOC_VALUES_SEGMENT_SUFFIX = "dv";
 
-  final static String DOC_VALUES_SEGMENT_SUFFIX = "dv";
-  
-  public Lucene40DocValuesConsumer(PerDocWriteState state) throws IOException {
+
+  public Lucene40DocValuesConsumer(PerDocWriteState state, String segmentSuffix) throws IOException {
     super(state);
+    this.segmentSuffix = segmentSuffix;
     mainDirectory = state.directory;
     //TODO maybe we should enable a global CFS that all codecs can pull on demand to further reduce the number of files?
   }
@@ -50,7 +53,7 @@
     // lazy init
     if (directory == null) {
       directory = new CompoundFileDirectory(mainDirectory,
-                                            IndexFileNames.segmentFileName(segmentName, DOC_VALUES_SEGMENT_SUFFIX,
+                                            IndexFileNames.segmentFileName(segmentName, segmentSuffix,
                                                                            IndexFileNames.COMPOUND_FILE_EXTENSION), context, true);
     }
     return directory;
@@ -75,4 +78,15 @@
       }
     }
   }
+
+  @Override
+  public void abort() {
+    try {
+      close();
+    } catch (IOException ignored) {}
+    IOUtils.deleteFilesIgnoringExceptions(mainDirectory, IndexFileNames.segmentFileName(
+        segmentName, segmentSuffix, IndexFileNames.COMPOUND_FILE_EXTENSION),
+        IndexFileNames.segmentFileName(segmentName, segmentSuffix,
+            IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION));
+  }
 }
Index: lucene/src/java/org/apache/lucene/codecs/lucene40/values/Writer.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene40/values/Writer.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/lucene40/values/Writer.java	(working copy)
@@ -20,11 +20,9 @@
 import java.util.Comparator;
 
 import org.apache.lucene.codecs.DocValuesConsumer;
-import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Counter;
 
@@ -41,7 +39,6 @@
  * @lucene.experimental
  */
 public abstract class Writer extends DocValuesConsumer {
-  protected Source currentMergeSource;
   protected final Counter bytesUsed;
 
   /**
@@ -67,102 +64,6 @@
   public static final String DATA_EXTENSION = "dat";
 
   /**
-   * Records the specified <tt>long</tt> value for the docID or throws an
-   * {@link UnsupportedOperationException} if this {@link Writer} doesn't record
-   * <tt>long</tt> values.
-   * 
-   * @throws UnsupportedOperationException
-   *           if this writer doesn't record <tt>long</tt> values
-   */
-  public void add(int docID, long value) throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  /**
-   * Records the specified <tt>double</tt> value for the docID or throws an
-   * {@link UnsupportedOperationException} if this {@link Writer} doesn't record
-   * <tt>double</tt> values.
-   * 
-   * @throws UnsupportedOperationException
-   *           if this writer doesn't record <tt>double</tt> values
-   */
-  public void add(int docID, double value) throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  /**
-   * Records the specified {@link BytesRef} value for the docID or throws an
-   * {@link UnsupportedOperationException} if this {@link Writer} doesn't record
-   * {@link BytesRef} values.
-   * 
-   * @throws UnsupportedOperationException
-   *           if this writer doesn't record {@link BytesRef} values
-   */
-  public void add(int docID, BytesRef value) throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  /**
-   * Merges a document with the given <code>docID</code>. The methods
-   * implementation obtains the value for the <i>sourceDoc</i> id from the
-   * current {@link Source} set to <i>setNextMergeSource(Source)</i>.
-   * <p>
-   * This method is used during merging to provide implementation agnostic
-   * default merge implementation.
-   * </p>
-   * <p>
-   * All documents IDs between the given ID and the previously given ID or
-   * <tt>0</tt> if the method is call the first time are filled with default
-   * values depending on the {@link Writer} implementation. The given document
-   * ID must always be greater than the previous ID or <tt>0</tt> if called the
-   * first time.
-   */
-  protected abstract void mergeDoc(int docID, int sourceDoc) throws IOException;
-
-  /**
-   * Sets the next {@link Source} to consume values from on calls to
-   * {@link #mergeDoc(int, int)}
-   * 
-   * @param mergeSource
-   *          the next {@link Source}, this must not be null
-   */
-  protected void setNextMergeSource(Source mergeSource) {
-    currentMergeSource = mergeSource;
-  }
-
-  /**
-   * Finish writing and close any files and resources used by this Writer.
-   * 
-   * @param docCount
-   *          the total number of documents for this writer. This must be
-   *          greater that or equal to the largest document id passed to one of
-   *          the add methods after the {@link Writer} was created.
-   */
-  public abstract void finish(int docCount) throws IOException;
-
-  @Override
-  protected void merge(SingleSubMergeState state) throws IOException {
-    // This enables bulk copies in subclasses per MergeState, subclasses can
-    // simply override this and decide if they want to merge
-    // segments using this generic implementation or if a bulk merge is possible
-    // / feasible.
-    final Source source = state.reader.getDirectSource();
-    assert source != null;
-    setNextMergeSource(source); // set the current enum we are working on - the
-    // impl. will get the correct reference for the type
-    // it supports
-    int docID = state.docBase;
-    final Bits liveDocs = state.liveDocs;
-    final int docCount = state.docCount;
-    for (int i = 0; i < docCount; i++) {
-      if (liveDocs == null || liveDocs.get(i)) {
-        mergeDoc(docID++, i);
-      }
-    }
-    
-  }
-
-  /**
    * Factory method to create a {@link Writer} instance for a given type. This
    * method returns default implementations for each of the different types
    * defined in the {@link Type} enumeration.
@@ -181,7 +82,7 @@
    * @return a new {@link Writer} instance for the given {@link Type}
    * @throws IOException
    */
-  public static Writer create(Type type, String id, Directory directory,
+  public static DocValuesConsumer create(Type type, String id, Directory directory,
       Comparator<BytesRef> comp, Counter bytesUsed, IOContext context, boolean fasterButMoreRam) throws IOException {
     if (comp == null) {
       comp = BytesRef.getUTF8SortedAsUnicodeComparator();
Index: lucene/src/java/org/apache/lucene/codecs/lucene40/values/Ints.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene40/values/Ints.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/lucene40/values/Ints.java	(working copy)
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 
+import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.index.DocValue;
@@ -42,7 +43,7 @@
   private Ints() {
   }
   
-  public static Writer getWriter(Directory dir, String id, Counter bytesUsed,
+  public static DocValuesConsumer getWriter(Directory dir, String id, Counter bytesUsed,
       Type type, IOContext context) throws IOException {
     return type == Type.VAR_INTS ? new PackedIntValues.PackedIntsWriter(dir, id,
         bytesUsed, context) : new IntsWriter(dir, id, bytesUsed, context, type);
Index: lucene/src/java/org/apache/lucene/codecs/lucene40/values/Bytes.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene40/values/Bytes.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/lucene40/values/Bytes.java	(working copy)
@@ -22,6 +22,7 @@
 import java.util.Comparator;
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.DocValue;
@@ -122,7 +123,7 @@
    * @throws IOException
    *           if the files for the writer can not be created.
    */
-  public static Writer getWriter(Directory dir, String id, Mode mode,
+  public static DocValuesConsumer getWriter(Directory dir, String id, Mode mode,
       boolean fixedSize, Comparator<BytesRef> sortComparator,
       Counter bytesUsed, IOContext context, boolean fasterButMoreRam)
       throws IOException {
@@ -295,7 +296,8 @@
      * skipped; they will be filled with 0 bytes.
      */
     @Override
-    public abstract void add(int docID, BytesRef bytes) throws IOException;
+    public
+    abstract void add(int docID, BytesRef bytes) throws IOException;
 
     @Override
     public abstract void finish(int docCount) throws IOException;
Index: lucene/src/java/org/apache/lucene/codecs/lucene40/values/Floats.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene40/values/Floats.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/lucene40/values/Floats.java	(working copy)
@@ -18,6 +18,7 @@
  */
 import java.io.IOException;
 
+import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValue;
 import org.apache.lucene.index.DocValues.Source;
@@ -44,7 +45,7 @@
   protected static final int VERSION_START = 0;
   protected static final int VERSION_CURRENT = VERSION_START;
   
-  public static Writer getWriter(Directory dir, String id, Counter bytesUsed,
+  public static DocValuesConsumer getWriter(Directory dir, String id, Counter bytesUsed,
       IOContext context, Type type) throws IOException {
     return new FloatsWriter(dir, id, bytesUsed, context, type);
   }
Index: lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsWriter.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsWriter.java	(working copy)
@@ -122,14 +122,9 @@
     try {
       close();
     } catch (IOException ignored) {}
-    
-    try {
-      directory.deleteFile(IndexFileNames.segmentFileName(segment, "", FIELDS_EXTENSION));
-    } catch (IOException ignored) {}
-    
-    try {
-      directory.deleteFile(IndexFileNames.segmentFileName(segment, "", FIELDS_INDEX_EXTENSION));
-    } catch (IOException ignored) {}
+    IOUtils.deleteFilesIgnoringExceptions(directory,
+        IndexFileNames.segmentFileName(segment, "", FIELDS_EXTENSION),
+        IndexFileNames.segmentFileName(segment, "", FIELDS_INDEX_EXTENSION));
   }
 
   public final void writeField(FieldInfo info, IndexableField field) throws IOException {
Index: lucene/src/java/org/apache/lucene/codecs/DocValuesConsumer.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/DocValuesConsumer.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/DocValuesConsumer.java	(working copy)
@@ -18,11 +18,14 @@
  */
 import java.io.IOException;
 
+import org.apache.lucene.codecs.lucene40.values.Writer;
 import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MergeState;
 import org.apache.lucene.index.DocValue;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
 
 /**
  * Abstract API that consumes {@link DocValue}s.
@@ -35,6 +38,9 @@
  */
 public abstract class DocValuesConsumer {
 
+  protected Source currentMergeSource;
+  protected final BytesRef spare = new BytesRef();
+
   /**
    * Adds the given {@link DocValue} instance to this
    * {@link DocValuesConsumer}
@@ -83,6 +89,7 @@
         hasMerged = true;
         merge(new SingleSubMergeState(docValues[readerIDX], mergeState.docBase[readerIDX], reader.reader.maxDoc(),
                                     reader.liveDocs));
+        mergeState.checkAbort.work(reader.reader.maxDoc());
       }
     }
     // only finish if no exception is thrown!
@@ -99,12 +106,114 @@
    * @throws IOException
    *           if an {@link IOException} occurs
    */
-  // TODO: can't we have a default implementation here that merges naively with our apis?
-  // this is how stored fields and term vectors work. its a pain to have to impl merging
-  // (should be an optimization to override it)
-  protected abstract void merge(SingleSubMergeState mergeState) throws IOException;
+  protected void merge(SingleSubMergeState state) throws IOException {
+    // This enables bulk copies in subclasses per MergeState, subclasses can
+    // simply override this and decide if they want to merge
+    // segments using this generic implementation or if a bulk merge is possible
+    // / feasible.
+    final Source source = state.reader.getDirectSource();
+    assert source != null;
+    setNextMergeSource(source); // set the current enum we are working on - the
+    // impl. will get the correct reference for the type
+    // it supports
+    int docID = state.docBase;
+    final Bits liveDocs = state.liveDocs;
+    final int docCount = state.docCount;
+    for (int i = 0; i < docCount; i++) {
+      if (liveDocs == null || liveDocs.get(i)) {
+        mergeDoc(docID++, i);
+      }
+    }
+  }
 
   /**
+   * Records the specified <tt>long</tt> value for the docID or throws an
+   * {@link UnsupportedOperationException} if this {@link Writer} doesn't record
+   * <tt>long</tt> values.
+   * 
+   * @throws UnsupportedOperationException
+   *           if this writer doesn't record <tt>long</tt> values
+   */
+  public void add(int docID, long value) throws IOException {
+    throw new UnsupportedOperationException("override this method to support integer types");
+  }
+
+  /**
+   * Records the specified <tt>double</tt> value for the docID or throws an
+   * {@link UnsupportedOperationException} if this {@link Writer} doesn't record
+   * <tt>double</tt> values.
+   * 
+   * @throws UnsupportedOperationException
+   *           if this writer doesn't record <tt>double</tt> values
+   */
+  public void add(int docID, double value) throws IOException {
+    throw new UnsupportedOperationException("override this method to support floating point types");
+  }
+
+  /**
+   * Records the specified {@link BytesRef} value for the docID or throws an
+   * {@link UnsupportedOperationException} if this {@link Writer} doesn't record
+   * {@link BytesRef} values.
+   * 
+   * @throws UnsupportedOperationException
+   *           if this writer doesn't record {@link BytesRef} values
+   */
+  public void add(int docID, BytesRef value) throws IOException {
+    throw new UnsupportedOperationException("override this method to support byte types");
+  }
+
+  /**
+   * Merges a document with the given <code>docID</code>. The method's
+   * implementation obtains the value for the <i>sourceDoc</i> id from the
+   * current {@link Source} set to <i>setNextMergeSource(Source)</i>.
+   * <p>
+   * This method is used during merging to provide implementation agnostic
+   * default merge implementation.
+   * </p>
+   * <p>
+   * All document IDs between the given ID and the previously given ID or
+   * <tt>0</tt> if the method is called the first time are filled with default
+   * values depending on the {@link Writer} implementation. The given document
+   * ID must always be greater than the previous ID or <tt>0</tt> if called the
+   * first time.
+   */
+  protected void mergeDoc(int docID, int sourceDoc)
+      throws IOException {
+    switch(currentMergeSource.type()) {
+    case BYTES_FIXED_DEREF:
+    case BYTES_FIXED_SORTED:
+    case BYTES_FIXED_STRAIGHT:
+    case BYTES_VAR_DEREF:
+    case BYTES_VAR_SORTED:
+    case BYTES_VAR_STRAIGHT:
+      add(docID, currentMergeSource.getBytes(sourceDoc, spare));
+      break;
+    case FIXED_INTS_16:
+    case FIXED_INTS_32:
+    case FIXED_INTS_64:
+    case FIXED_INTS_8:
+    case VAR_INTS:
+      add(docID, currentMergeSource.getInt(sourceDoc));
+      break;
+    case FLOAT_32:
+    case FLOAT_64:
+      add(docID, currentMergeSource.getFloat(sourceDoc));
+      break;
+    }
+  }
+  
+  /**
+   * Sets the next {@link Source} to consume values from on calls to
+   * {@link #mergeDoc(int, int)}
+   * 
+   * @param mergeSource
+   *          the next {@link Source}, this must not be null
+   */
+  protected final void setNextMergeSource(Source mergeSource) {
+    currentMergeSource = mergeSource;
+  }
+
+  /**
    * Specialized auxiliary MergeState is necessary since we don't want to
    * exploit internals up to the codecs consumer. An instance of this class is
    * created for each merged low level {@link IndexReader} we are merging to
Index: lucene/src/java/org/apache/lucene/codecs/DocValuesReaderBase.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/DocValuesReaderBase.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/DocValuesReaderBase.java	(working copy)
@@ -41,7 +41,7 @@
  */
 // TODO: this needs to go under lucene40 codec (its specific to its impl)
 public abstract class DocValuesReaderBase extends PerDocProducer {
-  
+
   protected abstract void closeInternal(Collection<? extends Closeable> closeables) throws IOException;
   protected abstract Map<String, DocValues> docValues();
   
@@ -68,14 +68,14 @@
     try {
 
       for (FieldInfo fieldInfo : fieldInfos) {
-        if (fieldInfo.hasDocValues()) {
+        if (canLoad(fieldInfo)) {
           final String field = fieldInfo.name;
           // TODO can we have a compound file per segment and codec for
           // docvalues?
           final String id = DocValuesWriterBase.docValuesId(segment,
               fieldInfo.number);
           values.put(field,
-              loadDocValues(docCount, dir, id, fieldInfo.getDocValuesType(), context));
+              loadDocValues(docCount, dir, id, getDocValuesType(fieldInfo), context));
         }
       }
       success = true;
@@ -88,6 +88,18 @@
     return values;
   }
   
+  protected boolean canLoad(FieldInfo info) {
+    return info.hasDocValues();
+  }
+  
+  protected Type getDocValuesType(FieldInfo info) {
+    return info.getDocValuesType();
+  }
+  
+  protected boolean anyDocValuesFields(FieldInfos infos) {
+    return infos.anyDocValuesFields();
+  }
+  
   /**
    * Loads a {@link DocValues} instance depending on the given {@link Type}.
    * Codecs that use different implementations for a certain {@link Type} can
Index: lucene/src/java/org/apache/lucene/codecs/DocValuesWriterBase.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/DocValuesWriterBase.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/DocValuesWriterBase.java	(working copy)
@@ -23,7 +23,6 @@
 import org.apache.lucene.codecs.lucene40.values.Writer;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.PerDocWriteState;
-import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValues.Type; // javadoc
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
@@ -38,7 +37,6 @@
 //TODO: this needs to go under lucene40 codec (its specific to its impl)
 public abstract class DocValuesWriterBase extends PerDocConsumer {
   protected final String segmentName;
-  protected final String segmentSuffix;
   private final Counter bytesUsed;
   protected final IOContext context;
   private final boolean fasterButMoreRam;
@@ -58,7 +56,6 @@
    */
   protected DocValuesWriterBase(PerDocWriteState state, boolean fasterButMoreRam) {
     this.segmentName = state.segmentName;
-    this.segmentSuffix = state.segmentSuffix;
     this.bytesUsed = state.bytesUsed;
     this.context = state.context;
     this.fasterButMoreRam = fasterButMoreRam;
Index: lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xNormsProducer.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xNormsProducer.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xNormsProducer.java	(working copy)
@@ -0,0 +1,241 @@
+package org.apache.lucene.codecs.lucene3x;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.IdentityHashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.Map.Entry;
+
+import org.apache.lucene.codecs.PerDocProducer;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocValues.Source;
+import org.apache.lucene.index.DocValues.Type;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.MapBackedSet;
+import org.apache.lucene.util.StringHelper;
+
+/**
+ * Reads Lucene 3.x norms format and exposes it via DocValues API
+ * @lucene.experimental
+ */
+class Lucene3xNormsProducer extends PerDocProducer {
+  final Map<String,NormsDocValues> norms = new HashMap<String,NormsDocValues>();
+  // any .nrm or .sNN files we have open at any time.
+  // TODO: just a list, and double-close() separate norms files?
+  final Set<IndexInput> openFiles = new MapBackedSet<IndexInput>(new IdentityHashMap<IndexInput,Boolean>());
+  // points to a singleNormFile
+  IndexInput singleNormStream;
+  final int maxdoc;
+  
+  // note: just like segmentreader in 3.x, we open up all the files here (including separate norms) up front.
+  // but we just don't do any seeks or reading yet.
+  public Lucene3xNormsProducer(Directory dir, SegmentInfo info, FieldInfos fields, IOContext context, Directory separateNormsDir) throws IOException {
+    maxdoc = info.docCount;
+    String segmentName = info.name;
+    Map<Integer,Long> normGen = info.getNormGen();
+    boolean success = false;
+    try {
+      long nextNormSeek = Lucene3xNormsConsumer.NORMS_HEADER.length; //skip header (header unused for now)
+      for (FieldInfo fi : fields) {
+        if (fi.isIndexed && !fi.omitNorms) {
+          String fileName = getNormFilename(segmentName, normGen, fi.number);
+          Directory d = hasSeparateNorms(normGen, fi.number) ? separateNormsDir : dir;
+        
+          // singleNormFile means multiple norms share this file
+          boolean singleNormFile = IndexFileNames.matchesExtension(fileName, Lucene3xNormsConsumer.NORMS_EXTENSION);
+          IndexInput normInput = null;
+          long normSeek;
+
+          if (singleNormFile) {
+            normSeek = nextNormSeek;
+            if (singleNormStream == null) {
+              singleNormStream = d.openInput(fileName, context);
+              openFiles.add(singleNormStream);
+            }
+            // All norms in the .nrm file can share a single IndexInput since
+            // they are only used in a synchronized context.
+            // If this were to change in the future, a clone could be done here.
+            normInput = singleNormStream;
+          } else {
+            normInput = d.openInput(fileName, context);
+            openFiles.add(normInput);
+            // if the segment was created in 3.2 or after, we wrote the header for sure,
+            // and don't need to do the sketchy file size check. otherwise, we check 
+            // if the size is exactly equal to maxDoc to detect a headerless file.
+            // NOTE: remove this check in Lucene 5.0!
+            String version = info.getVersion();
+            final boolean isUnversioned = 
+                (version == null || StringHelper.getVersionComparator().compare(version, "3.2") < 0)
+                && normInput.length() == maxdoc;
+            if (isUnversioned) {
+              normSeek = 0;
+            } else {
+              normSeek = Lucene3xNormsConsumer.NORMS_HEADER.length;
+            }
+          }
+          NormsDocValues norm = new NormsDocValues(normInput, normSeek);
+          norms.put(fi.name, norm);
+          nextNormSeek += maxdoc; // increment also if some norms are separate
+        }
+      }
+      // TODO: change to a real check? see LUCENE-3619
+      assert singleNormStream == null || nextNormSeek == singleNormStream.length() : singleNormStream != null ? "len: " + singleNormStream.length() + " expected: " + nextNormSeek : "null";
+      success = true;
+    } finally {
+      if (!success) {
+        IOUtils.closeWhileHandlingException(openFiles);
+      }
+    }
+  }
+  
+  @Override
+  public DocValues docValues(String field) throws IOException {
+    return norms.get(field);
+  }
+  
+  @Override
+  public void close() throws IOException {
+    try {
+      IOUtils.close(openFiles);
+    } finally {
+      norms.clear();
+      openFiles.clear();
+    }
+  }
+  
+  private static String getNormFilename(String segmentName, Map<Integer,Long> normGen, int number) {
+    if (hasSeparateNorms(normGen, number)) {
+      return IndexFileNames.fileNameFromGeneration(segmentName, Lucene3xNormsConsumer.SEPARATE_NORMS_EXTENSION + number, normGen.get(number));
+    } else {
+      // single file for all norms
+      return IndexFileNames.fileNameFromGeneration(segmentName, Lucene3xNormsConsumer.NORMS_EXTENSION, SegmentInfo.WITHOUT_GEN);
+    }
+  }
+  
+  private static boolean hasSeparateNorms(Map<Integer,Long> normGen, int number) {
+    if (normGen == null) {
+      return false;
+    }
+
+    Long gen = normGen.get(number);
+    return gen != null && gen.longValue() != SegmentInfo.NO;
+  }
+  
+  static final class NormSource extends Source {
+    protected NormSource(byte[] bytes) {
+      super(Type.BYTES_FIXED_STRAIGHT);
+      this.bytes = bytes;
+    }
+
+    final byte bytes[];
+    
+    @Override
+    public BytesRef getBytes(int docID, BytesRef ref) {
+      ref.bytes = bytes;
+      ref.offset = docID;
+      ref.length = 1;
+      return ref;
+    }
+
+    @Override
+    public boolean hasArray() {
+      return true;
+    }
+
+    @Override
+    public Object getArray() {
+      return bytes;
+    }
+    
+  }
+  
+  static void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
+    // TODO: This is what SI always did... but we can do this cleaner?
+    // like first FI that has norms but doesn't have separate norms?
+    final String normsFileName = IndexFileNames.segmentFileName(info.name, "", Lucene3xNormsConsumer.NORMS_EXTENSION);
+    if (dir.fileExists(normsFileName)) {
+      files.add(normsFileName);
+    }
+  }
+  
+  /** @deprecated */
+  @Deprecated
+  static void separateFiles(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
+    Map<Integer,Long> normGen = info.getNormGen();
+    if (normGen != null) {
+      for (Entry<Integer,Long> entry : normGen.entrySet()) {
+        long gen = entry.getValue();
+        if (gen >= SegmentInfo.YES) {
+          // Definitely a separate norm file, with generation:
+          files.add(IndexFileNames.fileNameFromGeneration(info.name, Lucene3xNormsConsumer.SEPARATE_NORMS_EXTENSION + entry.getKey(), gen));
+        }
+      }
+    }
+  }
+
+  private class NormsDocValues extends DocValues {
+    private final IndexInput file;
+    private final long offset;
+    public NormsDocValues(IndexInput normInput, long normSeek) {
+      this.file = normInput;
+      this.offset = normSeek;
+    }
+
+    @Override
+    public Source load() throws IOException {
+      return new NormSource(bytes());
+    }
+
+    @Override
+    public Source getDirectSource() throws IOException {
+      return getSource();
+    }
+
+    @Override
+    public Type type() {
+      return Type.BYTES_FIXED_STRAIGHT;
+    }
+    
+    byte[] bytes() throws IOException {
+        byte[] bytes = new byte[maxdoc];
+        // some norms share fds
+        synchronized(file) {
+          file.seek(offset);
+          file.readBytes(bytes, 0, bytes.length, false);
+        }
+        // we are done with this file
+        if (file != singleNormStream) {
+          openFiles.remove(file);
+          file.close();
+        }
+      return bytes;
+    }
+    
+  }
+}

Property changes on: lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xNormsProducer.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
Index: lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xNormsFormat.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xNormsFormat.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xNormsFormat.java	(working copy)
@@ -0,0 +1,60 @@
+package org.apache.lucene.codecs.lucene3x;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.lucene.codecs.NormsFormat;
+import org.apache.lucene.codecs.PerDocConsumer;
+import org.apache.lucene.codecs.PerDocProducer;
+import org.apache.lucene.index.PerDocWriteState;
+import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.store.Directory;
+
+public class Lucene3xNormsFormat extends NormsFormat {
+
+
+  @Override
+  public void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
+    Lucene3xNormsProducer.files(dir, info, files);
+  }
+
+  @Override
+  public void separateFiles(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
+    Lucene3xNormsProducer.separateFiles(dir, info, files);
+  }
+
+
+  @Override
+  public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
+    return new Lucene3xNormsConsumer(state.directory, state.segmentName, state.context);
+  }
+
+  @Override
+  public PerDocProducer docsProducer(SegmentReadState state) throws IOException {
+    return docsProducer(state, null);
+  }
+
+  @Override
+  public PerDocProducer docsProducer(SegmentReadState state,
+      Directory separateNormsDir) throws IOException {
+    return new Lucene3xNormsProducer(state.dir, state.segmentInfo, state.fieldInfos, state.context, separateNormsDir);
+  }
+}

Property changes on: lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xNormsFormat.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
Index: lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xNormsConsumer.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xNormsConsumer.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xNormsConsumer.java	(working copy)
@@ -0,0 +1,277 @@
+package org.apache.lucene.codecs.lucene3x;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.lucene.codecs.DocValuesConsumer;
+import org.apache.lucene.codecs.PerDocConsumer;
+import org.apache.lucene.index.DocValue;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocValues.Source;
+import org.apache.lucene.index.DocValues.Type;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.MergeState;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+
+/**
+ * Writes and Merges Lucene 3.x norms format
+ * @lucene.experimental
+ */
+class Lucene3xNormsConsumer extends PerDocConsumer {
+  
+  /** norms header placeholder */
+  static final byte[] NORMS_HEADER = new byte[]{'N','R','M',-1};
+  
+  /** Extension of norms file */
+  static final String NORMS_EXTENSION = "nrm";
+  
+  /** Extension of separate norms file
+   * @deprecated */
+  @Deprecated
+  static final String SEPARATE_NORMS_EXTENSION = "s";
+
+  private final Directory directory;
+
+  private final String segment;
+
+  private final IOContext context;
+
+  private NormsWriter writer;
+  
+  public Lucene3xNormsConsumer(Directory directory, String segment, IOContext context){
+    this.directory = directory;
+    this.segment = segment;
+    this.context = context;
+  }
+
+  @Override
+  public void merge(MergeState mergeState) throws IOException {
+    getNormsWriter().merge(mergeState);
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (writer != null) {
+      writer.finish();
+    }
+  }
+
+  @Override
+  public DocValuesConsumer addValuesField(Type type, FieldInfo fieldInfo)
+      throws IOException {
+    return new Lucene3xNormsDocValuesConsumer(fieldInfo);
+  }
+  
+  class Lucene3xNormsDocValuesConsumer extends DocValuesConsumer {
+    // Holds all docID/norm pairs we've seen
+    private int[] docIDs = new int[1];
+    private byte[] norms = new byte[1];
+    private int upto;
+    private final FieldInfo fi;
+    
+    Lucene3xNormsDocValuesConsumer(FieldInfo fieldInfo) {
+      fi = fieldInfo;
+    }
+
+    @Override
+    public void finish(int docCount) throws IOException {
+      final NormsWriter normsWriter = getNormsWriter();
+      boolean success = false;
+      try {
+        int uptoDoc = 0;
+        normsWriter.setNumTotalDocs(docCount);
+        if (upto > 0) {
+          normsWriter.startField(fi);
+          int docID = 0;
+          for (; docID < docCount; docID++) {
+            if (uptoDoc < upto && docIDs[uptoDoc] == docID) {
+              normsWriter.writeNorm(norms[uptoDoc]);
+              uptoDoc++;
+            } else {
+              normsWriter.writeNorm((byte) 0);
+            }
+          }
+          // we should have consumed every norm
+          assert uptoDoc == upto;
+  
+        } else {
+          // Fill entire field with default norm:
+          normsWriter.startField(fi);
+          for (; upto < docCount; upto++)
+            normsWriter.writeNorm((byte) 0);
+        }
+        success = true;
+      } finally {
+        if (!success) {
+          normsWriter.abort();
+        }
+      }
+    }
+    
+    @Override
+    public void add(int docID, DocValue docValue) throws IOException {
+      add(docID, docValue.getBytes());
+    }
+    
+    public void add(int docID, BytesRef value) throws IOException {
+      if (docIDs.length <= upto) {
+        assert docIDs.length == upto;
+        docIDs = ArrayUtil.grow(docIDs, 1 + upto);
+      }
+      if (norms.length <= upto) {
+        assert norms.length == upto;
+        norms = ArrayUtil.grow(norms, 1 + upto);
+      }
+      assert value.length == 1;
+      norms[upto] = value.bytes[value.offset];
+      
+      docIDs[upto] = docID;
+      upto++;
+    }
+    
+    
+  }
+  
+  public NormsWriter getNormsWriter() throws IOException {
+    if (writer == null) {
+      writer = new NormsWriter(directory, segment, context);
+    }
+    return writer;
+  }
+  
+  private static class NormsWriter {
+    
+    private final IndexOutput output;
+    private int normCount = 0;
+    private int numTotalDocs = 0;
+    
+    public NormsWriter(Directory directory, String segment, IOContext context) throws IOException {
+      final String normsFileName = IndexFileNames.segmentFileName(segment, "", NORMS_EXTENSION);
+      boolean success = false;
+      IndexOutput out = null;
+      try {
+        out = directory.createOutput(normsFileName, context);
+        output = out;
+        output.writeBytes(NORMS_HEADER, 0, NORMS_HEADER.length);
+        success = true;
+      } finally {
+        if (!success) {
+          IOUtils.closeWhileHandlingException(out);
+        }
+      }
+      
+    }
+    
+    
+    public void setNumTotalDocs(int numTotalDocs) {
+      assert this.numTotalDocs == 0 || numTotalDocs == this.numTotalDocs;
+      this.numTotalDocs = numTotalDocs;
+    }
+    
+    public void startField(FieldInfo info) throws IOException {
+      assert info.omitNorms == false;
+      normCount++;
+    }
+    
+    public void writeNorm(byte norm) throws IOException {
+      output.writeByte(norm);
+    }
+    
+    public void abort() throws IOException {
+      IOUtils.close(output);
+    }
+    
+    public void finish() throws IOException {
+      // Validate the expected file size (4-byte header + one norm byte per doc per field)
+      // BEFORE closing: getFilePointer() on a closed IndexOutput is invalid.
+      if (4+normCount*(long)numTotalDocs != output.getFilePointer()) {
+        throw new IOException(".nrm file size mismatch: expected=" + (4+normCount*(long)numTotalDocs) + " actual=" + output.getFilePointer());
+      }
+      IOUtils.close(output);
+    }
+    // TODO: we can actually use the default DV merge here and drop this specific stuff entirely
+    /** we override merge and bulk-merge norms when there are no deletions */
+    public void merge(MergeState mergeState) throws IOException {
+      int numMergedDocs = 0;
+      for (FieldInfo fi : mergeState.fieldInfos) {
+        if (fi.isIndexed && !fi.omitNorms) {
+          startField(fi);
+          int numMergedDocsForField = 0;
+          for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
+            final int maxDoc = reader.reader.maxDoc();
+            byte[] normBuffer;
+            DocValues normValues = reader.reader.normValues(fi.name);
+            if (normValues == null) {
+              // Can be null if this segment doesn't have
+              // any docs with this field
+              normBuffer = new byte[maxDoc];
+              Arrays.fill(normBuffer, (byte)0);
+            } else {
+              Source directSource = normValues.getDirectSource();
+              assert directSource.hasArray();
+              normBuffer = (byte[]) directSource.getArray();
+            }
+            if (reader.liveDocs == null) {
+              //optimized case for segments without deleted docs
+              output.writeBytes(normBuffer, maxDoc);
+              numMergedDocsForField += maxDoc;
+            } else {
+              // this segment has deleted docs, so we have to
+              // check for every doc if it is deleted or not
+              final Bits liveDocs = reader.liveDocs;
+              for (int k = 0; k < maxDoc; k++) {
+                if (liveDocs.get(k)) {
+                  numMergedDocsForField++;
+                  output.writeByte(normBuffer[k]);
+                }
+              }
+            }
+            mergeState.checkAbort.work(maxDoc);
+          }
+          assert numMergedDocs == 0 || numMergedDocs == numMergedDocsForField;
+          numMergedDocs = numMergedDocsForField;
+        }
+      }
+      this.numTotalDocs = numMergedDocs;
+    }
+  }
+
+  @Override
+  public void abort() {
+    try {
+      try {
+        if (writer != null) {
+          writer.abort();
+        }
+      } finally {
+        directory.deleteFile(IndexFileNames.segmentFileName(segment, "",
+            NORMS_EXTENSION));
+      }
+    } catch (IOException e) {
+      // ignore
+    }
+  }
+}

Property changes on: lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xNormsConsumer.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
Index: lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xCodec.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xCodec.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xCodec.java	(working copy)
@@ -31,7 +31,6 @@
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.lucene40.Lucene40FieldInfosFormat;
-import org.apache.lucene.codecs.lucene40.Lucene40NormsFormat;
 import org.apache.lucene.codecs.lucene40.Lucene40SegmentInfosFormat;
 import org.apache.lucene.codecs.lucene40.Lucene40StoredFieldsFormat;
 import org.apache.lucene.codecs.lucene40.Lucene40TermVectorsFormat;
@@ -65,7 +64,7 @@
   private final SegmentInfosFormat infosFormat = new Lucene40SegmentInfosFormat();
   
   // TODO: this should really be a different impl
-  private final NormsFormat normsFormat = new Lucene40NormsFormat();
+  private final NormsFormat normsFormat = new Lucene3xNormsFormat();
   
   // 3.x doesn't support docvalues
   private final DocValuesFormat docValuesFormat = new DocValuesFormat() {
Index: lucene/src/java/org/apache/lucene/codecs/sep/SepDocValuesConsumer.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/sep/SepDocValuesConsumer.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/sep/SepDocValuesConsumer.java	(working copy)
@@ -18,6 +18,7 @@
  */
 
 import java.io.IOException;
+import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.lucene.codecs.DocValuesWriterBase;
@@ -28,6 +29,7 @@
 import org.apache.lucene.index.PerDocWriteState;
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
 
 /**
  * Implementation of PerDocConsumer that uses separate files.
@@ -35,10 +37,11 @@
  */
 public class SepDocValuesConsumer extends DocValuesWriterBase {
   private final Directory directory;
-  
+  private final FieldInfos fieldInfos;
   public SepDocValuesConsumer(PerDocWriteState state) throws IOException {
     super(state);
     this.directory = state.directory;
+    fieldInfos = state.fieldInfos;
   }
   
   @Override
@@ -49,10 +52,13 @@
   @SuppressWarnings("fallthrough")
   public static void files(Directory dir, SegmentInfo segmentInfo,
       Set<String> files) throws IOException {
-    FieldInfos fieldInfos = segmentInfo.getFieldInfos();
+    files(dir, segmentInfo.getFieldInfos(), segmentInfo.name, files);
+  }
+  
+  private static void files(Directory dir,FieldInfos fieldInfos, String segmentName, Set<String> files)  {
     for (FieldInfo fieldInfo : fieldInfos) {
       if (fieldInfo.hasDocValues()) {
-        String filename = docValuesId(segmentInfo.name, fieldInfo.number);
+        String filename = docValuesId(segmentName, fieldInfo.number);
         switch (fieldInfo.getDocValuesType()) {
           case BYTES_FIXED_DEREF:
           case BYTES_VAR_DEREF:
@@ -61,8 +67,12 @@
           case BYTES_VAR_SORTED:
             files.add(IndexFileNames.segmentFileName(filename, "",
                 Writer.INDEX_EXTENSION));
+            try {
             assert dir.fileExists(IndexFileNames.segmentFileName(filename, "",
                 Writer.INDEX_EXTENSION));
+            } catch (IOException e) {
+            }
+            break;
             // until here all types use an index
           case BYTES_FIXED_STRAIGHT:
           case FLOAT_32:
@@ -74,8 +84,11 @@
           case FIXED_INTS_8:
             files.add(IndexFileNames.segmentFileName(filename, "",
                 Writer.DATA_EXTENSION));
+          try {
             assert dir.fileExists(IndexFileNames.segmentFileName(filename, "",
                 Writer.DATA_EXTENSION));
+          } catch (IOException e) {
+          }
             break;
           default:
             assert false;
@@ -83,4 +96,11 @@
       }
     }
   }
+
+  @Override
+  public void abort() {
+    Set<String> files = new HashSet<String>();
+    files(directory, fieldInfos, segmentName, files);
+    IOUtils.deleteFilesIgnoringExceptions(directory, files.toArray(new String[0]));
+  }
 }
Index: lucene/src/java/org/apache/lucene/codecs/PerDocConsumer.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/PerDocConsumer.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/PerDocConsumer.java	(working copy)
@@ -20,7 +20,9 @@
 
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MergeState;
+import org.apache.lucene.index.DocValues.Type;
 
 /**
  * Abstract API that consumes per document values. Concrete implementations of
@@ -32,7 +34,7 @@
  * 
  * @lucene.experimental
  */
-public abstract class PerDocConsumer implements Closeable{
+public abstract class PerDocConsumer implements Closeable {
   /** Adds a new DocValuesField */
   public abstract DocValuesConsumer addValuesField(DocValues.Type type, FieldInfo field)
       throws IOException;
@@ -46,14 +48,57 @@
 
     for (FieldInfo fieldInfo : mergeState.fieldInfos) {
       mergeState.fieldInfo = fieldInfo; // set the field we are merging
-      if (fieldInfo.hasDocValues()) {
+      if (canMerge(fieldInfo)) {
         for (int i = 0; i < docValues.length; i++) {
-          docValues[i] = mergeState.readers.get(i).reader.docValues(fieldInfo.name);
+          docValues[i] = pull(mergeState.readers.get(i).reader, fieldInfo);
         }
-        final DocValuesConsumer docValuesConsumer = addValuesField(fieldInfo.getDocValuesType(), fieldInfo);
+        final DocValuesConsumer docValuesConsumer = addValuesField(getDocValuesType(fieldInfo), fieldInfo);
         assert docValuesConsumer != null;
         docValuesConsumer.merge(mergeState, docValues);
       }
     }
-  }  
+  }
+
+  /**
+   * Pulls a {@link DocValues} instance from the given reader for the given
+   * {@link FieldInfo}. This method is used for merging and uses
+   * {@link IndexReader#docValues(String)} by default.
+   * <p>
+   * To enable {@link DocValues} merging for different {@link DocValues} than
+   * the default override this method accordingly.
+   * <p>
+   */
+  protected DocValues pull(IndexReader reader, FieldInfo info) throws IOException {
+    return reader.docValues(info.name);
+  }
+  
+  /**
+   * Returns <code>true</code> iff the given field can be merged ie. has {@link DocValues}.
+   * By default this method uses {@link FieldInfo#hasDocValues()}.
+   * <p>
+   * To enable {@link DocValues} merging for different {@link DocValues} than
+   * the default override this method accordingly.
+   * <p>
+   */
+  protected boolean canMerge(FieldInfo info) {
+    return info.hasDocValues();
+  }
+  
+  /**
+   * Returns the {@link DocValues} {@link Type} for the given {@link FieldInfo}.
+   * By default this method uses {@link FieldInfo#getDocValuesType()}.
+   * <p>
+   * To enable {@link DocValues} merging for different {@link DocValues} than
+   * the default override this method accordingly.
+   * <p>
+   */
+  protected Type getDocValuesType(FieldInfo info) {
+    return info.getDocValuesType();
+  }
+  
+  /**
+   * Called during indexing if the indexing session is aborted due to a unrecoverable exception.
+   * This method should cleanup all resources.
+   */
+  public abstract void abort();
 }
Index: lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsReader.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsReader.java	(working copy)
@@ -1,106 +0,0 @@
-package org.apache.lucene.codecs.simpletext;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.lucene.codecs.NormsReader;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.StringHelper;
-
-import static org.apache.lucene.codecs.simpletext.SimpleTextNormsWriter.*;
-
-/**
- * Reads plain-text norms
- * <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
- * @lucene.experimental
- */
-public class SimpleTextNormsReader extends NormsReader {
-  private Map<String,byte[]> norms = new HashMap<String,byte[]>();
-  
-  public SimpleTextNormsReader(Directory directory, SegmentInfo si, FieldInfos fields, IOContext context) throws IOException {
-    if (fields.hasNorms()) {
-      readNorms(directory.openInput(IndexFileNames.segmentFileName(si.name, "", NORMS_EXTENSION), context), si.docCount);
-    }
-  }
-  
-  // we read in all the norms up front into a hashmap
-  private void readNorms(IndexInput in, int maxDoc) throws IOException {
-    BytesRef scratch = new BytesRef();
-    boolean success = false;
-    try {
-      SimpleTextUtil.readLine(in, scratch);
-      while (!scratch.equals(END)) {
-        assert StringHelper.startsWith(scratch, FIELD);
-        String fieldName = readString(FIELD.length, scratch);
-        byte bytes[] = new byte[maxDoc];
-        for (int i = 0; i < bytes.length; i++) {
-          SimpleTextUtil.readLine(in, scratch);
-          assert StringHelper.startsWith(scratch, DOC);
-          SimpleTextUtil.readLine(in, scratch);
-          assert StringHelper.startsWith(scratch, NORM);
-          bytes[i] = scratch.bytes[scratch.offset + NORM.length];
-        }
-        norms.put(fieldName, bytes);
-        SimpleTextUtil.readLine(in, scratch);
-        assert StringHelper.startsWith(scratch, FIELD) || scratch.equals(END);
-      }
-      success = true;
-    } finally {
-      if (success) {
-        IOUtils.close(in);
-      } else {
-        IOUtils.closeWhileHandlingException(in);
-      }
-    }
-  }
-  
-  @Override
-  public byte[] norms(String name) throws IOException {
-    return norms.get(name);
-  }
-  
-  @Override
-  public void close() throws IOException {
-    norms = null;
-  }
-  
-  static void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
-    // TODO: This is what SI always did... but we can do this cleaner?
-    // like first FI that has norms but doesn't have separate norms?
-    final String normsFileName = IndexFileNames.segmentFileName(info.name, "", SimpleTextNormsWriter.NORMS_EXTENSION);
-    if (dir.fileExists(normsFileName)) {
-      files.add(normsFileName);
-    }
-  }
-  
-  private String readString(int offset, BytesRef scratch) {
-    return new String(scratch.bytes, scratch.offset+offset, scratch.length-offset, IOUtils.CHARSET_UTF_8);
-  }
-}
Index: lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java	(working copy)
@@ -166,9 +166,7 @@
     try {
       close();
     } catch (IOException ignored) {}
-    try {
-      directory.deleteFile(IndexFileNames.segmentFileName(segment, "", FIELDS_EXTENSION));
-    } catch (IOException ignored) {}
+    IOUtils.deleteFilesIgnoringExceptions(directory, IndexFileNames.segmentFileName(segment, "", FIELDS_EXTENSION));
   }
 
   @Override
Index: lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsWriter.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsWriter.java	(working copy)
@@ -1,114 +0,0 @@
-package org.apache.lucene.codecs.simpletext;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import org.apache.lucene.codecs.NormsWriter;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.IOUtils;
-
-/**
- * Writes plain-text norms
- * <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
- * @lucene.experimental
- */
-public class SimpleTextNormsWriter extends NormsWriter {
-  private IndexOutput out;
-  private int docid = 0;
-    
-  /** Extension of norms file */
-  static final String NORMS_EXTENSION = "len";
-  
-  private final BytesRef scratch = new BytesRef();
-  
-  final static BytesRef END     = new BytesRef("END");
-  final static BytesRef FIELD   = new BytesRef("field ");
-  final static BytesRef DOC     = new BytesRef("  doc ");
-  final static BytesRef NORM    = new BytesRef("    norm ");
-  
-  public SimpleTextNormsWriter(Directory directory, String segment, IOContext context) throws IOException {
-    final String normsFileName = IndexFileNames.segmentFileName(segment, "", NORMS_EXTENSION);
-    out = directory.createOutput(normsFileName, context);
-  }
-
-  @Override
-  public void startField(FieldInfo info) throws IOException {
-    assert info.omitNorms == false;
-    docid = 0;
-    write(FIELD);
-    write(info.name);
-    newLine();
-  }
-    
-  @Override
-  public void writeNorm(byte norm) throws IOException {
-    write(DOC);
-    write(Integer.toString(docid));
-    newLine();
-    
-    write(NORM);
-    write(norm);
-    newLine();
-    docid++;
-  }
-    
-  @Override
-  public void finish(int numDocs) throws IOException {
-    if (docid != numDocs) {
-      throw new RuntimeException("mergeNorms produced an invalid result: docCount is " + numDocs
-          + " but only saw " + docid + " file=" + out.toString() + "; now aborting this merge to prevent index corruption");
-    }
-    write(END);
-    newLine();
-  }
-
-  @Override
-  public void close() throws IOException {
-    try {
-      IOUtils.close(out);
-    } finally {
-      out = null;
-    }
-  }
-  
-  private void write(String s) throws IOException {
-    SimpleTextUtil.write(out, s, scratch);
-  }
-  
-  private void write(BytesRef bytes) throws IOException {
-    SimpleTextUtil.write(out, bytes);
-  }
-  
-  private void write(byte b) throws IOException {
-    scratch.grow(1);
-    scratch.bytes[scratch.offset] = b;
-    scratch.length = 1;
-    SimpleTextUtil.write(out, scratch);
-  }
-  
-  private void newLine() throws IOException {
-    SimpleTextUtil.writeNewline(out);
-  }
-}
Index: lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsProducer.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsProducer.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsProducer.java	(working copy)
@@ -0,0 +1,163 @@
+package org.apache.lucene.codecs.simpletext;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static org.apache.lucene.codecs.simpletext.SimpleTextNormsConsumer.DOC;
+import static org.apache.lucene.codecs.simpletext.SimpleTextNormsConsumer.END;
+import static org.apache.lucene.codecs.simpletext.SimpleTextNormsConsumer.FIELD;
+import static org.apache.lucene.codecs.simpletext.SimpleTextNormsConsumer.NORM;
+import static org.apache.lucene.codecs.simpletext.SimpleTextNormsConsumer.NORMS_EXTENSION;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.codecs.PerDocProducer;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocValues.Source;
+import org.apache.lucene.index.DocValues.Type;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.StringHelper;
+
+/**
+ * Reads plain-text norms
+ * <p>
+ * <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
+ * @lucene.experimental
+ */
+public class SimpleTextNormsProducer extends PerDocProducer {
+  
+  Map<String,NormsDocValues> norms = new HashMap<String,NormsDocValues>();
+  
+  public SimpleTextNormsProducer(Directory directory, SegmentInfo si, FieldInfos fields, IOContext context) throws IOException {
+    if (fields.hasNorms()) {
+      readNorms(directory.openInput(IndexFileNames.segmentFileName(si.name, "", NORMS_EXTENSION), context), si.docCount);
+    }
+  }
+  
+  // we read in all the norms up front into a hashmap
+  private void readNorms(IndexInput in, int maxDoc) throws IOException {
+    BytesRef scratch = new BytesRef();
+    boolean success = false;
+    try {
+      SimpleTextUtil.readLine(in, scratch);
+      while (!scratch.equals(END)) {
+        assert StringHelper.startsWith(scratch, FIELD);
+        final String fieldName = readString(FIELD.length, scratch);
+        byte bytes[] = new byte[maxDoc];
+        for (int i = 0; i < bytes.length; i++) {
+          SimpleTextUtil.readLine(in, scratch);
+          assert StringHelper.startsWith(scratch, DOC);
+          SimpleTextUtil.readLine(in, scratch);
+          assert StringHelper.startsWith(scratch, NORM);
+          bytes[i] = scratch.bytes[scratch.offset + NORM.length];
+        }
+        norms.put(fieldName, new NormsDocValues(new Norm(bytes)));
+        SimpleTextUtil.readLine(in, scratch);
+        assert StringHelper.startsWith(scratch, FIELD) || scratch.equals(END);
+      }
+      success = true;
+    } finally {
+      if (success) {
+        IOUtils.close(in);
+      } else {
+        IOUtils.closeWhileHandlingException(in);
+      }
+    }
+  }
+  
+  @Override
+  public void close() throws IOException {
+    norms = null;
+  }
+  
+  static void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
+    // TODO: This is what SI always did... but we can do this cleaner?
+    // like first FI that has norms but doesn't have separate norms?
+    final String normsFileName = IndexFileNames.segmentFileName(info.name, "", SimpleTextNormsConsumer.NORMS_EXTENSION);
+    if (dir.fileExists(normsFileName)) {
+      files.add(normsFileName);
+    }
+  }
+  
+  private String readString(int offset, BytesRef scratch) {
+    return new String(scratch.bytes, scratch.offset+offset, scratch.length-offset, IOUtils.CHARSET_UTF_8);
+  }
+
+  @Override
+  public DocValues docValues(String field) throws IOException {
+    return norms.get(field);
+  }
+  
+  private class NormsDocValues extends DocValues {
+    private final Source source;
+    public NormsDocValues(Source source) {
+      this.source = source;
+    }
+
+    @Override
+    public Source load() throws IOException {
+      return source;
+    }
+
+    @Override
+    public Source getDirectSource() throws IOException {
+      return getSource();
+    }
+
+    @Override
+    public Type type() {
+      return Type.BYTES_FIXED_STRAIGHT;
+    }
+  }
+  
+  static final class Norm extends Source {
+    protected Norm(byte[] bytes) {
+      super(Type.BYTES_FIXED_STRAIGHT);
+      this.bytes = bytes;
+    }
+    final byte bytes[];
+    
+    @Override
+    public BytesRef getBytes(int docID, BytesRef ref) {
+      ref.bytes = bytes;
+      ref.offset = docID;
+      ref.length = 1;
+      return ref;
+    }
+
+    @Override
+    public boolean hasArray() {
+      return true;
+    }
+
+    @Override
+    public Object getArray() {
+      return bytes;
+    }
+    
+  }
+}

Property changes on: lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsProducer.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
Index: lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsFormat.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsFormat.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsFormat.java	(working copy)
@@ -21,13 +21,12 @@
 import java.util.Set;
 
 import org.apache.lucene.codecs.NormsFormat;
-import org.apache.lucene.codecs.NormsReader;
-import org.apache.lucene.codecs.NormsWriter;
-import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.codecs.PerDocConsumer;
+import org.apache.lucene.codecs.PerDocProducer;
+import org.apache.lucene.index.PerDocWriteState;
 import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
 
 /**
  * plain-text norms format
@@ -36,19 +35,28 @@
  * @lucene.experimental
  */
 public class SimpleTextNormsFormat extends NormsFormat {
+  
+  @Override
+  public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
+    return new SimpleTextNormsConsumer(state.directory, state.segmentName, state.context);
+  }
 
   @Override
-  public NormsReader normsReader(Directory dir, SegmentInfo info, FieldInfos fields, IOContext context, Directory separateNormsDir) throws IOException {
-    return new SimpleTextNormsReader(dir, info, fields, context);
+  public PerDocProducer docsProducer(SegmentReadState state) throws IOException {
+    return new SimpleTextNormsProducer(state.dir, state.segmentInfo, state.fieldInfos, state.context);
   }
 
   @Override
-  public NormsWriter normsWriter(SegmentWriteState state) throws IOException {
-    return new SimpleTextNormsWriter(state.directory, state.segmentName, state.context);
+  public void files(Directory dir, SegmentInfo info, Set<String> files)
+      throws IOException {
+    SimpleTextNormsConsumer.files(dir, info, files);
+
   }
 
   @Override
-  public void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
-    SimpleTextNormsReader.files(dir, info, files);
+  public PerDocProducer docsProducer(SegmentReadState state,
+      Directory separateNormsDir) throws IOException {
+    return docsProducer(state);
   }
+   
 }
Index: lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsConsumer.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsConsumer.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsConsumer.java	(working copy)
@@ -0,0 +1,276 @@
+package org.apache.lucene.codecs.simpletext;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.lucene.codecs.DocValuesConsumer;
+import org.apache.lucene.codecs.PerDocConsumer;
+import org.apache.lucene.index.DocValue;
+import org.apache.lucene.index.DocValues.Type;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+
+/**
+ * Writes plain-text norms
+ * <p>
+ * <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
+ * 
+ * @lucene.experimental
+ */
+public class SimpleTextNormsConsumer extends PerDocConsumer {
+  
+  /** Extension of norms file */
+  static final String NORMS_EXTENSION = "len";
+  final static BytesRef END = new BytesRef("END");
+  final static BytesRef FIELD = new BytesRef("field ");
+  final static BytesRef DOC = new BytesRef("  doc ");
+  final static BytesRef NORM = new BytesRef("    norm ");
+  
+  private NormsWriter writer;
+
+  private final Directory directory;
+
+  private final String segment;
+
+  private final IOContext context;
+
+  public SimpleTextNormsConsumer(Directory directory, String segment,
+      IOContext context) throws IOException {
+    this.directory = directory;
+    this.segment = segment;
+    this.context = context;
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (writer != null) {
+      writer.finish();
+    }
+  }
+  
+  @Override
+  protected DocValues pull(IndexReader reader, FieldInfo info)
+      throws IOException {
+    return reader.normValues(info.name);
+  }
+
+  @Override
+  protected boolean canMerge(FieldInfo info) {
+    return !info.omitNorms && info.isIndexed;
+  }
+
+  @Override
+  protected Type getDocValuesType(FieldInfo info) {
+    return Type.BYTES_FIXED_STRAIGHT;
+  }
+
+  @Override
+  public DocValuesConsumer addValuesField(Type type, FieldInfo fieldInfo)
+      throws IOException {
+    return new SimpleTextNormsDocValuesConsumer(fieldInfo);
+  }
+
+  @Override
+  public void abort() {
+    if (writer != null) {
+      try {
+        writer.abort();
+      } catch (IOException e) {
+      }
+    }
+  }
+
+  private class SimpleTextNormsDocValuesConsumer extends DocValuesConsumer {
+    // Holds all docID/norm pairs we've seen
+    int[] docIDs = new int[1];
+    byte[] norms = new byte[1];
+    int upto;
+    private final FieldInfo fi;
+
+    public SimpleTextNormsDocValuesConsumer(FieldInfo fieldInfo) {
+      fi = fieldInfo;
+    }
+
+    @Override
+    public void add(int docID, DocValue docValue) throws IOException {
+      add(docID, docValue.getBytes());
+    }
+    
+    public void add(int docID, BytesRef value) throws IOException {
+      if (docIDs.length <= upto) {
+        assert docIDs.length == upto;
+        docIDs = ArrayUtil.grow(docIDs, 1 + upto);
+      }
+      if (norms.length <= upto) {
+        assert norms.length == upto;
+        norms = ArrayUtil.grow(norms, 1 + upto);
+      }
+      assert value.length == 1;
+      norms[upto] = value.bytes[value.offset];
+      docIDs[upto] = docID;
+      upto++;
+    }
+
+    @Override
+    public void finish(int docCount) throws IOException {
+      final NormsWriter normsWriter = getNormsWriter();
+      boolean success = false;
+      try {
+        int uptoDoc = 0;
+        normsWriter.setNumTotalDocs(docCount);
+        if (upto > 0) {
+          normsWriter.startField(fi);
+          int docID = 0;
+          for (; docID < docCount; docID++) {
+            if (uptoDoc < upto && docIDs[uptoDoc] == docID) {
+              normsWriter.writeNorm(norms[uptoDoc]);
+              uptoDoc++;
+            } else {
+              normsWriter.writeNorm((byte) 0);
+            }
+          }
+          // we should have consumed every norm
+          assert uptoDoc == upto;
+
+        } else {
+          // Fill entire field with default norm:
+          normsWriter.startField(fi);
+          for (; upto < docCount; upto++)
+            normsWriter.writeNorm((byte) 0);
+        }
+        success = true;
+      } finally {
+        if (!success) {
+          normsWriter.abort();
+        }
+      }
+    }
+  }
+
+  public NormsWriter getNormsWriter() throws IOException {
+    if (writer == null) {
+      writer = new NormsWriter(directory, segment, context);
+    }
+    return writer;
+  }
+
+  private static class NormsWriter {
+
+    private final IndexOutput output;
+    private int numTotalDocs = 0;
+    private int docid = 0;
+
+    private final BytesRef scratch = new BytesRef();
+
+
+    public NormsWriter(Directory directory, String segment, IOContext context)
+        throws IOException {
+      final String normsFileName = IndexFileNames.segmentFileName(segment, "",
+          NORMS_EXTENSION);
+      output = directory.createOutput(normsFileName, context);
+
+    }
+
+    public void startField(FieldInfo info) throws IOException {
+      assert info.omitNorms == false;
+      docid = 0;
+      write(FIELD);
+      write(info.name);
+      newLine();
+    }
+
+    public void writeNorm(byte norm) throws IOException {
+      write(DOC);
+      write(Integer.toString(docid));
+      newLine();
+
+      write(NORM);
+      write(norm);
+      newLine();
+      docid++;
+    }
+
+    public void finish(int numDocs) throws IOException {
+      if (docid != numDocs) {
+        throw new RuntimeException(
+            "mergeNorms produced an invalid result: docCount is " + numDocs
+                + " but only saw " + docid + " file=" + output.toString()
+                + "; now aborting this merge to prevent index corruption");
+      }
+      write(END);
+      newLine();
+    }
+
+    private void write(String s) throws IOException {
+      SimpleTextUtil.write(output, s, scratch);
+    }
+
+    private void write(BytesRef bytes) throws IOException {
+      SimpleTextUtil.write(output, bytes);
+    }
+
+    private void write(byte b) throws IOException {
+      scratch.grow(1);
+      scratch.bytes[scratch.offset] = b;
+      scratch.length = 1;
+      SimpleTextUtil.write(output, scratch);
+    }
+
+    private void newLine() throws IOException {
+      SimpleTextUtil.writeNewline(output);
+    }
+
+    public void setNumTotalDocs(int numTotalDocs) {
+      assert this.numTotalDocs == 0 || numTotalDocs == this.numTotalDocs;
+      this.numTotalDocs = numTotalDocs;
+    }
+
+    public void abort() throws IOException {
+      IOUtils.close(output);
+    }
+
+    public void finish() throws IOException {
+      finish(numTotalDocs);
+      IOUtils.close(output);
+    }
+  }
+
+  public static void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
+    FieldInfos fieldInfos = info.getFieldInfos();
+    
+    for (FieldInfo fieldInfo : fieldInfos) {
+      if (!fieldInfo.omitNorms && fieldInfo.isIndexed) {
+        files.add(IndexFileNames.segmentFileName(info.name, "",
+            NORMS_EXTENSION));  
+        break;
+      }
+    }
+  }
+}

Property changes on: lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsConsumer.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
Index: lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsWriter.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsWriter.java	(working copy)
@@ -149,10 +149,7 @@
     try {
       close();
     } catch (IOException ignored) {}
-    
-    try {
-      directory.deleteFile(IndexFileNames.segmentFileName(segment, "", VECTORS_EXTENSION));
-    } catch (IOException ignored) {}
+    IOUtils.deleteFilesIgnoringExceptions(directory, IndexFileNames.segmentFileName(segment, "", VECTORS_EXTENSION));
   }
 
   @Override
Index: lucene/src/java/org/apache/lucene/codecs/NormsFormat.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/NormsFormat.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/NormsFormat.java	(working copy)
@@ -20,19 +20,19 @@
 import java.io.IOException;
 import java.util.Set;
 
-import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.PerDocWriteState;
 import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
 
 /**
  * format for normalization factors
  */
 public abstract class NormsFormat {
-  /** Note: separateNormsDir should not be used! */
-  public abstract NormsReader normsReader(Directory dir, SegmentInfo info, FieldInfos fields, IOContext context, Directory separateNormsDir) throws IOException;
-  public abstract NormsWriter normsWriter(SegmentWriteState state) throws IOException;
+  public abstract PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException;
+  @Deprecated
+  public abstract PerDocProducer docsProducer(SegmentReadState state, Directory separateNormsDir) throws IOException;
+  public abstract PerDocProducer docsProducer(SegmentReadState state) throws IOException;
   public abstract void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException;
   
   /** 
Index: lucene/src/java/org/apache/lucene/codecs/NormsReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/NormsReader.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/NormsReader.java	(working copy)
@@ -1,26 +0,0 @@
-package org.apache.lucene.codecs;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.Closeable;
-import java.io.IOException;
-
-//simple api just for now before switching to docvalues apis
-public abstract class NormsReader implements Closeable {
-  public abstract byte[] norms(String name) throws IOException;
-}
Index: lucene/src/java/org/apache/lucene/codecs/BlockTreeTermsWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/BlockTreeTermsWriter.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/BlockTreeTermsWriter.java	(working copy)
@@ -17,10 +17,7 @@
  * limitations under the License.
  */
 
-import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
 import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.List;
@@ -42,7 +39,6 @@
 import org.apache.lucene.util.fst.BytesRefFSTEnum;
 import org.apache.lucene.util.fst.FST;
 import org.apache.lucene.util.fst.NoOutputs;
-import org.apache.lucene.util.fst.Util;
 
 /*
   TODO:
@@ -641,6 +637,7 @@
     }
 
     // for debugging
+    @SuppressWarnings("unused")
     private String toString(BytesRef b) {
       try {
         return b.utf8ToString() + " " + b;
Index: lucene/src/java/org/apache/lucene/codecs/NormsWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/codecs/NormsWriter.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/codecs/NormsWriter.java	(working copy)
@@ -1,70 +0,0 @@
-package org.apache.lucene.codecs;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Arrays;
-
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.MergeState;
-import org.apache.lucene.util.Bits;
-
-// simple api just for now before switching to docvalues apis
-public abstract class NormsWriter implements Closeable {
-
-  // TODO: I think IW should set info.normValueType from Similarity,
-  // and then this method just returns DocValuesConsumer
-  public abstract void startField(FieldInfo info) throws IOException;
-  public abstract void writeNorm(byte norm) throws IOException;
-  public abstract void finish(int numDocs) throws IOException;
-  
-  public int merge(MergeState mergeState) throws IOException {
-    int numMergedDocs = 0;
-    for (FieldInfo fi : mergeState.fieldInfos) {
-      if (fi.isIndexed && !fi.omitNorms) {
-        startField(fi);
-        int numMergedDocsForField = 0;
-        for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
-          final int maxDoc = reader.reader.maxDoc();
-          byte normBuffer[] = reader.reader.norms(fi.name);
-          if (normBuffer == null) {
-            // Can be null if this segment doesn't have
-            // any docs with this field
-            normBuffer = new byte[maxDoc];
-            Arrays.fill(normBuffer, (byte)0);
-          }
-          // this segment has deleted docs, so we have to
-          // check for every doc if it is deleted or not
-          final Bits liveDocs = reader.liveDocs;
-          for (int k = 0; k < maxDoc; k++) {
-            if (liveDocs == null || liveDocs.get(k)) {
-              writeNorm(normBuffer[k]);
-              numMergedDocsForField++;
-            }
-          }
-          mergeState.checkAbort.work(maxDoc);
-        }
-        assert numMergedDocs == 0 || numMergedDocs == numMergedDocsForField;
-        numMergedDocs = numMergedDocsForField;
-      }
-    }
-    finish(numMergedDocs);
-    return numMergedDocs;
-  }
-}
Index: lucene/src/java/org/apache/lucene/index/DocInverterPerField.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/DocInverterPerField.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/DocInverterPerField.java	(working copy)
@@ -18,7 +18,6 @@
  */
 
 import java.io.IOException;
-import java.io.Reader;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
@@ -34,7 +33,6 @@
 
 final class DocInverterPerField extends DocFieldConsumerPerField {
 
-  final private DocInverter parent;
   final FieldInfo fieldInfo;
   final InvertedDocConsumerPerField consumer;
   final InvertedDocEndConsumerPerField endConsumer;
@@ -42,7 +40,6 @@
   final FieldInvertState fieldState;
 
   public DocInverterPerField(DocInverter parent, FieldInfo fieldInfo) {
-    this.parent = parent;
     this.fieldInfo = fieldInfo;
     docState = parent.docState;
     fieldState = parent.fieldState;
Index: lucene/src/java/org/apache/lucene/index/MultiDocValues.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/MultiDocValues.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/MultiDocValues.java	(working copy)
@@ -33,6 +33,13 @@
  * @lucene.internal
  */
 public class MultiDocValues extends DocValues {
+  
+  private static DocValuesPuller DEFAULT_PULLER = new DocValuesPuller();
+  private static final DocValuesPuller NORMS_PULLER = new DocValuesPuller() {
+    public DocValues pull(IndexReader reader, String field) throws IOException {
+      return reader.normValues(field);
+    }
+  };
 
   public static class DocValuesSlice {
     public final static DocValuesSlice[] EMPTY_ARRAY = new DocValuesSlice[0];
@@ -46,6 +53,12 @@
       this.length = length;
     }
   }
+  
+  private static class DocValuesPuller {
+    public DocValues pull(IndexReader reader, String field) throws IOException {
+      return reader.docValues(field);
+    }
+  }
 
   private DocValuesSlice[] slices;
   private int[] starts;
@@ -58,7 +71,6 @@
     this.type = promotedType.type();
     this.valueSize = promotedType.getValueSize();
   }
-  
   /**
    * Returns a single {@link DocValues} instance for this field, merging
    * their values on the fly.
@@ -68,15 +80,32 @@
    * sub-readers (using {@link Gather}) and iterate through them yourself.
    */
   public static DocValues getDocValues(IndexReader r, final String field) throws IOException {
+    return getDocValues(r, field, DEFAULT_PULLER);
+  }
+  
+  /**
+   * Returns a single {@link DocValues} instance for this norms field, merging
+   * their values on the fly.
+   * 
+   * <p>
+   * <b>NOTE</b>: this is a slow way to access DocValues. It's better to get the
+   * sub-readers (using {@link Gather}) and iterate through them yourself.
+   */
+  public static DocValues getNormDocValues(IndexReader r, final String field) throws IOException {
+    return getDocValues(r, field, NORMS_PULLER);
+  }
+  
+ 
+  private static DocValues getDocValues(IndexReader r, final String field, final DocValuesPuller puller) throws IOException {
     final IndexReader[] subs = r.getSequentialSubReaders();
     if (subs == null) {
       // already an atomic reader
-      return r.docValues(field);
+      return puller.pull(r, field);
     } else if (subs.length == 0) {
       // no fields
       return null;
     } else if (subs.length == 1) {
-      return getDocValues(subs[0], field);
+      return getDocValues(subs[0], field, puller);
     } else {      
       final List<DocValuesSlice> slices = new ArrayList<DocValuesSlice>();
       
@@ -89,7 +118,7 @@
       new ReaderUtil.Gather(r) {
         @Override
         protected void add(int base, IndexReader r) throws IOException {
-          final DocValues d = r.docValues(field);
+          final DocValues d = puller.pull(r, field);
           if (d != null) {
             TypePromoter incoming = TypePromoter.create(d.type(), d.getValueSize());
             promotedType[0] = promotedType[0].promote(incoming);
Index: lucene/src/java/org/apache/lucene/index/ParallelReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/ParallelReader.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/ParallelReader.java	(working copy)
@@ -427,4 +427,11 @@
     IndexReader reader = fieldToReader.get(field);
     return reader == null ? null : MultiDocValues.getDocValues(reader, field);
   }
+  
+  // TODO: I suspect this is completely untested!!!!!
+  @Override
+  public DocValues normValues(String field) throws IOException {
+    IndexReader reader = fieldToReader.get(field);
+    return reader == null ? null : MultiDocValues.getNormDocValues(reader, field);
+  }
 }
Index: lucene/src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SegmentReader.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/SegmentReader.java	(working copy)
@@ -18,16 +18,15 @@
  */
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Set;
 
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.codecs.PerDocProducer;
 import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.codecs.TermVectorsReader;
+import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.search.FieldCache; // javadocs
 import org.apache.lucene.store.IOContext;
@@ -265,7 +264,14 @@
   @Override
   public byte[] norms(String field) throws IOException {
     ensureOpen();
-    return core.norms.norms(field);
+    DocValues docValues = core.norms.docValues(field);
+    if (docValues != null) {
+      Source source = docValues.getSource();
+      assert source.hasArray(); // TODO cut over to source
+      return (byte[])source.getArray();  
+    }
+    return null;
+    
   }
 
   /** @lucene.internal */
@@ -352,6 +358,17 @@
     }
     return perDoc.docValues(field);
   }
+  
+  @Override
+  public DocValues normValues(String field) throws IOException {
+    ensureOpen();
+    final PerDocProducer perDoc = core.norms;
+    if (perDoc == null) {
+      return null;
+    }
+    return perDoc.docValues(field);
+  }
+  
 
   /**
    * Called when the shared core for this SegmentReader
Index: lucene/src/java/org/apache/lucene/index/InvertedDocEndConsumerPerField.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/InvertedDocEndConsumerPerField.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/InvertedDocEndConsumerPerField.java	(working copy)
@@ -1,5 +1,7 @@
 package org.apache.lucene.index;
 
+import java.io.IOException;
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -18,6 +20,6 @@
  */
 
 abstract class InvertedDocEndConsumerPerField {
-  abstract void finish();
+  abstract void finish() throws IOException;
   abstract void abort();
 }
Index: lucene/src/java/org/apache/lucene/index/BaseMultiReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/BaseMultiReader.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/BaseMultiReader.java	(working copy)
@@ -157,4 +157,9 @@
   public DocValues docValues(String field) throws IOException {
     throw new UnsupportedOperationException("please use MultiDocValues#getDocValues, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level DocValues");
   }
+  
+  @Override
+  public DocValues normValues(String field) throws IOException {
+    throw new UnsupportedOperationException("please use MultiDocValues#getNormValues, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Norm DocValues ");
+  }
 }
Index: lucene/src/java/org/apache/lucene/index/NormsConsumer.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/NormsConsumer.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/NormsConsumer.java	(working copy)
@@ -21,8 +21,12 @@
 import java.util.Collection;
 import java.util.Map;
 
+import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.NormsFormat;
-import org.apache.lucene.codecs.NormsWriter;
+import org.apache.lucene.codecs.PerDocConsumer;
+import org.apache.lucene.document.DocValuesField;
+import org.apache.lucene.index.DocValues.Type;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 
 // TODO FI: norms could actually be stored as doc store
@@ -33,14 +37,21 @@
  */
 
 final class NormsConsumer extends InvertedDocEndConsumer {
-  final NormsFormat normsFormat;
+  private final NormsFormat normsFormat;
+  private PerDocConsumer consumer;
+  private final DocumentsWriterPerThread dwpt;
   
   public NormsConsumer(DocumentsWriterPerThread dwpt) {
     normsFormat = dwpt.codec.normsFormat();
+    this.dwpt = dwpt;
   }
 
   @Override
-  public void abort() {}
+  public void abort(){
+    if (consumer != null) {
+      consumer.abort();
+    }
+  }
 
   // We only write the _X.nrm file at flush
   void files(Collection<String> files) {}
@@ -49,50 +60,39 @@
    *  not disabled */
   @Override
   public void flush(Map<FieldInfo,InvertedDocEndConsumerPerField> fieldsToFlush, SegmentWriteState state) throws IOException {
-    if (!state.fieldInfos.hasNorms()) {
-      return;
-    }
-
-    NormsWriter normsOut = null;
     boolean success = false;
+    boolean anythingFlushed = false;
     try {
-      normsOut = normsFormat.normsWriter(state);
-
-      for (FieldInfo fi : state.fieldInfos) {
-        final NormsConsumerPerField toWrite = (NormsConsumerPerField) fieldsToFlush.get(fi);
-        int upto = 0;
-        // we must check the final value of omitNorms for the fieldinfo, it could have 
-        // changed for this field since the first time we added it.
-        if (!fi.omitNorms && toWrite != null && toWrite.upto > 0) {
-          normsOut.startField(fi);
-          int docID = 0;
-          for (; docID < state.numDocs; docID++) {
-            if (upto < toWrite.upto && toWrite.docIDs[upto] == docID) {
-              normsOut.writeNorm(toWrite.norms[upto]);
-              upto++;
-            } else {
-              normsOut.writeNorm((byte) 0);
+      if (state.fieldInfos.hasNorms()) {
+        for (FieldInfo fi : state.fieldInfos) {
+          final NormsConsumerPerField toWrite = (NormsConsumerPerField) fieldsToFlush.get(fi);
+          if (!fi.omitNorms) {
+            if (toWrite != null) {
+              anythingFlushed = true;
+              toWrite.flush(state.numDocs);
+            } else if (fi.isIndexed) {
+              anythingFlushed = true;
+              // we must check the final value of omitNorms for the fieldinfo, it could have 
+              // changed for this field since the first time we added it.
+              final DocValuesConsumer valuesConsumer = newConsumer(new PerDocWriteState(state), fi);
+              final DocValue value = new DocValuesField("");
+              value.setBytes(new BytesRef(new byte[] {0x00}), Type.BYTES_FIXED_STRAIGHT);
+              valuesConsumer.add(state.numDocs-1, value);
+              valuesConsumer.finish(state.numDocs);
             }
           }
-
-          // we should have consumed every norm
-          assert upto == toWrite.upto;
-
-          toWrite.reset();
-        } else if (fi.isIndexed && !fi.omitNorms) {
-          // Fill entire field with default norm:
-          normsOut.startField(fi);
-          for(;upto<state.numDocs;upto++)
-            normsOut.writeNorm((byte) 0);
         }
+      } 
+      
+      success = true;
+      if (!anythingFlushed && consumer != null) {
+        consumer.abort();
       }
-      normsOut.finish(state.numDocs);
-      success = true;
     } finally {
       if (success) {
-        IOUtils.close(normsOut);
+        IOUtils.close(consumer);
       } else {
-        IOUtils.closeWhileHandlingException(normsOut);
+        IOUtils.closeWhileHandlingException(consumer);
       }
     }
   }
@@ -106,6 +106,16 @@
   @Override
   InvertedDocEndConsumerPerField addField(DocInverterPerField docInverterPerField,
       FieldInfo fieldInfo) {
-    return new NormsConsumerPerField(docInverterPerField, fieldInfo);
+    return new NormsConsumerPerField(docInverterPerField, fieldInfo, this);
   }
+  
+  DocValuesConsumer newConsumer(PerDocWriteState perDocWriteState,
+      FieldInfo fieldInfo) throws IOException {
+    if (consumer == null) {
+      consumer = normsFormat.docsConsumer(perDocWriteState);
+    }
+    DocValuesConsumer addValuesField = consumer.addValuesField(
+        Type.BYTES_FIXED_STRAIGHT, fieldInfo);
+    return addValuesField;
+  }
 }
Index: lucene/src/java/org/apache/lucene/index/NormsConsumerPerField.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/NormsConsumerPerField.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/NormsConsumerPerField.java	(working copy)
@@ -1,5 +1,4 @@
 package org.apache.lucene.index;
-
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -16,65 +15,74 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+import java.io.IOException;
 
+import org.apache.lucene.codecs.DocValuesConsumer;
+import org.apache.lucene.document.DocValuesField;
+import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
 
-/** Taps into DocInverter, as an InvertedDocEndConsumer,
- *  which is called at the end of inverting each field.  We
- *  just look at the length for the field (docState.length)
- *  and record the norm. */
-
-final class NormsConsumerPerField extends InvertedDocEndConsumerPerField implements Comparable<NormsConsumerPerField> {
-
-  final FieldInfo fieldInfo;
-  final DocumentsWriterPerThread.DocState docState;
-  final Similarity similarity;
+public class NormsConsumerPerField extends InvertedDocEndConsumerPerField implements Comparable<NormsConsumerPerField> {
+  private final FieldInfo fieldInfo;
+  private final DocumentsWriterPerThread.DocState docState;
+  private final Similarity similarity;
+  private final FieldInvertState fieldState;
+  private DocValuesConsumer consumer;
+  private final DocValue value = new DocValuesField("");
+  private final BytesRef spare = new BytesRef(1);
+  private final NormsConsumer parent;
   
-  // Holds all docID/norm pairs we've seen
-  int[] docIDs = new int[1];
-  byte[] norms = new byte[1];
-  int upto;
-
-  final FieldInvertState fieldState;
-
-  public void reset() {
-    // Shrink back if we are overallocated now:
-    docIDs = ArrayUtil.shrink(docIDs, upto);
-    norms = ArrayUtil.shrink(norms, upto);
-    upto = 0;
-  }
-
-  public NormsConsumerPerField(final DocInverterPerField docInverterPerField, final FieldInfo fieldInfo) {
+  public NormsConsumerPerField(final DocInverterPerField docInverterPerField, final FieldInfo fieldInfo, NormsConsumer parent) {
     this.fieldInfo = fieldInfo;
+    this.parent = parent;
     docState = docInverterPerField.docState;
     fieldState = docInverterPerField.fieldState;
     similarity = docState.similarityProvider.get(fieldInfo.name);
+    spare.length = 1;
+    spare.offset = 0;
+
   }
-
   @Override
-  void abort() {
-    upto = 0;
-  }
-
   public int compareTo(NormsConsumerPerField other) {
     return fieldInfo.name.compareTo(other.fieldInfo.name);
   }
-  
+
   @Override
-  void finish() {
+  void finish() throws IOException {
     if (fieldInfo.isIndexed && !fieldInfo.omitNorms) {
-      if (docIDs.length <= upto) {
-        assert docIDs.length == upto;
-        docIDs = ArrayUtil.grow(docIDs, 1+upto);
-      }
-      if (norms.length <= upto) {
-        assert norms.length == upto;
-        norms = ArrayUtil.grow(norms, 1+upto);
-      }
-      norms[upto] = similarity.computeNorm(fieldState);
-      docIDs[upto] = docState.docID;
-      upto++;
+      DocValuesConsumer consumer = getConsumer();
+      spare.bytes[0] = similarity.computeNorm(fieldState);
+      value.setBytes(spare, Type.BYTES_FIXED_STRAIGHT);
+      consumer.add(docState.docID, value);
+      
+    }    
+  }
+  
+  void flush(int docCount) throws IOException {
+    DocValuesConsumer consumer = this.consumer;
+    if (consumer == null && fieldInfo.isIndexed) {
+       consumer = getConsumer();
+      spare.bytes[0] = 0x00;
+      value.setBytes(spare, Type.BYTES_FIXED_STRAIGHT);
+      consumer.add(docCount-1, value);
+    } 
+    if (consumer != null) {
+      consumer.finish(docCount);
     }
   }
+  
+  private DocValuesConsumer getConsumer() throws IOException {
+    if (consumer == null) {
+      consumer = parent.newConsumer(docState.docWriter.newPerDocWriteState(""), fieldInfo);
+    }
+    return consumer;
+  }
+  
+
+  @Override
+  void abort() {
+    // intentional no-op: the shared PerDocConsumer is aborted by NormsConsumer.abort()
+  }
+
 }
Index: lucene/src/java/org/apache/lucene/index/FilterIndexReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/FilterIndexReader.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/FilterIndexReader.java	(working copy)
@@ -419,6 +419,12 @@
     ensureOpen();
     return in.docValues(field);
   }
+  
+  @Override
+  public DocValues normValues(String field) throws IOException {
+    ensureOpen();
+    return in.normValues(field);
+  }
 
   @Override
   public IndexCommit getIndexCommit() throws IOException {
Index: lucene/src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SegmentMerger.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/SegmentMerger.java	(working copy)
@@ -27,7 +27,6 @@
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.FieldInfosWriter;
 import org.apache.lucene.codecs.FieldsConsumer;
-import org.apache.lucene.codecs.NormsWriter;
 import org.apache.lucene.codecs.PerDocConsumer;
 import org.apache.lucene.codecs.StoredFieldsWriter;
 import org.apache.lucene.codecs.TermVectorsWriter;
@@ -125,8 +124,7 @@
     mergePerDoc(segmentWriteState);
     
     if (mergeState.fieldInfos.hasNorms()) {
-      int numMerged = mergeNorms(segmentWriteState);
-      assert numMerged == mergeState.mergedDocCount;
+      mergeNorms(segmentWriteState);
     }
 
     if (mergeState.fieldInfos.hasVectors()) {
@@ -379,20 +377,24 @@
         }
       }
   }
-
-  private int mergeNorms(SegmentWriteState segmentWriteState) throws IOException {
-    final NormsWriter writer = codec.normsFormat().normsWriter(segmentWriteState);
-    
+  
+  private void mergeNorms(SegmentWriteState segmentWriteState) throws IOException {
+    final PerDocConsumer docsConsumer = codec.normsFormat()
+        .docsConsumer(new PerDocWriteState(segmentWriteState));
+    // TODO: remove this check when 3.x indexes are no longer supported
+    // (3.x indexes don't have docvalues)
+    if (docsConsumer == null) {
+      return;
+    }
     boolean success = false;
     try {
-      int numMerged = writer.merge(mergeState);
+      docsConsumer.merge(mergeState);
       success = true;
-      return numMerged;
     } finally {
       if (success) {
-        IOUtils.close(writer);
+        IOUtils.close(docsConsumer);
       } else {
-        IOUtils.closeWhileHandlingException(writer);
+        IOUtils.closeWhileHandlingException(docsConsumer);
       }
     }
   }
Index: lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java	(working copy)
@@ -131,6 +131,17 @@
       }
     }
     
+    try {
+      PerDocConsumer perDocConsumer = perDocConsumers.get(0);
+      if (perDocConsumer != null) {
+        perDocConsumer.abort();  
+      }
+    } catch (Throwable t) {
+      if (th == null) {
+        th = t;
+      }
+    }
+    
     // If any errors occured, throw it.
     if (th != null) {
       if (th instanceof RuntimeException) throw (RuntimeException) th;
@@ -329,7 +340,6 @@
       perDocConsumer = dvFormat.docsConsumer(perDocWriteState);
       perDocConsumers.put(0, perDocConsumer);
     }
-
     DocValuesConsumer docValuesConsumer = perDocConsumer.addValuesField(valueType, fieldInfo);
     fieldInfo.setDocValuesType(valueType);
 
Index: lucene/src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexReader.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/IndexReader.java	(working copy)
@@ -735,6 +735,7 @@
    *
    * @see org.apache.lucene.document.Field#setBoost(float)
    */
+  // TODO: cut over to source once we support other formats like float
   public abstract byte[] norms(String field) throws IOException;
 
   /**
@@ -1056,6 +1057,8 @@
    * using {@link ReaderUtil#gatherSubReaders} and iterate
    * through them yourself. */
   public abstract DocValues docValues(String field) throws IOException;
+  
+  public abstract DocValues normValues(String field) throws IOException;
 
   private volatile Fields fields;
 
Index: lucene/src/java/org/apache/lucene/index/SegmentCoreReaders.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SegmentCoreReaders.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/SegmentCoreReaders.java	(working copy)
@@ -25,7 +25,6 @@
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.FieldsProducer;
-import org.apache.lucene.codecs.NormsReader;
 import org.apache.lucene.codecs.PerDocProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.StoredFieldsReader;
@@ -54,7 +53,7 @@
   
   final FieldsProducer fields;
   final PerDocProducer perDocProducer;
-  final NormsReader norms;
+  final PerDocProducer norms;
 
   final Directory dir;
   final Directory cfsDir;
@@ -120,7 +119,7 @@
       // ask codec for its Norms: 
       // TODO: since we don't write any norms file if there are no norms,
       // kinda jaky to assume the codec handles the case of no norms file at all gracefully?!
-      norms = codec.normsFormat().normsReader(cfsDir, si, fieldInfos, context, dir);
+      norms = codec.normsFormat().docsProducer(segmentReadState, dir);
       perDocProducer = codec.docValuesFormat().docsProducer(segmentReadState);
 
       final Directory storeDir;
Index: lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java	(working copy)
@@ -76,8 +76,13 @@
     ensureOpen();
     return MultiDocValues.getDocValues(in, field);
   }
-
+  
   @Override
+  public DocValues normValues(String field) throws IOException {
+    ensureOpen();
+    return MultiDocValues.getNormDocValues(in, field);
+  }
+  @Override
   public Bits getLiveDocs() {
     ensureOpen();
     return MultiFields.getLiveDocs(in);
Index: lucene/src/java/org/apache/lucene/util/IOUtils.java
===================================================================
--- lucene/src/java/org/apache/lucene/util/IOUtils.java	(revision 1227169)
+++ lucene/src/java/org/apache/lucene/util/IOUtils.java	(working copy)
@@ -30,6 +30,8 @@
 import java.nio.charset.CharsetDecoder;
 import java.nio.charset.CodingErrorAction;
 
+import org.apache.lucene.store.Directory;
+
 /** This class emulates the new Java 7 "Try-With-Resources" statement.
  * Remove once Lucene is on Java 7.
  * @lucene.internal */
@@ -318,6 +320,16 @@
       }
     }
   }
+  
+  public static void deleteFilesIgnoringExceptions(Directory dir, String... files) {
+    for (String name : files) {
+      try {
+        dir.deleteFile(name);
+      } catch (IOException ignored) {
+        // ignore
+      }
+    }
+  }
 
 
 }
Index: lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
===================================================================
--- lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java	(revision 1227169)
+++ lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java	(working copy)
@@ -1160,6 +1160,11 @@
     public DocValues docValues(String field) throws IOException {
       return null;
     }
+    
+    @Override
+    public DocValues normValues(String field) throws IOException {
+      return null;
+    }
   }
 
   
