Index: src/test/org/apache/lucene/index/TestIndexReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReader.java	(revision 764856)
+++ src/test/org/apache/lucene/index/TestIndexReader.java	(working copy)
@@ -1377,10 +1377,14 @@
         String curField = (String) it1.next();
         byte[] norms1 = index1.norms(curField);
         byte[] norms2 = index2.norms(curField);
-        assertEquals(norms1.length, norms2.length);
-        for (int i = 0; i < norms1.length; i++) {
-          assertEquals("Norm different for doc " + i + " and field '" + curField + "'.", norms1[i], norms2[i]);
-        }      
+        if (norms1 != null && norms2 != null) {
+          assertEquals(norms1.length, norms2.length);
+          for (int i = 0; i < norms1.length; i++) {
+            assertEquals("Norm different for doc " + i + " and field '" + curField + "'.", norms1[i], norms2[i]);
+          }
+        } else {
+          assertSame(norms1, norms2);
+        }
       }
       
       // check deletions
       }
       
       // check deletions
Index: src/test/org/apache/lucene/index/TestSegmentReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestSegmentReader.java	(revision 764856)
+++ src/test/org/apache/lucene/index/TestSegmentReader.java	(working copy)
@@ -175,10 +175,7 @@
         if (!reader.hasNorms(f.name())) {
           // test for fake norms of 1.0
           byte [] norms = reader.norms(f.name());
-          assertEquals(norms.length,reader.maxDoc());
-          for (int j=0; j<reader.maxDoc(); j++) {
-            assertEquals(norms[j], DefaultSimilarity.encodeNorm(1.0f));
-          }
+          assertNull(norms);
           norms = new byte[reader.maxDoc()];
           reader.norms(f.name(),norms, 0);
           for (int j=0; j<reader.maxDoc(); j++) {
Index: src/java/org/apache/lucene/search/spans/SpanScorer.java
===================================================================
--- src/java/org/apache/lucene/search/spans/SpanScorer.java	(revision 764856)
+++ src/java/org/apache/lucene/search/spans/SpanScorer.java	(working copy)
@@ -89,7 +89,7 @@
 
   public float score() throws IOException {
     float raw = getSimilarity().tf(freq) * value; // raw score
-    return raw * Similarity.decodeNorm(norms[doc]); // normalize
+    return norms == null? raw : raw * Similarity.decodeNorm(norms[doc]); // normalize
   }
 
   public Explanation explain(final int doc) throws IOException {
Index: src/java/org/apache/lucene/search/spans/SpanWeight.java
===================================================================
--- src/java/org/apache/lucene/search/spans/SpanWeight.java	(revision 764856)
+++ src/java/org/apache/lucene/search/spans/SpanWeight.java	(working copy)
@@ -122,7 +122,7 @@
     Explanation fieldNormExpl = new Explanation();
     byte[] fieldNorms = reader.norms(field);
     float fieldNorm =
-      fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 0.0f;
+      fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 1.0f;
     fieldNormExpl.setValue(fieldNorm);
     fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
     fieldExpl.addDetail(fieldNormExpl);
Index: src/java/org/apache/lucene/search/MultiPhraseQuery.java
===================================================================
--- src/java/org/apache/lucene/search/MultiPhraseQuery.java	(revision 764856)
+++ src/java/org/apache/lucene/search/MultiPhraseQuery.java	(working copy)
@@ -225,7 +225,7 @@
       Explanation fieldNormExpl = new Explanation();
       byte[] fieldNorms = reader.norms(field);
       float fieldNorm =
-        fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 0.0f;
+        fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 1.0f;
       fieldNormExpl.setValue(fieldNorm);
       fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
       fieldExpl.addDetail(fieldNormExpl);
Index: src/java/org/apache/lucene/search/PhraseScorer.java
===================================================================
--- src/java/org/apache/lucene/search/PhraseScorer.java	(revision 764856)
+++ src/java/org/apache/lucene/search/PhraseScorer.java	(working copy)
@@ -104,7 +104,7 @@
   public float score() throws IOException {
     //System.out.println("scoring " + first.doc);
     float raw = getSimilarity().tf(freq) * value; // raw score
-    return raw * Similarity.decodeNorm(norms[first.doc]); // normalize
+    return norms == null ? raw : raw * Similarity.decodeNorm(norms[first.doc]); // normalize
   }
 
   public boolean skipTo(int target) throws IOException {
Index: src/java/org/apache/lucene/search/PhraseQuery.java
===================================================================
--- src/java/org/apache/lucene/search/PhraseQuery.java	(revision 764856)
+++ src/java/org/apache/lucene/search/PhraseQuery.java	(working copy)
@@ -216,7 +216,7 @@
       Explanation fieldNormExpl = new Explanation();
       byte[] fieldNorms = reader.norms(field);
       float fieldNorm =
-        fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 0.0f;
+        fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 1.0f;
       fieldNormExpl.setValue(fieldNorm);
       fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
       fieldExpl.addDetail(fieldNormExpl);
Index: src/java/org/apache/lucene/search/TermQuery.java
===================================================================
--- src/java/org/apache/lucene/search/TermQuery.java	(revision 764856)
+++ src/java/org/apache/lucene/search/TermQuery.java	(working copy)
@@ -111,7 +111,7 @@
       Explanation fieldNormExpl = new Explanation();
       byte[] fieldNorms = reader.norms(field);
       float fieldNorm =
-        fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 0.0f;
+        fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 1.0f;
       fieldNormExpl.setValue(fieldNorm);
       fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
       fieldExpl.addDetail(fieldNormExpl);
Index: src/java/org/apache/lucene/search/TermScorer.java
===================================================================
--- src/java/org/apache/lucene/search/TermScorer.java	(revision 764856)
+++ src/java/org/apache/lucene/search/TermScorer.java	(working copy)
@@ -78,7 +78,6 @@
     c.setScorer(this);
     while (doc < end) {                           // for docs in window
       c.collect(doc);                      // collect score
-        
       if (++pointer >= pointerMax) {
         pointerMax = termDocs.read(docs, freqs);  // refill buffers
         if (pointerMax != 0) {
@@ -127,7 +126,7 @@
       ? scoreCache[f]                             // cache hit
       : getSimilarity().tf(f)*weightValue;        // cache miss
 
-    return raw * SIM_NORM_DECODER[norms[doc] & 0xFF]; // normalize for field
+    return norms == null ? raw : raw * SIM_NORM_DECODER[norms[doc] & 0xFF]; // normalize for field
   }
 
   /** Skips to the first match beyond the current whose document number is
Index: src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentReader.java	(revision 764856)
+++ src/java/org/apache/lucene/index/SegmentReader.java	(working copy)
@@ -1008,19 +1008,13 @@
     ensureOpen();
     return norms.containsKey(field);
   }
 
   static byte[] createFakeNorms(int size) {
     byte[] ones = new byte[size];
     Arrays.fill(ones, DefaultSimilarity.encodeNorm(1.0f));
     return ones;
   }
 
-  private byte[] ones;
-  private byte[] fakeNorms() {
-    if (ones==null) ones=createFakeNorms(maxDoc());
-    return ones;
-  }
-
   // can return null if norms aren't stored
   protected synchronized byte[] getNorms(String field) throws IOException {
     Norm norm = (Norm) norms.get(field);
@@ -1032,7 +1026,6 @@
   public synchronized byte[] norms(String field) throws IOException {
     ensureOpen();
     byte[] bytes = getNorms(field);
-    if (bytes==null) bytes=fakeNorms();
     return bytes;
   }
 
@@ -1053,7 +1046,7 @@
     ensureOpen();
     Norm norm = (Norm) norms.get(field);
     if (norm == null) {
-      System.arraycopy(fakeNorms(), 0, bytes, offset, maxDoc());
+      Arrays.fill(bytes, offset, bytes.length, DefaultSimilarity.encodeNorm(1.0f));
       return;
     }
   
Index: src/java/org/apache/lucene/index/MultiReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiReader.java	(revision 764856)
+++ src/java/org/apache/lucene/index/MultiReader.java	(working copy)
@@ -18,6 +18,7 @@
  */
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
@@ -27,6 +28,7 @@
 import org.apache.lucene.index.MultiSegmentReader.MultiTermDocs;
 import org.apache.lucene.index.MultiSegmentReader.MultiTermEnum;
 import org.apache.lucene.index.MultiSegmentReader.MultiTermPositions;
+import org.apache.lucene.search.DefaultSimilarity;
 
 /** An IndexReader which reads multiple indexes, appending their content.
  *
@@ -276,19 +278,13 @@
     return false;
   }
 
-  private byte[] ones;
-  private byte[] fakeNorms() {
-    if (ones==null) ones=SegmentReader.createFakeNorms(maxDoc());
-    return ones;
-  }
-  
   public synchronized byte[] norms(String field) throws IOException {
     ensureOpen();
     byte[] bytes = (byte[])normsCache.get(field);
     if (bytes != null)
       return bytes;          // cache hit
     if (!hasNorms(field))
-      return fakeNorms();
+      return null;
 
     bytes = new byte[maxDoc()];
     for (int i = 0; i < subReaders.length; i++)
@@ -301,14 +297,15 @@
     throws IOException {
     ensureOpen();
     byte[] bytes = (byte[])normsCache.get(field);
-    if (bytes==null && !hasNorms(field)) bytes=fakeNorms();
-    if (bytes != null)                            // cache hit
-      System.arraycopy(bytes, 0, result, offset, maxDoc());
-
-    for (int i = 0; i < subReaders.length; i++)      // read from segments
-      subReaders[i].norms(field, result, offset + starts[i]);
+    if (bytes==null && !hasNorms(field))
+      Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
+    else if (bytes != null)                            // cache hit
+      System.arraycopy(bytes, 0, result, offset, maxDoc());
+    else
+      for (int i = 0; i < subReaders.length; i++)      // read from segments
+        subReaders[i].norms(field, result, offset + starts[i]);
   }
 
   protected void doSetNorm(int n, String field, byte value)
     throws CorruptIndexException, IOException {
     synchronized (normsCache) {
Index: src/java/org/apache/lucene/index/MultiSegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiSegmentReader.java	(revision 764856)
+++ src/java/org/apache/lucene/index/MultiSegmentReader.java	(working copy)
@@ -18,6 +18,7 @@
  */
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -27,6 +28,7 @@
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.search.DefaultSimilarity;
 import org.apache.lucene.store.Directory;
 
 /** 
@@ -385,19 +387,13 @@
     return false;
   }
 
-  private byte[] ones;
-  private byte[] fakeNorms() {
-    if (ones==null) ones=SegmentReader.createFakeNorms(maxDoc());
-    return ones;
-  }
-
   public synchronized byte[] norms(String field) throws IOException {
     ensureOpen();
     byte[] bytes = (byte[])normsCache.get(field);
     if (bytes != null)
       return bytes;          // cache hit
     if (!hasNorms(field))
-      return fakeNorms();
+      return null;
 
     bytes = new byte[maxDoc()];
     for (int i = 0; i < subReaders.length; i++)
@@ -410,12 +406,13 @@
     throws IOException {
     ensureOpen();
     byte[] bytes = (byte[])normsCache.get(field);
-    if (bytes==null && !hasNorms(field)) bytes=fakeNorms();
-    if (bytes != null)                            // cache hit
+    if (bytes==null && !hasNorms(field))
+      Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
+    else if (bytes != null)                            // cache hit
       System.arraycopy(bytes, 0, result, offset, maxDoc());
-
-    for (int i = 0; i < subReaders.length; i++)      // read from segments
-      subReaders[i].norms(field, result, offset + starts[i]);
+    else
+      for (int i = 0; i < subReaders.length; i++)      // read from segments
+        subReaders[i].norms(field, result, offset + starts[i]);
   }
 
   protected void doSetNorm(int n, String field, byte value)
