Index: src/java/org/apache/lucene/analysis/CachedTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/CachedTokenizer.java	(revision 0)
+++ src/java/org/apache/lucene/analysis/CachedTokenizer.java	(revision 0)
@@ -0,0 +1,62 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * A {@link Tokenizer} that replays a pre-built {@link java.util.List} of
+ * {@link Token}s.  Use it when the Tokens of a TokenStream are intended to be
+ * consumed more than once, or when you already have pre-analyzed tokens.
+ * It assumes all tokens are passed in to begin with, unlike
+ * the {@link org.apache.lucene.analysis.CachingTokenFilter} which caches as it goes.
+ *
+ * CachedTokenizer implements the optional method
+ * {@link TokenStream#reset()}, which repositions the
+ * stream to the first Token.
+ *
+ */
+public class CachedTokenizer extends Tokenizer {
+  private List/*<Token>*/ cache;
+  private Iterator/*<Token>*/ iterator;
+
+  /**
+   * @param tokens The pre-analyzed Tokens to replay; must not be null
+   */
+  public CachedTokenizer(List/*<Token>*/ tokens) {
+    this.cache = tokens;
+    iterator = cache.iterator();
+  }
+
+  /**
+   * @return the next cached Token, or null once the cache is exhausted
+   */
+  public Token next() throws IOException {
+    if (!iterator.hasNext()) {
+      // the cache is exhausted, return null
+      return null;
+    }
+    return (Token) iterator.next();
+  }
+
+  /** Repositions the stream to the first cached Token. */
+  public void reset() throws IOException {
+    iterator = cache.iterator();
+  }
+
+  /**
+   * Overridden as a no-op: this Tokenizer is not backed by a Reader, so the
+   * inherited Tokenizer.close() would otherwise throw a NullPointerException
+   * on the null input field.
+   */
+  public void close() throws IOException {
+    // nothing to close -- no underlying Reader
+  }
+
+  /**
+   * @return the live backing List of Tokens (not a copy)
+   */
+  public List getCache() {
+    return cache;
+  }
+}
\ No newline at end of file

Property changes on: src/java/org/apache/lucene/analysis/CachedTokenizer.java
___________________________________________________________________
Name: svn:eol-style
   + native

Index: src/java/org/apache/lucene/analysis/CachedAnalyzer.java
===================================================================
--- src/java/org/apache/lucene/analysis/CachedAnalyzer.java	(revision 0)
+++ src/java/org/apache/lucene/analysis/CachedAnalyzer.java	(revision 0)
@@ -0,0 +1,25 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Reader;
+import java.util.List;
+
+
+/**
+ * Very simple Analyzer that wraps a {@link java.util.List} of
+ * {@link org.apache.lucene.analysis.Token}s and replays them via a
+ * {@link org.apache.lucene.analysis.CachedTokenizer}.  The Reader passed to
+ * {@link #tokenStream(String, java.io.Reader)} is ignored; the pre-analyzed
+ * Tokens are always returned instead, for every field.
+ *
+ * @see org.apache.lucene.analysis.CachingTokenFilter
+ **/
+public class CachedAnalyzer extends Analyzer{
+  // the pre-analyzed Tokens replayed for every field; shared, not copied
+  private List/*<Token>*/ cache;
+
+  public CachedAnalyzer(List/*<Token>*/ cache) {
+    this.cache = cache;
+  }
+
+  // NOTE: each call returns a new CachedTokenizer over the same shared List
+  public TokenStream tokenStream(String fieldName, Reader reader) {
+    return new CachedTokenizer(cache);
+  }
+}

Property changes on: src/java/org/apache/lucene/analysis/CachedAnalyzer.java
___________________________________________________________________
Name: svn:eol-style
   + native

Index: contrib/analyzers/src/test/org/apache/lucene/analysis/buffered/TestCollaboratingAnalyzer.java
===================================================================
--- contrib/analyzers/src/test/org/apache/lucene/analysis/buffered/TestCollaboratingAnalyzer.java	(revision 0)
+++ contrib/analyzers/src/test/org/apache/lucene/analysis/buffered/TestCollaboratingAnalyzer.java	(revision 0)
@@ -0,0 +1,37 @@
+package org.apache.lucene.analysis.buffered;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import junit.framework.TestCase;
+
+public class TestCollaboratingAnalyzer extends TestCase {
+
+
+  public TestCollaboratingAnalyzer(String s) {
+    super(s);
+  }
+
+  protected void setUp() {
+  }
+
+  protected void tearDown() {
+
+  }
+
+  // TODO(review): placeholder -- this test asserts nothing.  It should index
+  // a document with a concrete CollaboratingAnalyzer subclass and verify that
+  // the buffered field receives the modified tokens while the source field
+  // keeps the originals.
+  public void test() {
+  }
+}
\ No newline at end of file

Property changes on: contrib/analyzers/src/test/org/apache/lucene/analysis/buffered/TestCollaboratingAnalyzer.java
___________________________________________________________________
Name: svn:eol-style
   + native

Index: contrib/analyzers/src/java/org/apache/lucene/analysis/buffered/CollaboratingAnalyzer.java
===================================================================
--- contrib/analyzers/src/java/org/apache/lucene/analysis/buffered/CollaboratingAnalyzer.java	(revision 0)
+++ contrib/analyzers/src/java/org/apache/lucene/analysis/buffered/CollaboratingAnalyzer.java	(revision 0)
@@ -0,0 +1,94 @@
+package org.apache.lucene.analysis.buffered;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+
+import java.io.Reader;
+
+
+/**
+ *  The CollaboratingAnalyzer coordinates the creation of Tokens to be used between two fields (source and buffered).  Unlike
+ * the {@link org.apache.lucene.analysis.CachingTokenFilter}, the CollaboratingAnalyzer allows the bufferedField tokens
+ * to be different from the original tokens.
+ * <p/>
+ * Why is this useful?  For instance, you may want two fields that are analyzed identically except that one lowercases
+ * all terms while the other leaves the tokens as is.  In this case, you could set up your buffered field to use
+ * the tokens that have been lowercased, while emitting the "regular" tokens for the as-is case.
+ * <p/>
+ * Another example might be in the case of identifying proper nouns in content.  Assuming you have a token filter that
+ * identifies proper nouns, you could use the CollaboratingAnalyzer to store the identified proper nouns in
+ * its buffer to then be emitted when the proper noun field is invoked.
+ * <p/>
+ * ******************************
+ * WARNING: In order for the CollaboratingAnalyzer to work, the <i>sourceField</i> must be added to the {@link org.apache.lucene.document.Document}
+ * before the <i>bufferedField</i>; otherwise {@link #tokenStream(String, java.io.Reader)} throws
+ * {@link IllegalStateException}.
+ * <p/>
+ * NOTE: instances hold per-document state (the current BufferingTokenFilter), so a
+ * CollaboratingAnalyzer is not safe for concurrent use by multiple threads.
+ *
+ **/
+public abstract class CollaboratingAnalyzer extends Analyzer {
+  protected String sourceField;
+  protected String bufferedField;
+  // set when the sourceField is analyzed; consumed when the bufferedField is analyzed
+  protected BufferingTokenFilter bufferingTF;
+
+
+  /**
+   * @param sourceField   the field whose analysis fills the buffer
+   * @param bufferedField the field that replays the buffered tokens
+   */
+  public CollaboratingAnalyzer(String sourceField, String bufferedField) {
+    this.sourceField = sourceField;
+    this.bufferedField = bufferedField;
+  }
+
+  /**
+   * Create and return the {@link org.apache.lucene.analysis.TokenStream} to be consumed by Lucene indexing.
+   *
+   * @param fieldName the name of the field being analyzed
+   * @param reader the field's content
+   * @return a buffering stream for the sourceField, a replaying stream for the
+   *         bufferedField, or the default stream for any other field
+   * @throws IllegalStateException if the bufferedField is analyzed before the sourceField
+   */
+  public TokenStream tokenStream(String fieldName, Reader reader) {
+    TokenStream result = null;
+    if (fieldName.equals(sourceField)){
+      bufferingTF = createBufferingTokenFilter(createTokenizer(reader));
+      result = bufferingTF;
+    } else if (fieldName.equals(bufferedField)){
+      if (bufferingTF == null) {
+        // fail fast with a clear message instead of an opaque NullPointerException
+        throw new IllegalStateException("The sourceField '" + sourceField
+            + "' must be analyzed before the bufferedField '" + bufferedField + "'");
+      }
+      result = new BufferedTokenizer(bufferingTF.getBuffer());
+    } else {
+      result = createDefaultTokenStream(reader);
+    }
+    return result;
+
+  }
+
+  /**
+   * Create the TokenStream to be used when the input Field is not the {@link #getSourceField()} or the {@link #getBufferedField()}
+   * @param reader The unmodified input {@link java.io.Reader} from {@link #tokenStream(String, java.io.Reader)}
+   * @return A new {@link TokenStream}
+   */
+  protected abstract TokenStream createDefaultTokenStream(Reader reader);
+
+  /**
+   * Create the {@link Tokenizer} to be used on the sourceField
+   * @param reader the sourceField's content
+   * @return the Tokenizer that feeds the buffering filter
+   */
+  protected abstract Tokenizer createTokenizer(Reader reader);
+
+  /**
+   * Implement your {@link org.apache.lucene.analysis.buffered.BufferingTokenFilter} as you see fit (i.e with as
+   * many upstream filters as desired)
+   * @param input The Analyzer will pass in the original TokenStream created by {@link #createTokenizer(java.io.Reader)}.
+   * @return A {@link BufferingTokenFilter} implemented to modify the buffered token streams as appropriate
+   */
+  protected abstract BufferingTokenFilter createBufferingTokenFilter(TokenStream input);
+
+
+  public String getBufferedField() {
+    return bufferedField;
+  }
+
+  public String getSourceField() {
+    return sourceField;
+  }
+}

Property changes on: contrib/analyzers/src/java/org/apache/lucene/analysis/buffered/CollaboratingAnalyzer.java
___________________________________________________________________
Name: svn:eol-style
   + native

Index: contrib/analyzers/src/java/org/apache/lucene/analysis/buffered/BufferedTokenizer.java
===================================================================
--- contrib/analyzers/src/java/org/apache/lucene/analysis/buffered/BufferedTokenizer.java	(revision 0)
+++ contrib/analyzers/src/java/org/apache/lucene/analysis/buffered/BufferedTokenizer.java	(revision 0)
@@ -0,0 +1,41 @@
+package org.apache.lucene.analysis.buffered;
+
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.Tokenizer;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Iterator;
+import java.util.List;
+
+
+/**
+ * The BufferedTokenizer replays Tokens from a buffer that was filled
+ * elsewhere (see BufferingTokenFilter); it never reads from a Reader.
+ *
+ * @see org.apache.lucene.analysis.CachingTokenFilter
+ *
+ **/
+class BufferedTokenizer extends Tokenizer {
+  private List/*<Token>*/ cache;
+  private Iterator/*<Token>*/ iterator;
+
+  public BufferedTokenizer(List cache) {
+    this.cache = cache;
+    iterator = cache.iterator();
+  }
+
+  /**
+   * @return the next buffered Token, or null once the buffer is exhausted
+   */
+  public Token next() throws IOException {
+    if (!iterator.hasNext()) {
+      // the cache is exhausted, return null
+      return null;
+    }
+
+    return (Token) iterator.next();
+  }
+
+  /** Repositions the stream to the first buffered Token. */
+  public void reset() throws IOException {
+    iterator = cache.iterator();
+  }
+
+  /**
+   * Widened from protected to public: Tokenizer declares reset(Reader) as
+   * public, and an override may not narrow visibility (compile error).
+   * The new Reader is ignored -- the buffer is not rebuilt from it -- so
+   * this simply repositions to the first Token.
+   */
+  public void reset(Reader input) throws IOException {
+    iterator = cache.iterator();
+  }
+
+  /**
+   * Overridden as a no-op: there is no underlying Reader, so the inherited
+   * Tokenizer.close() would throw a NullPointerException on the null input.
+   */
+  public void close() throws IOException {
+    // nothing to close -- no underlying Reader
+  }
+
+}

Property changes on: contrib/analyzers/src/java/org/apache/lucene/analysis/buffered/BufferedTokenizer.java
___________________________________________________________________
Name: svn:eol-style
   + native

Index: contrib/analyzers/src/java/org/apache/lucene/analysis/buffered/BufferingTokenFilter.java
===================================================================
--- contrib/analyzers/src/java/org/apache/lucene/analysis/buffered/BufferingTokenFilter.java	(revision 0)
+++ contrib/analyzers/src/java/org/apache/lucene/analysis/buffered/BufferingTokenFilter.java	(revision 0)
@@ -0,0 +1,44 @@
+package org.apache.lucene.analysis.buffered;
+
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenStream;
+
+import java.util.List;
+import java.util.ArrayList;
+import java.io.IOException;
+
+
+/**
+ * A TokenFilter that passes every Token through unchanged while recording a
+ * (possibly modified) copy of it in a buffer for later replay by a
+ * BufferedTokenizer.  Subclasses decide, via {@link #modifyToken(Token)},
+ * what (if anything) gets buffered for each input Token.
+ *
+ **/
+public abstract class BufferingTokenFilter extends TokenFilter {
+
+  // tokens accumulated so far; exposed live (not copied) via getBuffer()
+  protected List/*<Token>*/ buffer = new ArrayList/*<Token>*/();
+
+  public BufferingTokenFilter(TokenStream input) {
+    super(input);
+  }
+
+  /**
+   * Returns the next Token from the wrapped stream unchanged, after offering
+   * it to {@link #modifyToken(Token)} for buffering.
+   */
+  public Token next() throws IOException {
+    Token result = input.next();
+    if (result == null) {
+      // end of stream: don't force subclasses to handle a null input Token
+      return null;
+    }
+    Token tokenToBuffer = modifyToken(result);
+    if (tokenToBuffer != null) {
+      buffer.add(tokenToBuffer);
+    }
+    return result;
+  }
+
+  /**
+   * Modify the input {@link org.apache.lucene.analysis.Token} as appropriate to be stored in {@link #getBuffer()}
+   * @param input The input {@link org.apache.lucene.analysis.Token} to modify; never null
+   * @return The {@link org.apache.lucene.analysis.Token} to be buffered.  Return null if the token should not be buffered.
+   */
+  protected abstract Token modifyToken(Token input);
+
+
+  /**
+   * @return the live backing List of buffered Tokens (not a copy)
+   */
+  public List getBuffer() {
+    return buffer;
+  }
+}

Property changes on: contrib/analyzers/src/java/org/apache/lucene/analysis/buffered/BufferingTokenFilter.java
___________________________________________________________________
Name: svn:eol-style
   + native

