Index: modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java
===================================================================
--- modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java	(révision 1099818)
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java	(copie de travail)
@@ -127,4 +127,213 @@
         new int[]{1, 0, 0, 0},
         path.length());
   }
+
+  public void testBasicReverse() throws Exception {
+    String path = "/a/b/c";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), true );
+    assertTokenStreamContents(t,
+        new String[]{"/a/b/c", "a/b/c", "b/c", "c"},
+        new int[]{0, 1, 3, 5},
+        new int[]{6, 6, 6, 6},
+        new int[]{1, 0, 0, 0},
+        path.length());
+  }
+
+  public void testEndOfDelimiterReverse() throws Exception {
+    String path = "/a/b/c/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), true );
+    assertTokenStreamContents(t,
+        new String[]{"/a/b/c/", "a/b/c/", "b/c/", "c/"},
+        new int[]{0, 1, 3, 5},
+        new int[]{7, 7, 7, 7},
+        new int[]{1, 0, 0, 0},
+        path.length());
+  }
+
+  public void testStartOfCharReverse() throws Exception {
+    String path = "a/b/c";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), true );
+    assertTokenStreamContents(t,
+        new String[]{"a/b/c", "b/c", "c"},
+        new int[]{0, 2, 4},
+        new int[]{5, 5, 5},
+        new int[]{1, 0, 0},
+        path.length());
+  }
+
+  public void testStartOfCharEndOfDelimiterReverse() throws Exception {
+    String path = "a/b/c/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), true );
+    assertTokenStreamContents(t,
+        new String[]{"a/b/c/", "b/c/", "c/"},
+        new int[]{0, 2, 4},
+        new int[]{6, 6, 6},
+        new int[]{1, 0, 0},
+        path.length());
+  }
+
+  public void testOnlyDelimiterReverse() throws Exception {
+    String path = "/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), true );
+    assertTokenStreamContents(t,
+        new String[]{"/"},
+        new int[]{0},
+        new int[]{1},
+        new int[]{1},
+        path.length());
+  }
+
+  public void testOnlyDelimitersReverse() throws Exception {
+    String path = "//";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), true );
+    assertTokenStreamContents(t,
+        new String[]{"//", "/"},
+        new int[]{0, 1},
+        new int[]{2, 2},
+        new int[]{1, 0},
+        path.length());
+  }
+
+  public void testBasicSkip() throws Exception {
+    String path = "/a/b/c";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), 1 );
+    assertTokenStreamContents(t,
+        new String[]{"/b", "/b/c"},
+        new int[]{2, 2},
+        new int[]{4, 6},
+        new int[]{1, 0},
+        path.length());
+  }
+
+  public void testEndOfDelimiterSkip() throws Exception {
+    String path = "/a/b/c/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), 1 );
+    assertTokenStreamContents(t,
+        new String[]{"/b", "/b/c", "/b/c/"},
+        new int[]{2, 2, 2},
+        new int[]{4, 6, 7},
+        new int[]{1, 0, 0},
+        path.length());
+  }
+
+  public void testStartOfCharSkip() throws Exception {
+    String path = "a/b/c";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), 1 );
+    assertTokenStreamContents(t,
+        new String[]{"/b", "/b/c"},
+        new int[]{1, 1},
+        new int[]{3, 5},
+        new int[]{1, 0},
+        path.length());
+  }
+
+  public void testStartOfCharEndOfDelimiterSkip() throws Exception {
+    String path = "a/b/c/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), 1 );
+    assertTokenStreamContents(t,
+        new String[]{"/b", "/b/c", "/b/c/"},
+        new int[]{1, 1, 1},
+        new int[]{3, 5, 6},
+        new int[]{1, 0, 0},
+        path.length());
+  }
+
+  public void testOnlyDelimiterSkip() throws Exception {
+    String path = "/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), 1 );
+    assertTokenStreamContents(t,
+        new String[]{},
+        new int[]{},
+        new int[]{},
+        new int[]{},
+        path.length());
+  }
+
+  public void testOnlyDelimitersSkip() throws Exception {
+    String path = "//";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), 1 );
+    assertTokenStreamContents(t,
+        new String[]{"/"},
+        new int[]{1},
+        new int[]{2},
+        new int[]{1},
+        path.length());
+  }
+
+  public void testEndOfDelimiterReverseSkip() throws Exception {
+    String path = "/a/b/c/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), true, 1 );
+    assertTokenStreamContents(t,
+        new String[]{"/a/b/", "a/b/", "b/"},
+        new int[]{0, 1, 3},
+        new int[]{5, 5, 5},
+        new int[]{1, 0, 0},
+        path.length());
+  }
+
+  public void testStartOfCharReverseSkip() throws Exception {
+    String path = "a/b/c";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), true, 1 );
+    assertTokenStreamContents(t,
+        new String[]{"a/b/", "b/"},
+        new int[]{0, 2},
+        new int[]{4, 4},
+        new int[]{1, 0},
+        path.length());
+  }
+
+  public void testStartOfCharEndOfDelimiterReverseSkip() throws Exception {
+    String path = "a/b/c/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), true, 1 );
+    assertTokenStreamContents(t,
+        new String[]{"a/b/", "b/"},
+        new int[]{0, 2},
+        new int[]{4, 4},
+        new int[]{1, 0},
+        path.length());
+  }
+
+  public void testOnlyDelimiterReverseSkip() throws Exception {
+    String path = "/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), true, 1 );
+    assertTokenStreamContents(t,
+        new String[]{},
+        new int[]{},
+        new int[]{},
+        new int[]{},
+        path.length());
+  }
+
+  public void testOnlyDelimitersReverseSkip() throws Exception {
+    String path = "//";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), true, 1 );
+    assertTokenStreamContents(t,
+        new String[]{"/"},
+        new int[]{0},
+        new int[]{1},
+        new int[]{1},
+        path.length());
+  }
+
+  public void testSkip2() throws Exception {
+    String path = "/a/b/c/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), 2 );
+    assertTokenStreamContents(t,
+        new String[]{"/c", "/c/"},
+        new int[]{4, 4},
+        new int[]{6, 7},
+        new int[]{1, 0},
+        path.length());
+  }
+
+  public void testReverseSkip2() throws Exception {
+    String path = "/a/b/c/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), true, 2 );
+    assertTokenStreamContents(t,
+        new String[]{"/a/", "a/"},
+        new int[]{0, 1},
+        new int[]{3, 3},
+        new int[]{1, 0},
+        path.length());
+  }
 }
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizer.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizer.java	(révision 1099818)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizer.java	(copie de travail)
@@ -19,6 +19,9 @@
 import java.io.IOException;
 import java.io.Reader;
 
+import java.util.List;
+import java.util.ArrayList;
+
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
@@ -29,109 +32,258 @@
  * Take something like:
  * 
  * <pre>
- *  /soemthing/something/else
+ *  /something/something/else
  * </pre>
  * 
  * and make:
  *  
  * <pre>
- *  /soemthing
- *  /soemthing/something
- *  /soemthing/something/else
+ *  /something
+ *  /something/something
+ *  /something/something/else
  * </pre>
+ *
+ * Can also take something like:
+ *
+ * <pre>
+ * www.site.co.uk
+ * </pre>
+ *
+ * and make:
+ *
+ * <pre>
+ * www.site.co.uk
+ * site.co.uk
+ * co.uk
+ * uk
+ * </pre>
  * 
  */
 public class PathHierarchyTokenizer extends Tokenizer {
 
   public PathHierarchyTokenizer(Reader input) {
-    this(input, DEFAULT_BUFFER_SIZE, DEFAULT_DELIMITER);
+    this(input, DEFAULT_BUFFER_SIZE, DEFAULT_DELIMITER, DEFAULT_DELIMITER, DEFAULT_REVERSE, DEFAULT_SKIP);
   }
 
+  public PathHierarchyTokenizer(Reader input, boolean reverse) {
+    this(input, DEFAULT_BUFFER_SIZE, DEFAULT_DELIMITER, DEFAULT_DELIMITER, reverse, DEFAULT_SKIP);
+  }
+
+  public PathHierarchyTokenizer(Reader input, int skip) {
+    this(input, DEFAULT_BUFFER_SIZE, DEFAULT_DELIMITER, DEFAULT_DELIMITER, DEFAULT_REVERSE, skip);
+  }
+
+  public PathHierarchyTokenizer(Reader input, boolean reverse, int skip) {
+    this(input, DEFAULT_BUFFER_SIZE, DEFAULT_DELIMITER, DEFAULT_DELIMITER, reverse, skip);
+  }
+
   public PathHierarchyTokenizer(Reader input, int bufferSize, char delimiter) {
-    this(input, bufferSize, delimiter, delimiter);
+    this(input, bufferSize, delimiter, delimiter, DEFAULT_REVERSE, DEFAULT_SKIP);
   }
 
   public PathHierarchyTokenizer(Reader input, char delimiter, char replacement) {
-    this(input, DEFAULT_BUFFER_SIZE, delimiter, replacement);
+    this(input, DEFAULT_BUFFER_SIZE, delimiter, replacement, DEFAULT_REVERSE, DEFAULT_SKIP);
   }
 
   public PathHierarchyTokenizer(Reader input, int bufferSize, char delimiter, char replacement) {
+    this(input, bufferSize, delimiter, replacement, DEFAULT_REVERSE, DEFAULT_SKIP);
+  }
+
+  public PathHierarchyTokenizer(Reader input, char delimiter, boolean reverse) {
+    this(input, DEFAULT_BUFFER_SIZE, delimiter, delimiter, reverse, DEFAULT_SKIP);
+  }
+
+  public PathHierarchyTokenizer(Reader input, char delimiter, int skip) {
+    this(input, DEFAULT_BUFFER_SIZE, delimiter, delimiter, DEFAULT_REVERSE, skip);
+  }
+
+  public PathHierarchyTokenizer(Reader input, char delimiter, boolean reverse, int skip) {
+    this(input, DEFAULT_BUFFER_SIZE, delimiter, delimiter, reverse, skip);
+  }
+
+  public PathHierarchyTokenizer(Reader input, char delimiter, char replacement, boolean reverse) {
+    this(input, DEFAULT_BUFFER_SIZE, delimiter, replacement, reverse, DEFAULT_SKIP);
+  }
+
+  public PathHierarchyTokenizer(Reader input, char delimiter, char replacement, int skip) {
+    this(input, DEFAULT_BUFFER_SIZE, delimiter, replacement, DEFAULT_REVERSE, skip);
+  }
+
+  public PathHierarchyTokenizer(Reader input, char delimiter, char replacement, boolean reverse, int skip) {
+    this(input, DEFAULT_BUFFER_SIZE, delimiter, replacement, reverse, skip);
+  }
+
+  public PathHierarchyTokenizer(Reader input, int bufferSize, char delimiter, char replacement, boolean reverse, int skip) {
     super(input);
     termAtt.resizeBuffer(bufferSize);
+    this.bufferSize = bufferSize;
     this.delimiter = delimiter;
     this.replacement = replacement;
-    endDelimiter = false;
+    this.reverse = reverse;
+    this.skip = skip;
     resultToken = new StringBuilder(bufferSize);
-  }
-  
+    resultTokenBuffer = new char[bufferSize];
+    delimiterPositions = new ArrayList<Integer>(bufferSize/10);
+  }
   private static final int DEFAULT_BUFFER_SIZE = 1024;
   public static final char DEFAULT_DELIMITER = '/';
+  public static final boolean DEFAULT_REVERSE = false;
+  public static final int DEFAULT_SKIP = 0;
+  private final int bufferSize;
   private final char delimiter;
   private final char replacement;
+  private final boolean reverse;
+  private final int skip;
   
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
   private final PositionIncrementAttribute posAtt = addAttribute(PositionIncrementAttribute.class);
+  private int startOffset = 0;
   private int finalOffset = 0;
-  private boolean endDelimiter;
+  private int skipped = 0;
+  private boolean endDelimiter = false;
   private StringBuilder resultToken;
 
+  private List<Integer> delimiterPositions;
+  private int delimitersCount = -1;
+  private char[] resultTokenBuffer;
+
   @Override
   public final boolean incrementToken() throws IOException {
-    clearAttributes();
-    termAtt.append( resultToken );
-    if(resultToken.length() == 0){
-      posAtt.setPositionIncrement(1);
+    if(reverse){
+
+      clearAttributes();
+      if(delimitersCount == -1){
+        int length = 0;
+        delimiterPositions.add(0);
+        while (true) {
+          int c = input.read();
+          if( c < 0 ) {
+            break;
+          }
+          length++;
+          if( c == delimiter ) {
+            delimiterPositions.add(length);
+            resultToken.append(replacement);
+          }
+          else{
+            resultToken.append((char)c);
+          }
+        }
+        delimitersCount = delimiterPositions.size();
+        if( delimiterPositions.get(delimitersCount-1) < length ){
+          delimiterPositions.add(length);
+          delimitersCount++;
+        }
+        if( resultTokenBuffer.length < resultToken.length() ){
+          resultTokenBuffer = new char[resultToken.length()];
+        }
+        resultToken.getChars(0, resultToken.length(), resultTokenBuffer, 0);
+        resultToken.setLength(0);
+        finalOffset = correctOffset(length);
+        posAtt.setPositionIncrement(1);
+      }
+      else{
+        posAtt.setPositionIncrement(0);
+      }
+      while( skipped < delimitersCount-skip-1 ){
+        int start = delimiterPositions.get(skipped);
+        int end = delimiterPositions.get(delimitersCount-1 - skip);
+        termAtt.copyBuffer(resultTokenBuffer, start, end - start);
+        offsetAtt.setOffset(correctOffset(start), correctOffset(end));
+        skipped++;
+        return true;
+      }
+
+      return false;
+
     }
     else{
-      posAtt.setPositionIncrement(0);
-    }
-    int length = 0;
-    boolean added = false;
-    if( endDelimiter ){
-      termAtt.append(replacement);
-      length++;
-      endDelimiter = false;
-      added = true;
-    }
 
-    while (true) {
-      int c = input.read();
-      if( c < 0 ) {
-        length += resultToken.length();
-        termAtt.setLength(length);
-        finalOffset = correctOffset(length);
-        offsetAtt.setOffset(correctOffset(0), finalOffset);
-        if( added ){
-          resultToken.setLength(0);
-          resultToken.append(termAtt.buffer(), 0, length);
-        }
-        return added;
+      clearAttributes();
+      termAtt.append( resultToken );
+      if(resultToken.length() == 0){
+        posAtt.setPositionIncrement(1);
       }
-      added = true;
-      if( c == delimiter ) {
-        if( length > 0 ){
-          endDelimiter = true;
-          break;
+      else{
+        posAtt.setPositionIncrement(0);
+      }
+      int length = 0;
+      boolean added = false;
+      if( endDelimiter ){
+        termAtt.append(replacement);
+        length++;
+        endDelimiter = false;
+        added = true;
+      }
+
+      while (true) {
+        int c = input.read();
+        if( c < 0 ){
+          if( skipped > skip ) {
+            length += resultToken.length();
+            termAtt.setLength(length);
+            finalOffset = correctOffset(startOffset + length);
+            offsetAtt.setOffset(correctOffset(startOffset), finalOffset);
+            if( added ){
+              resultToken.setLength(0);
+              resultToken.append(termAtt.buffer(), 0, length);
+            }
+            return added;
+          }
+          else{
+            finalOffset = correctOffset(startOffset + length); return false;
+          }
         }
-        else{
-          termAtt.append(replacement);
-          length++;
+        if( !added ){
+          added = true;
+          skipped++;
+          if( skipped > skip ){
+            termAtt.append(c == delimiter ? replacement : (char)c);
+            length++;
+          }
+          else {
+            startOffset++;
+          }
         }
+        else {
+          if( c == delimiter ){
+            if( skipped > skip ){
+              endDelimiter = true;
+              break;
+            }
+            // The delimiter is accounted for exactly once below, after
+            // skipped++; an extra startOffset++ here would double-count
+            // it and shift every offset by one whenever skip > 1.
+            skipped++;
+            if( skipped > skip ){
+              termAtt.append(replacement);
+              length++;
+            }
+            else {
+              startOffset++;
+            }
+          }
+          else {
+            if( skipped > skip ){
+              termAtt.append((char)c);
+              length++;
+            }
+            else {
+              startOffset++;
+            }
+          }
+        }
       }
-      else {
-        termAtt.append((char)c);
-        length++;
-      }
+      length += resultToken.length();
+      termAtt.setLength(length);
+      finalOffset = correctOffset(startOffset + length);
+      offsetAtt.setOffset(correctOffset(startOffset), finalOffset);
+      resultToken.setLength(0);
+      resultToken.append(termAtt.buffer(), 0, length);
+      return true;
+
     }
-
-    length += resultToken.length();
-    termAtt.setLength(length);
-    finalOffset = correctOffset(length);
-    offsetAtt.setOffset(correctOffset(0), finalOffset);
-    resultToken.setLength(0);
-    resultToken.append(termAtt.buffer(), 0, length);
-    return true;
   }
   
   @Override
@@ -146,5 +298,10 @@
     resultToken.setLength(0);
     finalOffset = 0;
     endDelimiter = false;
+    skipped = 0;
+    delimitersCount = -1;
+    if(reverse){
+      delimiterPositions.clear();
+    }
   }
 }
Index: lucene/contrib/CHANGES.txt
===================================================================
--- lucene/contrib/CHANGES.txt	(révision 1099818)
+++ lucene/contrib/CHANGES.txt	(copie de travail)
@@ -8,6 +8,9 @@
 
 New Features
 
+ * LUCENE-3071: Added skip and reverse parameters to PathHierarchyTokenizer
+   (Olivier Favre)
+
  * LUCENE-2604: Added RegexpQuery support to contrib/queryparser.
    (Simon Willnauer, Robert Muir)
 
