Index: src/main/java/org/apache/jackrabbit/core/query/lucene/AbstractIndex.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/AbstractIndex.java (revision 794307)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/AbstractIndex.java (working copy)
@@ -20,6 +20,9 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexDeletionPolicy;
+import org.apache.lucene.index.LogDocMergePolicy;
+import org.apache.lucene.index.SerialMergeScheduler;
import org.apache.lucene.store.Directory;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -181,16 +184,16 @@
public Object call() throws Exception {
long time = System.currentTimeMillis();
writer.addDocument(doc);
- return new Long(System.currentTimeMillis() - time);
+ return System.currentTimeMillis() - time;
}
};
}
DynamicPooledExecutor.Result[] results = EXECUTOR.executeAndWait(commands);
invalidateSharedReader();
IOException ex = null;
- for (int i = 0; i < results.length; i++) {
- if (results[i].getException() != null) {
- Throwable cause = results[i].getException().getCause();
+ for (DynamicPooledExecutor.Result result : results) {
+ if (result.getException() != null) {
+ Throwable cause = result.getException().getCause();
if (ex == null) {
// only throw the first exception
if (cause instanceof IOException) {
@@ -203,7 +206,7 @@
log.warn("Exception while inverting document", cause);
}
} else {
- log.debug("Inverted document in {} ms", results[i].get());
+ log.debug("Inverted document in {} ms", result.get());
}
}
if (ex != null) {
@@ -238,7 +241,13 @@
indexWriter = null;
}
if (indexReader == null) {
- IndexReader reader = IndexReader.open(getDirectory());
+ IndexDeletionPolicy idp = getIndexDeletionPolicy();
+ IndexReader reader;
+ if (idp != null) {
+ reader = IndexReader.open(getDirectory(), idp);
+ } else {
+ reader = IndexReader.open(getDirectory());
+ }
reader.setTermInfosIndexDivisor(termInfosIndexDivisor);
indexReader = new CommittableIndexReader(reader);
}
@@ -246,6 +255,17 @@
}
/**
+ * Returns the index deletion policy for this index. This implementation
+ * always returns null.
+ *
+ * @return the index deletion policy for this index or null if
+ * none is present.
+ */
+ protected IndexDeletionPolicy getIndexDeletionPolicy() {
+ return null;
+ }
+
+ /**
* Returns a read-only index reader, that can be used concurrently with
* other threads writing to this index. The returned index reader is
* read-only, that is, any attempt to delete a document from the index
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/directory/FSDirectoryManager.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/directory/FSDirectoryManager.java (revision 794307)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/directory/FSDirectoryManager.java (working copy)
@@ -19,6 +19,10 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.NativeFSLockFactory;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.LockFactory;
import org.apache.jackrabbit.core.query.lucene.SearchIndex;
import java.io.IOException;
@@ -61,7 +65,7 @@
} else {
dir = new File(baseDir, name);
}
- return FSDirectory.getDirectory(dir, new NativeFSLockFactory(dir));
+ return new FSDir(dir);
}
/**
@@ -113,4 +117,99 @@
*/
public void dispose() {
}
+
+ //-----------------------< internal >---------------------------------------
+
+ private static final class FSDir extends Directory {
+
+ private static final FileFilter FILTER = new FileFilter() {
+ public boolean accept(File pathname) {
+ return pathname.isFile();
+ }
+ };
+
+ private final FSDirectory directory;
+
+ public FSDir(File dir) throws IOException {
+ directory = FSDirectory.getDirectory(dir,
+ new NativeFSLockFactory(dir));
+ }
+
+ public String[] list() throws IOException {
+ File[] files = directory.getFile().listFiles(FILTER);
+ if (files == null) {
+ return null;
+ }
+ String[] names = new String[files.length];
+ for (int i = 0; i < names.length; i++) {
+ names[i] = files[i].getName();
+ }
+ return names;
+ }
+
+ public boolean fileExists(String name) throws IOException {
+ return directory.fileExists(name);
+ }
+
+ public long fileModified(String name) throws IOException {
+ return directory.fileModified(name);
+ }
+
+ public void touchFile(String name) throws IOException {
+ directory.touchFile(name);
+ }
+
+ public void deleteFile(String name) throws IOException {
+ directory.deleteFile(name);
+ }
+
+ public void renameFile(String from, String to) throws IOException {
+ directory.renameFile(from, to);
+ }
+
+ public long fileLength(String name) throws IOException {
+ return directory.fileLength(name);
+ }
+
+ public IndexOutput createOutput(String name) throws IOException {
+ return directory.createOutput(name);
+ }
+
+ public IndexInput openInput(String name) throws IOException {
+ return directory.openInput(name);
+ }
+
+ public void close() throws IOException {
+ directory.close();
+ }
+
+ public IndexInput openInput(String name, int bufferSize)
+ throws IOException {
+ return directory.openInput(name, bufferSize);
+ }
+
+ public Lock makeLock(String name) {
+ return directory.makeLock(name);
+ }
+
+ public void clearLock(String name) throws IOException {
+ directory.clearLock(name);
+ }
+
+ public void setLockFactory(LockFactory lockFactory) {
+ directory.setLockFactory(lockFactory);
+ }
+
+ public LockFactory getLockFactory() {
+ return directory.getLockFactory();
+ }
+
+ public String getLockID() {
+ return directory.getLockID();
+ }
+
+ public String toString() {
+ return this.getClass().getName() + "@" + directory;
+ }
+ }
}
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/IndexDeletionPolicyImpl.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/IndexDeletionPolicyImpl.java (revision 0)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/IndexDeletionPolicyImpl.java (revision 0)
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.query.lucene;
+
+import org.apache.lucene.index.IndexDeletionPolicy;
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.store.Directory;
+
+import java.util.List;
+import java.io.IOException;
+
+/**
+ * IndexDeletionPolicyImpl deletes index commits that are older than a given maximum age and reports the current generation back to the owning index.
+ */
+public class IndexDeletionPolicyImpl implements IndexDeletionPolicy {
+
+ private static final String SEGMENTS = "segments";
+
+ private final PersistentIndex index;
+
+ private final long maxAge;
+
+ public IndexDeletionPolicyImpl(PersistentIndex index, long maxAge)
+ throws IOException {
+ this.index = index;
+ this.maxAge = maxAge;
+ // read current generation
+ readCurrentGeneration();
+ }
+
+ public void onInit(List commits) throws IOException {
+ checkCommits(commits);
+ }
+
+ public void onCommit(List commits) throws IOException {
+ checkCommits(commits);
+
+ // report back current generation
+ IndexCommit current = (IndexCommit) commits.get(commits.size() - 1);
+ String name = current.getSegmentsFileName();
+ if (name.equals(SEGMENTS)) {
+ index.setCurrentGeneration(0);
+ } else {
+ index.setCurrentGeneration(
+ Long.parseLong(name.substring(SEGMENTS.length() + 1),
+ Character.MAX_RADIX));
+ }
+ }
+
+ //-------------------------------< internal >-------------------------------
+
+ private void checkCommits(List commits) throws IOException {
+ long currentTime = System.currentTimeMillis();
+ for (int i = 0; i < commits.size() - 1; i++) {
+ IndexCommit ic = (IndexCommit) commits.get(i);
+ long lastModified = index.getDirectory().fileModified(ic.getSegmentsFileName());
+ if (currentTime - lastModified > maxAge) {
+ ic.delete();
+ } else {
+ // following commits are younger, no need to check
+ break;
+ }
+ }
+ }
+
+ void readCurrentGeneration() throws IOException {
+ Directory dir = index.getDirectory();
+ String[] names = dir.list();
+ long max = 0;
+ if (names != null) {
+ for (String name : names) {
+ long gen = -1;
+ if (name.startsWith(SEGMENTS)) {
+ if (name.length() == SEGMENTS.length()) {
+ gen = 0;
+ } else if (name.charAt(SEGMENTS.length()) == '_') {
+ gen = Long.parseLong(name.substring(SEGMENTS.length() + 1), Character.MAX_RADIX);
+ }
+ }
+ if (gen > max) {
+ max = gen;
+ }
+ }
+ }
+ index.setCurrentGeneration(max);
+ }
+}
Property changes on: src\main\java\org\apache\jackrabbit\core\query\lucene\IndexDeletionPolicyImpl.java
___________________________________________________________________
Added: svn:eol-style
+ native
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/IndexHistory.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/IndexHistory.java (revision 0)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/IndexHistory.java (revision 0)
@@ -0,0 +1,208 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.query.lucene;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.lucene.store.Directory;
+
+import java.util.TreeMap;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map;
+import java.io.IOException;
+
+/**
+ * IndexHistory implements a history of index segments. Whenever
+ * the index is flushed a new {@link IndexInfos} instance is created which
+ * represents the current state of the index. This includes the names of the
+ * index segments as well as their current generation number.
+ */
+class IndexHistory {
+
+ /**
+ * The logger instance for this class.
+ */
+ private static final Logger log = LoggerFactory.getLogger(IndexHistory.class);
+
+ /**
+ * Name of the file that contains the index infos.
+ */
+ private static final String INDEXES = "indexes";
+
+ /**
+ * the directory from where to read the index history.
+ */
+ private final Directory indexDir;
+
+ /**
+ * The maximum age (in milliseconds) of an index infos generation until it
+ * is removed.
+ */
+ private final long maxAge;
+
+ /**
+ * Maps generation (Long) to {@link IndexInfos}. Youngest generation first
+ * (-> higher value).
+ */
+ private final Map indexInfosMap = new TreeMap(Collections.reverseOrder());
+
+ /**
+ * Creates a new IndexHistory from the given dir.
+ *
+ * @param dir the directory from where to read the index history.
+ * @param maxAge the maximum age in milliseconds for unused index infos.
+ * @throws IOException if an error occurs while reading the index history.
+ */
+ IndexHistory(Directory dir, long maxAge) throws IOException {
+ this.indexDir = dir;
+ this.maxAge = maxAge;
+ // read all index infos
+ String[] names = dir.list();
+ if (names != null) {
+ for (String name : names) {
+ if (name.startsWith(INDEXES)) {
+ long gen;
+ if (name.length() == INDEXES.length()) {
+ gen = 0;
+ } else if (name.charAt(INDEXES.length()) == '_') {
+ gen = Long.parseLong(name.substring(INDEXES.length() + 1), Character.MAX_RADIX);
+ } else {
+ continue;
+ }
+ IndexInfos infos = new IndexInfos(dir, INDEXES, gen);
+ indexInfosMap.put(gen, infos);
+ }
+ }
+ }
+ }
+
+ /**
+ * Returns the time when the index segment with the given indexName
+ * was in use for the last time. The returned time does not accurately
+ * say until when an index segment was in use, but it does guarantee that
+ * the index segment in question was not in use anymore at the returned
+ * time.
+ *
+ * There are two special cases of return values:
+ *
+ * - {@link Long#MAX_VALUE}: indicates that the index segment is still in active use.
+ * - {@link Long#MIN_VALUE}: indicates that there is no index segment with the given name.
+ *
+ *
+ * @param indexName name of an index segment.
+ * @return the time when the index segment with the given name was in use
+ * the last time.
+ */
+ long getLastUseOf(String indexName) {
+ Long previous = null;
+ for (Map.Entry entry : indexInfosMap.entrySet()) {
+ IndexInfos infos = entry.getValue();
+ if (infos.contains(indexName)) {
+ if (previous == null) {
+ // still in use
+ return Long.MAX_VALUE;
+ } else {
+ return previous;
+ }
+ }
+ previous = infos.getLastModified();
+ }
+ return Long.MIN_VALUE;
+ }
+
+ /**
+ * Removes index infos older than {@link #maxAge} from this history.
+ */
+ void pruneOutdated() {
+ long threshold = System.currentTimeMillis() - maxAge;
+ log.debug("Pruning index infos older than: " + threshold + "(" + indexDir + ")");
+ Iterator it = indexInfosMap.values().iterator();
+ // never prune the current generation
+ if (it.hasNext()) {
+ IndexInfos infos = (IndexInfos) it.next();
+ log.debug("Skipping first index infos. generation=" + infos.getGeneration());
+ }
+ while (it.hasNext()) {
+ IndexInfos infos = (IndexInfos) it.next();
+ if (infos.getLastModified() < threshold) {
+ // check associated redo log
+ try {
+ String logName = getRedoLogName(infos.getGeneration());
+ if (indexDir.fileExists(logName)) {
+ long lastModified = indexDir.fileModified(logName);
+ if (lastModified > threshold) {
+ log.debug("Keeping redo log with generation={}, timestamp={}",
+ infos.getGeneration(), lastModified);
+ continue;
+ }
+ // try to delete it
+ try {
+ indexDir.deleteFile(logName);
+ log.debug("Deleted redo log with generation={}, timestamp={}",
+ infos.getGeneration(), lastModified);
+ } catch (IOException e) {
+ log.warn("Unable to delete: " + indexDir + "/" + logName);
+ continue;
+ }
+ }
+ // delete index infos
+ try {
+ indexDir.deleteFile(infos.getFileName());
+ log.debug("Deleted index infos with generation={}",
+ infos.getGeneration());
+ it.remove();
+ } catch (IOException e) {
+ log.warn("Unable to delete: " + indexDir + "/" + infos.getFileName());
+ }
+ } catch (IOException e) {
+ log.warn("Failed to check if {} is outdated: {}",
+ infos.getFileName(), e);
+ }
+ }
+ }
+ }
+
+ /**
+ * Adds an index infos to the history. This method will not modify nor keep
+ * a reference to the passed infos.
+ *
+ * @param infos the index infos to add.
+ */
+ void addIndexInfos(IndexInfos infos) {
+ // must clone infos because it is modifiable
+ indexInfosMap.put(infos.getGeneration(), infos.clone());
+ }
+
+ //-------------------------------< internal >-------------------------------
+
+ /**
+ * Returns the name of the redo log file with the given generation.
+ *
+ * @param generation the index infos generation.
+ * @return the name of the redo log file with the given generation.
+ */
+ String getRedoLogName(long generation) {
+ if (generation == 0) {
+ return RedoLog.REDO_LOG;
+ } else {
+ return RedoLog.REDO_LOG_PREFIX +
+ Long.toString(generation, Character.MAX_RADIX) +
+ RedoLog.DOT_LOG;
+ }
+ }
+}
Property changes on: src\main\java\org\apache\jackrabbit\core\query\lucene\IndexHistory.java
___________________________________________________________________
Added: svn:eol-style
+ native
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/IndexInfo.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/IndexInfo.java (revision 0)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/IndexInfo.java (revision 0)
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.query.lucene;
+
+/**
+ * IndexInfo implements a single index info, which consists of a
+ * index segment name and a generation number.
+ */
+final class IndexInfo implements Cloneable {
+
+ /**
+ * The name of the index segment.
+ */
+ private final String name;
+
+ /**
+ * The generation number.
+ */
+ private long generation;
+
+ /**
+ * Creates a new index info.
+ *
+ * @param name the name of the index segment.
+ * @param generation the generation.
+ */
+ IndexInfo(String name, long generation) {
+ this.name = name;
+ this.generation = generation;
+ }
+
+ /**
+ * @return the name of the index segment.
+ */
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the generation of this index info.
+ */
+ public long getGeneration() {
+ return generation;
+ }
+
+ /**
+ * Sets a new generation
+ * @param generation
+ */
+ public void setGeneration(long generation) {
+ this.generation = generation;
+ }
+
+ public IndexInfo clone() {
+ try {
+ return (IndexInfo) super.clone();
+ } catch (CloneNotSupportedException e) {
+ // will never happen, this class is cloneable
+ throw new RuntimeException();
+ }
+ }
+}
Property changes on: src\main\java\org\apache\jackrabbit\core\query\lucene\IndexInfo.java
___________________________________________________________________
Added: svn:eol-style
+ native
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/IndexInfos.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/IndexInfos.java (revision 794307)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/IndexInfos.java (working copy)
@@ -23,134 +23,171 @@
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Map;
import org.apache.lucene.store.Directory;
import org.apache.jackrabbit.core.query.lucene.directory.IndexInputStream;
import org.apache.jackrabbit.core.query.lucene.directory.IndexOutputStream;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
- * Stores a sequence of index names.
+ * Stores a sequence of index names and their current generation.
*/
-class IndexInfos {
+class IndexInfos implements Cloneable {
/**
+ * Logger instance for this class
+ */
+ private static final Logger log = LoggerFactory.getLogger(IndexInfos.class);
+
+ /**
+ * IndexInfos version for Jackrabbit 1.0 to 1.5.x
+ */
+ private static final int NAMES_ONLY = 0;
+
+ /**
+ * IndexInfos version for Jackrabbit 2.0
+ */
+ private static final int WITH_GENERATION = 1;
+
+ /**
* For new segment names.
*/
private int counter = 0;
/**
- * Flag that indicates if index infos needs to be written to disk.
+ * Map of {@link IndexInfo}s. Key=name
*/
- private boolean dirty = false;
+ private LinkedHashMap indexes = new LinkedHashMap();
/**
- * List of index names
+ * The directory where the index infos are stored.
*/
- private List indexes = new ArrayList();
+ private final Directory directory;
/**
- * Set of names for quick lookup.
+ * Base name of the file where the infos are stored.
*/
- private Set names = new HashSet();
+ private final String name;
/**
- * Name of the file where the infos are stored.
+ * The generation for this index infos.
*/
- private final String name;
+ private long generation = 0;
/**
- * Creates a new IndexInfos using fileName.
+ * When this index infos were last modified.
+ */
+ private long lastModified;
+
+ /**
+ * Creates a new IndexInfos using baseName and reads the
+ * current generation.
*
- * @param fileName the name of the file where infos are stored.
+ * @param dir the directory where the index infos are stored.
+ * @param baseName the name of the file where infos are stored.
+ * @throws IOException if an error occurs while reading the index infos
+ * file.
*/
- IndexInfos(String fileName) {
- this.name = fileName;
+ IndexInfos(Directory dir, String baseName) throws IOException {
+ this.directory = dir;
+ this.name = baseName;
+ long gen = getCurrentGeneration(getFileNames(dir, baseName), baseName);
+ if (gen == -1) {
+ // write initial infos
+ write();
+ } else {
+ this.generation = gen;
+ read();
+ }
}
/**
- * Returns true if this index infos exists in
- * dir.
+ * Creates a new IndexInfos using fileName and reads the given
+ * generation of the index infos.
*
- * @param dir the directory where to look for the index infos.
- * @return true if it exists; false otherwise.
- * @throws IOException if an error occurs while reading from the directory.
+ * @param dir the directory where the index infos are stored.
+ * @param baseName the name of the file where infos are stored.
+ * @param generation the generation to read.
+ * @throws IOException if an error occurs while reading the index infos
+ * file.
*/
- boolean exists(Directory dir) throws IOException {
- return dir.fileExists(name);
+ IndexInfos(Directory dir, String baseName, long generation) throws IOException {
+ if (generation < 0) {
+ throw new IllegalArgumentException();
+ }
+ this.directory = dir;
+ this.name = baseName;
+ this.generation = generation;
+ read();
}
/**
- * Returns the name of the file where infos are stored.
+ * Returns the name of the file with the most current version where infos
+ * are stored.
*
* @return the name of the file where infos are stored.
*/
String getFileName() {
- return name;
+ return getFileName(generation);
}
/**
- * Reads the index infos.
+ * Writes the index infos to disk.
*
- * @param dir the directory from where to read the index infos.
* @throws IOException if an error occurs.
*/
- void read(Directory dir) throws IOException {
- InputStream in = new IndexInputStream(dir.openInput(name));
+ void write() throws IOException {
+ // increment generation
+ generation++;
+ String newName = getFileName();
+ boolean success = false;
try {
- DataInputStream di = new DataInputStream(in);
- counter = di.readInt();
- for (int i = di.readInt(); i > 0; i--) {
- String indexName = di.readUTF();
- indexes.add(indexName);
- names.add(indexName);
+ OutputStream out = new IndexOutputStream(
+ directory.createOutput(newName));
+ try {
+ log.debug("Writing IndexInfos {}", newName);
+ DataOutputStream dataOut = new DataOutputStream(out);
+ dataOut.writeInt(WITH_GENERATION);
+ dataOut.writeInt(counter);
+ dataOut.writeInt(indexes.size());
+ for (Iterator it = iterator(); it.hasNext(); ) {
+ IndexInfo info = (IndexInfo) it.next();
+ dataOut.writeUTF(info.getName());
+ dataOut.writeLong(info.getGeneration());
+ log.debug(" + {}:{}", info.getName(), info.getGeneration());
+ }
+ } finally {
+ out.close();
}
+ lastModified = System.currentTimeMillis();
+ success = true;
} finally {
- in.close();
- }
- }
-
- /**
- * Writes the index infos to disk if they are dirty.
- *
- * @param dir the directory where to write the index infos.
- * @throws IOException if an error occurs.
- */
- void write(Directory dir) throws IOException {
- // do not write if not dirty
- if (!dirty) {
- return;
- }
-
- OutputStream out = new IndexOutputStream(dir.createOutput(name + ".new"));
- try {
- DataOutputStream dataOut = new DataOutputStream(out);
- dataOut.writeInt(counter);
- dataOut.writeInt(indexes.size());
- for (int i = 0; i < indexes.size(); i++) {
- dataOut.writeUTF(getName(i));
+ if (!success) {
+ // try to delete the file and decrement generation
+ try {
+ directory.deleteFile(newName);
+ } catch (IOException e) {
+ log.warn("Unable to delete file: " + directory + "/" + newName);
+ }
+ generation--;
}
- } finally {
- out.close();
}
- // delete old
- if (dir.fileExists(name)) {
- dir.deleteFile(name);
- }
- dir.renameFile(name + ".new", name);
- dirty = false;
}
/**
- * Returns the index name at position i.
- * @param i the position.
- * @return the index name.
+ * @return an iterator over the {@link IndexInfo}s contained in this index
+ * infos.
*/
- String getName(int i) {
- return (String) indexes.get(i);
+ Iterator iterator() {
+ return indexes.values().iterator();
}
+
/**
* Returns the number of index names.
* @return the number of index names.
@@ -160,39 +197,44 @@
}
/**
+ * @return the time when this index infos where last modified.
+ */
+ long getLastModified() {
+ return lastModified;
+ }
+
+ /**
* Adds a name to the index infos.
+ *
* @param name the name to add.
+ * @param generation the current generation of the index.
*/
- void addName(String name) {
- if (names.contains(name)) {
+ void addName(String name, long generation) {
+ if (indexes.containsKey(name)) {
throw new IllegalArgumentException("already contains: " + name);
}
- indexes.add(name);
- names.add(name);
- dirty = true;
+ indexes.put(name, new IndexInfo(name, generation));
}
+ void updateGeneration(String name, long generation) {
+ IndexInfo info = indexes.get(name);
+ if (info == null) {
+ throw new NoSuchElementException(name);
+ }
+ if (info.getGeneration() != generation) {
+ info.setGeneration(generation);
+ }
+ }
+
/**
* Removes the name from the index infos.
* @param name the name to remove.
*/
void removeName(String name) {
indexes.remove(name);
- names.remove(name);
- dirty = true;
}
/**
- * Removes the name from the index infos.
- * @param i the position.
- */
- void removeName(int i) {
- Object name = indexes.remove(i);
- names.remove(name);
- dirty = true;
- }
-
- /**
* Returns true if name exists in this
* IndexInfos; false otherwise.
*
@@ -200,15 +242,151 @@
* @return true it is exists in this IndexInfos.
*/
boolean contains(String name) {
- return names.contains(name);
+ return indexes.containsKey(name);
}
/**
+ * @return the generation of this index infos.
+ */
+ long getGeneration() {
+ return generation;
+ }
+
+ /**
* Returns a new unique name for an index folder.
* @return a new unique name for an index folder.
*/
String newName() {
- dirty = true;
return "_" + Integer.toString(counter++, Character.MAX_RADIX);
}
+
+ /**
+ * Clones this index infos.
+ *
+ * @return a clone of this index infos.
+ */
+ public IndexInfos clone() {
+ try {
+ IndexInfos clone = (IndexInfos) super.clone();
+ clone.indexes = (LinkedHashMap) indexes.clone();
+ for (Map.Entry entry : clone.indexes.entrySet()) {
+ entry.setValue(entry.getValue().clone());
+ }
+ return clone;
+ } catch (CloneNotSupportedException e) {
+ // never happens, this class is cloneable
+ throw new RuntimeException();
+ }
+ }
+
+ //----------------------------------< internal >----------------------------
+
+ /**
+ * Reads the index infos with the currently set {@link #generation}.
+ *
+ * @throws IOException if an error occurs.
+ */
+ private void read() throws IOException {
+ String fileName = getFileName(generation);
+ InputStream in = new IndexInputStream(directory.openInput(fileName));
+ try {
+ LinkedHashMap indexes = new LinkedHashMap();
+ DataInputStream di = new DataInputStream(in);
+ int version;
+ if (generation == 0) {
+ version = NAMES_ONLY;
+ } else {
+ version = di.readInt();
+ }
+ int counter = di.readInt();
+ for (int i = di.readInt(); i > 0; i--) {
+ String indexName = di.readUTF();
+ long gen = 0;
+ if (version >= WITH_GENERATION) {
+ gen = di.readLong();
+ }
+ indexes.put(indexName, new IndexInfo(indexName, gen));
+ }
+ // when successfully read set values
+ this.lastModified = directory.fileModified(fileName);
+ this.indexes = indexes;
+ this.counter = counter;
+ } finally {
+ in.close();
+ }
+ }
+
+ /**
+ * Returns the name of the file with the given generation where infos
+ * are stored.
+ *
+ * @param gen the generation of the file.
+ * @return the name of the file where infos are stored.
+ */
+ private String getFileName(long gen) {
+ if (gen == 0) {
+ return name;
+ } else {
+ return name + "_" + Long.toString(gen, Character.MAX_RADIX);
+ }
+ }
+
+ /**
+ * Returns all generations of this index infos.
+ *
+ * @param directory the directory where the index infos are stored.
+ * @param base the base name for the index infos.
+ * @return names of all generation files of this index infos.
+ */
+ private static String[] getFileNames(Directory directory, final String base) {
+ String[] names = new String[0];
+ try {
+ names = directory.list();
+ } catch (IOException e) {
+ // TODO: log warning? or throw?
+ }
+ List nameList = new ArrayList(names.length);
+ for (String n : names) {
+ if (n.startsWith(base)) {
+ nameList.add(n);
+ }
+ }
+ return nameList.toArray(new String[nameList.size()]);
+ }
+
+ /**
+ * Parse the generation off the file name and return it.
+ *
+ * @param fileName the generation file that contains index infos.
+ * @param base the base name.
+ * @return the generation of the given file.
+ */
+ private static long generationFromFileName(String fileName, String base) {
+ if (fileName.equals(base)) {
+ return 0;
+ } else {
+ return Long.parseLong(fileName.substring(base.length() + 1),
+ Character.MAX_RADIX);
+ }
+ }
+
+ /**
+ * Returns the most current generation of the given files.
+ *
+ * @param fileNames the file names from where to obtain the generation.
+ * @param base the base name.
+ * @return the most current generation.
+ */
+ private static long getCurrentGeneration(String[] fileNames, String base) {
+ long max = -1;
+ int i = 0;
+ while (i < fileNames.length) {
+ long gen = generationFromFileName(fileNames[i], base);
+ if (gen > max) {
+ max = gen;
+ }
+ i++;
+ }
+ return max;
+ }
}
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java (revision 794307)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java (working copy)
@@ -48,6 +48,8 @@
import java.util.Map;
import java.util.Collection;
import java.util.Collections;
+import java.util.Calendar;
+import java.text.DateFormat;
/**
* A MultiIndex consists of a {@link VolatileIndex} and multiple
@@ -90,12 +92,18 @@
/**
* Names of active persistent index directories.
*/
- private final IndexInfos indexNames = new IndexInfos("indexes");
+ private final IndexInfos indexNames;
/**
+ * The history of the multi index.
+ */
+ private final IndexHistory indexHistory;
+
+ /**
* Names of index directories that can be deleted.
+ * Key = index name (String), Value = time when last in use (Long)
*/
- private final Set deletable = new HashSet();
+ private final Map<String, Long> deletable = new HashMap<String, Long>();
/**
* List of open persistent indexes. This list may also contain an open
@@ -182,7 +190,7 @@
/**
* The RedoLog of this MultiIndex.
*/
- private final RedoLog redoLog;
+ private RedoLog redoLog;
/**
* The indexing queue with pending text extraction jobs.
@@ -227,17 +235,19 @@
this.indexDir = directoryManager.getDirectory(".");
this.handler = handler;
this.cache = new DocNumberCache(handler.getCacheSize());
- this.redoLog = new RedoLog(indexDir);
this.excludedIDs = new HashSet(excludedIDs);
this.nsMappings = handler.getNamespaceMappings();
- if (indexNames.exists(indexDir)) {
- indexNames.read(indexDir);
- }
+ indexNames = new IndexInfos(indexDir, "indexes");
+ this.indexHistory = new IndexHistory(indexDir,
+ handler.getMaxHistoryAge() * 1000);
+
// as of 1.5 deletable file is not used anymore
removeDeletable();
+ this.redoLog = RedoLog.create(indexDir, indexNames.getGeneration());
+
// initialize IndexMerger
merger = new IndexMerger(this, handler.getIndexMergerPoolSize());
merger.setMaxMergeDocs(handler.getMaxMergeDocs());
@@ -250,8 +260,9 @@
this.indexingQueue = new IndexingQueue(store);
// open persistent indexes
- for (int i = 0; i < indexNames.size(); i++) {
- String name = indexNames.getName(i);
+ for (Iterator it = indexNames.iterator(); it.hasNext(); ) {
+ IndexInfo info = (IndexInfo) it.next();
+ String name = info.getName();
// only open if it still exists
// it is possible that indexNames still contains a name for
// an index that has been deleted, but indexNames has not been
@@ -263,7 +274,8 @@
}
PersistentIndex index = new PersistentIndex(name,
handler.getTextAnalyzer(), handler.getSimilarity(),
- cache, indexingQueue, directoryManager);
+ cache, indexingQueue, directoryManager,
+ handler.getMaxHistoryAge());
index.setMaxFieldLength(handler.getMaxFieldLength());
index.setUseCompoundFile(handler.getUseCompoundFile());
index.setTermInfosIndexDivisor(handler.getTermInfosIndexDivisor());
@@ -372,7 +384,7 @@
NodeState rootState = (NodeState) stateMgr.getItemState(rootId);
count = createIndex(rootState, rootPath, stateMgr, count);
executeAndLog(new Commit(getTransactionId()));
- log.info("Created initial index for {} nodes", new Long(count));
+ log.info("Created initial index for {} nodes", count);
releaseMultiReader();
scheduleFlushTask();
} catch (Exception e) {
@@ -414,7 +426,6 @@
long transactionId = nextTransactionId++;
executeAndLog(new Start(transactionId));
- boolean flush = false;
for (NodeId id : remove) {
executeAndLog(new DeleteNode(transactionId, id));
@@ -424,15 +435,10 @@
if (document != null) {
executeAndLog(new AddNode(transactionId, document));
// commit volatile index if needed
- flush |= checkVolatileCommit();
+ checkVolatileCommit();
}
}
executeAndLog(new Commit(transactionId));
-
- // flush whole index when volatile index has been commited.
- if (flush) {
- flush();
- }
} finally {
synchronized (updateMonitor) {
updateInProgress = false;
@@ -484,8 +490,7 @@
if (num > 0) {
redoLog.append(new DeleteNode(getTransactionId(), id));
}
- for (int i = 0; i < indexes.size(); i++) {
- PersistentIndex index = (PersistentIndex) indexes.get(i);
+ for (PersistentIndex index : indexes) {
// only remove documents from registered indexes
if (indexNames.contains(index.getName())) {
int removed = index.removeDocument(idTerm);
@@ -576,7 +581,8 @@
}
PersistentIndex index = new PersistentIndex(indexName,
handler.getTextAnalyzer(), handler.getSimilarity(),
- cache, indexingQueue, directoryManager);
+ cache, indexingQueue, directoryManager,
+ handler.getMaxHistoryAge());
index.setMaxFieldLength(handler.getMaxFieldLength());
index.setUseCompoundFile(handler.getUseCompoundFile());
index.setTermInfosIndexDivisor(handler.getTermInfosIndexDivisor());
@@ -629,7 +635,7 @@
long time = System.currentTimeMillis();
index.getReadOnlyIndexReader(true).release();
time = System.currentTimeMillis() - time;
- log.debug("hierarchy cache initialized in {} ms", new Long(time));
+ log.debug("hierarchy cache initialized in {} ms", time);
}
synchronized (this) {
@@ -721,8 +727,7 @@
if (multiReader == null) {
List<ReadOnlyIndexReader> readerList =
new ArrayList<ReadOnlyIndexReader>();
- for (int i = 0; i < indexes.size(); i++) {
- PersistentIndex pIdx = (PersistentIndex) indexes.get(i);
+ for (PersistentIndex pIdx : indexes) {
if (indexNames.contains(pIdx.getName())) {
readerList.add(pIdx.getReadOnlyIndexReader(initCache));
}
@@ -772,8 +777,8 @@
log.error("Exception while closing search index.", e);
}
volatileIndex.close();
- for (int i = 0; i < indexes.size(); i++) {
- ((PersistentIndex) indexes.get(i)).close();
+ for (PersistentIndex index : indexes) {
+ index.close();
}
// close indexing queue
@@ -846,9 +851,9 @@
}
/**
- * Removes the index from the list of active sub indexes. The
- * Index is not acutally deleted right away, but postponed to the transaction
- * commit.
+ * Removes the index from the list of active sub indexes.
+ * Depending on the {@link SearchIndex#getMaxHistoryAge()}, the
+ * Index is not deleted right away.
*
* This method does not close the index, but rather expects that the index
* has already been closed.
@@ -861,7 +866,7 @@
indexNames.removeName(index.getName());
synchronized (deletable) {
log.debug("Moved " + index.getName() + " to deletable");
- deletable.add(index.getName());
+ deletable.put(index.getName(), System.currentTimeMillis());
}
}
@@ -873,34 +878,64 @@
*/
void flush() throws IOException {
synchronized (this) {
- // commit volatile index
- executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
- commitVolatileIndex();
+ // only start transaction when there is something to commit
+ boolean transactionStarted = false;
+
+ if (volatileIndex.getNumDocuments() > 0) {
+ // commit volatile index
+ executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
+ transactionStarted = true;
+ commitVolatileIndex();
+ }
+
+ boolean indexesModified = false;
// commit persistent indexes
for (int i = indexes.size() - 1; i >= 0; i--) {
- PersistentIndex index = (PersistentIndex) indexes.get(i);
+ PersistentIndex index = indexes.get(i);
// only commit indexes we own
// index merger also places PersistentIndex instances in indexes,
// but does not make them public by registering the name in indexNames
if (indexNames.contains(index.getName())) {
+ long gen = index.getCurrentGeneration();
index.commit();
+ if (gen != index.getCurrentGeneration()) {
+ indexesModified = true;
+ log.debug("Committed revision {} of index {}",
+ Long.toString(index.getCurrentGeneration(), Character.MAX_RADIX),
+ index.getName());
+ }
// check if index still contains documents
if (index.getNumDocuments() == 0) {
+ if (!transactionStarted) {
+ executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
+ transactionStarted = true;
+ }
executeAndLog(new DeleteIndex(getTransactionId(), index.getName()));
}
}
}
- executeAndLog(new Commit(getTransactionId()));
- indexNames.write(indexDir);
+ if (transactionStarted) {
+ executeAndLog(new Commit(getTransactionId()));
+ }
- // reset redo log
- redoLog.clear();
+ if (transactionStarted || indexesModified || redoLog.hasEntries()) {
+ indexNames.write();
+ indexHistory.addIndexInfos(indexNames);
+
+ // close redo.log and create a new one based
+ // on the new indexNames generation
+ redoLog.close();
+ redoLog = RedoLog.create(indexDir, indexNames.getGeneration());
+ }
+
lastFlushTime = System.currentTimeMillis();
}
+ indexHistory.pruneOutdated();
+
// delete obsolete indexes
attemptDelete();
}
@@ -938,12 +973,29 @@
*/
private void enqueueUnusedSegments() throws IOException {
// walk through index segments
- String[] dirNames = directoryManager.getDirectoryNames();
- for (int i = 0; i < dirNames.length; i++) {
- if (dirNames[i].startsWith("_") && !indexNames.contains(dirNames[i])) {
- deletable.add(dirNames[i]);
+ for (String name : directoryManager.getDirectoryNames()) {
+ if (!name.startsWith("_")) {
+ continue;
}
+ long lastUse = indexHistory.getLastUseOf(name);
+ if (lastUse != Long.MAX_VALUE) {
+ if (log.isDebugEnabled()) {
+ String msg = "Segment " + name + " not in use anymore. ";
+ if (lastUse != Long.MIN_VALUE) {
+ Calendar cal = Calendar.getInstance();
+ DateFormat df = DateFormat.getInstance();
+ cal.setTimeInMillis(lastUse);
+ msg += "Unused since: " + df.format(cal.getTime());
+ } else {
+ msg += "(orphaned)";
+ }
+ log.debug(msg);
+ }
+ deletable.put(name, lastUse);
+ }
}
+ // now prune outdated index infos
+ indexHistory.pruneOutdated();
}
private void scheduleFlushTask() {
@@ -953,6 +1005,8 @@
/**
* Resets the volatile index to a new instance.
+ *
+ * @throws IOException if the volatile index cannot be reset.
*/
private void resetVolatileIndex() throws IOException {
volatileIndex = new VolatileIndex(handler.getTextAnalyzer(),
@@ -1049,7 +1103,7 @@
* node.
*
* @param node the current NodeState.
- * @param path the path of the current node.
+ * @param path the path of the current node state.
* @param stateMgr the shared item state manager.
* @param count the number of nodes already indexed.
* @return the number of nodes indexed so far.
@@ -1071,7 +1125,7 @@
if (++count % 100 == 0) {
PathResolver resolver = new DefaultNamePathResolver(
handler.getContext().getNamespaceRegistry());
- log.info("indexing... {} ({})", resolver.getJCRPath(path), new Long(count));
+ log.info("indexing... {} ({})", resolver.getJCRPath(path), count);
}
if (count % 10 == 0) {
checkIndexingQueue(true);
@@ -1095,16 +1149,21 @@
}
/**
- * Attempts to delete all files recorded in {@link #deletable}.
+ * Attempts to delete all files that are older than
+ * {@link SearchIndex#getMaxHistoryAge()}.
*/
private void attemptDelete() {
synchronized (deletable) {
- for (Iterator it = deletable.iterator(); it.hasNext(); ) {
- String indexName = it.next();
- if (directoryManager.delete(indexName)) {
- it.remove();
- } else {
- log.info("Unable to delete obsolete index: " + indexName);
+ for (Iterator<Map.Entry<String, Long>> it = deletable.entrySet().iterator(); it.hasNext(); ) {
+ Map.Entry<String, Long> entry = it.next();
+ String indexName = entry.getKey();
+ long lastUse = entry.getValue();
+ if (System.currentTimeMillis() - handler.getMaxHistoryAge() * 1000 > lastUse) {
+ if (directoryManager.delete(indexName)) {
+ it.remove();
+ } else {
+ log.info("Unable to delete obsolete index: " + indexName);
+ }
}
}
}
@@ -1190,7 +1249,7 @@
// now update index with the remaining ones if there are any
if (!finished.isEmpty()) {
log.info("updating index with {} nodes from indexing queue.",
- new Long(finished.size()));
+ finished.size());
// remove documents from the queue
for (NodeId id : finished.keySet()) {
@@ -1477,7 +1536,7 @@
public void execute(MultiIndex index) throws IOException {
PersistentIndex idx = index.getOrCreateIndex(indexName);
if (!index.indexNames.contains(indexName)) {
- index.indexNames.addName(indexName);
+ index.indexNames.addName(indexName, idx.getCurrentGeneration());
// now that the index is in the active list let the merger know about it
index.merger.indexAdded(indexName, idx.getNumDocuments());
}
@@ -1842,7 +1901,7 @@
if (num == 0) {
for (int i = index.indexes.size() - 1; i >= 0; i--) {
// only look in registered indexes
- PersistentIndex idx = (PersistentIndex) index.indexes.get(i);
+ PersistentIndex idx = index.indexes.get(i);
if (index.indexNames.contains(idx.getName())) {
num = idx.removeDocument(idTerm);
if (num > 0) {
@@ -1924,6 +1983,8 @@
* Creates a new VolatileCommit action.
*
* @param transactionId the id of the transaction that executes this action.
+ * @param targetIndex the name of the index where the volatile index
+ * will be committed.
*/
VolatileCommit(long transactionId, String targetIndex) {
super(transactionId, Action.TYPE_VOLATILE_COMMIT);
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/PersistentIndex.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/PersistentIndex.java (revision 794307)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/PersistentIndex.java (working copy)
@@ -19,6 +19,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexDeletionPolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
@@ -43,6 +44,17 @@
private IndexListener listener;
/**
+ * The index deletion policy. Old index generations are deleted when they
+ * reach a certain age.
+ */
+ private final IndexDeletionPolicyImpl indexDelPolicy;
+
+ /**
+ * The current generation of this persistent index.
+ */
+ private long generation;
+
+ /**
* Creates a new PersistentIndex.
*
* @param name the name of this index.
@@ -51,17 +63,21 @@
* @param cache the document number cache
* @param indexingQueue the indexing queue.
* @param directoryManager the directory manager.
+ * @param generationMaxAge age in seconds after which an index generation is
+ * deleted.
* @throws IOException if an error occurs while opening / creating the
* index.
*/
PersistentIndex(String name, Analyzer analyzer,
Similarity similarity, DocNumberCache cache,
IndexingQueue indexingQueue,
- DirectoryManager directoryManager)
+ DirectoryManager directoryManager, long generationMaxAge)
throws IOException {
super(analyzer, similarity, directoryManager.getDirectory(name),
cache, indexingQueue);
this.name = name;
+ this.indexDelPolicy = new IndexDeletionPolicyImpl(this,
+ generationMaxAge * 1000);
if (isExisting()) {
IndexMigration.migrate(this, directoryManager);
}
@@ -79,6 +95,13 @@
}
/**
+ * @return the index deletion policy of this index.
+ */
+ protected IndexDeletionPolicy getIndexDeletionPolicy() {
+ return indexDelPolicy;
+ }
+
+ /**
* Merges the provided indexes into this index. After this completes, the
* index is optimized.
*
@@ -108,10 +131,10 @@
Directory dir = index.getDirectory();
Directory dest = getDirectory();
String[] files = dir.list();
- for (int i = 0; i < files.length; i++) {
- IndexInput in = dir.openInput(files[i]);
+ for (String file : files) {
+ IndexInput in = dir.openInput(file);
try {
- IndexOutput out = dest.createOutput(files[i]);
+ IndexOutput out = dest.createOutput(file);
try {
long remaining = in.length();
while (remaining > 0) {
@@ -127,6 +150,8 @@
in.close();
}
}
+ // refresh current generation
+ indexDelPolicy.readCurrentGeneration();
}
/**
@@ -169,4 +194,21 @@
String getName() {
return name;
}
+
+ /**
+ * @return the current generation of this index.
+ */
+ long getCurrentGeneration() {
+ return generation;
+ }
+
+ /**
+ * Sets the current generation of this index. This method should only be
+ * called by {@link IndexDeletionPolicyImpl}.
+ *
+ * @param generation the current generation.
+ */
+ void setCurrentGeneration(long generation) {
+ this.generation = generation;
+ }
}
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/Recovery.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/Recovery.java (revision 794307)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/Recovery.java (working copy)
@@ -21,7 +21,6 @@
import java.util.Set;
import java.util.HashSet;
-import java.util.Iterator;
import java.util.List;
import java.io.IOException;
@@ -46,9 +45,9 @@
private final RedoLog redoLog;
/**
- * The ids of the uncommitted transactions. Set of Integer objects.
+ * The ids of the uncommitted transactions. Set of Long objects.
*/
- private final Set losers = new HashSet();
+ private final Set<Long> losers = new HashSet<Long>();
/**
* Creates a new Recovery instance.
@@ -92,23 +91,22 @@
* @throws IOException if the recovery fails.
*/
private void run() throws IOException {
- List actions = redoLog.getActions();
+ List<MultiIndex.Action> actions = redoLog.getActions();
// find loser transactions
- for (Iterator it = actions.iterator(); it.hasNext();) {
- MultiIndex.Action a = (MultiIndex.Action) it.next();
+ for (MultiIndex.Action a : actions) {
if (a.getType() == MultiIndex.Action.TYPE_START) {
- losers.add(new Long(a.getTransactionId()));
+ losers.add(a.getTransactionId());
} else if (a.getType() == MultiIndex.Action.TYPE_COMMIT) {
- losers.remove(new Long(a.getTransactionId()));
+ losers.remove(a.getTransactionId());
}
}
// find last volatile commit without changes from a loser
int lastSafeVolatileCommit = -1;
- Set transactionIds = new HashSet();
+ Set<Long> transactionIds = new HashSet<Long>();
for (int i = 0; i < actions.size(); i++) {
- MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
+ MultiIndex.Action a = actions.get(i);
if (a.getType() == MultiIndex.Action.TYPE_COMMIT) {
transactionIds.clear();
} else if (a.getType() == MultiIndex.Action.TYPE_VOLATILE_COMMIT) {
@@ -121,13 +119,13 @@
lastSafeVolatileCommit = i;
}
} else {
- transactionIds.add(new Long(a.getTransactionId()));
+ transactionIds.add(a.getTransactionId());
}
}
// delete dirty indexes
for (int i = lastSafeVolatileCommit + 1; i < actions.size(); i++) {
- MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
+ MultiIndex.Action a = actions.get(i);
if (a.getType() == MultiIndex.Action.TYPE_CREATE_INDEX) {
a.undo(index);
}
@@ -136,7 +134,7 @@
// replay actions up to last safe volatile commit
// ignore add node actions, they are included in volatile commits
for (int i = 0; i < actions.size() && i <= lastSafeVolatileCommit; i++) {
- MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
+ MultiIndex.Action a = actions.get(i);
switch (a.getType()) {
case MultiIndex.Action.TYPE_ADD_INDEX:
case MultiIndex.Action.TYPE_CREATE_INDEX:
@@ -156,7 +154,7 @@
// now replay the rest until we encounter a loser transaction
for (int i = lastSafeVolatileCommit + 1; i < actions.size(); i++) {
- MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
+ MultiIndex.Action a = actions.get(i);
if (losers.contains(new Long(a.getTransactionId()))) {
break;
} else {
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/RedoLog.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/RedoLog.java (revision 794307)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/RedoLog.java (working copy)
@@ -52,9 +52,19 @@
/**
* Default name of the redo log file
*/
- private static final String REDO_LOG = "redo.log";
+ static final String REDO_LOG = "redo.log";
/**
+ * Prefix of the redo log files.
+ */
+ static final String REDO_LOG_PREFIX = "redo_";
+
+ /**
+ * The .log extension.
+ */
+ static final String DOT_LOG = ".log";
+
+ /**
* Implements a {@link ActionCollector} that counts all entries and sets
* {@link #entryCount}.
*/
@@ -70,6 +80,11 @@
private final Directory dir;
/**
+ * The name of the log file.
+ */
+ private final String fileName;
+
+ /**
* The number of log entries in the log file
*/
private int entryCount = 0;
@@ -84,14 +99,36 @@
* given directory.
*
* @param dir the directory where the redo log file is located.
+ * @param fileName the name of the redo log file.
* @throws IOException if an error occurs while reading the redo log.
*/
- RedoLog(Directory dir) throws IOException {
+ private RedoLog(Directory dir, String fileName) throws IOException {
this.dir = dir;
+ this.fileName = fileName;
read(ENTRY_COUNTER);
}
/**
+ * Creates a new RedoLog instance, which stores its log in the
+ * given directory.
+ *
+ * @param dir the directory where the redo log file is located.
+ * @param generation the redo log generation number.
+ * @return the redo log.
+ * @throws IOException if the redo log cannot be created.
+ */
+ static RedoLog create(Directory dir, long generation) throws IOException {
+ String fileName;
+ if (generation == 0) {
+ fileName = RedoLog.REDO_LOG;
+ } else {
+ fileName = RedoLog.REDO_LOG_PREFIX + Long.toString(
+ generation, Character.MAX_RADIX) + RedoLog.DOT_LOG;
+ }
+ return new RedoLog(dir, fileName);
+ }
+
+ /**
* Returns true if this redo log contains any entries,
* false otherwise.
* @return true if this redo log contains any entries,
@@ -117,8 +154,8 @@
* redo log.
* @throws IOException if an error occurs while reading from the redo log.
*/
- List getActions() throws IOException {
- final List actions = new ArrayList();
+ List<MultiIndex.Action> getActions() throws IOException {
+ final List<MultiIndex.Action> actions = new ArrayList<MultiIndex.Action>();
read(new ActionCollector() {
public void collect(MultiIndex.Action a) {
actions.add(a);
@@ -159,18 +196,30 @@
out.close();
out = null;
}
- dir.deleteFile(REDO_LOG);
+ dir.deleteFile(fileName);
entryCount = 0;
}
/**
+ * Closes this redo log.
+ *
+ * @throws IOException if an error occurs while flushing pending writes.
+ */
+ void close() throws IOException {
+ if (out != null) {
+ out.close();
+ out = null;
+ }
+ }
+
+ /**
* Initializes the {@link #out} stream if it is not yet set.
* @throws IOException if an error occurs while creating the
* output stream.
*/
private void initOut() throws IOException {
if (out == null) {
- OutputStream os = new IndexOutputStream(dir.createOutput(REDO_LOG));
+ OutputStream os = new IndexOutputStream(dir.createOutput(fileName));
out = new BufferedWriter(new OutputStreamWriter(os));
}
}
@@ -183,10 +232,10 @@
* log file.
*/
private void read(ActionCollector collector) throws IOException {
- if (!dir.fileExists(REDO_LOG)) {
+ if (!dir.fileExists(fileName)) {
return;
}
- InputStream in = new IndexInputStream(dir.openInput(REDO_LOG));
+ InputStream in = new IndexInputStream(dir.openInput(fileName));
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(in));
String line;
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java (revision 794307)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java (working copy)
@@ -251,6 +251,13 @@
private int volatileIdleTime = 3;
/**
+ * The maximum age (in seconds) of the index history. The default value is
+ * zero, which means index commits are deleted as soon as they are no longer
+ * in use.
+ */
+ private long maxHistoryAge = 0;
+
+ /**
* maxMergeDocs config parameter
*/
private int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
@@ -2196,6 +2203,24 @@
this.indexMergerPoolSize = indexMergerPoolSize;
}
+ /**
+ * @return the maximum age in seconds for outdated generations of
+ * {@link IndexInfos}.
+ */
+ public long getMaxHistoryAge() {
+ return maxHistoryAge;
+ }
+
+ /**
+ * Sets a new value for the maximum age in seconds for outdated generations
+ * of {@link IndexInfos}.
+ *
+ * @param maxHistoryAge age in seconds.
+ */
+ public void setMaxHistoryAge(long maxHistoryAge) {
+ this.maxHistoryAge = maxHistoryAge;
+ }
+
//----------------------------< internal >----------------------------------
/**