Return-Path:
Delivered-To: apmail-lucene-java-commits-archive@www.apache.org
Received: (qmail 58940 invoked from network); 4 Jul 2007 15:17:09 -0000
Received: from hermes.apache.org (HELO mail.apache.org) (140.211.11.2)
by minotaur.apache.org with SMTP; 4 Jul 2007 15:17:09 -0000
Received: (qmail 67969 invoked by uid 500); 4 Jul 2007 15:17:11 -0000
Delivered-To: apmail-lucene-java-commits-archive@lucene.apache.org
Received: (qmail 67922 invoked by uid 500); 4 Jul 2007 15:17:11 -0000
Mailing-List: contact java-commits-help@lucene.apache.org; run by ezmlm
Precedence: bulk
List-Help: <mailto:java-commits-help@lucene.apache.org>
List-Unsubscribe: <mailto:java-commits-unsubscribe@lucene.apache.org>
List-Post: <mailto:java-commits@lucene.apache.org>
List-Id: <java-commits.lucene.apache.org>
Reply-To: java-dev@lucene.apache.org
Delivered-To: mailing list java-commits@lucene.apache.org
Received: (qmail 67863 invoked by uid 99); 4 Jul 2007 15:17:10 -0000
Received: from herse.apache.org (HELO herse.apache.org) (140.211.11.133)
by apache.org (qpsmtpd/0.29) with ESMTP; Wed, 04 Jul 2007 08:17:10 -0700
X-ASF-Spam-Status: No, hits=-99.5 required=10.0
tests=ALL_TRUSTED,NO_REAL_NAME
X-Spam-Check-By: apache.org
Received: from [140.211.11.3] (HELO eris.apache.org) (140.211.11.3)
by apache.org (qpsmtpd/0.29) with ESMTP; Wed, 04 Jul 2007 08:17:03 -0700
Received: by eris.apache.org (Postfix, from userid 65534)
id 14E1A1A9825; Wed, 4 Jul 2007 08:16:43 -0700 (PDT)
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: svn commit: r553236 [5/6] - in /lucene/java/trunk: ./
contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/ docs/
src/java/org/apache/lucene/analysis/ src/java/org/apache/lucene/index/
src/java/org/apache/lucene/store/ src/site/src/documentati...
Date: Wed, 04 Jul 2007 15:16:40 -0000
To: java-commits@lucene.apache.org
From: mikemccand@apache.org
X-Mailer: svnmailer-1.1.0
Message-Id: <20070704151643.14E1A1A9825@eris.apache.org>
X-Virus-Checked: Checked by ClamAV on apache.org
Propchange: lucene/java/trunk/src/java/org/apache/lucene/index/DocumentsWriter.java
------------------------------------------------------------------------------
svn:eol-style = native
Modified: lucene/java/trunk/src/java/org/apache/lucene/index/FieldInfo.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/FieldInfo.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/FieldInfo.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/FieldInfo.java Wed Jul 4 08:16:38 2007
@@ -43,4 +43,9 @@
this.omitNorms = omitNorms;
this.storePayloads = storePayloads;
}
+
+ public Object clone() {
+ return new FieldInfo(name, isIndexed, number, storeTermVector, storePositionWithTermVector,
+ storeOffsetWithTermVector, omitNorms, storePayloads);
+ }
}
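
[The new clone() above exists to support the deep FieldInfos.clone() added below: a consumer can flip per-field flags on the copy without mutating the FieldInfo instances a live reader still uses. A minimal same-package sketch; the FieldInfos variable fis and the field name "body" are assumed for illustration, not part of the patch:

    FieldInfos copy = (FieldInfos) fis.clone();
    copy.fieldInfo("body").storePayloads = true;  // flag flips on the deep copy...
    assert !fis.fieldInfo("body").storePayloads;  // ...the original is untouched
]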
Modified: lucene/java/trunk/src/java/org/apache/lucene/index/FieldInfos.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/FieldInfos.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/FieldInfos.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/FieldInfos.java Wed Jul 4 08:16:38 2007
@@ -62,6 +62,20 @@
}
}
+ /**
+ * Returns a deep clone of this FieldInfos instance.
+ */
+ public Object clone() {
+ FieldInfos fis = new FieldInfos();
+ final int numField = byNumber.size();
+ for(int i=0;i<numField;i++) {
+ FieldInfo fi = (FieldInfo) ((FieldInfo) byNumber.get(i)).clone();
+ fis.byNumber.add(fi);
+ fis.byName.put(fi.name, fi);
+ }
+ return fis;
+ }

Modified: lucene/java/trunk/src/java/org/apache/lucene/index/FieldsReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/FieldsReader.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/FieldsReader.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/FieldsReader.java Wed Jul 4 08:16:38 2007
+ if (docStoreOffset != -1) {
+ // We read only a slice out of this shared fields file
+ this.docStoreOffset = docStoreOffset;
+ this.size = size;
+
+ // Verify the file is long enough to hold all of our
+ // docs
+ assert ((int) (indexStream.length() / 8)) >= size + this.docStoreOffset;
+ } else {
+ this.docStoreOffset = 0;
+ this.size = (int) (indexStream.length() / 8);
+ }
}
/**
@@ -100,7 +120,7 @@
}
final Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
- indexStream.seek(n * 8L);
+ indexStream.seek((n + docStoreOffset) * 8L);
long position = indexStream.readLong();
fieldsStream.seek(position);
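
[Each .fdx entry is a single 8-byte long pointing into .fdt, so the seek change above is the entire read side of doc-store sharing: docStoreOffset translates a segment-local docID into that segment's slot range within the shared index file. A worked example with assumed values, not from the patch:

    int docStoreOffset = 200; // this segment's docs start at slot 200 of the shared store
    int n = 5;                // segment-local docID
    long fdxPos = (n + docStoreOffset) * 8L; // = 1640, byte address of the pointer
    // indexStream.seek(fdxPos) followed by readLong() yields the document's
    // start position in the shared .fdt file
]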
Modified: lucene/java/trunk/src/java/org/apache/lucene/index/FieldsWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/FieldsWriter.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/FieldsWriter.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/FieldsWriter.java Wed Jul 4 08:16:38 2007
@@ -24,6 +24,7 @@
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMOutputStream;
import org.apache.lucene.store.IndexOutput;
final class FieldsWriter
@@ -38,15 +39,92 @@
private IndexOutput indexStream;
+ private boolean doClose;
+
FieldsWriter(Directory d, String segment, FieldInfos fn) throws IOException {
fieldInfos = fn;
fieldsStream = d.createOutput(segment + ".fdt");
indexStream = d.createOutput(segment + ".fdx");
+ doClose = true;
+ }
+
+ FieldsWriter(IndexOutput fdx, IndexOutput fdt, FieldInfos fn) throws IOException {
+ fieldInfos = fn;
+ fieldsStream = fdt;
+ indexStream = fdx;
+ doClose = false;
+ }
+
+ // Writes the contents of buffer into the fields stream
+ // and adds a new entry for this document into the index
+ // stream. This assumes the buffer was already written
+ // in the correct fields format.
+ void flushDocument(RAMOutputStream buffer) throws IOException {
+ indexStream.writeLong(fieldsStream.getFilePointer());
+ buffer.writeTo(fieldsStream);
+ }
+
+ void flush() throws IOException {
+ indexStream.flush();
+ fieldsStream.flush();
}
final void close() throws IOException {
+ if (doClose) {
fieldsStream.close();
indexStream.close();
+ }
+ }
+
+ final void writeField(FieldInfo fi, Fieldable field) throws IOException {
+ // if the field is an instance of FieldsReader.FieldForMerge, we're in merge mode
+ // and field.binaryValue() already returns the compressed value for a field
+ // with isCompressed()==true, so we disable compression in that case
+ boolean disableCompression = (field instanceof FieldsReader.FieldForMerge);
+ fieldsStream.writeVInt(fi.number);
+ byte bits = 0;
+ if (field.isTokenized())
+ bits |= FieldsWriter.FIELD_IS_TOKENIZED;
+ if (field.isBinary())
+ bits |= FieldsWriter.FIELD_IS_BINARY;
+ if (field.isCompressed())
+ bits |= FieldsWriter.FIELD_IS_COMPRESSED;
+
+ fieldsStream.writeByte(bits);
+
+ if (field.isCompressed()) {
+ // compression is enabled for the current field
+ byte[] data = null;
+
+ if (disableCompression) {
+ // optimized case for merging, the data
+ // is already compressed
+ data = field.binaryValue();
+ } else {
+ // check if it is a binary field
+ if (field.isBinary()) {
+ data = compress(field.binaryValue());
+ }
+ else {
+ data = compress(field.stringValue().getBytes("UTF-8"));
+ }
+ }
+ final int len = data.length;
+ fieldsStream.writeVInt(len);
+ fieldsStream.writeBytes(data, len);
+ }
+ else {
+ // compression is disabled for the current field
+ if (field.isBinary()) {
+ byte[] data = field.binaryValue();
+ final int len = data.length;
+ fieldsStream.writeVInt(len);
+ fieldsStream.writeBytes(data, len);
+ }
+ else {
+ fieldsStream.writeString(field.stringValue());
+ }
+ }
}
final void addDocument(Document doc) throws IOException {
@@ -64,57 +142,8 @@
fieldIterator = doc.getFields().iterator();
while (fieldIterator.hasNext()) {
Fieldable field = (Fieldable) fieldIterator.next();
- // if the field as an instanceof FieldsReader.FieldForMerge, we're in merge mode
- // and field.binaryValue() already returns the compressed value for a field
- // with isCompressed()==true, so we disable compression in that case
- boolean disableCompression = (field instanceof FieldsReader.FieldForMerge);
- if (field.isStored()) {
- fieldsStream.writeVInt(fieldInfos.fieldNumber(field.name()));
-
- byte bits = 0;
- if (field.isTokenized())
- bits |= FieldsWriter.FIELD_IS_TOKENIZED;
- if (field.isBinary())
- bits |= FieldsWriter.FIELD_IS_BINARY;
- if (field.isCompressed())
- bits |= FieldsWriter.FIELD_IS_COMPRESSED;
-
- fieldsStream.writeByte(bits);
-
- if (field.isCompressed()) {
- // compression is enabled for the current field
- byte[] data = null;
-
- if (disableCompression) {
- // optimized case for merging, the data
- // is already compressed
- data = field.binaryValue();
- } else {
- // check if it is a binary field
- if (field.isBinary()) {
- data = compress(field.binaryValue());
- }
- else {
- data = compress(field.stringValue().getBytes("UTF-8"));
- }
- }
- final int len = data.length;
- fieldsStream.writeVInt(len);
- fieldsStream.writeBytes(data, len);
- }
- else {
- // compression is disabled for the current field
- if (field.isBinary()) {
- byte[] data = field.binaryValue();
- final int len = data.length;
- fieldsStream.writeVInt(len);
- fieldsStream.writeBytes(data, len);
- }
- else {
- fieldsStream.writeString(field.stringValue());
- }
- }
- }
+ if (field.isStored())
+ writeField(fieldInfos.fieldInfo(field.name()), field);
}
}
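
[The constructor split above, with doClose marking stream ownership, lets the new DocumentsWriter keep one shared .fdx/.fdt pair open across many documents, formatting each document into a RAMOutputStream and appending it with flushDocument(). A hedged sketch of that driving pattern: dir, fieldInfos and the segment name are assumed, FieldsWriter is package-private so this is same-package code, and the buffer is presumed to already hold one document in fields-file format, as the comment on flushDocument requires:

    IndexOutput fdx = dir.createOutput("_0.fdx");
    IndexOutput fdt = dir.createOutput("_0.fdt");
    FieldsWriter fw = new FieldsWriter(fdx, fdt, fieldInfos); // fw will not close these streams

    RAMOutputStream buffer = new RAMOutputStream();
    // ... write one document's stored fields into buffer ...
    fw.flushDocument(buffer); // one 8-byte .fdx entry, then the buffered bytes into .fdt
    buffer.reset();           // reuse the buffer for the next document
]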
Modified: lucene/java/trunk/src/java/org/apache/lucene/index/IndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/IndexFileDeleter.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/IndexFileDeleter.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/IndexFileDeleter.java Wed Jul 4 08:16:38 2007
@@ -97,6 +97,7 @@
private PrintStream infoStream;
private Directory directory;
private IndexDeletionPolicy policy;
+ private DocumentsWriter docWriter;
void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
@@ -116,10 +117,12 @@
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream)
+ public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream, DocumentsWriter docWriter)
throws CorruptIndexException, IOException {
+ this.docWriter = docWriter;
this.infoStream = infoStream;
+
this.policy = policy;
this.directory = directory;
@@ -294,7 +297,7 @@
public void checkpoint(SegmentInfos segmentInfos, boolean isCommit) throws IOException {
if (infoStream != null) {
- message("now checkpoint \"" + segmentInfos.getCurrentSegmentFileName() + "\" [isCommit = " + isCommit + "]");
+ message("now checkpoint \"" + segmentInfos.getCurrentSegmentFileName() + "\" [" + segmentInfos.size() + " segments " + "; isCommit = " + isCommit + "]");
}
// Try again now to delete any previously un-deletable
@@ -310,6 +313,8 @@
// Incref the files:
incRef(segmentInfos, isCommit);
+ if (docWriter != null)
+ incRef(docWriter.files());
if (isCommit) {
// Append to our commits list:
@@ -325,9 +330,8 @@
// DecRef old files from the last checkpoint, if any:
int size = lastFiles.size();
if (size > 0) {
- for(int i=0;i<size;i++)
- decRef((List) lastFiles.get(i));

Modified: lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriter.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriter.java Wed Jul 4 08:16:38 2007
close should be called.
These changes are buffered in memory and periodically
- flushed to the {@link Directory} (during the above method calls). A flush is triggered when there are
- enough buffered deletes (see {@link
- #setMaxBufferedDeleteTerms}) or enough added documents
- (see {@link #setMaxBufferedDocs}) since the last flush,
- whichever is sooner. You can also force a flush by
- calling {@link #flush}. When a flush occurs, both pending
- deletes and added documents are flushed to the index. A
- flush may also trigger one or more segment merges.
+ flushed to the {@link Directory} (during the above method
+ calls). A flush is triggered when there are enough
+ buffered deletes (see {@link #setMaxBufferedDeleteTerms})
+ or enough added documents since the last flush, whichever
+ is sooner. For the added documents, flushing is triggered
+ either by RAM usage of the documents (see {@link
+ #setRAMBufferSizeMB}) or the number of added documents
+ (this is the default; see {@link #setMaxBufferedDocs}).
+ For best indexing speed you should flush by RAM usage with
+ a large RAM buffer. You can also force a flush by calling
+ {@link #flush}. When a flush occurs, both pending deletes
+ and added documents are flushed to the index. A flush may
+ also trigger one or more segment merges.
The optional autoCommit
argument to the
@@ -181,7 +186,20 @@
/**
* Default value is 10. Change using {@link #setMaxBufferedDocs(int)}.
*/
+
public final static int DEFAULT_MAX_BUFFERED_DOCS = 10;
+ /* new merge policy
+ public final static int DEFAULT_MAX_BUFFERED_DOCS = 0;
+ */
+
+ /**
+ * Default value is 0 MB (which means flush only by doc
+ * count). Change using {@link #setRAMBufferSizeMB}.
+ */
+ public final static double DEFAULT_RAM_BUFFER_SIZE_MB = 0.0;
+ /* new merge policy
+ public final static double DEFAULT_RAM_BUFFER_SIZE_MB = 16.0;
+ */
/**
* Default value is 1000. Change using {@link #setMaxBufferedDeleteTerms(int)}.
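
[The two defaults above describe mutually exclusive flush triggers: a writer flushes either every N added documents or whenever buffered documents exceed a RAM budget. A usage sketch under assumed setup; dir is a Directory and analyzer an Analyzer, neither part of the patch:

    IndexWriter writer = new IndexWriter(dir, analyzer, true);
    // Default at this revision: flush every DEFAULT_MAX_BUFFERED_DOCS (10) docs.
    writer.setRAMBufferSizeMB(32.0); // switch to the RAM trigger: flush near 32 MB
    // getMaxBufferedDocs() should now report 0, i.e. doc-count flushing is off
    writer.close();
]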
@@ -224,8 +242,7 @@
private boolean autoCommit = true; // false if we should commit only on close
SegmentInfos segmentInfos = new SegmentInfos(); // the segments
- SegmentInfos ramSegmentInfos = new SegmentInfos(); // the segments in ramDirectory
- private final RAMDirectory ramDirectory = new RAMDirectory(); // for temp segs
+ private DocumentsWriter docWriter;
private IndexFileDeleter deleter;
private Lock writeLock;
@@ -621,11 +638,14 @@
rollbackSegmentInfos = (SegmentInfos) segmentInfos.clone();
}
+ docWriter = new DocumentsWriter(directory, this);
+ docWriter.setInfoStream(infoStream);
+
// Default deleter (for backwards compatibility) is
// KeepOnlyLastCommitDeleter:
deleter = new IndexFileDeleter(directory,
deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy,
- segmentInfos, infoStream);
+ segmentInfos, infoStream, docWriter);
} catch (IOException e) {
this.writeLock.release();
@@ -683,31 +703,64 @@
return maxFieldLength;
}
- /** Determines the minimal number of documents required before the buffered
- * in-memory documents are merged and a new Segment is created.
- * Since Documents are merged in a {@link org.apache.lucene.store.RAMDirectory},
- * large value gives faster indexing. At the same time, mergeFactor limits
- * the number of files open in a FSDirectory.
- *
- * <p>The default value is 10.
- *
- * @throws IllegalArgumentException if maxBufferedDocs is smaller than 2
+ /** Determines the minimal number of documents required
+ * before the buffered in-memory documents are flushed as
+ * a new Segment. Large values generally give faster
+ * indexing.
+ *
+ * <p>When this is set, the writer will flush every
+ * maxBufferedDocs added documents and never flush by RAM
+ * usage.
+ *
+ * The default value is 0 (writer flushes by RAM
+ * usage).
+ *
+ * @throws IllegalArgumentException if maxBufferedDocs is
+ * smaller than 2
+ * @see #setRAMBufferSizeMB
*/
public void setMaxBufferedDocs(int maxBufferedDocs) {
ensureOpen();
if (maxBufferedDocs < 2)
throw new IllegalArgumentException("maxBufferedDocs must at least be 2");
- this.minMergeDocs = maxBufferedDocs;
+ docWriter.setMaxBufferedDocs(maxBufferedDocs);
}
/**
- * Returns the number of buffered added documents that will
+ * Returns 0 if this writer is flushing by RAM usage, else
+ * returns the number of buffered added documents that will
* trigger a flush.
* @see #setMaxBufferedDocs
*/
public int getMaxBufferedDocs() {
ensureOpen();
- return minMergeDocs;
+ return docWriter.getMaxBufferedDocs();
+ }
+
+ /** Determines the amount of RAM that may be used for
+ * buffering added documents before they are flushed as a
+ * new Segment. Generally for faster indexing performance
+ * it's best to flush by RAM usage instead of document
+ * count and use as large a RAM buffer as you can.
+ *
+ * When this is set, the writer will flush whenever
+ * buffered documents use this much RAM.
+ *
+ * The default value is {@link #DEFAULT_RAM_BUFFER_SIZE_MB}.
+ */
+ public void setRAMBufferSizeMB(double mb) {
+ if (mb <= 0.0)
+ throw new IllegalArgumentException("ramBufferSize should be > 0.0 MB");
+ docWriter.setRAMBufferSizeMB(mb);
+ }
+
+ /**
+ * Returns 0.0 if this writer is flushing by document
+ * count, else returns the value set by {@link
+ * #setRAMBufferSizeMB}.
+ */
+ public double getRAMBufferSizeMB() {
+ return docWriter.getRAMBufferSizeMB();
}
/**
@@ -788,6 +841,7 @@
public void setInfoStream(PrintStream infoStream) {
ensureOpen();
this.infoStream = infoStream;
+ docWriter.setInfoStream(infoStream);
deleter.setInfoStream(infoStream);
}
@@ -871,7 +925,7 @@
*/
public synchronized void close() throws CorruptIndexException, IOException {
if (!closed) {
- flushRamSegments();
+ flush(true, true);
if (commitPending) {
segmentInfos.write(directory); // now commit changes
@@ -880,18 +934,79 @@
rollbackSegmentInfos = null;
}
- ramDirectory.close();
if (writeLock != null) {
writeLock.release(); // release write lock
writeLock = null;
}
closed = true;
+ docWriter = null;
if(closeDir)
directory.close();
}
}
+ /** Tells the docWriter to close its currently open shared
+ * doc stores (stored fields & vectors files). */
+ private void flushDocStores() throws IOException {
+
+ List files = docWriter.files();
+
+ if (files.size() > 0) {
+ String docStoreSegment;
+
+ boolean success = false;
+ try {
+ docStoreSegment = docWriter.closeDocStore();
+ success = true;
+ } finally {
+ if (!success)
+ docWriter.abort();
+ }
+
+ if (useCompoundFile && docStoreSegment != null) {
+ // Now build compound doc store file
+ checkpoint();
+
+ success = false;
+
+ final int numSegments = segmentInfos.size();
+
+ try {
+ CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, docStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION);
+ final int size = files.size();
+ for(int i=0;i<size;i++)
+ cfsWriter.addFile((String) files.get(i));
+ cfsWriter.close();

- * <p>The default value is {@link #DEFAULT_MAX_BUFFERED_DOCS}.
-
+ /** Determines amount of RAM usage by the buffered docs at
+ * which point we trigger a flush to the index.
*/
- private int minMergeDocs = DEFAULT_MAX_BUFFERED_DOCS;
-
+ private double ramBufferSize = DEFAULT_RAM_BUFFER_SIZE_MB*1024F*1024F;
/** Determines the largest number of documents ever merged by addDocument().
* Small values (e.g., less than 10,000) are best for interactive indexing,
@@ -1151,6 +1241,7 @@
*/
private PrintStream infoStream = null;
+
private static PrintStream defaultInfoStream = null;
/** Merges all segments together into a single segment,
@@ -1219,16 +1310,16 @@
*/
public synchronized void optimize() throws CorruptIndexException, IOException {
ensureOpen();
- flushRamSegments();
+ flush();
while (segmentInfos.size() > 1 ||
(segmentInfos.size() == 1 &&
(SegmentReader.hasDeletions(segmentInfos.info(0)) ||
SegmentReader.hasSeparateNorms(segmentInfos.info(0)) ||
segmentInfos.info(0).dir != directory ||
(useCompoundFile &&
- (!SegmentReader.usesCompoundFile(segmentInfos.info(0))))))) {
+ !segmentInfos.info(0).getUseCompoundFile())))) {
int minSegment = segmentInfos.size() - mergeFactor;
- mergeSegments(segmentInfos, minSegment < 0 ? 0 : minSegment, segmentInfos.size());
+ mergeSegments(minSegment < 0 ? 0 : minSegment, segmentInfos.size());
}
}
@@ -1245,7 +1336,7 @@
localRollbackSegmentInfos = (SegmentInfos) segmentInfos.clone();
localAutoCommit = autoCommit;
if (localAutoCommit) {
- flushRamSegments();
+ flush();
// Turn off auto-commit during our local transaction:
autoCommit = false;
} else
@@ -1335,16 +1426,18 @@
segmentInfos.clear();
segmentInfos.addAll(rollbackSegmentInfos);
+ docWriter.abort();
+
// Ask deleter to locate unreferenced files & remove
// them:
deleter.checkpoint(segmentInfos, false);
deleter.refresh();
- ramSegmentInfos = new SegmentInfos();
bufferedDeleteTerms.clear();
numBufferedDeleteTerms = 0;
commitPending = false;
+ docWriter.abort();
close();
} else {
@@ -1439,7 +1532,7 @@
for (int base = start; base < segmentInfos.size(); base++) {
int end = Math.min(segmentInfos.size(), base+mergeFactor);
if (end-base > 1) {
- mergeSegments(segmentInfos, base, end);
+ mergeSegments(base, end);
}
}
}
@@ -1479,7 +1572,7 @@
// segments in S may not since they could come from multiple indexes.
// Here is the merge algorithm for addIndexesNoOptimize():
//
- // 1 Flush ram segments.
+ // 1 Flush ram.
// 2 Consider a combined sequence with segments from T followed
// by segments from S (same as current addIndexes(Directory[])).
// 3 Assume the highest level for segments in S is h. Call
@@ -1500,13 +1593,18 @@
// copy a segment, which may cause doc count to change because deleted
// docs are garbage collected.
- // 1 flush ram segments
+ // 1 flush ram
ensureOpen();
- flushRamSegments();
+ flush();
// 2 copy segment infos and find the highest level from dirs
- int startUpperBound = minMergeDocs;
+ int startUpperBound = docWriter.getMaxBufferedDocs();
+
+ /* new merge policy
+ if (startUpperBound == 0)
+ startUpperBound = 10;
+ */
boolean success = false;
@@ -1566,7 +1664,7 @@
// copy those segments from S
for (int i = segmentCount - numSegmentsToCopy; i < segmentCount; i++) {
- mergeSegments(segmentInfos, i, i + 1);
+ mergeSegments(i, i + 1);
}
if (checkNonDecreasingLevels(segmentCount - numSegmentsToCopy)) {
success = true;
@@ -1575,7 +1673,7 @@
}
// invariants do not hold, simply merge those segments
- mergeSegments(segmentInfos, segmentCount - numTailSegments, segmentCount);
+ mergeSegments(segmentCount - numTailSegments, segmentCount);
// maybe merge segments again if necessary
if (segmentInfos.info(segmentInfos.size() - 1).docCount > startUpperBound) {
@@ -1637,7 +1735,8 @@
}
segmentInfos.setSize(0); // pop old infos & add new
- info = new SegmentInfo(mergedName, docCount, directory, false, true);
+ info = new SegmentInfo(mergedName, docCount, directory, false, true,
+ -1, null, false);
segmentInfos.addElement(info);
success = true;
@@ -1720,29 +1819,19 @@
* buffered added documents or buffered deleted terms are
* large enough.
*/
- protected final void maybeFlushRamSegments() throws CorruptIndexException, IOException {
- // A flush is triggered if enough new documents are buffered or
- // if enough delete terms are buffered
- if (ramSegmentInfos.size() >= minMergeDocs || numBufferedDeleteTerms >= maxBufferedDeleteTerms) {
- flushRamSegments();
- }
+ protected final synchronized void maybeFlush() throws CorruptIndexException, IOException {
+ // We only check for flush due to number of buffered
+ // delete terms, because triggering of a flush due to
+ // too many added documents is handled by
+ // DocumentsWriter
+ if (numBufferedDeleteTerms >= maxBufferedDeleteTerms && docWriter.setFlushPending())
+ flush(true, false);
}
- /** Expert: Flushes all RAM-resident segments (buffered documents), then may merge segments. */
- private final synchronized void flushRamSegments() throws CorruptIndexException, IOException {
- flushRamSegments(true);
+ public final synchronized void flush() throws CorruptIndexException, IOException {
+ flush(true, false);
}
-
- /** Expert: Flushes all RAM-resident segments (buffered documents),
- * then may merge segments if triggerMerge==true. */
- protected final synchronized void flushRamSegments(boolean triggerMerge)
- throws CorruptIndexException, IOException {
- if (ramSegmentInfos.size() > 0 || bufferedDeleteTerms.size() > 0) {
- mergeSegments(ramSegmentInfos, 0, ramSegmentInfos.size());
- if (triggerMerge) maybeMergeSegments(minMergeDocs);
- }
- }
-
+
/**
* Flush all in-memory buffered updates (adds and deletes)
* to the Directory.
@@ -1751,9 +1840,158 @@
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public final synchronized void flush() throws CorruptIndexException, IOException {
+ public final synchronized void flush(boolean triggerMerge, boolean flushDocStores) throws CorruptIndexException, IOException {
ensureOpen();
- flushRamSegments();
+
+ // Make sure no threads are actively adding a document
+ docWriter.pauseAllThreads();
+
+ try {
+
+ SegmentInfo newSegment = null;
+
+ final int numDocs = docWriter.getNumDocsInRAM();
+
+ // Always flush docs if there are any
+ boolean flushDocs = numDocs > 0;
+
+ // With autoCommit=true we always must flush the doc
+ // stores when we flush
+ flushDocStores |= autoCommit;
+ String docStoreSegment = docWriter.getDocStoreSegment();
+ if (docStoreSegment == null)
+ flushDocStores = false;
+
+ // Always flush deletes if there are any delete terms.
+ // TODO: when autoCommit=false we don't have to flush
+ // deletes with every flushed segment; we can save
+ // CPU/IO by buffering longer & flushing deletes only
+ // when they are full or writer is being closed. We
+ // have to fix the "applyDeletesSelectively" logic to
+ // apply to more than just the last flushed segment
+ boolean flushDeletes = bufferedDeleteTerms.size() > 0;
+
+ if (infoStream != null)
+ infoStream.println(" flush: flushDocs=" + flushDocs +
+ " flushDeletes=" + flushDeletes +
+ " flushDocStores=" + flushDocStores +
+ " numDocs=" + numDocs);
+
+ int docStoreOffset = docWriter.getDocStoreOffset();
+ boolean docStoreIsCompoundFile = false;
+
+ // Check if the doc stores must be separately flushed
+ // because other segments, besides the one we are about
+ // to flush, reference it
+ if (flushDocStores && (!flushDocs || !docWriter.getSegment().equals(docWriter.getDocStoreSegment()))) {
+ // We must separately flush the doc store
+ if (infoStream != null)
+ infoStream.println(" flush shared docStore segment " + docStoreSegment);
+
+ flushDocStores();
+ flushDocStores = false;
+ docStoreIsCompoundFile = useCompoundFile;
+ }
+
+ String segment = docWriter.getSegment();
+
+ if (flushDocs || flushDeletes) {
+
+ SegmentInfos rollback = null;
+
+ if (flushDeletes)
+ rollback = (SegmentInfos) segmentInfos.clone();
+
+ boolean success = false;
+
+ try {
+ if (flushDocs) {
+
+ if (0 == docStoreOffset && flushDocStores) {
+ // This means we are flushing private doc stores
+ // with this segment, so it will not be shared
+ // with other segments
+ assert docStoreSegment != null;
+ assert docStoreSegment.equals(segment);
+ docStoreOffset = -1;
+ docStoreIsCompoundFile = false;
+ docStoreSegment = null;
+ }
+
+ int flushedDocCount = docWriter.flush(flushDocStores);
+
+ newSegment = new SegmentInfo(segment,
+ flushedDocCount,
+ directory, false, true,
+ docStoreOffset, docStoreSegment,
+ docStoreIsCompoundFile);
+ segmentInfos.addElement(newSegment);
+ }
+
+ if (flushDeletes) {
+ // we should be able to change this so we can
+ // buffer deletes longer and then flush them to
+ // multiple flushed segments, when
+ // autoCommit=false
+ applyDeletes(flushDocs);
+ doAfterFlush();
+ }
+
+ checkpoint();
+ success = true;
+ } finally {
+ if (!success) {
+ if (flushDeletes) {
+ // Fully replace the segmentInfos since flushed
+ // deletes could have changed any of the
+ // SegmentInfo instances:
+ segmentInfos.clear();
+ segmentInfos.addAll(rollback);
+ } else {
+ // Remove segment we added, if any:
+ if (newSegment != null &&
+ segmentInfos.size() > 0 &&
+ segmentInfos.info(segmentInfos.size()-1) == newSegment)
+ segmentInfos.remove(segmentInfos.size()-1);
+ }
+ if (flushDocs)
+ docWriter.abort();
+ deleter.checkpoint(segmentInfos, false);
+ deleter.refresh();
+ }
+ }
+
+ deleter.checkpoint(segmentInfos, autoCommit);
+
+ if (flushDocs && useCompoundFile) {
+ success = false;
+ try {
+ docWriter.createCompoundFile(segment);
+ newSegment.setUseCompoundFile(true);
+ checkpoint();
+ success = true;
+ } finally {
+ if (!success) {
+ newSegment.setUseCompoundFile(false);
+ deleter.refresh();
+ }
+ }
+
+ deleter.checkpoint(segmentInfos, autoCommit);
+ }
+
+ /* new merge policy
+ if (0 == docWriter.getMaxBufferedDocs())
+ maybeMergeSegments(mergeFactor * numDocs / 2);
+ else
+ maybeMergeSegments(docWriter.getMaxBufferedDocs());
+ */
+ maybeMergeSegments(docWriter.getMaxBufferedDocs());
+ }
+ } finally {
+ docWriter.clearFlushPending();
+ docWriter.resumeAllThreads();
+ }
}
/** Expert: Return the total size of all index files currently cached in memory.
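
[The docStoreOffset bookkeeping in flush() is easiest to see with assumed numbers: with autoCommit=false, suppose three flushes of 100 buffered documents each occur before anything forces the shared doc stores closed. The resulting SegmentInfos would be:

    // _0: docCount=100, docStoreSegment="_0", docStoreOffset=0
    // _1: docCount=100, docStoreSegment="_0", docStoreOffset=100
    // _2: docCount=100, docStoreSegment="_0", docStoreOffset=200
    // All three read stored fields and term vectors out of _0's files;
    // none of the three flushes rewrote or copied them.
]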
@@ -1761,15 +1999,15 @@
*/
public final long ramSizeInBytes() {
ensureOpen();
- return ramDirectory.sizeInBytes();
+ return docWriter.getRAMUsed();
}
/** Expert: Return the number of documents whose segments are currently cached in memory.
- * Useful when calling flushRamSegments()
+ * Useful when calling flush()
*/
public final synchronized int numRamDocs() {
ensureOpen();
- return ramSegmentInfos.size();
+ return docWriter.getNumDocsInRAM();
}
/** Incremental segment merger. */
@@ -1777,6 +2015,10 @@
long lowerBound = -1;
long upperBound = startUpperBound;
+ /* new merge policy
+ if (upperBound == 0) upperBound = 10;
+ */
+
while (upperBound < maxMergeDocs) {
int minSegment = segmentInfos.size();
int maxSegment = -1;
@@ -1808,7 +2050,7 @@
while (numSegments >= mergeFactor) {
// merge the leftmost* mergeFactor segments
- int docCount = mergeSegments(segmentInfos, minSegment, minSegment + mergeFactor);
+ int docCount = mergeSegments(minSegment, minSegment + mergeFactor);
numSegments -= mergeFactor;
if (docCount > upperBound) {
@@ -1837,39 +2079,108 @@
* Merges the named range of segments, replacing them in the stack with a
* single segment.
*/
- private final int mergeSegments(SegmentInfos sourceSegments, int minSegment, int end)
+
+ private final int mergeSegments(int minSegment, int end)
throws CorruptIndexException, IOException {
- // We may be called solely because there are deletes
- // pending, in which case doMerge is false:
- boolean doMerge = end > 0;
final String mergedName = newSegmentName();
+
SegmentMerger merger = null;
-
- final List ramSegmentsToDelete = new ArrayList();
-
SegmentInfo newSegment = null;
int mergedDocCount = 0;
- boolean anyDeletes = (bufferedDeleteTerms.size() != 0);
// This is try/finally to make sure merger's readers are closed:
try {
- if (doMerge) {
- if (infoStream != null) infoStream.print("merging segments");
- merger = new SegmentMerger(this, mergedName);
-
- for (int i = minSegment; i < end; i++) {
- SegmentInfo si = sourceSegments.info(i);
- if (infoStream != null)
- infoStream.print(" " + si.name + " (" + si.docCount + " docs)");
- IndexReader reader = SegmentReader.get(si, MERGE_READ_BUFFER_SIZE); // no need to set deleter (yet)
- merger.add(reader);
- if (reader.directory() == this.ramDirectory) {
- ramSegmentsToDelete.add(si);
- }
- }
+ if (infoStream != null) infoStream.print("merging segments");
+
+ // Check whether this merge will allow us to skip
+ // merging the doc stores (stored field & vectors).
+ // This is a very substantial optimization (saves tons
+ // of IO) that can only be applied with
+ // autoCommit=false.
+
+ Directory lastDir = directory;
+ String lastDocStoreSegment = null;
+ boolean mergeDocStores = false;
+ boolean doFlushDocStore = false;
+ int next = -1;
+
+ // Test each segment to be merged
+ for (int i = minSegment; i < end; i++) {
+ SegmentInfo si = segmentInfos.info(i);
+
+ // If it has deletions we must merge the doc stores
+ if (si.hasDeletions())
+ mergeDocStores = true;
+
+ // If it has its own (private) doc stores we must
+ // merge the doc stores
+ if (-1 == si.getDocStoreOffset())
+ mergeDocStores = true;
+
+ // If it has a different doc store segment than
+ // previous segments, we must merge the doc stores
+ String docStoreSegment = si.getDocStoreSegment();
+ if (docStoreSegment == null)
+ mergeDocStores = true;
+ else if (lastDocStoreSegment == null)
+ lastDocStoreSegment = docStoreSegment;
+ else if (!lastDocStoreSegment.equals(docStoreSegment))
+ mergeDocStores = true;
+
+ // Segments' docStoreOffsets must be in-order,
+ // contiguous. For the default merge policy now
+ // this will always be the case but for an arbitrary
+ // merge policy this may not be the case
+ if (-1 == next)
+ next = si.getDocStoreOffset() + si.docCount;
+ else if (next != si.getDocStoreOffset())
+ mergeDocStores = true;
+ else
+ next = si.getDocStoreOffset() + si.docCount;
+
+ // If the segment comes from a different directory
+ // we must merge
+ if (lastDir != si.dir)
+ mergeDocStores = true;
+
+ // If the segment is referencing the current "live"
+ // doc store outputs then we must merge
+ if (si.getDocStoreOffset() != -1 && si.getDocStoreSegment().equals(docWriter.getDocStoreSegment()))
+ doFlushDocStore = true;
+ }
+
+ final int docStoreOffset;
+ final String docStoreSegment;
+ final boolean docStoreIsCompoundFile;
+ if (mergeDocStores) {
+ docStoreOffset = -1;
+ docStoreSegment = null;
+ docStoreIsCompoundFile = false;
+ } else {
+ SegmentInfo si = segmentInfos.info(minSegment);
+ docStoreOffset = si.getDocStoreOffset();
+ docStoreSegment = si.getDocStoreSegment();
+ docStoreIsCompoundFile = si.getDocStoreIsCompoundFile();
+ }
+
+ if (mergeDocStores && doFlushDocStore)
+ // SegmentMerger intends to merge the doc stores
+ // (stored fields, vectors), and at least one of the
+ // segments to be merged refers to the currently
+ // live doc stores.
+ flushDocStores();
+
+ merger = new SegmentMerger(this, mergedName);
+
+ for (int i = minSegment; i < end; i++) {
+ SegmentInfo si = segmentInfos.info(i);
+ if (infoStream != null)
+ infoStream.print(" " + si.name + " (" + si.docCount + " docs)");
+ IndexReader reader = SegmentReader.get(si, MERGE_READ_BUFFER_SIZE, mergeDocStores); // no need to set deleter (yet)
+ merger.add(reader);
}
SegmentInfos rollback = null;
@@ -1879,65 +2190,32 @@
// if we hit exception when doing the merge:
try {
- if (doMerge) {
- mergedDocCount = merger.merge();
-
- if (infoStream != null) {
- infoStream.println(" into "+mergedName+" ("+mergedDocCount+" docs)");
- }
+ mergedDocCount = merger.merge(mergeDocStores);
- newSegment = new SegmentInfo(mergedName, mergedDocCount,
- directory, false, true);
+ if (infoStream != null) {
+ infoStream.println(" into "+mergedName+" ("+mergedDocCount+" docs)");
}
+
+ newSegment = new SegmentInfo(mergedName, mergedDocCount,
+ directory, false, true,
+ docStoreOffset,
+ docStoreSegment,
+ docStoreIsCompoundFile);
- if (sourceSegments != ramSegmentInfos || anyDeletes) {
- // Now save the SegmentInfo instances that
- // we are replacing:
- rollback = (SegmentInfos) segmentInfos.clone();
- }
+ rollback = (SegmentInfos) segmentInfos.clone();
- if (doMerge) {
- if (sourceSegments == ramSegmentInfos) {
- segmentInfos.addElement(newSegment);
- } else {
- for (int i = end-1; i > minSegment; i--) // remove old infos & add new
- sourceSegments.remove(i);
+ for (int i = end-1; i > minSegment; i--) // remove old infos & add new
+ segmentInfos.remove(i);
- segmentInfos.set(minSegment, newSegment);
- }
- }
+ segmentInfos.set(minSegment, newSegment);
- if (sourceSegments == ramSegmentInfos) {
- maybeApplyDeletes(doMerge);
- doAfterFlush();
- }
-
checkpoint();
success = true;
} finally {
-
- if (success) {
- // The non-ram-segments case is already committed
- // (above), so all the remains for ram segments case
- // is to clear the ram segments:
- if (sourceSegments == ramSegmentInfos) {
- ramSegmentInfos.removeAllElements();
- }
- } else {
-
- // Must rollback so our state matches index:
- if (sourceSegments == ramSegmentInfos && !anyDeletes) {
- // Simple case: newSegment may or may not have
- // been added to the end of our segment infos,
- // so just check & remove if so:
- if (newSegment != null &&
- segmentInfos.size() > 0 &&
- segmentInfos.info(segmentInfos.size()-1) == newSegment) {
- segmentInfos.remove(segmentInfos.size()-1);
- }
- } else if (rollback != null) {
+ if (!success) {
+ if (rollback != null) {
// Rollback the individual SegmentInfo
// instances, but keep original SegmentInfos
// instance (so we don't try to write again the
@@ -1952,16 +2230,13 @@
}
} finally {
// close readers before we attempt to delete now-obsolete segments
- if (doMerge) merger.closeReaders();
+ merger.closeReaders();
}
- // Delete the RAM segments
- deleter.deleteDirect(ramDirectory, ramSegmentsToDelete);
-
// Give deleter a chance to remove files now.
deleter.checkpoint(segmentInfos, autoCommit);
- if (useCompoundFile && doMerge) {
+ if (useCompoundFile) {
boolean success = false;
@@ -1988,19 +2263,23 @@
}
// Called during flush to apply any buffered deletes. If
- // doMerge is true then a new segment was just created and
- // flushed from the ram segments.
- private final void maybeApplyDeletes(boolean doMerge) throws CorruptIndexException, IOException {
+ // flushedNewSegment is true then a new segment was just
+ // created and flushed from the ram segments, so we will
+ // selectively apply the deletes to that new segment.
+ private final void applyDeletes(boolean flushedNewSegment) throws CorruptIndexException, IOException {
if (bufferedDeleteTerms.size() > 0) {
if (infoStream != null)
infoStream.println("flush " + numBufferedDeleteTerms + " buffered deleted terms on "
+ segmentInfos.size() + " segments.");
- if (doMerge) {
+ if (flushedNewSegment) {
IndexReader reader = null;
try {
- reader = SegmentReader.get(segmentInfos.info(segmentInfos.size() - 1));
+ // Open readers w/o opening the stored fields /
+ // vectors because these files may still be held
+ // open for writing by docWriter
+ reader = SegmentReader.get(segmentInfos.info(segmentInfos.size() - 1), false);
// Apply delete terms to the segment just flushed from ram
// apply appropriately so that a delete term is only applied to
@@ -2018,14 +2297,14 @@
}
int infosEnd = segmentInfos.size();
- if (doMerge) {
+ if (flushedNewSegment) {
infosEnd--;
}
for (int i = 0; i < infosEnd; i++) {
IndexReader reader = null;
try {
- reader = SegmentReader.get(segmentInfos.info(i));
+ reader = SegmentReader.get(segmentInfos.info(i), false);
// Apply delete terms to disk segments
// except the one just flushed from ram.
@@ -2049,7 +2328,12 @@
private final boolean checkNonDecreasingLevels(int start) {
int lowerBound = -1;
- int upperBound = minMergeDocs;
+ int upperBound = docWriter.getMaxBufferedDocs();
+
+ /* new merge policy
+ if (upperBound == 0)
+ upperBound = 10;
+ */
for (int i = segmentInfos.size() - 1; i >= start; i--) {
int docCount = segmentInfos.info(i).docCount;
@@ -2098,10 +2382,11 @@
// well as the disk segments.
private void bufferDeleteTerm(Term term) {
Num num = (Num) bufferedDeleteTerms.get(term);
+ int numDoc = docWriter.getNumDocsInRAM();
if (num == null) {
- bufferedDeleteTerms.put(term, new Num(ramSegmentInfos.size()));
+ bufferedDeleteTerms.put(term, new Num(numDoc));
} else {
- num.setNum(ramSegmentInfos.size());
+ num.setNum(numDoc);
}
numBufferedDeleteTerms++;
}
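
[The per-segment tests in mergeSegments() above decide whether that doc-store sharing survives a merge. A worked case with assumed values:

    // mergeSegments(0, 3) over:
    //   _0: 100 docs, docStoreOffset=0,   docStoreSegment="_0", no deletions
    //   _1: 100 docs, docStoreOffset=100, docStoreSegment="_0", no deletions
    //   _2:  50 docs, docStoreOffset=200, docStoreSegment="_0", no deletions
    // Offsets are contiguous (0+100=100, 100+100=200), the store segment and
    // directory match, and nothing is deleted, so mergeDocStores stays false:
    // only postings and norms are rewritten, and the merged segment keeps
    // docStoreOffset=0, docStoreSegment="_0". A single deletion, or a gap in
    // the offsets, flips mergeDocStores to true and the stored fields and
    // vectors are remerged as before.
]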
Modified: lucene/java/trunk/src/java/org/apache/lucene/index/SegmentInfo.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/SegmentInfo.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/SegmentInfo.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/SegmentInfo.java Wed Jul 4 08:16:38 2007
@@ -65,6 +65,12 @@
private List files; // cached list of files that this segment uses
// in the Directory
+ private int docStoreOffset; // if this segment shares stored fields & vectors, this
+ // offset is where in that file this segment's docs begin
+ private String docStoreSegment; // name used to derive fields/vectors file we share with
+ // other segments
+ private boolean docStoreIsCompoundFile; // whether doc store files are stored in compound file (*.cfx)
+
public SegmentInfo(String name, int docCount, Directory dir) {
this.name = name;
this.docCount = docCount;
@@ -73,13 +79,25 @@
isCompoundFile = CHECK_DIR;
preLockless = true;
hasSingleNormFile = false;
+ docStoreOffset = -1;
+ docStoreSegment = name;
+ docStoreIsCompoundFile = false;
}
public SegmentInfo(String name, int docCount, Directory dir, boolean isCompoundFile, boolean hasSingleNormFile) {
+ this(name, docCount, dir, isCompoundFile, hasSingleNormFile, -1, null, false);
+ }
+
+ public SegmentInfo(String name, int docCount, Directory dir, boolean isCompoundFile, boolean hasSingleNormFile,
+ int docStoreOffset, String docStoreSegment, boolean docStoreIsCompoundFile) {
this(name, docCount, dir);
this.isCompoundFile = (byte) (isCompoundFile ? YES : NO);
this.hasSingleNormFile = hasSingleNormFile;
preLockless = false;
+ this.docStoreOffset = docStoreOffset;
+ this.docStoreSegment = docStoreSegment;
+ this.docStoreIsCompoundFile = docStoreIsCompoundFile;
+ assert docStoreOffset == -1 || docStoreSegment != null;
}
/**
@@ -92,6 +110,8 @@
dir = src.dir;
preLockless = src.preLockless;
delGen = src.delGen;
+ docStoreOffset = src.docStoreOffset;
+ docStoreIsCompoundFile = src.docStoreIsCompoundFile;
if (src.normGen == null) {
normGen = null;
} else {
@@ -116,6 +136,20 @@
docCount = input.readInt();
if (format <= SegmentInfos.FORMAT_LOCKLESS) {
delGen = input.readLong();
+ if (format <= SegmentInfos.FORMAT_SHARED_DOC_STORE) {
+ docStoreOffset = input.readInt();
+ if (docStoreOffset != -1) {
+ docStoreSegment = input.readString();
+ docStoreIsCompoundFile = (1 == input.readByte());
+ } else {
+ docStoreSegment = name;
+ docStoreIsCompoundFile = false;
+ }
+ } else {
+ docStoreOffset = -1;
+ docStoreSegment = name;
+ docStoreIsCompoundFile = false;
+ }
if (format <= SegmentInfos.FORMAT_SINGLE_NORM_FILE) {
hasSingleNormFile = (1 == input.readByte());
} else {
@@ -138,6 +172,9 @@
isCompoundFile = CHECK_DIR;
preLockless = true;
hasSingleNormFile = false;
+ docStoreOffset = -1;
+ docStoreIsCompoundFile = false;
+ docStoreSegment = null;
}
}
@@ -368,6 +405,28 @@
return dir.fileExists(name + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
}
}
+
+ int getDocStoreOffset() {
+ return docStoreOffset;
+ }
+
+ boolean getDocStoreIsCompoundFile() {
+ return docStoreIsCompoundFile;
+ }
+
+ void setDocStoreIsCompoundFile(boolean v) {
+ docStoreIsCompoundFile = v;
+ files = null;
+ }
+
+ String getDocStoreSegment() {
+ return docStoreSegment;
+ }
+
+ void setDocStoreOffset(int offset) {
+ docStoreOffset = offset;
+ files = null;
+ }
/**
* Save this segment's info.
@@ -377,6 +436,12 @@
output.writeString(name);
output.writeInt(docCount);
output.writeLong(delGen);
+ output.writeInt(docStoreOffset);
+ if (docStoreOffset != -1) {
+ output.writeString(docStoreSegment);
+ output.writeByte((byte) (docStoreIsCompoundFile ? 1:0));
+ }
+
output.writeByte((byte) (hasSingleNormFile ? 1:0));
if (normGen == null) {
output.writeInt(NO);
@@ -389,6 +454,11 @@
output.writeByte(isCompoundFile);
}
+ private void addIfExists(List files, String fileName) throws IOException {
+ if (dir.fileExists(fileName))
+ files.add(fileName);
+ }
+
/*
* Return all files referenced by this SegmentInfo. The
* returns List is a locally cached List so you should not
@@ -409,13 +479,28 @@
if (useCompoundFile) {
files.add(name + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
} else {
- for (int i = 0; i < IndexFileNames.INDEX_EXTENSIONS_IN_COMPOUND_FILE.length; i++) {
- String ext = IndexFileNames.INDEX_EXTENSIONS_IN_COMPOUND_FILE[i];
- String fileName = name + "." + ext;
- if (dir.fileExists(fileName)) {
- files.add(fileName);
- }
+ final String[] exts = IndexFileNames.NON_STORE_INDEX_EXTENSIONS;
+ for(int i=0;i<exts.length;i++)
+ addIfExists(files, name + "." + exts[i]);

Modified: lucene/java/trunk/src/java/org/apache/lucene/index/SegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/SegmentMerger.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/SegmentMerger.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/SegmentMerger.java Wed Jul 4 08:16:38 2007
+ if (!mergeDocStores) {
+ // When we are not merging by doc stores, their field
+ // name -> number mapping are the same. So, we start
+ // with the fieldInfos of the last segment in this
+ // case, to keep that numbering.
+ final SegmentReader sr = (SegmentReader) readers.elementAt(readers.size()-1);
+ fieldInfos = (FieldInfos) sr.fieldInfos.clone();
+ } else {
+ fieldInfos = new FieldInfos(); // merge field names
+ }
+
int docCount = 0;
for (int i = 0; i < readers.size(); i++) {
IndexReader reader = (IndexReader) readers.elementAt(i);
@@ -187,30 +223,40 @@
}
fieldInfos.write(directory, segment + ".fnm");
- FieldsWriter fieldsWriter = // merge field values
- new FieldsWriter(directory, segment, fieldInfos);
-
- // for merging we don't want to compress/uncompress the data, so to tell the FieldsReader that we're
- // in merge mode, we use this FieldSelector
- FieldSelector fieldSelectorMerge = new FieldSelector() {
- public FieldSelectorResult accept(String fieldName) {
- return FieldSelectorResult.LOAD_FOR_MERGE;
- }
- };
+ if (mergeDocStores) {
+
+ FieldsWriter fieldsWriter = // merge field values
+ new FieldsWriter(directory, segment, fieldInfos);
- try {
- for (int i = 0; i < readers.size(); i++) {
- IndexReader reader = (IndexReader) readers.elementAt(i);
- int maxDoc = reader.maxDoc();
- for (int j = 0; j < maxDoc; j++)
- if (!reader.isDeleted(j)) { // skip deleted docs
- fieldsWriter.addDocument(reader.document(j, fieldSelectorMerge));
- docCount++;
- }
+ // for merging we don't want to compress/uncompress the data, so to tell the FieldsReader that we're
+ // in merge mode, we use this FieldSelector
+ FieldSelector fieldSelectorMerge = new FieldSelector() {
+ public FieldSelectorResult accept(String fieldName) {
+ return FieldSelectorResult.LOAD_FOR_MERGE;
+ }
+ };
+
+ try {
+ for (int i = 0; i < readers.size(); i++) {
+ IndexReader reader = (IndexReader) readers.elementAt(i);
+ int maxDoc = reader.maxDoc();
+ for (int j = 0; j < maxDoc; j++)
+ if (!reader.isDeleted(j)) { // skip deleted docs
+ fieldsWriter.addDocument(reader.document(j, fieldSelectorMerge));
+ docCount++;
+ }
+ }
+ } finally {
+ fieldsWriter.close();
}
- } finally {
- fieldsWriter.close();
- }
+
+ } else
+ // If we are skipping the doc stores, that means there
+ // are no deletions in any of these segments, so we
+ // just sum numDocs() of each segment to get total docCount
+ for (int i = 0; i < readers.size(); i++)
+ docCount += ((IndexReader) readers.elementAt(i)).numDocs();
+
return docCount;
}
@@ -355,6 +401,7 @@
for (int i = 0; i < n; i++) {
SegmentMergeInfo smi = smis[i];
TermPositions postings = smi.getPositions();
+ assert postings != null;
int base = smi.base;
int[] docMap = smi.getDocMap();
postings.seek(smi.termEnum);
Modified: lucene/java/trunk/src/java/org/apache/lucene/index/SegmentReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/SegmentReader.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/SegmentReader.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/SegmentReader.java Wed Jul 4 08:16:38 2007
@@ -60,6 +60,7 @@
// Compound File Reader when based on a compound file segment
CompoundFileReader cfsReader = null;
+ CompoundFileReader storeCFSReader = null;
private class Norm {
public Norm(IndexInput in, int number, long normSeek)
@@ -128,7 +129,15 @@
* @throws IOException if there is a low-level IO error
*/
public static SegmentReader get(SegmentInfo si) throws CorruptIndexException, IOException {
- return get(si.dir, si, null, false, false, BufferedIndexInput.BUFFER_SIZE);
+ return get(si.dir, si, null, false, false, BufferedIndexInput.BUFFER_SIZE, true);
+ }
+
+ /**
+ * @throws CorruptIndexException if the index is corrupt
+ * @throws IOException if there is a low-level IO error
+ */
+ public static SegmentReader get(SegmentInfo si, boolean doOpenStores) throws CorruptIndexException, IOException {
+ return get(si.dir, si, null, false, false, BufferedIndexInput.BUFFER_SIZE, doOpenStores);
}
/**
@@ -136,7 +145,15 @@
* @throws IOException if there is a low-level IO error
*/
public static SegmentReader get(SegmentInfo si, int readBufferSize) throws CorruptIndexException, IOException {
- return get(si.dir, si, null, false, false, readBufferSize);
+ return get(si.dir, si, null, false, false, readBufferSize, true);
+ }
+
+ /**
+ * @throws CorruptIndexException if the index is corrupt
+ * @throws IOException if there is a low-level IO error
+ */
+ public static SegmentReader get(SegmentInfo si, int readBufferSize, boolean doOpenStores) throws CorruptIndexException, IOException {
+ return get(si.dir, si, null, false, false, readBufferSize, doOpenStores);
}
/**
@@ -145,7 +162,7 @@
*/
public static SegmentReader get(SegmentInfos sis, SegmentInfo si,
boolean closeDir) throws CorruptIndexException, IOException {
- return get(si.dir, si, sis, closeDir, true, BufferedIndexInput.BUFFER_SIZE);
+ return get(si.dir, si, sis, closeDir, true, BufferedIndexInput.BUFFER_SIZE, true);
}
/**
@@ -157,6 +174,19 @@
boolean closeDir, boolean ownDir,
int readBufferSize)
throws CorruptIndexException, IOException {
+ return get(dir, si, sis, closeDir, ownDir, readBufferSize, true);
+ }
+
+ /**
+ * @throws CorruptIndexException if the index is corrupt
+ * @throws IOException if there is a low-level IO error
+ */
+ public static SegmentReader get(Directory dir, SegmentInfo si,
+ SegmentInfos sis,
+ boolean closeDir, boolean ownDir,
+ int readBufferSize,
+ boolean doOpenStores)
+ throws CorruptIndexException, IOException {
SegmentReader instance;
try {
instance = (SegmentReader)IMPL.newInstance();
@@ -164,11 +194,11 @@
throw new RuntimeException("cannot load SegmentReader class: " + e, e);
}
instance.init(dir, sis, closeDir, ownDir);
- instance.initialize(si, readBufferSize);
+ instance.initialize(si, readBufferSize, doOpenStores);
return instance;
}
- private void initialize(SegmentInfo si, int readBufferSize) throws CorruptIndexException, IOException {
+ private void initialize(SegmentInfo si, int readBufferSize, boolean doOpenStores) throws CorruptIndexException, IOException {
segment = si.name;
this.si = si;
@@ -178,17 +208,45 @@
// Use compound file directory for some files, if it exists
Directory cfsDir = directory();
if (si.getUseCompoundFile()) {
- cfsReader = new CompoundFileReader(directory(), segment + ".cfs", readBufferSize);
+ cfsReader = new CompoundFileReader(directory(), segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
cfsDir = cfsReader;
}
+ final Directory storeDir;
+
+ if (doOpenStores) {
+ if (si.getDocStoreOffset() != -1) {
+ if (si.getDocStoreIsCompoundFile()) {
+ storeCFSReader = new CompoundFileReader(directory(), si.getDocStoreSegment() + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION, readBufferSize);
+ storeDir = storeCFSReader;
+ } else {
+ storeDir = directory();
+ }
+ } else {
+ storeDir = cfsDir;
+ }
+ } else
+ storeDir = null;
+
// No compound file exists - use the multi-file format
fieldInfos = new FieldInfos(cfsDir, segment + ".fnm");
- fieldsReader = new FieldsReader(cfsDir, segment, fieldInfos, readBufferSize);
- // Verify two sources of "maxDoc" agree:
- if (fieldsReader.size() != si.docCount) {
- throw new CorruptIndexException("doc counts differ for segment " + si.name + ": fieldsReader shows " + fieldsReader.size() + " but segmentInfo shows " + si.docCount);
+ final String fieldsSegment;
+ final Directory dir;
+
+ if (si.getDocStoreOffset() != -1)
+ fieldsSegment = si.getDocStoreSegment();
+ else
+ fieldsSegment = segment;
+
+ if (doOpenStores) {
+ fieldsReader = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize,
+ si.getDocStoreOffset(), si.docCount);
+
+ // Verify two sources of "maxDoc" agree:
+ if (si.getDocStoreOffset() == -1 && fieldsReader.size() != si.docCount) {
+ throw new CorruptIndexException("doc counts differ for segment " + si.name + ": fieldsReader shows " + fieldsReader.size() + " but segmentInfo shows " + si.docCount);
+ }
}
tis = new TermInfosReader(cfsDir, segment, fieldInfos, readBufferSize);
@@ -209,8 +267,13 @@
proxStream = cfsDir.openInput(segment + ".prx", readBufferSize);
openNorms(cfsDir, readBufferSize);
- if (fieldInfos.hasVectors()) { // open term vector files only as needed
- termVectorsReaderOrig = new TermVectorsReader(cfsDir, segment, fieldInfos, readBufferSize);
+ if (doOpenStores && fieldInfos.hasVectors()) { // open term vector files only as needed
+ final String vectorsSegment;
+ if (si.getDocStoreOffset() != -1)
+ vectorsSegment = si.getDocStoreSegment();
+ else
+ vectorsSegment = segment;
+ termVectorsReaderOrig = new TermVectorsReader(storeDir, vectorsSegment, fieldInfos, readBufferSize, si.getDocStoreOffset(), si.docCount);
}
success = true;
} finally {
@@ -273,6 +336,9 @@
if (cfsReader != null)
cfsReader.close();
+
+ if (storeCFSReader != null)
+ storeCFSReader.close();
}
static boolean hasDeletions(SegmentInfo si) throws IOException {
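
[The doOpenStores overloads exist so a flush can apply buffered deletes without opening files that DocumentsWriter may still hold open for writing; a same-package sketch mirroring applyDeletes() in IndexWriter above, with segmentInfos and term assumed to be in scope:

    SegmentInfo si = segmentInfos.info(segmentInfos.size() - 1);
    IndexReader reader = SegmentReader.get(si, false); // no .fdt/.fdx/.tv* opened
    try {
      reader.deleteDocuments(term); // only the .del file is touched
    } finally {
      reader.close();
    }
]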
Modified: lucene/java/trunk/src/java/org/apache/lucene/index/TermVectorsReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/TermVectorsReader.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/TermVectorsReader.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/TermVectorsReader.java Wed Jul 4 08:16:38 2007
@@ -33,6 +33,10 @@
private IndexInput tvd;
private IndexInput tvf;
private int size;
+
+ // The docID offset where our docs begin in the index
+ // file. This will be 0 if we have our own private file.
+ private int docStoreOffset;
private int tvdFormat;
private int tvfFormat;
@@ -44,6 +48,11 @@
TermVectorsReader(Directory d, String segment, FieldInfos fieldInfos, int readBufferSize)
throws CorruptIndexException, IOException {
+ this(d, segment, fieldInfos, readBufferSize, -1, 0);
+ }
+
+ TermVectorsReader(Directory d, String segment, FieldInfos fieldInfos, int readBufferSize, int docStoreOffset, int size)
+ throws CorruptIndexException, IOException {
if (d.fileExists(segment + TermVectorsWriter.TVX_EXTENSION)) {
tvx = d.openInput(segment + TermVectorsWriter.TVX_EXTENSION, readBufferSize);
checkValidFormat(tvx);
@@ -51,7 +60,16 @@
tvdFormat = checkValidFormat(tvd);
tvf = d.openInput(segment + TermVectorsWriter.TVF_EXTENSION, readBufferSize);
tvfFormat = checkValidFormat(tvf);
- size = (int) tvx.length() / 8;
+ if (-1 == docStoreOffset) {
+ this.docStoreOffset = 0;
+ this.size = (int) (tvx.length() / 8);
+ } else {
+ this.docStoreOffset = docStoreOffset;
+ this.size = size;
+ // Verify the file is long enough to hold all of our
+ // docs
+ assert ((int) (tvx.length()/8)) >= size + docStoreOffset;
+ }
}
this.fieldInfos = fieldInfos;
@@ -102,7 +120,7 @@
//We don't need to do this in other seeks because we already have the
// file pointer
//that was written in another file
- tvx.seek((docNum * 8L) + TermVectorsWriter.FORMAT_SIZE);
+ tvx.seek(((docNum + docStoreOffset) * 8L) + TermVectorsWriter.FORMAT_SIZE);
//System.out.println("TVX Pointer: " + tvx.getFilePointer());
long position = tvx.readLong();
@@ -154,7 +172,7 @@
// Check if no term vectors are available for this segment at all
if (tvx != null) {
//We need to offset by
- tvx.seek((docNum * 8L) + TermVectorsWriter.FORMAT_SIZE);
+ tvx.seek(((docNum + docStoreOffset) * 8L) + TermVectorsWriter.FORMAT_SIZE);
long position = tvx.readLong();
tvd.seek(position);
Modified: lucene/java/trunk/src/java/org/apache/lucene/store/IndexOutput.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/store/IndexOutput.java?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/store/IndexOutput.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/store/IndexOutput.java Wed Jul 4 08:16:38 2007
@@ -125,6 +125,31 @@
}
}
+ /** Writes a sequence of UTF-8 encoded characters from a char[].
+ * @param s the source of the characters
+ * @param start the first character in the sequence
+ * @param length the number of characters in the sequence
+ * @see IndexInput#readChars(char[],int,int)
+ */
+ public void writeChars(char[] s, int start, int length)
+ throws IOException {
+ final int end = start + length;
+ for (int i = start; i < end; i++) {
+ final int code = (int)s[i];
+ if (code >= 0x01 && code <= 0x7F)
+ writeByte((byte)code);
+ else if (((code >= 0x80) && (code <= 0x7FF)) || code == 0) {
+ writeByte((byte)(0xC0 | (code >> 6)));
+ writeByte((byte)(0x80 | (code & 0x3F)));
+ } else {
+ writeByte((byte)(0xE0 | (code >>> 12)));
+ writeByte((byte)(0x80 | ((code >> 6) & 0x3F)));
+ writeByte((byte)(0x80 | (code & 0x3F)));
+ }
+ }
+ }
+
+
/** Forces any buffered output to be written. */
public abstract void flush() throws IOException;
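
[A hedged round-trip check of the encoding rules in writeChars() above; it compiles only against the patched tree, and the file name "chars" is arbitrary. 'A' takes one byte, U+0000 takes the two-byte "modified UTF-8" form (the code == 0 branch), U+00E9 two bytes, and U+20AC three:

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IndexOutput;
    import org.apache.lucene.store.RAMDirectory;

    public class WriteCharsCheck {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexOutput out = dir.createOutput("chars");
        char[] s = {'A', (char) 0, '\u00E9', '\u20AC'};
        out.writeChars(s, 0, s.length);
        out.close();
        // Bytes written: 41 | C0 80 | C3 A9 | E2 82 AC
        System.out.println(dir.fileLength("chars")); // prints 8
      }
    }
]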
Modified: lucene/java/trunk/src/site/src/documentation/content/xdocs/fileformats.xml
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/site/src/documentation/content/xdocs/fileformats.xml?view=diff&rev=553236&r1=553235&r2=553236
==============================================================================
--- lucene/java/trunk/src/site/src/documentation/content/xdocs/fileformats.xml (original)
+++ lucene/java/trunk/src/site/src/documentation/content/xdocs/fileformats.xml Wed Jul 4 08:16:38 2007
@@ -60,6 +60,15 @@
Lucene will not be able to read the index.
+
+ In version 2.3, the file format was changed to allow
+ segments to share a single set of doc store (vectors &
+ stored fields) files. This allows for faster indexing
+ in certain cases. The change is fully backwards
+ compatible (in the same way as the lock-less commits
+ change in 2.1).
+
+
Definitions
@@ -809,9 +818,15 @@
NormGen^NumField,
IsCompoundFile>^SegCount
+
+ 2.3 and above:
+ Segments --> Format, Version, NameCounter, SegCount, <SegName, SegSize, DelGen, DocStoreOffset, [DocStoreSegment, DocStoreIsCompoundFile], HasSingleNormFile, NumField,
NormGen^NumField,
IsCompoundFile>^SegCount
+
- Format, NameCounter, SegCount, SegSize, NumField --> Int32
+ Format, NameCounter, SegCount, SegSize, NumField, DocStoreOffset --> Int32
@@ -819,11 +834,11 @@
- SegName --> String
+ SegName, DocStoreSegment --> String
- IsCompoundFile, HasSingleNormFile --> Int8
+ IsCompoundFile, HasSingleNormFile, DocStoreIsCompoundFile --> Int8
@@ -889,6 +904,29 @@
"Normalization Factors" below for details.
+
+ DocStoreOffset, DocStoreSegment,
+ DocStoreIsCompoundFile: If DocStoreOffset is -1,
+ this segment has its own doc store (stored fields
+ values and term vectors) files and DocStoreSegment
+ and DocStoreIsCompoundFile are not stored. In
+ this case all files for stored field values
+ (*.fdt and *.fdx) and term
+ vectors (*.tvf, *.tvd and
+ *.tvx) will be stored with this segment.
+ Otherwise, DocStoreSegment is the name of the
+ segment that has the shared doc store files;
+ DocStoreIsCompoundFile is 1 if that segment is
+ stored in compound file format (as a .cfx
+ file); and DocStoreOffset is the starting document
+ in the shared doc store files where this segment's
+ documents begin. In this case, this segment does
+ not store its own doc store files but instead
+ shares a single set of these files with other
+ segments.
+
+
+
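
[A concrete reading of the description above, with assumed values: a segments file could list _0 with DocStoreOffset 0, _1 with DocStoreOffset 100 and _2 with DocStoreOffset 200, each naming DocStoreSegment _0 with DocStoreIsCompoundFile 0. All three segments then read stored fields and vectors from the shared _0.fdx/_0.fdt and _0.tv* files, segment _2's first document sitting at slot 200. A segment written with DocStoreOffset -1 stores neither DocStoreSegment nor DocStoreIsCompoundFile and owns private doc store files.]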
Lock File
@@ -946,6 +984,14 @@
FileData --> raw file data
The raw file data is the data from the individual files named above.
+
+ Starting with Lucene 2.3, doc store files (stored
+ field values and term vectors) can be shared in a
+ single set of files for more than one segment. When
+ compound file is enabled, these shared files will be
+ added into a single compound file (same format as
+ above) but with the extension .cfx.
+