lucene-java-commits mailing list archives

From mikemcc...@apache.org
Subject svn commit: r636458 [1/3] - /lucene/java/trunk/src/java/org/apache/lucene/index/
Date Wed, 12 Mar 2008 19:09:16 GMT
Author: mikemccand
Date: Wed Mar 12 12:09:12 2008
New Revision: 636458

URL: http://svn.apache.org/viewvc?rev=636458&view=rev
Log:
LUCENE-1212: factor DocumentsWriter into separate source files

Added:
    lucene/java/trunk/src/java/org/apache/lucene/index/AbortException.java   (with props)
    lucene/java/trunk/src/java/org/apache/lucene/index/BufferedDeletes.java   (with props)
    lucene/java/trunk/src/java/org/apache/lucene/index/BufferedNorms.java   (with props)
    lucene/java/trunk/src/java/org/apache/lucene/index/ByteBlockPool.java   (with props)
    lucene/java/trunk/src/java/org/apache/lucene/index/ByteSliceReader.java   (with props)
    lucene/java/trunk/src/java/org/apache/lucene/index/CharBlockPool.java   (with props)
    lucene/java/trunk/src/java/org/apache/lucene/index/DocumentsWriterFieldData.java   (with props)
    lucene/java/trunk/src/java/org/apache/lucene/index/DocumentsWriterFieldMergeState.java   (with props)
    lucene/java/trunk/src/java/org/apache/lucene/index/DocumentsWriterThreadState.java   (with props)
    lucene/java/trunk/src/java/org/apache/lucene/index/Posting.java   (with props)
    lucene/java/trunk/src/java/org/apache/lucene/index/PostingVector.java   (with props)
    lucene/java/trunk/src/java/org/apache/lucene/index/ReusableStringReader.java   (with props)
Modified:
    lucene/java/trunk/src/java/org/apache/lucene/index/DocumentsWriter.java

Added: lucene/java/trunk/src/java/org/apache/lucene/index/AbortException.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/AbortException.java?rev=636458&view=auto
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/AbortException.java (added)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/AbortException.java Wed Mar 12 12:09:12 2008
@@ -0,0 +1,29 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+// Used only internally by DocumentsWriter (DW) to signal an abort "up the stack"
+class AbortException extends IOException {
+  public AbortException(Throwable cause, DocumentsWriter docWriter) {
+    super();
+    initCause(cause);
+    docWriter.setAborting();
+  }
+}

Propchange: lucene/java/trunk/src/java/org/apache/lucene/index/AbortException.java
------------------------------------------------------------------------------
    svn:eol-style = native

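The pattern here: a low-level failure during indexing is wrapped in AbortException, which flags the writer as aborting before propagating, so every frame above knows buffered state must be discarded. A minimal standalone sketch of that control flow (MyWriter and its methods are hypothetical stand-ins, not the real DocumentsWriter internals):

    import java.io.IOException;

    // Sketch of the "call abort up the stack" pattern: wrapping the
    // failure marks the writer as aborting before the exception
    // travels upward.
    class AbortDemo {
      static class MyWriter {
        boolean aborting;

        void setAborting() { aborting = true; }

        void addDocument() throws IOException {
          try {
            deepWrite();  // some low-level step that can fail
          } catch (RuntimeException cause) {
            IOException abort = new IOException("aborting");
            abort.initCause(cause);
            setAborting();  // flag state as unusable, as AbortException's ctor does
            throw abort;
          }
        }

        void deepWrite() { throw new RuntimeException("simulated disk full"); }
      }

      public static void main(String[] args) {
        MyWriter w = new MyWriter();
        try {
          w.addDocument();
        } catch (IOException e) {
          // By the time anyone catches it, aborting is already set.
          System.out.println("aborting=" + w.aborting + ", cause=" + e.getCause());
        }
      }
    }
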
Added: lucene/java/trunk/src/java/org/apache/lucene/index/BufferedDeletes.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/BufferedDeletes.java?rev=636458&view=auto
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/BufferedDeletes.java (added)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/BufferedDeletes.java Wed Mar 12 12:09:12 2008
@@ -0,0 +1,146 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.HashMap;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+/** Holds buffered deletes, by docID, term or query.  We
+ *  hold two instances of this class: one for the deletes
+ *  prior to the last flush, the other for deletes after
+ *  the last flush.  This is so if we need to abort
+ *  (discard all buffered docs) we can also discard the
+ *  buffered deletes yet keep the deletes done during
+ *  previously flushed segments. */
+class BufferedDeletes {
+  int numTerms;
+  HashMap terms = new HashMap();
+  HashMap queries = new HashMap();
+  List docIDs = new ArrayList();
+
+  // Number of documents a delete term applies to.
+  final static class Num {
+    private int num;
+
+    Num(int num) {
+      this.num = num;
+    }
+
+    int getNum() {
+      return num;
+    }
+
+    void setNum(int num) {
+      // Only record the new number if it's greater than the
+      // current one.  This is important because if multiple
+      // threads are replacing the same doc at nearly the
+      // same time, it's possible that one thread that got a
+      // higher docID is scheduled before the other
+      // threads.
+      if (num > this.num)
+        this.num = num;
+    }
+  }
+
+  void update(BufferedDeletes in) {
+    numTerms += in.numTerms;
+    terms.putAll(in.terms);
+    queries.putAll(in.queries);
+    docIDs.addAll(in.docIDs);
+    in.terms.clear();
+    in.numTerms = 0;
+    in.queries.clear();
+    in.docIDs.clear();
+  }
+    
+  void clear() {
+    terms.clear();
+    queries.clear();
+    docIDs.clear();
+    numTerms = 0;
+  }
+
+  boolean any() {
+    return terms.size() > 0 || docIDs.size() > 0 || queries.size() > 0;
+  }
+
+  // Remaps all buffered deletes based on a completed
+  // merge
+  synchronized void remap(MergeDocIDRemapper mapper,
+                          SegmentInfos infos,
+                          int[][] docMaps,
+                          int[] delCounts,
+                          MergePolicy.OneMerge merge,
+                          int mergeDocCount) {
+
+    final HashMap newDeleteTerms;
+
+    // Remap delete-by-term
+    if (terms.size() > 0) {
+      newDeleteTerms = new HashMap();
+      Iterator iter = terms.entrySet().iterator();
+      while(iter.hasNext()) {
+        Entry entry = (Entry) iter.next();
+        Num num = (Num) entry.getValue();
+        newDeleteTerms.put(entry.getKey(),
+                           new Num(mapper.remap(num.getNum())));
+      }
+    } else
+      newDeleteTerms = null;
+
+    // Remap delete-by-docID
+    final List newDeleteDocIDs;
+
+    if (docIDs.size() > 0) {
+      newDeleteDocIDs = new ArrayList(docIDs.size());
+      Iterator iter = docIDs.iterator();
+      while(iter.hasNext()) {
+        Integer num = (Integer) iter.next();
+        newDeleteDocIDs.add(new Integer(mapper.remap(num.intValue())));
+      }
+    } else
+      newDeleteDocIDs = null;
+
+    // Remap delete-by-query
+    final HashMap newDeleteQueries;
+    
+    if (queries.size() > 0) {
+      newDeleteQueries = new HashMap(queries.size());
+      Iterator iter = queries.entrySet().iterator();
+      while(iter.hasNext()) {
+        Entry entry = (Entry) iter.next();
+        Integer num = (Integer) entry.getValue();
+        newDeleteQueries.put(entry.getKey(),
+                             new Integer(mapper.remap(num.intValue())));
+      }
+    } else
+      newDeleteQueries = null;
+
+    if (newDeleteTerms != null)
+      terms = newDeleteTerms;
+    if (newDeleteDocIDs != null)
+      docIDs = newDeleteDocIDs;
+    if (newDeleteQueries != null)
+      queries = newDeleteQueries;
+  }
+}
\ No newline at end of file

Propchange: lucene/java/trunk/src/java/org/apache/lucene/index/BufferedDeletes.java
------------------------------------------------------------------------------
    svn:eol-style = native

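The Num class above enforces a monotonic docID limit. A standalone sketch (not part of this commit) of why setNum only moves forward:

    // Replica of BufferedDeletes.Num's update rule: when threads
    // replacing the same document race, only the highest docID is
    // kept, so a stale thread cannot roll the limit backwards.
    final class NumDemo {
      private int num;

      NumDemo(int num) { this.num = num; }

      void setNum(int num) {
        if (num > this.num)   // ignore stale (lower) docIDs
          this.num = num;
      }

      int getNum() { return num; }

      public static void main(String[] args) {
        NumDemo n = new NumDemo(10);
        n.setNum(42);  // a later replace with a higher docID wins
        n.setNum(17);  // a stale thread's lower docID is ignored
        System.out.println(n.getNum());  // prints 42
      }
    }
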
Added: lucene/java/trunk/src/java/org/apache/lucene/index/BufferedNorms.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/BufferedNorms.java?rev=636458&view=auto
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/BufferedNorms.java (added)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/BufferedNorms.java Wed Mar 12 12:09:12 2008
@@ -0,0 +1,60 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import org.apache.lucene.store.RAMOutputStream;
+import org.apache.lucene.search.Similarity;
+
+/* Stores norms, buffered in RAM, until they are flushed
+ * to a partial segment. */
+final class BufferedNorms {
+
+  RAMOutputStream out;
+  int upto;
+
+  private static final byte defaultNorm = Similarity.encodeNorm(1.0f);
+
+  BufferedNorms() {
+    out = new RAMOutputStream();
+  }
+
+  void add(float norm) throws IOException {
+    byte b = Similarity.encodeNorm(norm);
+    out.writeByte(b);
+    upto++;
+  }
+
+  void reset() {
+    out.reset();
+    upto = 0;
+  }
+
+  void fill(int docID) throws IOException {
+    // Must now fill in docs that didn't have this
+    // field.  Note that this is how norms can consume
+    // tremendous storage when the docs have widely
+    // varying fields, because we are not
+    // storing the norms sparsely (see LUCENE-830)
+    if (upto < docID) {
+      DocumentsWriter.fillBytes(out, defaultNorm, docID-upto);
+      upto = docID;
+    }
+  }
+}
+

Propchange: lucene/java/trunk/src/java/org/apache/lucene/index/BufferedNorms.java
------------------------------------------------------------------------------
    svn:eol-style = native

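To see why fill() is needed, here is a standalone sketch (using ByteArrayOutputStream in place of Lucene's RAMOutputStream, with a stand-in default-norm byte): norms are written densely, one byte per document, so any document that lacked the field must be padded with the default norm before the next real value is appended.

    import java.io.ByteArrayOutputStream;

    // Standalone sketch of BufferedNorms.fill(): pad the dense,
    // one-byte-per-doc norms stream for docs missing the field.
    class NormsFillDemo {
      static final byte DEFAULT_NORM = 0x7C;  // stand-in for Similarity.encodeNorm(1.0f)
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      int upto;

      void add(byte norm) {
        out.write(norm);
        upto++;
      }

      void fill(int docID) {
        // Docs in [upto, docID) never saw this field: write defaults
        while (upto < docID) {
          out.write(DEFAULT_NORM);
          upto++;
        }
      }

      public static void main(String[] args) {
        NormsFillDemo norms = new NormsFillDemo();
        norms.add((byte) 0x70);   // doc 0 has the field
        norms.fill(3);            // docs 1 and 2 do not
        norms.add((byte) 0x75);   // doc 3 has the field
        System.out.println("bytes written: " + norms.out.size());  // 4
      }
    }
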
Added: lucene/java/trunk/src/java/org/apache/lucene/index/ByteBlockPool.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/ByteBlockPool.java?rev=636458&view=auto
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/ByteBlockPool.java (added)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/ByteBlockPool.java Wed Mar 12 12:09:12 2008
@@ -0,0 +1,142 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Class that Posting and PostingVector use to write byte
+ * streams into shared fixed-size byte[] arrays.  The idea
+ * is to allocate slices of increasing lengths.  For
+ * example, the first slice is 5 bytes, the next slice is
+ * 14, etc.  We start by writing our bytes into the first
+ * 5 bytes.  When we hit the end of the slice, we allocate
+ * the next slice and then write the address of the new
+ * slice into the last 4 bytes of the previous slice (the
+ * "forwarding address").
+ *
+ * Each slice is filled with 0's initially, and we mark
+ * the end with a non-zero byte.  This way the methods
+ * that are writing into the slice don't need to record
+ * its length and instead allocate a new slice once they
+ * hit a non-zero byte. */
+
+import java.util.Arrays;
+
+final class ByteBlockPool {
+
+  public byte[][] buffers = new byte[10][];
+
+  int bufferUpto = -1;                        // Which buffer we are upto
+  public int byteUpto = DocumentsWriter.BYTE_BLOCK_SIZE;             // Where we are in head buffer
+
+  public byte[] buffer;                              // Current head buffer
+  public int byteOffset = -DocumentsWriter.BYTE_BLOCK_SIZE;          // Current head offset
+
+  private boolean trackAllocations;
+  DocumentsWriter docWriter;
+
+  public ByteBlockPool(DocumentsWriter docWriter, boolean trackAllocations) {
+    this.docWriter = docWriter;
+    this.trackAllocations = trackAllocations;
+  }
+
+  public void reset() {
+    if (bufferUpto != -1) {
+      // We allocated at least one buffer
+
+      for(int i=0;i<bufferUpto;i++)
+        // Fully zero fill buffers that we fully used
+        Arrays.fill(buffers[i], (byte) 0);
+
+      // Partial zero fill the final buffer
+      Arrays.fill(buffers[bufferUpto], 0, byteUpto, (byte) 0);
+          
+      if (bufferUpto > 0)
+        // Recycle all but the first buffer
+        docWriter.recycleByteBlocks(buffers, 1, 1+bufferUpto);
+
+      // Re-use the first buffer
+      bufferUpto = 0;
+      byteUpto = 0;
+      byteOffset = 0;
+      buffer = buffers[0];
+    }
+  }
+
+  public void nextBuffer() {
+    if (1+bufferUpto == buffers.length) {
+      byte[][] newBuffers = new byte[(int) (buffers.length*1.5)][];
+      System.arraycopy(buffers, 0, newBuffers, 0, buffers.length);
+      buffers = newBuffers;
+    }
+    buffer = buffers[1+bufferUpto] = docWriter.getByteBlock(trackAllocations);
+    bufferUpto++;
+
+    byteUpto = 0;
+    byteOffset += DocumentsWriter.BYTE_BLOCK_SIZE;
+  }
+
+  public int newSlice(final int size) {
+    if (byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE-size)
+      nextBuffer();
+    final int upto = byteUpto;
+    byteUpto += size;
+    buffer[byteUpto-1] = 16;
+    return upto;
+  }
+
+  // Size of each slice.  These arrays should be at most 16
+  // elements.  First array is just a compact way to encode
+  // X+1 with a max.  Second array is the length of each
+  // slice, ie first slice is 5 bytes, next slice is 14
+  // bytes, etc.
+  final static int[] nextLevelArray = {1, 2, 3, 4, 5, 6, 7, 8, 9, 9};
+  final static int[] levelSizeArray = {5, 14, 20, 30, 40, 40, 80, 80, 120, 200};
+  final static int FIRST_LEVEL_SIZE = levelSizeArray[0];
+
+  public int allocSlice(final byte[] slice, final int upto) {
+
+    final int level = slice[upto] & 15;
+    final int newLevel = nextLevelArray[level];
+    final int newSize = levelSizeArray[newLevel];
+
+    // Maybe allocate another block
+    if (byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE-newSize)
+      nextBuffer();
+
+    final int newUpto = byteUpto;
+    final int offset = newUpto + byteOffset;
+    byteUpto += newSize;
+
+    // Copy forward the past 3 bytes (which we are about
+    // to overwrite with the forwarding address):
+    buffer[newUpto] = slice[upto-3];
+    buffer[newUpto+1] = slice[upto-2];
+    buffer[newUpto+2] = slice[upto-1];
+
+    // Write forwarding address at end of last slice:
+    slice[upto-3] = (byte) (offset >>> 24);
+    slice[upto-2] = (byte) (offset >>> 16);
+    slice[upto-1] = (byte) (offset >>> 8);
+    slice[upto] = (byte) offset;
+        
+    // Write new level:
+    buffer[byteUpto-1] = (byte) (16|newLevel);
+
+    return newUpto+3;
+  }
+}
+

Propchange: lucene/java/trunk/src/java/org/apache/lucene/index/ByteBlockPool.java
------------------------------------------------------------------------------
    svn:eol-style = native

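A small standalone sketch of the slice-sizing scheme encoded by nextLevelArray and levelSizeArray above: each allocation chain starts at 5 bytes and grows per level, and the last byte of each slice stores 16|level so the writer can detect the slice end (a non-zero byte) and look up the next size.

    // Standalone sketch: walk the slice levels from ByteBlockPool.
    // Each slice ends with a marker byte (16 | level); on overflow
    // the writer reads that level, allocates the next-size slice and
    // leaves a 4-byte forwarding address behind.
    class SliceLevelsDemo {
      static final int[] NEXT_LEVEL = {1, 2, 3, 4, 5, 6, 7, 8, 9, 9};
      static final int[] LEVEL_SIZE = {5, 14, 20, 30, 40, 40, 80, 80, 120, 200};

      public static void main(String[] args) {
        int level = 0, total = 0;
        for (int i = 0; i < 10; i++) {
          int size = LEVEL_SIZE[level];
          total += size;
          System.out.println("slice " + i + ": level=" + level
              + " size=" + size + " marker=" + (16 | level)
              + " cumulative=" + total);
          level = NEXT_LEVEL[level];
        }
        // Levels cap at 9, so long postings chains keep
        // allocating 200-byte slices.
      }
    }
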
Added: lucene/java/trunk/src/java/org/apache/lucene/index/ByteSliceReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/ByteSliceReader.java?rev=636458&view=auto
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/ByteSliceReader.java (added)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/ByteSliceReader.java Wed Mar 12 12:09:12 2008
@@ -0,0 +1,136 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import java.io.IOException;
+
+/* IndexInput that knows how to read the byte slices written
+ * by Posting and PostingVector.  We read the bytes in
+ * each slice until we hit the end of that slice at which
+ * point we read the forwarding address of the next slice
+ * and then jump to it.*/
+final class ByteSliceReader extends IndexInput {
+  ByteBlockPool pool;
+  int bufferUpto;
+  byte[] buffer;
+  public int upto;
+  int limit;
+  int level;
+  public int bufferOffset;
+
+  public int endIndex;
+
+  public void init(ByteBlockPool pool, int startIndex, int endIndex) {
+
+    assert endIndex-startIndex > 0;
+
+    this.pool = pool;
+    this.endIndex = endIndex;
+
+    level = 0;
+    bufferUpto = startIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
+    bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
+    buffer = pool.buffers[bufferUpto];
+    upto = startIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+
+    final int firstSize = ByteBlockPool.levelSizeArray[0];
+
+    if (startIndex+firstSize >= endIndex) {
+      // There is only this one slice to read
+      limit = endIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+    } else
+      limit = upto+firstSize-4;
+  }
+
+  public byte readByte() {
+    // Assert that we are not @ EOF
+    assert upto + bufferOffset < endIndex;
+    if (upto == limit)
+      nextSlice();
+    return buffer[upto++];
+  }
+
+  public long writeTo(IndexOutput out) throws IOException {
+    long size = 0;
+    while(true) {
+      if (limit + bufferOffset == endIndex) {
+        assert endIndex - bufferOffset >= upto;
+        out.writeBytes(buffer, upto, limit-upto);
+        size += limit-upto;
+        break;
+      } else {
+        out.writeBytes(buffer, upto, limit-upto);
+        size += limit-upto;
+        nextSlice();
+      }
+    }
+
+    return size;
+  }
+
+  public void nextSlice() {
+
+    // Skip to our next slice
+    final int nextIndex = ((buffer[limit]&0xff)<<24) + ((buffer[1+limit]&0xff)<<16) + ((buffer[2+limit]&0xff)<<8) + (buffer[3+limit]&0xff);
+
+    level = ByteBlockPool.nextLevelArray[level];
+    final int newSize = ByteBlockPool.levelSizeArray[level];
+
+    bufferUpto = nextIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
+    bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
+
+    buffer = pool.buffers[bufferUpto];
+    upto = nextIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+
+    if (nextIndex + newSize >= endIndex) {
+      // We are advancing to the final slice
+      assert endIndex - nextIndex > 0;
+      limit = endIndex - bufferOffset;
+    } else {
+      // This is not the final slice (subtract 4 for the
+      // forwarding address at the end of this new slice)
+      limit = upto+newSize-4;
+    }
+  }
+
+  public void readBytes(byte[] b, int offset, int len) {
+    while(len > 0) {
+      final int numLeft = limit-upto;
+      if (numLeft < len) {
+        // Read entire slice
+        System.arraycopy(buffer, upto, b, offset, numLeft);
+        offset += numLeft;
+        len -= numLeft;
+        nextSlice();
+      } else {
+        // This slice is the last one
+        System.arraycopy(buffer, upto, b, offset, len);
+        upto += len;
+        break;
+      }
+    }
+  }
+
+  public long getFilePointer() {throw new RuntimeException("not implemented");}
+  public long length() {throw new RuntimeException("not implemented");}
+  public void seek(long pos) {throw new RuntimeException("not implemented");}
+  public void close() {throw new RuntimeException("not implemented");}
+}
+

Propchange: lucene/java/trunk/src/java/org/apache/lucene/index/ByteSliceReader.java
------------------------------------------------------------------------------
    svn:eol-style = native

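A standalone round-trip sketch of the 4-byte forwarding address: allocSlice writes the absolute offset big-endian into the last 4 bytes of the exhausted slice, and nextSlice above reads it back the same way.

    // Standalone sketch: the big-endian forwarding address that
    // ByteBlockPool.allocSlice writes and ByteSliceReader.nextSlice
    // reads back.  'upto' points at the last byte of the old slice.
    class ForwardingAddressDemo {
      public static void main(String[] args) {
        byte[] slice = new byte[32];
        int upto = 20;           // position of the slice's end marker
        int offset = 123456789;  // absolute address of the new slice

        // Write (as in allocSlice):
        slice[upto - 3] = (byte) (offset >>> 24);
        slice[upto - 2] = (byte) (offset >>> 16);
        slice[upto - 1] = (byte) (offset >>> 8);
        slice[upto] = (byte) offset;

        // Read back (as in nextSlice, where 'limit' is upto-3):
        int limit = upto - 3;
        int nextIndex = ((slice[limit] & 0xff) << 24)
            + ((slice[1 + limit] & 0xff) << 16)
            + ((slice[2 + limit] & 0xff) << 8)
            + (slice[3 + limit] & 0xff);

        System.out.println(nextIndex == offset);  // true
      }
    }
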
Added: lucene/java/trunk/src/java/org/apache/lucene/index/CharBlockPool.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/CharBlockPool.java?rev=636458&view=auto
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/CharBlockPool.java (added)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/CharBlockPool.java Wed Mar 12 12:09:12 2008
@@ -0,0 +1,56 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+final class CharBlockPool {
+
+  public char[][] buffers = new char[10][];
+  int numBuffer;
+
+  int bufferUpto = -1;                        // Which buffer we are upto
+  public int byteUpto = DocumentsWriter.CHAR_BLOCK_SIZE;             // Where we are in head buffer
+
+  public char[] buffer;                              // Current head buffer
+  public int byteOffset = -DocumentsWriter.CHAR_BLOCK_SIZE;          // Current head offset
+  private DocumentsWriter docWriter;
+
+  public CharBlockPool(DocumentsWriter docWriter) {
+    this.docWriter = docWriter;
+  }
+
+  public void reset() {
+    docWriter.recycleCharBlocks(buffers, 1+bufferUpto);
+    bufferUpto = -1;
+    byteUpto = DocumentsWriter.CHAR_BLOCK_SIZE;
+    byteOffset = -DocumentsWriter.CHAR_BLOCK_SIZE;
+  }
+
+  public void nextBuffer() {
+    if (1+bufferUpto == buffers.length) {
+      char[][] newBuffers = new char[(int) (buffers.length*1.5)][];
+      System.arraycopy(buffers, 0, newBuffers, 0, buffers.length);
+      buffers = newBuffers;
+    }
+    buffer = buffers[1+bufferUpto] = docWriter.getCharBlock();
+    bufferUpto++;
+
+    byteUpto = 0;
+    byteOffset += DocumentsWriter.CHAR_BLOCK_SIZE;
+  }
+}
+

Propchange: lucene/java/trunk/src/java/org/apache/lucene/index/CharBlockPool.java
------------------------------------------------------------------------------
    svn:eol-style = native


