activemq-commits mailing list archives

From chir...@apache.org
Subject svn commit: r677302 [3/4] - in /activemq/sandbox/xindice-stripped: ./ src/ src/main/ src/main/java/ src/main/java/org/ src/main/java/org/apache/ src/main/java/org/apache/xindice/ src/main/java/org/apache/xindice/core/ src/main/java/org/apache/xindice/c...
Date Wed, 16 Jul 2008 15:18:22 GMT
Added: activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/HashFiler.java
URL: http://svn.apache.org/viewvc/activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/HashFiler.java?rev=677302&view=auto
==============================================================================
--- activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/HashFiler.java (added)
+++ activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/HashFiler.java Wed Jul 16 08:18:20 2008
@@ -0,0 +1,503 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: HashFiler.java 571938 2007-09-02 10:14:13Z vgritsenko $
+ */
+
+package org.apache.xindice.core.filer;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.xindice.core.DBException;
+import org.apache.xindice.core.FaultCodes;
+import org.apache.xindice.core.data.Key;
+import org.apache.xindice.core.data.Record;
+import org.apache.xindice.core.data.RecordSet;
+import org.apache.xindice.core.data.Value;
+
+/**
+ * HashFiler is a Filer implementation based on the Paged class.  By
+ * extending Paged, HashFiler inherits the ability to maintain Record
+ * metadata such as creation and modification time.  It also provides
+ * quite a bit more flexibility in its ability to retrieve blocks of
+ * data and allocate Record space.
+ *
+ * <br/>
+ * HashFiler has the following configuration attributes:
+ * <ul>
+ * <li><strong>pagesize</strong>: Size of the page used by the paged file.
+ *     Default page size is 4096 bytes. This parameter can be set only
+ *     before paged file is created. Once it is created, this parameter
+ *     can not be changed.</li>
+ * <li><strong>pagecount</strong>: This parameter has a special meaning
+ *     for HashFiler. It determines the size of the hash table's main
+ *     storage, which is equal to the number of pages the filer will be
+ *     created with. The default is 1024. Note that if this value is
+ *     too small, the efficiency of the hash table suffers.</li>
+ * <li><strong>maxkeysize</strong>: Maximum allowed size of the key.
+ *     Default maximum key size is 256 bytes.</li>
+ * <li><strong>max-descriptors</strong>: Defines the maximum number of
+ *     simultaneously open file descriptors this paged file can have.
+ *     Several descriptors are needed to provide multithreaded access
+ *     to the underlying file. Too large a value will limit the number
+ *     of collections you can open. Default value is 16
+ *     (DEFAULT_DESCRIPTORS_MAX).</li>
+ * </ul>
+ *
+ * @version $Revision: 571938 $, $Date: 2007-09-02 06:14:13 -0400 (Sun, 02 Sep 2007) $
+ */
+public class HashFiler extends Paged
+                       implements Filer {
+
+    private static final Log log = LogFactory.getLog(HashFiler.class);
+
+    /**
+     * Record page status
+     */
+    protected static final byte RECORD = 1;
+
+    private HashFileHeader fileHeader;
+
+
+    public HashFiler() {
+        super();
+        fileHeader = (HashFileHeader) getFileHeader();
+    }
+
+    public void setLocation(File root, String location) {
+        setFile(new File(root, location + ".tbl"));
+    }
+
+    public String getName() {
+        return "HashFiler";
+    }
+
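+    /**
+     * For HashFiler, the file starts out with pageCount primary pages in use,
+     * so totalCount is initialized to pageCount rather than 0.
+     */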
+    @Override
+    protected void initFileHeader() {
+        super.initFileHeader();
+        fileHeader.setTotalCount(fileHeader.getPageCount());
+    }
+    
+    private Page seekRecordPage(Key key) throws IOException {
+        int hash = key.hashCode();
+        long pageNum = hash % fileHeader.getPageCount();
+        Page p = getPage(pageNum);
+        synchronized (p) {
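+            // Walk the collision chain from the chain-head page until the key matches or the chain ends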
+            while (true) {
+                HashPageHeader ph = (HashPageHeader) p.getPageHeader();
+                if (ph.getStatus() == RECORD && ph.getKeyHash() == hash && p.getKey().equals(key)) {
+                    return p;
+                }
+
+                pageNum = ph.getNextCollision();
+                if (pageNum == NO_PAGE) {
+                    return null;
+                }
+                p = getPage(pageNum);
+            }
+        }
+    }
+
+    public Record readRecord(Key key) throws DBException {
+        return readRecord(key, false);
+    }
+
+    public Record readRecord(Key key, boolean metaOnly) throws DBException {
+        if (key == null || key.getLength() == 0) {
+            return null;
+        }
+        checkOpened();
+        try {
+            Page startPage = seekRecordPage(key);
+            if (startPage != null) {
+                Value v = metaOnly ? null : readValue(startPage);
+                HashPageHeader sph = (HashPageHeader) startPage.getPageHeader();
+
+                HashMap meta = new HashMap(3);
+                meta.put(Record.CREATED, new Long(sph.getCreated()));
+                meta.put(Record.MODIFIED, new Long(sph.getModified()));
+
+                return new Record(key, v, meta);
+            }
+        } catch (Exception e) {
+            if (log.isWarnEnabled()) {
+                log.warn("ignored exception", e);
+            }
+        }
+        return null;
+    }
+
+    private Page seekInsertionPage(Key key) throws IOException {
+        // Calculate hash and retrieve chain head page
+        int hash = key.hashCode();
+        Page p = getPage(hash % fileHeader.getPageCount());
+
+        // Synchronize by chain head page
+        synchronized (p) {
+            HashPageHeader ph;
+            while (true) {
+                ph = (HashPageHeader) p.getPageHeader();
+                if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED
+                        || (ph.getStatus() == RECORD && ph.getKeyHash() == hash && p.getKey().equals(key))) {
+                    // Found free page
+                    break;
+                }
+
+                // Check the chain
+                long pageNum = ph.getNextCollision();
+                if (pageNum == NO_PAGE) {
+                    // Reached end of chain, add new page
+                    Page np = getFreePage();
+
+                    ph.setNextCollision(np.getPageNum());
+                    p.write();
+
+                    p = np;
+                    ph = (HashPageHeader) p.getPageHeader();
+                    ph.setNextCollision(NO_PAGE);
+                    break;
+                }
+
+                // Go to the next page in chain
+                p = getPage(pageNum);
+            }
+
+            // Here we have a page
+            long t = System.currentTimeMillis();
+            if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED) {
+                // This is a new Record
+                fileHeader.incRecordCount();
+                ph.setCreated(t);
+            }
+            ph.setModified(t);
+            ph.setStatus(RECORD);
+        }
+
+        return p;
+    }
+
+    public Record writeRecord(Key key, Value value) throws DBException {
+        // Check that key is not larger than space on the page
+        if (key == null || key.getLength() == 0 || key.getLength() > fileHeader.getPageSize() - fileHeader.getPageHeaderSize()) {
+            throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Invalid key: '" + key + "'");
+        }
+        if (value == null) {
+            throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Invalid null value");
+        }
+        checkOpened();
+        Page p = null;
+        try {
+            p = seekInsertionPage(key);
+            p.setKey(key);
+            writeValue(p, value);
+        } catch (Exception e) {
+            // FIXME It's not enough. At this point, new record could have been added to the chain
+            if (p != null) {
+                p.getPageHeader().setStatus(DELETED);
+                try {
+                    p.write();
+                } catch (IOException ignored) {
+                    // Double exception
+                }
+            }
+
+            throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Exception: " + e, e);
+        }
+
+        flush();
+
+        HashPageHeader ph = (HashPageHeader) p.getPageHeader();
+        HashMap meta = new HashMap(3);
+        meta.put(Record.CREATED, new Long(ph.getCreated()));
+        meta.put(Record.MODIFIED, new Long(ph.getModified()));
+        return new Record(key, value, meta);
+    }
+
+    /**
+     * Mark pages in primary store as 'DELETED', and let Paged handle all
+     * overflow pages.
+     */
+    protected void unlinkPages(Page page) throws IOException {
+        // Handle the page if it's in primary space by setting its status to
+        // DELETED and freeing any overflow pages linked to it.
+        if (page.getPageNum() < fileHeader.getPageCount()) {
+            long nextPage = page.getPageHeader().getNextPage();
+            page.getPageHeader().setStatus(DELETED);
+            page.getPageHeader().setNextPage(NO_PAGE);
+            page.write();
+
+            // If there are no chained pages, we are done.
+            if (nextPage == NO_PAGE) {
+                return;
+            }
+
+            // Free the chained pages from the page that was just removed
+            page = getPage(nextPage);
+        }
+
+        super.unlinkPages(page);
+    }
+
+    public boolean deleteRecord(Key key) throws DBException {
+        if (key == null || key.getLength() == 0) {
+            return false;
+        }
+        checkOpened();
+        try {
+            int hash = key.hashCode();
+            long pageNum = hash % fileHeader.getPageCount();
+
+            Page page = getPage(pageNum);
+            synchronized (page) {
+                HashPageHeader prevHead = null;
+                HashPageHeader pageHead;
+
+                Page prev = null;
+                while (true) {
+                    pageHead = (HashPageHeader) page.getPageHeader();
+                    if (pageHead.getStatus() == RECORD && pageHead.getKeyHash() == hash && page.getKey().equals(key)) {
+                        break;
+                    }
+
+                    pageNum = pageHead.getNextCollision();
+                    if (pageNum == NO_PAGE) {
+                        return false;
+                    }
+                    prev = page;
+                    prevHead = pageHead;
+                    page = getPage(pageNum);
+                }
+
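+                // Unlink the matched page from its collision chain before freeing it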
+                if (prev != null) {
+                    prevHead.setNextCollision(pageHead.getNextCollision());
+                    pageHead.setNextCollision(NO_PAGE);
+                    prev.write();
+                }
+
+                unlinkPages(page);
+            }
+
+            fileHeader.decRecordCount();
+            flush();
+
+            return true;
+        } catch (Exception e) {
+            if (log.isWarnEnabled()) {
+                log.warn("ignored exception", e);
+            }
+        }
+        return false;
+    }
+
+    public long getRecordCount() throws DBException {
+        checkOpened();
+        return fileHeader.getRecordCount();
+    }
+
+    public RecordSet getRecordSet() throws DBException {
+        checkOpened();
+        return new HashFilerRecordSet();
+    }
+
+    /**
+     * HashFilerRecordSet that does not use a BTree.
+     */
+    private class HashFilerRecordSet implements RecordSet {
+        private List keys = new ArrayList();
+        private Iterator iter;
+
+        public HashFilerRecordSet() {
+            try {
+                long pageNum = 0;
+
+                // Iterate over main hash table...
+                while (pageNum < fileHeader.getPageCount()) {
+                    Page p = getPage(pageNum);
+                    HashPageHeader ph = (HashPageHeader) p.getPageHeader();
+                    if (ph.getStatus() == RECORD) {
+                        keys.add(p.getKey());
+                    }
+
+                    // ... and over collision chains
+                    while (ph.getNextCollision() != NO_PAGE) {
+                        long pn = ph.getNextCollision();
+                        p = getPage(pn);
+                        ph = (HashPageHeader) p.getPageHeader();
+                        if (ph.getStatus() == RECORD) {
+                            keys.add(p.getKey());
+                        }
+                    }
+
+                    pageNum++;
+                }
+
+                iter = keys.iterator();
+            } catch (Exception e) {
+                if (log.isWarnEnabled()) {
+                    log.warn("ignored exception", e);
+                }
+            }
+        }
+
+        public synchronized Key getNextKey() {
+            return (Key) iter.next();
+        }
+
+        public synchronized Record getNextRecord() throws DBException {
+            return readRecord((Key) iter.next(), false);
+        }
+
+        public synchronized Value getNextValue() throws DBException {
+            return getNextRecord().getValue();
+        }
+
+        public synchronized boolean hasMoreRecords() {
+            return iter.hasNext();
+        }
+    }
+
+    ////////////////////////////////////////////////////////////////////
+
+    public FileHeader createFileHeader() {
+        return new HashFileHeader();
+    }
+
+    public PageHeader createPageHeader() {
+        return new HashPageHeader();
+    }
+
+    /**
+     * HashFileHeader
+     */
+    private final class HashFileHeader extends FileHeader {
+        private long totalBytes;
+
+        public HashFileHeader() {
+            super();
+            // For hash filer, totalCount >= pageCount. See setConfig().
+            setTotalCount(getPageCount());
+        }
+
+        protected synchronized void read(RandomAccessFile raf) throws IOException {
+            super.read(raf);
+            totalBytes = raf.readLong();
+        }
+
+        protected synchronized void write(RandomAccessFile raf) throws IOException {
+            super.write(raf);
+            raf.writeLong(totalBytes);
+        }
+
+        /** The total number of bytes in use by the file */
+        public synchronized void setTotalBytes(long totalBytes) {
+            this.totalBytes = totalBytes;
+            setDirty();
+        }
+
+        /** The total number of bytes in use by the file */
+        public synchronized long getTotalBytes() {
+            return totalBytes;
+        }
+
+        /** Adjust total number of bytes in use by the file */
+        public synchronized void addTotalBytes(int count) {
+            totalBytes += count;
+        }
+    }
+
+    /**
+     * HashPageHeader
+     */
+    protected final class HashPageHeader extends PageHeader {
+        private long created = 0;
+        private long modified = 0;
+        private long nextCollision = NO_PAGE;
+
+        public HashPageHeader() {
+        }
+
+        public HashPageHeader(DataInput dis) throws IOException {
+            super(dis);
+        }
+
+        public synchronized void read(DataInput dis) throws IOException {
+            super.read(dis);
+
+            if (getStatus() == UNUSED) {
+                return;
+            }
+
+            created = dis.readLong();
+            modified = dis.readLong();
+            nextCollision = dis.readLong();
+        }
+
+        public synchronized void write(DataOutput dos) throws IOException {
+            super.write(dos);
+            dos.writeLong(created);
+            dos.writeLong(modified);
+            dos.writeLong(nextCollision);
+        }
+
+        public synchronized void setRecordLen(int recordLen) {
+            fileHeader.addTotalBytes(recordLen - getRecordLen());
+            super.setRecordLen(recordLen);
+        }
+
+        /** UNIX-time when this record was created */
+        public synchronized void setCreated(long created) {
+            this.created = created;
+            setDirty();
+        }
+
+        /** UNIX-time when this record was created */
+        public synchronized long getCreated() {
+            return created;
+        }
+
+        /** UNIX-time when this record was last modified */
+        public synchronized void setModified(long modified) {
+            this.modified = modified;
+            setDirty();
+        }
+
+        /** UNIX-time when this record was last modified */
+        public synchronized long getModified() {
+            return modified;
+        }
+
+        /** The next page for a Record collision (if any) */
+        public synchronized void setNextCollision(long nextCollision) {
+            this.nextCollision = nextCollision;
+            setDirty();
+        }
+
+        /** The next page for a Record collision (if any) */
+        public synchronized long getNextCollision() {
+            return nextCollision;
+        }
+    }
+}
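
A minimal usage sketch for the filer above, assuming the String-based Key and
Value constructors from the Xindice data package (they are not part of this
commit):

    import java.io.File;
    import org.apache.xindice.core.data.Key;
    import org.apache.xindice.core.data.Record;
    import org.apache.xindice.core.data.Value;
    import org.apache.xindice.core.filer.HashFiler;

    public class HashFilerExample {
        public static void main(String[] args) throws Exception {
            HashFiler filer = new HashFiler();
            // Backing file becomes <root>/records.tbl
            filer.setLocation(new File("/tmp"), "records");
            if (!filer.exists()) {
                filer.create();   // writes the file header and pre-allocates pages
            }
            filer.open();
            filer.writeRecord(new Key("doc1"), new Value("<doc/>"));
            Record r = filer.readRecord(new Key("doc1"));
            System.out.println(r.getValue());
            filer.close();
        }
    }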

Added: activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/MemFiler.java
URL: http://svn.apache.org/viewvc/activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/MemFiler.java?rev=677302&view=auto
==============================================================================
--- activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/MemFiler.java (added)
+++ activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/MemFiler.java Wed Jul 16 08:18:20 2008
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: MemFiler.java 541516 2007-05-25 02:46:51Z vgritsenko $
+ */
+
+package org.apache.xindice.core.filer;
+
+import java.io.File;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.xindice.core.DBException;
+import org.apache.xindice.core.FaultCodes;
+import org.apache.xindice.core.data.Key;
+import org.apache.xindice.core.data.Record;
+import org.apache.xindice.core.data.RecordSet;
+import org.apache.xindice.core.data.Value;
+
+/**
+ * MemFiler is an In-Memory Filer implementation for Xindice.  MemFiler can be
+ * used for temporary collections and caching.  It is essentially a thin
+ * layer over a HashMap.
+ *
+ * @version $Revision: 541516 $, $Date: 2007-05-24 22:46:51 -0400 (Thu, 24 May 2007) $
+ */
+public final class MemFiler implements Filer {
+    private Map hashTable = null;
+    private boolean opened = false;
+    private boolean readOnly = false;
+
+    public MemFiler() {
+        hashTable = Collections.synchronizedMap(new HashMap());
+    }
+
+    public MemFiler(Map hashTable, boolean readOnly) {
+        this.hashTable = hashTable;
+        this.readOnly = readOnly;
+    }
+
+    public MemFiler(Map hashTable) {
+        this(hashTable, false);
+    }
+
+    public void setLocation(File root, String location) {
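+        // No-op: MemFiler keeps all records in memory and has no backing file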
+    }
+
+    public String getName() {
+        return "MemFiler";
+    }
+
+    private void checkOpened() throws DBException {
+        if (!opened) {
+            throw new FilerException(FaultCodes.COL_COLLECTION_CLOSED, "Filer is closed");
+        }
+    }
+
+    private void checkReadOnly() throws DBException {
+        if (readOnly) {
+            throw new FilerException(FaultCodes.COL_COLLECTION_READ_ONLY, "Filer is read-only");
+        }
+    }
+
+    public boolean create() {
+        hashTable.clear();
+        return true;
+    }
+
+    public boolean open() {
+        opened = true;
+        return opened;
+    }
+
+    public boolean isOpened() {
+        return opened;
+    }
+
+    public boolean exists() {
+        return true;
+    }
+
+    public boolean drop() {
+        hashTable.clear();
+        opened = false;
+        return !opened;
+    }
+
+    public boolean close() {
+        opened = false;
+        return !opened;
+    }
+
+    public void flush() {
+    }
+
+    public Record readRecord(Key key) throws DBException {
+        return readRecord(key, false);
+    }
+
+    public Record readRecord(Key key, boolean metaOnly) throws DBException {
+        if (key == null || key.getLength() == 0) {
+            return null;
+        }
+        checkOpened();
+        return (Record) hashTable.get(key);
+    }
+
+    public Record writeRecord(Key key, Value value) throws DBException {
+        if (key == null || key.getLength() == 0) {
+            throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Invalid key: '" + key + "'");
+        }
+        if (value == null) {
+            throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Invalid null value");
+        }
+        checkOpened();
+        checkReadOnly();
+        hashTable.put(key, new Record(key, value));
+        return new Record(key, value);
+    }
+
+    public boolean deleteRecord(Key key) throws DBException {
+        if (key == null || key.getLength() == 0) {
+            return false;
+        }
+        checkOpened();
+        checkReadOnly();
+        return hashTable.remove(key) != null;
+    }
+
+    public long getRecordCount() throws DBException {
+        checkOpened();
+        return hashTable.size();
+    }
+
+    public RecordSet getRecordSet() throws DBException {
+        checkOpened();
+        return new MemRecordSet();
+    }
+
+
+    /**
+     * MemRecordSet
+     */
+
+    private class MemRecordSet implements RecordSet {
+        private Iterator iter = hashTable.values().iterator();
+
+        public synchronized boolean hasMoreRecords() throws DBException {
+            return iter.hasNext();
+        }
+
+        public synchronized Record getNextRecord() throws DBException {
+            checkOpened();
+            return (Record) iter.next();
+        }
+
+        public synchronized Value getNextValue() throws DBException {
+            checkOpened();
+            return ((Record) iter.next()).getValue();
+        }
+
+        public synchronized Key getNextKey() {
+            return ((Record) iter.next()).getKey();
+        }
+    }
+}
+
+
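
A short sketch of RecordSet iteration against MemFiler, under the same
assumption about the Key and Value String constructors:

    import org.apache.xindice.core.data.Key;
    import org.apache.xindice.core.data.RecordSet;
    import org.apache.xindice.core.data.Value;
    import org.apache.xindice.core.filer.MemFiler;

    public class MemFilerExample {
        public static void main(String[] args) throws Exception {
            MemFiler filer = new MemFiler();
            filer.open();             // no backing file; open() only flips a flag
            filer.writeRecord(new Key("a"), new Value("1"));
            filer.writeRecord(new Key("b"), new Value("2"));
            RecordSet rs = filer.getRecordSet();
            while (rs.hasMoreRecords()) {
                System.out.println(rs.getNextRecord().getValue());
            }
            filer.close();
        }
    }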

Added: activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/Paged.java
URL: http://svn.apache.org/viewvc/activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/Paged.java?rev=677302&view=auto
==============================================================================
--- activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/Paged.java (added)
+++ activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/Paged.java Wed Jul 16 08:18:20 2008
@@ -0,0 +1,1457 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: Paged.java 571938 2007-09-02 10:14:13Z vgritsenko $
+ */
+
+package org.apache.xindice.core.filer;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.lang.ref.WeakReference;
+import java.util.Collection;
+import java.util.EmptyStackException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Stack;
+import java.util.WeakHashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.xindice.core.DBException;
+import org.apache.xindice.core.DBObject;
+import org.apache.xindice.core.FaultCodes;
+import org.apache.xindice.core.data.Key;
+import org.apache.xindice.core.data.Value;
+
+/**
+ * Paged is a paged file implementation that is foundation for both the
+ * BTree class and the HashFiler. It provides flexible paged I/O and
+ * page caching functionality.
+ *
+ * <br>
+ * Paged has the following configuration attributes:
+ * <ul>
+ * <li><strong>pagesize</strong>: Size of the page used by the paged file.
+ *     Default page size is 4096 bytes. This parameter can be set only
+ *     before paged file is created. Once it is created, this parameter
+ *     can not be changed.</li>
+ * <li><strong>pagecount</strong>: Number of pages the filer will be
+ *     created with.</li>
+ * <li><strong>maxkeysize</strong>: Maximum allowed size of the key.
+ *     Default maximum key size is 256 bytes.</li>
+ * <li><strong>max-descriptors</strong>: Defines the maximum number of
+ *     simultaneously open file descriptors this paged file can have.
+ *     Several descriptors are needed to provide multithreaded access
+ *     to the underlying file. Too large a value will limit the number
+ *     of collections you can open. Default value is 16
+ *     (DEFAULT_DESCRIPTORS_MAX).</li>
+ * </ul>
+ *
+ * <br>FIXME: Currently it seems that maxkeysize is not used anywhere.
+ * <br>TODO: Introduce Paged interface, implementations.
+ *
+ * @version $Revision: 571938 $, $Date: 2007-09-02 06:14:13 -0400 (Sun, 02 Sep 2007) $
+ */
+public abstract class Paged implements DBObject {
+
+    private static final Log log = LogFactory.getLog(Paged.class);
+
+    /**
+     * The maximum number of pages that will be held in the dirty cache.
+     * Once this number reaches the limit, pages are flushed to disk.
+     */
+    private static final int MAX_DIRTY_SIZE = 128;
+
+    /**
+     * Name of the configuration attribute "pagesize"
+     */
+    protected static final String CONFIG_PAGESIZE = "pagesize";
+
+    /**
+     * Name of the configuration attribute "pagecount"
+     */
+    protected static final String CONFIG_PAGECOUNT = "pagecount";
+
+    /**
+     * Name of the configuration attribute "maxkeysize"
+     */
+    protected static final String CONFIG_KEYSIZE_MAX = "maxkeysize";
+
+    /**
+     * Name of the configuration attribute "max-descriptors"
+     */
+    protected static final String CONFIG_DESCRIPTORS_MAX = "max-descriptors";
+
+    /**
+     * Default value of the "pagesize".
+     */
+    private static final int DEFAULT_PAGESIZE = 4096;
+
+    /**
+     * Default value of the "pagecount".
+     */
+    private static final int DEFAULT_PAGECOUNT = 1024;
+
+    /**
+     * File header size
+     */
+    private static final int FILE_HEADER_SIZE = 4096;
+
+    /**
+     * Default value of the "maxkeysize".
+     */
+    private static final int DEFAULT_KEYSIZE_MAX = 256;
+
+    /**
+     * Default value of the maximum number of open random access files Paged
+     * can have. This number balances resource utilization and parallelism of
+     * access to the paged file.
+     */
+    private static final int DEFAULT_DESCRIPTORS_MAX = 16;
+
+
+    /**
+     * Unused page status
+     */
+    protected static final byte UNUSED = 0;
+
+    /**
+     * Overflow page status
+     */
+    protected static final byte OVERFLOW = 126;
+
+    /**
+     * Deleted page status
+     */
+    protected static final byte DELETED = 127;
+
+    /**
+     * Page ID of non-existent page
+     */
+    protected static final int NO_PAGE = -1;
+
+    /**
+     * Map of pages in use. Guarantees that a page with a given number is loaded
+     * into memory just once, allowing synchronization on page objects to ensure
+     * that no two threads write into the same page at once.
+     *
+     * <p>Map contains weak references to the Page objects, keys are pages themselves.
+     * Access is synchronized by the {@link #pagesLock}.
+     */
+    private final Map pages = new WeakHashMap();
+
+    /**
+     * Lock for synchronizing access to the {@link #pages} map.
+     */
+    private final Object pagesLock = new Object();
+
+    /**
+     * Cache of modified pages waiting to be written out.
+     * Access is synchronized by the {@link #dirtyLock}.
+     */
+    private Map dirty = new HashMap();
+
+    /**
+     * Lock for synchronizing access to the {@link #dirty} map.
+     */
+    private final Object dirtyLock = new Object();
+
+    /**
+     * Random access file descriptors cache.
+     * Access to it and to {@link #descriptorsCount} is synchronized on the stack itself.
+     */
+    private final Stack descriptors = new Stack();
+
+    /**
+     * The number of random access file objects that exist, either in the
+     * cache {@link #descriptors}, or currently in use.
+     */
+    private int descriptorsCount;
+
+    /**
+     * The maximum number of random access file objects that can be opened
+     * by this paged instance.
+     */
+    private int descriptorsMax = DEFAULT_DESCRIPTORS_MAX;
+
+    /**
+     * Whether the file is opened or not.
+     */
+    private boolean opened;
+
+    /**
+     * The underlying file where the Paged object stores its pages.
+     */
+    private File file;
+
+    /**
+     * Header of this Paged
+     */
+    private final FileHeader fileHeader;
+
+    /**
+     * Default value used when a new file is created.  Ignored if 
+     * opening an existing file.
+     */
+    private int pageSize = DEFAULT_PAGESIZE;
+
+    /**
+     * Default value used when a new file is created.  Ignored if 
+     * opening an existing file.
+     */
+    private long pageCount = DEFAULT_PAGECOUNT;
+
+    /**
+     * Default value used when a new file is created.
+     */
+    private short maxKeySize = DEFAULT_KEYSIZE_MAX;
+
+
+    public Paged() {
+        descriptorsMax = DEFAULT_DESCRIPTORS_MAX;
+        fileHeader = createFileHeader();
+    }
+
+    public Paged(File file) {
+        this();
+        setFile(file);
+    }    
+
+    /**
+     * setFile sets the file object for this Paged.
+     *
+     * @param file The File
+     */
+    protected final void setFile(final File file) {
+        this.file = file;
+    }
+
+    /**
+     * getFile returns the file object for this Paged.
+     *
+     * @return The File
+     */
+    protected final File getFile() {
+        return file;
+    }
+
+    /**
+     * Obtains a RandomAccessFile ('descriptor') object from the pool.
+     * If no descriptors are available and the maximum number has already
+     * been allocated, the call blocks.
+     */
+    protected final RandomAccessFile getDescriptor() throws IOException {
+        synchronized (descriptors) {
+            // If there are descriptors in the cache return one.
+            if (!descriptors.empty()) {
+                return (RandomAccessFile) descriptors.pop();
+            }
+            // Otherwise we need to get one some other way.
+
+            // First try to create a new one if there's room
+            if (descriptorsCount < descriptorsMax) {
+                descriptorsCount++;
+                return new RandomAccessFile(file, "rw");
+            }
+
+            // Otherwise we have to wait for one to be released by another thread.
+            while (true) {
+                try {
+                    descriptors.wait();
+                    return (RandomAccessFile) descriptors.pop();
+                } catch (InterruptedException e) {
+                    // Ignore, and continue to wait
+                } catch (EmptyStackException e) {
+                    // Ignore, and continue to wait
+                }
+            }
+        }
+    }
+
+    /**
+     * Puts a RandomAccessFile ('descriptor') back into the descriptor pool.
+     */
+    protected final void putDescriptor(RandomAccessFile raf) {
+        if (raf != null) {
+            synchronized (descriptors) {
+                descriptors.push(raf);
+                descriptors.notify();
+            }
+        }
+    }
+
+    /**
+     * Closes a RandomAccessFile ('descriptor') and removes it from the pool.
+     */
+    protected final void closeDescriptor(RandomAccessFile raf) {
+        if (raf != null) {
+            try {
+                raf.close();
+            } catch (IOException e) {
+                // Ignore close exception
+            }
+
+            // Synchronization is necessary as the decrement operation is not atomic
+            synchronized (descriptors) {
+                descriptorsCount--;
+            }
+        }
+    }
+
+    /**
+     * getPage returns the page specified by pageNum.
+     *
+     * @param pageNum The Page number
+     * @return The requested Page
+     * @throws IOException if an Exception occurs
+     */
+    protected final Page getPage(long pageNum) throws IOException {
+        final PageKey k = new PageKey(pageNum);
+        Page p = null;
+        synchronized (pagesLock) {
+            // Check if page is already loaded in the page cache
+            WeakReference ref = (WeakReference) pages.get(k);
+            if (ref != null) {
+                p = (Page) ref.get();
+                // Fall through to p.read(). Even if the page is present in the
+                // pages map, it still has to be read - it could have just been
+                // added to the map without read() having been called yet.
+            }
+
+            // If not found, create it and add it to the pages cache
+            if (p == null) {
+                p = new Page(pageNum);
+                pages.put(p, new WeakReference(p));
+            }
+        }
+
+        // Load the page from disk if necessary
+        p.read();
+        return p;
+    }
+
+    /**
+     * readValue reads the multi-Paged Value starting at the specified
+     * Page.
+     *
+     * @param page The starting Page
+     * @return The Value
+     * @throws IOException if an Exception occurs
+     */
+    protected final Value readValue(Page page) throws IOException {
+        final PageHeader sph = page.getPageHeader();
+        ByteArrayOutputStream bos = new ByteArrayOutputStream(sph.getRecordLen());
+
+        // Loop until we've read all the pages into memory.
+        Page p = page;
+        while (true) {
+            PageHeader ph = p.getPageHeader();
+
+            // Add the contents of the page onto the stream
+            p.streamTo(bos);
+
+            // Continue following the list of pages until we get to the end.
+            long nextPage = ph.getNextPage();
+            if (nextPage == NO_PAGE) {
+                break;
+            }
+            p = getPage(nextPage);
+        }
+
+        // Return a Value with the collected contents of all pages.
+        return new Value(bos.toByteArray());
+    }
+
+    /**
+     * readValue reads the multi-Paged Value starting at the specified
+     * page number.
+     *
+     * @param page The starting page number
+     * @return The Value
+     * @throws IOException if an Exception occurs
+     */
+    protected final Value readValue(long page) throws IOException {
+        return readValue(getPage(page));
+    }
+
+    /**
+     * writeValue writes the multi-Paged Value starting at the specified
+     * Page.
+     *
+     * @param page The starting Page
+     * @param value The Value to write
+     * @throws IOException if an Exception occurs
+     */
+    protected final void writeValue(Page page, Value value) throws IOException {
+        if (value == null) {
+            throw new IOException("Can't write a null value");
+        }
+
+        InputStream is = value.getInputStream();
+
+        // Write as much as we can onto the primary page.
+        PageHeader hdr = page.getPageHeader();
+        hdr.setRecordLen(value.getLength());
+        page.streamFrom(is);
+
+        // Write out the rest of the value onto any needed overflow pages
+        while (is.available() > 0) {
+            Page lpage = page;
+            PageHeader lhdr = hdr;
+
+            // Find an overflow page to use
+            long np = lhdr.getNextPage();
+            if (np != NO_PAGE) {
+                // Use an existing page.
+                page = getPage(np);
+            } else {
+                // Create a new overflow page
+                page = getFreePage();
+                lhdr.setNextPage(page.getPageNum());
+            }
+
+            // Mark the page as an overflow page.
+            hdr = page.getPageHeader();
+            hdr.setStatus(OVERFLOW);
+
+            // Write some more of the value to the overflow page.
+            page.streamFrom(is);
+            lpage.write();
+        }
+
+        // Clean up any unused overflow pages, i.e. when the value is smaller than
+        // the last time it was written.
+        long np = hdr.getNextPage();
+        if (np != NO_PAGE) {
+            unlinkPages(np);
+        }
+
+        hdr.setNextPage(NO_PAGE);
+        page.write();
+    }
+
+    /**
+     * writeValue writes the multi-Paged Value starting at the specified
+     * page number.
+     *
+     * @param page The starting page number
+     * @param value The Value to write
+     * @throws IOException if an Exception occurs
+     */
+    protected final void writeValue(long page, Value value) throws IOException {
+        writeValue(getPage(page), value);
+    }
+
+    /**
+     * unlinkPages unlinks a set of pages starting at the specified Page.
+     *
+     * @param page The starting Page to unlink
+     * @throws IOException if an Exception occurs
+     */
+    protected void unlinkPages(Page page) throws IOException {
+        // Add any overflow pages to the list of free pages.
+        // Get the first and last page in the chain.
+        long firstPage = page.pageNum;
+        while (page.header.nextPage != NO_PAGE) {
+            page = getPage(page.header.nextPage);
+        }
+        long lastPage = page.pageNum;
+
+        // Free the chain
+        synchronized (fileHeader) {
+            // If there are already some free pages, add the start of the chain
+            // to the list of free pages.
+            if (fileHeader.lastFreePage != NO_PAGE) {
+                Page p = getPage(fileHeader.lastFreePage);
+                p.header.setNextPage(firstPage);
+                p.write();
+            }
+
+            // Otherwise set the chain as the list of free pages.
+            if (fileHeader.firstFreePage == NO_PAGE) {
+                fileHeader.setFirstFreePage(firstPage);
+            }
+
+            // Add a reference to the end of the chain.
+            fileHeader.setLastFreePage(lastPage);
+        }
+    }
+
+    /**
+     * unlinkPages unlinks a set of pages starting at the specified
+     * page number.
+     *
+     * @param pageNum The starting page number to unlink
+     * @throws IOException if an Exception occurs
+     */
+    protected final void unlinkPages(long pageNum) throws IOException {
+        unlinkPages(getPage(pageNum));
+    }
+
+    /**
+     * getFreePage returns the first free Page from secondary storage.
+     * If no Pages are available, the file is grown as appropriate.
+     *
+     * @return The next free Page
+     * @throws IOException if an Exception occurs
+     */
+    protected final Page getFreePage() throws IOException {
+        Page p = null;
+
+        // Synchronize read and write to the fileHeader.firstFreePage
+        synchronized (fileHeader) {
+            if (fileHeader.firstFreePage != NO_PAGE) {
+                // Steal a deleted page
+                p = getPage(fileHeader.firstFreePage);
+                fileHeader.setFirstFreePage(p.getPageHeader().nextPage);
+                if (fileHeader.firstFreePage == NO_PAGE) {
+                    fileHeader.setLastFreePage(NO_PAGE);
+                }
+            }
+        }
+
+        if (p == null) {
+            // No deleted pages, grow the file
+            p = getPage(fileHeader.incTotalCount());
+        }
+
+        // Initialize The Page Header (Cleanly)
+        p.header.setNextPage(NO_PAGE);
+        p.header.setStatus(UNUSED);
+        return p;
+    }
+
+    /**
+     * @throws DBException COL_COLLECTION_CLOSED if paged file is closed
+     */
+    protected final void checkOpened() throws DBException {
+        if (!opened) {
+            throw new FilerException(FaultCodes.COL_COLLECTION_CLOSED,
+                                     "Filer is closed");
+        }
+    }
+
+    /**
+     * getFileHeader returns the FileHeader
+     *
+     * @return The FileHeader
+     */
+    protected FileHeader getFileHeader() {
+        return fileHeader;
+    }
+
+    protected void initFileHeader() {
+        fileHeader.setPageSize(pageSize);
+        fileHeader.setPageCount(pageCount);
+        fileHeader.setMaxKeySize(maxKeySize);
+    }
+
+    /**
+     * @return True if this paged file exists
+     */
+    public boolean exists() {
+        return file.exists();
+    }
+
+    public boolean create() throws DBException {
+        try {
+            initFileHeader();            
+            createFile();
+            fileHeader.write();
+            flush();
+            return true;
+        } catch (Exception e) {
+            throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR,
+                                     "Error creating " + file.getName(), e);
+        }
+    }
+
+    private void createFile() throws IOException {
+        RandomAccessFile raf = null;
+        try {
+            raf = getDescriptor();
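+            // Pre-allocate the primary pages: seek to the last byte of the initial page range and write a single byte to extend the file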
+            long o = fileHeader.headerSize + (fileHeader.pageCount + 1) * fileHeader.pageSize - 1;
+            raf.seek(o);
+            raf.write(0);
+        }  finally {
+            putDescriptor(raf);
+        }
+    }
+
+    public synchronized boolean open() throws DBException {
+        RandomAccessFile raf = null;
+        try {
+            if (exists()) {
+                initFileHeader();            
+                raf = getDescriptor();
+                fileHeader.read();
+
+                // This is the only property that can be changed after creation
+                fileHeader.setMaxKeySize(maxKeySize);
+
+                opened = true;
+            } else {
+                opened = false;
+            }
+            return opened;
+        } catch (Exception e) {
+            throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR,
+                                     "Error opening " + file.getName(), e);
+        } finally {
+            putDescriptor(raf);
+        }
+    }
+
+    public synchronized boolean close() throws DBException {
+        if (isOpened()) {
+            try {
+                // First of all, mark as closed to prevent operations
+                opened = false;
+                flush();
+
+                synchronized (descriptors) {
+                    final int total = descriptorsCount;
+                    // Close descriptors in cache
+                    while (!descriptors.empty()) {
+                        closeDescriptor((RandomAccessFile)descriptors.pop());
+                    }
+                    // Attempt to close descriptors in use. Max wait time = 0.5s * descriptorsCount
+                    int n = descriptorsCount;
+                    while (descriptorsCount > 0 && n > 0) {
+                        descriptors.wait(500);
+                        if (descriptors.isEmpty()) {
+                            n--;
+                        } else {
+                            closeDescriptor((RandomAccessFile)descriptors.pop());
+                        }
+                    }
+                    if (descriptorsCount > 0) {
+                        log.warn(descriptorsCount + " out of " + total + " files were not closed during close.");
+                    }
+                }
+
+                // clear cache
+                synchronized (pagesLock) {
+                    pages.clear();
+                }
+            } catch (Exception e) {
+                // Failed to close, leave open
+                opened = true;
+                throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR,
+                                         "Error closing " + file.getName(), e);
+            }
+        }
+
+        return true;
+    }
+
+    public boolean isOpened() {
+        return opened;
+    }
+
+    public boolean drop() throws DBException {
+        try {
+            close();
+            if (exists()) {
+                return getFile().delete();
+            } else {
+                return true;
+            }
+        } catch (Exception e) {
+            throw new FilerException(FaultCodes.COL_CANNOT_DROP,
+                                     "Can't drop " + file.getName(), e);
+        }
+    }
+
+    void addDirty(Page page) throws IOException {
+        boolean flush;
+        synchronized (dirtyLock) {
+            dirty.put(page, page);
+            flush = dirty.size() > MAX_DIRTY_SIZE;
+        }
+
+        if (flush) {
+            // Too many dirty pages... flush them
+            try {
+                flush();
+            } catch (Exception e) {
+                throw new IOException(e.getMessage());
+            }
+        }
+    }
+
+    public void flush() throws DBException {
+        // This method is not synchronized
+
+        // Error flag/counter
+        int error = 0;
+
+        // Obtain collection of dirty pages
+        Collection pages;
+        synchronized (dirtyLock) {
+            pages = dirty.values();
+            dirty = new HashMap();
+        }
+
+        // Flush dirty pages
+        Iterator i = pages.iterator();
+        while (i.hasNext()) {
+            Page p = (Page) i.next();
+            try {
+                p.flush();
+            } catch (Exception e) {
+                log.warn("Exception while flushing page " + p.pageNum, e);
+                error++;
+            }
+        }
+
+        // Flush header
+        if (fileHeader.dirty) {
+            try {
+                fileHeader.write();
+            } catch (Exception e) {
+                log.warn("Exception while flushing file header", e);
+                error++;
+            }
+        }
+
+        if (error != 0) {
+            throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR,
+                                     "Error performing flush! Failed to flush " + error + " pages!");
+        }
+    }
+
+
+    /**
+     * createFileHeader must be implemented by a Paged implementation
+     * in order to create an appropriate subclass instance of a FileHeader.
+     *
+     * @return a new FileHeader
+     */
+    protected abstract FileHeader createFileHeader();
+
+    /**
+     * createPageHeader must be implemented by a Paged implementation
+     * in order to create an appropriate subclass instance of a PageHeader.
+     *
+     * @return a new PageHeader
+     */
+    protected abstract PageHeader createPageHeader();
+
+
+    // These are a bunch of utility methods for subclasses
+
+    public static Value[] insertArrayValue(Value[] vals, Value val, int idx) {
+        Value[] newVals = new Value[vals.length + 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        newVals[idx] = val;
+        if (idx < vals.length) {
+            System.arraycopy(vals, idx, newVals, idx + 1, vals.length - idx);
+        }
+        return newVals;
+    }
+
+    public static Value[] deleteArrayValue(Value[] vals, int idx) {
+        Value[] newVals = new Value[vals.length - 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        if (idx < newVals.length) {
+            System.arraycopy(vals, idx + 1, newVals, idx, newVals.length - idx);
+        }
+        return newVals;
+    }
+
+    public static long[] insertArrayLong(long[] vals, long val, int idx) {
+        long[] newVals = new long[vals.length + 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        newVals[idx] = val;
+        if (idx < vals.length) {
+            System.arraycopy(vals, idx, newVals, idx + 1, vals.length - idx);
+        }
+        return newVals;
+    }
+
+    public static long[] deleteArrayLong(long[] vals, int idx) {
+        long[] newVals = new long[vals.length - 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        if (idx < newVals.length) {
+            System.arraycopy(vals, idx + 1, newVals, idx, newVals.length - idx);
+        }
+        return newVals;
+    }
+
+    public static int[] insertArrayInt(int[] vals, int val, int idx) {
+        int[] newVals = new int[vals.length + 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        newVals[idx] = val;
+        if (idx < vals.length) {
+            System.arraycopy(vals, idx, newVals, idx + 1, vals.length - idx);
+        }
+        return newVals;
+    }
+
+    public static int[] deleteArrayInt(int[] vals, int idx) {
+        int[] newVals = new int[vals.length - 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        if (idx < newVals.length) {
+            System.arraycopy(vals, idx + 1, newVals, idx, newVals.length - idx);
+        }
+        return newVals;
+    }
+
+    public static short[] insertArrayShort(short[] vals, short val, int idx) {
+        short[] newVals = new short[vals.length + 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        newVals[idx] = val;
+        if (idx < vals.length) {
+            System.arraycopy(vals, idx, newVals, idx + 1, vals.length - idx);
+        }
+
+        return newVals;
+    }
+
+    public static short[] deleteArrayShort(short[] vals, int idx) {
+        short[] newVals = new short[vals.length - 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        if (idx < newVals.length) {
+            System.arraycopy(vals, idx + 1, newVals, idx, newVals.length - idx);
+        }
+
+        return newVals;
+    }
+
+
+    /**
+     * Paged file's header
+     */
+    protected abstract class FileHeader {
+        private boolean dirty;
+        private int workSize;
+
+        private short headerSize;
+
+        /**
+         * Size of the page in bytes.
+         */
+        private int pageSize;
+
+        /**
+         * Number of pages initially allocated for the file.
+         * Has a special (historical) meaning for HashFiler.
+         */
+        private long pageCount;
+
+        /**
+         * Number of pages used by the filer. Initially set to 0.
+         * Has somewhat different (historical) meaning for HashFiler.
+         */
+        private long totalCount;
+
+        private long firstFreePage = -1;
+        private long lastFreePage = -1;
+        private byte pageHeaderSize = 64;
+        private short maxKeySize = DEFAULT_KEYSIZE_MAX;
+        private long recordCount;
+
+
+        public FileHeader() {
+            this.pageSize = DEFAULT_PAGESIZE;
+            this.pageCount = DEFAULT_PAGECOUNT;
+            this.headerSize = (short) FILE_HEADER_SIZE;
+            calculateWorkSize();
+        }
+
+        public synchronized final void read() throws IOException {
+            RandomAccessFile raf = getDescriptor();
+            try {
+                raf.seek(0);
+                read(raf);
+                calculateWorkSize();
+            } finally {
+                putDescriptor(raf);
+            }
+        }
+
+        protected synchronized void read(RandomAccessFile raf) throws IOException {
+            headerSize = raf.readShort();
+            pageSize = raf.readInt();
+            pageCount = raf.readLong();
+            totalCount = raf.readLong();
+            firstFreePage = raf.readLong();
+            lastFreePage = raf.readLong();
+            pageHeaderSize = raf.readByte();
+            maxKeySize = raf.readShort();
+            recordCount = raf.readLong();
+        }
+
+        public synchronized final void write() throws IOException {
+            if (dirty) {
+                RandomAccessFile raf = getDescriptor();
+                try {
+                    raf.seek(0);
+                    write(raf);
+                    dirty = false;
+                } finally {
+                    putDescriptor(raf);
+                }
+            }
+        }
+
+        protected synchronized void write(RandomAccessFile raf) throws IOException {
+            raf.writeShort(headerSize);
+            raf.writeInt(pageSize);
+            raf.writeLong(pageCount);
+            raf.writeLong(totalCount);
+            raf.writeLong(firstFreePage);
+            raf.writeLong(lastFreePage);
+            raf.writeByte(pageHeaderSize);
+            raf.writeShort(maxKeySize);
+            raf.writeLong(recordCount);
+        }
+
+        public synchronized final void setDirty() {
+            dirty = true;
+        }
+
+        public synchronized final boolean isDirty() {
+            return dirty;
+        }
+
+        /**
+         * The size of the FileHeader. Usually 1 OS Page.
+         * This method should be called only while initializing Paged, not during normal processing.
+         */
+        public synchronized final void setHeaderSize(short headerSize) {
+            this.headerSize = headerSize;
+            dirty = true;
+        }
+
+        /** The size of the FileHeader.  Usually 1 OS Page */
+        public synchronized final short getHeaderSize() {
+            return headerSize;
+        }
+
+        /**
+         * The size of a page. Usually a multiple of a FS block.
+         * This method should be called only while initializing Paged, not during normal processing.
+         */
+        public synchronized final void setPageSize(int pageSize) {
+            this.pageSize = pageSize;
+            calculateWorkSize();
+            dirty = true;
+        }
+
+        /** The size of a page.  Usually a multiple of a FS block */
+        public synchronized final int getPageSize() {
+            return pageSize;
+        }
+
+        /**
+         * The number of pages in primary/initial storage.
+         * This method should be called only while initializing Paged, not during normal processing.
+         */
+        public synchronized final void setPageCount(long pageCount) {
+            this.pageCount = pageCount;
+            dirty = true;
+        }
+
+        /** The number of pages in primary storage */
+        public synchronized final long getPageCount() {
+            return pageCount;
+        }
+
+        /**
+         * The number of used pages in the file.
+         * This method should be called only while initializing Paged, not during normal processing.
+         */
+        public synchronized final void setTotalCount(long totalCount) {
+            this.totalCount = totalCount;
+            dirty = true;
+        }
+
+        /**
+         * Increments the number of used pages and returns the count
+         * <em>before</em> the increment, i.e. the number usable for the
+         * next page to allocate.
+         */
+        public synchronized final long incTotalCount() {
+            dirty = true;
+            return this.totalCount++;
+        }
+
+        /** The number of used pages in the file */
+        public synchronized final long getTotalCount() {
+            return totalCount;
+        }
+
+        /** The first free page in unused secondary space */
+        public synchronized final void setFirstFreePage(long firstFreePage) {
+            this.firstFreePage = firstFreePage;
+            dirty = true;
+        }
+
+        /** The first free page in unused secondary space */
+        public synchronized final long getFirstFreePage() {
+            return firstFreePage;
+        }
+
+        /** The last free page in unused secondary space */
+        public synchronized final void setLastFreePage(long lastFreePage) {
+            this.lastFreePage = lastFreePage;
+            dirty = true;
+        }
+
+        /** The last free page in unused secondary space */
+        public synchronized final long getLastFreePage() {
+            return lastFreePage;
+        }
+
+        /**
+         * Set the size of a page header.
+         *
+         * Normally, 64 is sufficient.
+         */
+        public synchronized final void setPageHeaderSize(byte pageHeaderSize) {
+            this.pageHeaderSize = pageHeaderSize;
+            calculateWorkSize();
+            dirty = true;
+        }
+
+        /**
+         * Get the size of a page header.
+         */
+        public synchronized final byte getPageHeaderSize() {
+            return pageHeaderSize;
+        }
+
+        /**
+         * Set the maximum length of a key, in bytes.
+         *
+         * Normally, 256 is sufficient.
+         */
+        public synchronized final void setMaxKeySize(short maxKeySize) {
+            this.maxKeySize = maxKeySize;
+            dirty = true;
+        }
+
+        /**
+         * Get the maximum length of a key, in bytes.
+         */
+        public synchronized final short getMaxKeySize() {
+            return maxKeySize;
+        }
+
+        /** Increment the number of records being managed by the file */
+        public synchronized final void incRecordCount() {
+            recordCount++;
+            dirty = true;
+        }
+
+        /** Decrement the number of records being managed by the file */
+        public synchronized final void decRecordCount() {
+            recordCount--;
+            dirty = true;
+        }
+
+        /** The number of records being managed by the file (not pages) */
+        public synchronized final long getRecordCount() {
+            return recordCount;
+        }
+
+        private synchronized void calculateWorkSize() {
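+            // Usable payload bytes per page: the page minus its header.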
+            workSize = pageSize - pageHeaderSize;
+        }
+
+        public synchronized final int getWorkSize() {
+            return workSize;
+        }
+    }
+
+    /**
+     * Paged file page's header
+     */
+    protected abstract static class PageHeader implements Streamable {
+        private boolean dirty;
+        private byte status = UNUSED;
+        private short keyLen;
+        private int keyHash;
+        private int dataLen;
+        private int recordLen;
+        private long nextPage = NO_PAGE;
+
+        public PageHeader() {
+        }
+
+        public PageHeader(DataInput dis) throws IOException {
+            read(dis);
+        }
+
+        public synchronized void read(DataInput dis) throws IOException {
+            status = dis.readByte();
+            dirty = false;
+            if (status == UNUSED) {
+                return;
+            }
+
+            keyLen = dis.readShort();
+            keyHash = dis.readInt();
+            dataLen = dis.readInt();
+            recordLen = dis.readInt();
+            nextPage = dis.readLong();
+        }
+
+        public synchronized void write(DataOutput dos) throws IOException {
+            dirty = false;
+            dos.writeByte(status);
+            dos.writeShort(keyLen);
+            dos.writeInt(keyHash);
+            dos.writeInt(dataLen);
+            dos.writeInt(recordLen);
+            dos.writeLong(nextPage);
+        }
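+
+        // Fixed on-disk layout written above: status (1) + keyLen (2)
+        // + keyHash (4) + dataLen (4) + recordLen (4) + nextPage (8)
+        // = 23 bytes, so a pageHeaderSize of 64 (see setPageHeaderSize)
+        // leaves ample room for fields appended by subclasses.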
+
+        public synchronized final boolean isDirty() {
+            return dirty;
+        }
+
+        public synchronized final void setDirty() {
+            dirty = true;
+        }
+
+        /** The status of this page (UNUSED, RECORD, DELETED, etc...) */
+        public synchronized final void setStatus(byte status) {
+            this.status = status;
+            dirty = true;
+        }
+
+        /** The status of this page (UNUSED, RECORD, DELETED, etc...) */
+        public synchronized final byte getStatus() {
+            return status;
+        }
+
+        public synchronized final void setKey(Key key) {
+            // setKey WIPES OUT the Page data
+            setRecordLen(0);
+            dataLen = 0;
+            keyHash = key.hashCode();
+            keyLen = (short) key.getLength();
+            dirty = true;
+        }
+
+        /** The length of the Key */
+        public synchronized final void setKeyLen(short keyLen) {
+            this.keyLen = keyLen;
+            dirty = true;
+        }
+
+        /** The length of the Key */
+        public synchronized final short getKeyLen() {
+            return keyLen;
+        }
+
+        /** The hashed value of the Key for quick comparisons */
+        public synchronized final void setKeyHash(int keyHash) {
+            this.keyHash = keyHash;
+            dirty = true;
+        }
+
+        /** The hashed value of the Key for quick comparisons */
+        public synchronized final int getKeyHash() {
+            return keyHash;
+        }
+
+        /** The length of the Data */
+        public synchronized final void setDataLen(int dataLen) {
+            this.dataLen = dataLen;
+            dirty = true;
+        }
+
+        /** The length of the Data */
+        public synchronized final int getDataLen() {
+            return dataLen;
+        }
+
+        /** The length of the Record's value */
+        public synchronized void setRecordLen(int recordLen) {
+            this.recordLen = recordLen;
+            dirty = true;
+        }
+
+        /** The length of the Record's value */
+        public synchronized final int getRecordLen() {
+            return recordLen;
+        }
+
+        /** The next page for this Record (if overflowed) */
+        public synchronized final void setNextPage(long nextPage) {
+            this.nextPage = nextPage;
+            dirty = true;
+        }
+
+        /** The next page for this Record (if overflowed) */
+        public synchronized final long getNextPage() {
+            return nextPage;
+        }
+    }
+
+    /**
+     * The object wrapping page number.
+     */
+    protected static class PageKey implements Comparable {
+
+        /**
+         * This page number
+         */
+        protected final long pageNum;
+
+
+        public PageKey(long pageNum) {
+            this.pageNum = pageNum;
+        }
+
+        // No synchronization - pageNum is final
+        public long getPageNum() {
+            return pageNum;
+        }
+
+        // No synchronization: pageNum is final.
+        public int compareTo(Object o) {
+            // Subtracting and casting to int can overflow for widely
+            // separated page numbers; compare explicitly instead.
+            long other = ((PageKey) o).pageNum;
+            return pageNum < other ? -1 : (pageNum == other ? 0 : 1);
+        }
+
+        /**
+         * Return page hash code, which is hash code of its {@link #pageNum}.
+         *
+         * @return Page hash code
+         */
+        public int hashCode() {
+            // Unroll new Long(pageNum).hashCode(), which uses an unsigned shift
+            return (int) (pageNum ^ (pageNum >>> 32));
+        }
+
+        /**
+         * Pages are equal if they are the same or have equal pageNum.
+         *
+         * @param obj Another page
+         * @return true if pages are equal
+         */
+        public boolean equals(Object obj) {
+            if (obj == this) {
+                return true;
+            }
+
+            if (obj instanceof PageKey) {
+                return pageNum == ((PageKey) obj).pageNum;
+            }
+
+            return false;
+        }
+    }
+
+    /**
+     * Paged file's page
+     */
+    protected final class Page extends PageKey {
+
+        /**
+         * The Header for this Page
+         */
+        private final PageHeader header;
+
+        /**
+         * The offset into the file that this page starts
+         */
+        private final long offset;
+
+        /**
+         * The data for this page. Null if page is not loaded.
+         */
+        private byte[] data;
+
+        /**
+         * The position (relative) of the Key in the data array
+         */
+        private int keyPos;
+
+        /**
+         * The position (relative) of the Data in the data array
+         */
+        private int dataPos;
+
+
+        private Page(long pageNum) {
+            super(pageNum);
+            this.header = createPageHeader();
+            this.offset = fileHeader.headerSize + (pageNum * fileHeader.pageSize);
+        }
+
+        /**
+         * Reads the page into memory once; subsequent calls are no-ops.
+         */
+        public synchronized void read() throws IOException {
+            if (data == null) {
+                RandomAccessFile raf = null;
+                try {
+                    byte[] data = new byte[fileHeader.pageSize];
+                    raf = getDescriptor();
+                    raf.seek(this.offset);
+                    // A single read() need not fill the buffer; loop until
+                    // the page is fully read or EOF is reached (the page may
+                    // not have been flushed to disk yet).
+                    int total = 0;
+                    while (total < data.length) {
+                        int n = raf.read(data, total, data.length - total);
+                        if (n == -1) {
+                            break;
+                        }
+                        total += n;
+                    }
+
+                    // Read in the header
+                    ByteArrayInputStream bis = new ByteArrayInputStream(data);
+                    this.header.read(new DataInputStream(bis));
+
+                    this.keyPos = fileHeader.pageHeaderSize;
+                    this.dataPos = this.keyPos + this.header.keyLen;
+
+                    // Successfully read all the data
+                    this.data = data;
+                } finally {
+                    putDescriptor(raf);
+                }
+            }
+        }
+
+        /**
+         * Writes the header into this.data and adds this page to the set of
+         * dirty pages.
+         */
+        public void write() throws IOException {
+            // Serialize the header into this.data
+            synchronized (this) {
+                ByteArrayOutputStream bos = new ByteArrayOutputStream(fileHeader.getPageHeaderSize());
+                header.write(new DataOutputStream(bos));
+                byte[] b = bos.toByteArray();
+                System.arraycopy(b, 0, data, 0, b.length);
+            }
+
+            // Add to the list of dirty pages
+            Paged.this.addDirty(this);
+        }
+
+        /**
+         * Flushes content of the dirty page into the file
+         */
+        public synchronized void flush() throws IOException {
+            RandomAccessFile raf = null;
+            try {
+                raf = getDescriptor();
+                if (this.offset >= raf.length()) {
+                    // Grow the file
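+                    // Extend to (totalCount * 3/2 + 1) pages rather than just
+                    // past this page, so the file grows in larger steps as
+                    // pages are appended.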
+                    long o = fileHeader.headerSize + (fileHeader.totalCount * 3 / 2 + 1) * fileHeader.pageSize - 1;
+                    raf.seek(o);
+                    raf.writeByte(0);
+                }
+                raf.seek(this.offset);
+                raf.write(this.data);
+            } finally {
+                putDescriptor(raf);
+            }
+        }
+
+        // No synchronization - header is final
+        public PageHeader getPageHeader() {
+            return this.header;
+        }
+
+        public synchronized void setKey(Key key) {
+            header.setKey(key);
+            // Insert the key into the data array.
+            key.copyTo(this.data, this.keyPos);
+
+            // Set the start of data to skip over the key.
+            this.dataPos = this.keyPos + header.keyLen;
+        }
+
+        public synchronized Key getKey() {
+            if (header.keyLen == 0) {
+                return null;
+            }
+
+            return new Key(this.data, this.keyPos, header.keyLen);
+        }
+
+        public synchronized void streamTo(OutputStream os) throws IOException {
+            if (header.dataLen > 0) {
+                os.write(this.data, this.dataPos, header.dataLen);
+            }
+        }
+
+        public synchronized void streamFrom(InputStream is) throws IOException {
+            // available() is only a hint for general streams; callers are
+            // expected to supply streams (e.g. in-memory ones) where it
+            // reports the remaining length.
+            int avail = is.available();
+            header.dataLen = fileHeader.workSize - header.keyLen;
+            if (avail < header.dataLen) {
+                header.dataLen = avail;
+            }
+            if (header.dataLen > 0) {
+                // A single read() need not return all requested bytes
+                int total = 0;
+                while (total < header.dataLen) {
+                    int n = is.read(this.data, this.keyPos + header.keyLen + total, header.dataLen - total);
+                    if (n == -1) {
+                        break;
+                    }
+                    total += n;
+                }
+            }
+        }
+    }
+
+    public int getDescriptorsMax() {
+        return descriptorsMax;
+    }
+
+    public void setDescriptorsMax(int descriptorsMax) {
+        this.descriptorsMax = descriptorsMax;
+    }
+
+    public int getPageSize() {
+        return pageSize;
+    }
+
+    public void setPageSize(int pageSize) {
+        this.pageSize = pageSize;
+    }
+
+    public long getPageCount() {
+        return pageCount;
+    }
+
+    public void setPageCount(long pageCount) {
+        this.pageCount = pageCount;
+    }
+
+    public short getMaxKeySize() {
+        return maxKeySize;
+    }
+
+    public void setMaxKeySize(short maxKeySize) {
+        this.maxKeySize = maxKeySize;
+    }
+}
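
A minimal standalone sketch (illustrative only, not part of the commit) of the page-offset arithmetic in Page's constructor above: pages are laid out back-to-back behind the file header, so a page's byte offset is headerSize + pageNum * pageSize. The class name and the 4096-byte sizes are assumptions chosen for the example.

    public class PageOffsetSketch {
        // Mirrors Page's constructor: offset = headerSize + pageNum * pageSize.
        static long pageOffset(short headerSize, int pageSize, long pageNum) {
            return headerSize + pageNum * (long) pageSize;
        }

        public static void main(String[] args) {
            // Assuming a 4096-byte file header and 4096-byte pages,
            // page 3 starts at 4096 + 3 * 4096 = 16384.
            System.out.println(pageOffset((short) 4096, 4096, 3));
        }
    }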

Added: activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/SizeableMemFiler.java
URL: http://svn.apache.org/viewvc/activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/SizeableMemFiler.java?rev=677302&view=auto
==============================================================================
--- activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/SizeableMemFiler.java (added)
+++ activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/SizeableMemFiler.java Wed Jul 16 08:18:20 2008
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: SizeableMemFiler.java 541516 2007-05-25 02:46:51Z vgritsenko $
+ */
+
+package org.apache.xindice.core.filer;
+
+import java.io.File;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.xindice.core.DBException;
+import org.apache.xindice.core.FaultCodes;
+import org.apache.xindice.core.data.Key;
+import org.apache.xindice.core.data.Record;
+import org.apache.xindice.core.data.RecordSet;
+import org.apache.xindice.core.data.Value;
+
+/**
+ * SizeableMemFiler is an in-memory Filer implementation for Xindice. It can
+ * be used for temporary collections and caching, and is essentially a thin
+ * layer over a HashMap whose initial capacity is configurable via a system
+ * property.
+ *
+ * @author Terry Rosenbaum (terry@amicas.com)
+ * @version $Revision: 541516 $, $Date: 2007-05-24 22:46:51 -0400 (Thu, 24 May 2007) $
+ */
+public final class SizeableMemFiler implements Filer {
+
+    public static final String INITIAL_SIZE_KEY = "org.apache.xindice.core.filer.SizeableMemFiler.INITIAL_SIZE";
+    public static final int INITIAL_SIZE = 89;
+
+    private Map hashTable = null;
+    private boolean opened = false;
+    private boolean readOnly = false;
+
+
+    public SizeableMemFiler() {
+        int aSize = INITIAL_SIZE;
+        String aSizeString = System.getProperty(INITIAL_SIZE_KEY);
+        if (aSizeString != null) {
+            try {
+                aSize = Integer.parseInt(aSizeString);
+            } catch (NumberFormatException anException) {
+                // Malformed property value: fall back to the default
+                aSize = INITIAL_SIZE;
+            }
+        }
+        hashTable = Collections.synchronizedMap(new HashMap(aSize));
+    }
+
+    public SizeableMemFiler(Map hashTable, boolean readOnly) {
+        this.hashTable = hashTable;
+        this.readOnly = readOnly;
+    }
+
+    public SizeableMemFiler(Map hashTable) {
+        this(hashTable, false);
+    }
+
+    public void setLocation(File root, String location) {
+    }
+
+    public String getName() {
+        return "SizeableMemFiler";
+    }
+
+    private void checkOpened() throws DBException {
+        if (!opened) {
+            throw new FilerException(FaultCodes.COL_COLLECTION_CLOSED, "Filer is closed");
+        }
+    }
+
+    private void checkReadOnly() throws DBException {
+        if (readOnly) {
+            throw new FilerException(FaultCodes.COL_COLLECTION_READ_ONLY, "Filer is read-only");
+        }
+    }
+
+    public boolean create() {
+        hashTable.clear();
+        return true;
+    }
+
+    public boolean open() {
+        opened = true;
+        return opened;
+    }
+
+    public boolean isOpened() {
+        return opened;
+    }
+
+    public boolean exists() {
+        return true;
+    }
+
+    public boolean drop() {
+        hashTable.clear();
+        opened = false;
+        return !opened;
+    }
+
+    public boolean close() {
+        opened = false;
+        return !opened;
+    }
+
+    public void flush() {
+    }
+
+    public Record readRecord(Key key) throws DBException {
+        return readRecord(key, false);
+    }
+
+    public Record readRecord(Key key, boolean metaOnly) throws DBException {
+        if (key == null || key.getLength() == 0) {
+            return null;
+        }
+        checkOpened();
+        return (Record) hashTable.get(key);
+    }
+
+    public Record writeRecord(Key key, Value value) throws DBException {
+        if (key == null || key.getLength() == 0) {
+            throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Invalid key: '" + key + "'");
+        }
+        if (value == null) {
+            throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Invalid null value");
+        }
+        checkOpened();
+        checkReadOnly();
+        // Store and return the same Record instead of allocating it twice
+        Record record = new Record(key, value);
+        hashTable.put(key, record);
+        return record;
+    }
+
+    public boolean deleteRecord(Key key) throws DBException {
+        if (key == null || key.getLength() == 0) {
+            return false;
+        }
+        checkOpened();
+        checkReadOnly();
+        return hashTable.remove(key) != null;
+    }
+
+    public long getRecordCount() throws DBException {
+        checkOpened();
+        return hashTable.size();
+    }
+
+    public RecordSet getRecordSet() throws DBException {
+        checkOpened();
+        return new MemRecordSet();
+    }
+
+    /**
+     * MemRecordSet
+     */
+
+    private class MemRecordSet implements RecordSet {
+        // Note: iteration is not protected against concurrent modification
+        // of the underlying map.
+        private Iterator iter = hashTable.values().iterator();
+
+        public synchronized boolean hasMoreRecords() throws DBException {
+            return iter.hasNext();
+        }
+
+        public synchronized Record getNextRecord() throws DBException {
+            checkOpened();
+            return (Record) iter.next();
+        }
+
+        public synchronized Value getNextValue() throws DBException {
+            checkOpened();
+            return ((Record) iter.next()).getValue();
+        }
+
+        public synchronized Key getNextKey() {
+            return ((Record) iter.next()).getKey();
+        }
+    }
+}
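
For reference, a hypothetical usage sketch of the filer defined above. Only methods visible in this file are relied on, except the Key(String) and Value(String) constructors, which are assumed here and not shown in this commit.

    import org.apache.xindice.core.data.Key;
    import org.apache.xindice.core.data.Record;
    import org.apache.xindice.core.data.Value;
    import org.apache.xindice.core.filer.SizeableMemFiler;

    public class MemFilerSketch {
        public static void main(String[] args) throws Exception {
            SizeableMemFiler filer = new SizeableMemFiler();
            filer.open();                                // required before record access
            Key key = new Key("doc1");                   // assumed constructor
            filer.writeRecord(key, new Value("<doc/>")); // assumed constructor
            Record record = filer.readRecord(key);
            System.out.println(filer.getRecordCount());  // prints 1
            filer.deleteRecord(key);
            filer.close();
        }
    }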

Added: activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/Streamable.java
URL: http://svn.apache.org/viewvc/activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/Streamable.java?rev=677302&view=auto
==============================================================================
--- activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/Streamable.java (added)
+++ activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/Streamable.java Wed Jul 16 08:18:20 2008
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: Streamable.java 541508 2007-05-25 01:54:12Z vgritsenko $
+ */
+
+package org.apache.xindice.core.filer;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Streamable is an interface implemented by objects that Filers and
+ * Indexers serialize to and from I/O streams.
+ *
+ * @version $Revision: 541508 $, $Date: 2007-05-24 21:54:12 -0400 (Thu, 24 May 2007) $
+ */
+public interface Streamable {
+
+    /**
+     * Reads the object state from the stream.
+     *
+     * @param is The DataInput to read from
+     * @throws IOException if an I/O error occurs
+     */
+    public void read(DataInput is) throws IOException;
+
+    /**
+     * Writes the object state to the stream.
+     *
+     * @param os The DataOutput to write to
+     * @throws IOException if an I/O error occurs
+     */
+    public void write(DataOutput os) throws IOException;
+}
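
As an illustration of the contract, a self-contained Streamable implementation that round-trips two fields through in-memory streams; the IntPair class is hypothetical. Fields must be read back in exactly the order write() emits them, as PageHeader above also does.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.xindice.core.filer.Streamable;

    public class IntPair implements Streamable {
        private int first;
        private int second;

        public void read(DataInput is) throws IOException {
            // Read in the same order write() emits
            first = is.readInt();
            second = is.readInt();
        }

        public void write(DataOutput os) throws IOException {
            os.writeInt(first);
            os.writeInt(second);
        }

        public static void main(String[] args) throws IOException {
            IntPair out = new IntPair();
            out.first = 7;
            out.second = 42;

            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            out.write(new DataOutputStream(bos));

            IntPair in = new IntPair();
            in.read(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
            System.out.println(in.first + ", " + in.second); // 7, 42
        }
    }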

Added: activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/package.html
URL: http://svn.apache.org/viewvc/activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/package.html?rev=677302&view=auto
==============================================================================
--- activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/package.html (added)
+++ activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/filer/package.html Wed Jul 16 08:18:20 2008
@@ -0,0 +1,28 @@
+<!--
+  - Licensed to the Apache Software Foundation (ASF) under one or more
+  - contributor license agreements.  See the NOTICE file distributed with
+  - this work for additional information regarding copyright ownership.
+  - The ASF licenses this file to You under the Apache License, Version 2.0
+  - (the "License"); you may not use this file except in compliance with
+  - the License.  You may obtain a copy of the License at
+  -
+  -    http://www.apache.org/licenses/LICENSE-2.0
+  -
+  - Unless required by applicable law or agreed to in writing, software
+  - distributed under the License is distributed on an "AS IS" BASIS,
+  - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  - See the License for the specific language governing permissions and
+  - limitations under the License.
+  -
+  - $Id: package.html 511410 2007-02-25 02:47:42Z vgritsenko $
+  -->
+
+<html>
+  <head>
+    <title>Xindice Filers.</title>
+  </head>
+  <body>
+    <p>Defines the Xindice Filer interface and implements several Filers,
+    including BTreeFiler and HashFiler.</p>
+  </body>
+</html>
+
+

Added: activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/package.html
URL: http://svn.apache.org/viewvc/activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/package.html?rev=677302&view=auto
==============================================================================
--- activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/package.html (added)
+++ activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/core/package.html Wed Jul 16 08:18:20 2008
@@ -0,0 +1,27 @@
+<!--
+  - Licensed to the Apache Software Foundation (ASF) under one or more
+  - contributor license agreements.  See the NOTICE file distributed with
+  - this work for additional information regarding copyright ownership.
+  - The ASF licenses this file to You under the Apache License, Version 2.0
+  - (the "License"); you may not use this file except in compliance with
+  - the License.  You may obtain a copy of the License at
+  -
+  -    http://www.apache.org/licenses/LICENSE-2.0
+  -
+  - Unless required by applicable law or agreed to in writing, software
+  - distributed under the License is distributed on an "AS IS" BASIS,
+  - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  - See the License for the specific language governing permissions and
+  - limitations under the License.
+  -
+  - $Id: package.html 511410 2007-02-25 02:47:42Z vgritsenko $
+  -->
+
+<html>
+  <head>
+    <title>The Xindice Core.</title>
+  </head>
+  <body>
+    <p>This is the top-level package for all Xindice Core functionality.</p>
+  </body>
+</html>
+
+

Added: activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/util/FileCache.java
URL: http://svn.apache.org/viewvc/activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/util/FileCache.java?rev=677302&view=auto
==============================================================================
--- activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/util/FileCache.java (added)
+++ activemq/sandbox/xindice-stripped/src/main/java/org/apache/xindice/util/FileCache.java Wed Jul 16 08:18:20 2008
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: FileCache.java 541508 2007-05-25 01:54:12Z vgritsenko $
+ */
+
+package org.apache.xindice.util;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.Map;
+import java.util.WeakHashMap;
+
+/**
+ * FileCache caches the content of files in memory.
+ *
+ * @version $Revision: 541508 $, $Date: 2007-05-24 21:54:12 -0400 (Thu, 24 May 2007) $
+ */
+public class FileCache {
+
+    /**
+     * Caches FileCacheInfo objects, keyed by File. Note that WeakHashMap is
+     * not thread-safe; concurrent callers must synchronize externally.
+     */
+    private final Map cache = new WeakHashMap();
+
+    public FileCache() {
+    }
+
+    public final boolean isInCache(File file) {
+        return (cache.get(file) != null);
+    }
+
+    public final boolean isInCache(String name) {
+        return (cache.get(new File(name)) != null);
+    }
+
+    public final boolean isModified(String name) {
+        return isModified(new File(name));
+    }
+
+    public final boolean isModified(File file) {
+        FileCacheInfo finfo = (FileCacheInfo) cache.get(file);
+        return !file.exists()
+                || finfo == null
+                || (file.lastModified() != finfo.lastModified);
+    }
+
+    public final byte[] getFile(String name) throws IOException {
+        return getFile(new File(name));
+    }
+
+    public final byte[] getFile(File file) throws IOException {
+        if (!file.exists()) {
+            return null;
+        }
+
+        FileCacheInfo finfo = (FileCacheInfo) cache.get(file);
+        long lastmod = file.lastModified();
+        if (finfo == null || finfo.lastModified != lastmod) {
+            // available() is only an estimate and a single read() need not
+            // fill the buffer: size from the file and read in a loop.
+            byte[] content = new byte[(int) file.length()];
+            FileInputStream fis = new FileInputStream(file);
+            try {
+                int total = 0;
+                int n;
+                while (total < content.length
+                        && (n = fis.read(content, total, content.length - total)) != -1) {
+                    total += n;
+                }
+            } finally {
+                fis.close();
+            }
+            finfo = new FileCacheInfo(file, lastmod, content);
+            cache.put(file, finfo);
+            return content;
+        } else {
+            return finfo.content;
+        }
+    }
+
+    /**
+     * Cache entry: a file's last-modified timestamp and its content.
+     */
+    private static class FileCacheInfo {
+        public File file;
+        public long lastModified = 0;
+        public byte[] content;
+
+        public FileCacheInfo(File file, long lastModified, byte[] content) {
+            this.file = file;
+            this.lastModified = lastModified;
+            this.content = content;
+        }
+    }
+}
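
A hypothetical sketch exercising FileCache with a temporary file; the second getFile call returns the cached byte array because the file's timestamp is unchanged.

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;

    import org.apache.xindice.util.FileCache;

    public class FileCacheSketch {
        public static void main(String[] args) throws IOException {
            File file = File.createTempFile("filecache", ".txt");
            FileOutputStream fos = new FileOutputStream(file);
            fos.write("hello".getBytes());
            fos.close();

            FileCache cache = new FileCache();
            byte[] first = cache.getFile(file);           // read from disk and cached
            byte[] second = cache.getFile(file);          // timestamp unchanged: cache hit
            System.out.println(first == second);          // true while the entry is cached
            System.out.println(cache.isModified(file));   // false
            file.delete();
        }
    }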


