activemq-commits mailing list archives

From chir...@apache.org
Subject svn commit: r677944 [9/11] - in /activemq/sandbox/kahadb: ./ src/ src/main/ src/main/java/ src/main/java/org/ src/main/java/org/apache/ src/main/java/org/apache/kahadb/ src/main/java/org/apache/kahadb/impl/ src/main/java/org/apache/kahadb/impl/async/ s...
Date Fri, 18 Jul 2008 15:49:52 GMT
Added: activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/fs/LockManager.java
URL: http://svn.apache.org/viewvc/activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/fs/LockManager.java?rev=677944&view=auto
==============================================================================
--- activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/fs/LockManager.java (added)
+++ activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/fs/LockManager.java Fri Jul 18 08:49:48 2008
@@ -0,0 +1,190 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: LockManager.java 541508 2007-05-25 01:54:12Z vgritsenko $
+ */
+
+package org.apache.kahadb.xindice.fs;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * LockManager manages resource locks.  A resource is any shared object
+ * that can be identified by a long id (or, more generally, by any key
+ * object).  A LockManager is constructed with the maximum number of
+ * concurrent shared holders a single resource may have; an exclusive
+ * lock claims all of those slots at once.
+ *
+ * @version $Revision: 541508 $, $Date: 2007-05-24 21:54:12 -0400 (Thu, 24 May 2007) $
+ */
+public final class LockManager {
+
+    private static final Log log = LogFactory.getLog(LockManager.class);
+
+    private int maxLocks = 0;
+    private Map locks = new HashMap(); // Long to LockInfo
+
+    public LockManager(int maxLocks) {
+        this.maxLocks = maxLocks;
+    }
+
+    /**
+     * acquireSharedLock acquires a shared lock on a resource.  Shared locks
+     * are typically used for reads.
+     */
+    public void acquireSharedLock(long id) {
+        acquireSharedLock(Long.valueOf(id));
+    }
+
+    /**
+     * acquireSharedLock acquires a shared lock on a resource.  Shared locks
+     * are typically used for reads.
+     */
+    public void acquireSharedLock(Object key) {
+        LockInfo info;
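+        // Look up, or lazily create, the per-resource LockInfo under the manager-wide lock.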
+        synchronized (this) {
+            info = (LockInfo) locks.get(key);
+            if (info == null) {
+                info = new LockInfo();
+                locks.put(key, info);
+            }
+        }
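+        // Each shared holder consumes one of maxLocks slots; block while none are free.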
+        synchronized (info) {
+            while (info.count >= maxLocks) {
+                try {
+                    info.wait();
+                } catch (Exception e) {
+                    if (log.isWarnEnabled()) {
+                        log.warn("ignored exception", e);
+                    }
+                }
+            }
+            info.count++;
+        }
+    }
+
+    /**
+     * releaseSharedLock releases a shared lock on a resource.  Shared locks
+     * are typically used for reads.
+     */
+    public void releaseSharedLock(long id) {
+        releaseSharedLock(Long.valueOf(id));
+    }
+
+    /**
+     * releaseSharedLock releases a shared lock on a resource.  Shared locks
+     * are typically used for reads.
+     */
+    public void releaseSharedLock(Object key) {
+        LockInfo info;
+        synchronized (this) {
+            info = (LockInfo) locks.get(key);
+        }
+        if (info == null) {
+            return;
+        }
+        
+        synchronized (info) {
+            info.count--;
+            info.notify();
+            if (info.count == 0) {
+                synchronized (this) {
+                    locks.remove(key);
+                }
+            }
+        }
+    }
+
+    /**
+     * acquireExclusiveLock acquires an exclusive lock on a resource.  Exclusive
+     * locks are typically used for writes.
+     */
+    public void acquireExclusiveLock(long id) {
+        acquireExclusiveLock(Long.valueOf(id));
+    }
+
+    /**
+     * acquireExclusiveLock acquires an exclusive lock on a resource.  Exclusive
+     * locks are typically used for writes.
+     */
+    public void acquireExclusiveLock(Object key) {
+        LockInfo info;
+        synchronized (this) {
+            info = (LockInfo) locks.get(key);
+            if (info == null) {
+                info = new LockInfo();
+                locks.put(key, info);
+            }
+        }
+        synchronized (info) {
+            // An exclusive lock must own all maxLocks slots.  Claim every
+            // slot that is currently free, then wait and claim slots as they
+            // are released until all of them have been accumulated.  (The
+            // original loop only ran when the resource was fully saturated,
+            // so an uncontended exclusive lock acquired nothing.)
+            int total = maxLocks - info.count;
+            info.count = maxLocks;
+            while (total < maxLocks) {
+                try {
+                    info.wait();
+                } catch (Exception e) {
+                    if (log.isWarnEnabled()) {
+                        log.warn("ignored exception", e);
+                    }
+                }
+                total += maxLocks - info.count;
+                info.count = maxLocks;
+            }
+        }
+    }
+
+    /**
+     * releaseExclusiveLock releases an exclusive lock on a resource.  Exclusive
+     * locks are typically used for writes.
+     */
+    public void releaseExclusiveLock(long id) {
+        releaseExclusiveLock(Long.valueOf(id));
+    }
+
+    /**
+     * releaseExclusiveLock releases an exclusive lock on a resource.  Exclusive
+     * locks are typically used for writes.
+     */
+    public void releaseExclusiveLock(Object key) {
+        LockInfo info;
+        synchronized (this) {
+            info = (LockInfo) locks.get(key);
+        }
+        if (info == null) {
+            return;
+        }
+        
+        synchronized (info) {
+            info.count = 0;
+            // Releasing an exclusive lock frees all slots at once, so wake
+            // every waiter, not just one.
+            info.notifyAll();
+            // The count was just reset, so the entry can always be removed.
+            synchronized (this) {
+                locks.remove(key);
+            }
+        }
+    }
+
+    /**
+     * LockInfo is just an int wrapper.
+     */
+    private static final class LockInfo {
+        public int count = 0;
+    }
+}
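
For context, here is a minimal sketch of how a caller might pair these
acquire/release methods. This is hypothetical usage, not part of the commit;
it relies only on the public methods shown above:

    import org.apache.kahadb.xindice.fs.LockManager;

    public class LockManagerExample {
        public static void main(String[] args) {
            LockManager lm = new LockManager(8);  // at most 8 concurrent shared holders
            long resourceId = 42L;

            // Reader: a shared lock around a read
            lm.acquireSharedLock(resourceId);
            try {
                // ... read the shared resource ...
            } finally {
                lm.releaseSharedLock(resourceId);
            }

            // Writer: an exclusive lock claims all slots, excluding readers
            lm.acquireExclusiveLock(resourceId);
            try {
                // ... mutate the shared resource ...
            } finally {
                lm.releaseExclusiveLock(resourceId);
            }
        }
    }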

Added: activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/hash/HashIndex.java
URL: http://svn.apache.org/viewvc/activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/hash/HashIndex.java?rev=677944&view=auto
==============================================================================
--- activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/hash/HashIndex.java (added)
+++ activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/hash/HashIndex.java Fri Jul 18 08:49:48 2008
@@ -0,0 +1,495 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: HashFiler.java 571938 2007-09-02 10:14:13Z vgritsenko $
+ */
+
+package org.apache.kahadb.xindice.hash;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.kahadb.xindice.FaultCodes;
+import org.apache.kahadb.xindice.Index;
+import org.apache.kahadb.xindice.IndexException;
+import org.apache.kahadb.xindice.Key;
+import org.apache.kahadb.xindice.Record;
+import org.apache.kahadb.xindice.RecordSet;
+import org.apache.kahadb.xindice.Value;
+import org.apache.kahadb.xindice.page.Paged;
+
+/**
+ * HashIndex is an Index implementation based on the Paged class.  By
+ * extending Paged, HashIndex inherits the ability to maintain Record
+ * metadata such as creation and modification time.  It also provides
+ * quite a bit more flexibility in its ability to retrieve blocks of
+ * data and allocate Record space.
+ *
+ * <br/>
+ * HashIndex has the following configuration attributes:
+ * <ul>
+ * <li><strong>pagesize</strong>: Size of the page used by the paged file.
+ *     The default page size is 4096 bytes. This parameter can be set only
+ *     before the paged file is created; once created, it cannot be
+ *     changed.</li>
+ * <li><strong>pagecount</strong>: This parameter has a special meaning
+ *     for HashIndex: it determines the size of the hash table's main
+ *     storage, i.e. the number of pages the filer is created with.
+ *     The default is 1024. Note that making it too small hurts the
+ *     efficiency of the hash table.</li>
+ * <li><strong>maxkeysize</strong>: Maximum allowed size of a key.
+ *     The default maximum key size is 256 bytes.</li>
+ * <li><strong>max-descriptors</strong>: The maximum number of
+ *     simultaneously open file descriptors this paged file may have.
+ *     Several descriptors are needed to provide multithreaded access
+ *     to the underlying file, but too large a number limits the number
+ *     of collections you can open. The default value is 16
+ *     (DEFAULT_DESCRIPTORS_MAX).</li>
+ * </ul>
+ *
+ * @version $Revision: 571938 $, $Date: 2007-09-02 06:14:13 -0400 (Sun, 02 Sep 2007) $
+ */
+public class HashIndex extends Paged
+                       implements Index {
+
+    private static final Log log = LogFactory.getLog(HashIndex.class);
+
+    /**
+     * Record page status
+     */
+    protected static final byte RECORD = 1;
+
+    private HashFileHeader fileHeader;
+
+
+    public HashIndex() {
+        super();
+        fileHeader = (HashFileHeader) getFileHeader();
+    }
+
+    public void setLocation(File root, String location) {
+        setFile(new File(root, location + ".tbl"));
+    }
+
+    public String getName() {
+        return "HashFiler";
+    }
+
+    @Override
+    protected void initFileHeader() {
+        super.initFileHeader();
+        fileHeader.setTotalCount(fileHeader.getPageCount());
+    }
+    
+    private Page seekRecordPage(Key key) throws IOException {
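+        // Hash the key to a primary page, then walk the collision chain looking for an exact match.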
+        // Mask off the sign bit rather than Math.abs(): Math.abs(Integer.MIN_VALUE) is negative.
+        int hash = key.hashCode() & Integer.MAX_VALUE;
+        long pageNum = hash % fileHeader.getPageCount();
+        Page p = getPage(pageNum);
+        synchronized (p) {
+            while (true) {
+                HashPageHeader ph = (HashPageHeader) p.getPageHeader();
+                if (ph.getStatus() == RECORD && ph.getKeyHash() == key.hashCode() && p.getKey().equals(key)) {
+                    return p;
+                }
+
+                pageNum = ph.getNextCollision();
+                if (pageNum == NO_PAGE) {
+                    return null;
+                }
+                p = getPage(pageNum);
+            }
+        }
+    }
+
+    public Record readRecord(Key key) throws IndexException {
+        return readRecord(key, false);
+    }
+
+    public Record readRecord(Key key, boolean metaOnly) throws IndexException {
+        if (key == null || key.getLength() == 0) {
+            return null;
+        }
+        checkOpened();
+        try {
+            Page startPage = seekRecordPage(key);
+            if (startPage != null) {
+                Value v = metaOnly ? null : readValue(startPage);
+                return new Record(key, v);
+            }
+        } catch (Exception e) {
+            if (log.isWarnEnabled()) {
+                log.warn("ignored exception", e);
+            }
+        }
+        return null;
+    }
+
+    private Page seekInsertionPage(Key key) throws IOException {
+        // Calculate hash and retrieve chain head page
+        int hash = key.hashCode() & Integer.MAX_VALUE;
+        Page p = getPage(hash % fileHeader.getPageCount());
+
+        // Synchronize by chain head page
+        synchronized (p) {
+            HashPageHeader ph;
+            while (true) {
+                ph = (HashPageHeader) p.getPageHeader();
+                if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED
+                        || (ph.getStatus() == RECORD && ph.getKeyHash() == key.hashCode() && p.getKey().equals(key))) {
+                    // Found free page
+                    break;
+                }
+
+                // Check the chain
+                long pageNum = ph.getNextCollision();
+                if (pageNum == NO_PAGE) {
+                    // Reached end of chain, add new page
+                    Page np = getFreePage();
+
+                    ph.setNextCollision(np.getPageNum());
+                    p.write();
+
+                    p = np;
+                    ph = (HashPageHeader) p.getPageHeader();
+                    ph.setNextCollision(NO_PAGE);
+                    break;
+                }
+
+                // Go to the next page in chain
+                p = getPage(pageNum);
+            }
+
+            // Here we have a page
+            long t = System.currentTimeMillis();
+            if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED) {
+                // This is a new Record
+                fileHeader.incRecordCount();
+                ph.setCreated(t);
+            }
+            ph.setModified(t);
+            ph.setStatus(RECORD);
+        }
+
+        return p;
+    }
+
+    public Record writeRecord(Key key, Value value) throws IndexException {
+        // Check that key is not larger than space on the page
+        if (key == null || key.getLength() == 0 || key.getLength() > fileHeader.getPageSize() - fileHeader.getPageHeaderSize()) {
+            throw new IndexException(FaultCodes.DBE_CANNOT_CREATE, "Invalid key: '" + key + "'");
+        }
+        if (value == null) {
+            throw new IndexException(FaultCodes.DBE_CANNOT_CREATE, "Invalid null value");
+        }
+        checkOpened();
+        Page p = null;
+        try {
+            p = seekInsertionPage(key);
+            p.setKey(key);
+            writeValue(p, value);
+        } catch (Exception e) {
+            // FIXME It's not enough. At this point, new record could have been added to the chain
+            if (p != null) {
+                p.getPageHeader().setStatus(DELETED);
+                try {
+                    p.write();
+                } catch (IOException ignored) {
+                    // Double exception
+                }
+            }
+
+            throw new IndexException(FaultCodes.DBE_CANNOT_CREATE, "Exception: " + e, e);
+        }
+
+        flush();
+        return new Record(key, value);
+    }
+
+    /**
+     * Mark pages in primary store as 'DELETED', and let Paged handle all
+     * overflow pages.
+     */
+    protected void unlinkPages(Page page) throws IOException {
+        // Handle the page if it's in primary space by setting its status to
+        // DELETED and freeing any overflow pages linked to it.
+        if (page.getPageNum() < fileHeader.getPageCount()) {
+            long nextPage = page.getPageHeader().getNextPage();
+            page.getPageHeader().setStatus(DELETED);
+            page.getPageHeader().setNextPage(NO_PAGE);
+            page.write();
+
+            // If there are no chained pages, we are done.
+            if (nextPage == NO_PAGE) {
+                return;
+            }
+
+            // Free the chained pages from the page that was just removed
+            page = getPage(nextPage);
+        }
+
+        super.unlinkPages(page);
+    }
+
+    public boolean deleteRecord(Key key) throws IndexException {
+        if (key == null || key.getLength() == 0) {
+            return false;
+        }
+        checkOpened();
+        try {
+            int hash = key.hashCode() & Integer.MAX_VALUE;
+            long pageNum = hash % fileHeader.getPageCount();
+
+            Page page = getPage(pageNum);
+            synchronized (page) {
+                HashPageHeader prevHead = null;
+                HashPageHeader pageHead;
+
+                Page prev = null;
+                while (true) {
+                    pageHead = (HashPageHeader) page.getPageHeader();
+                    if (pageHead.getStatus() == RECORD && pageHead.getKeyHash() == key.hashCode() && page.getKey().equals(key)) {
+                        break;
+                    }
+
+                    pageNum = pageHead.getNextCollision();
+                    if (pageNum == NO_PAGE) {
+                        return false;
+                    }
+                    prev = page;
+                    prevHead = pageHead;
+                    page = getPage(pageNum);
+                }
+
+                if (prev != null) {
+                    prevHead.setNextCollision(pageHead.getNextCollision());
+                    pageHead.setNextCollision(NO_PAGE);
+                    prev.write();
+                }
+
+                unlinkPages(page);
+            }
+
+            fileHeader.decRecordCount();
+            flush();
+
+            return true;
+        } catch (Exception e) {
+            if (log.isWarnEnabled()) {
+                log.warn("ignored exception", e);
+            }
+        }
+        return false;
+    }
+
+    public long getRecordCount() throws IndexException {
+        checkOpened();
+        return fileHeader.getRecordCount();
+    }
+
+    public RecordSet getRecordSet() throws IndexException {
+        checkOpened();
+        return new HashFilerRecordSet();
+    }
+
+    /**
+     * HashFilerRecordSet that does not use a BTree.
+     */
+    private class HashFilerRecordSet implements RecordSet {
+        private List keys = new ArrayList();
+        private Iterator iter;
+
+        public HashFilerRecordSet() {
+            try {
+                long pageNum = 0;
+
+                // Iterate over main hash table...
+                while (pageNum < fileHeader.getPageCount()) {
+                    Page p = getPage(pageNum);
+                    HashPageHeader ph = (HashPageHeader) p.getPageHeader();
+                    if (ph.getStatus() == RECORD) {
+                        keys.add(p.getKey());
+                    }
+
+                    // ... and over collision chains
+                    while (ph.getNextCollision() != NO_PAGE) {
+                        long pn = ph.getNextCollision();
+                        p = getPage(pn);
+                        ph = (HashPageHeader) p.getPageHeader();
+                        if (ph.getStatus() == RECORD) {
+                            keys.add(p.getKey());
+                        }
+                    }
+
+                    pageNum++;
+                }
+
+                iter = keys.iterator();
+            } catch (Exception e) {
+                if (log.isWarnEnabled()) {
+                    log.warn("ignored exception", e);
+                }
+            }
+        }
+
+        public synchronized Key getNextKey() {
+            return (Key) iter.next();
+        }
+
+        public synchronized Record getNextRecord() throws IndexException {
+            return readRecord((Key) iter.next(), false);
+        }
+
+        public synchronized Value getNextValue() throws IndexException {
+            return getNextRecord().getValue();
+        }
+
+        public synchronized boolean hasMoreRecords() {
+            return iter.hasNext();
+        }
+    }
+
+    ////////////////////////////////////////////////////////////////////
+
+    public FileHeader createFileHeader() {
+        return new HashFileHeader();
+    }
+
+    public PageHeader createPageHeader() {
+        return new HashPageHeader();
+    }
+
+    /**
+     * HashFileHeader
+     */
+    private final class HashFileHeader extends FileHeader {
+        private long totalBytes;
+
+        public HashFileHeader() {
+            super();
+            // For hash filer, totalCount >= pageCount. See initFileHeader().
+            setTotalCount(getPageCount());
+        }
+
+        protected synchronized void read(RandomAccessFile raf) throws IOException {
+            super.read(raf);
+            totalBytes = raf.readLong();
+        }
+
+        protected synchronized void write(RandomAccessFile raf) throws IOException {
+            super.write(raf);
+            raf.writeLong(totalBytes);
+        }
+
+        /** The total number of bytes in use by the file */
+        public synchronized void setTotalBytes(long totalBytes) {
+            this.totalBytes = totalBytes;
+            setDirty();
+        }
+
+        /** The total number of bytes in use by the file */
+        public synchronized long getTotalBytes() {
+            return totalBytes;
+        }
+
+        /** Adjust total number of bytes in use by the file */
+        public synchronized void addTotalBytes(int count) {
+            totalBytes += count;
+        }
+    }
+
+    /**
+     * HashPageHeader
+     */
+    protected final class HashPageHeader extends PageHeader {
+        private long created = 0;
+        private long modified = 0;
+        private long nextCollision = NO_PAGE;
+
+        public HashPageHeader() {
+        }
+
+        public HashPageHeader(DataInput dis) throws IOException {
+            super(dis);
+        }
+
+        public synchronized void read(DataInput dis) throws IOException {
+            super.read(dis);
+
+            if (getStatus() == UNUSED) {
+                return;
+            }
+
+            created = dis.readLong();
+            modified = dis.readLong();
+            nextCollision = dis.readLong();
+        }
+
+        public synchronized void write(DataOutput dos) throws IOException {
+            super.write(dos);
+            dos.writeLong(created);
+            dos.writeLong(modified);
+            dos.writeLong(nextCollision);
+        }
+
+        public synchronized void setRecordLen(int recordLen) {
+            fileHeader.addTotalBytes(recordLen - getRecordLen());
+            super.setRecordLen(recordLen);
+        }
+
+        /** Time when this record was created, in milliseconds since the epoch */
+        public synchronized void setCreated(long created) {
+            this.created = created;
+            setDirty();
+        }
+
+        /** Time when this record was created, in milliseconds since the epoch */
+        public synchronized long getCreated() {
+            return created;
+        }
+
+        /** Time when this record was last modified, in milliseconds since the epoch */
+        public synchronized void setModified(long modified) {
+            this.modified = modified;
+            setDirty();
+        }
+
+        /** Time when this record was last modified, in milliseconds since the epoch */
+        public synchronized long getModified() {
+            return modified;
+        }
+
+        /** The next page for a Record collision (if any) */
+        public synchronized void setNextCollision(long nextCollision) {
+            this.nextCollision = nextCollision;
+            setDirty();
+        }
+
+        /** The next page for a Record collision (if any) */
+        public synchronized long getNextCollision() {
+            return nextCollision;
+        }
+    }
+}
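
A rough sketch of the intended call sequence for HashIndex follows. This is
hypothetical usage, not part of the commit; it assumes the Xindice-style Key
and Value classes can be constructed from a String:

    import java.io.File;
    import org.apache.kahadb.xindice.Key;
    import org.apache.kahadb.xindice.Record;
    import org.apache.kahadb.xindice.Value;
    import org.apache.kahadb.xindice.hash.HashIndex;

    public class HashIndexExample {
        public static void main(String[] args) throws Exception {
            HashIndex index = new HashIndex();
            // The backing file becomes /tmp/kahadb/orders.tbl (setLocation appends ".tbl")
            index.setLocation(new File("/tmp/kahadb"), "orders");
            if (!index.exists()) {
                index.create();
            }
            index.open();

            // Key and Value taking a String is an assumption borrowed from Xindice
            index.writeRecord(new Key("order-1"), new Value("payload"));
            Record r = index.readRecord(new Key("order-1"));
            index.deleteRecord(new Key("order-1"));

            index.close();
        }
    }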

Added: activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/mem/MemIndex.java
URL: http://svn.apache.org/viewvc/activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/mem/MemIndex.java?rev=677944&view=auto
==============================================================================
--- activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/mem/MemIndex.java (added)
+++ activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/mem/MemIndex.java Fri Jul 18 08:49:48 2008
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: MemIndex.java 541516 2007-05-25 02:46:51Z vgritsenko $
+ */
+
+package org.apache.kahadb.xindice.mem;
+
+import java.io.File;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.kahadb.xindice.FaultCodes;
+import org.apache.kahadb.xindice.Index;
+import org.apache.kahadb.xindice.IndexException;
+import org.apache.kahadb.xindice.Key;
+import org.apache.kahadb.xindice.Record;
+import org.apache.kahadb.xindice.RecordSet;
+import org.apache.kahadb.xindice.Value;
+
+/**
+ * MemIndex is an in-memory Index implementation for Xindice. MemIndex can be
+ * used for temporary collections and caching. It is basically a layer on top
+ * of HashMap.
+ *
+ * @version $Revision: 541516 $, $Date: 2007-05-24 22:46:51 -0400 (Thu, 24 May 2007) $
+ */
+public final class MemIndex implements Index {
+    private Map<Key, Record> hashTable = null;
+    private boolean opened = false;
+    private boolean readOnly = false;
+
+    public MemIndex() {
+        hashTable = Collections.synchronizedMap(new HashMap<Key, Record>());
+    }
+
+    public MemIndex(Map<Key, Record> hashTable, boolean readOnly) {
+        this.hashTable = hashTable;
+        this.readOnly = readOnly;
+    }
+
+    public MemIndex(Map<Key, Record> hashTable) {
+        this(hashTable, false);
+    }
+
+    public void setLocation(File root, String location) {
+    }
+
+    public String getName() {
+        return "MemIndex";
+    }
+
+    private void checkOpened() throws IndexException {
+        if (!opened) {
+            throw new IndexException(FaultCodes.COL_COLLECTION_CLOSED, "Index is closed");
+        }
+    }
+
+    private void checkReadOnly() throws IndexException {
+        if (readOnly) {
+            throw new IndexException(FaultCodes.COL_COLLECTION_READ_ONLY, "Index is read-only");
+        }
+    }
+
+    public boolean create() {
+        hashTable.clear();
+        return true;
+    }
+
+    public boolean open() {
+        opened = true;
+        return opened;
+    }
+
+    public boolean isOpened() {
+        return opened;
+    }
+
+    public boolean exists() {
+        return true;
+    }
+
+    public boolean drop() {
+        hashTable.clear();
+        opened = false;
+        return !opened;
+    }
+
+    public boolean close() {
+        opened = false;
+        return !opened;
+    }
+
+    public void flush() {
+    }
+
+    public Record readRecord(Key key) throws IndexException {
+        return readRecord(key, false);
+    }
+
+    public Record readRecord(Key key, boolean metaOnly) throws IndexException {
+        if (key == null || key.getLength() == 0) {
+            return null;
+        }
+        checkOpened();
+        return hashTable.get(key);
+    }
+
+    public Record writeRecord(Key key, Value value) throws IndexException {
+        if (key == null || key.getLength() == 0) {
+            throw new IndexException(FaultCodes.DBE_CANNOT_CREATE, "Invalid key: '" + key + "'");
+        }
+        if (value == null) {
+            throw new IndexException(FaultCodes.DBE_CANNOT_CREATE, "Invalid null value");
+        }
+        checkOpened();
+        checkReadOnly();
+        Record record = new Record(key, value);
+        hashTable.put(key, record);
+        return record;
+    }
+
+    public boolean deleteRecord(Key key) throws IndexException {
+        if (key == null || key.getLength() == 0) {
+            return false;
+        }
+        checkOpened();
+        checkReadOnly();
+        return hashTable.remove(key) != null;
+    }
+
+    public long getRecordCount() throws IndexException {
+        checkOpened();
+        return hashTable.size();
+    }
+
+    public RecordSet getRecordSet() throws IndexException {
+        checkOpened();
+        return new MemRecordSet();
+    }
+
+    /**
+     * MemRecordSet
+     */
+
+    private class MemRecordSet implements RecordSet {
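+        // Note: iterating over a Collections.synchronizedMap view is not safe
+        // against concurrent modification; callers are assumed to serialize
+        // access to the record set externally.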
+        private Iterator<Record> iter = hashTable.values().iterator();
+
+        public synchronized boolean hasMoreRecords() throws IndexException {
+            return iter.hasNext();
+        }
+
+        public synchronized Record getNextRecord() throws IndexException {
+            checkOpened();
+            return iter.next();
+        }
+
+        public synchronized Value getNextValue() throws IndexException {
+            checkOpened();
+            return iter.next().getValue();
+        }
+
+        public synchronized Key getNextKey() {
+            return iter.next().getKey();
+        }
+    }
+}
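
MemIndex implements the same Index contract with no file I/O, so it can stand
in for HashIndex in tests or serve temporary collections. A minimal sketch,
hypothetical and not part of the commit, with the same String-constructor
assumption for Key and Value:

    import org.apache.kahadb.xindice.Key;
    import org.apache.kahadb.xindice.Value;
    import org.apache.kahadb.xindice.mem.MemIndex;

    public class MemIndexExample {
        public static void main(String[] args) throws Exception {
            MemIndex index = new MemIndex();
            index.open();  // no setLocation needed; storage is a synchronized HashMap
            index.writeRecord(new Key("k"), new Value("v"));
            System.out.println(index.getRecordCount());  // prints 1
            index.drop();  // clears the map and closes the index
        }
    }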

Added: activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/page/Paged.java
URL: http://svn.apache.org/viewvc/activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/page/Paged.java?rev=677944&view=auto
==============================================================================
--- activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/page/Paged.java (added)
+++ activemq/sandbox/kahadb/src/main/java/org/apache/kahadb/xindice/page/Paged.java Fri Jul 18 08:49:48 2008
@@ -0,0 +1,1456 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: Paged.java 571938 2007-09-02 10:14:13Z vgritsenko $
+ */
+
+package org.apache.kahadb.xindice.page;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.lang.ref.WeakReference;
+import java.util.Collection;
+import java.util.EmptyStackException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Stack;
+import java.util.WeakHashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.kahadb.xindice.FaultCodes;
+import org.apache.kahadb.xindice.IndexException;
+import org.apache.kahadb.xindice.Key;
+import org.apache.kahadb.xindice.Value;
+
+/**
+ * Paged is a paged file implementation that is the foundation for both
+ * the BTree class and the HashIndex. It provides flexible paged I/O and
+ * page caching functionality.
+ *
+ * <br>
+ * Paged has the following configuration attributes:
+ * <ul>
+ * <li><strong>pagesize</strong>: Size of the page used by the paged file.
+ *     The default page size is 4096 bytes. This parameter can be set only
+ *     before the paged file is created; once created, it cannot be
+ *     changed.</li>
+ * <li><strong>pagecount</strong>: Number of pages the filer will be
+ *     created with.</li>
+ * <li><strong>maxkeysize</strong>: Maximum allowed size of a key.
+ *     The default maximum key size is 256 bytes.</li>
+ * <li><strong>max-descriptors</strong>: The maximum number of
+ *     simultaneously open file descriptors this paged file may have.
+ *     Several descriptors are needed to provide multithreaded access
+ *     to the underlying file, but too large a number limits the number
+ *     of collections you can open. The default value is 16
+ *     (DEFAULT_DESCRIPTORS_MAX).</li>
+ * </ul>
+ *
+ * <br>FIXME: Currently it seems that maxkeysize is not used anywhere.
+ * <br>TODO: Introduce Paged interface, implementations.
+ *
+ * @version $Revision: 571938 $, $Date: 2007-09-02 06:14:13 -0400 (Sun, 02 Sep 2007) $
+ */
+public abstract class Paged {
+
+    private static final Log log = LogFactory.getLog(Paged.class);
+
+    /**
+     * The maximum number of pages that will be held in the dirty cache.
+     * Once the number reaches this limit, pages are flushed to disk.
+     */
+    private static final int MAX_DIRTY_SIZE = 128;
+
+    /**
+     * Name of the configuration attribute "pagesize"
+     */
+    protected static final String CONFIG_PAGESIZE = "pagesize";
+
+    /**
+     * Name of the configuration attribute "pagecount"
+     */
+    protected static final String CONFIG_PAGECOUNT = "pagecount";
+
+    /**
+     * Name of the configuration attribute "maxkeysize"
+     */
+    protected static final String CONFIG_KEYSIZE_MAX = "maxkeysize";
+
+    /**
+     * Name of the configuration attribute "max-descriptors"
+     */
+    protected static final String CONFIG_DESCRIPTORS_MAX = "max-descriptors";
+
+    /**
+     * Default value of the "pagesize".
+     */
+    private static final int DEFAULT_PAGESIZE = 4096;
+
+    /**
+     * Default value of the "pagecount".
+     */
+    private static final int DEFAULT_PAGECOUNT = 1024;
+
+    /**
+     * File header size
+     */
+    private static final int FILE_HEADER_SIZE = 4096;
+
+    /**
+     * Default value of the "maxkeysize".
+     */
+    private static final int DEFAULT_KEYSIZE_MAX = 256;
+
+    /**
+     * Default value of the maximum number of open random access files paged
+     * can have. This number balances resources utilization and parallelism of
+     * access to the paged file.
+     */
+    private static final int DEFAULT_DESCRIPTORS_MAX = 16;
+
+
+    /**
+     * Unused page status
+     */
+    protected static final byte UNUSED = 0;
+
+    /**
+     * Overflow page status
+     */
+    protected static final byte OVERFLOW = 126;
+
+    /**
+     * Deleted page status
+     */
+    protected static final byte DELETED = 127;
+
+    /**
+     * Page ID of non-existent page
+     */
+    protected static final int NO_PAGE = -1;
+
+    /**
+     * Map of pages in use. Guarantees that a page with a given number is
+     * loaded into memory just once, which allows synchronizing on page
+     * objects to ensure that no two threads write to the same page at once.
+     *
+     * <p>The map contains weak references to the Page objects; the keys are
+     * the pages themselves. Access is synchronized by the {@link #pagesLock}.
+     */
+    private final Map pages = new WeakHashMap();
+
+    /**
+     * Lock for synchronizing access to the {@link #pages} map.
+     */
+    private final Object pagesLock = new Object();
+
+    /**
+     * Cache of modified pages waiting to be written out.
+     * Access is synchronized by the {@link #dirtyLock}.
+     */
+    private Map dirty = new HashMap();
+
+    /**
+     * Lock for synchronizing access to the {@link #dirty} map.
+     */
+    private final Object dirtyLock = new Object();
+
+    /**
+     * Random access file descriptors cache.
+     * Access to it and to {@link #descriptorsCount} is synchronized on the stack itself.
+     */
+    private final Stack descriptors = new Stack();
+
+    /**
+     * The number of random access file objects that exist, either in the
+     * cache {@link #descriptors}, or currently in use.
+     */
+    private int descriptorsCount;
+
+    /**
+     * The maximum number of random access file objects that can be opened
+     * by this paged instance.
+     */
+    private int descriptorsMax = DEFAULT_DESCRIPTORS_MAX;
+
+    /**
+     * Whether the file is opened or not.
+     */
+    private boolean opened;
+
+    /**
+     * The underlying file where the Paged object stores its pages.
+     */
+    private File file;
+
+    /**
+     * Header of this Paged
+     */
+    private final FileHeader fileHeader;
+
+    /**
+     * Default value used when a new file is created.  Ignored if 
+     * opening an existing file.
+     */
+    private int pageSize = DEFAULT_PAGESIZE;
+
+    /**
+     * Default value used when a new file is created.  Ignored if 
+     * opening an existing file.
+     */
+    private long pageCount = DEFAULT_PAGECOUNT;
+
+    /**
+     * Default value used when a new file is created.
+     */
+    private short maxKeySize = DEFAULT_KEYSIZE_MAX;
+
+
+    public Paged() {
+        descriptorsMax = DEFAULT_DESCRIPTORS_MAX;
+        fileHeader = createFileHeader();
+    }
+
+    public Paged(File file) {
+        this();
+        setFile(file);
+    }    
+
+    /**
+     * setFile sets the file object for this Paged.
+     *
+     * @param file The File
+     */
+    protected final void setFile(final File file) {
+        this.file = file;
+    }
+
+    /**
+     * getFile returns the file object for this Paged.
+     *
+     * @return The File
+     */
+    protected final File getFile() {
+        return file;
+    }
+
+    /**
+     * Obtains a RandomAccessFile ('descriptor') from the pool.
+     * If no descriptors are available and the maximum number has already
+     * been allocated, the call blocks until one is released.
+     */
+    protected final RandomAccessFile getDescriptor() throws IOException {
+        synchronized (descriptors) {
+            // If there are descriptors in the cache return one.
+            if (!descriptors.empty()) {
+                return (RandomAccessFile) descriptors.pop();
+            }
+            // Otherwise we need to get one some other way.
+
+            // First try to create a new one if there's room
+            if (descriptorsCount < descriptorsMax) {
+                descriptorsCount++;
+                return new RandomAccessFile(file, "rw");
+            }
+
+            // Otherwise we have to wait for one to be released by another thread.
+            while (true) {
+                try {
+                    descriptors.wait();
+                    return (RandomAccessFile) descriptors.pop();
+                } catch (InterruptedException e) {
+                    // Ignore, and continue to wait
+                } catch (EmptyStackException e) {
+                    // Ignore, and continue to wait
+                }
+            }
+        }
+    }
+
+    /**
+     * Puts a RandomAccessFile ('descriptor') back into the descriptor pool.
+     */
+    protected final void putDescriptor(RandomAccessFile raf) {
+        if (raf != null) {
+            synchronized (descriptors) {
+                descriptors.push(raf);
+                descriptors.notify();
+            }
+        }
+    }
+
+    /**
+     * Closes a RandomAccessFile ('descriptor') and removes it from the pool.
+     */
+    protected final void closeDescriptor(RandomAccessFile raf) {
+        if (raf != null) {
+            try {
+                raf.close();
+            } catch (IOException e) {
+                // Ignore close exception
+            }
+
+            // Synchronization is necessary as decrement operation is not atomic
+            synchronized (descriptors) {
+                descriptorsCount --;
+            }
+        }
+    }
+
+    /**
+     * getPage returns the page specified by pageNum.
+     *
+     * @param pageNum The Page number
+     * @return The requested Page
+     * @throws IOException if an Exception occurs
+     */
+    protected final Page getPage(long pageNum) throws IOException {
+        final PageKey k = new PageKey(pageNum);
+        Page p = null;
+        synchronized (pagesLock) {
+            // Check if page is already loaded in the page cache
+            WeakReference ref = (WeakReference) pages.get(k);
+            if (ref != null) {
+                p = (Page) ref.get();
+                // Fall through to p.read(). Even if page present in the pages
+                // map, it still has to be read - it could be that it was just
+                // added to the map but read() was not called yet.
+            }
+
+            // If not found, create it and add it to the pages cache
+            if (p == null) {
+                p = new Page(pageNum);
+                pages.put(p, new WeakReference(p));
+            }
+        }
+
+        // Load the page from disk if necessary
+        p.read();
+        return p;
+    }
+
+    /**
+     * readValue reads the multi-Paged Value starting at the specified
+     * Page.
+     *
+     * @param page The starting Page
+     * @return The Value
+     * @throws IOException if an Exception occurs
+     */
+    protected final Value readValue(Page page) throws IOException {
+        final PageHeader sph = page.getPageHeader();
+        ByteArrayOutputStream bos = new ByteArrayOutputStream(sph.getRecordLen());
+
+        // Loop until we've read all the pages into memory.
+        Page p = page;
+        while (true) {
+            PageHeader ph = p.getPageHeader();
+
+            // Add the contents of the page onto the stream
+            p.streamTo(bos);
+
+            // Continue following the list of pages until we get to the end.
+            long nextPage = ph.getNextPage();
+            if (nextPage == NO_PAGE) {
+                break;
+            }
+            p = getPage(nextPage);
+        }
+
+        // Return a Value with the collected contents of all pages.
+        return new Value(bos.toByteArray());
+    }
+
+    /**
+     * readValue reads the multi-Paged Value starting at the specified
+     * page number.
+     *
+     * @param page The starting page number
+     * @return The Value
+     * @throws IOException if an Exception occurs
+     */
+    protected final Value readValue(long page) throws IOException {
+        return readValue(getPage(page));
+    }    
+
+    /**
+     * writeValue writes the multi-Paged Value starting at the specified
+     * Page.
+     *
+     * @param page The starting Page
+     * @param value The Value to write
+     * @throws IOException if an Exception occurs
+     */
+    protected final void writeValue(Page page, Value value) throws IOException {
+        if (value == null) {
+            throw new IOException("Can't write a null value");
+        }
+
+        InputStream is = value.getInputStream();
+
+        // Write as much as we can onto the primary page.
+        PageHeader hdr = page.getPageHeader();
+        hdr.setRecordLen(value.getLength());
+        page.streamFrom(is);
+
+        // Write out the rest of the value onto any needed overflow pages
+        while (is.available() > 0) {
+            Page lpage = page;
+            PageHeader lhdr = hdr;
+
+            // Find an overflow page to use
+            long np = lhdr.getNextPage();
+            if (np != NO_PAGE) {
+                // Use an existing page.
+                page = getPage(np);
+            } else {
+                // Create a new overflow page
+                page = getFreePage();
+                lhdr.setNextPage(page.getPageNum());
+            }
+
+            // Mark the page as an overflow page.
+            hdr = page.getPageHeader();
+            hdr.setStatus(OVERFLOW);
+
+            // Write some more of the value to the overflow page.
+            page.streamFrom(is);
+            lpage.write();
+        }
+
+        // Clean up any unused overflow pages, i.e. when the value is smaller
+        // than the last time it was written.
+        long np = hdr.getNextPage();
+        if (np != NO_PAGE) {
+            unlinkPages(np);
+        }
+
+        hdr.setNextPage(NO_PAGE);
+        page.write();
+    }
+
+    /**
+     * writeValue writes the multi-Paged Value starting at the specified
+     * page number.
+     *
+     * @param page The starting page number
+     * @param value The Value to write
+     * @throws IOException if an Exception occurs
+     */
+    protected final void writeValue(long page, Value value) throws IOException {
+        writeValue(getPage(page), value);
+    }
+
+    /**
+     * unlinkPages unlinks a set of pages starting at the specified Page.
+     *
+     * @param page The starting Page to unlink
+     * @throws IOException if an Exception occurs
+     */
+    protected void unlinkPages(Page page) throws IOException {
+        // Add any overflow pages to the list of free pages.
+        // Get the first and last page in the chain.
+        long firstPage = page.pageNum;
+        while (page.header.nextPage != NO_PAGE) {
+            page = getPage(page.header.nextPage);
+        }
+        long lastPage = page.pageNum;
+
+        // Free the chain
+        synchronized (fileHeader) {
+            // If there are already some free pages, add the start of the chain
+            // to the list of free pages.
+            if (fileHeader.lastFreePage != NO_PAGE) {
+                Page p = getPage(fileHeader.lastFreePage);
+                p.header.setNextPage(firstPage);
+                p.write();
+            }
+
+            // Otherwise set the chain as the list of free pages.
+            if (fileHeader.firstFreePage == NO_PAGE) {
+                fileHeader.setFirstFreePage(firstPage);
+            }
+
+            // Add a reference to the end of the chain.
+            fileHeader.setLastFreePage(lastPage);
+        }
+    }
+
+    /**
+     * unlinkPages unlinks a set of pages starting at the specified
+     * page number.
+     *
+     * @param pageNum The starting page number to unlink
+     * @throws IOException if an Exception occurs
+     */
+    protected final void unlinkPages(long pageNum) throws IOException {
+        unlinkPages(getPage(pageNum));
+    }
+
+    /**
+     * getFreePage returns the first free Page from secondary storage.
+     * If no Pages are available, the file is grown as appropriate.
+     *
+     * @return The next free Page
+     * @throws IOException if an Exception occurs
+     */
+    protected final Page getFreePage() throws IOException {
+        Page p = null;
+
+        // Synchronize read and write to the fileHeader.firstFreePage
+        synchronized (fileHeader) {
+            if (fileHeader.firstFreePage != NO_PAGE) {
+                // Steal a deleted page
+                p = getPage(fileHeader.firstFreePage);
+                fileHeader.setFirstFreePage(p.getPageHeader().nextPage);
+                if (fileHeader.firstFreePage == NO_PAGE) {
+                    fileHeader.setLastFreePage(NO_PAGE);
+                }
+            }
+        }
+
+        if (p == null) {
+            // No deleted pages, grow the file
+            p = getPage(fileHeader.incTotalCount());
+        }
+
+        // Initialize the page header cleanly.
+        p.header.setNextPage(NO_PAGE);
+        p.header.setStatus(UNUSED);
+        return p;
+    }
+
+    /**
+     * @throws IndexException COL_COLLECTION_CLOSED if paged file is closed
+     */
+    protected final void checkOpened() throws IndexException {
+        if (!opened) {
+            throw new IndexException(FaultCodes.COL_COLLECTION_CLOSED,
+                                     "Filer is closed");
+        }
+    }
+
+    /**
+     * getFileHeader returns the FileHeader
+     *
+     * @return The FileHeader
+     */
+    public FileHeader getFileHeader() {
+        return fileHeader;
+    }
+
+    protected void initFileHeader() {
+        fileHeader.setPageSize(pageSize);
+        fileHeader.setPageCount(pageCount);
+        fileHeader.setMaxKeySize(maxKeySize);
+    }
+
+    /**
+     * @return True if this paged file exists
+     */
+    public boolean exists() {
+        return file.exists();
+    }
+
+    public boolean create() throws IndexException {
+        try {
+            initFileHeader();            
+            createFile();
+            fileHeader.write();
+            flush();
+            return true;
+        } catch (Exception e) {
+            throw new IndexException(FaultCodes.GEN_CRITICAL_ERROR,
+                                     "Error creating " + file.getName(), e);
+        }
+    }
+
+    private void createFile() throws IOException {
+        RandomAccessFile raf = null;
+        try {
+            raf = getDescriptor();
+            long o = fileHeader.headerSize + (fileHeader.pageCount + 1) * fileHeader.pageSize - 1;
+            raf.seek(o);
+            raf.write(0);
+        }  finally {
+            putDescriptor(raf);
+        }
+    }
+
+    public synchronized boolean open() throws IndexException {
+        RandomAccessFile raf = null;
+        try {
+            if (exists()) {
+                initFileHeader();            
+                raf = getDescriptor();
+                fileHeader.read();
+
+                // This is the only property that can be changed after creation
+                fileHeader.setMaxKeySize(maxKeySize);
+
+                opened = true;
+            } else {
+                opened = false;
+            }
+            return opened;
+        } catch (Exception e) {
+            throw new IndexException(FaultCodes.GEN_CRITICAL_ERROR,
+                                     "Error opening " + file.getName(), e);
+        } finally {
+            putDescriptor(raf);
+        }
+    }
+
+    public synchronized boolean close() throws IndexException {
+        if (isOpened()) {
+            try {
+                // First of all, mark as closed to prevent operations
+                opened = false;
+                flush();
+
+                synchronized (descriptors) {
+                    final int total = descriptorsCount;
+                    // Close descriptors in cache
+                    while (!descriptors.empty()) {
+                        closeDescriptor((RandomAccessFile)descriptors.pop());
+                    }
+                    // Attempt to close descriptors in use. Max wait time = 0.5s * MAX_DESCRIPTORS
+                    int n = descriptorsCount;
+                    while (descriptorsCount > 0 && n > 0) {
+                        descriptors.wait(500);
+                        if (descriptors.isEmpty()) {
+                            n--;
+                        } else {
+                            closeDescriptor((RandomAccessFile)descriptors.pop());
+                        }
+                    }
+                    if (descriptorsCount > 0) {
+                        log.warn(descriptorsCount + " out of " + total + " files were not closed during close.");
+                    }
+                }
+
+                // clear cache
+                synchronized (pagesLock) {
+                    pages.clear();
+                }
+            } catch (Exception e) {
+                // Failed to close, leave open
+                opened = true;
+                throw new IndexException(FaultCodes.GEN_CRITICAL_ERROR,
+                                         "Error closing " + file.getName(), e);
+            }
+        }
+
+        return true;
+    }
+
+    public boolean isOpened() {
+        return opened;
+    }
+
+    public boolean drop() throws IndexException {
+        try {
+            close();
+            if (exists()) {
+                return getFile().delete();
+            } else {
+                return true;
+            }
+        } catch (Exception e) {
+            throw new IndexException(FaultCodes.COL_CANNOT_DROP,
+                                     "Can't drop " + file.getName(), e);
+        }
+    }
+
+    void addDirty(Page page) throws IOException {
+        boolean flush;
+        synchronized (dirtyLock) {
+            dirty.put(page, page);
+            flush = dirty.size() > MAX_DIRTY_SIZE;
+        }
+
+        if (flush) {
+            // Too many dirty pages... flush them
+            try {
+                flush();
+            } catch (Exception e) {
+                throw new IOException(e.getMessage());
+            }
+        }
+    }
+
+    public void flush() throws IndexException {
+        // This method is not synchronized
+
+        // Error flag/counter
+        int error = 0;
+
+        // Obtain collection of dirty pages
+        Collection pages;
+        synchronized (dirtyLock) {
+            pages = dirty.values();
+            dirty = new HashMap();
+        }
+
+        // Flush dirty pages
+        Iterator i = pages.iterator();
+        while (i.hasNext()) {
+            Page p = (Page) i.next();
+            try {
+                p.flush();
+            } catch (Exception e) {
+                log.warn("Exception while flushing page " + p.pageNum, e);
+                error++;
+            }
+        }
+
+        // Flush header
+        if (fileHeader.dirty) {
+            try {
+                fileHeader.write();
+            } catch (Exception e) {
+                log.warn("Exception while flushing file header", e);
+                error++;
+            }
+        }
+
+        if (error != 0) {
+            throw new IndexException(FaultCodes.GEN_CRITICAL_ERROR,
+                                     "Error performing flush! Failed to flush " + error + " pages!");
+        }
+    }
+
+
+    /**
+     * createFileHeader must be implemented by a Paged implementation
+     * in order to create an appropriate subclass instance of a FileHeader.
+     *
+     * @return a new FileHeader
+     */
+    protected abstract FileHeader createFileHeader();
+
+    /**
+     * createPageHeader must be implemented by a Paged implementation
+     * in order to create an appropriate subclass instance of a PageHeader.
+     *
+     * @return a new PageHeader
+     */
+    protected abstract PageHeader createPageHeader();
+
+
+    // These are a bunch of utility methods for subclasses
+
+    public static Key[] insertArrayValue(Key[] vals, Key val, int idx) {
+        Key[] newVals = new Key[vals.length + 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        newVals[idx] = val;
+        if (idx < vals.length) {
+            System.arraycopy(vals, idx, newVals, idx + 1, vals.length - idx);
+        }
+        return newVals;
+    }
+
+    public static Key[] deleteArrayValue(Key[] vals, int idx) {
+        Key[] newVals = new Key[vals.length - 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        if (idx < newVals.length) {
+            System.arraycopy(vals, idx + 1, newVals, idx, newVals.length - idx);
+        }
+        return newVals;
+    }
+
+    public static long[] insertArrayLong(long[] vals, long val, int idx) {
+        long[] newVals = new long[vals.length + 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        newVals[idx] = val;
+        if (idx < vals.length) {
+            System.arraycopy(vals, idx, newVals, idx + 1, vals.length - idx);
+        }
+        return newVals;
+    }
+
+    public static long[] deleteArrayLong(long[] vals, int idx) {
+        long[] newVals = new long[vals.length - 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        if (idx < newVals.length) {
+            System.arraycopy(vals, idx + 1, newVals, idx, newVals.length - idx);
+        }
+        return newVals;
+    }
+
+    public static int[] insertArrayInt(int[] vals, int val, int idx) {
+        int[] newVals = new int[vals.length + 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        newVals[idx] = val;
+        if (idx < vals.length) {
+            System.arraycopy(vals, idx, newVals, idx + 1, vals.length - idx);
+        }
+        return newVals;
+    }
+
+    public static int[] deleteArrayInt(int[] vals, int idx) {
+        int[] newVals = new int[vals.length - 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        if (idx < newVals.length) {
+            System.arraycopy(vals, idx + 1, newVals, idx, newVals.length - idx);
+        }
+        return newVals;
+    }
+
+    public static short[] insertArrayShort(short[] vals, short val, int idx) {
+        short[] newVals = new short[vals.length + 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        newVals[idx] = val;
+        if (idx < vals.length) {
+            System.arraycopy(vals, idx, newVals, idx + 1, vals.length - idx);
+        }
+
+        return newVals;
+    }
+
+    public static short[] deleteArrayShort(short[] vals, int idx) {
+        short[] newVals = new short[vals.length - 1];
+        if (idx > 0) {
+            System.arraycopy(vals, 0, newVals, 0, idx);
+        }
+        if (idx < newVals.length) {
+            System.arraycopy(vals, idx + 1, newVals, idx, newVals.length - idx);
+        }
+
+        return newVals;
+    }
+
+
+    /**
+     * Paged file's header
+     */
+    public abstract class FileHeader {
+        private boolean dirty;
+        private int workSize;
+
+        private short headerSize;
+
+        /**
+         * Size of the page in bytes.
+         */
+        private int pageSize;
+
+        /**
+         * Number of pages initially allocated for the file.
+         * Has a special (historical) meaning for HashFiler.
+         */
+        private long pageCount;
+
+        /**
+         * Number of pages used by the filer. Initially set to 0.
+         * Has somewhat different (historical) meaning for HashFiler.
+         */
+        private long totalCount;
+
+        private long firstFreePage = -1;
+        private long lastFreePage = -1;
+        private byte pageHeaderSize = 64;
+        private short maxKeySize = DEFAULT_KEYSIZE_MAX;
+        private long recordCount;
+
+
+        public FileHeader() {
+            this.pageSize = DEFAULT_PAGESIZE;
+            this.pageCount = DEFAULT_PAGECOUNT;
+            this.headerSize = (short) FILE_HEADER_SIZE;
+            calculateWorkSize();
+        }
+
+        public synchronized final void read() throws IOException {
+            RandomAccessFile raf = getDescriptor();
+            try {
+                raf.seek(0);
+                read(raf);
+                calculateWorkSize();
+            } finally {
+                putDescriptor(raf);
+            }
+        }
+
+        protected synchronized void read(RandomAccessFile raf) throws IOException {
+            headerSize = raf.readShort();
+            pageSize = raf.readInt();
+            pageCount = raf.readLong();
+            totalCount = raf.readLong();
+            firstFreePage = raf.readLong();
+            lastFreePage = raf.readLong();
+            pageHeaderSize = raf.readByte();
+            maxKeySize = raf.readShort();
+            recordCount = raf.readLong();
+        }
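+
+        /*
+         * On-disk layout implied by read()/write() above (byte offsets):
+         *
+         *    0  short  headerSize
+         *    2  int    pageSize
+         *    6  long   pageCount
+         *   14  long   totalCount
+         *   22  long   firstFreePage
+         *   30  long   lastFreePage
+         *   38  byte   pageHeaderSize
+         *   39  short  maxKeySize
+         *   41  long   recordCount
+         *
+         * 49 bytes in total; the remainder of the headerSize
+         * (FILE_HEADER_SIZE) region is left for subclasses.
+         */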
+
+        public synchronized final void write() throws IOException {
+            if (dirty) {
+                RandomAccessFile raf = getDescriptor();
+                try {
+                    raf.seek(0);
+                    write(raf);
+                    dirty = false;
+                } finally {
+                    putDescriptor(raf);
+                }
+            }
+        }
+
+        protected synchronized void write(RandomAccessFile raf) throws IOException {
+            raf.writeShort(headerSize);
+            raf.writeInt(pageSize);
+            raf.writeLong(pageCount);
+            raf.writeLong(totalCount);
+            raf.writeLong(firstFreePage);
+            raf.writeLong(lastFreePage);
+            raf.writeByte(pageHeaderSize);
+            raf.writeShort(maxKeySize);
+            raf.writeLong(recordCount);
+        }
+
+        public synchronized final void setDirty() {
+            dirty = true;
+        }
+
+        public synchronized final boolean isDirty() {
+            return dirty;
+        }
+
+        /**
+         * The size of the FileHeader. Usually 1 OS Page.
+         * This method should be called only while initializing Paged, not during normal processing.
+         */
+        public synchronized final void setHeaderSize(short headerSize) {
+            this.headerSize = headerSize;
+            dirty = true;
+        }
+
+        /** The size of the FileHeader.  Usually 1 OS Page */
+        public synchronized final short getHeaderSize() {
+            return headerSize;
+        }
+
+        /**
+         * The size of a page. Usually a multiple of a FS block.
+         * This method should be called only while initializing Paged, not during normal processing.
+         */
+        public synchronized final void setPageSize(int pageSize) {
+            this.pageSize = pageSize;
+            calculateWorkSize();
+            dirty = true;
+        }
+
+        /** The size of a page.  Usually a multiple of a FS block */
+        public synchronized final int getPageSize() {
+            return pageSize;
+        }
+
+        /**
+         * The number of pages in primary/initial storage.
+         * This method should be called only while initializing Paged, not during normal processing.
+         */
+        public synchronized final void setPageCount(long pageCount) {
+            this.pageCount = pageCount;
+            dirty = true;
+        }
+
+        /** The number of pages in primary storage */
+        public synchronized final long getPageCount() {
+            return pageCount;
+        }
+
+        /**
+         * The number of used pages in the file.
+         * This method should be called only while initializing Paged, not during normal processing.
+         */
+        public synchronized final void setTotalCount(long totalCount) {
+            this.totalCount = totalCount;
+            dirty = true;
+        }
+
+        /** Increments the total page count; returns the pre-increment value */
+        public synchronized final long incTotalCount() {
+            dirty = true;
+            return this.totalCount++;
+        }
+
+        /** The number of used pages in the file */
+        public synchronized final long getTotalCount() {
+            return totalCount;
+        }
+
+        /** The first free page in unused secondary space */
+        public synchronized final void setFirstFreePage(long firstFreePage) {
+            this.firstFreePage = firstFreePage;
+            dirty = true;
+        }
+
+        /** The first free page in unused secondary space */
+        public synchronized final long getFirstFreePage() {
+            return firstFreePage;
+        }
+
+        /** The last free page in unused secondary space */
+        public synchronized final void setLastFreePage(long lastFreePage) {
+            this.lastFreePage = lastFreePage;
+            dirty = true;
+        }
+
+        /** The last free page in unused secondary space */
+        public synchronized final long getLastFreePage() {
+            return lastFreePage;
+        }
+
+        /**
+         * Set the size of a page header.
+         *
+         * Normally, 64 is sufficient.
+         */
+        public synchronized final void setPageHeaderSize(byte pageHeaderSize) {
+            this.pageHeaderSize = pageHeaderSize;
+            calculateWorkSize();
+            dirty = true;
+        }
+
+        /**
+         * Get the size of a page header.
+         */
+        public synchronized final byte getPageHeaderSize() {
+            return pageHeaderSize;
+        }
+
+        /**
+         * Set the maximum number of bytes a key can be.
+         *
+         * Normally, 256 is sufficient.
+         */
+        public synchronized final void setMaxKeySize(short maxKeySize) {
+            this.maxKeySize = maxKeySize;
+            dirty = true;
+        }
+
+        /**
+         * Get the maximum number of bytes a key can be.
+         */
+        public synchronized final short getMaxKeySize() {
+            return maxKeySize;
+        }
+
+        /** Increment the number of records being managed by the file */
+        public synchronized final void incRecordCount() {
+            recordCount++;
+            dirty = true;
+        }
+
+        /** Decrement the number of records being managed by the file */
+        public synchronized final void decRecordCount() {
+            recordCount--;
+            dirty = true;
+        }
+
+        /** The number of records being managed by the file (not pages) */
+        public synchronized final long getRecordCount() {
+            return recordCount;
+        }
+
+        private synchronized void calculateWorkSize() {
+            workSize = pageSize - pageHeaderSize;
+        }
+
+        public synchronized final int getWorkSize() {
+            return workSize;
+        }
+    }
+
+    /**
+     * Header of a single page within the paged file
+     */
+    protected abstract static class PageHeader {
+        private boolean dirty;
+        private byte status = UNUSED;
+        private short keyLen;
+        private int keyHash;
+        private int dataLen;
+        private int recordLen;
+        private long nextPage = NO_PAGE;
+
+        public PageHeader() {
+        }
+
+        public PageHeader(DataInput dis) throws IOException {
+            read(dis);
+        }
+
+        public synchronized void read(DataInput dis) throws IOException {
+            status = dis.readByte();
+            dirty = false;
+            if (status == UNUSED) {
+                return;
+            }
+
+            keyLen = dis.readShort();
+            keyHash = dis.readInt();
+            dataLen = dis.readInt();
+            recordLen = dis.readInt();
+            nextPage = dis.readLong();
+        }
+
+        public synchronized void write(DataOutput dos) throws IOException {
+            dirty = false;
+            dos.writeByte(status);
+            dos.writeShort(keyLen);
+            dos.writeInt(keyHash);
+            dos.writeInt(dataLen);
+            dos.writeInt(recordLen);
+            dos.writeLong(nextPage);
+        }
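+
+        /*
+         * Serialized layout implied by read()/write() above (byte offsets):
+         *
+         *    0  byte   status
+         *    1  short  keyLen
+         *    3  int    keyHash
+         *    7  int    dataLen
+         *   11  int    recordLen
+         *   15  long   nextPage
+         *
+         * 23 bytes in total, well inside the default pageHeaderSize of 64.
+         * Note that read() stops after the status byte for UNUSED pages.
+         */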
+
+        public synchronized final boolean isDirty() {
+            return dirty;
+        }
+
+        public synchronized final void setDirty() {
+            dirty = true;
+        }
+
+        /** The status of this page (UNUSED, RECORD, DELETED, etc...) */
+        public synchronized final void setStatus(byte status) {
+            this.status = status;
+            dirty = true;
+        }
+
+        /** The status of this page (UNUSED, RECORD, DELETED, etc...) */
+        public synchronized final byte getStatus() {
+            return status;
+        }
+
+        public synchronized final void setKey(Key key) {
+            // setKey WIPES OUT the Page data
+            setRecordLen(0);
+            dataLen = 0;
+            keyHash = key.hashCode();
+            keyLen = (short) key.getLength();
+            dirty = true;
+        }
+
+        /** The length of the Key */
+        public synchronized final void setKeyLen(short keyLen) {
+            this.keyLen = keyLen;
+            dirty = true;
+        }
+
+        /** The length of the Key */
+        public synchronized final short getKeyLen() {
+            return keyLen;
+        }
+
+        /** The hashed value of the Key for quick comparisons */
+        public synchronized final void setKeyHash(int keyHash) {
+            this.keyHash = keyHash;
+            dirty = true;
+        }
+
+        /** The hashed value of the Key for quick comparisons */
+        public synchronized final int getKeyHash() {
+            return keyHash;
+        }
+
+        /** The length of the Data */
+        public synchronized final void setDataLen(int dataLen) {
+            this.dataLen = dataLen;
+            dirty = true;
+        }
+
+        /** The length of the Data */
+        public synchronized final int getDataLen() {
+            return dataLen;
+        }
+
+        /** The length of the Record's value */
+        public synchronized void setRecordLen(int recordLen) {
+            this.recordLen = recordLen;
+            dirty = true;
+        }
+
+        /** The length of the Record's value */
+        public synchronized final int getRecordLen() {
+            return recordLen;
+        }
+
+        /** The next page for this Record (if overflowed) */
+        public synchronized final void setNextPage(long nextPage) {
+            this.nextPage = nextPage;
+            dirty = true;
+        }
+
+        /** The next page for this Record (if overflowed) */
+        public synchronized final long getNextPage() {
+            return nextPage;
+        }
+    }
+
+    /**
+     * An immutable wrapper around a page number.
+     */
+    protected static class PageKey implements Comparable {
+
+        /**
+         * This page number
+         */
+        protected final long pageNum;
+
+
+        public PageKey(long pageNum) {
+            this.pageNum = pageNum;
+        }
+
+        // No synchronization - pageNum is final
+        public long getPageNum() {
+            return pageNum;
+        }
+
+        // No synchronization: pageNum is final.
+        public int compareTo(Object o) {
+            // Avoid "(int) (a - b)": the narrowing cast can truncate the
+            // difference and return the wrong sign for distant page numbers.
+            long other = ((PageKey) o).pageNum;
+            return pageNum < other ? -1 : (pageNum > other ? 1 : 0);
+        }
+
+        /**
+         * Returns the page hash code, which is the hash code of its
+         * {@link #pageNum}.
+         *
+         * @return Page hash code
+         */
+        public int hashCode() {
+            // Unrolled new Long(pageNum).hashCode(): (int) (value ^ (value >>> 32)).
+            // The unsigned shift matters only for negative values, but matches
+            // Long.hashCode() exactly.
+            return (int) (pageNum ^ (pageNum >>> 32));
+        }
+
+        /**
+         * Pages are equal if they are the same or have equal pageNum.
+         *
+         * @param obj Another page
+         * @return true if pages are equal
+         */
+        public boolean equals(Object obj) {
+            if (obj == this) {
+                return true;
+            }
+
+            if (obj instanceof PageKey) {
+                return pageNum == ((PageKey) obj).pageNum;
+            }
+
+            return false;
+        }
+    }
+
+    /**
+     * Paged file's page
+     */
+    protected final class Page extends PageKey {
+
+        /**
+         * The Header for this Page
+         */
+        private final PageHeader header;
+
+        /**
+         * The offset into the file that this page starts
+         */
+        private final long offset;
+
+        /**
+         * The data for this page. Null if page is not loaded.
+         */
+        private byte[] data;
+
+        /**
+         * The position (relative) of the Key in the data array
+         */
+        private int keyPos;
+
+        /**
+         * The position (relative) of the Data in the data array
+         */
+        private int dataPos;
+
+
+        private Page(long pageNum) {
+            super(pageNum);
+            this.header = createPageHeader();
+            this.offset = fileHeader.headerSize + (pageNum * fileHeader.pageSize);
+        }
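+
+        /*
+         * Layout implied by offset/keyPos/dataPos (see read() below):
+         *
+         *   file:  [ fileHeader | page 0 | page 1 | ... ]
+         *   page:  [ pageHeader (pageHeaderSize bytes) | key (keyLen) | data ]
+         */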
+
+        /**
+         * Reads the page into memory once; subsequent calls are no-ops.
+         */
+        public synchronized void read() throws IOException {
+            if (data == null) {
+                RandomAccessFile raf = null;
+                try {
+                    byte[] data = new byte[fileHeader.pageSize];
+                    raf = getDescriptor();
+                    raf.seek(this.offset);
+                    // read() may return fewer than pageSize bytes (or -1 for a
+                    // page beyond the current EOF); the untouched tail of the
+                    // freshly allocated buffer stays zeroed, which header.read()
+                    // below appears to rely on decoding as an UNUSED page.
+                    raf.read(data);
+
+                    // Read in the header
+                    ByteArrayInputStream bis = new ByteArrayInputStream(data);
+                    this.header.read(new DataInputStream(bis));
+
+                    this.keyPos = fileHeader.pageHeaderSize;
+                    this.dataPos = this.keyPos + this.header.keyLen;
+
+                    // Successfully read all the data
+                    this.data = data;
+                } finally {
+                    putDescriptor(raf);
+                }
+            }
+        }
+
+        /**
+         * Writes the header out into this.data and adds this page to the
+         * set of dirty pages.
+         */
+        public void write() throws IOException {
+            // Serialize the header into the front of this.data
+            synchronized (this) {
+                ByteArrayOutputStream bos = new ByteArrayOutputStream(fileHeader.getPageHeaderSize());
+                header.write(new DataOutputStream(bos));
+                byte[] b = bos.toByteArray();
+                System.arraycopy(b, 0, data, 0, b.length);
+            }
+
+            // Add to the list of dirty pages
+            Paged.this.addDirty(this);
+        }
+
+        /**
+         * Flushes content of the dirty page into the file
+         */
+        public synchronized void flush() throws IOException {
+            RandomAccessFile raf = null;
+            try {
+                raf = getDescriptor();
+                if (this.offset >= raf.length()) {
+                    // Grow the file to cover (totalCount * 3/2 + 1) pages by
+                    // writing one byte at the new last offset
+                    long o = fileHeader.headerSize + (fileHeader.totalCount * 3 / 2 + 1) * fileHeader.pageSize - 1;
+                    raf.seek(o);
+                    raf.writeByte(0);
+                }
+                raf.seek(this.offset);
+                raf.write(this.data);
+            } finally {
+                putDescriptor(raf);
+            }
+        }
+
+        // No synchronization - header is final
+        public PageHeader getPageHeader() {
+            return this.header;
+        }
+
+        public synchronized void setKey(Key key) {
+            header.setKey(key);
+            // Insert the key into the data array.
+            key.copyTo(this.data, this.keyPos);
+
+            // Set the start of data to skip over the key.
+            this.dataPos = this.keyPos + header.keyLen;
+        }
+
+        public synchronized Key getKey() {
+            if (header.keyLen == 0) {
+                return null;
+            }
+
+            return new Key(this.data, this.keyPos, header.keyLen);
+        }
+
+        public synchronized void streamTo(OutputStream os) throws IOException {
+            if (header.dataLen > 0) {
+                os.write(this.data, this.dataPos, header.dataLen);
+            }
+        }
+
+        public synchronized void streamFrom(InputStream is) throws IOException {
+            int avail = is.available();
+            header.dataLen = fileHeader.workSize - header.keyLen;
+            if (avail < header.dataLen) {
+                header.dataLen = avail;
+            }
+            // InputStream.read() may return fewer bytes than requested, so
+            // loop until header.dataLen bytes have been copied in.
+            int pos = this.keyPos + header.keyLen;
+            int remaining = header.dataLen;
+            while (remaining > 0) {
+                int len = is.read(this.data, pos, remaining);
+                if (len < 0) {
+                    throw new IOException("Unexpected end of stream");
+                }
+                pos += len;
+                remaining -= len;
+            }
+        }
+    }
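+
+    // Typical page lifecycle as driven by filer subclasses (illustrative
+    // sketch; page acquisition and caching are handled elsewhere):
+    //
+    //     Page p = ...;        // obtain the Page for some pageNum
+    //     p.read();            // load contents from disk, at most once
+    //     p.setKey(key);       // reset the record and install the key
+    //     p.streamFrom(in);    // copy the value into the page body
+    //     p.write();           // re-serialize the header, mark page dirty
+    //     flush();             // later: persist all dirty pages + header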
+
+    public int getDescriptorsMax() {
+        return descriptorsMax;
+    }
+
+    public void setDescriptorsMax(int descriptorsMax) {
+        this.descriptorsMax = descriptorsMax;
+    }
+
+    public int getPageSize() {
+        return pageSize;
+    }
+
+    public void setPageSize(int pageSize) {
+        this.pageSize = pageSize;
+    }
+
+    public long getPageCount() {
+        return pageCount;
+    }
+
+    public void setPageCount(long pageCount) {
+        this.pageCount = pageCount;
+    }
+
+    public short getMaxKeySize() {
+        return maxKeySize;
+    }
+
+    public void setMaxKeySize(short maxKeySize) {
+        this.maxKeySize = maxKeySize;
+    }
+}

Added: activemq/sandbox/kahadb/src/test/java/org/apache/kahadb/ListContainerTest.java
URL: http://svn.apache.org/viewvc/activemq/sandbox/kahadb/src/test/java/org/apache/kahadb/ListContainerTest.java?rev=677944&view=auto
==============================================================================
--- activemq/sandbox/kahadb/src/test/java/org/apache/kahadb/ListContainerTest.java (added)
+++ activemq/sandbox/kahadb/src/test/java/org/apache/kahadb/ListContainerTest.java Fri Jul 18 08:49:48 2008
@@ -0,0 +1,330 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kahadb;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.ListIterator;
+
+import junit.framework.TestCase;
+
+public class ListContainerTest extends TestCase {
+    
+    protected static final int COUNT = 10;
+
+    protected String name = "test";
+    protected Store store;
+    protected ListContainer<Object> container;
+    protected LinkedList<Object> testList;
+
+    /*
+     * Test method for 'org.apache.kahadb.ListContainer.size()'
+     */
+    public void testSize() throws Exception {
+        container.addAll(testList);
+        assertEquals(container.size(), testList.size());
+    }
+
+    /*
+     * Test method for 'org.apache.kahadb.ListContainer.addFirst(Object)'
+     */
+    public void testAddFirst() throws Exception {
+        container.addAll(testList);
+        assertEquals(container.size(), testList.size());
+        String first = "first";
+        container.addFirst(first);
+        assertEquals(first, container.get(0));
+        assertEquals(container.size(), testList.size() + 1);
+    }
+
+    /*
+     * Test method for 'org.apache.kahadb.ListContainer.addLast(Object)'
+     */
+    public void testAddLast() throws Exception {
+        container.addAll(testList);
+        assertEquals(container.size(), testList.size());
+        String last = "last";
+        container.addLast(last);
+        assertEquals(last, container.get(testList.size()));
+        assertEquals(container.size(), testList.size() + 1);
+    }
+
+    /*
+     * Test method for 'org.apache.kahadb.ListContainer.removeFirst()'
+     */
+    public void testRemoveFirst() throws Exception {
+        container.addAll(testList);
+        assertEquals(container.size(), testList.size());
+        assertEquals(testList.get(0), container.removeFirst());
+        assertEquals(container.size(), testList.size() - 1);
+        for (int i = 1; i < testList.size(); i++) {
+            assertEquals(testList.get(i), container.get(i - 1));
+        }
+    }
+
+    /*
+     * Test method for 'org.apache.kahadb.ListContainer.removeLast()'
+     */
+    public void testRemoveLast() throws Exception {
+        container.addAll(testList);
+        assertEquals(container.size(), testList.size());
+        assertEquals(testList.get(testList.size() - 1), container.removeLast());
+        assertEquals(container.size(), testList.size() - 1);
+        for (int i = 0; i < testList.size() - 1; i++) {
+            assertEquals(testList.get(i), container.get(i));
+        }
+    }
+
+    /*
+     * Test method for 'java.util.List.iterator()'
+     */
+    public void testIterator() throws Exception {
+        container.addAll(testList);
+        Iterator<Object> j = container.iterator();
+        for (Iterator<Object> i = testList.iterator(); i.hasNext();) {
+            assertEquals(i.next(), j.next());
+        }
+        for (Iterator<Object> i = container.iterator(); i.hasNext();) {
+            i.next();
+            i.remove();
+        }
+        assertTrue(container.isEmpty());
+    }
+
+    /*
+     * Test method for 'java.util.List.isEmpty()'
+     */
+    public void testIsEmpty() throws Exception {
+        assertTrue(container.isEmpty());
+    }
+
+    /*
+     * Test method for 'java.util.List.contains(Object)'
+     */
+    public void testContains() throws Exception {
+        container.addAll(testList);
+        for (Iterator<Object> i = testList.iterator(); i.hasNext();) {
+            assertTrue(container.contains(i.next()));
+        }
+    }
+
+    /*
+     * Test method for 'java.util.List.toArray()'
+     */
+    public void testToArray() throws Exception {
+        container.addAll(testList);
+        Object[] a = testList.toArray();
+        Object[] b = container.toArray();
+        assertEquals(a.length, b.length);
+        for (int i = 0; i < a.length; i++) {
+            assertEquals(a[i], b[i]);
+        }
+    }
+
+    /*
+     * Test method for 'java.util.List.remove(Object)'
+     */
+    public void testRemoveObject() throws Exception {
+        container.addAll(testList);
+        assertEquals(container.size(), testList.size());
+        for (int i = 0; i < testList.size(); i++) {
+            container.remove(testList.get(i));
+        }
+        assertTrue(container.isEmpty());
+    }
+
+    /*
+     * Test method for 'java.util.List.containsAll(Collection<?>)'
+     */
+    public void testContainsAll() throws Exception {
+        container.addAll(testList);
+        assertTrue(container.containsAll(testList));
+    }
+
+    /*
+     * Test method for 'java.util.List.removeAll(Collection<?>)'
+     */
+    public void testRemoveAll() throws Exception {
+        container.addAll(testList);
+        assertEquals(testList.size(), container.size());
+        container.removeAll(testList);
+        assertTrue(container.isEmpty());
+    }
+
+    /*
+     * Test method for 'java.util.List.retainAll(Collection<?>)'
+     */
+    public void testRetainAll() throws Exception {
+        container.addAll(testList);
+        assertEquals(testList.size(), container.size());
+        testList.remove(0);
+        container.retainAll(testList);
+        assertEquals(testList.size(), container.size());
+    }
+
+    /*
+     * Test method for 'java.util.List.clear()'
+     */
+    public void testClear() throws Exception {
+        container.addAll(testList);
+        assertEquals(testList.size(), container.size());
+        container.clear();
+        assertTrue(container.isEmpty());
+    }
+
+    /*
+     * Test method for 'java.util.List.get(int)'
+     */
+    public void testGet() throws Exception {
+        container.addAll(testList);
+        for (int i = 0; i < testList.size(); i++) {
+            assertEquals(container.get(i), testList.get(i));
+        }
+    }
+
+    /*
+     * Test method for 'java.util.List.set(int, E)'
+     */
+    public void testSet() throws Exception {
+        container.addAll(testList);
+        Object testObj = "testObj";
+        testList.set(0, testObj);
+        container.set(0, testObj);
+        assertTrue(container.equals(testList));
+    }
+
+    /*
+     * Test method for 'java.util.List.add(int, E)'
+     */
+    public void testAddIntE() throws Exception {
+        container.addAll(testList);
+        assertTrue(container.equals(testList));
+        Object testObj = "testObj";
+        int index = 0;
+        testList.add(index, testObj);
+        container.add(index, testObj);
+        assertTrue(container.equals(testList));
+        index = testList.size() - 1;
+        testList.add(index, testObj);
+        container.add(index, testObj);
+        assertTrue(container.equals(testList));
+    }
+
+    /*
+     * Test method for 'java.util.List.remove(int)'
+     */
+    public void testRemoveInt() throws Exception {
+        container.addAll(testList);
+        assertTrue(container.equals(testList));
+        testList.remove(0);
+        container.remove(0);
+        assertTrue(container.equals(testList));
+        int pos = testList.size() - 1;
+        testList.remove(pos);
+        container.remove(pos);
+        assertTrue(container.equals(testList));
+    }
+
+    /*
+     * Test method for 'java.util.List.indexOf(Object)'
+     */
+    public void testIndexOf() throws Exception {
+        container.addAll(testList);
+        assertTrue(container.equals(testList));
+        for (int i = 0; i < testList.size(); i++) {
+            Object o = testList.get(i);
+            assertEquals(i, container.indexOf(o));
+        }
+    }
+
+    /*
+     * Test method for 'java.util.List.listIterator()'
+     */
+    public void testListIterator() throws Exception {
+        container.addAll(testList);
+        ListIterator<Object> containerIter = container.listIterator();
+        ListIterator<Object> testIter = testList.listIterator();
+        assertTrue(testIter.hasNext());
+        assertTrue(containerIter.hasNext());
+        while (testIter.hasNext()) {
+            Object o1 = testIter.next();
+            Object o2 = containerIter.next();
+            assertEquals(o1, o2);
+            testIter.remove();
+            containerIter.remove();
+        }
+        assertTrue(testList.isEmpty());
+        assertTrue(container.isEmpty());
+    }
+
+    /*
+     * Test method for 'java.util.List.listIterator(int)'
+     */
+    public void testListIteratorInt() throws Exception {
+        container.addAll(testList);
+        int start = testList.size() / 2;
+        ListIterator<Object> containerIter = container.listIterator(start);
+        ListIterator<Object> testIter = testList.listIterator(start);
+        assertTrue(testIter.hasNext());
+        assertTrue(containerIter.hasNext());
+        while (testIter.hasNext()) {
+            Object o1 = testIter.next();
+            Object o2 = containerIter.next();
+            assertEquals(o1, o2);
+        }
+    }
+
+    /*
+     * Test method for 'java.util.List.subList(int, int)'
+     */
+    public void testSubList() throws Exception {
+        container.addAll(testList);
+        int start = testList.size() / 2;
+        List<Object> l1 = testList.subList(start, testList.size());
+        List<Object> l2 = container.subList(start, testList.size());
+        assertEquals(l1.size(), l2.size());
+        assertEquals(l1, l2);
+    }
+
+    protected Store getStore() throws IOException {
+        return StoreFactory.open(name, "rw");
+    }
+
+    protected void setUp() throws Exception {
+        super.setUp();
+        name = System.getProperty("basedir", ".") + "/target/activemq-data/list-container.db";
+        StoreFactory.delete(name);
+        store = getStore();
+        store.deleteListContainer(name);
+        container = store.getListContainer(name);
+        container.load();
+        testList = new LinkedList<Object>();
+        for (int i = 0; i < COUNT; i++) {
+            String value = "value:" + i;
+            testList.add(value);
+        }
+    }
+
+    protected void tearDown() throws Exception {
+        super.tearDown();
+        if (store != null) {
+            store.close();
+        }
+        assertTrue(StoreFactory.delete(name));
+    }
+}


