jackrabbit-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ste...@apache.org
Subject svn commit: r1206106 - /jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/
Date Fri, 25 Nov 2011 10:03:11 GMT
Author: stefan
Date: Fri Nov 25 10:03:10 2011
New Revision: 1206106

URL: http://svn.apache.org/viewvc?rev=1206106&view=rev
Log:
oops, forgot to commit some classes... sorry

Added:
    jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/AbstractPersistenceManager.java
    jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/BDbPersistenceManager.java
    jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/FSPersistenceManager.java
    jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/H2PersistenceManager.java
    jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/MongoPersistenceManager.java
    jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/RevisionProvider.java

Added: jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/AbstractPersistenceManager.java
URL: http://svn.apache.org/viewvc/jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/AbstractPersistenceManager.java?rev=1206106&view=auto
==============================================================================
--- jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/AbstractPersistenceManager.java
(added)
+++ jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/AbstractPersistenceManager.java
Fri Nov 25 10:03:10 2011
@@ -0,0 +1,216 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.mk.store;
+
+import org.apache.jackrabbit.mk.model.Commit;
+import org.apache.jackrabbit.mk.model.MutableCommit;
+import org.apache.jackrabbit.mk.model.Node;
+import org.apache.jackrabbit.mk.model.MutableNode;
+import org.apache.jackrabbit.mk.model.StoredCommit;
+import org.apache.jackrabbit.mk.model.StoredNode;
+import org.apache.jackrabbit.mk.util.SimpleLRUCache;
+
+import java.io.File;
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ *
+ */
+// Template base class for revision stores: owns the id -> object cache, the
+// head-commit read/write lock and the initialize/close lifecycle. Concrete
+// subclasses implement only the raw storage primitives (read*/write*/blob*).
+public abstract class AbstractPersistenceManager implements RevisionStore {
+
+    // system property allowing the LRU cache size to be overridden at startup
+    public static final String CACHE_SIZE = "microKernel.cacheSize";
+    public static final int DEFAULT_CACHE_SIZE = 10000;
+
+    // guards against use before initialize() / after close()
+    private boolean initialized;
+
+    // id of the current head commit; reads/writes guarded by headLock
+    private String headId;
+    private final ReentrantReadWriteLock headLock = new ReentrantReadWriteLock();
+
+    // LRU cache of StoredNode/StoredCommit instances keyed by id
+    // (synchronized wrapper around SimpleLRUCache, see initialize())
+    Map<String, Object> cache;
+
+    //--------------------------------------< storage SPI for subclasses >
+
+    protected abstract void doInitialize(File homeDir) throws Exception;
+
+    protected abstract void doClose();
+
+    // returns the persisted head commit id, or null/"" on a fresh store
+    protected abstract String readHead() throws Exception;
+
+    protected abstract void writeHead(String id) throws Exception;
+
+    protected abstract StoredNode readNode(String id) throws NotFoundException, Exception;
+
+    // persists the node and returns its (content-derived) id
+    protected abstract String writeNode(Node node) throws Exception;
+
+    protected abstract StoredCommit readCommit(String id) throws NotFoundException, Exception;
+
+    protected abstract String writeCommit(Commit commit) throws Exception;
+
+    protected abstract String writeBlob(InputStream in) throws Exception;
+
+    protected abstract int readBlob(String blobId, long pos, byte[] buff, int off, int length)
throws NotFoundException, Exception;
+
+    protected abstract long blobLength(String blobId) throws NotFoundException, Exception;
+
+    /**
+     * Initializes the store: creates the cache, delegates storage setup to
+     * doInitialize() and, on a virgin repository (no head id persisted yet),
+     * bootstraps an empty root node plus an initial commit as HEAD.
+     */
+    public void initialize(File homeDir) throws Exception {
+        if (initialized) {
+            throw new IllegalStateException("already initialized");
+        }
+
+        cache = Collections.synchronizedMap(SimpleLRUCache.<String, Object>newInstance(determineInitialCacheSize()));
+
+        doInitialize(homeDir);
+
+        // make sure we've got a HEAD commit
+        headId = readHead();
+        if (headId == null || headId.length() == 0) {
+            // assume virgin repository
+            String rootNodeId = writeNode(new MutableNode());
+            MutableCommit initialCommit = new MutableCommit();
+            initialCommit.setCommitTS(System.currentTimeMillis());
+            initialCommit.setRootNodeId(rootNodeId);
+            headId = writeCommit(initialCommit);
+            writeHead(headId);
+        }
+
+        initialized = true;
+    }
+
+    // clears the cache, closes the underlying storage and allows re-initialization
+    public void close() {
+        verifyInitialized();
+
+        cache.clear();
+
+        doClose();
+
+        initialized = false;
+    }
+
+    // throws IllegalStateException when called outside the initialized window
+    protected void verifyInitialized() {
+        if (!initialized) {
+            throw new IllegalStateException("not initialized");
+        }
+    }
+
+    // cache size from the CACHE_SIZE system property, falling back to the default;
+    // NOTE(review): a non-numeric property value would raise NumberFormatException here
+    protected int determineInitialCacheSize() {
+        String val = System.getProperty(CACHE_SIZE);
+        return (val != null) ? Integer.parseInt(val) : DEFAULT_CACHE_SIZE;
+    }
+
+    //--------------------------------------------------------< RevisionStore >
+
+    public String putNode(Node node) throws Exception {
+        verifyInitialized();
+
+        return writeNode(node);
+    }
+
+    public String putCommit(Commit commit) throws Exception {
+        verifyInitialized();
+
+        return writeCommit(commit);
+    }
+
+    // persists the new head id and updates the in-memory copy under the write lock
+    public void setHeadCommitId(String commitId) throws Exception {
+        verifyInitialized();
+
+        headLock.writeLock().lock();
+        try {
+            writeHead(commitId);
+            headId = commitId;
+        } finally {
+            headLock.writeLock().unlock();
+        }
+    }
+
+    // exposes the head write lock to callers that need to update HEAD atomically;
+    // every lockHead() must be paired with unlockHead()
+    public void lockHead() {
+        headLock.writeLock().lock();
+    }
+
+    public void unlockHead() {
+        headLock.writeLock().unlock();
+    }
+
+    public String putBlob(InputStream in) throws Exception {
+        verifyInitialized();
+
+        return writeBlob(in);
+    }
+
+    //-----------------------------------------------------< RevisionProvider >
+
+    // read-through cache: a miss falls back to storage and populates the cache.
+    // NOTE(review): two threads missing concurrently will both read and both
+    // put; presumably benign since ids are content-derived — confirm.
+    public StoredNode getNode(String id) throws NotFoundException, Exception {
+        verifyInitialized();
+
+        StoredNode node = (StoredNode) cache.get(id);
+        if (node != null) {
+            return node;
+        }
+
+        node = readNode(id);
+
+        cache.put(id, node);
+
+        return node;
+    }
+
+    // same read-through pattern as getNode(), for commits
+    public StoredCommit getCommit(String id) throws NotFoundException, Exception {
+        verifyInitialized();
+
+        StoredCommit commit = (StoredCommit) cache.get(id);
+        if (commit != null) {
+            return commit;
+        }
+
+        commit = readCommit(id);
+        cache.put(id, commit);
+
+        return commit;
+    }
+
+    // convenience: root node of the tree referenced by the given commit
+    public StoredNode getRootNode(String commitId) throws NotFoundException, Exception {
+        return getNode(getCommit(commitId).getRootNodeId());
+    }
+
+    public StoredCommit getHeadCommit() throws Exception {
+        return getCommit(getHeadCommitId());
+    }
+
+    // reads the in-memory head id under the read lock
+    public String getHeadCommitId() throws Exception {
+        verifyInitialized();
+
+        headLock.readLock().lock();
+        try {
+            return headId;
+        } finally {
+            headLock.readLock().unlock();
+        }
+    }
+
+    public int getBlob(String blobId, long pos, byte[] buff, int off, int length) throws
NotFoundException, Exception {
+        verifyInitialized();
+
+        return readBlob(blobId, pos, buff, off, length);
+    }
+
+    public long getBlobLength(String blobId) throws NotFoundException, Exception {
+        verifyInitialized();
+
+        return blobLength(blobId);
+    }
+}

Added: jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/BDbPersistenceManager.java
URL: http://svn.apache.org/viewvc/jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/BDbPersistenceManager.java?rev=1206106&view=auto
==============================================================================
--- jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/BDbPersistenceManager.java
(added)
+++ jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/BDbPersistenceManager.java
Fri Nov 25 10:03:10 2011
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.mk.store;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import org.apache.jackrabbit.mk.blobs.BlobStore;
+import org.apache.jackrabbit.mk.blobs.FileBlobStore;
+import org.apache.jackrabbit.mk.model.Commit;
+import org.apache.jackrabbit.mk.model.Node;
+import org.apache.jackrabbit.mk.model.StoredCommit;
+import org.apache.jackrabbit.mk.model.StoredNode;
+import org.apache.jackrabbit.mk.store.util.Serializer;
+import org.apache.jackrabbit.mk.util.StringUtils;
+
+import java.io.File;
+import java.io.InputStream;
+import java.security.MessageDigest;
+
+/**
+ *
+ */
+// Berkeley DB JE backed persistence manager: revisions live in a content-
+// addressed "revs" database (key = SHA-1 of the serialized record), the head
+// commit id in a single-entry "head" database, blobs in a FileBlobStore.
+public class BDbPersistenceManager extends AbstractPersistenceManager {
+
+    // fixed single-byte key under which the head commit id is stored
+    private final static byte[] HEAD_ID = new byte[]{0};
+    private Environment dbEnv;
+    private Database db;
+    private Database head;
+    private BlobStore blobStore;
+
+    @Override
+    protected void doInitialize(File homeDir) throws Exception {
+        File dbDir = new File(homeDir, "db");
+        if (!dbDir.exists()) {
+            dbDir.mkdir();
+        }
+
+        blobStore = new FileBlobStore(new File(homeDir, "blobs").getCanonicalPath());
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        //envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        dbEnv = new Environment(dbDir, envConfig);
+
+        EnvironmentMutableConfig envMutableConfig = new EnvironmentMutableConfig();
+        //envMutableConfig.setDurability(Durability.COMMIT_SYNC);
+        //envMutableConfig.setDurability(Durability.COMMIT_NO_SYNC);
+        // write-no-sync: buffered writes, OS flush; trades durability for speed
+        envMutableConfig.setDurability(Durability.COMMIT_WRITE_NO_SYNC);
+        dbEnv.setMutableConfig(envMutableConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        //dbConfig.setDeferredWrite(true);
+        db = dbEnv.openDatabase(null, "revs", dbConfig);
+
+        head = dbEnv.openDatabase(null, "head", dbConfig);
+
+        // TODO FIXME workaround in case we're not closed properly
+        // NOTE(review): after a regular close(), this hook fires again and
+        // close() would throw IllegalStateException ("not initialized") from
+        // verifyInitialized() — confirm intended, or make the hook idempotent
+        Runtime.getRuntime().addShutdownHook(new Thread() {
+            public void run() {
+                close();
+            }
+        });
+    }
+
+    // NOTE(review): the "head" database is never closed here, only "revs" and
+    // the environment — verify this is acceptable for BDB JE shutdown
+    @Override
+    protected void doClose() {
+        try {
+            if (db.getConfig().getDeferredWrite()) {
+                db.sync();
+            }
+            db.close();
+            dbEnv.close();
+
+            db = null;
+            dbEnv = null;
+        } catch (Throwable t) {
+            t.printStackTrace();
+        }
+    }
+
+    // returns the stored head id as a hex string, or null if none persisted yet
+    @Override
+    protected String readHead() throws Exception {
+        DatabaseEntry key = new DatabaseEntry(HEAD_ID);
+        DatabaseEntry data = new DatabaseEntry();
+
+        if (head.get(null, key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
+            return StringUtils.convertBytesToHex(data.getData());
+        } else {
+            return null;
+        }
+    }
+
+    @Override
+    protected void writeHead(String id) throws Exception {
+        DatabaseEntry key = new DatabaseEntry(HEAD_ID);
+        DatabaseEntry data = new DatabaseEntry(StringUtils.convertHexToBytes(id));
+
+        head.put(null, key, data);
+    }
+
+    @Override
+    protected StoredNode readNode(String id) throws NotFoundException, Exception {
+        DatabaseEntry key = new DatabaseEntry(StringUtils.convertHexToBytes(id));
+        DatabaseEntry data = new DatabaseEntry();
+
+        if (db.get(null, key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
+            return Serializer.fromBytes(id, data.getData(), Node.class);
+        } else {
+            throw new NotFoundException(id);
+        }
+    }
+
+    @Override
+    protected String writeNode(Node node) throws Exception {
+        return persist(Serializer.toBytes(node));
+    }
+
+    @Override
+    protected StoredCommit readCommit(String id) throws NotFoundException, Exception {
+        DatabaseEntry key = new DatabaseEntry(StringUtils.convertHexToBytes(id));
+        DatabaseEntry data = new DatabaseEntry();
+
+        if (db.get(null, key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
+            return Serializer.fromBytes(id, data.getData(), Commit.class);
+        } else {
+            throw new NotFoundException(id);
+        }
+    }
+
+    @Override
+    protected String writeCommit(Commit commit) throws Exception {
+        return persist(Serializer.toBytes(commit));
+    }
+
+    @Override
+    protected String writeBlob(InputStream in) throws Exception {
+        return blobStore.writeBlob(in);
+    }
+
+    @Override
+    protected int readBlob(String blobId, long pos, byte[] buff, int off, int length) throws
NotFoundException, Exception {
+        return blobStore.readBlob(blobId, pos, buff, off, length);
+    }
+
+    @Override
+    protected long blobLength(String blobId) throws NotFoundException, Exception {
+        return blobStore.getBlobLength(blobId);
+    }
+
+    // content-addressed write: key is the SHA-1 of the serialized bytes, so
+    // identical records map to the same id and a re-put simply overwrites
+    protected String persist(byte[] bytes) throws Exception {
+        byte[] rawId = MessageDigest.getInstance("SHA-1").digest(bytes);
+        String id = StringUtils.convertBytesToHex(rawId);
+
+        DatabaseEntry key = new DatabaseEntry(rawId);
+        DatabaseEntry data = new DatabaseEntry(bytes);
+
+        db.put(null, key, data);
+
+        return id;
+    }
+}

Added: jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/FSPersistenceManager.java
URL: http://svn.apache.org/viewvc/jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/FSPersistenceManager.java?rev=1206106&view=auto
==============================================================================
--- jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/FSPersistenceManager.java
(added)
+++ jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/FSPersistenceManager.java
Fri Nov 25 10:03:10 2011
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.mk.store;
+
+import org.apache.jackrabbit.mk.blobs.BlobStore;
+import org.apache.jackrabbit.mk.blobs.FileBlobStore;
+import org.apache.jackrabbit.mk.model.Commit;
+import org.apache.jackrabbit.mk.model.Node;
+import org.apache.jackrabbit.mk.model.StoredCommit;
+import org.apache.jackrabbit.mk.model.StoredNode;
+import org.apache.jackrabbit.mk.store.util.Serializer;
+import org.apache.jackrabbit.mk.util.IOUtils;
+import org.apache.jackrabbit.mk.util.StringUtils;
+
+import java.io.BufferedInputStream;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.DigestOutputStream;
+import java.security.MessageDigest;
+
+/**
+ *
+ */
+// Filesystem backed persistence manager: revisions are stored as content-
+// addressed files under data/<first 2 hex chars>/<rest>, the head commit id
+// in a HEAD file, blobs in a FileBlobStore.
+public class FSPersistenceManager extends AbstractPersistenceManager {
+
+    private File dataDir;
+    private File head;
+    private BlobStore blobStore;
+
+    @Override
+    protected void doInitialize(File homeDir) throws Exception {
+        blobStore = new FileBlobStore(new File(homeDir, "blobs").getCanonicalPath());
+
+        dataDir = new File(homeDir, "data");
+        if (!dataDir.exists()) {
+            dataDir.mkdir();
+        }
+
+        head = new File(homeDir, "HEAD");
+        if (!head.exists()) {
+            // seed an empty HEAD so the base class bootstraps an initial commit
+            writeHead("");
+        }
+
+    }
+
+    // nothing to release: all file handles are opened and closed per operation
+    @Override
+    protected void doClose() {
+    }
+
+    @Override
+    protected String readHead() throws Exception {
+        FileInputStream in = new FileInputStream(head);
+        try {
+            return IOUtils.readString(in);
+        } finally {
+            in.close();
+        }
+    }
+
+    @Override
+    protected void writeHead(String id) throws Exception {
+        FileOutputStream out = new FileOutputStream(head);
+        try {
+            IOUtils.writeString(out, id);
+        } finally {
+            out.close();
+        }
+    }
+
+    // reads the whole file into memory before deserializing
+    @Override
+    protected StoredNode readNode(String id) throws NotFoundException, Exception {
+        File f = getFile(id);
+        if (f.exists()) {
+            BufferedInputStream in = new BufferedInputStream(new FileInputStream(f));
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            try {
+                IOUtils.copy(in, out);
+            } finally {
+                in.close();
+            }
+            return Serializer.fromBytes(id, out.toByteArray(), Node.class);
+        } else {
+            throw new NotFoundException(id);
+        }
+    }
+
+    @Override
+    protected String writeNode(Node node) throws Exception {
+        return writeFile(new ByteArrayInputStream(Serializer.toBytes(node)));
+    }
+
+    // same pattern as readNode(), deserializing a commit instead
+    @Override
+    protected StoredCommit readCommit(String id) throws NotFoundException, Exception {
+        File f = getFile(id);
+        if (f.exists()) {
+            BufferedInputStream in = new BufferedInputStream(new FileInputStream(f));
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            try {
+                IOUtils.copy(in, out);
+            } finally {
+                in.close();
+            }
+            return Serializer.fromBytes(id, out.toByteArray(), Commit.class);
+        } else {
+            throw new NotFoundException(id);
+        }
+    }
+
+    @Override
+    protected String writeCommit(Commit commit) throws Exception {
+        return writeFile(new ByteArrayInputStream(Serializer.toBytes(commit)));
+    }
+
+    @Override
+    protected String writeBlob(InputStream in) throws Exception {
+        return blobStore.writeBlob(in);
+    }
+
+    @Override
+    protected int readBlob(String blobId, long pos, byte[] buff, int off, int length) throws
NotFoundException, Exception {
+        return blobStore.readBlob(blobId, pos, buff, off, length);
+    }
+
+    @Override
+    protected long blobLength(String blobId) throws NotFoundException, Exception {
+        return blobStore.getBlobLength(blobId);
+    }
+
+    // maps an id to a two-level layout (first two hex chars as subdirectory)
+    // to keep per-directory fan-out bounded;
+    // NOTE(review): assumes id has at least 2 characters — shorter ids would
+    // throw StringIndexOutOfBoundsException; confirm ids are always SHA-1 hex
+    private File getFile(String id) {
+        StringBuilder buf = new StringBuilder(id.substring(0, 2));
+        buf.append('/');
+        buf.append(id.substring(2));
+        return new File(dataDir, buf.toString());
+    }
+
+    // streams the input to a temp file while computing its SHA-1, then moves
+    // the temp file to its content-addressed location; returns the hex id.
+    // NOTE(review): closes the caller's InputStream — confirm callers expect that
+    private String writeFile(InputStream in) throws Exception {
+        MessageDigest digest = MessageDigest.getInstance("SHA-1");
+
+        File tmp = File.createTempFile("tmp", null, dataDir);
+        try {
+            FileOutputStream fos = new FileOutputStream(tmp);
+
+            //OutputStream out = Channels.newOutputStream(fos.getChannel());
+            OutputStream out = fos;
+            DigestOutputStream dos = new DigestOutputStream(out, digest);
+
+            try {
+                int read;
+                byte[] buf = new byte[8192];
+                while ((read = in.read(buf, 0, buf.length)) != -1) {
+                    dos.write(buf, 0, read);
+                }
+            } finally {
+                dos.flush();
+                //fos.getChannel().force(true);
+                fos.close();
+                in.close();
+            }
+
+            String id = StringUtils.convertBytesToHex(digest.digest());
+            File dst = getFile(id);
+            if (dst.exists()) {
+                // already exists
+                return id;
+            }
+            // move tmp file
+            tmp.setReadOnly();
+            if (tmp.renameTo(dst)) {
+                return id;
+            }
+            // make sure parent dir exists and try again
+            dst.getParentFile().mkdir();
+            if (tmp.renameTo(dst)) {
+                return id;
+            }
+            throw new Exception("failed to create " + dst);
+        } finally {
+            // no-op after a successful rename; cleans up on the failure paths
+            tmp.delete();
+        }
+    }
+}

Added: jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/H2PersistenceManager.java
URL: http://svn.apache.org/viewvc/jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/H2PersistenceManager.java?rev=1206106&view=auto
==============================================================================
--- jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/H2PersistenceManager.java
(added)
+++ jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/H2PersistenceManager.java
Fri Nov 25 10:03:10 2011
@@ -0,0 +1,214 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.mk.store;
+
+import org.apache.jackrabbit.mk.blobs.DbBlobStore;
+import org.apache.jackrabbit.mk.model.Commit;
+import org.apache.jackrabbit.mk.model.Node;
+import org.apache.jackrabbit.mk.model.StoredCommit;
+import org.apache.jackrabbit.mk.model.StoredNode;
+import org.apache.jackrabbit.mk.store.util.Serializer;
+import org.apache.jackrabbit.mk.util.StringUtils;
+import org.h2.jdbcx.JdbcConnectionPool;
+
+import java.io.File;
+import java.io.InputStream;
+import java.security.MessageDigest;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+/**
+ *
+ */
+// H2 (embedded JDBC) backed persistence manager: revisions go into a content-
+// addressed REVS table (ID = SHA-1 of the serialized record), the head commit
+// id into a single-row HEAD table, blobs into a DbBlobStore on the same pool.
+public class H2PersistenceManager extends AbstractPersistenceManager {
+
+    // mk.fastDb system property disables the H2 transaction/undo logs
+    // (faster, but not crash-safe)
+    private static final boolean FAST = Boolean.getBoolean("mk.fastDb");
+
+    private JdbcConnectionPool cp;
+    private DbBlobStore blobStore;
+
+    @Override
+    protected void doInitialize(File homeDir) throws Exception {
+        File dbDir = new File(homeDir, "db");
+        if (!dbDir.exists()) {
+            dbDir.mkdir();
+        }
+
+        Class.forName("org.h2.Driver");
+        String url = "jdbc:h2:" + dbDir.getCanonicalPath() + "/revs";
+        if (FAST) {
+            url += ";log=0;undo_log=0";
+        }
+        cp = JdbcConnectionPool.create(url, "sa", "");
+        cp.setMaxConnections(40);
+        Connection con = cp.getConnection();
+        try {
+            Statement stmt = con.createStatement();
+            // REVS: content-addressed records; HEAD: single row seeded with '';
+            // datastore_id: sequence used by DbBlobStore
+            stmt.execute("create table if not exists REVS (ID binary primary key, DATA binary)");
+            stmt.execute("create table if not exists head(id varchar) as select ''");
+            stmt.execute("create sequence if not exists datastore_id");
+            blobStore = new DbBlobStore();
+            blobStore.setConnectionPool(cp);
+        } finally {
+            con.close();
+        }
+    }
+
+    @Override
+    protected void doClose() {
+        cp.dispose();
+    }
+
+    // reads the single row of the HEAD table; returns null if the row is absent.
+    // NOTE(review): stmt/rs are only closed on the success path (no finally) —
+    // confirm leak on exception is acceptable here
+    @Override
+    protected String readHead() throws Exception {
+        Connection con = cp.getConnection();
+        try {
+            PreparedStatement stmt = con.prepareStatement("select * from head");
+            ResultSet rs = stmt.executeQuery();
+            String headId = null;
+            if (rs.next()) {
+                headId = rs.getString(1);
+            }
+            stmt.close();
+            return headId;
+        } finally {
+            con.close();
+        }
+    }
+
+    // updates the single HEAD row (no WHERE clause needed: one row by construction)
+    @Override
+    protected void writeHead(String id) throws Exception {
+        Connection con = cp.getConnection();
+        try {
+            PreparedStatement stmt = con.prepareStatement("update head set id=?");
+            stmt.setString(1, id);
+            stmt.execute();
+            stmt.close();
+        } finally {
+            con.close();
+        }
+    }
+
+    @Override
+    protected StoredNode readNode(String id) throws NotFoundException, Exception {
+        Connection con = cp.getConnection();
+        try {
+            PreparedStatement stmt = con.prepareStatement("select DATA from REVS where ID
= ?");
+            try {
+                stmt.setBytes(1, StringUtils.convertHexToBytes(id));
+                ResultSet rs = stmt.executeQuery();
+                if (rs.next()) {
+                    return Serializer.fromBytes(id, rs.getBytes(1), Node.class);
+                } else {
+                    throw new NotFoundException(id);
+                }
+            } finally {
+                stmt.close();
+            }
+        } finally {
+            con.close();
+        }
+    }
+
+    // content-addressed insert-if-absent keyed by the SHA-1 of the bytes.
+    // NOTE(review): the not-exists check is not atomic under concurrency; a
+    // racing duplicate would hit the primary-key constraint instead — confirm.
+    // Also duplicates writeCommit() almost verbatim; candidate for a shared helper.
+    @Override
+    protected String writeNode(Node node) throws Exception {
+        byte[] bytes = Serializer.toBytes(node);
+        byte[] rawId = MessageDigest.getInstance("SHA-1").digest(bytes);
+        String id = StringUtils.convertBytesToHex(rawId);
+
+        Connection con = cp.getConnection();
+        try {
+            PreparedStatement stmt = con
+                    .prepareStatement(
+                            "insert into REVS (ID, DATA) select ?, ? where not exists (select
1 from revs where ID = ?)");
+            try {
+                stmt.setBytes(1, rawId);
+                stmt.setBytes(2, bytes);
+                stmt.setBytes(3, rawId);
+                stmt.executeUpdate();
+            } finally {
+                stmt.close();
+            }
+        } finally {
+            con.close();
+        }
+        return id;
+    }
+
+    // same lookup as readNode(), deserializing a commit instead
+    @Override
+    protected StoredCommit readCommit(String id) throws NotFoundException, Exception {
+        Connection con = cp.getConnection();
+        try {
+            PreparedStatement stmt = con.prepareStatement("select DATA from REVS where ID
= ?");
+            try {
+                stmt.setBytes(1, StringUtils.convertHexToBytes(id));
+                ResultSet rs = stmt.executeQuery();
+                if (rs.next()) {
+                    return Serializer.fromBytes(id, rs.getBytes(1), Commit.class);
+                } else {
+                    throw new NotFoundException(id);
+                }
+            } finally {
+                stmt.close();
+            }
+        } finally {
+            con.close();
+        }
+    }
+
+    // same insert-if-absent pattern as writeNode(); see the notes there
+    @Override
+    protected String writeCommit(Commit commit) throws Exception {
+        byte[] bytes = Serializer.toBytes(commit);
+        byte[] rawId = MessageDigest.getInstance("SHA-1").digest(bytes);
+        String id = StringUtils.convertBytesToHex(rawId);
+
+        Connection con = cp.getConnection();
+        try {
+            PreparedStatement stmt = con
+                    .prepareStatement(
+                            "insert into REVS (ID, DATA) select ?, ? where not exists (select
1 from revs where ID = ?)");
+            try {
+                stmt.setBytes(1, rawId);
+                stmt.setBytes(2, bytes);
+                stmt.setBytes(3, rawId);
+                stmt.executeUpdate();
+            } finally {
+                stmt.close();
+            }
+        } finally {
+            con.close();
+        }
+        return id;
+    }
+
+    @Override
+    protected String writeBlob(InputStream in) throws Exception {
+        return blobStore.writeBlob(in);
+    }
+
+    @Override
+    protected int readBlob(String blobId, long pos, byte[] buff, int off, int length) throws
NotFoundException, Exception {
+        return blobStore.readBlob(blobId, pos, buff, off, length);
+    }
+
+    @Override
+    protected long blobLength(String blobId) throws NotFoundException, Exception {
+        return blobStore.getBlobLength(blobId);
+    }
+}

Added: jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/MongoPersistenceManager.java
URL: http://svn.apache.org/viewvc/jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/MongoPersistenceManager.java?rev=1206106&view=auto
==============================================================================
--- jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/MongoPersistenceManager.java
(added)
+++ jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/MongoPersistenceManager.java
Fri Nov 25 10:03:10 2011
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.mk.store;
+
+import com.mongodb.BasicDBObject;
+import com.mongodb.DB;
+import com.mongodb.DBCollection;
+import com.mongodb.DBObject;
+import com.mongodb.Mongo;
+import com.mongodb.MongoException;
+import com.mongodb.WriteConcern;
+import com.mongodb.gridfs.GridFS;
+import com.mongodb.gridfs.GridFSDBFile;
+import com.mongodb.gridfs.GridFSInputFile;
+import org.apache.jackrabbit.mk.model.Commit;
+import org.apache.jackrabbit.mk.model.Node;
+import org.apache.jackrabbit.mk.model.StoredCommit;
+import org.apache.jackrabbit.mk.model.StoredNode;
+import org.apache.jackrabbit.mk.store.util.Serializer;
+import org.apache.jackrabbit.mk.util.IOUtils;
+import org.apache.jackrabbit.mk.util.StringUtils;
+import org.bson.types.ObjectId;
+
+import java.io.File;
+import java.io.InputStream;
+import java.security.MessageDigest;
+
+/**
+ *
+ */
+public class MongoPersistenceManager extends AbstractPersistenceManager {
+
+    private static String HEAD_COLLECTION = "head";
+    private static String NODES_COLLECTION = "nodes";
+    private static String COMMITS_COLLECTION = "commits";
+    private static String ID_FIELD = "id";
+    private static String DATA_FIELD = "data";
+
+    private Mongo con;
+    private DB db;
+    private DBCollection nodes;
+    private DBCollection commits;
+    private GridFS fs;
+
+    @Override
+    protected void doInitialize(File homeDir) throws Exception {
+        con = new Mongo();
+        //con = new Mongo("localhost", 27017);
+
+        db = con.getDB("mk");
+        db.setWriteConcern(WriteConcern.SAFE);
+
+        if (!db.collectionExists(HEAD_COLLECTION)) {
+            // capped collection of size 1
+            db.createCollection(HEAD_COLLECTION, new BasicDBObject("capped", true).append("size",
256).append("max", 1));
+        }
+
+        nodes = db.getCollection(NODES_COLLECTION);
+        nodes.ensureIndex(
+                new BasicDBObject(ID_FIELD, 1),
+                new BasicDBObject("unique", true));
+
+        commits = db.getCollection(COMMITS_COLLECTION);
+        commits.ensureIndex(
+                new BasicDBObject(ID_FIELD, 1),
+                new BasicDBObject("unique", true));
+
+        fs = new GridFS(db);
+    }
+
+    @Override
+    protected void doClose() {
+        con.close();
+        con = null;
+        db = null;
+    }
+
+    @Override
+    protected String readHead() throws Exception {
+        DBObject entry = db.getCollection(HEAD_COLLECTION).findOne();
+        if (entry == null) {
+            return null;
+        }
+        return (String) entry.get(ID_FIELD);
+    }
+
+    @Override
+    protected void writeHead(String id) throws Exception {
+        // capped collection of size 1
+        db.getCollection(HEAD_COLLECTION).insert(new BasicDBObject(ID_FIELD, id));
+    }
+
+    @Override
+    protected StoredNode readNode(String id) throws NotFoundException, Exception {
+        BasicDBObject key = new BasicDBObject();
+        //key.put(ID_FIELD, StringUtils.convertHexToBytes(id));
+        key.put(ID_FIELD, id);
+        DBObject nodeObject = nodes.findOne(key);
+        if (nodeObject != null) {
+            // todo use json serialization format
+            byte[] bytes = (byte[]) nodeObject.get(DATA_FIELD);
+            return Serializer.fromBytes(id, bytes, Node.class);
+        } else {
+            throw new NotFoundException(id);
+        }
+    }
+
+    @Override
+    protected String writeNode(Node node) throws Exception {
+        byte[] bytes = Serializer.toBytes(node);
+        byte[] key = MessageDigest.getInstance("SHA-1").digest(bytes);
+        String id = StringUtils.convertBytesToHex(key);
+        //BasicDBObject nodeObject = new BasicDBObject(ID_FIELD, key).append(DATA_FIELD,
bytes);
+        // todo use json serialization format
+        BasicDBObject nodeObject = new BasicDBObject(ID_FIELD, id).append(DATA_FIELD, bytes);
+        try {
+            nodes.insert(nodeObject);
+        } catch (MongoException.DuplicateKey ignore) {
+            // fall through
+        }
+
+        return id;
+    }
+
+    @Override
+    protected StoredCommit readCommit(String id) throws NotFoundException, Exception {
+        BasicDBObject key = new BasicDBObject();
+        //key.put(ID_FIELD, StringUtils.convertHexToBytes(id));
+        key.put(ID_FIELD, id);
+        DBObject commitObject = commits.findOne(key);
+        if (commitObject != null) {
+            // todo use json serialization format
+            byte[] bytes = (byte[]) commitObject.get(DATA_FIELD);
+            return Serializer.fromBytes(id, bytes, Commit.class);
+        } else {
+            throw new NotFoundException(id);
+        }
+    }
+
+    @Override
+    protected String writeCommit(Commit commit) throws Exception {
+        byte[] bytes = Serializer.toBytes(commit);
+        byte[] key = MessageDigest.getInstance("SHA-1").digest(bytes);
+        String id = StringUtils.convertBytesToHex(key);
+        //BasicDBObject commitObject = new BasicDBObject(ID_FIELD, key).append(DATA_FIELD,
bytes);
+        // todo use json serialization format
+        BasicDBObject commitObject = new BasicDBObject(ID_FIELD, id).append(DATA_FIELD, bytes);
+        try {
+            commits.insert(commitObject);
+        } catch (MongoException.DuplicateKey ignore) {
+            // fall through
+        }
+
+        return id;
+    }
+
+    @Override
+    protected String writeBlob(InputStream in) throws Exception {
+        GridFSInputFile f = fs.createFile(in, true);
+        //f.save(0x20000);   // save in 128k chunks
+        f.save();
+
+        return f.getId().toString();
+    }
+
+    @Override
+    protected int readBlob(String blobId, long pos, byte[] buff, int off, int length) throws
NotFoundException, Exception {
+        GridFSDBFile f = fs.findOne(new ObjectId(blobId));
+        if (f == null) {
+            throw new NotFoundException(blobId);
+        }
+        // todo provide a more efficient implementation
+        InputStream in = f.getInputStream();
+        try {
+            in.skip(pos);
+            return in.read(buff, off, length);
+        } finally {
+            IOUtils.closeQuietly(in);
+        }
+    }
+
+    @Override
+    protected long blobLength(String blobId) throws NotFoundException, Exception {
+        GridFSDBFile f = fs.findOne(new ObjectId(blobId));
+        if (f == null) {
+            throw new NotFoundException(blobId);
+        }
+
+        return f.getLength();
+    }
+}

Added: jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/RevisionProvider.java
URL: http://svn.apache.org/viewvc/jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/RevisionProvider.java?rev=1206106&view=auto
==============================================================================
--- jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/RevisionProvider.java
(added)
+++ jackrabbit/sandbox/microkernel/src/main/java/org/apache/jackrabbit/mk/store/RevisionProvider.java
Fri Nov 25 10:03:10 2011
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.mk.store;
+
+import org.apache.jackrabbit.mk.model.StoredCommit;
+import org.apache.jackrabbit.mk.model.StoredNode;
+
/**
 * Read access to the revision store: resolves nodes, commits and blobs by
 * their ids and exposes the current head revision.
 */
public interface RevisionProvider {

    /**
     * Returns the node with the given id.
     *
     * @param id node id
     * @throws NotFoundException if no node with the given id exists
     */
    StoredNode getNode(String id) throws NotFoundException, Exception;

    /**
     * Returns the commit with the given id.
     *
     * @param id commit id
     * @throws NotFoundException if no commit with the given id exists
     */
    StoredCommit getCommit(String id) throws NotFoundException, Exception;

    /**
     * Returns the root node of the tree referenced by the given commit.
     *
     * @param commitId commit id
     * @throws NotFoundException if the commit or its root node does not exist
     */
    StoredNode getRootNode(String commitId) throws NotFoundException, Exception;

    /**
     * Returns the current head commit.
     */
    StoredCommit getHeadCommit() throws Exception;

    /**
     * Returns the id of the current head commit.
     */
    String getHeadCommitId() throws Exception;

    /**
     * Reads a segment of the blob with the given id into {@code buff}.
     *
     * @param blobId blob id
     * @param pos    offset within the blob to start reading at
     * @param buff   destination buffer
     * @param off    offset in {@code buff} to start writing at
     * @param length maximum number of bytes to read
     * @return implementation-defined read count (presumably bytes read —
     *         confirm against the implementations)
     * @throws NotFoundException if no blob with the given id exists
     */
    int getBlob(String blobId, long pos, byte[] buff, int off, int length) throws NotFoundException, Exception;

    /**
     * Returns the length in bytes of the blob with the given id.
     *
     * @param blobId blob id
     * @throws NotFoundException if no blob with the given id exists
     */
    long getBlobLength(String blobId) throws NotFoundException, Exception;
}



Mime
View raw message