jackrabbit-oak-commits mailing list archives

From: ang...@apache.org
Subject: svn commit: r1576270 - in /jackrabbit/oak/trunk/oak-run: ./ src/main/java/org/apache/jackrabbit/oak/fixture/ src/main/java/org/apache/jackrabbit/oak/run/
Date: Tue, 11 Mar 2014 10:43:51 GMT
Author: angela
Date: Tue Mar 11 10:43:50 2014
New Revision: 1576270

URL: http://svn.apache.org/r1576270
Log:
OAK-1426 : Cleanup options in oak-run

- enhance 'server' mode so that it can be run with different repository implementations -> see benchmark fixtures

Added:
    jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java   (with props)
Modified:
    jackrabbit/oak/trunk/oak-run/README.md
    jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakRepositoryFixture.java
    jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Main.java

Modified: jackrabbit/oak/trunk/oak-run/README.md
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-run/README.md?rev=1576270&r1=1576269&r2=1576270&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-run/README.md (original)
+++ jackrabbit/oak/trunk/oak-run/README.md Tue Mar 11 10:43:50 2014
@@ -47,12 +47,44 @@ The Oak server mode starts a full Oak in
 and makes it available over a simple HTTP mapping defined in the `oak-http`
 component. To start this mode, use:
 
-    $ java -jar oak-run-*.jar [/path/to/mk...]
+    $ java -jar oak-run-*.jar server [uri] [fixture] [options]
 
 If no arguments are specified, the command starts an in-memory repository
-and makes it available at http://localhost:8080/. Possible path arguments
-specify the locations of on-disk MicroKernel backends that are each opened
-and mapped to URLs under http://localhost:8080/.
+and makes it available at http://localhost:8080/. Specify a `uri` and a
+`fixture` argument to change the host name and port and to use a different
+repository backend.
+
+The optional fixture argument specifies the repository implementation to be
+used. The following fixtures are currently supported:
+
+| Fixture     | Description                                           |
+|-------------|-------------------------------------------------------|
+| Jackrabbit  | Jackrabbit with the default embedded Derby  bundle PM |
+| Oak-Memory  | Oak with default in-memory storage                    |
+| Oak-MemoryNS| Oak with default in-memory NodeStore                  |
+| Oak-MemoryMK| Oak with default in-memory MicroKernel                |
+| Oak-Mongo   | Oak with the default Mongo backend                    |
+| Oak-MongoNS | Oak with the Mongo NodeStore                          |
+| Oak-MongoMK | Oak with the Mongo MicroKernel                        |
+| Oak-Tar     | Oak with the Tar backend (aka Segment NodeStore)      |
+| Oak-H2      | Oak with the MK using embedded H2 database            |
+
+
+Depending on the fixture, the following options are available:
+
+    --cache 100            - cache size (in MB)
+    --host localhost       - MongoDB host
+    --port 27017           - MongoDB port
+    --db <name>            - MongoDB database (default is a generated name)
+    --clusterIds           - Cluster Ids for the Mongo setup: a comma separated list of integers
+    --base <file>          - Tar and H2: Path to the base file
+    --mmap <64bit?>        - TarMK memory mapping (the default on 64 bit JVMs)
+
+Examples:
+
+    $ java -jar oak-run-*.jar server
+    $ java -jar oak-run-*.jar server http://localhost:4503 Oak-Tar --base myOak
+    $ java -jar oak-run-*.jar server http://localhost:4502 Oak-Mongo --db myOak --clusterIds 1,2,3
 
 See the documentation in the `oak-http` component for details about the
 available functionality.
@@ -72,6 +104,7 @@ The following benchmark options (with de
     --port 27017           - MongoDB port
     --db <name>            - MongoDB database (default is a generated name)
     --dropDBAfterTest true - Whether to drop the MongoDB database after the test
+    --base target          - Path to the base file (Tar and H2 setup)
     --mmap <64bit?>        - TarMK memory mapping (the default on 64 bit JVMs)
     --cache 100            - cache size (in MB)
     --wikipedia <file>     - Wikipedia dump
@@ -122,8 +155,12 @@ Finally the benchmark runner supports th
 | Fixture     | Description                                           |
 |-------------|-------------------------------------------------------|
 | Jackrabbit  | Jackrabbit with the default embedded Derby  bundle PM |
-| Oak-Memory  | Oak with in-memory storage                            |
-| Oak-Mongo   | Oak with the Mongo backend                            |
+| Oak-Memory  | Oak with default in-memory storage                    |
+| Oak-MemoryNS| Oak with default in-memory NodeStore                  |
+| Oak-MemoryMK| Oak with default in-memory MicroKernel                |
+| Oak-Mongo   | Oak with the default Mongo backend                    |
+| Oak-MongoNS | Oak with the Mongo NodeStore                          |
+| Oak-MongoMK | Oak with the Mongo MicroKernel                        |
 | Oak-Tar     | Oak with the Tar backend (aka Segment NodeStore)      |
 | Oak-H2      | Oak with the MK using embedded H2 database            |
 

Added: jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java?rev=1576270&view=auto
==============================================================================
--- jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java (added)
+++ jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java Tue Mar 11 10:43:50 2014
@@ -0,0 +1,291 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.fixture;
+
+import java.io.File;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.jackrabbit.mk.api.MicroKernel;
+import org.apache.jackrabbit.mk.core.MicroKernelImpl;
+import org.apache.jackrabbit.oak.Oak;
+import org.apache.jackrabbit.oak.kernel.KernelNodeStore;
+import org.apache.jackrabbit.oak.plugins.blob.BlobStoreConfiguration;
+import org.apache.jackrabbit.oak.plugins.blob.BlobStoreHelper;
+import org.apache.jackrabbit.oak.plugins.blob.cloud.CloudBlobStore;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
+import org.apache.jackrabbit.oak.plugins.document.DocumentMK;
+import org.apache.jackrabbit.oak.plugins.document.util.MongoConnection;
+import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore;
+import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
+import org.apache.jackrabbit.oak.plugins.segment.SegmentStore;
+import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
+import org.apache.jackrabbit.oak.spi.blob.BlobStore;
+
+public abstract class OakFixture {
+
+    public static final String OAK_MEMORY = "Oak-Memory";
+    public static final String OAK_MEMORY_NS = "Oak-MemoryNS";
+    public static final String OAK_MEMORY_MK = "Oak-MemoryMK";
+
+    public static final String OAK_MONGO = "Oak-Mongo";
+    public static final String OAK_MONGO_NS = "Oak-MongoNS";
+    public static final String OAK_MONGO_MK = "Oak-MongoMK";
+
+    public static final String OAK_H2 = "Oak-H2";
+    public static final String OAK_TAR = "Oak-Tar";
+
+
+    private final String name;
+    protected final String unique;
+
+    protected OakFixture(String name) {
+        this.name = name;
+        this.unique = String.format("%s-%d", name, System.currentTimeMillis());
+    }
+
+    public abstract Oak getOak(int clusterId) throws Exception;
+    public abstract Oak[] setUpCluster(int n) throws Exception;
+    public abstract void tearDownCluster();
+
+    @Override
+    public String toString() {
+        return name;
+    }
+
+    public static OakFixture getMemory(long cacheSize) {
+        return getMemory(OAK_MEMORY, false, cacheSize);
+    }
+
+    public static OakFixture getMemoryNS(long cacheSize) {
+        return getMemory(OAK_MEMORY_NS, false, cacheSize);
+    }
+
+    public static OakFixture getMemoryMK(long cacheSize) {
+        return getMemory(OAK_MEMORY_MK, true, cacheSize);
+    }
+
+    public static OakFixture getMemory(String name, final boolean useMk, final long cacheSize) {
+        return new OakFixture(name) {
+            @Override
+            public Oak getOak(int clusterId) throws Exception {
+                Oak oak;
+                if (useMk) {
+                    MicroKernel kernel = new MicroKernelImpl();
+                    oak = new Oak(new KernelNodeStore(kernel, cacheSize));
+                } else {
+                    oak = new Oak(new MemoryNodeStore());
+                }
+                return oak;
+            }
+
+            @Override
+            public Oak[] setUpCluster(int n) throws Exception {
+                Oak[] cluster = new Oak[n];
+                for (int i = 0; i < cluster.length; i++) {
+                    Oak oak;
+                    if (useMk) {
+                        MicroKernel kernel = new MicroKernelImpl();
+                        oak = new Oak(new KernelNodeStore(kernel, cacheSize));
+                    } else {
+                        oak = new Oak(new MemoryNodeStore());
+                    }
+                    cluster[i] = oak;
+                }
+                return cluster;
+            }
+
+            @Override
+            public void tearDownCluster() {
+                // nothing to do
+            }
+        };
+    }
+
+    public static OakFixture getMongo(String host, int port, String database,
+                                      boolean dropDBAfterTest, long cacheSize) {
+        return getMongo(OAK_MONGO, false, host, port, database,
+                dropDBAfterTest, cacheSize);
+    }
+
+    public static OakFixture getMongoMK(String host, int port, String database,
+                                        boolean dropDBAfterTest, long cacheSize) {
+        return getMongo(OAK_MONGO_MK, true, host, port, database,
+                dropDBAfterTest, cacheSize);
+    }
+
+    public static OakFixture getMongoNS(String host, int port, String database,
+                                        boolean dropDBAfterTest, long cacheSize) {
+        return getMongo(OAK_MONGO_NS, false, host, port, database,
+                dropDBAfterTest, cacheSize);
+    }
+
+    public static OakFixture getMongo(String name, final boolean useMk, final String host,
+                                      final int port, final String database,
+                                      final boolean dropDBAfterTest, final long cacheSize) {
+        return new OakFixture(name) {
+            private String dbName = database != null ? database : unique;
+            private DocumentMK[] kernels;
+            private BlobStore blobStore;
+
+            private BlobStore getBlobStore() {
+                BlobStoreConfiguration config =
+                        BlobStoreConfiguration.newInstance().loadFromSystemProps();
+                try {
+                    blobStore = BlobStoreHelper.create(config).orNull();
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+
+                return blobStore;
+            }
+
+            @Override
+            public Oak getOak(int clusterId) throws Exception {
+                MongoConnection mongo = new MongoConnection(host, port, dbName);
+                BlobStore blobStore = getBlobStore();
+                DocumentMK.Builder mkBuilder = new DocumentMK.Builder().
+                        setMongoDB(mongo.getDB()).
+                        memoryCacheSize(cacheSize).
+                        setClusterId(clusterId).setLogging(false);
+                if (blobStore != null) {
+                    mkBuilder.setBlobStore(blobStore);
+                }
+                DocumentMK dmk = mkBuilder.open();
+                Oak oak;
+                if (useMk) {
+                    oak = new Oak(new KernelNodeStore(dmk, cacheSize));
+                } else {
+                    oak = new Oak(dmk.getNodeStore());
+                }
+                return oak;
+            }
+
+            @Override
+            public Oak[] setUpCluster(int n) throws Exception {
+                Oak[] cluster = new Oak[n];
+                kernels = new DocumentMK[cluster.length];
+                for (int i = 0; i < cluster.length; i++) {
+                    MongoConnection mongo = new MongoConnection(host, port, dbName);
+                    BlobStore blobStore = getBlobStore();
+                    DocumentMK.Builder mkBuilder = new DocumentMK.Builder().
+                            setMongoDB(mongo.getDB()).
+                            memoryCacheSize(cacheSize).
+                            setClusterId(i).setLogging(false);
+                    if (blobStore != null) {
+                        mkBuilder.setBlobStore(blobStore);
+                    }
+                    kernels[i] = mkBuilder.open();
+                    Oak oak;
+                    if (useMk) {
+                        oak = new Oak(new KernelNodeStore(kernels[i], cacheSize));
+                    } else {
+                        oak = new Oak(kernels[i].getNodeStore());
+                    }
+                    cluster[i] = oak;
+                }
+                return cluster;
+            }
+
+            @Override
+            public void tearDownCluster() {
+                for (DocumentMK kernel : kernels) {
+                    kernel.dispose();
+                }
+                if (dropDBAfterTest) {
+                    try {
+                        MongoConnection mongo =
+                                new MongoConnection(host, port, dbName);
+                        mongo.getDB().dropDatabase();
+                        mongo.close();
+                        if (blobStore instanceof CloudBlobStore) {
+                            ((CloudBlobStore) blobStore).deleteBucket();
+                        } else if (blobStore instanceof DataStoreBlobStore) {
+                            ((DataStoreBlobStore) blobStore).clearInUse();
+                            ((DataStoreBlobStore) blobStore).deleteAllOlderThan(
+                                    System.currentTimeMillis() + 10000000);
+                        }
+                    } catch (Exception e) {
+                        throw new RuntimeException(e);
+                    }
+                }
+            }
+        };
+    }
+
+    public static OakFixture getTar(
+            final File base, final int maxFileSizeMB, final int cacheSizeMB,
+            final boolean memoryMapping) {
+        return new OakFixture(OAK_TAR) {
+            private SegmentStore[] stores;
+
+            @Override
+            public Oak getOak(int clusterId) throws Exception {
+                FileStore fs = new FileStore(base, maxFileSizeMB, cacheSizeMB, memoryMapping);
+                return new Oak(new SegmentNodeStore(fs));
+            }
+
+            @Override
+            public Oak[] setUpCluster(int n) throws Exception {
+                Oak[] cluster = new Oak[n];
+                stores = new FileStore[cluster.length];
+                for (int i = 0; i < cluster.length; i++) {
+                    stores[i] = new FileStore(
+                            new File(base, unique),
+                            maxFileSizeMB, cacheSizeMB, memoryMapping);
+                    cluster[i] = new Oak(new SegmentNodeStore(stores[i]));
+                }
+                return cluster;
+            }
+            @Override
+            public void tearDownCluster() {
+                for (SegmentStore store : stores) {
+                    store.close();
+                }
+                FileUtils.deleteQuietly(new File(base, unique));
+            }
+        };
+    }
+
+    public static OakFixture getH2MK(final File base, final long cacheSize) {
+        return new OakFixture(OAK_H2) {
+            private MicroKernelImpl[] kernels;
+
+            @Override
+            public Oak getOak(int clusterId) throws Exception {
+                return new Oak(new KernelNodeStore(new MicroKernelImpl(base.getPath()), cacheSize));
+            }
+
+            @Override
+            public Oak[] setUpCluster(int n) throws Exception {
+                Oak[] cluster = new Oak[n];
+                kernels = new MicroKernelImpl[cluster.length];
+                for (int i = 0; i < cluster.length; i++) {
+                    kernels[i] = new MicroKernelImpl(new File(base, unique).getPath());
+                    cluster[i] = new Oak(new KernelNodeStore(kernels[i], cacheSize));
+                }
+                return cluster;
+            }
+            @Override
+            public void tearDownCluster() {
+                for (MicroKernelImpl kernel : kernels) {
+                    kernel.dispose();
+                }
+                FileUtils.deleteQuietly(new File(base, unique));
+            }
+        };
+    }
+}
\ No newline at end of file
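
For readers skimming the commit, here is a minimal usage sketch of the new OakFixture API above. It is not part of the commit; the OakFixtureExample class name and the 256 MB cache size are illustrative assumptions, while the factory methods and lifecycle calls come from the class as added.

    import org.apache.jackrabbit.oak.Oak;
    import org.apache.jackrabbit.oak.fixture.OakFixture;

    public class OakFixtureExample {

        public static void main(String[] args) throws Exception {
            long cacheSize = 256 * 1024 * 1024; // illustrative cache size in bytes

            // In-memory fixture, one of the factory methods defined above.
            OakFixture fixture = OakFixture.getMemory(cacheSize);

            // Build a two-node cluster; for the memory fixture these are independent stores.
            Oak[] cluster = fixture.setUpCluster(2);
            try {
                // Each Oak instance could now be wrapped with new Jcr(oak).createRepository(),
                // which is what OakRepositoryFixture and Main do in this commit.
                System.out.println("Prepared " + cluster.length + " Oak instances from " + fixture);
            } finally {
                // Release fixture resources (a no-op for the memory fixture).
                fixture.tearDownCluster();
            }
        }
    }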

Propchange: jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakRepositoryFixture.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakRepositoryFixture.java?rev=1576270&r1=1576269&r2=1576270&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakRepositoryFixture.java (original)
+++ jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakRepositoryFixture.java Tue Mar 11 10:43:50 2014
@@ -17,224 +17,64 @@
 package org.apache.jackrabbit.oak.fixture;
 
 import java.io.File;
-
 import javax.jcr.Repository;
 
-import org.apache.commons.io.FileUtils;
 import org.apache.jackrabbit.api.JackrabbitRepository;
-import org.apache.jackrabbit.mk.api.MicroKernel;
-import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore;
-import org.apache.jackrabbit.oak.spi.blob.BlobStore;
-import org.apache.jackrabbit.mk.core.MicroKernelImpl;
 import org.apache.jackrabbit.oak.Oak;
 import org.apache.jackrabbit.oak.jcr.Jcr;
-import org.apache.jackrabbit.oak.kernel.KernelNodeStore;
-import org.apache.jackrabbit.oak.plugins.blob.BlobStoreConfiguration;
-import org.apache.jackrabbit.oak.plugins.blob.BlobStoreHelper;
-import org.apache.jackrabbit.oak.plugins.blob.cloud.CloudBlobStore;
-import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
-import org.apache.jackrabbit.oak.plugins.document.DocumentMK;
-import org.apache.jackrabbit.oak.plugins.document.util.MongoConnection;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentStore;
-import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
-
-public abstract class OakRepositoryFixture implements RepositoryFixture {
-
-    public static RepositoryFixture getMemory(final long cacheSize) {
-        return getMemory("Oak-Memory", false, cacheSize);
-    }
-
-    public static RepositoryFixture getMemoryNS(final long cacheSize) {
-        return getMemory("Oak-MemoryNS", false, cacheSize);
-    }
-
-    public static RepositoryFixture getMemoryMK(final long cacheSize) {
-        return getMemory("Oak-MemoryMK", true, cacheSize);
-    }
-
-    private static RepositoryFixture getMemory(String name, final boolean useMK, final long cacheSize) {
-        return new OakRepositoryFixture(name) {
-            @Override
-            protected Repository[] internalSetUpCluster(int n) throws Exception {
-                Repository[] cluster = new Repository[n];
-                for (int i = 0; i < cluster.length; i++) {
-                    Oak oak;
-                    if (useMK) {
-                        MicroKernel kernel = new MicroKernelImpl();
-                        oak = new Oak(new KernelNodeStore(kernel, cacheSize));
-                    } else {
-                        oak = new Oak(new MemoryNodeStore());
-                    }
-                    cluster[i] = new Jcr(oak).createRepository();
-                }
-                return cluster;
-            }
-        };
+
+public class OakRepositoryFixture implements RepositoryFixture {
+
+    public static RepositoryFixture getMemory(long cacheSize) {
+        return getMemory(OakFixture.OAK_MEMORY, false, cacheSize);
     }
 
-    public static RepositoryFixture getH2MK(
-            final File base, final long cacheSize) {
-        return new OakRepositoryFixture("Oak-H2") {
-            private MicroKernelImpl[] kernels;
-            @Override
-            protected Repository[] internalSetUpCluster(int n) throws Exception {
-                Repository[] cluster = new Repository[n];
-                kernels = new MicroKernelImpl[cluster.length];
-                for (int i = 0; i < cluster.length; i++) {
-                    kernels[i] = new MicroKernelImpl(
-                            new File(base, unique).getPath());
-                    Oak oak = new Oak(new KernelNodeStore(kernels[i], cacheSize));
-                    cluster[i] = new Jcr(oak).createRepository();
-                }
-                return cluster;
-            }
-            @Override
-            public void tearDownCluster() {
-                super.tearDownCluster();
-                for (MicroKernelImpl kernel : kernels) {
-                    kernel.dispose();
-                }
-                FileUtils.deleteQuietly(new File(base, unique));
-            }
-        };
+    public static RepositoryFixture getMemoryNS(long cacheSize) {
+        return getMemory(OakFixture.OAK_MEMORY_NS, false, cacheSize);
     }
 
-    public static RepositoryFixture getMongo(
-            final String host, final int port, final String database,
-            final boolean dropDBAfterTest, final long cacheSize) {
-        return getMongo("Oak-Mongo", false, host, port, database,
-                dropDBAfterTest, cacheSize);
-    }
-
-    public static RepositoryFixture getMongoMK(
-            final String host, final int port, final String database,
-            final boolean dropDBAfterTest, final long cacheSize) {
-        return getMongo("Oak-MongoMK", true, host, port, database,
-                dropDBAfterTest, cacheSize);
-    }
-
-    public static RepositoryFixture getMongoNS(
-            final String host, final int port, final String database,
-            final boolean dropDBAfterTest, final long cacheSize) {
-        return getMongo("Oak-MongoNS", false, host, port, database,
-                dropDBAfterTest, cacheSize);
-    }
-
-    private static RepositoryFixture getMongo(String name, final boolean useMK,
-            final String host, final int port, final String database,
-            final boolean dropDBAfterTest, final long cacheSize) {
-
-        return new OakRepositoryFixture(name) {
-            private String dbName = database != null ? database : unique;
-            private DocumentMK[] kernels;
-            private BlobStore blobStore;
-
-            private BlobStore getBlobStore() {
-                BlobStoreConfiguration config =
-                        BlobStoreConfiguration.newInstance().loadFromSystemProps();
-                try {
-                    blobStore =
-                            BlobStoreHelper.create(config).orNull();
-                } catch (Exception e) {
-                    throw new RuntimeException(e);
-                }
+    public static RepositoryFixture getMemoryMK(long cacheSize) {
+        return getMemory(OakFixture.OAK_MEMORY_MK, true, cacheSize);
+    }
 
-                return blobStore;
-            }
+    private static RepositoryFixture getMemory(String name, boolean useMK, long cacheSize) {
+        return new OakRepositoryFixture(OakFixture.getMemory(name, useMK, cacheSize));
+    }
 
-            @Override
-            protected Repository[] internalSetUpCluster(int n) throws Exception {
-                Repository[] cluster = new Repository[n];
-                kernels = new DocumentMK[cluster.length];
-                for (int i = 0; i < cluster.length; i++) {
-                    MongoConnection mongo =
-                            new MongoConnection(host, port, dbName);
-                    BlobStore blobStore = getBlobStore();
-                    DocumentMK.Builder mkBuilder = new DocumentMK.Builder().
-                            setMongoDB(mongo.getDB()).
-                            memoryCacheSize(cacheSize).
-                            setClusterId(i).setLogging(false);
-                    if (blobStore != null) {
-                            mkBuilder.setBlobStore(blobStore);
-                    }
-                    kernels[i] = mkBuilder.open();
-                    Oak oak;
-                    if (useMK) {
-                        oak = new Oak(new KernelNodeStore(kernels[i], cacheSize));
-                    } else {
-                        oak = new Oak(kernels[i].getNodeStore());
-                    }
-                    cluster[i] = new Jcr(oak).createRepository();
-                }
-                return cluster;
-            }
+    public static RepositoryFixture getH2MK(File base, long cacheSize) {
+        return new OakRepositoryFixture(OakFixture.getH2MK(base, cacheSize));
+    }
 
-            @Override
-            public void tearDownCluster() {
-                super.tearDownCluster();
-                for (DocumentMK kernel : kernels) {
-                    kernel.dispose();
-                }
-                if (dropDBAfterTest) {
-                    try {
-                        MongoConnection mongo =
-                                new MongoConnection(host, port, dbName);
-                        mongo.getDB().dropDatabase();
-                        mongo.close();
-                        if (blobStore instanceof CloudBlobStore) {
-                            ((CloudBlobStore) blobStore).deleteBucket();
-                        } else if (blobStore instanceof DataStoreBlobStore) {
-                            ((DataStoreBlobStore) blobStore).clearInUse();
-                            ((DataStoreBlobStore) blobStore).deleteAllOlderThan(
-                                    System.currentTimeMillis() + 10000000);
-                        }
-                    } catch (Exception e) {
-                        throw new RuntimeException(e);
-                    }
-                }
-            }
-        };
+    public static RepositoryFixture getMongo(String host, int port, String database,
+                                             boolean dropDBAfterTest, long cacheSize) {
+        return getMongo(OakFixture.OAK_MONGO, false, host, port, database, dropDBAfterTest, cacheSize);
     }
 
-    public static RepositoryFixture getTar(
-            final File base, final int maxFileSizeMB, final int cacheSizeMB,
-            final boolean memoryMapping) {
-        return new OakRepositoryFixture("Oak-Tar") {
-            private SegmentStore[] stores;
-            @Override
-            protected Repository[] internalSetUpCluster(int n) throws Exception {
-                Repository[] cluster = new Repository[n];
-                stores = new FileStore[cluster.length];
-                for (int i = 0; i < cluster.length; i++) {
-                    stores[i] = new FileStore(
-                            new File(base, unique),
-                            maxFileSizeMB, cacheSizeMB, memoryMapping);
-                    Oak oak = new Oak(new SegmentNodeStore(stores[i]));
-                    cluster[i] = new Jcr(oak).createRepository();
-                }
-                return cluster;
-            }
-            @Override
-            public void tearDownCluster() {
-                super.tearDownCluster();
-                for (SegmentStore store : stores) {
-                    store.close();
-                }
-                FileUtils.deleteQuietly(new File(base, unique));
-            }
-        };
+    public static RepositoryFixture getMongoMK(String host, int port, String database,
+                                               boolean dropDBAfterTest, long cacheSize) {
+        return getMongo(OakFixture.OAK_MONGO_MK, true, host, port, database, dropDBAfterTest, cacheSize);
     }
 
-    private final String name;
+    public static RepositoryFixture getMongoNS(String host, int port, String database,
+                                               boolean dropDBAfterTest, long cacheSize) {
+        return getMongo(OakFixture.OAK_MONGO_NS, false, host, port, database, dropDBAfterTest, cacheSize);
+    }
 
-    protected final String unique;
+    private static RepositoryFixture getMongo(String name, boolean useMK,
+                                              String host, int port, String database,
+                                              boolean dropDBAfterTest, long cacheSize) {
+        return new OakRepositoryFixture(OakFixture.getMongo(name, useMK, host, port, database, dropDBAfterTest, cacheSize));
+    }
 
+    public static RepositoryFixture getTar(File base, int maxFileSizeMB, int cacheSizeMB, boolean memoryMapping) {
+        return new OakRepositoryFixture(OakFixture.getTar(base, maxFileSizeMB, cacheSizeMB, memoryMapping));
+    }
+
+    private final OakFixture oakFixture;
     private Repository[] cluster;
 
-    protected OakRepositoryFixture(String name) {
-        this.name = name;
-        this.unique = String.format("%s-%d", name, System.currentTimeMillis());
+    protected OakRepositoryFixture(OakFixture oakFixture) {
+        this.oakFixture = oakFixture;
     }
 
     @Override
@@ -244,7 +84,11 @@ public abstract class OakRepositoryFixtu
 
     @Override
     public final Repository[] setUpCluster(int n) throws Exception {
-        cluster = internalSetUpCluster(n);
+        Oak[] oaks = oakFixture.setUpCluster(n);
+        cluster = new Repository[oaks.length];
+        for (int i = 0; i < oaks.length; i++) {
+            cluster[i] = new Jcr(oaks[i]).createRepository();
+        }
         return cluster;
     }
 
@@ -262,12 +106,11 @@ public abstract class OakRepositoryFixtu
                 }
             }
         }
+        oakFixture.tearDownCluster();
     }
 
     @Override
     public String toString() {
-        return name;
+        return oakFixture.toString();
     }
-
-    protected abstract Repository[] internalSetUpCluster(int n) throws Exception;
 }
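
The refactoring above makes OakRepositoryFixture a thin JCR wrapper around OakFixture. A minimal sketch of how it might be used follows; it is not part of the commit, and the example class name, the 100 MB cache size, and the admin/admin credentials (Oak's usual defaults) are assumptions.

    import javax.jcr.Repository;
    import javax.jcr.Session;
    import javax.jcr.SimpleCredentials;

    import org.apache.jackrabbit.oak.fixture.OakRepositoryFixture;
    import org.apache.jackrabbit.oak.fixture.RepositoryFixture;

    public class RepositoryFixtureExample {

        public static void main(String[] args) throws Exception {
            long cacheSize = 100 * 1024 * 1024; // illustrative 100 MB cache

            // The static factory delegates to OakFixture.getMemory(...) internally.
            RepositoryFixture fixture = OakRepositoryFixture.getMemory(cacheSize);

            // setUpCluster builds the Oak instances and wraps each one with Jcr.
            Repository[] cluster = fixture.setUpCluster(1);
            try {
                Session session = cluster[0].login(
                        new SimpleCredentials("admin", "admin".toCharArray()));
                System.out.println("Logged in to " + fixture + " as " + session.getUserID());
                session.logout();
            } finally {
                fixture.tearDownCluster();
            }
        }
    }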

Modified: jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Main.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Main.java?rev=1576270&r1=1576269&r2=1576270&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Main.java (original)
+++ jackrabbit/oak/trunk/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Main.java Tue Mar 11 10:43:50 2014
@@ -31,17 +31,18 @@ import javax.jcr.Repository;
 
 import com.google.common.collect.Maps;
 import com.google.common.collect.Queues;
+import joptsimple.OptionParser;
+import joptsimple.OptionSet;
+import joptsimple.OptionSpec;
 import org.apache.jackrabbit.core.RepositoryContext;
 import org.apache.jackrabbit.core.config.RepositoryConfig;
-import org.apache.jackrabbit.mk.core.MicroKernelImpl;
 import org.apache.jackrabbit.oak.Oak;
 import org.apache.jackrabbit.oak.api.ContentRepository;
 import org.apache.jackrabbit.oak.benchmark.BenchmarkRunner;
+import org.apache.jackrabbit.oak.fixture.OakFixture;
 import org.apache.jackrabbit.oak.http.OakServlet;
 import org.apache.jackrabbit.oak.jcr.Jcr;
-import org.apache.jackrabbit.oak.kernel.KernelNodeStore;
 import org.apache.jackrabbit.oak.plugins.backup.FileStoreBackup;
-import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore;
 import org.apache.jackrabbit.oak.plugins.segment.Segment;
 import org.apache.jackrabbit.oak.plugins.segment.SegmentIdFactory;
 import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
@@ -63,6 +64,8 @@ public class Main {
     public static final int PORT = 8080;
     public static final String URI = "http://localhost:" + PORT + "/";
 
+    private static final int MB = 1024 * 1024;
+
     private Main() {
     }
 
@@ -237,30 +240,89 @@ public class Main {
         }
     }
 
-    private static void server(String uri, String[] args) throws Exception {
-        // TODO add support for different repo implementations (see fixtures for benchmarks)
-        Map<NodeStore, String> storeMap;
-        if (args.length == 0) {
-            System.out.println("Starting an in-memory repository");
-            System.out.println(uri + " -> [memory]");
-            NodeStore store = new MemoryNodeStore();
-            storeMap = Collections.singletonMap(store, "");
-        } else if (args.length == 1) {
-            System.out.println("Starting a standalone repository");
-            System.out.println(uri + " -> " + args[0]);
-            NodeStore store = new KernelNodeStore(new MicroKernelImpl(args[0]));
-            storeMap = Collections.singletonMap(store, "");
+    private static void server(String defaultUri, String[] args) throws Exception {
+        OptionParser parser = new OptionParser();
+
+        OptionSpec<Integer> cache = parser.accepts("cache", "cache size (MB)").withRequiredArg().ofType(Integer.class).defaultsTo(100);
+
+        // tar/h2 specific option
+        OptionSpec<File> base = parser.accepts("base", "Base directory").withRequiredArg().ofType(File.class);
+        OptionSpec<Boolean> mmap = parser.accepts("mmap", "TarMK memory mapping").withOptionalArg().ofType(Boolean.class).defaultsTo("64".equals(System.getProperty("sun.arch.data.model")));
+
+        // mongo specific options:
+        OptionSpec<String> host = parser.accepts("host", "MongoDB host").withRequiredArg().defaultsTo("localhost");
+        OptionSpec<Integer> port = parser.accepts("port", "MongoDB port").withRequiredArg().ofType(Integer.class).defaultsTo(27017);
+        OptionSpec<String> dbName = parser.accepts("db", "MongoDB database").withRequiredArg();
+        OptionSpec<Integer> clusterIds = parser.accepts("clusterIds", "Cluster Ids").withOptionalArg().ofType(Integer.class).withValuesSeparatedBy(',');
+
+        OptionSet options = parser.parse(args);
+
+        OakFixture oakFixture;
+
+        List<String> arglist = options.nonOptionArguments();
+        String uri = (arglist.isEmpty()) ? defaultUri : arglist.get(0);
+        String fix = (arglist.size() <= 1) ? OakFixture.OAK_MEMORY : arglist.get(1);
+
+        int cacheSize = cache.value(options);
+        List<Integer> cIds = Collections.emptyList();
+        if (fix.startsWith(OakFixture.OAK_MEMORY)) {
+            if (OakFixture.OAK_MEMORY_NS.equals(fix)) {
+                oakFixture = OakFixture.getMemoryNS(cacheSize * MB);
+            } else if (OakFixture.OAK_MEMORY_MK.equals(fix)) {
+                oakFixture = OakFixture.getMemoryMK(cacheSize * MB);
+            } else {
+                oakFixture = OakFixture.getMemory(cacheSize * MB);
+            }
+        } else if (fix.startsWith(OakFixture.OAK_MONGO)) {
+            cIds = clusterIds.values(options);
+            String db = dbName.value(options);
+            if (db == null) {
+                throw new IllegalArgumentException("Required argument db missing");
+            }
+            if (OakFixture.OAK_MONGO_NS.equals(fix)) {
+                oakFixture = OakFixture.getMongoNS(
+                        host.value(options), port.value(options),
+                        db, false,
+                        cacheSize * MB);
+            } else if (OakFixture.OAK_MONGO_MK.equals(fix)) {
+                oakFixture = OakFixture.getMongoMK(
+                        host.value(options), port.value(options),
+                        db, false, cacheSize * MB);
+            } else {
+                oakFixture = OakFixture.getMongo(
+                        host.value(options), port.value(options),
+                        db, false, cacheSize * MB);
+            }
+
+        } else if (fix.equals(OakFixture.OAK_TAR)) {
+            File baseFile = base.value(options);
+            if (baseFile == null) {
+                throw new IllegalArgumentException("Required argument base missing.");
+            }
+            oakFixture = OakFixture.getTar(baseFile, 256, cacheSize, mmap.value(options));
+        } else if (fix.equals(OakFixture.OAK_H2)) {
+            File baseFile = base.value(options);
+            if (baseFile == null) {
+                throw new IllegalArgumentException("Required argument base missing.");
+            }
+            oakFixture = OakFixture.getH2MK(baseFile, cacheSize * MB);
         } else {
-            System.out.println("Starting a clustered repository");
-            storeMap = new HashMap<NodeStore, String>(args.length);
-            for (int i = 0; i < args.length; i++) {
-                // FIXME: Use a clustered MicroKernel implementation
-                System.out.println(uri + "/node" + i + "/ -> " + args[i]);
-                KernelNodeStore store = new KernelNodeStore(new MicroKernelImpl(args[i]));
-                storeMap.put(store, "/node" + i);
+            throw new IllegalArgumentException("Unsupported repository setup " + fix);
+        }
+
+        Map<Oak, String> m;
+        if (cIds.isEmpty()) {
+            System.out.println("Starting " + oakFixture.toString() + " repository -> "
+ uri);
+            m = Collections.singletonMap(oakFixture.getOak(0), "");
+        } else {
+            System.out.println("Starting a clustered repository " + oakFixture.toString()
+ " -> " + uri);
+            m = new HashMap<Oak, String>(cIds.size());
+
+            for (int i = 0; i < cIds.size(); i++) {
+                m.put(oakFixture.getOak(i), "/node" + i);
             }
         }
-        new HttpServer(uri, storeMap);
+        new HttpServer(uri, m);
     }
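
A side note on the option handling above: --clusterIds relies on joptsimple's withValuesSeparatedBy to turn a comma-separated argument into a list of integers. A self-contained sketch of that parsing pattern (class name and output are illustrative, not part of the commit):

    import java.util.List;

    import joptsimple.OptionParser;
    import joptsimple.OptionSet;
    import joptsimple.OptionSpec;

    public class ClusterIdsParsingSketch {

        public static void main(String[] args) {
            OptionParser parser = new OptionParser();
            // Same pattern as Main.server(): an optional comma-separated list of integer ids.
            OptionSpec<Integer> clusterIds = parser.accepts("clusterIds", "Cluster Ids")
                    .withOptionalArg().ofType(Integer.class).withValuesSeparatedBy(',');

            // e.g. args = {"--clusterIds", "1,2,3"}
            OptionSet options = parser.parse(args);
            List<Integer> ids = clusterIds.values(options);
            System.out.println("Parsed cluster ids: " + ids);
        }
    }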
 
     public static class HttpServer {
@@ -270,10 +332,10 @@ public class Main {
         private final Server server;
 
         public HttpServer(String uri) throws Exception {
-            this(uri, Collections.singletonMap(new MemoryNodeStore(), ""));
+            this(uri, Collections.singletonMap(new Oak(), ""));
         }
 
-        public HttpServer(String uri, Map<? extends NodeStore, String> storeMap) throws
Exception {
+        public HttpServer(String uri, Map<Oak, String> oakMap) throws Exception {
             int port = java.net.URI.create(uri).getPort();
             if (port == -1) {
                 // use default
@@ -283,7 +345,7 @@ public class Main {
             context = new ServletContextHandler();
             context.setContextPath("/");
 
-            for (Map.Entry<? extends NodeStore, String> entry : storeMap.entrySet())
{
+            for (Map.Entry<Oak, String> entry : oakMap.entrySet()) {
                 addServlets(entry.getKey(), entry.getValue());
             }
 
@@ -300,8 +362,7 @@ public class Main {
             server.stop();
         }
 
-        private void addServlets(NodeStore store, String path) {
-            Oak oak = new Oak(store);
+        private void addServlets(Oak oak, String path) {
             Jcr jcr = new Jcr(oak);
 
             // 1 - OakServer


